mesh: sidecar proxy controller improvements (#19083)

This change builds on #19043 and #19067 and updates the sidecar proxy controller to use the computed resources introduced there. This has several benefits:

  * The cache is simplified, which fixes earlier bugs (for example, multiple Upstreams/Destinations targeting the same service would overwrite each other in the cache).
  * We no longer need the proxy config cache.
  * We no longer need to merge proxy configs as part of the controller logic.
  * Controller watches are simplified: we no longer need complex cache-based mapping and can instead use the simple ReplaceType mapper (see the sketch after this list).
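
As a rough illustration of what a ReplaceType-style mapper does, here is a minimal, self-contained Go sketch. The names below (resourceID, replaceType) are simplified stand-ins for illustration, not Consul's actual controller API; the point is only that an event for a watched resource maps directly to a reconcile request for the same-named resource of another type, with no cache lookup involved.

```go
package main

import "fmt"

// resourceID is a simplified, hypothetical stand-in for a resource identifier;
// the real controller framework uses protobuf IDs and watch dependencies.
type resourceID struct {
	Type string // e.g. "catalog.Service" or "mesh.ProxyStateTemplate"
	Name string
}

// replaceType returns a mapper that turns an event for one resource type into
// a reconcile request for the resource of the same name under another type.
// No cache lookup is needed; the mapping is a pure type substitution.
func replaceType(to string) func(resourceID) []resourceID {
	return func(id resourceID) []resourceID {
		return []resourceID{{Type: to, Name: id.Name}}
	}
}

func main() {
	mapServiceToProxy := replaceType("mesh.ProxyStateTemplate")
	reqs := mapServiceToProxy(resourceID{Type: "catalog.Service", Name: "api"})
	fmt.Println(reqs) // [{mesh.ProxyStateTemplate api}]
}
```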

It also includes several other improvements and refactors:

  * Unifies all caches into one. Originally the caches were fairly independent, but now that they need to interact with each other it makes sense to unify them, so the sidecar proxy controller uses a single cache composed of three bimappers.
  * Unifies the cache and the mappers. The mapper already needed access to all the caches, so with the cache unified it makes sense for the cache to do the mapping as well.
  * Gets rid of service endpoints watches. These were needed to pick up changes to a service's identities so that we could update the SPIFFE IDs in the proxy state template for those destinations. However, watching service endpoints generates a lot of reconcile requests for this controller, because endpoints objects change frequently (they contain workload health status). Instead, we add a status condition to the service object that tracks its "bound identities" and have the service endpoints controller keep it up to date. Because the sidecar proxy controller already watches service objects, updating the service's status is enough to trigger the updates it needs (see the sketch after this list).
  * Adds a watch on workloads. We need it to get updates when a workload's ports change; it also ensures that cached identities are updated when a workload's identity changes.
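
To make the bound-identities flow concrete, here is a minimal sketch using simplified stand-in types rather than the real protobuf API. It mirrors the intent of workloadIdentityStatusFromEndpoints in this change (with ServiceEndpoints.GetIdentities providing the deduplicated, sorted list in the real code): summarize the identities referenced by a service's endpoints into a status condition on the Service, which the sidecar proxy controller can observe through the Service watch it already has.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// Simplified stand-ins for the real protobuf types used by the controllers.
type endpoint struct{ Identity string }

type condition struct {
	Type    string
	State   string
	Message string
}

// boundIdentitiesCondition summarizes the workload identities referenced by a
// service's endpoints into a single status condition.
func boundIdentitiesCondition(eps []endpoint) condition {
	seen := map[string]struct{}{}
	for _, ep := range eps {
		if ep.Identity != "" {
			seen[ep.Identity] = struct{}{}
		}
	}
	if len(seen) == 0 {
		return condition{Type: "BoundIdentities", State: "FALSE",
			Message: "No associated workload identities found."}
	}
	ids := make([]string, 0, len(seen))
	for id := range seen {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	return condition{Type: "BoundIdentities", State: "TRUE",
		Message: fmt.Sprintf("Found workload identities associated with this service: %q.", strings.Join(ids, ","))}
}

func main() {
	// The service endpoints controller computes this condition and writes it to
	// the Service's status; the sidecar proxy controller, which already watches
	// Services, reconciles the affected proxies when the condition changes.
	cond := boundIdentitiesCondition([]endpoint{{Identity: "api"}, {Identity: "web"}, {Identity: "api"}})
	fmt.Println(cond.State, cond.Message)
}
```
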
Iryna Shustava 2023-10-12 13:20:13 -06:00 committed by GitHub
parent ad06c96456
commit 54a12ab3c9
66 changed files with 1426 additions and 3135 deletions

View File

@@ -34,6 +34,9 @@ var (
 EndpointsStatusConditionEndpointsManaged = endpoints.StatusConditionEndpointsManaged
 EndpointsStatusConditionManaged = endpoints.ConditionManaged
 EndpointsStatusConditionUnmanaged = endpoints.ConditionUnmanaged
+StatusConditionBoundIdentities = endpoints.StatusConditionBoundIdentities
+StatusReasonWorkloadIdentitiesFound = endpoints.StatusReasonWorkloadIdentitiesFound
+StatusReasonNoWorkloadIdentitiesFound = endpoints.StatusReasonNoWorkloadIdentitiesFound
 FailoverStatusKey = failover.StatusKey
 FailoverStatusConditionAccepted = failover.StatusConditionAccepted

View File

@@ -105,12 +105,12 @@ func (r *serviceEndpointsReconciler) Reconcile(ctx context.Context, rt controlle
 return err
 }
-var status *pbresource.Condition
+var statusConditions []*pbresource.Condition
 if serviceUnderManagement(serviceData.service) {
 rt.Logger.Trace("service is enabled for automatic endpoint management")
 // This service should have its endpoints automatically managed
-status = ConditionManaged
+statusConditions = append(statusConditions, ConditionManaged)
 // Inform the WorkloadMapper to track this service and its selectors. So
 // future workload updates that would be matched by the services selectors
@@ -133,6 +133,12 @@ func (r *serviceEndpointsReconciler) Reconcile(ctx context.Context, rt controlle
 // Calculate the latest endpoints from the already gathered workloads
 latestEndpoints := workloadsToEndpoints(serviceData.service, workloadData)
+// Add status
+if endpointsData != nil {
+statusConditions = append(statusConditions,
+workloadIdentityStatusFromEndpoints(latestEndpoints))
+}
 // Before writing the endpoints actually check to see if they are changed
 if endpointsData == nil || !proto.Equal(endpointsData.endpoints, latestEndpoints) {
 rt.Logger.Trace("endpoints have changed")
@@ -168,7 +174,7 @@ func (r *serviceEndpointsReconciler) Reconcile(ctx context.Context, rt controlle
 } else {
 rt.Logger.Trace("endpoints are not being automatically managed")
 // This service is not having its endpoints automatically managed
-status = ConditionUnmanaged
+statusConditions = append(statusConditions, ConditionUnmanaged)
 // Inform the WorkloadMapper that it no longer needs to track this service
 // as it is no longer under endpoint management
@@ -203,9 +209,7 @@ func (r *serviceEndpointsReconciler) Reconcile(ctx context.Context, rt controlle
 // for that object existing or not.
 newStatus := &pbresource.Status{
 ObservedGeneration: serviceData.resource.Generation,
-Conditions: []*pbresource.Condition{
-status,
-},
+Conditions: statusConditions,
 }
 // If the status is unchanged then we should return and avoid the unnecessary write
 if resource.EqualStatus(serviceData.resource.Status[StatusKey], newStatus, false) {
@@ -388,3 +392,13 @@ func workloadToEndpoint(svc *pbcatalog.Service, data *workloadData) *pbcatalog.E
 Identity: data.workload.Identity,
 }
 }
+
+func workloadIdentityStatusFromEndpoints(endpoints *pbcatalog.ServiceEndpoints) *pbresource.Condition {
+identities := endpoints.GetIdentities()
+
+if len(identities) > 0 {
+return ConditionIdentitiesFound(identities)
+}
+
+return ConditionIdentitiesNotFound
+}

View File

@@ -388,6 +388,50 @@ func TestDetermineWorkloadHealth(t *testing.T) {
 }
 }
+
+func TestWorkloadIdentityStatusFromEndpoints(t *testing.T) {
+cases := map[string]struct {
+endpoints *pbcatalog.ServiceEndpoints
+expStatus *pbresource.Condition
+}{
+"endpoints are nil": {
+expStatus: ConditionIdentitiesNotFound,
+},
+"endpoints without identities": {
+endpoints: &pbcatalog.ServiceEndpoints{},
+expStatus: ConditionIdentitiesNotFound,
+},
+"endpoints with identities": {
+endpoints: &pbcatalog.ServiceEndpoints{
+Endpoints: []*pbcatalog.Endpoint{
+{
+Identity: "foo",
+},
+},
+},
+expStatus: ConditionIdentitiesFound([]string{"foo"}),
+},
+"endpoints with multiple identities": {
+endpoints: &pbcatalog.ServiceEndpoints{
+Endpoints: []*pbcatalog.Endpoint{
+{
+Identity: "foo",
+},
+{
+Identity: "bar",
+},
+},
+},
+expStatus: ConditionIdentitiesFound([]string{"bar", "foo"}),
+},
+}
+
+for name, c := range cases {
+t.Run(name, func(t *testing.T) {
+prototest.AssertDeepEqual(t, c.expStatus, workloadIdentityStatusFromEndpoints(c.endpoints))
+})
+}
+}

 type controllerSuite struct {
 suite.Suite
@@ -646,6 +690,7 @@ func (suite *controllerSuite) TestController() {
 res := suite.client.WaitForReconciliation(suite.T(), service.Id, StatusKey)
 // Check that the services status was updated accordingly
 rtest.RequireStatusCondition(suite.T(), res, StatusKey, ConditionManaged)
+rtest.RequireStatusCondition(suite.T(), res, StatusKey, ConditionIdentitiesNotFound)
 // Check that the endpoints resource exists and contains 0 endpoints
 endpointsID := rtest.Resource(pbcatalog.ServiceEndpointsType, "api").ID()
@@ -665,6 +710,9 @@ func (suite *controllerSuite) TestController() {
 }).
 Write(suite.T(), suite.client)
+
+suite.client.WaitForStatusCondition(suite.T(), service.Id, StatusKey,
+ConditionIdentitiesFound([]string{"api"}))
 // Wait for the endpoints to be regenerated
 endpoints = suite.client.WaitForNewVersion(suite.T(), endpointsID, endpoints.Version)
@@ -713,6 +761,34 @@ func (suite *controllerSuite) TestController() {
 Identity: "api",
 })
+
+// Update workload identity and check that the status on the service is updated
+workload = rtest.Resource(pbcatalog.WorkloadType, "api-1").
+WithData(suite.T(), &pbcatalog.Workload{
+Addresses: []*pbcatalog.WorkloadAddress{{Host: "127.0.0.1"}},
+Ports: map[string]*pbcatalog.WorkloadPort{
+"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
+"grpc": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
+},
+Identity: "endpoints-api-identity",
+}).
+Write(suite.T(), suite.client)
+suite.client.WaitForStatusCondition(suite.T(), service.Id, StatusKey, ConditionIdentitiesFound([]string{"endpoints-api-identity"}))
+
+// Verify that the generated endpoints now contain the workload
+endpoints = suite.client.WaitForNewVersion(suite.T(), endpointsID, endpoints.Version)
+suite.requireEndpoints(endpoints, &pbcatalog.Endpoint{
+TargetRef: workload.Id,
+Addresses: []*pbcatalog.WorkloadAddress{
+{Host: "127.0.0.1", Ports: []string{"http"}},
+},
+Ports: map[string]*pbcatalog.WorkloadPort{
+"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
+},
+HealthStatus: pbcatalog.Health_HEALTH_PASSING,
+Identity: "endpoints-api-identity",
+})
+
 // rewrite the service to add more selection criteria. This should trigger
 // reconciliation but shouldn't result in updating the endpoints because
 // the actual list of currently selected workloads has not changed

View File

@@ -3,7 +3,12 @@
 package endpoints
-import "github.com/hashicorp/consul/proto-public/pbresource"
+import (
+"fmt"
+"strings"
+
+"github.com/hashicorp/consul/proto-public/pbresource"
+)
 const (
 StatusKey = "consul.io/endpoint-manager"
@@ -12,8 +17,16 @@ const (
 StatusReasonSelectorNotFound = "SelectorNotFound"
 StatusReasonSelectorFound = "SelectorFound"
-SelectorFoundMessage = "A valid workload selector is present within the service."
-SelectorNotFoundMessage = "Either the workload selector was not present or contained no selection criteria."
+selectorFoundMessage = "A valid workload selector is present within the service."
+selectorNotFoundMessage = "Either the workload selector was not present or contained no selection criteria."
+
+StatusConditionBoundIdentities = "BoundIdentities"
+StatusReasonWorkloadIdentitiesFound = "WorkloadIdentitiesFound"
+StatusReasonNoWorkloadIdentitiesFound = "NoWorkloadIdentitiesFound"
+
+identitiesFoundMessageFormat = "Found workload identities associated with this service: %q."
+identitiesNotFoundChangedMessage = "No associated workload identities found."
 )
 var (
@@ -21,13 +34,29 @@
 Type: StatusConditionEndpointsManaged,
 State: pbresource.Condition_STATE_TRUE,
 Reason: StatusReasonSelectorFound,
-Message: SelectorFoundMessage,
+Message: selectorFoundMessage,
 }
 ConditionUnmanaged = &pbresource.Condition{
 Type: StatusConditionEndpointsManaged,
 State: pbresource.Condition_STATE_FALSE,
 Reason: StatusReasonSelectorNotFound,
-Message: SelectorNotFoundMessage,
+Message: selectorNotFoundMessage,
+}
+
+ConditionIdentitiesNotFound = &pbresource.Condition{
+Type: StatusConditionBoundIdentities,
+State: pbresource.Condition_STATE_FALSE,
+Reason: StatusReasonNoWorkloadIdentitiesFound,
+Message: identitiesNotFoundChangedMessage,
 }
 )
+
+func ConditionIdentitiesFound(identities []string) *pbresource.Condition {
+return &pbresource.Condition{
+Type: StatusConditionBoundIdentities,
+State: pbresource.Condition_STATE_TRUE,
+Reason: StatusReasonWorkloadIdentitiesFound,
+Message: fmt.Sprintf(identitiesFoundMessageFormat, strings.Join(identities, ",")),
+}
+}

View File

@@ -8,7 +8,6 @@ import (
 "github.com/hashicorp/consul/internal/mesh/internal/controllers"
 "github.com/hashicorp/consul/internal/mesh/internal/controllers/routes"
 "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy"
-"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status"
 "github.com/hashicorp/consul/internal/mesh/internal/types"
 "github.com/hashicorp/consul/internal/resource"
 )
@@ -16,16 +15,6 @@ import (
 var (
 // Controller statuses.
-// Sidecar-proxy controller.
-SidecarProxyStatusKey = sidecarproxy.ControllerName
-SidecarProxyStatusConditionMeshDestination = status.StatusConditionDestinationAccepted
-SidecarProxyStatusReasonNonMeshDestination = status.StatusReasonMeshProtocolNotFound
-SidecarProxyStatusReasonMeshDestination = status.StatusReasonMeshProtocolFound
-SidecarProxyStatusReasonDestinationServiceNotFound = status.StatusReasonDestinationServiceNotFound
-SidecarProxyStatusReasonDestinationServiceFound = status.StatusReasonDestinationServiceFound
-SidecarProxyStatusReasonMeshProtocolDestinationPort = status.StatusReasonMeshProtocolDestinationPort
-SidecarProxyStatusReasonNonMeshProtocolDestinationPort = status.StatusReasonNonMeshProtocolDestinationPort
-
 // Routes controller
 RoutesStatusKey = routes.StatusKey
 RoutesStatusConditionAccepted = routes.StatusConditionAccepted

View File

@ -1,43 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/mappers/bimapper"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
type ComputedRoutesCache struct {
mapper *bimapper.Mapper
}
func NewComputedRoutesCache() *ComputedRoutesCache {
return &ComputedRoutesCache{
mapper: bimapper.New(pbmesh.ComputedRoutesType, pbcatalog.ServiceType),
}
}
func (c *ComputedRoutesCache) TrackComputedRoutes(computedRoutes *types.DecodedComputedRoutes) {
var serviceRefs []resource.ReferenceOrID
for _, pcr := range computedRoutes.Data.PortedConfigs {
for _, details := range pcr.Targets {
serviceRefs = append(serviceRefs, details.BackendRef.Ref)
}
}
c.mapper.TrackItem(computedRoutes.Resource.Id, serviceRefs)
}
func (c *ComputedRoutesCache) UntrackComputedRoutes(computedRoutesID *pbresource.ID) {
c.mapper.UntrackItem(computedRoutesID)
}
func (c *ComputedRoutesCache) ComputedRoutesByService(id resource.ReferenceOrID) []*pbresource.ID {
return c.mapper.ItemIDsForLink(id)
}

View File

@ -1,248 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"sync"
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// DestinationsCache stores information needed for the sidecar-proxy controller to reconcile efficiently.
// This currently means storing a list of all destinations for easy look up
// as well as indices of source proxies where those destinations are referenced.
//
// It is the responsibility of the controller and its subcomponents (like mapper and data fetcher)
// to keep this cache up-to-date as we're observing new data.
type DestinationsCache struct {
lock sync.RWMutex
// store is a map from destination service reference and port as a reference key
// to the object representing destination reference.
store map[ReferenceKeyWithPort]intermediate.CombinedDestinationRef
storedPorts map[resource.ReferenceKey]map[string]struct{}
// sourceProxiesIndex stores a map from a reference key of source proxy IDs
// to the keys in the store map.
sourceProxiesIndex map[resource.ReferenceKey]storeKeys
}
type storeKeys map[ReferenceKeyWithPort]struct{}
func NewDestinationsCache() *DestinationsCache {
return &DestinationsCache{
store: make(map[ReferenceKeyWithPort]intermediate.CombinedDestinationRef),
storedPorts: make(map[resource.ReferenceKey]map[string]struct{}),
sourceProxiesIndex: make(map[resource.ReferenceKey]storeKeys),
}
}
type ReferenceKeyWithPort struct {
resource.ReferenceKey
port string
}
func KeyFromRefAndPort(ref *pbresource.Reference, port string) ReferenceKeyWithPort {
refKey := resource.NewReferenceKey(ref)
return ReferenceKeyWithPort{refKey, port}
}
// WriteDestination adds destination reference to the cache.
func (c *DestinationsCache) WriteDestination(d intermediate.CombinedDestinationRef) {
// Check that reference is a catalog.Service type.
if !resource.EqualType(pbcatalog.ServiceType, d.ServiceRef.Type) {
panic("ref must of type catalog.Service")
}
// Also, check that explicit destination reference is a mesh.Upstreams type.
if d.ExplicitDestinationsID != nil &&
!resource.EqualType(pbmesh.DestinationsType, d.ExplicitDestinationsID.Type) {
panic("ExplicitDestinationsID must be of type mesh.Upstreams")
}
c.lock.Lock()
defer c.lock.Unlock()
c.deleteLocked(d.ServiceRef, d.Port)
c.addLocked(d)
}
// DeleteDestination deletes a given destination reference and port from cache.
func (c *DestinationsCache) DeleteDestination(ref *pbresource.Reference, port string) {
// Check that reference is a catalog.Service type.
if !resource.EqualType(pbcatalog.ServiceType, ref.Type) {
panic("ref must of type catalog.Service")
}
c.lock.Lock()
defer c.lock.Unlock()
c.deleteLocked(ref, port)
}
func (c *DestinationsCache) addLocked(d intermediate.CombinedDestinationRef) {
key := KeyFromRefAndPort(d.ServiceRef, d.Port)
c.store[key] = d
c.addPortLocked(d.ServiceRef, d.Port)
// Update source proxies index.
for proxyRef := range d.SourceProxies {
_, ok := c.sourceProxiesIndex[proxyRef]
if !ok {
c.sourceProxiesIndex[proxyRef] = make(storeKeys)
}
c.sourceProxiesIndex[proxyRef][key] = struct{}{}
}
}
func (c *DestinationsCache) addPortLocked(ref *pbresource.Reference, port string) {
rk := resource.NewReferenceKey(ref)
m, ok := c.storedPorts[rk]
if !ok {
m = make(map[string]struct{})
c.storedPorts[rk] = m
}
m[port] = struct{}{}
}
func (c *DestinationsCache) deleteLocked(ref *pbresource.Reference, port string) {
key := KeyFromRefAndPort(ref, port)
// First get it from the store.
dest, ok := c.store[key]
if !ok {
// If it's not there, return as there's nothing for us to.
return
}
// Update source proxies indices.
for proxyRef := range dest.SourceProxies {
// Delete our destination key from this source proxy.
delete(c.sourceProxiesIndex[proxyRef], key)
}
// Finally, delete this destination from the store.
delete(c.store, key)
c.deletePortLocked(ref, port)
}
func (c *DestinationsCache) deletePortLocked(ref *pbresource.Reference, port string) {
rk := resource.NewReferenceKey(ref)
m, ok := c.storedPorts[rk]
if !ok {
return
}
delete(m, port)
if len(m) == 0 {
delete(c.storedPorts, rk)
}
}
// DeleteSourceProxy deletes the source proxy given by id from the cache.
func (c *DestinationsCache) DeleteSourceProxy(id *pbresource.ID) {
// Check that id is the ProxyStateTemplate type.
if !resource.EqualType(pbmesh.ProxyStateTemplateType, id.Type) {
panic("id must of type mesh.ProxyStateTemplate")
}
c.lock.Lock()
defer c.lock.Unlock()
proxyIDKey := resource.NewReferenceKey(id)
// Get all destination keys.
destKeys := c.sourceProxiesIndex[proxyIDKey]
for destKey := range destKeys {
// Read destination.
dest, ok := c.store[destKey]
if !ok {
// If there's no destination with that key, skip it as there's nothing for us to do.
continue
}
// Delete the source proxy ID.
delete(dest.SourceProxies, proxyIDKey)
}
// Finally, delete the index for this proxy.
delete(c.sourceProxiesIndex, proxyIDKey)
}
// ReadDestination returns a destination reference for the given service reference and port.
func (c *DestinationsCache) ReadDestination(ref *pbresource.Reference, port string) (intermediate.CombinedDestinationRef, bool) {
// Check that reference is a catalog.Service type.
if !resource.EqualType(pbcatalog.ServiceType, ref.Type) {
panic("ref must of type catalog.Service")
}
c.lock.RLock()
defer c.lock.RUnlock()
key := KeyFromRefAndPort(ref, port)
d, found := c.store[key]
return d, found
}
func (c *DestinationsCache) ReadDestinationsByServiceAllPorts(ref *pbresource.Reference) []intermediate.CombinedDestinationRef {
// Check that reference is a catalog.Service type.
if !resource.EqualType(pbcatalog.ServiceType, ref.Type) {
panic("ref must of type catalog.Service")
}
c.lock.RLock()
defer c.lock.RUnlock()
rk := resource.NewReferenceKey(ref)
ports, ok := c.storedPorts[rk]
if !ok {
return nil
}
var destinations []intermediate.CombinedDestinationRef
for port := range ports {
key := KeyFromRefAndPort(ref, port)
d, found := c.store[key]
if found {
destinations = append(destinations, d)
}
}
return destinations
}
// DestinationsBySourceProxy returns all destinations that are a referenced by the given source proxy id.
func (c *DestinationsCache) DestinationsBySourceProxy(id *pbresource.ID) []intermediate.CombinedDestinationRef {
// Check that id is the ProxyStateTemplate type.
if !resource.EqualType(pbmesh.ProxyStateTemplateType, id.Type) {
panic("id must of type mesh.ProxyStateTemplate")
}
c.lock.RLock()
defer c.lock.RUnlock()
var destinations []intermediate.CombinedDestinationRef
proxyIDKey := resource.NewReferenceKey(id)
for destKey := range c.sourceProxiesIndex[proxyIDKey] {
destinations = append(destinations, c.store[destKey])
}
return destinations
}

View File

@ -1,242 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestWrite_Create(t *testing.T) {
cache := NewDestinationsCache()
proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
destination := testDestination(proxyID)
cache.WriteDestination(destination)
destKey := KeyFromRefAndPort(destination.ServiceRef, destination.Port)
require.Equal(t, destination, cache.store[destKey])
actualSourceProxies := cache.sourceProxiesIndex
expectedSourceProxies := map[resource.ReferenceKey]storeKeys{
resource.NewReferenceKey(proxyID): {destKey: struct{}{}},
}
require.Equal(t, expectedSourceProxies, actualSourceProxies)
// Check that we can read back the destination successfully.
actualDestination, found := cache.ReadDestination(destination.ServiceRef, destination.Port)
require.True(t, found)
require.Equal(t, destination, actualDestination)
}
func TestWrite_Update(t *testing.T) {
cache := NewDestinationsCache()
proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
destination1 := testDestination(proxyID)
cache.WriteDestination(destination1)
// Add another destination for the same proxy ID.
destination2 := testDestination(proxyID)
destination2.ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "test-service-2").ReferenceNoSection()
cache.WriteDestination(destination2)
// Check that the source proxies are updated.
actualSourceProxies := cache.sourceProxiesIndex
expectedSourceProxies := map[resource.ReferenceKey]storeKeys{
resource.NewReferenceKey(proxyID): {
KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{},
KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{},
},
}
require.Equal(t, expectedSourceProxies, actualSourceProxies)
// Add another destination for a different proxy.
anotherProxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-def").ID()
destination3 := testDestination(anotherProxyID)
destination3.ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "test-service-3").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection()
cache.WriteDestination(destination3)
actualSourceProxies = cache.sourceProxiesIndex
expectedSourceProxies = map[resource.ReferenceKey]storeKeys{
resource.NewReferenceKey(proxyID): {
KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{},
KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{},
},
resource.NewReferenceKey(anotherProxyID): {
KeyFromRefAndPort(destination3.ServiceRef, destination3.Port): struct{}{},
},
}
require.Equal(t, expectedSourceProxies, actualSourceProxies)
// Overwrite the proxy id completely.
destination1.SourceProxies = map[resource.ReferenceKey]struct{}{resource.NewReferenceKey(anotherProxyID): {}}
cache.WriteDestination(destination1)
actualSourceProxies = cache.sourceProxiesIndex
expectedSourceProxies = map[resource.ReferenceKey]storeKeys{
resource.NewReferenceKey(proxyID): {
KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{},
},
resource.NewReferenceKey(anotherProxyID): {
KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{},
KeyFromRefAndPort(destination3.ServiceRef, destination3.Port): struct{}{},
},
}
require.Equal(t, expectedSourceProxies, actualSourceProxies)
}
func TestWrite_Delete(t *testing.T) {
cache := NewDestinationsCache()
proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
destination1 := testDestination(proxyID)
cache.WriteDestination(destination1)
// Add another destination for the same proxy ID.
destination2 := testDestination(proxyID)
destination2.ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "test-service-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection()
cache.WriteDestination(destination2)
cache.DeleteDestination(destination1.ServiceRef, destination1.Port)
require.NotContains(t, cache.store, KeyFromRefAndPort(destination1.ServiceRef, destination1.Port))
// Check that the source proxies are updated.
actualSourceProxies := cache.sourceProxiesIndex
expectedSourceProxies := map[resource.ReferenceKey]storeKeys{
resource.NewReferenceKey(proxyID): {
KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{},
},
}
require.Equal(t, expectedSourceProxies, actualSourceProxies)
// Try to delete non-existing destination and check that nothing has changed..
cache.DeleteDestination(
resourcetest.Resource(pbcatalog.ServiceType, "does-not-exist").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection(),
"doesn't-matter")
require.Contains(t, cache.store, KeyFromRefAndPort(destination2.ServiceRef, destination2.Port))
require.Equal(t, expectedSourceProxies, cache.sourceProxiesIndex)
}
func TestDeleteSourceProxy(t *testing.T) {
cache := NewDestinationsCache()
proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
destination1 := testDestination(proxyID)
cache.WriteDestination(destination1)
// Add another destination for the same proxy ID.
destination2 := testDestination(proxyID)
destination2.ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "test-service-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection()
cache.WriteDestination(destination2)
cache.DeleteSourceProxy(proxyID)
// Check that source proxy index is gone.
proxyKey := resource.NewReferenceKey(proxyID)
require.NotContains(t, cache.sourceProxiesIndex, proxyKey)
// Check that the destinations no longer have this proxy as the source.
require.NotContains(t, destination1.SourceProxies, proxyKey)
require.NotContains(t, destination2.SourceProxies, proxyKey)
// Try to add a non-existent key to source proxy index
cache.sourceProxiesIndex[proxyKey] = map[ReferenceKeyWithPort]struct{}{
{port: "doesn't-matter"}: {}}
cache.DeleteSourceProxy(proxyID)
// Check that source proxy index is gone.
require.NotContains(t, cache.sourceProxiesIndex, proxyKey)
// Check that the destinations no longer have this proxy as the source.
require.NotContains(t, destination1.SourceProxies, proxyKey)
require.NotContains(t, destination2.SourceProxies, proxyKey)
}
func TestDestinationsBySourceProxy(t *testing.T) {
cache := NewDestinationsCache()
proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
destination1 := testDestination(proxyID)
cache.WriteDestination(destination1)
// Add another destination for the same proxy ID.
destination2 := testDestination(proxyID)
destination2.ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "test-service-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection()
cache.WriteDestination(destination2)
actualDestinations := cache.DestinationsBySourceProxy(proxyID)
expectedDestinations := []intermediate.CombinedDestinationRef{destination1, destination2}
require.ElementsMatch(t, expectedDestinations, actualDestinations)
}
func TestReadDestinationsByServiceAllPorts(t *testing.T) {
cache := NewDestinationsCache()
proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
// test-service@tcp
destination1 := testDestination(proxyID)
cache.WriteDestination(destination1)
// test-service@tcp2
destination2 := testDestination(proxyID)
destination2.Port = "tcp2"
cache.WriteDestination(destination2)
// other-service@tcp
destination3 := testDestination(proxyID)
destination3.ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "other-service").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection()
cache.WriteDestination(destination3)
t.Run("test-service referenced by two ports", func(t *testing.T) {
dests := cache.ReadDestinationsByServiceAllPorts(destination1.ServiceRef)
require.Len(t, dests, 2)
prototest.AssertElementsMatch(t, []intermediate.CombinedDestinationRef{
destination1, destination2,
}, dests)
})
t.Run("other-service referenced by one port", func(t *testing.T) {
dests := cache.ReadDestinationsByServiceAllPorts(destination3.ServiceRef)
require.Len(t, dests, 1)
prototest.AssertElementsMatch(t, []intermediate.CombinedDestinationRef{
destination3,
}, dests)
})
}
func testDestination(proxyID *pbresource.ID) intermediate.CombinedDestinationRef {
return intermediate.CombinedDestinationRef{
ServiceRef: resourcetest.Resource(pbcatalog.ServiceType, "test-service").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection(),
Port: "tcp",
ExplicitDestinationsID: resourcetest.Resource(pbmesh.DestinationsType, "test-servicedestinations").
WithTenancy(resource.DefaultNamespacedTenancy()).ID(),
SourceProxies: map[resource.ReferenceKey]struct{}{
resource.NewReferenceKey(proxyID): {},
},
}
}

View File

@ -1,38 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/mappers/bimapper"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// IdentitiesCache tracks mappings between workload identities and proxy IDs
// that a configuration applies to. It is the responsibility of the controller to
// keep this cache up-to-date.
type IdentitiesCache struct {
mapper *bimapper.Mapper
}
func NewIdentitiesCache() *IdentitiesCache {
return &IdentitiesCache{
mapper: bimapper.New(pbmesh.ProxyStateTemplateType, pbauth.WorkloadIdentityType),
}
}
func (c *IdentitiesCache) ProxyIDsByWorkloadIdentity(id *pbresource.ID) []*pbresource.ID {
return c.mapper.ItemIDsForLink(id)
}
func (c *IdentitiesCache) TrackPair(identityID *pbresource.ID, proxyID *pbresource.ID) {
c.mapper.TrackItem(proxyID, []resource.ReferenceOrID{identityID})
}
// UntrackProxyID removes tracking for the given proxy state template ID.
func (c *IdentitiesCache) UntrackProxyID(proxyID *pbresource.ID) {
c.mapper.UntrackItem(proxyID)
}

View File

@ -1,59 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
func TestIdentitiesCache(t *testing.T) {
cache := NewIdentitiesCache()
identityID1 := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
identityID2 := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxyID1 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxyID2 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
// Empty cache
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID1))
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID2))
// Insert value and fetch it.
cache.TrackPair(identityID1, proxyID1)
require.Equal(t, []*pbresource.ID{proxyID1}, cache.ProxyIDsByWorkloadIdentity(identityID1))
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID2))
// Insert another value referencing the same identity.
cache.TrackPair(identityID1, proxyID2)
require.ElementsMatch(t, []*pbresource.ID{proxyID1, proxyID2}, cache.ProxyIDsByWorkloadIdentity(identityID1))
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID2))
// Now proxy 1 uses identity 2
cache.TrackPair(identityID2, proxyID1)
require.Equal(t, []*pbresource.ID{proxyID1}, cache.ProxyIDsByWorkloadIdentity(identityID2))
require.Equal(t, []*pbresource.ID{proxyID2}, cache.ProxyIDsByWorkloadIdentity(identityID1))
// Untrack proxy 2
cache.UntrackProxyID(proxyID2)
require.Equal(t, []*pbresource.ID{proxyID1}, cache.ProxyIDsByWorkloadIdentity(identityID2))
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID1))
// Untrack proxy 1
cache.UntrackProxyID(proxyID1)
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID2))
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID1))
}

View File

@ -1,44 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/mappers/bimapper"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// ProxyConfigurationCache tracks mappings between proxy configurations and proxy IDs
// that a configuration applies to. It is the responsibility of the controller to
// keep this cache up-to-date.
type ProxyConfigurationCache struct {
mapper *bimapper.Mapper
}
func NewProxyConfigurationCache() *ProxyConfigurationCache {
return &ProxyConfigurationCache{
mapper: bimapper.New(pbmesh.ProxyConfigurationType, pbmesh.ProxyStateTemplateType),
}
}
// ProxyConfigurationsByProxyID returns proxy configuration IDs given the id of the proxy state template.
func (c *ProxyConfigurationCache) ProxyConfigurationsByProxyID(id *pbresource.ID) []*pbresource.ID {
return c.mapper.ItemIDsForLink(id)
}
// TrackProxyConfiguration tracks given proxy configuration ID and the linked proxy state template IDs.
func (c *ProxyConfigurationCache) TrackProxyConfiguration(proxyCfgID *pbresource.ID, proxyIDs []resource.ReferenceOrID) {
c.mapper.TrackItem(proxyCfgID, proxyIDs)
}
// UntrackProxyConfiguration removes tracking for the given proxy configuration ID.
func (c *ProxyConfigurationCache) UntrackProxyConfiguration(proxyCfgID *pbresource.ID) {
c.mapper.UntrackItem(proxyCfgID)
}
// UntrackProxyID removes tracking for the given proxy state template ID.
func (c *ProxyConfigurationCache) UntrackProxyID(proxyID *pbresource.ID) {
c.mapper.UntrackLink(proxyID)
}

View File

@ -1,80 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestProxyConfigurationCache(t *testing.T) {
cache := NewProxyConfigurationCache()
// Create some proxy configurations.
proxyCfg1 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "test-cfg-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxyCfg2 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "test-cfg-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxyCfg3 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "test-cfg-3").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
// Create some proxy state templates.
p1 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "w-111").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
p2 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "w-222").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
p3 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "w-333").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
p4 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "w-444").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
p5 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "w-555").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
// Track these and make sure there's some overlap.
cache.TrackProxyConfiguration(proxyCfg1, []resource.ReferenceOrID{p1, p2, p4})
cache.TrackProxyConfiguration(proxyCfg2, []resource.ReferenceOrID{p3, p4, p5})
cache.TrackProxyConfiguration(proxyCfg3, []resource.ReferenceOrID{p1, p3})
// Read proxy configurations by proxy.
requireProxyConfigurations(t, cache, p1, proxyCfg1, proxyCfg3)
requireProxyConfigurations(t, cache, p2, proxyCfg1)
requireProxyConfigurations(t, cache, p3, proxyCfg2, proxyCfg3)
requireProxyConfigurations(t, cache, p4, proxyCfg1, proxyCfg2)
requireProxyConfigurations(t, cache, p5, proxyCfg2)
// Untrack some proxy IDs.
cache.UntrackProxyID(p1)
requireProxyConfigurations(t, cache, p1)
// Untrack some proxy IDs.
cache.UntrackProxyID(p3)
requireProxyConfigurations(t, cache, p3)
// Untrack proxy cfg.
cache.UntrackProxyConfiguration(proxyCfg1)
requireProxyConfigurations(t, cache, p1) // no-op because we untracked it earlier
requireProxyConfigurations(t, cache, p2)
requireProxyConfigurations(t, cache, p3) // no-op because we untracked it earlier
requireProxyConfigurations(t, cache, p4, proxyCfg2)
requireProxyConfigurations(t, cache, p5, proxyCfg2)
}
func requireProxyConfigurations(t *testing.T, cache *ProxyConfigurationCache, proxyID *pbresource.ID, proxyCfgs ...*pbresource.ID) {
t.Helper()
actualProxyCfgs := cache.ProxyConfigurationsByProxyID(proxyID)
require.Len(t, actualProxyCfgs, len(proxyCfgs))
prototest.AssertElementsMatch(t, proxyCfgs, actualProxyCfgs)
}

View File

@@ -24,6 +24,7 @@ import (
 pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
 "github.com/hashicorp/consul/proto-public/pbresource"
 "github.com/hashicorp/consul/proto/private/prototest"
+"github.com/hashicorp/consul/sdk/iptables"
 "github.com/hashicorp/consul/sdk/testutil"
 "github.com/hashicorp/consul/sdk/testutil/retry"
 )
@@ -103,6 +104,7 @@ func (suite *controllerTestSuite) SetupTest() {
 suite.expComputedProxyCfg = &pbmesh.ComputedProxyConfiguration{
 DynamicConfig: &pbmesh.DynamicConfig{
 Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
+TransparentProxy: &pbmesh.TransparentProxy{OutboundListenerPort: iptables.DefaultTProxyOutboundPort},
 LocalConnection: map[string]*pbmesh.ConnectionConfig{
 "tcp": {ConnectTimeout: durationpb.New(2 * time.Second)},
 },
@@ -265,6 +267,7 @@ func (suite *controllerTestSuite) TestController() {
 expProxyCfg := &pbmesh.ComputedProxyConfiguration{
 DynamicConfig: &pbmesh.DynamicConfig{
 Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
+TransparentProxy: &pbmesh.TransparentProxy{OutboundListenerPort: iptables.DefaultTProxyOutboundPort},
 },
 BootstrapConfig: &pbmesh.BootstrapConfig{
 PrometheusBindAddr: "0.0.0.0:9000",

View File

@@ -8,14 +8,13 @@ import (
 "github.com/hashicorp/consul/agent/leafcert"
 "github.com/hashicorp/consul/internal/controller"
-"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
 "github.com/hashicorp/consul/internal/mesh/internal/controllers/explicitdestinations"
 "github.com/hashicorp/consul/internal/mesh/internal/controllers/explicitdestinations/mapper"
 "github.com/hashicorp/consul/internal/mesh/internal/controllers/proxyconfiguration"
 "github.com/hashicorp/consul/internal/mesh/internal/controllers/routes"
 "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy"
+"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache"
 "github.com/hashicorp/consul/internal/mesh/internal/controllers/xds"
-"github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper"
 "github.com/hashicorp/consul/internal/mesh/internal/mappers/workloadselectionmapper"
 "github.com/hashicorp/consul/internal/resource/mappers/bimapper"
 pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
@@ -41,15 +40,8 @@ func Register(mgr *controller.Manager, deps Dependencies) {
 }
 mgr.Register(xds.Controller(endpointsMapper, deps.ProxyUpdater, deps.TrustBundleFetcher, deps.LeafCertManager, leafMapper, leafCancels, deps.LocalDatacenter))
-var (
-destinationsCache = sidecarproxycache.NewDestinationsCache()
-proxyCfgCache = sidecarproxycache.NewProxyConfigurationCache()
-computedRoutesCache = sidecarproxycache.NewComputedRoutesCache()
-identitiesCache = sidecarproxycache.NewIdentitiesCache()
-m = sidecarproxymapper.New(destinationsCache, proxyCfgCache, computedRoutesCache, identitiesCache)
-)
 mgr.Register(
-sidecarproxy.Controller(destinationsCache, proxyCfgCache, computedRoutesCache, identitiesCache, m, deps.TrustDomainFetcher, deps.LocalDatacenter, deps.DefaultAllow),
+sidecarproxy.Controller(cache.New(), deps.TrustDomainFetcher, deps.LocalDatacenter, deps.DefaultAllow),
 )
 mgr.Register(routes.Controller())

View File

@@ -4,6 +4,10 @@
 package builder
 import (
+"fmt"
+
+"github.com/hashicorp/consul/internal/resource"
+pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
 pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
 "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1/pbproxystate"
 "github.com/hashicorp/consul/proto-public/pbresource"
@@ -13,7 +17,7 @@ import (
 type Builder struct {
 id *pbresource.ID
 proxyStateTemplate *pbmesh.ProxyStateTemplate
-proxyCfg *pbmesh.ProxyConfiguration
+proxyCfg *pbmesh.ComputedProxyConfiguration
 trustDomain string
 localDatacenter string
 defaultAllow bool
@@ -25,8 +29,15 @@ func New(
 trustDomain string,
 dc string,
 defaultAllow bool,
-proxyCfg *pbmesh.ProxyConfiguration,
+proxyCfg *pbmesh.ComputedProxyConfiguration,
 ) *Builder {
+if !resource.EqualType(pbmesh.ProxyStateTemplateType, id.GetType()) {
+panic(fmt.Sprintf("wrong type: expected pbmesh.ProxyStateTemplate, but got %T", id.Type))
+}
+
+if !resource.EqualType(pbauth.WorkloadIdentityType, identity.GetType()) {
+panic(fmt.Sprintf("wrong type: expected pbauth.WorkloadIdentityType, but got %T", identity.Type))
+}
+
 return &Builder{
 id: id,
 trustDomain: trustDomain,

View File

@@ -32,7 +32,7 @@ func TestBuildMultiportImplicitDestinations(t *testing.T) {
 trustDomain = "foo.consul"
 datacenter = "dc1"
 )
-proxyCfg := &pbmesh.ProxyConfiguration{
+proxyCfg := &pbmesh.ComputedProxyConfiguration{
 DynamicConfig: &pbmesh.DynamicConfig{
 Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
 TransparentProxy: &pbmesh.TransparentProxy{

View File

@@ -497,7 +497,7 @@ func TestBuildImplicitDestinations(t *testing.T) {
 )
 require.NotNil(t, api2ComputedRoutes)
-proxyCfg := &pbmesh.ProxyConfiguration{
+proxyCfg := &pbmesh.ComputedProxyConfiguration{
 DynamicConfig: &pbmesh.DynamicConfig{
 Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
 TransparentProxy: &pbmesh.TransparentProxy{

View File

@@ -17,21 +17,21 @@ import (
 func TestBuildExposePaths_NilChecks(t *testing.T) {
 testutil.RunStep(t, "proxy cfg is nil", func(t *testing.T) {
-b := New(nil, nil, "foo.consul", "dc1", true, nil)
+b := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul", "dc1", true, nil)
 require.NotPanics(t, func() {
 b.buildExposePaths(nil)
 })
 })
 testutil.RunStep(t, "dynamic cfg is nil", func(t *testing.T) {
-b := New(nil, nil, "foo.consul", "dc1", true, &pbmesh.ProxyConfiguration{})
+b := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul", "dc1", true, &pbmesh.ComputedProxyConfiguration{})
 require.NotPanics(t, func() {
 b.buildExposePaths(nil)
 })
 })
 testutil.RunStep(t, "expose cfg is nil", func(t *testing.T) {
-b := New(nil, nil, "foo.consul", "dc1", true, &pbmesh.ProxyConfiguration{
+b := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul", "dc1", true, &pbmesh.ComputedProxyConfiguration{
 DynamicConfig: &pbmesh.DynamicConfig{},
 })
 require.NotPanics(t, func() {
@@ -51,7 +51,7 @@ func TestBuildExposePaths_NoExternalMeshWorkloadAddress(t *testing.T) {
 },
 }
-proxycfg := &pbmesh.ProxyConfiguration{
+proxycfg := &pbmesh.ComputedProxyConfiguration{
 DynamicConfig: &pbmesh.DynamicConfig{
 ExposeConfig: &pbmesh.ExposeConfig{
 ExposePaths: []*pbmesh.ExposePath{
@@ -65,7 +65,7 @@ func TestBuildExposePaths_NoExternalMeshWorkloadAddress(t *testing.T) {
 },
 }
-b := New(nil, nil, "foo.consul", "dc1", true, proxycfg)
+b := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul", "dc1", true, proxycfg)
 b.buildExposePaths(workload)
 require.Empty(t, b.proxyStateTemplate.ProxyState.Listeners)
 }
@@ -81,7 +81,7 @@ func TestBuildExposePaths_InvalidProtocol(t *testing.T) {
 },
 }
-proxycfg := &pbmesh.ProxyConfiguration{
+proxycfg := &pbmesh.ComputedProxyConfiguration{
 DynamicConfig: &pbmesh.DynamicConfig{
 ExposeConfig: &pbmesh.ExposeConfig{
 ExposePaths: []*pbmesh.ExposePath{
@@ -96,7 +96,7 @@ func TestBuildExposePaths_InvalidProtocol(t *testing.T) {
 },
 }
-b := New(nil, nil, "foo.consul", "dc1", true, proxycfg)
+b := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul", "dc1", true, proxycfg)
 require.PanicsWithValue(t, "unsupported expose paths protocol", func() {
 b.buildExposePaths(workload)
 })

View File

@@ -104,7 +104,7 @@ func TestBuildLocalApp(t *testing.T) {
 func TestBuildLocalApp_WithProxyConfiguration(t *testing.T) {
 cases := map[string]struct {
 workload *pbcatalog.Workload
-proxyCfg *pbmesh.ProxyConfiguration
+proxyCfg *pbmesh.ComputedProxyConfiguration
 }{
 "source/l7-expose-paths": {
 workload: &pbcatalog.Workload{
@@ -118,7 +118,7 @@ func TestBuildLocalApp_WithProxyConfiguration(t *testing.T) {
 "port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
 },
 },
-proxyCfg: &pbmesh.ProxyConfiguration{
+proxyCfg: &pbmesh.ComputedProxyConfiguration{
 DynamicConfig: &pbmesh.DynamicConfig{
 ExposeConfig: &pbmesh.ExposeConfig{
 ExposePaths: []*pbmesh.ExposePath{
@@ -488,5 +488,6 @@ func testIdentityRef() *pbresource.Reference {
 Partition: "default",
 PeerName: "local",
 },
+Type: pbauth.WorkloadIdentityType,
 }
 }

View File

@@ -72,6 +72,11 @@
 "namespace": "default",
 "partition": "default",
 "peerName": "local"
+},
+"type": {
+"group": "auth",
+"groupVersion": "v2beta1",
+"kind": "WorkloadIdentity"
 }
 },
 "listeners": [

View File

@@ -126,6 +126,11 @@
 "namespace": "default",
 "partition": "default",
 "peerName": "local"
+},
+"type": {
+"group": "auth",
+"groupVersion": "v2beta1",
+"kind": "WorkloadIdentity"
 }
 },
 "listeners": [

View File

@@ -72,6 +72,11 @@
 "namespace": "default",
 "partition": "default",
 "peerName": "local"
+},
+"type": {
+"group": "auth",
+"groupVersion": "v2beta1",
+"kind": "WorkloadIdentity"
 }
 },
 "listeners": [

View File

@@ -72,6 +72,11 @@
 "namespace": "default",
 "partition": "default",
 "peerName": "local"
+},
+"type": {
+"group": "auth",
+"groupVersion": "v2beta1",
+"kind": "WorkloadIdentity"
 }
 },
 "listeners": [

View File

@@ -35,6 +35,11 @@
 "namespace": "default",
 "partition": "default",
 "peerName": "local"
+},
+"type": {
+"group": "auth",
+"groupVersion": "v2beta1",
+"kind": "WorkloadIdentity"
 }
 },
 "listeners": [

View File

@@ -45,6 +45,11 @@
 "namespace": "default",
 "partition": "default",
 "peerName": "local"
+},
+"type": {
+"group": "auth",
+"groupVersion": "v2beta1",
+"kind": "WorkloadIdentity"
 }
 },
 "listeners": [

View File

@@ -158,6 +158,11 @@
 "namespace": "default",
 "partition": "default",
 "peerName": "local"
+},
+"type": {
+"group": "auth",
+"groupVersion": "v2beta1",
+"kind": "WorkloadIdentity"
 }
 },
 "listeners": [

View File

@@ -180,6 +180,11 @@
 "namespace": "default",
 "partition": "default",
 "peerName": "local"
+},
+"type": {
+"group": "auth",
+"groupVersion": "v2beta1",
+"kind": "WorkloadIdentity"
 }
 },
 "listeners": [

View File

@@ -99,6 +99,11 @@
 "namespace": "default",
 "partition": "default",
 "peerName": "local"
+},
+"type": {
+"group": "auth",
+"groupVersion": "v2beta1",
+"kind": "WorkloadIdentity"
 }
 },
 "listeners": [

View File

@@ -99,6 +99,11 @@
 "namespace": "default",
 "partition": "default",
 "peerName": "local"
+},
+"type": {
+"group": "auth",
+"groupVersion": "v2beta1",
+"kind": "WorkloadIdentity"
 }
 },
 "listeners": [

View File

@@ -26,6 +26,11 @@
 "namespace": "default",
 "partition": "default",
 "peerName": "local"
+},
+"type": {
+"group": "auth",
+"groupVersion": "v2beta1",
+"kind": "WorkloadIdentity"
 }
 },
 "listeners": [

View File

@ -26,6 +26,11 @@
"namespace": "default", "namespace": "default",
"partition": "default", "partition": "default",
"peerName": "local" "peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
} }
}, },
"listeners": [ "listeners": [

View File

@ -26,6 +26,11 @@
"namespace": "default", "namespace": "default",
"partition": "default", "partition": "default",
"peerName": "local" "peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
} }
}, },
"listeners": [ "listeners": [

View File

@ -58,6 +58,11 @@
"namespace": "default", "namespace": "default",
"partition": "default", "partition": "default",
"peerName": "local" "peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
} }
}, },
"listeners": [ "listeners": [

View File

@ -42,6 +42,11 @@
"namespace": "default", "namespace": "default",
"partition": "default", "partition": "default",
"peerName": "local" "peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
} }
}, },
"listeners": [ "listeners": [

View File

@ -42,6 +42,11 @@
"namespace": "default", "namespace": "default",
"partition": "default", "partition": "default",
"peerName": "local" "peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
} }
}, },
"listeners": [ "listeners": [

View File

@ -42,6 +42,11 @@
"namespace": "default", "namespace": "default",
"partition": "default", "partition": "default",
"peerName": "local" "peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
} }
}, },
"listeners": [ "listeners": [

View File

@ -14,6 +14,11 @@
"namespace": "default", "namespace": "default",
"partition": "default", "partition": "default",
"peerName": "local" "peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
} }
}, },
"listeners": [ "listeners": [

View File

@ -0,0 +1,221 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cache
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/mappers/bimapper"
"github.com/hashicorp/consul/internal/storage"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
type Cache struct {
computedRoutes *bimapper.Mapper
identities *bimapper.Mapper
computedDestinations *bimapper.Mapper
}
func New() *Cache {
return &Cache{
computedRoutes: bimapper.New(pbmesh.ComputedRoutesType, pbcatalog.ServiceType),
identities: bimapper.New(pbcatalog.WorkloadType, pbauth.WorkloadIdentityType),
computedDestinations: bimapper.New(pbmesh.ComputedExplicitDestinationsType, pbcatalog.ServiceType),
}
}
func (c *Cache) TrackComputedDestinations(computedDestinations *types.DecodedComputedDestinations) {
var serviceRefs []resource.ReferenceOrID
for _, dest := range computedDestinations.Data.Destinations {
serviceRefs = append(serviceRefs, dest.DestinationRef)
}
c.computedDestinations.TrackItem(computedDestinations.Resource.Id, serviceRefs)
}
func (c *Cache) UntrackComputedDestinations(computedDestinationsID *pbresource.ID) {
c.computedDestinations.UntrackItem(computedDestinationsID)
}
func (c *Cache) UntrackComputedRoutes(computedRoutesID *pbresource.ID) {
c.computedRoutes.UntrackItem(computedRoutesID)
}
func (c *Cache) TrackWorkload(workload *types.DecodedWorkload) {
identityID := &pbresource.ID{
Name: workload.GetData().Identity,
Tenancy: workload.GetResource().Id.Tenancy,
Type: pbauth.WorkloadIdentityType,
}
c.identities.TrackItem(workload.GetResource().GetId(), []resource.ReferenceOrID{identityID})
}
// UntrackWorkload removes tracking for the given workload ID.
func (c *Cache) UntrackWorkload(wID *pbresource.ID) {
c.identities.UntrackItem(wID)
}
func (c *Cache) ComputedDestinationsByService(id resource.ReferenceOrID) []*pbresource.ID {
return c.computedDestinations.ItemIDsForLink(id)
}
func (c *Cache) trackComputedRoutes(computedRoutes *types.DecodedComputedRoutes) {
var serviceRefs []resource.ReferenceOrID
for _, pcr := range computedRoutes.Data.PortedConfigs {
for _, details := range pcr.Targets {
serviceRefs = append(serviceRefs, details.BackendRef.Ref)
}
}
c.computedRoutes.TrackItem(computedRoutes.Resource.Id, serviceRefs)
}
func (c *Cache) computedRoutesByService(id resource.ReferenceOrID) []*pbresource.ID {
return c.computedRoutes.ItemIDsForLink(id)
}
func (c *Cache) WorkloadsByWorkloadIdentity(id *pbresource.ID) []*pbresource.ID {
return c.identities.ItemIDsForLink(id)
}
func (c *Cache) MapComputedRoutes(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
computedRoutes, err := resource.Decode[*pbmesh.ComputedRoutes](res)
if err != nil {
return nil, err
}
ids, err := c.mapComputedRoutesToProxyStateTemplate(ctx, rt, res.Id)
if err != nil {
return nil, err
}
c.trackComputedRoutes(computedRoutes)
return controller.MakeRequests(pbmesh.ProxyStateTemplateType, ids), nil
}
func (c *Cache) mapComputedRoutesToProxyStateTemplate(ctx context.Context, rt controller.Runtime, computedRoutesID *pbresource.ID) ([]*pbresource.ID, error) {
// Each Destination gets a single ComputedRoutes.
serviceID := resource.ReplaceType(pbcatalog.ServiceType, computedRoutesID)
serviceRef := resource.Reference(serviceID, "")
return c.mapServiceThroughDestinations(ctx, rt, serviceRef)
}
func (c *Cache) MapService(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
serviceRef := resource.Reference(res.Id, "")
pstIDs, err := c.mapServiceThroughDestinations(ctx, rt, serviceRef)
if err != nil {
return nil, err
}
// Now walk the mesh configuration information backwards because
// we need to find any PST that needs to DISCOVER endpoints for this
// service as a part of mesh configuration and traffic routing.
// Find all ComputedRoutes that reference this service.
routeIDs := c.computedRoutesByService(serviceRef)
for _, routeID := range routeIDs {
// Find all Upstreams that reference a Service aligned with this ComputedRoutes.
// Afterwards, find all Workloads selected by the Upstreams, and align a PST with those.
ids, err := c.mapComputedRoutesToProxyStateTemplate(ctx, rt, routeID)
if err != nil {
return nil, err
}
pstIDs = append(pstIDs, ids...)
}
return controller.MakeRequests(pbmesh.ProxyStateTemplateType, pstIDs), nil
}
// mapServiceThroughDestinations takes an explicit
// Service and traverses back through Destinations to Workloads to
// ProxyStateTemplates.
//
// This is in a separate function so it can be chained for more complicated
// relationships.
func (c *Cache) mapServiceThroughDestinations(
ctx context.Context,
rt controller.Runtime,
serviceRef *pbresource.Reference,
) ([]*pbresource.ID, error) {
// The relationship is:
//
// - PST (replace type) Workload
// - Workload (name-aligned) ComputedDestinations
// - ComputedDestinations (contains) Service
//
// When we wake up for Service we should:
//
// - look up computed destinations for the service
// - rewrite computed destination types to PST
var pstIDs []*pbresource.ID
// Get all source proxy IDs whose computed explicit destinations (name-aligned with the workload/PST) reference this service.
sources := c.ComputedDestinationsByService(serviceRef)
for _, cdID := range sources {
pstIDs = append(pstIDs, resource.ReplaceType(pbmesh.ProxyStateTemplateType, cdID))
}
// TODO(v2): remove this after we can do proper performant implicit upstream determination
//
// TODO(rb): shouldn't this instead list all Workloads that have a mesh port?
allIDs, err := c.listAllProxyStateTemplatesTemporarily(ctx, rt, serviceRef.Tenancy)
if err != nil {
return nil, err
}
pstIDs = append(pstIDs, allIDs...)
return pstIDs, nil
}
func (c *Cache) listAllProxyStateTemplatesTemporarily(ctx context.Context, rt controller.Runtime, tenancy *pbresource.Tenancy) ([]*pbresource.ID, error) {
// todo (ishustava): this is a stub for now until we implement implicit destinations.
// For tproxy, we generate requests for all proxy states in the cluster.
// This will generate duplicate events for proxies already added above;
// however, we expect the controller runtime to de-duplicate them for us.
rsp, err := rt.Client.List(ctx, &pbresource.ListRequest{
Type: pbmesh.ProxyStateTemplateType,
Tenancy: &pbresource.Tenancy{
Namespace: storage.Wildcard,
Partition: tenancy.Partition,
PeerName: tenancy.PeerName,
},
})
if err != nil {
return nil, err
}
result := make([]*pbresource.ID, 0, len(rsp.Resources))
for _, r := range rsp.Resources {
result = append(result, r.Id)
}
return result, nil
}
func (c *Cache) MapComputedTrafficPermissions(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
var ctp pbauth.ComputedTrafficPermissions
err := res.Data.UnmarshalTo(&ctp)
if err != nil {
return nil, err
}
workloadIdentityID := resource.ReplaceType(pbauth.WorkloadIdentityType, res.Id)
ids := c.WorkloadsByWorkloadIdentity(workloadIdentityID)
return controller.MakeRequests(pbmesh.ProxyStateTemplateType, ids), nil
}
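
A minimal usage sketch of the unified cache (the decoded inputs and IDs here are placeholders that would normally come from watch events; only methods declared above are used). Each bimapper tracks items against the links they reference in both directions, which is what lets one cache answer both "what does this proxy depend on" and "which proxies are affected by this resource":

// exampleCacheUsage is a hypothetical helper illustrating the intended call pattern.
func exampleCacheUsage(
	c *Cache,
	w *types.DecodedWorkload,
	cd *types.DecodedComputedDestinations,
	identityID *pbresource.ID,
	serviceRef *pbresource.Reference,
) {
	// Workload -> identity links feed MapComputedTrafficPermissions.
	c.TrackWorkload(w)
	_ = c.WorkloadsByWorkloadIdentity(identityID)

	// ComputedExplicitDestinations -> service links feed MapService.
	c.TrackComputedDestinations(cd)
	_ = c.ComputedDestinationsByService(serviceRef)

	// Removing the workload drops its identity link again.
	c.UntrackWorkload(w.Resource.Id)
}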

View File

@ -0,0 +1,420 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cache
import (
"context"
"testing"
"github.com/stretchr/testify/require"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/routestest"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestIdentities(t *testing.T) {
cache := New()
identityID1 := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
identityID2 := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
w1 := resourcetest.Resource(pbcatalog.WorkloadType, "service-workload-1").
WithData(t, &pbcatalog.Workload{
Identity: identityID1.Name,
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
decW1 := resourcetest.MustDecode[*pbcatalog.Workload](t, w1)
w2 := resourcetest.Resource(pbcatalog.WorkloadType, "service-workload-2").
WithData(t, &pbcatalog.Workload{
Identity: identityID2.Name,
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
decW2 := resourcetest.MustDecode[*pbcatalog.Workload](t, w2)
// Empty cache
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID1))
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID2))
// Insert value and fetch it.
cache.TrackWorkload(decW1)
require.Equal(t, []*pbresource.ID{w1.Id}, cache.WorkloadsByWorkloadIdentity(identityID1))
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID2))
// Insert another value referencing the same identity.
decW2.GetData().Identity = identityID1.Name
cache.TrackWorkload(decW2)
require.ElementsMatch(t, []*pbresource.ID{w1.Id, w2.Id}, cache.WorkloadsByWorkloadIdentity(identityID1))
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID2))
// Now workload 1 uses identity 2
decW1.GetData().Identity = identityID2.Name
cache.TrackWorkload(decW1)
require.Equal(t, []*pbresource.ID{w1.Id}, cache.WorkloadsByWorkloadIdentity(identityID2))
require.Equal(t, []*pbresource.ID{w2.Id}, cache.WorkloadsByWorkloadIdentity(identityID1))
// Untrack workload 2
cache.UntrackWorkload(w2.Id)
require.Equal(t, []*pbresource.ID{w1.Id}, cache.WorkloadsByWorkloadIdentity(identityID2))
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID1))
// Untrack workload 1
cache.UntrackWorkload(w1.Id)
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID2))
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID1))
}
func TestMapComputedTrafficPermissions(t *testing.T) {
client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
ctp := resourcetest.Resource(pbauth.ComputedTrafficPermissionsType, "workload-identity-1").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbauth.ComputedTrafficPermissions{}).
Build()
c := New()
// Empty results when the cache isn't populated.
requests, err := c.MapComputedTrafficPermissions(context.Background(), controller.Runtime{Client: client}, ctp)
require.NoError(t, err)
require.Len(t, requests, 0)
identityID1 := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
w1 := resourcetest.Resource(pbcatalog.WorkloadType, "service-workload-1").
WithData(t, &pbcatalog.Workload{
Identity: identityID1.Name,
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
decW1 := resourcetest.MustDecode[*pbcatalog.Workload](t, w1)
w2 := resourcetest.Resource(pbcatalog.WorkloadType, "service-workload-2").
WithData(t, &pbcatalog.Workload{
Identity: identityID1.Name,
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
decW2 := resourcetest.MustDecode[*pbcatalog.Workload](t, w2)
c.TrackWorkload(decW1)
// After tracking workload 1, mapping produces a request for its proxy state template.
requests, err = c.MapComputedTrafficPermissions(context.Background(), controller.Runtime{Client: client}, ctp)
require.NoError(t, err)
prototest.AssertElementsMatch(t,
[]controller.Request{{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w1.Id)}}, requests)
c.TrackWorkload(decW2)
// After also tracking workload 2, requests are produced for both proxy state templates.
requests, err = c.MapComputedTrafficPermissions(context.Background(), controller.Runtime{Client: client}, ctp)
require.NoError(t, err)
prototest.AssertElementsMatch(t, []controller.Request{
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w1.Id)},
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w2.Id)},
}, requests)
}
func TestUnified_AllMappingsToProxyStateTemplate(t *testing.T) {
var (
cache = New()
client = svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
)
anyServiceData := &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
{
TargetPort: "tcp1",
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
},
{
TargetPort: "tcp2",
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
},
{
TargetPort: "mesh",
Protocol: pbcatalog.Protocol_PROTOCOL_MESH,
},
},
}
// The thing we link through Destinations.
destService := resourcetest.Resource(pbcatalog.ServiceType, "web").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, anyServiceData).
Build()
destServiceRef := resource.Reference(destService.Id, "")
// The thing we reach through the mesh config.
targetService := resourcetest.Resource(pbcatalog.ServiceType, "db").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, anyServiceData).
Build()
targetServiceRef := resource.Reference(targetService.Id, "")
backupTargetService := resourcetest.Resource(pbcatalog.ServiceType, "db-backup").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, anyServiceData).
Build()
backupTargetServiceRef := resource.Reference(backupTargetService.Id, "")
// The way we make 'web' actually route traffic to 'db'.
tcpRoute := resourcetest.Resource(pbmesh.TCPRouteType, "tcp-route").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbmesh.TCPRoute{
ParentRefs: []*pbmesh.ParentReference{{
Ref: destServiceRef,
}},
Rules: []*pbmesh.TCPRouteRule{{
BackendRefs: []*pbmesh.TCPBackendRef{{
BackendRef: &pbmesh.BackendReference{
Ref: targetServiceRef,
},
}},
}},
}).
Build()
failoverPolicy := resourcetest.ResourceID(resource.ReplaceType(pbcatalog.FailoverPolicyType, targetService.Id)).
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.FailoverPolicy{
Config: &pbcatalog.FailoverConfig{
Destinations: []*pbcatalog.FailoverDestination{{
Ref: backupTargetServiceRef,
}},
},
}).
Build()
webRoutes := routestest.BuildComputedRoutes(t, resource.ReplaceType(pbmesh.ComputedRoutesType, destService.Id),
resourcetest.MustDecode[*pbcatalog.Service](t, destService),
resourcetest.MustDecode[*pbcatalog.Service](t, targetService),
resourcetest.MustDecode[*pbcatalog.Service](t, backupTargetService),
resourcetest.MustDecode[*pbmesh.TCPRoute](t, tcpRoute),
resourcetest.MustDecode[*pbcatalog.FailoverPolicy](t, failoverPolicy),
)
var (
sourceProxy1 = newID(pbmesh.ProxyStateTemplateType, "src-workload-1")
sourceProxy2 = newID(pbmesh.ProxyStateTemplateType, "src-workload-2")
sourceProxy3 = newID(pbmesh.ProxyStateTemplateType, "src-workload-3")
sourceProxy4 = newID(pbmesh.ProxyStateTemplateType, "src-workload-4")
sourceProxy5 = newID(pbmesh.ProxyStateTemplateType, "src-workload-5")
sourceProxy6 = newID(pbmesh.ProxyStateTemplateType, "src-workload-6")
)
compDestProxy1 := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, sourceProxy1.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: destServiceRef,
DestinationPort: "tcp1",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
compDestProxy2 := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, sourceProxy2.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: destServiceRef,
DestinationPort: "tcp1",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
compDestProxy3 := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, sourceProxy3.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: destServiceRef,
DestinationPort: "tcp2",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
compDestProxy4 := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, sourceProxy4.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: destServiceRef,
DestinationPort: "tcp2",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
compDestProxy5 := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, sourceProxy5.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: destServiceRef,
DestinationPort: "mesh",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
compDestProxy6 := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, sourceProxy6.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: destServiceRef,
DestinationPort: "mesh",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
cache.trackComputedRoutes(webRoutes)
cache.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDestProxy1))
cache.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDestProxy2))
cache.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDestProxy3))
cache.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDestProxy4))
cache.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDestProxy5))
cache.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDestProxy6))
t.Run("Service", func(t *testing.T) {
t.Run("map dest service", func(t *testing.T) {
requests, err := cache.MapService(
context.Background(),
controller.Runtime{Client: client},
destService,
)
require.NoError(t, err)
// Only wake up things with dest as an upstream.
expRequests := []controller.Request{
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
t.Run("map target endpoints (TCPRoute)", func(t *testing.T) {
requests, err := cache.MapService(
context.Background(),
controller.Runtime{Client: client},
targetService,
)
require.NoError(t, err)
requests = testDeduplicateRequests(requests)
expRequests := []controller.Request{
// Wake up things that have destService as a destination b/c of the TCPRoute reference.
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
t.Run("map backup target endpoints (FailoverPolicy)", func(t *testing.T) {
requests, err := cache.MapService(
context.Background(),
controller.Runtime{Client: client},
backupTargetService,
)
require.NoError(t, err)
requests = testDeduplicateRequests(requests)
expRequests := []controller.Request{
// Wake up things that have destService as a destination b/c of the FailoverPolicy reference.
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
})
t.Run("ComputedRoutes", func(t *testing.T) {
t.Run("map web routes", func(t *testing.T) {
requests, err := cache.MapComputedRoutes(
context.Background(),
controller.Runtime{Client: client},
webRoutes.Resource,
)
require.NoError(t, err)
// Only wake up things with dest as an upstream.
expRequests := []controller.Request{
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
})
}
func newID(typ *pbresource.Type, name string) *pbresource.ID {
return &pbresource.ID{
Type: typ,
Tenancy: resource.DefaultNamespacedTenancy(),
Name: name,
}
}
func testDeduplicateRequests(reqs []controller.Request) []controller.Request {
type resID struct {
resource.ReferenceKey
UID string
}
out := make([]controller.Request, 0, len(reqs))
seen := make(map[resID]struct{})
for _, req := range reqs {
rid := resID{
ReferenceKey: resource.NewReferenceKey(req.ID),
UID: req.ID.Uid,
}
if _, ok := seen[rid]; !ok {
out = append(out, req)
seen[rid] = struct{}{}
}
}
return out
}

View File

@ -10,10 +10,9 @@ import (
"google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/consul/internal/controller" "github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/builder" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/builder"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/fetcher" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/fetcher"
"github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper"
"github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1" pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
@ -28,37 +27,34 @@ const ControllerName = "consul.io/sidecar-proxy-controller"
type TrustDomainFetcher func() (string, error) type TrustDomainFetcher func() (string, error)
func Controller( func Controller(
destinationsCache *sidecarproxycache.DestinationsCache, cache *cache.Cache,
proxyCfgCache *sidecarproxycache.ProxyConfigurationCache,
computedRoutesCache *sidecarproxycache.ComputedRoutesCache,
identitiesCache *sidecarproxycache.IdentitiesCache,
mapper *sidecarproxymapper.Mapper,
trustDomainFetcher TrustDomainFetcher, trustDomainFetcher TrustDomainFetcher,
dc string, dc string,
defaultAllow bool, defaultAllow bool,
) controller.Controller { ) controller.Controller {
if destinationsCache == nil || proxyCfgCache == nil || computedRoutesCache == nil || identitiesCache == nil || mapper == nil || trustDomainFetcher == nil { if cache == nil || trustDomainFetcher == nil {
panic("destinations cache, proxy configuration cache, computed routes cache, identities cache, mapper, and trust domain fetcher are required") panic("cache and trust domain fetcher are required")
} }
/* /*
Workload <align> PST Workload <align> PST
Upstreams <select> PST(==Workload) ComputedDestinations <align> PST(==Workload)
Upstreams <contain> Service(upstream) ComputedDestinations <contain> Service(destinations)
ProxyConfiguration <select> PST(==Workload) ComputedProxyConfiguration <align> PST(==Workload)
ComputedRoutes <align> Service(upstream) ComputedRoutes <align> Service(upstream)
ComputedRoutes <contain> Service(disco) ComputedRoutes <contain> Service(disco)
ServiceEndpoints <align> Service(disco) ComputedTrafficPermissions <align> WorkloadIdentity
Workload <contain> WorkloadIdentity
These relationships then dicate the following reconcile logic. These relationships then dictate the following reconcile logic.
controller: read workload for PST controller: read workload for PST
controller: read previous PST controller: read previous PST
controller: read ProxyConfiguration for Workload controller: read ComputedProxyConfiguration for Workload
controller: use cached Upstreams data to walk explicit upstreams controller: read ComputedDestinations for workload to walk explicit upstreams
controller: read ComputedTrafficPermissions for workload using workload.identity field.
<EXPLICIT-for-each> <EXPLICIT-for-each>
fetcher: read Upstreams to find single Upstream fetcher: read Service(Destination)
fetcher: read Service(upstream)
fetcher: read ComputedRoutes fetcher: read ComputedRoutes
<TARGET-for-each> <TARGET-for-each>
fetcher: read ServiceEndpoints fetcher: read ServiceEndpoints
@ -78,26 +74,24 @@ func Controller(
/* /*
Which means for equivalence, the following mapper relationships should exist: Which means for equivalence, the following mapper relationships should exist:
Service: find upstreams with Service; Recurse(Upstreams) Service: find destinations with Service; Recurse(ComputedDestinations);
ServiceEndpoints: ServiceEndpoints=>Service; find ComputedRoutes with this in a Target or FailoverConfig; Recurse(ComputedRoutes) find ComputedRoutes with this in a Target or FailoverConfig; Recurse(ComputedRoutes)
Upstreams: use selector to select workloads; workloads=>PST ComputedDestinations: replace type CED=>PST
ProxyConfiguration: use selector to select workloads; workloads=>PST ComputedProxyConfiguration: replace type CPC=>PST
ComputedRoutes: CR=>Service; find upstreams with Service; Recurse(Upstreams) ComputedRoutes: CR=>Service; find destinations with Service; Recurse(Destinations)
[implicit/temp]: trigger all [implicit/temp]: trigger all
ComputedTrafficPermissions: find workloads in cache stored for this CTP=Workload, workloads=>PST reconcile requests
*/ */
return controller.ForType(pbmesh.ProxyStateTemplateType). return controller.ForType(pbmesh.ProxyStateTemplateType).
WithWatch(pbcatalog.ServiceType, mapper.MapServiceToProxyStateTemplate). WithWatch(pbcatalog.ServiceType, cache.MapService).
WithWatch(pbcatalog.ServiceEndpointsType, mapper.MapServiceEndpointsToProxyStateTemplate). WithWatch(pbcatalog.WorkloadType, controller.ReplaceType(pbmesh.ProxyStateTemplateType)).
WithWatch(pbmesh.DestinationsType, mapper.MapDestinationsToProxyStateTemplate). WithWatch(pbmesh.ComputedExplicitDestinationsType, controller.ReplaceType(pbmesh.ProxyStateTemplateType)).
WithWatch(pbmesh.ProxyConfigurationType, mapper.MapProxyConfigurationToProxyStateTemplate). WithWatch(pbmesh.ComputedProxyConfigurationType, controller.ReplaceType(pbmesh.ProxyStateTemplateType)).
WithWatch(pbmesh.ComputedRoutesType, mapper.MapComputedRoutesToProxyStateTemplate). WithWatch(pbmesh.ComputedRoutesType, cache.MapComputedRoutes).
WithWatch(pbauth.ComputedTrafficPermissionsType, mapper.MapComputedTrafficPermissionsToProxyStateTemplate). WithWatch(pbauth.ComputedTrafficPermissionsType, cache.MapComputedTrafficPermissions).
WithReconciler(&reconciler{ WithReconciler(&reconciler{
destinationsCache: destinationsCache, cache: cache,
proxyCfgCache: proxyCfgCache,
computedRoutesCache: computedRoutesCache,
identitiesCache: identitiesCache,
getTrustDomain: trustDomainFetcher, getTrustDomain: trustDomainFetcher,
dc: dc, dc: dc,
defaultAllow: defaultAllow, defaultAllow: defaultAllow,
@ -105,10 +99,7 @@ func Controller(
} }
type reconciler struct { type reconciler struct {
destinationsCache *sidecarproxycache.DestinationsCache cache *cache.Cache
proxyCfgCache *sidecarproxycache.ProxyConfigurationCache
computedRoutesCache *sidecarproxycache.ComputedRoutesCache
identitiesCache *sidecarproxycache.IdentitiesCache
getTrustDomain TrustDomainFetcher getTrustDomain TrustDomainFetcher
defaultAllow bool defaultAllow bool
dc string dc string
@ -120,13 +111,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
rt.Logger.Trace("reconciling proxy state template") rt.Logger.Trace("reconciling proxy state template")
// Instantiate a data fetcher to fetch all reconciliation data. // Instantiate a data fetcher to fetch all reconciliation data.
dataFetcher := fetcher.New( dataFetcher := fetcher.New(rt.Client, r.cache)
rt.Client,
r.destinationsCache,
r.proxyCfgCache,
r.computedRoutesCache,
r.identitiesCache,
)
// Check if the workload exists. // Check if the workload exists.
workloadID := resource.ReplaceType(pbcatalog.WorkloadType, req.ID) workloadID := resource.ReplaceType(pbcatalog.WorkloadType, req.ID)
@ -153,7 +138,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
rt.Logger.Trace("proxy state template for this workload doesn't yet exist; generating a new one") rt.Logger.Trace("proxy state template for this workload doesn't yet exist; generating a new one")
} }
if !fetcher.IsWorkloadMeshEnabled(workload.Data.Ports) { if !workload.GetData().IsMeshEnabled() {
// Skip non-mesh workloads. // Skip non-mesh workloads.
// If there's existing proxy state template, delete it. // If there's existing proxy state template, delete it.
@ -164,9 +149,6 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
rt.Logger.Error("error deleting existing proxy state template", "error", err) rt.Logger.Error("error deleting existing proxy state template", "error", err)
return err return err
} }
// Remove it from destinationsCache.
r.destinationsCache.DeleteSourceProxy(req.ID)
} }
rt.Logger.Trace("skipping proxy state template generation because workload is not on the mesh", "workload", workload.Resource.Id) rt.Logger.Trace("skipping proxy state template generation because workload is not on the mesh", "workload", workload.Resource.Id)
return nil return nil
@ -180,7 +162,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
} }
// Fetch proxy configuration. // Fetch proxy configuration.
proxyCfg, err := dataFetcher.FetchAndMergeProxyConfigurations(ctx, req.ID) proxyCfg, err := dataFetcher.FetchComputedProxyConfiguration(ctx, req.ID)
if err != nil { if err != nil {
rt.Logger.Error("error fetching proxy and merging proxy configurations", "error", err) rt.Logger.Error("error fetching proxy and merging proxy configurations", "error", err)
return err return err
@ -197,23 +179,18 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
ctp = trafficPermissions.Data ctp = trafficPermissions.Data
} }
b := builder.New(req.ID, identityRefFromWorkload(workload), trustDomain, r.dc, r.defaultAllow, proxyCfg). b := builder.New(req.ID, identityRefFromWorkload(workload), trustDomain, r.dc, r.defaultAllow, proxyCfg.GetData()).
BuildLocalApp(workload.Data, ctp) BuildLocalApp(workload.Data, ctp)
// Get all destinationsData. // Get all destinationsData.
destinationsRefs := r.destinationsCache.DestinationsBySourceProxy(req.ID) destinationsData, err := dataFetcher.FetchExplicitDestinationsData(ctx, req.ID)
if len(destinationsRefs) > 0 {
rt.Logger.Trace("found destinations for this proxy", "id", req.ID, "destination_refs", destinationsRefs)
} else {
rt.Logger.Trace("did not find any destinations for this proxy", "id", req.ID)
}
destinationsData, statuses, err := dataFetcher.FetchExplicitDestinationsData(ctx, destinationsRefs)
if err != nil { if err != nil {
rt.Logger.Error("error fetching explicit destinations for this proxy", "error", err) rt.Logger.Error("error fetching explicit destinations for this proxy", "error", err)
return err return err
} }
if proxyCfg.IsTransparentProxy() { if proxyCfg.GetData() != nil && proxyCfg.GetData().IsTransparentProxy() {
rt.Logger.Trace("transparent proxy is enabled; fetching implicit destinations")
destinationsData, err = dataFetcher.FetchImplicitDestinationsData(ctx, req.ID, destinationsData) destinationsData, err = dataFetcher.FetchImplicitDestinationsData(ctx, req.ID, destinationsData)
if err != nil { if err != nil {
rt.Logger.Error("error fetching implicit destinations for this proxy", "error", err) rt.Logger.Error("error fetching implicit destinations for this proxy", "error", err)
@ -250,26 +227,6 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
rt.Logger.Trace("proxy state template data has not changed, skipping update") rt.Logger.Trace("proxy state template data has not changed, skipping update")
} }
// Update any statuses.
for _, status := range statuses {
updatedStatus := &pbresource.Status{
ObservedGeneration: status.Generation,
}
updatedStatus.Conditions = status.Conditions
// If the status is unchanged then we should return and avoid the unnecessary write
if !resource.EqualStatus(status.OldStatus[ControllerName], updatedStatus, false) {
rt.Logger.Trace("updating status", "id", status.ID)
_, err = rt.Client.WriteStatus(ctx, &pbresource.WriteStatusRequest{
Id: status.ID,
Key: ControllerName,
Status: updatedStatus,
})
if err != nil {
rt.Logger.Error("error writing new status", "id", status.ID, "error", err)
return err
}
}
}
return nil return nil
} }
@ -277,6 +234,7 @@ func identityRefFromWorkload(w *types.DecodedWorkload) *pbresource.Reference {
return &pbresource.Reference{ return &pbresource.Reference{
Name: w.Data.Identity, Name: w.Data.Identity,
Tenancy: w.Resource.Id.Tenancy, Tenancy: w.Resource.Id.Tenancy,
Type: pbauth.WorkloadIdentityType,
} }
} }
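
Because ComputedExplicitDestinations, ComputedProxyConfiguration, and Workload are all name-aligned with the ProxyStateTemplate, the ReplaceType watch mapper used above only has to swap the resource type. A rough hand-written equivalent, assuming controller.ReplaceType wraps logic of this shape (it uses only resource.ReplaceType and controller.Request, both of which appear in this change):

// mapToProxyStateTemplate maps a watched, name-aligned resource to a single
// reconcile request for the corresponding ProxyStateTemplate.
func mapToProxyStateTemplate(
	_ context.Context, _ controller.Runtime, res *pbresource.Resource,
) ([]controller.Request, error) {
	pstID := resource.ReplaceType(pbmesh.ProxyStateTemplateType, res.Id)
	return []controller.Request{{ID: pstID}}, nil
}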

View File

@ -16,11 +16,9 @@ import (
"github.com/hashicorp/consul/internal/auth" "github.com/hashicorp/consul/internal/auth"
"github.com/hashicorp/consul/internal/catalog" "github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller" "github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/routestest" "github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/routestest"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/builder" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/builder"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache"
"github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper"
"github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest" "github.com/hashicorp/consul/internal/resource/resourcetest"
@ -69,9 +67,7 @@ func (suite *meshControllerTestSuite) SetupTest() {
suite.ctx = testutil.TestContext(suite.T()) suite.ctx = testutil.TestContext(suite.T())
suite.ctl = &reconciler{ suite.ctl = &reconciler{
destinationsCache: sidecarproxycache.NewDestinationsCache(), cache: cache.New(),
proxyCfgCache: sidecarproxycache.NewProxyConfigurationCache(),
identitiesCache: sidecarproxycache.NewIdentitiesCache(),
getTrustDomain: func() (string, error) { getTrustDomain: func() (string, error) {
return "test.consul", nil return "test.consul", nil
}, },
@ -229,9 +225,11 @@ func (suite *meshControllerTestSuite) SetupTest() {
identityRef := &pbresource.Reference{ identityRef := &pbresource.Reference{
Name: suite.apiWorkload.Identity, Name: suite.apiWorkload.Identity,
Tenancy: suite.apiWorkloadID.Tenancy, Tenancy: suite.apiWorkloadID.Tenancy,
Type: pbauth.WorkloadIdentityType,
} }
suite.proxyStateTemplate = builder.New(suite.apiWorkloadID, identityRef, "test.consul", "dc1", false, nil). suite.proxyStateTemplate = builder.New(resource.ReplaceType(pbmesh.ProxyStateTemplateType, suite.apiWorkloadID),
identityRef, "test.consul", "dc1", false, nil).
BuildLocalApp(suite.apiWorkload, suite.apiComputedTrafficPermissionsData). BuildLocalApp(suite.apiWorkload, suite.apiComputedTrafficPermissionsData).
Build() Build()
} }
@ -353,16 +351,10 @@ func (suite *meshControllerTestSuite) TestController() {
mgr := controller.NewManager(suite.client, suite.runtime.Logger) mgr := controller.NewManager(suite.client, suite.runtime.Logger)
// Initialize controller dependencies. // Initialize controller dependencies.
var ( c := cache.New()
destinationsCache = sidecarproxycache.NewDestinationsCache()
proxyCfgCache = sidecarproxycache.NewProxyConfigurationCache()
computedRoutesCache = sidecarproxycache.NewComputedRoutesCache()
identitiesCache = sidecarproxycache.NewIdentitiesCache()
m = sidecarproxymapper.New(destinationsCache, proxyCfgCache, computedRoutesCache, identitiesCache)
)
trustDomainFetcher := func() (string, error) { return "test.consul", nil } trustDomainFetcher := func() (string, error) { return "test.consul", nil }
mgr.Register(Controller(destinationsCache, proxyCfgCache, computedRoutesCache, identitiesCache, m, trustDomainFetcher, "dc1", false)) mgr.Register(Controller(c, trustDomainFetcher, "dc1", false))
mgr.SetRaftLeader(true) mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx) go mgr.Run(suite.ctx)
@ -376,7 +368,7 @@ func (suite *meshControllerTestSuite) TestController() {
apiProxyStateTemplate *pbresource.Resource apiProxyStateTemplate *pbresource.Resource
webProxyStateTemplate *pbresource.Resource webProxyStateTemplate *pbresource.Resource
webDestinations *pbresource.Resource webComputedDestinations *pbresource.Resource
) )
testutil.RunStep(suite.T(), "proxy state template generation", func(t *testing.T) { testutil.RunStep(suite.T(), "proxy state template generation", func(t *testing.T) {
@ -394,9 +386,8 @@ func (suite *meshControllerTestSuite) TestController() {
) )
// Add a source service and check that a new proxy state is generated. // Add a source service and check that a new proxy state is generated.
webDestinations = resourcetest.Resource(pbmesh.DestinationsType, "web-destinations"). webComputedDestinations = resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, suite.webWorkload.Id.Name).
WithData(suite.T(), &pbmesh.Destinations{ WithData(suite.T(), &pbmesh.ComputedExplicitDestinations{
Workloads: &pbcatalog.WorkloadSelector{Names: []string{"web-def"}},
Destinations: []*pbmesh.Destination{ Destinations: []*pbmesh.Destination{
{ {
DestinationRef: resource.Reference(suite.apiService.Id, ""), DestinationRef: resource.Reference(suite.apiService.Id, ""),
@ -471,11 +462,6 @@ func (suite *meshControllerTestSuite) TestController() {
suite.client.RequireResourceNotFound(r, apiProxyStateTemplateID) suite.client.RequireResourceNotFound(r, apiProxyStateTemplateID)
}) })
// Check status on the pbmesh.Destinations resource.
serviceRef := resource.ReferenceToString(resource.Reference(suite.apiService.Id, ""))
suite.client.WaitForStatusCondition(t, webDestinations.Id, ControllerName,
status.ConditionMeshProtocolNotFound(serviceRef))
// We should get a new web proxy template resource because this destination should be removed. // We should get a new web proxy template resource because this destination should be removed.
webProxyStateTemplate = suite.client.WaitForNewVersion(t, webProxyStateTemplateID, webProxyStateTemplate.Version) webProxyStateTemplate = suite.client.WaitForNewVersion(t, webProxyStateTemplateID, webProxyStateTemplate.Version)
@ -504,10 +490,6 @@ func (suite *meshControllerTestSuite) TestController() {
resourcetest.MustDecode[*pbcatalog.Service](t, suite.apiService), resourcetest.MustDecode[*pbcatalog.Service](t, suite.apiService),
) )
serviceRef := resource.ReferenceToString(resource.Reference(suite.apiService.Id, ""))
suite.client.WaitForStatusCondition(t, webDestinations.Id, ControllerName,
status.ConditionMeshProtocolFound(serviceRef))
// We should also get a new web proxy template resource as this destination should be added again. // We should also get a new web proxy template resource as this destination should be added again.
webProxyStateTemplate = suite.client.WaitForNewVersion(t, webProxyStateTemplateID, webProxyStateTemplate.Version) webProxyStateTemplate = suite.client.WaitForNewVersion(t, webProxyStateTemplateID, webProxyStateTemplate.Version)
@ -527,10 +509,10 @@ func (suite *meshControllerTestSuite) TestController() {
testutil.RunStep(suite.T(), "add implicit upstream and enable tproxy", func(t *testing.T) { testutil.RunStep(suite.T(), "add implicit upstream and enable tproxy", func(t *testing.T) {
// Delete explicit destinations resource. // Delete explicit destinations resource.
suite.runtime.Logger.Trace("deleting web destinations") suite.runtime.Logger.Trace("deleting web destinations")
_, err := suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: webDestinations.Id}) _, err := suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: webComputedDestinations.Id})
require.NoError(t, err) require.NoError(t, err)
webProxyStateTemplate = suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version) webProxyStateTemplate = suite.client.WaitForNewVersion(t, webProxyStateTemplateID, webProxyStateTemplate.Version)
// Write a default ComputedRoutes for db, so it's eligible. // Write a default ComputedRoutes for db, so it's eligible.
dbCR := routestest.ReconcileComputedRoutes(suite.T(), suite.client, dbComputedRoutesID, dbCR := routestest.ReconcileComputedRoutes(suite.T(), suite.client, dbComputedRoutesID,
@ -539,11 +521,8 @@ func (suite *meshControllerTestSuite) TestController() {
require.NotNil(t, dbCR) require.NotNil(t, dbCR)
// Enable transparent proxy for the web proxy. // Enable transparent proxy for the web proxy.
resourcetest.Resource(pbmesh.ProxyConfigurationType, "proxy-config"). resourcetest.Resource(pbmesh.ComputedProxyConfigurationType, suite.webWorkload.Id.Name).
WithData(t, &pbmesh.ProxyConfiguration{ WithData(t, &pbmesh.ComputedProxyConfiguration{
Workloads: &pbcatalog.WorkloadSelector{
Prefixes: []string{"web"},
},
DynamicConfig: &pbmesh.DynamicConfig{ DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT, Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
TransparentProxy: &pbmesh.TransparentProxy{ TransparentProxy: &pbmesh.TransparentProxy{
@ -637,16 +616,10 @@ func (suite *meshControllerTestSuite) TestControllerDefaultAllow() {
mgr := controller.NewManager(suite.client, suite.runtime.Logger) mgr := controller.NewManager(suite.client, suite.runtime.Logger)
// Initialize controller dependencies. // Initialize controller dependencies.
var ( c := cache.New()
destinationsCache = sidecarproxycache.NewDestinationsCache()
proxyCfgCache = sidecarproxycache.NewProxyConfigurationCache()
computedRoutesCache = sidecarproxycache.NewComputedRoutesCache()
identitiesCache = sidecarproxycache.NewIdentitiesCache()
m = sidecarproxymapper.New(destinationsCache, proxyCfgCache, computedRoutesCache, identitiesCache)
)
trustDomainFetcher := func() (string, error) { return "test.consul", nil } trustDomainFetcher := func() (string, error) { return "test.consul", nil }
mgr.Register(Controller(destinationsCache, proxyCfgCache, computedRoutesCache, identitiesCache, m, trustDomainFetcher, "dc1", true)) mgr.Register(Controller(c, trustDomainFetcher, "dc1", true))
mgr.SetRaftLeader(true) mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx) go mgr.Run(suite.ctx)

View File

@ -8,12 +8,9 @@ import (
"fmt" "fmt"
"strings" "strings"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache" "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache"
ctrlStatus "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status"
"github.com/hashicorp/consul/internal/mesh/internal/types" "github.com/hashicorp/consul/internal/mesh/internal/types"
intermediateTypes "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate" intermediateTypes "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
"github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/internal/resource"
@ -25,72 +22,50 @@ import (
) )
type Fetcher struct { type Fetcher struct {
Client pbresource.ResourceServiceClient client pbresource.ResourceServiceClient
DestinationsCache *sidecarproxycache.DestinationsCache cache *cache.Cache
ProxyCfgCache *sidecarproxycache.ProxyConfigurationCache
ComputedRoutesCache *sidecarproxycache.ComputedRoutesCache
IdentitiesCache *sidecarproxycache.IdentitiesCache
} }
func New( func New(client pbresource.ResourceServiceClient, cache *cache.Cache) *Fetcher {
client pbresource.ResourceServiceClient,
dCache *sidecarproxycache.DestinationsCache,
pcfgCache *sidecarproxycache.ProxyConfigurationCache,
computedRoutesCache *sidecarproxycache.ComputedRoutesCache,
iCache *sidecarproxycache.IdentitiesCache,
) *Fetcher {
return &Fetcher{ return &Fetcher{
Client: client, client: client,
DestinationsCache: dCache, cache: cache,
ProxyCfgCache: pcfgCache,
ComputedRoutesCache: computedRoutesCache,
IdentitiesCache: iCache,
} }
} }
func (f *Fetcher) FetchWorkload(ctx context.Context, id *pbresource.ID) (*types.DecodedWorkload, error) { func (f *Fetcher) FetchWorkload(ctx context.Context, id *pbresource.ID) (*types.DecodedWorkload, error) {
proxyID := resource.ReplaceType(pbmesh.ProxyStateTemplateType, id) dec, err := resource.GetDecodedResource[*pbcatalog.Workload](ctx, f.client, id)
dec, err := resource.GetDecodedResource[*pbcatalog.Workload](ctx, f.Client, id)
if err != nil { if err != nil {
return nil, err return nil, err
} else if dec == nil { } else if dec == nil {
// We also need to make sure to delete the associated proxy from cache. // We also need to make sure to delete the associated proxy from cache.
// We are ignoring errors from cache here as this deletion is best effort. f.cache.UntrackWorkload(id)
f.DestinationsCache.DeleteSourceProxy(proxyID)
f.ProxyCfgCache.UntrackProxyID(proxyID)
f.IdentitiesCache.UntrackProxyID(proxyID)
return nil, nil return nil, nil
} }
identityID := &pbresource.ID{ f.cache.TrackWorkload(dec)
Name: dec.Data.Identity,
Tenancy: dec.Resource.Id.Tenancy,
Type: pbauth.WorkloadIdentityType,
}
f.IdentitiesCache.TrackPair(identityID, proxyID)
return dec, err return dec, err
} }
func (f *Fetcher) FetchProxyStateTemplate(ctx context.Context, id *pbresource.ID) (*types.DecodedProxyStateTemplate, error) { func (f *Fetcher) FetchProxyStateTemplate(ctx context.Context, id *pbresource.ID) (*types.DecodedProxyStateTemplate, error) {
return resource.GetDecodedResource[*pbmesh.ProxyStateTemplate](ctx, f.Client, id) return resource.GetDecodedResource[*pbmesh.ProxyStateTemplate](ctx, f.client, id)
} }
func (f *Fetcher) FetchComputedTrafficPermissions(ctx context.Context, id *pbresource.ID) (*types.DecodedComputedTrafficPermissions, error) { func (f *Fetcher) FetchComputedTrafficPermissions(ctx context.Context, id *pbresource.ID) (*types.DecodedComputedTrafficPermissions, error) {
return resource.GetDecodedResource[*pbauth.ComputedTrafficPermissions](ctx, f.Client, id) return resource.GetDecodedResource[*pbauth.ComputedTrafficPermissions](ctx, f.client, id)
} }
func (f *Fetcher) FetchServiceEndpoints(ctx context.Context, id *pbresource.ID) (*types.DecodedServiceEndpoints, error) { func (f *Fetcher) FetchServiceEndpoints(ctx context.Context, id *pbresource.ID) (*types.DecodedServiceEndpoints, error) {
return resource.GetDecodedResource[*pbcatalog.ServiceEndpoints](ctx, f.Client, id) return resource.GetDecodedResource[*pbcatalog.ServiceEndpoints](ctx, f.client, id)
} }
func (f *Fetcher) FetchService(ctx context.Context, id *pbresource.ID) (*types.DecodedService, error) { func (f *Fetcher) FetchService(ctx context.Context, id *pbresource.ID) (*types.DecodedService, error) {
return resource.GetDecodedResource[*pbcatalog.Service](ctx, f.Client, id) return resource.GetDecodedResource[*pbcatalog.Service](ctx, f.client, id)
} }
func (f *Fetcher) FetchDestinations(ctx context.Context, id *pbresource.ID) (*types.DecodedDestinations, error) { func (f *Fetcher) FetchDestinations(ctx context.Context, id *pbresource.ID) (*types.DecodedDestinations, error) {
return resource.GetDecodedResource[*pbmesh.Destinations](ctx, f.Client, id) return resource.GetDecodedResource[*pbmesh.Destinations](ctx, f.client, id)
} }
func (f *Fetcher) FetchComputedRoutes(ctx context.Context, id *pbresource.ID) (*types.DecodedComputedRoutes, error) { func (f *Fetcher) FetchComputedRoutes(ctx context.Context, id *pbresource.ID) (*types.DecodedComputedRoutes, error) {
@ -98,11 +73,11 @@ func (f *Fetcher) FetchComputedRoutes(ctx context.Context, id *pbresource.ID) (*
return nil, fmt.Errorf("id must be a ComputedRoutes type") return nil, fmt.Errorf("id must be a ComputedRoutes type")
} }
dec, err := resource.GetDecodedResource[*pbmesh.ComputedRoutes](ctx, f.Client, id) dec, err := resource.GetDecodedResource[*pbmesh.ComputedRoutes](ctx, f.client, id)
if err != nil { if err != nil {
return nil, err return nil, err
} else if dec == nil { } else if dec == nil {
f.ComputedRoutesCache.UntrackComputedRoutes(id) f.cache.UntrackComputedRoutes(id)
} }
return dec, err return dec, err
@ -110,120 +85,84 @@ func (f *Fetcher) FetchComputedRoutes(ctx context.Context, id *pbresource.ID) (*
func (f *Fetcher) FetchExplicitDestinationsData( func (f *Fetcher) FetchExplicitDestinationsData(
ctx context.Context, ctx context.Context,
explDestRefs []intermediateTypes.CombinedDestinationRef, proxyID *pbresource.ID,
) ([]*intermediateTypes.Destination, map[string]*intermediateTypes.Status, error) { ) ([]*intermediateTypes.Destination, error) {
var (
destinations []*intermediateTypes.Destination
statuses = make(map[string]*intermediateTypes.Status)
)
for _, dest := range explDestRefs { var destinations []*intermediateTypes.Destination
// Fetch Destinations resource if there is one.
us, err := f.FetchDestinations(ctx, dest.ExplicitDestinationsID) // Fetch computed explicit destinations first.
cdID := resource.ReplaceType(pbmesh.ComputedExplicitDestinationsType, proxyID)
cd, err := resource.GetDecodedResource[*pbmesh.ComputedExplicitDestinations](ctx, f.client, cdID)
if err != nil { if err != nil {
// If there's an error, return and force another reconcile instead of computing return nil, err
// partial proxy state. }
return nil, statuses, err if cd == nil {
f.cache.UntrackComputedDestinations(cdID)
return nil, nil
} }
if us == nil { // Otherwise, track this resource in the destinations cache.
// If the Destinations resource is not found, then we should delete it from cache and continue. f.cache.TrackComputedDestinations(cd)
f.DestinationsCache.DeleteDestination(dest.ServiceRef, dest.Port)
continue
}
for _, dest := range cd.GetData().GetDestinations() {
d := &intermediateTypes.Destination{} d := &intermediateTypes.Destination{}
var ( var (
serviceID = resource.IDFromReference(dest.ServiceRef) serviceID = resource.IDFromReference(dest.DestinationRef)
serviceRef = resource.ReferenceToString(dest.ServiceRef)
upstreamsRef = resource.IDToString(us.Resource.Id)
) )
// Fetch Service // Fetch Service
svc, err := f.FetchService(ctx, serviceID) svc, err := f.FetchService(ctx, serviceID)
if err != nil { if err != nil {
return nil, statuses, err return nil, err
} }
if svc == nil { if svc == nil {
// If the Service resource is not found, then we update the status // If the Service resource is not found, skip this destination.
// of the Upstreams resource but don't remove it from cache in case
// it comes back.
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionDestinationServiceNotFound(serviceRef))
continue continue
} else {
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionDestinationServiceFound(serviceRef))
} }
d.Service = svc d.Service = svc
// Check if this endpoints is mesh-enabled. If not, remove it from cache and return an error. // Check if this service is mesh-enabled. If not, skip this destination.
if !IsMeshEnabled(svc.Data.Ports) { if !svc.GetData().IsMeshEnabled() {
// Add invalid status but don't remove from cache. If this state changes,
// we want to be able to detect this change.
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshProtocolNotFound(serviceRef))
// This error should not cause the execution to stop, as we want to make sure that this non-mesh destination // This error should not cause the execution to stop, as we want to make sure that this non-mesh destination
// gets removed from the proxy state. // service gets removed from the proxy state.
continue
}
// Check if the desired port exists on the service and skip it if it doesn't.
if svc.GetData().FindServicePort(dest.DestinationPort) == nil {
continue continue
} else {
// If everything was successful, add an empty condition so that we can remove any existing statuses.
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshProtocolFound(serviceRef))
} }
// No destination port should point to a port with "mesh" protocol, // No destination port should point to a port with "mesh" protocol,
// so check if destination port has the mesh protocol and update the status. // so check if destination port has the mesh protocol and skip it if it does.
if isServicePortMeshProtocol(svc.Data.Ports, dest.Port) { if svc.GetData().FindServicePort(dest.DestinationPort).GetProtocol() == pbcatalog.Protocol_PROTOCOL_MESH {
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshProtocolDestinationPort(serviceRef, dest.Port))
continue continue
} else {
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionNonMeshProtocolDestinationPort(serviceRef, dest.Port))
} }
// Fetch ComputedRoutes. // Fetch ComputedRoutes.
cr, err := f.FetchComputedRoutes(ctx, resource.ReplaceType(pbmesh.ComputedRoutesType, serviceID)) cr, err := f.FetchComputedRoutes(ctx, resource.ReplaceType(pbmesh.ComputedRoutesType, serviceID))
if err != nil { if err != nil {
return nil, statuses, err return nil, err
} else if cr == nil { } else if cr == nil {
// This is required, so wait until it exists. // This is required, so wait until it exists.
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation,
ctrlStatus.ConditionDestinationComputedRoutesNotFound(serviceRef))
continue continue
} else {
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation,
ctrlStatus.ConditionDestinationComputedRoutesFound(serviceRef))
} }
portConfig, ok := cr.Data.PortedConfigs[dest.Port] portConfig, ok := cr.Data.PortedConfigs[dest.DestinationPort]
if !ok { if !ok {
// This is required, so wait until it exists. // This is required, so wait until it exists.
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation,
ctrlStatus.ConditionDestinationComputedRoutesPortNotFound(serviceRef, dest.Port))
continue continue
} else {
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation,
ctrlStatus.ConditionDestinationComputedRoutesPortFound(serviceRef, dest.Port))
} }
// Copy this so we can mutate the targets. // Copy this so we can mutate the targets.
d.ComputedPortRoutes = proto.Clone(portConfig).(*pbmesh.ComputedPortRoutes) d.ComputedPortRoutes = proto.Clone(portConfig).(*pbmesh.ComputedPortRoutes)
// As Destinations resource contains a list of destinations, // As Destinations resource contains a list of destinations,
// we need to find the one that references our service and port. // we need to find the one that references our service and port.
d.Explicit = findDestination(dest.ServiceRef, dest.Port, us.Data) d.Explicit = dest
if d.Explicit == nil {
continue // the cache is out of sync
}
// NOTE: we collect both DIRECT and INDIRECT target information here. // NOTE: we collect both DIRECT and INDIRECT target information here.
for _, routeTarget := range d.ComputedPortRoutes.Targets { for _, routeTarget := range d.ComputedPortRoutes.Targets {
@@ -232,7 +171,7 @@ func (f *Fetcher) FetchExplicitDestinationsData(

 			// Fetch ServiceEndpoints.
 			se, err := f.FetchServiceEndpoints(ctx, resource.ReplaceType(pbcatalog.ServiceEndpointsType, targetServiceID))
 			if err != nil {
-				return nil, statuses, err
+				return nil, err
 			}

 			if se != nil {
@@ -241,9 +180,9 @@ func (f *Fetcher) FetchExplicitDestinationsData(

 				// Gather all identities.
 				var identities []*pbresource.Reference
-				for _, ep := range se.Data.Endpoints {
+				for _, identity := range se.GetData().GetIdentities() {
 					identities = append(identities, &pbresource.Reference{
-						Name:    ep.Identity,
+						Name:    identity,
 						Tenancy: se.Resource.Id.Tenancy,
 					})
 				}
@@ -254,7 +193,7 @@ func (f *Fetcher) FetchExplicitDestinationsData(

 		destinations = append(destinations, d)
 	}

-	return destinations, statuses, nil
+	return destinations, nil
 }

 type PortReferenceKey struct {
@@ -284,7 +223,7 @@ func (f *Fetcher) FetchImplicitDestinationsData(
 	}

 	// For now we need to look up all computed routes within a partition.
-	rsp, err := f.Client.List(ctx, &pbresource.ListRequest{
+	rsp, err := f.client.List(ctx, &pbresource.ListRequest{
 		Type: pbmesh.ComputedRoutesType,
 		Tenancy: &pbresource.Tenancy{
 			Namespace: storage.Wildcard,
@@ -408,115 +347,12 @@ func (f *Fetcher) FetchImplicitDestinationsData(

 	return addToDestinations, err
 }
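
The loop above now derives identity references straight from the ServiceEndpoints data rather than from each individual endpoint. A minimal sketch of that shape, assuming the DecodedServiceEndpoints alias from the mesh types package; the helper name identityRefsFromEndpoints is illustrative and not part of this change:

// Sketch only: identities are listed once on the ServiceEndpoints data, so the
// result no longer repeats an identity per endpoint.
func identityRefsFromEndpoints(se *types.DecodedServiceEndpoints) []*pbresource.Reference {
	var identities []*pbresource.Reference
	for _, identity := range se.GetData().GetIdentities() {
		identities = append(identities, &pbresource.Reference{
			Name:    identity,
			Tenancy: se.Resource.Id.Tenancy,
		})
	}
	return identities
}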
-// FetchAndMergeProxyConfigurations fetches proxy configurations for the proxy state template provided by id
+// FetchComputedProxyConfiguration fetches proxy configurations for the proxy state template provided by id
 // and merges them into one object.
-func (f *Fetcher) FetchAndMergeProxyConfigurations(ctx context.Context, id *pbresource.ID) (*pbmesh.ProxyConfiguration, error) {
-	proxyCfgRefs := f.ProxyCfgCache.ProxyConfigurationsByProxyID(id)
-
-	result := &pbmesh.ProxyConfiguration{
-		DynamicConfig: &pbmesh.DynamicConfig{},
-	}
-	for _, ref := range proxyCfgRefs {
-		proxyCfgID := &pbresource.ID{
-			Name:    ref.GetName(),
-			Type:    ref.GetType(),
-			Tenancy: ref.GetTenancy(),
-		}
-		rsp, err := f.Client.Read(ctx, &pbresource.ReadRequest{
-			Id: proxyCfgID,
-		})
-		switch {
-		case status.Code(err) == codes.NotFound:
-			f.ProxyCfgCache.UntrackProxyConfiguration(proxyCfgID)
-			return nil, nil
-		case err != nil:
-			return nil, err
-		}
-
-		var proxyCfg pbmesh.ProxyConfiguration
-		err = rsp.Resource.Data.UnmarshalTo(&proxyCfg)
-		if err != nil {
-			return nil, err
-		}
-
-		// Note that we only care about dynamic config as bootstrap config
-		// will not be updated dynamically by this controller.
-		// todo (ishustava): do sorting etc.
-		proto.Merge(result.DynamicConfig, proxyCfg.DynamicConfig)
-	}
-
-	// Default the outbound listener port. If we don't do the nil check here, then BuildDestinations will panic creating
-	// the outbound listener.
-	if result.DynamicConfig.TransparentProxy == nil {
-		result.DynamicConfig.TransparentProxy = &pbmesh.TransparentProxy{OutboundListenerPort: 15001}
-	}
-
-	return result, nil
-}
+func (f *Fetcher) FetchComputedProxyConfiguration(ctx context.Context, id *pbresource.ID) (*types.DecodedComputedProxyConfiguration, error) {
+	compProxyCfgID := resource.ReplaceType(pbmesh.ComputedProxyConfigurationType, id)
+	return resource.GetDecodedResource[*pbmesh.ComputedProxyConfiguration](ctx, f.client, compProxyCfgID)
+}
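
The replacement above turns the old read-and-merge loop into a single decoded read of the ComputedProxyConfiguration named after the proxy state template. A rough usage sketch under that assumption; the caller function dynamicConfigFor and its nil handling are illustrative, not taken from this change:

// Illustrative caller: fetch the computed proxy configuration for a proxy
// state template ID. A nil decoded result simply means it has not been
// computed yet.
func dynamicConfigFor(ctx context.Context, f *Fetcher, proxyStateTemplateID *pbresource.ID) (*pbmesh.DynamicConfig, error) {
	decoded, err := f.FetchComputedProxyConfiguration(ctx, proxyStateTemplateID)
	if err != nil {
		return nil, err
	}
	if decoded == nil {
		return nil, nil
	}
	return decoded.Data.GetDynamicConfig(), nil
}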
// IsWorkloadMeshEnabled returns true if the workload or service endpoints port
// contain a port with the "mesh" protocol.
func IsWorkloadMeshEnabled(ports map[string]*pbcatalog.WorkloadPort) bool {
for _, port := range ports {
if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
return true
}
}
return false
}
// IsMeshEnabled returns true if the service ports contain a port with the
// "mesh" protocol.
func IsMeshEnabled(ports []*pbcatalog.ServicePort) bool {
for _, port := range ports {
if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
return true
}
}
return false
}
func isServicePortMeshProtocol(ports []*pbcatalog.ServicePort, name string) bool {
sp := findServicePort(ports, name)
return sp != nil && sp.Protocol == pbcatalog.Protocol_PROTOCOL_MESH
}
func findServicePort(ports []*pbcatalog.ServicePort, name string) *pbcatalog.ServicePort {
for _, port := range ports {
if port.TargetPort == name {
return port
}
}
return nil
}
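
Elsewhere in this change the same checks are expressed through methods on the Service payload itself (svc.GetData().IsMeshEnabled(), FindServicePort, GetProtocol()), which is why these package-level helpers can go away. A rough equivalent of isServicePortMeshProtocol in terms of those methods; the function name destinationPortIsMesh is illustrative only:

// Sketch: same question as isServicePortMeshProtocol above, asked through the
// Service methods the updated fetcher uses. A missing port yields the zero
// protocol value, so this stays false when the port does not exist.
func destinationPortIsMesh(svc *pbcatalog.Service, port string) bool {
	return svc.FindServicePort(port).GetProtocol() == pbcatalog.Protocol_PROTOCOL_MESH
}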
func findDestination(ref *pbresource.Reference, port string, destinations *pbmesh.Destinations) *pbmesh.Destination {
for _, destination := range destinations.Destinations {
if resource.EqualReference(ref, destination.DestinationRef) &&
port == destination.DestinationPort {
return destination
}
}
return nil
}
func updateStatusCondition(
statuses map[string]*intermediateTypes.Status,
key string,
id *pbresource.ID,
oldStatus map[string]*pbresource.Status,
generation string,
condition *pbresource.Condition) {
if _, ok := statuses[key]; ok {
statuses[key].Conditions = append(statuses[key].Conditions, condition)
} else {
statuses[key] = &intermediateTypes.Status{
ID: id,
Generation: generation,
Conditions: []*pbresource.Condition{condition},
OldStatus: oldStatus,
}
}
}
func isPartOfService(workloadID *pbresource.ID, svc *types.DecodedService) bool {


@@ -9,15 +9,12 @@ import (

 	"github.com/stretchr/testify/require"
 	"github.com/stretchr/testify/suite"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"

 	svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
 	"github.com/hashicorp/consul/internal/catalog"
 	"github.com/hashicorp/consul/internal/controller"
-	"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
 	"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/routestest"
-	meshStatus "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status"
+	"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache"
 	"github.com/hashicorp/consul/internal/mesh/internal/types"
 	"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
 	"github.com/hashicorp/consul/internal/resource"
@@ -30,98 +27,6 @@ import (
 	"github.com/hashicorp/consul/sdk/testutil"
 )
func TestIsMeshEnabled(t *testing.T) {
cases := map[string]struct {
ports []*pbcatalog.ServicePort
exp bool
}{
"nil ports": {
ports: nil,
exp: false,
},
"empty ports": {
ports: []*pbcatalog.ServicePort{},
exp: false,
},
"no mesh ports": {
ports: []*pbcatalog.ServicePort{
{VirtualPort: 1000, TargetPort: "p1", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
{VirtualPort: 2000, TargetPort: "p2", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
},
exp: false,
},
"one mesh port": {
ports: []*pbcatalog.ServicePort{
{VirtualPort: 1000, TargetPort: "p1", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
{VirtualPort: 2000, TargetPort: "p2", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
{VirtualPort: 3000, TargetPort: "p3", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
exp: true,
},
"multiple mesh ports": {
ports: []*pbcatalog.ServicePort{
{VirtualPort: 1000, TargetPort: "p1", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
{VirtualPort: 2000, TargetPort: "p2", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
{VirtualPort: 3000, TargetPort: "p3", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
{VirtualPort: 4000, TargetPort: "p4", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
exp: true,
},
}
for name, c := range cases {
t.Run(name, func(t *testing.T) {
require.Equal(t, c.exp, IsMeshEnabled(c.ports))
})
}
}
func TestIsWorkloadMeshEnabled(t *testing.T) {
cases := map[string]struct {
ports map[string]*pbcatalog.WorkloadPort
exp bool
}{
"nil ports": {
ports: nil,
exp: false,
},
"empty ports": {
ports: make(map[string]*pbcatalog.WorkloadPort),
exp: false,
},
"no mesh ports": {
ports: map[string]*pbcatalog.WorkloadPort{
"p1": {Port: 1000, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"p2": {Port: 2000, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
},
exp: false,
},
"one mesh port": {
ports: map[string]*pbcatalog.WorkloadPort{
"p1": {Port: 1000, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"p2": {Port: 2000, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
"p3": {Port: 3000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
exp: true,
},
"multiple mesh ports": {
ports: map[string]*pbcatalog.WorkloadPort{
"p1": {Port: 1000, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"p2": {Port: 2000, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
"p3": {Port: 3000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
"p4": {Port: 4000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
exp: true,
},
}
for name, c := range cases {
t.Run(name, func(t *testing.T) {
require.Equal(t, c.exp, IsWorkloadMeshEnabled(c.ports))
})
}
}
 type dataFetcherSuite struct {
 	suite.Suite
@@ -137,8 +42,7 @@ type dataFetcherSuite struct {
 	api1ServiceEndpointsData *pbcatalog.ServiceEndpoints
 	api2ServiceEndpoints     *pbresource.Resource
 	api2ServiceEndpointsData *pbcatalog.ServiceEndpoints
-	webDestinations          *pbresource.Resource
-	webDestinationsData      *pbmesh.Destinations
+	webComputedDestinationsData *pbmesh.ComputedExplicitDestinations
 	webProxy                 *pbresource.Resource
 	webWorkload              *pbresource.Resource
 }
@@ -205,7 +109,7 @@ func (suite *dataFetcherSuite) SetupTest() {
 		WithData(suite.T(), suite.api2ServiceEndpointsData).
 		Write(suite.T(), suite.client)

-	suite.webDestinationsData = &pbmesh.Destinations{
+	suite.webComputedDestinationsData = &pbmesh.ComputedExplicitDestinations{
 		Destinations: []*pbmesh.Destination{
 			{
 				DestinationRef: resource.Reference(suite.api1Service.Id, ""),
@@ -222,10 +126,6 @@ func (suite *dataFetcherSuite) SetupTest() {
 			},
 		}

-	suite.webDestinations = resourcetest.Resource(pbmesh.DestinationsType, "web-destinations").
-		WithData(suite.T(), suite.webDestinationsData).
-		Write(suite.T(), suite.client)
-
 	suite.webProxy = resourcetest.Resource(pbmesh.ProxyStateTemplateType, "web-abc").
 		WithData(suite.T(), &pbmesh.ProxyStateTemplate{}).
 		Write(suite.T(), suite.client)
@@ -239,48 +139,43 @@ func (suite *dataFetcherSuite) SetupTest() {
 }

 func (suite *dataFetcherSuite) TestFetcher_FetchWorkload_WorkloadNotFound() {
-	proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
-		WithTenancy(resource.DefaultNamespacedTenancy()).
-		ID()
 	identityID := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-abc").ID()

 	// Create cache and pre-populate it.
-	var (
-		destCache           = sidecarproxycache.NewDestinationsCache()
-		proxyCfgCache       = sidecarproxycache.NewProxyConfigurationCache()
-		computedRoutesCache = sidecarproxycache.NewComputedRoutesCache()
-		identitiesCache     = sidecarproxycache.NewIdentitiesCache()
-	)
+	c := cache.New()

 	f := Fetcher{
-		DestinationsCache:   destCache,
-		ProxyCfgCache:       proxyCfgCache,
-		ComputedRoutesCache: computedRoutesCache,
-		IdentitiesCache:     identitiesCache,
-		Client:              suite.client,
+		cache:  c,
+		client: suite.client,
 	}

-	// Prepopulate the cache.
-	dest1 := intermediate.CombinedDestinationRef{
-		ServiceRef:             resourcetest.Resource(pbcatalog.ServiceType, "test-service-1").ReferenceNoSection(),
-		Port:                   "tcp",
-		ExplicitDestinationsID: resourcetest.Resource(pbmesh.DestinationsType, "test-servicedestinations-1").ID(),
-		SourceProxies: map[resource.ReferenceKey]struct{}{
-			resource.NewReferenceKey(proxyID): {},
-		},
-	}
-	dest2 := intermediate.CombinedDestinationRef{
-		ServiceRef:             resourcetest.Resource(pbcatalog.ServiceType, "test-service-2").ReferenceNoSection(),
-		Port:                   "tcp",
-		ExplicitDestinationsID: resourcetest.Resource(pbmesh.DestinationsType, "test-servicedestinations-2").ID(),
-		SourceProxies: map[resource.ReferenceKey]struct{}{
-			resource.NewReferenceKey(proxyID): {},
-		},
-	}
-	destCache.WriteDestination(dest1)
-	destCache.WriteDestination(dest2)
-	suite.syncDestinations(dest1, dest2)
+	workloadID := resourcetest.Resource(pbcatalog.WorkloadType, "not-found").ID()
+
+	// Track workload with its identity.
+	workload := resourcetest.Resource(pbcatalog.WorkloadType, workloadID.GetName()).
+		WithTenancy(resource.DefaultNamespacedTenancy()).
+		WithData(suite.T(), &pbcatalog.Workload{
+			Identity: identityID.Name,
+		}).Build()
+	c.TrackWorkload(resourcetest.MustDecode[*pbcatalog.Workload](suite.T(), workload))
+
+	// Now fetch the workload so that we can check that it's been removed from cache.
+	_, err := f.FetchWorkload(context.Background(), workloadID)
+	require.NoError(suite.T(), err)
+	require.Nil(suite.T(), c.WorkloadsByWorkloadIdentity(identityID))
+}
+
+func (suite *dataFetcherSuite) TestFetcher_FetchWorkload_WorkloadFound() {
+	identityID := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-abc").ID()
+
+	// Create cache and pre-populate it.
+	c := cache.New()
+
+	f := Fetcher{
+		cache:  c,
+		client: suite.client,
+	}

 	workload := resourcetest.Resource(pbcatalog.WorkloadType, "service-workload-abc").
 		WithTenancy(resource.DefaultNamespacedTenancy()).
@@ -297,254 +192,74 @@ func (suite *dataFetcherSuite) TestFetcher_FetchWorkload_WorkloadNotFound() {
 		},
 	}).Write(suite.T(), suite.client)

-	// Track the workload's identity
+	// This call should track the workload's identity
 	_, err := f.FetchWorkload(context.Background(), workload.Id)
 	require.NoError(suite.T(), err)
-	require.NotNil(suite.T(), destCache.DestinationsBySourceProxy(proxyID))
-	require.Nil(suite.T(), proxyCfgCache.ProxyConfigurationsByProxyID(proxyID))
-	require.Nil(suite.T(), proxyCfgCache.ProxyConfigurationsByProxyID(proxyID))
-	require.Equal(suite.T(), []*pbresource.ID{proxyID}, identitiesCache.ProxyIDsByWorkloadIdentity(identityID))
-
-	proxyCfgID := resourcetest.Resource(pbmesh.ProxyConfigurationType, "proxy-config").ID()
-	proxyCfgCache.TrackProxyConfiguration(proxyCfgID, []resource.ReferenceOrID{proxyID})
-
-	_, err = f.FetchWorkload(context.Background(), proxyID)
-	require.NoError(suite.T(), err)
-
-	// Check that cache is updated to remove proxy id.
-	require.Nil(suite.T(), destCache.DestinationsBySourceProxy(proxyID))
-	require.Nil(suite.T(), proxyCfgCache.ProxyConfigurationsByProxyID(proxyID))
-	require.Nil(suite.T(), proxyCfgCache.ProxyConfigurationsByProxyID(proxyID))
-	require.Nil(suite.T(), identitiesCache.ProxyIDsByWorkloadIdentity(identityID))
+
+	// Check that the workload is tracked
+	workload.Id.Uid = ""
+	prototest.AssertElementsMatch(suite.T(), []*pbresource.ID{workload.Id}, c.WorkloadsByWorkloadIdentity(identityID))
 }
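
These two tests exercise the fetcher keeping the unified cache's identity index in sync. A condensed sketch of that flow using only calls visible in the diff (cache.New, TrackWorkload, WorkloadsByWorkloadIdentity, FetchWorkload); the *cache.Cache type name and the helper itself are assumptions for illustration:

// Sketch: a successful FetchWorkload tracks the workload under its identity,
// while fetching an ID that no longer exists untracks it, so the returned
// slice is nil once the workload is gone.
func workloadIDsForIdentity(ctx context.Context, f *Fetcher, c *cache.Cache, workloadID, identityID *pbresource.ID) ([]*pbresource.ID, error) {
	if _, err := f.FetchWorkload(ctx, workloadID); err != nil {
		return nil, err
	}
	return c.WorkloadsByWorkloadIdentity(identityID), nil
}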
func (suite *dataFetcherSuite) TestFetcher_NotFound() {
// This test checks that we ignore not found errors for various types we need to fetch.
f := Fetcher{
Client: suite.client,
}
cases := map[string]struct {
typ *pbresource.Type
fetchFunc func(id *pbresource.ID) error
}{
"proxy state template": {
typ: pbmesh.ProxyStateTemplateType,
fetchFunc: func(id *pbresource.ID) error {
_, err := f.FetchProxyStateTemplate(context.Background(), id)
return err
},
},
"service endpoints": {
typ: pbcatalog.ServiceEndpointsType,
fetchFunc: func(id *pbresource.ID) error {
_, err := f.FetchServiceEndpoints(context.Background(), id)
return err
},
},
"destinations": {
typ: pbmesh.DestinationsType,
fetchFunc: func(id *pbresource.ID) error {
_, err := f.FetchDestinations(context.Background(), id)
return err
},
},
"service": {
typ: pbcatalog.ServiceType,
fetchFunc: func(id *pbresource.ID) error {
_, err := f.FetchService(context.Background(), id)
return err
},
},
}
for name, c := range cases {
suite.T().Run(name, func(t *testing.T) {
err := c.fetchFunc(resourcetest.Resource(c.typ, "not-found").ID())
require.NoError(t, err)
})
}
}
func (suite *dataFetcherSuite) TestFetcher_FetchErrors() {
f := Fetcher{
Client: suite.client,
}
cases := map[string]struct {
name string
fetchFunc func(id *pbresource.ID) error
}{
"workload": {
name: "web-abc",
fetchFunc: func(id *pbresource.ID) error {
_, err := f.FetchWorkload(context.Background(), id)
return err
},
},
"proxy state template": {
name: "web-abc",
fetchFunc: func(id *pbresource.ID) error {
_, err := f.FetchProxyStateTemplate(context.Background(), id)
return err
},
},
"service endpoints": {
name: "api-1",
fetchFunc: func(id *pbresource.ID) error {
_, err := f.FetchServiceEndpoints(context.Background(), id)
return err
},
},
"destinations": {
name: "web-destinations",
fetchFunc: func(id *pbresource.ID) error {
_, err := f.FetchDestinations(context.Background(), id)
return err
},
},
"service": {
name: "web-service",
fetchFunc: func(id *pbresource.ID) error {
_, err := f.FetchService(context.Background(), id)
return err
},
},
}
for name, c := range cases {
suite.T().Run(name+"-read", func(t *testing.T) {
badType := &pbresource.Type{
Group: "not",
Kind: "found",
GroupVersion: "vfake",
}
err := c.fetchFunc(resourcetest.Resource(badType, c.name).ID())
require.Error(t, err)
require.Equal(t, codes.InvalidArgument, status.Code(err))
})
suite.T().Run(name+"-unmarshal", func(t *testing.T) {
// Create a dummy health checks type as it won't be any of the types mesh controller cares about
resourcetest.Resource(pbcatalog.HealthChecksType, c.name).
WithData(suite.T(), &pbcatalog.HealthChecks{
Workloads: &pbcatalog.WorkloadSelector{Names: []string{"web-abc"}},
}).
Write(suite.T(), suite.client)
err := c.fetchFunc(resourcetest.Resource(pbcatalog.HealthChecksType, c.name).ID())
require.Error(t, err)
var parseErr resource.ErrDataParse
require.ErrorAs(t, err, &parseErr)
})
}
}
func (suite *dataFetcherSuite) syncDestinations(destinations ...intermediate.CombinedDestinationRef) {
data := &pbmesh.Destinations{}
for _, dest := range destinations {
data.Destinations = append(data.Destinations, &pbmesh.Destination{
DestinationRef: dest.ServiceRef,
DestinationPort: dest.Port,
})
}
suite.webDestinations = resourcetest.Resource(pbmesh.DestinationsType, "web-destinations").
WithData(suite.T(), data).
Write(suite.T(), suite.client)
} }

 func (suite *dataFetcherSuite) TestFetcher_FetchExplicitDestinationsData() {
-	var (
-		c       = sidecarproxycache.NewDestinationsCache()
-		crCache = sidecarproxycache.NewComputedRoutesCache()
-	)
-
-	writeDestination := func(t *testing.T, dest intermediate.CombinedDestinationRef) {
-		c.WriteDestination(dest)
-		t.Cleanup(func() {
-			c.DeleteDestination(dest.ServiceRef, dest.Port)
-		})
-	}
+	c := cache.New()

 	var (
 		api1ServiceRef = resource.Reference(suite.api1Service.Id, "")
 	)

 	f := Fetcher{
-		DestinationsCache:   c,
-		ComputedRoutesCache: crCache,
-		Client:              suite.client,
+		cache:  c,
+		client: suite.client,
 	}

-	testutil.RunStep(suite.T(), "invalid destinations: destinations not found", func(t *testing.T) {
-		destinationRefNoDestinations := intermediate.CombinedDestinationRef{
-			ServiceRef:             api1ServiceRef,
-			Port:                   "tcp",
-			ExplicitDestinationsID: resourcetest.Resource(pbmesh.DestinationsType, "not-found").ID(),
-			SourceProxies: map[resource.ReferenceKey]struct{}{
-				resource.NewReferenceKey(suite.webProxy.Id): {},
-			},
-		}
-		c.WriteDestination(destinationRefNoDestinations)
-		suite.syncDestinations(destinationRefNoDestinations)
-
-		destinationRefs := []intermediate.CombinedDestinationRef{destinationRefNoDestinations}
-		destinations, _, err := f.FetchExplicitDestinationsData(suite.ctx, destinationRefs)
+	testutil.RunStep(suite.T(), "computed destinations not found", func(t *testing.T) {
+		// First add computed destination to cache so we can check if it's untracked later.
+		compDest := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, suite.webProxy.Id.Name).
+			WithData(t, &pbmesh.ComputedExplicitDestinations{
+				Destinations: []*pbmesh.Destination{
+					{
+						DestinationRef:  api1ServiceRef,
+						DestinationPort: "tcp1",
+					},
+				},
+			}).
+			WithTenancy(resource.DefaultNamespacedTenancy()).
+			Build()
+		c.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDest))
+
+		// We will try to fetch explicit destinations for a proxy that doesn't have one.
+		destinations, err := f.FetchExplicitDestinationsData(suite.ctx, suite.webProxy.Id)
 		require.NoError(t, err)
 		require.Nil(t, destinations)
-		_, foundDest := c.ReadDestination(destinationRefNoDestinations.ServiceRef, destinationRefNoDestinations.Port)
-		require.False(t, foundDest)
+
+		// Check that cache no longer has this destination.
+		require.Nil(t, c.ComputedDestinationsByService(resource.IDFromReference(api1ServiceRef)))
 	})
testutil.RunStep(suite.T(), "invalid destinations: service not found", func(t *testing.T) { testutil.RunStep(suite.T(), "invalid destinations: service not found", func(t *testing.T) {
notFoundServiceRef := resourcetest.Resource(pbcatalog.ServiceType, "not-found"). notFoundServiceRef := resourcetest.Resource(pbcatalog.ServiceType, "not-found").
WithTenancy(resource.DefaultNamespacedTenancy()). WithTenancy(resource.DefaultNamespacedTenancy()).
ReferenceNoSection() ReferenceNoSection()
destinationNoServiceEndpoints := intermediate.CombinedDestinationRef{
ServiceRef: notFoundServiceRef,
Port: "tcp",
ExplicitDestinationsID: suite.webDestinations.Id,
SourceProxies: map[resource.ReferenceKey]struct{}{
resource.NewReferenceKey(suite.webProxy.Id): {},
},
}
c.WriteDestination(destinationNoServiceEndpoints)
suite.syncDestinations(destinationNoServiceEndpoints)
t.Cleanup(func() { compDest := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, suite.webProxy.Id.Name).
// Restore this for the next test step. WithData(t, &pbmesh.ComputedExplicitDestinations{
suite.webDestinations = resourcetest.Resource(pbmesh.DestinationsType, "web-destinations"). Destinations: []*pbmesh.Destination{
WithData(suite.T(), suite.webDestinationsData). {
Write(suite.T(), suite.client)
})
suite.webDestinations = resourcetest.Resource(pbmesh.DestinationsType, "web-destinations").
WithData(suite.T(), &pbmesh.Destinations{
Destinations: []*pbmesh.Destination{{
DestinationRef: notFoundServiceRef, DestinationRef: notFoundServiceRef,
DestinationPort: "tcp", DestinationPort: "tcp",
}}, },
},
}). }).
Write(suite.T(), suite.client) WithTenancy(resource.DefaultNamespacedTenancy()).
Write(t, suite.client)
destinationRefs := []intermediate.CombinedDestinationRef{destinationNoServiceEndpoints} destinations, err := f.FetchExplicitDestinationsData(suite.ctx, suite.webProxy.Id)
destinations, statuses, err := f.FetchExplicitDestinationsData(suite.ctx, destinationRefs)
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, destinations) require.Nil(t, destinations)
cachedCompDestIDs := c.ComputedDestinationsByService(resource.IDFromReference(notFoundServiceRef))
destinationRef := resource.IDToString(destinationNoServiceEndpoints.ExplicitDestinationsID) compDest.Id.Uid = ""
serviceRef := resource.ReferenceToString(destinationNoServiceEndpoints.ServiceRef) prototest.AssertElementsMatch(t, []*pbresource.ID{compDest.Id}, cachedCompDestIDs)
destStatus, exists := statuses[destinationRef]
require.True(t, exists, "status map does not contain service: %s", destinationRef)
require.Len(t, destStatus.Conditions, 1)
require.Equal(t, destStatus.Conditions[0],
meshStatus.ConditionDestinationServiceNotFound(serviceRef))
_, foundDest := c.ReadDestination(destinationNoServiceEndpoints.ServiceRef, destinationNoServiceEndpoints.Port)
require.True(t, foundDest)
}) })
testutil.RunStep(suite.T(), "invalid destinations: service not on mesh", func(t *testing.T) { testutil.RunStep(suite.T(), "invalid destinations: service not on mesh", func(t *testing.T) {
@ -553,150 +268,93 @@ func (suite *dataFetcherSuite) TestFetcher_FetchExplicitDestinationsData() {
{TargetPort: "tcp", Protocol: pbcatalog.Protocol_PROTOCOL_TCP}, {TargetPort: "tcp", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
}, },
} }
suite.api1Service = resourcetest.ResourceID(suite.api1Service.Id). resourcetest.ResourceID(suite.api1Service.Id).
WithData(suite.T(), apiNonMeshServiceData). WithData(t, apiNonMeshServiceData).
Write(suite.T(), suite.client) Write(t, suite.client)
destinationNonMeshServiceEndpoints := intermediate.CombinedDestinationRef{ compDest := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, suite.webProxy.Id.Name).
ServiceRef: api1ServiceRef, WithData(t, &pbmesh.ComputedExplicitDestinations{
Port: "tcp", Destinations: []*pbmesh.Destination{
ExplicitDestinationsID: suite.webDestinations.Id, {
SourceProxies: map[resource.ReferenceKey]struct{}{ DestinationRef: api1ServiceRef,
resource.NewReferenceKey(suite.webProxy.Id): {}, DestinationPort: "tcp",
}, },
} },
c.WriteDestination(destinationNonMeshServiceEndpoints) }).
suite.syncDestinations(destinationNonMeshServiceEndpoints) WithTenancy(resource.DefaultNamespacedTenancy()).
Write(t, suite.client)
destinationRefs := []intermediate.CombinedDestinationRef{destinationNonMeshServiceEndpoints} destinations, err := f.FetchExplicitDestinationsData(suite.ctx, suite.webProxy.Id)
destinations, statuses, err := f.FetchExplicitDestinationsData(suite.ctx, destinationRefs)
require.NoError(t, err) require.NoError(t, err)
require.Nil(t, destinations) require.Nil(t, destinations)
cachedCompDestIDs := c.ComputedDestinationsByService(resource.IDFromReference(api1ServiceRef))
compDest.Id.Uid = ""
prototest.AssertElementsMatch(t, []*pbresource.ID{compDest.Id}, cachedCompDestIDs)
})
destinationRef := resource.IDToString(destinationNonMeshServiceEndpoints.ExplicitDestinationsID) testutil.RunStep(suite.T(), "invalid destinations: destination port not found", func(t *testing.T) {
serviceRef := resource.ReferenceToString(destinationNonMeshServiceEndpoints.ServiceRef) resourcetest.ResourceID(suite.api1Service.Id).
WithData(t, &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
{TargetPort: "some-other-port", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
{TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
}).
Write(t, suite.client)
compDest := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, suite.webProxy.Id.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: api1ServiceRef,
DestinationPort: "tcp",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Write(t, suite.client)
destStatus, exists := statuses[destinationRef] destinations, err := f.FetchExplicitDestinationsData(suite.ctx, suite.webProxy.Id)
require.True(t, exists, "status map does not contain service: %s", destinationRef) require.NoError(t, err)
require.Nil(t, destinations)
cachedCompDestIDs := c.ComputedDestinationsByService(resource.IDFromReference(api1ServiceRef))
compDest.Id.Uid = ""
prototest.AssertElementsMatch(t, []*pbresource.ID{compDest.Id}, cachedCompDestIDs)
})
prototest.AssertElementsMatch(t, []*pbresource.Condition{
meshStatus.ConditionDestinationServiceFound(serviceRef),
meshStatus.ConditionMeshProtocolNotFound(serviceRef),
}, destStatus.Conditions)
_, foundDest := c.ReadDestination(destinationNonMeshServiceEndpoints.ServiceRef, destinationNonMeshServiceEndpoints.Port)
require.True(t, foundDest)
// Update the service to be mesh enabled again and check that the status is now valid.
suite.api1Service = resourcetest.ResourceID(suite.api1Service.Id). suite.api1Service = resourcetest.ResourceID(suite.api1Service.Id).
WithData(suite.T(), suite.api1ServiceData). WithData(suite.T(), suite.api1ServiceData).
Write(suite.T(), suite.client) Write(suite.T(), suite.client)
destinations, statuses, err = f.FetchExplicitDestinationsData(suite.ctx, destinationRefs) suite.api2Service = resourcetest.ResourceID(suite.api2Service.Id).
require.NoError(t, err) WithData(suite.T(), suite.api2ServiceData).
require.Nil(t, destinations) Write(suite.T(), suite.client)
destStatus, exists = statuses[destinationRef]
require.True(t, exists, "status map does not contain service: %s", destinationRef)
prototest.AssertElementsMatch(t, []*pbresource.Condition{
meshStatus.ConditionDestinationServiceFound(serviceRef),
meshStatus.ConditionMeshProtocolFound(serviceRef),
meshStatus.ConditionNonMeshProtocolDestinationPort(serviceRef, destinationNonMeshServiceEndpoints.Port),
meshStatus.ConditionDestinationComputedRoutesNotFound(serviceRef),
}, destStatus.Conditions)
})
testutil.RunStep(suite.T(), "invalid destinations: destination is pointing to a mesh port", func(t *testing.T) { testutil.RunStep(suite.T(), "invalid destinations: destination is pointing to a mesh port", func(t *testing.T) {
// Create a destination pointing to the mesh port. // Create a computed destinations resource pointing to the mesh port.
destinationMeshDestinationPort := intermediate.CombinedDestinationRef{ compDest := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, suite.webProxy.Id.Name).
ServiceRef: api1ServiceRef, WithData(t, &pbmesh.ComputedExplicitDestinations{
Port: "mesh", Destinations: []*pbmesh.Destination{
ExplicitDestinationsID: suite.webDestinations.Id, {
SourceProxies: map[resource.ReferenceKey]struct{}{ DestinationRef: api1ServiceRef,
resource.NewReferenceKey(suite.webProxy.Id): {}, DestinationPort: "mesh",
}, },
}
c.WriteDestination(destinationMeshDestinationPort)
suite.syncDestinations(destinationMeshDestinationPort)
destinationRefs := []intermediate.CombinedDestinationRef{destinationMeshDestinationPort}
destinations, statuses, err := f.FetchExplicitDestinationsData(suite.ctx, destinationRefs)
serviceRef := resource.ReferenceToString(destinationMeshDestinationPort.ServiceRef)
destinationRef := resource.IDToString(destinationMeshDestinationPort.ExplicitDestinationsID)
expectedStatus := &intermediate.Status{
ID: suite.webDestinations.Id,
Generation: suite.webDestinations.Generation,
Conditions: []*pbresource.Condition{
meshStatus.ConditionDestinationServiceFound(serviceRef),
meshStatus.ConditionMeshProtocolFound(serviceRef),
meshStatus.ConditionMeshProtocolDestinationPort(serviceRef, destinationMeshDestinationPort.Port),
}, },
} }).
WithTenancy(resource.DefaultNamespacedTenancy()).
Write(t, suite.client)
destinations, err := f.FetchExplicitDestinationsData(suite.ctx, suite.webProxy.Id)
require.NoError(t, err) require.NoError(t, err)
destStatus, exists := statuses[destinationRef]
require.True(t, exists, "status map does not contain service: %s", destinationRef)
// Check that the status is generated correctly.
prototest.AssertDeepEqual(t, expectedStatus, destStatus)
// Check that we didn't return any destinations.
require.Empty(t, destinations) require.Empty(t, destinations)
// Check that destination service is still in cache because it's still referenced from the pbmesh.Destinations cachedCompDestIDs := c.ComputedDestinationsByService(resource.IDFromReference(api1ServiceRef))
// resource. compDest.Id.Uid = ""
_, foundDest := c.ReadDestination(destinationMeshDestinationPort.ServiceRef, destinationMeshDestinationPort.Port) prototest.AssertElementsMatch(t, []*pbresource.ID{compDest.Id}, cachedCompDestIDs)
require.True(t, foundDest)
// Update the destination to point to a non-mesh port and check that the status is now updated.
destinationRefs[0].Port = "tcp"
c.WriteDestination(destinationMeshDestinationPort)
suite.syncDestinations(destinationMeshDestinationPort)
expectedStatus = &intermediate.Status{
ID: suite.webDestinations.Id,
Generation: suite.webDestinations.Generation,
Conditions: []*pbresource.Condition{
meshStatus.ConditionDestinationServiceFound(serviceRef),
meshStatus.ConditionMeshProtocolFound(serviceRef),
meshStatus.ConditionNonMeshProtocolDestinationPort(serviceRef, destinationRefs[0].Port),
meshStatus.ConditionDestinationComputedRoutesNotFound(serviceRef),
},
}
_, statuses, err = f.FetchExplicitDestinationsData(suite.ctx, destinationRefs)
require.NoError(t, err)
destStatus, exists = statuses[destinationRef]
require.True(t, exists, "status map does not contain service: %s", destinationRef)
prototest.AssertDeepEqual(t, expectedStatus, destStatus)
}) })

-	destination1 := intermediate.CombinedDestinationRef{
-		ServiceRef:             resource.Reference(suite.api1Service.Id, ""),
-		Port:                   "tcp",
-		ExplicitDestinationsID: suite.webDestinations.Id,
-		SourceProxies: map[resource.ReferenceKey]struct{}{
-			resource.NewReferenceKey(suite.webProxy.Id): {},
-		},
-	}
-	destination2 := intermediate.CombinedDestinationRef{
-		ServiceRef:             resource.Reference(suite.api2Service.Id, ""),
-		Port:                   "tcp1",
-		ExplicitDestinationsID: suite.webDestinations.Id,
-		SourceProxies: map[resource.ReferenceKey]struct{}{
-			resource.NewReferenceKey(suite.webProxy.Id): {},
-		},
-	}
-	destination3 := intermediate.CombinedDestinationRef{
-		ServiceRef:             resource.Reference(suite.api2Service.Id, ""),
-		Port:                   "tcp2",
-		ExplicitDestinationsID: suite.webDestinations.Id,
-		SourceProxies: map[resource.ReferenceKey]struct{}{
-			resource.NewReferenceKey(suite.webProxy.Id): {},
-		},
-	}
+	compDest := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, suite.webProxy.Id.Name).
+		WithData(suite.T(), suite.webComputedDestinationsData).
+		WithTenancy(resource.DefaultNamespacedTenancy()).
+		Write(suite.T(), suite.client)
testutil.RunStep(suite.T(), "invalid destinations: destination is pointing to a port but computed routes is not aware of it yet", func(t *testing.T) { testutil.RunStep(suite.T(), "invalid destinations: destination is pointing to a port but computed routes is not aware of it yet", func(t *testing.T) {
apiNonTCPServiceData := &pbcatalog.Service{ apiNonTCPServiceData := &pbcatalog.Service{
@ -706,7 +364,7 @@ func (suite *dataFetcherSuite) TestFetcher_FetchExplicitDestinationsData() {
}, },
} }
apiNonTCPService := resourcetest.ResourceID(suite.api1Service.Id). apiNonTCPService := resourcetest.ResourceID(suite.api1Service.Id).
WithData(suite.T(), apiNonTCPServiceData). WithData(t, apiNonTCPServiceData).
Build() Build()
api1ComputedRoutesID := resource.ReplaceType(pbmesh.ComputedRoutesType, suite.api1Service.Id) api1ComputedRoutesID := resource.ReplaceType(pbmesh.ComputedRoutesType, suite.api1Service.Id)
@ -716,94 +374,20 @@ func (suite *dataFetcherSuite) TestFetcher_FetchExplicitDestinationsData() {
require.NotNil(suite.T(), api1ComputedRoutes) require.NotNil(suite.T(), api1ComputedRoutes)
// This destination points to TCP, but the computed routes is stale and only knows about HTTP. // This destination points to TCP, but the computed routes is stale and only knows about HTTP.
writeDestination(t, destination1) destinations, err := f.FetchExplicitDestinationsData(suite.ctx, suite.webProxy.Id)
suite.syncDestinations(destination1)
destinationRefs := []intermediate.CombinedDestinationRef{destination1}
destinations, statuses, err := f.FetchExplicitDestinationsData(suite.ctx, destinationRefs)
serviceRef := resource.ReferenceToString(destination1.ServiceRef)
destinationRef := resource.IDToString(destination1.ExplicitDestinationsID)
expectedStatus := &intermediate.Status{
ID: suite.webDestinations.Id,
Generation: suite.webDestinations.Generation,
Conditions: []*pbresource.Condition{
meshStatus.ConditionDestinationServiceFound(serviceRef),
meshStatus.ConditionMeshProtocolFound(serviceRef),
meshStatus.ConditionNonMeshProtocolDestinationPort(serviceRef, destination1.Port),
meshStatus.ConditionDestinationComputedRoutesFound(serviceRef),
meshStatus.ConditionDestinationComputedRoutesPortNotFound(serviceRef, destination1.Port),
},
}
require.NoError(t, err) require.NoError(t, err)
destStatus, exists := statuses[destinationRef]
require.True(t, exists, "status map does not contain service: %s", destinationRef)
// Check that the status is generated correctly.
prototest.AssertDeepEqual(t, expectedStatus, destStatus)
// Check that we didn't return any destinations. // Check that we didn't return any destinations.
require.Nil(t, destinations) require.Nil(t, destinations)
// Check that destination service is still in cache because it's still referenced from the pbmesh.Destinations // Check that destination service is still in cache because it's still referenced from the pbmesh.Destinations
// resource. // resource.
_, foundDest := c.ReadDestination(destination1.ServiceRef, destination1.Port) cachedCompDestIDs := c.ComputedDestinationsByService(resource.IDFromReference(api1ServiceRef))
require.True(t, foundDest) compDest.Id.Uid = ""
prototest.AssertElementsMatch(t, []*pbresource.ID{compDest.Id}, cachedCompDestIDs)
// Update the computed routes not not lag.
api1ComputedRoutes = routestest.ReconcileComputedRoutes(suite.T(), suite.client, api1ComputedRoutesID,
resourcetest.MustDecode[*pbcatalog.Service](suite.T(), suite.api1Service),
)
require.NotNil(suite.T(), api1ComputedRoutes)
expectedStatus = &intermediate.Status{
ID: suite.webDestinations.Id,
Generation: suite.webDestinations.Generation,
Conditions: []*pbresource.Condition{
meshStatus.ConditionDestinationServiceFound(serviceRef),
meshStatus.ConditionMeshProtocolFound(serviceRef),
meshStatus.ConditionNonMeshProtocolDestinationPort(serviceRef, destination1.Port),
meshStatus.ConditionDestinationComputedRoutesFound(serviceRef),
meshStatus.ConditionDestinationComputedRoutesPortFound(serviceRef, destination1.Port),
},
}
actualDestinations, statuses, err := f.FetchExplicitDestinationsData(suite.ctx, destinationRefs)
require.NoError(t, err)
destStatus, exists = statuses[destinationRef]
require.True(t, exists, "status map does not contain service: %s", destinationRef)
prototest.AssertDeepEqual(t, expectedStatus, destStatus)
expectedDestinations := []*intermediate.Destination{
{
Explicit: suite.webDestinationsData.Destinations[0],
Service: resourcetest.MustDecode[*pbcatalog.Service](suite.T(), suite.api1Service),
ComputedPortRoutes: routestest.MutateTargets(suite.T(), api1ComputedRoutes.Data, "tcp", func(t *testing.T, details *pbmesh.BackendTargetDetails) {
switch {
case resource.ReferenceOrIDMatch(suite.api1Service.Id, details.BackendRef.Ref) && details.BackendRef.Port == "tcp":
se := resourcetest.MustDecode[*pbcatalog.ServiceEndpoints](suite.T(), suite.api1ServiceEndpoints)
details.ServiceEndpointsId = se.Resource.Id
details.ServiceEndpoints = se.Data
details.IdentityRefs = []*pbresource.Reference{{
Name: "api-1-identity",
Tenancy: suite.api1Service.Id.Tenancy,
}}
}
}),
},
}
prototest.AssertElementsMatch(t, expectedDestinations, actualDestinations)
}) })
testutil.RunStep(suite.T(), "happy path", func(t *testing.T) { testutil.RunStep(suite.T(), "happy path", func(t *testing.T) {
writeDestination(t, destination1)
writeDestination(t, destination2)
writeDestination(t, destination3)
suite.syncDestinations(destination1, destination2, destination3)
// Write a default ComputedRoutes for api1 and api2. // Write a default ComputedRoutes for api1 and api2.
var ( var (
api1ComputedRoutesID = resource.ReplaceType(pbmesh.ComputedRoutesType, suite.api1Service.Id) api1ComputedRoutesID = resource.ReplaceType(pbmesh.ComputedRoutesType, suite.api1Service.Id)
@ -818,10 +402,11 @@ func (suite *dataFetcherSuite) TestFetcher_FetchExplicitDestinationsData() {
) )
require.NotNil(suite.T(), api2ComputedRoutes) require.NotNil(suite.T(), api2ComputedRoutes)
destinationRefs := []intermediate.CombinedDestinationRef{destination1, destination2, destination3} resourcetest.ResourceID(suite.api1Service.Id)
expectedDestinations := []*intermediate.Destination{ expectedDestinations := []*intermediate.Destination{
{ {
Explicit: suite.webDestinationsData.Destinations[0], Explicit: suite.webComputedDestinationsData.Destinations[0],
Service: resourcetest.MustDecode[*pbcatalog.Service](suite.T(), suite.api1Service), Service: resourcetest.MustDecode[*pbcatalog.Service](suite.T(), suite.api1Service),
ComputedPortRoutes: routestest.MutateTargets(suite.T(), api1ComputedRoutes.Data, "tcp", func(t *testing.T, details *pbmesh.BackendTargetDetails) { ComputedPortRoutes: routestest.MutateTargets(suite.T(), api1ComputedRoutes.Data, "tcp", func(t *testing.T, details *pbmesh.BackendTargetDetails) {
switch { switch {
@ -837,7 +422,7 @@ func (suite *dataFetcherSuite) TestFetcher_FetchExplicitDestinationsData() {
}), }),
}, },
{ {
Explicit: suite.webDestinationsData.Destinations[1], Explicit: suite.webComputedDestinationsData.Destinations[1],
Service: resourcetest.MustDecode[*pbcatalog.Service](suite.T(), suite.api2Service), Service: resourcetest.MustDecode[*pbcatalog.Service](suite.T(), suite.api2Service),
ComputedPortRoutes: routestest.MutateTargets(suite.T(), api2ComputedRoutes.Data, "tcp1", func(t *testing.T, details *pbmesh.BackendTargetDetails) { ComputedPortRoutes: routestest.MutateTargets(suite.T(), api2ComputedRoutes.Data, "tcp1", func(t *testing.T, details *pbmesh.BackendTargetDetails) {
switch { switch {
@ -853,7 +438,7 @@ func (suite *dataFetcherSuite) TestFetcher_FetchExplicitDestinationsData() {
}), }),
}, },
{ {
Explicit: suite.webDestinationsData.Destinations[2], Explicit: suite.webComputedDestinationsData.Destinations[2],
Service: resourcetest.MustDecode[*pbcatalog.Service](suite.T(), suite.api2Service), Service: resourcetest.MustDecode[*pbcatalog.Service](suite.T(), suite.api2Service),
ComputedPortRoutes: routestest.MutateTargets(suite.T(), api2ComputedRoutes.Data, "tcp2", func(t *testing.T, details *pbmesh.BackendTargetDetails) { ComputedPortRoutes: routestest.MutateTargets(suite.T(), api2ComputedRoutes.Data, "tcp2", func(t *testing.T, details *pbmesh.BackendTargetDetails) {
switch { switch {
@ -869,25 +454,10 @@ func (suite *dataFetcherSuite) TestFetcher_FetchExplicitDestinationsData() {
}), }),
}, },
} }
var expectedConditions []*pbresource.Condition
for _, d := range destinationRefs {
ref := resource.ReferenceToString(d.ServiceRef)
expectedConditions = append(expectedConditions,
meshStatus.ConditionDestinationServiceFound(ref),
meshStatus.ConditionMeshProtocolFound(ref),
meshStatus.ConditionNonMeshProtocolDestinationPort(ref, d.Port),
meshStatus.ConditionDestinationComputedRoutesFound(ref),
meshStatus.ConditionDestinationComputedRoutesPortFound(ref, d.Port),
)
}
actualDestinations, statuses, err := f.FetchExplicitDestinationsData(suite.ctx, destinationRefs) actualDestinations, err := f.FetchExplicitDestinationsData(suite.ctx, suite.webProxy.Id)
require.NoError(t, err) require.NoError(t, err)
// Check that all statuses have "happy" conditions.
dref := resource.IDToString(destination1.ExplicitDestinationsID)
prototest.AssertElementsMatch(t, expectedConditions, statuses[dref].Conditions)
// Check that we've computed expanded destinations correctly. // Check that we've computed expanded destinations correctly.
prototest.AssertElementsMatch(t, expectedDestinations, actualDestinations) prototest.AssertElementsMatch(t, expectedDestinations, actualDestinations)
}) })
@@ -947,7 +517,7 @@ func (suite *dataFetcherSuite) TestFetcher_FetchImplicitDestinationsData() {

 	existingDestinations := []*intermediate.Destination{
 		{
-			Explicit: suite.webDestinationsData.Destinations[0],
+			Explicit: suite.webComputedDestinationsData.Destinations[0],
 			Service:  resourcetest.MustDecode[*pbcatalog.Service](suite.T(), suite.api1Service),
 			ComputedPortRoutes: routestest.MutateTargets(suite.T(), api1ComputedRoutes.Data, "tcp", func(t *testing.T, details *pbmesh.BackendTargetDetails) {
 				switch {
@@ -963,7 +533,7 @@ func (suite *dataFetcherSuite) TestFetcher_FetchImplicitDestinationsData() {
 			}),
 		},
 		{
-			Explicit: suite.webDestinationsData.Destinations[1],
+			Explicit: suite.webComputedDestinationsData.Destinations[1],
 			Service:  resourcetest.MustDecode[*pbcatalog.Service](suite.T(), suite.api2Service),
 			ComputedPortRoutes: routestest.MutateTargets(suite.T(), api2ComputedRoutes.Data, "tcp1", func(t *testing.T, details *pbmesh.BackendTargetDetails) {
 				switch {
@@ -979,7 +549,7 @@ func (suite *dataFetcherSuite) TestFetcher_FetchImplicitDestinationsData() {
 			}),
 		},
 		{
-			Explicit: suite.webDestinationsData.Destinations[2],
+			Explicit: suite.webComputedDestinationsData.Destinations[2],
 			Service:  resourcetest.MustDecode[*pbcatalog.Service](suite.T(), suite.api2Service),
 			ComputedPortRoutes: routestest.MutateTargets(suite.T(), api2ComputedRoutes.Data, "tcp2", func(t *testing.T, details *pbmesh.BackendTargetDetails) {
 				switch {
@@ -1014,7 +584,7 @@ func (suite *dataFetcherSuite) TestFetcher_FetchImplicitDestinationsData() {
 	}

 	f := Fetcher{
-		Client: suite.client,
+		client: suite.client,
 	}

 	actualDestinations, err := f.FetchImplicitDestinationsData(context.Background(), suite.webProxy.Id, existingDestinations)
@@ -1023,61 +593,6 @@ func (suite *dataFetcherSuite) TestFetcher_FetchImplicitDestinationsData() {

 	prototest.AssertElementsMatch(suite.T(), existingDestinations, actualDestinations)
 }
func (suite *dataFetcherSuite) TestFetcher_FetchAndMergeProxyConfigurations() {
// Create some proxy configurations.
proxyCfg1Data := &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
},
}
proxyCfg2Data := &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
MutualTlsMode: pbmesh.MutualTLSMode_MUTUAL_TLS_MODE_DEFAULT,
},
}
proxyCfg1 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "config-1").
WithData(suite.T(), proxyCfg1Data).
Write(suite.T(), suite.client)
proxyCfg2 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "config-2").
WithData(suite.T(), proxyCfg2Data).
Write(suite.T(), suite.client)
proxyCfgCache := sidecarproxycache.NewProxyConfigurationCache()
proxyCfgCache.TrackProxyConfiguration(proxyCfg1.Id, []resource.ReferenceOrID{suite.webProxy.Id})
proxyCfgCache.TrackProxyConfiguration(proxyCfg2.Id, []resource.ReferenceOrID{suite.webProxy.Id})
expectedProxyCfg := &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
MutualTlsMode: pbmesh.MutualTLSMode_MUTUAL_TLS_MODE_DEFAULT,
TransparentProxy: &pbmesh.TransparentProxy{
OutboundListenerPort: 15001,
},
},
}
fetcher := Fetcher{Client: suite.client, ProxyCfgCache: proxyCfgCache}
actualProxyCfg, err := fetcher.FetchAndMergeProxyConfigurations(suite.ctx, suite.webProxy.Id)
require.NoError(suite.T(), err)
prototest.AssertDeepEqual(suite.T(), expectedProxyCfg, actualProxyCfg)
// Delete proxy cfg and check that the cache gets updated.
_, err = suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: proxyCfg1.Id})
require.NoError(suite.T(), err)
_, err = fetcher.FetchAndMergeProxyConfigurations(suite.ctx, suite.webProxy.Id)
require.NoError(suite.T(), err)
proxyCfg2.Id.Uid = ""
prototest.AssertElementsMatch(suite.T(),
[]*pbresource.ID{proxyCfg2.Id},
fetcher.ProxyCfgCache.ProxyConfigurationsByProxyID(suite.webProxy.Id))
}
func TestDataFetcher(t *testing.T) {
	suite.Run(t, new(dataFetcherSuite))
}


@@ -1,119 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package status
import (
"fmt"
"github.com/hashicorp/consul/proto-public/pbresource"
)
const (
StatusConditionDestinationAccepted = "DestinationAccepted"
StatusReasonMeshProtocolNotFound = "MeshPortProtocolNotFound"
StatusReasonMeshProtocolFound = "MeshPortProtocolFound"
StatusReasonMeshProtocolDestinationPort = "DestinationWithMeshPortProtocol"
StatusReasonNonMeshProtocolDestinationPort = "DestinationWithNonMeshPortProtocol"
StatusReasonDestinationServiceNotFound = "ServiceNotFound"
StatusReasonDestinationServiceFound = "ServiceFound"
StatusReasonDestinationComputedRoutesNotFound = "ComputedRoutesNotFound"
StatusReasonDestinationComputedRoutesFound = "ComputedRoutesFound"
StatusReasonDestinationComputedRoutesPortNotFound = "ComputedRoutesPortNotFound"
StatusReasonDestinationComputedRoutesPortFound = "ComputedRoutesPortFound"
)
func ConditionMeshProtocolNotFound(serviceRef string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonMeshProtocolNotFound,
Message: fmt.Sprintf("service %q cannot be referenced as a Destination because it's not mesh-enabled.", serviceRef),
}
}
func ConditionMeshProtocolFound(serviceRef string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonMeshProtocolFound,
Message: fmt.Sprintf("service %q is on the mesh.", serviceRef),
}
}
func ConditionDestinationServiceNotFound(serviceRef string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonDestinationServiceNotFound,
Message: fmt.Sprintf("service %q does not exist.", serviceRef),
}
}
func ConditionDestinationServiceFound(serviceRef string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonDestinationServiceFound,
Message: fmt.Sprintf("service %q exists.", serviceRef),
}
}
func ConditionMeshProtocolDestinationPort(serviceRef, port string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonMeshProtocolDestinationPort,
Message: fmt.Sprintf("destination port %q for service %q has PROTOCOL_MESH which is unsupported for destination services", port, serviceRef),
}
}
func ConditionNonMeshProtocolDestinationPort(serviceRef, port string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonNonMeshProtocolDestinationPort,
Message: fmt.Sprintf("destination port %q for service %q has a non-mesh protocol", port, serviceRef),
}
}
func ConditionDestinationComputedRoutesNotFound(serviceRef string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonDestinationComputedRoutesNotFound,
Message: fmt.Sprintf("computed routes %q does not exist.", serviceRef),
}
}
func ConditionDestinationComputedRoutesFound(serviceRef string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonDestinationComputedRoutesNotFound,
Message: fmt.Sprintf("computed routes %q exists.", serviceRef),
}
}
func ConditionDestinationComputedRoutesPortNotFound(serviceRef, port string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonDestinationComputedRoutesPortNotFound,
Message: fmt.Sprintf("computed routes %q does not exist for port %q.", serviceRef, port),
}
}
func ConditionDestinationComputedRoutesPortFound(serviceRef, port string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonDestinationComputedRoutesPortNotFound,
Message: fmt.Sprintf("computed routes %q exists for port %q.", serviceRef, port),
}
}


@@ -1,43 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
func (m *Mapper) MapComputedRoutesToProxyStateTemplate(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
computedRoutes, err := resource.Decode[*pbmesh.ComputedRoutes](res)
if err != nil {
return nil, err
}
reqs, err := m.mapComputedRoutesToProxyStateTemplate(ctx, rt, res.Id)
if err != nil {
return nil, err
}
m.computedRoutesCache.TrackComputedRoutes(computedRoutes)
return reqs, nil
}
func (m *Mapper) mapComputedRoutesToProxyStateTemplate(ctx context.Context, rt controller.Runtime, computedRoutesID *pbresource.ID) ([]controller.Request, error) {
// Each Destination gets a single ComputedRoutes.
serviceID := resource.ReplaceType(pbcatalog.ServiceType, computedRoutesID)
serviceRef := resource.Reference(serviceID, "")
ids, err := m.mapServiceThroughDestinationsToProxyStateTemplates(ctx, rt, serviceRef)
if err != nil {
return nil, err
}
return controller.MakeRequests(pbmesh.ProxyStateTemplateType, ids), nil
}


@@ -1,44 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
"github.com/hashicorp/consul/internal/resource"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
func (m *Mapper) MapDestinationsToProxyStateTemplate(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
destinations, err := resource.Decode[*pbmesh.Destinations](res)
if err != nil {
return nil, err
}
// Look up workloads for this destinations.
sourceProxyIDs := make(map[resource.ReferenceKey]struct{})
requests, err := mapSelectorToProxyStateTemplates(ctx, rt.Client, destinations.Data.Workloads, res.Id.Tenancy, func(id *pbresource.ID) {
sourceProxyIDs[resource.NewReferenceKey(id)] = struct{}{}
})
if err != nil {
return nil, err
}
// Add this destination to destinationsCache.
for _, destination := range destinations.Data.Destinations {
destinationRef := intermediate.CombinedDestinationRef{
ServiceRef: destination.DestinationRef,
Port: destination.DestinationPort,
ExplicitDestinationsID: res.Id,
SourceProxies: sourceProxyIDs,
}
m.destinationsCache.WriteDestination(destinationRef)
}
return requests, nil
}

View File

@ -1,120 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"testing"
"github.com/stretchr/testify/require"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestMapDestinationsToProxyStateTemplate(t *testing.T) {
client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
webWorkload1 := resourcetest.Resource(pbcatalog.WorkloadType, "web-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.1"}},
Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}},
}).
Write(t, client)
webWorkload2 := resourcetest.Resource(pbcatalog.WorkloadType, "web-def").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.2"}},
Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}},
}).
Write(t, client)
webWorkload3 := resourcetest.Resource(pbcatalog.WorkloadType, "non-prefix-web").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.3"}},
Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}},
}).
Write(t, client)
var (
api1ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "api-1").
WithTenancy(resource.DefaultNamespacedTenancy()).
ReferenceNoSection()
api2ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "api-2").
WithTenancy(resource.DefaultNamespacedTenancy()).
ReferenceNoSection()
)
webDestinationsData := &pbmesh.Destinations{
Workloads: &pbcatalog.WorkloadSelector{
Names: []string{"non-prefix-web"},
Prefixes: []string{"web"},
},
Destinations: []*pbmesh.Destination{
{
DestinationRef: api1ServiceRef,
DestinationPort: "tcp",
},
{
DestinationRef: api2ServiceRef,
DestinationPort: "tcp1",
},
{
DestinationRef: api2ServiceRef,
DestinationPort: "tcp2",
},
},
}
webDestinations := resourcetest.Resource(pbmesh.DestinationsType, "web-destinations").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, webDestinationsData).
Write(t, client)
c := sidecarproxycache.NewDestinationsCache()
mapper := &Mapper{destinationsCache: c}
expRequests := []controller.Request{
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, webWorkload1.Id)},
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, webWorkload2.Id)},
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, webWorkload3.Id)},
}
requests, err := mapper.MapDestinationsToProxyStateTemplate(context.Background(), controller.Runtime{Client: client}, webDestinations)
require.NoError(t, err)
prototest.AssertElementsMatch(t, expRequests, requests)
var (
proxy1ID = resourcetest.Resource(pbmesh.ProxyStateTemplateType, webWorkload1.Id.Name).
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxy2ID = resourcetest.Resource(pbmesh.ProxyStateTemplateType, webWorkload2.Id.Name).
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxy3ID = resourcetest.Resource(pbmesh.ProxyStateTemplateType, webWorkload3.Id.Name).
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
)
for _, u := range webDestinationsData.Destinations {
expDestination := intermediate.CombinedDestinationRef{
ServiceRef: u.DestinationRef,
Port: u.DestinationPort,
ExplicitDestinationsID: webDestinations.Id,
SourceProxies: map[resource.ReferenceKey]struct{}{
resource.NewReferenceKey(proxy1ID): {},
resource.NewReferenceKey(proxy2ID): {},
resource.NewReferenceKey(proxy3ID): {},
},
}
actualDestination, found := c.ReadDestination(u.DestinationRef, u.DestinationPort)
require.True(t, found)
prototest.AssertDeepEqual(t, expDestination, actualDestination)
}
}

View File

@ -1,82 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"fmt"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
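// Mapper bundles the sidecar proxy controller caches used to translate watched
// resources into ProxyStateTemplate reconcile requests.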
type Mapper struct {
destinationsCache *sidecarproxycache.DestinationsCache
proxyCfgCache *sidecarproxycache.ProxyConfigurationCache
computedRoutesCache *sidecarproxycache.ComputedRoutesCache
identitiesCache *sidecarproxycache.IdentitiesCache
}
func New(
destinationsCache *sidecarproxycache.DestinationsCache,
proxyCfgCache *sidecarproxycache.ProxyConfigurationCache,
computedRoutesCache *sidecarproxycache.ComputedRoutesCache,
identitiesCache *sidecarproxycache.IdentitiesCache,
) *Mapper {
return &Mapper{
destinationsCache: destinationsCache,
proxyCfgCache: proxyCfgCache,
computedRoutesCache: computedRoutesCache,
identitiesCache: identitiesCache,
}
}
// mapSelectorToProxyStateTemplates returns ProxyStateTemplate requests given a workload
// selector and tenancy. The cacheFunc is invoked for each resulting ID so that callers can cache them.
func mapSelectorToProxyStateTemplates(ctx context.Context,
client pbresource.ResourceServiceClient,
selector *pbcatalog.WorkloadSelector,
tenancy *pbresource.Tenancy,
cacheFunc func(id *pbresource.ID)) ([]controller.Request, error) {
var result []controller.Request
for _, prefix := range selector.Prefixes {
resp, err := client.List(ctx, &pbresource.ListRequest{
Type: pbcatalog.WorkloadType,
Tenancy: tenancy,
NamePrefix: prefix,
})
if err != nil {
return nil, err
}
if len(resp.Resources) == 0 {
return nil, fmt.Errorf("no workloads found")
}
for _, r := range resp.Resources {
id := resource.ReplaceType(pbmesh.ProxyStateTemplateType, r.Id)
result = append(result, controller.Request{
ID: id,
})
cacheFunc(id)
}
}
for _, name := range selector.Names {
id := &pbresource.ID{
Name: name,
Tenancy: tenancy,
Type: pbmesh.ProxyStateTemplateType,
}
result = append(result, controller.Request{
ID: id,
})
cacheFunc(id)
}
return result, nil
}

View File

@ -1,80 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"testing"
"github.com/stretchr/testify/require"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestMapWorkloadsBySelector(t *testing.T) {
client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
// Create some workloads.
// For this test, we don't care about the workload data, so we will re-use
// the same data for all workloads.
workloadData := &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "127.0.0.1"}},
Ports: map[string]*pbcatalog.WorkloadPort{"p1": {Port: 8080}},
}
w1 := resourcetest.Resource(pbcatalog.WorkloadType, "w1").
WithData(t, workloadData).
Write(t, client).Id
w2 := resourcetest.Resource(pbcatalog.WorkloadType, "w2").
WithData(t, workloadData).
Write(t, client).Id
w3 := resourcetest.Resource(pbcatalog.WorkloadType, "prefix-w3").
WithData(t, workloadData).
Write(t, client).Id
w4 := resourcetest.Resource(pbcatalog.WorkloadType, "prefix-w4").
WithData(t, workloadData).
Write(t, client).Id
// This workload should not be used as it's not selected by the workload selector.
resourcetest.Resource(pbcatalog.WorkloadType, "not-selected-workload").
WithData(t, workloadData).
Write(t, client)
selector := &pbcatalog.WorkloadSelector{
Names: []string{"w1", "w2"},
Prefixes: []string{"prefix"},
}
expReqs := []controller.Request{
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w1)},
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w2)},
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w3)},
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w4)},
}
var cachedReqs []controller.Request
reqs, err := mapSelectorToProxyStateTemplates(context.Background(), client, selector, defaultTenancy(), func(id *pbresource.ID) {
// save IDs to check that the cache func is called
cachedReqs = append(cachedReqs, controller.Request{ID: id})
})
require.NoError(t, err)
require.Len(t, reqs, len(expReqs))
prototest.AssertElementsMatch(t, expReqs, reqs)
prototest.AssertElementsMatch(t, expReqs, cachedReqs)
}
func defaultTenancy() *pbresource.Tenancy {
return &pbresource.Tenancy{
Namespace: "default",
Partition: "default",
PeerName: "local",
}
}

View File

@ -1,34 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
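// MapProxyConfigurationToProxyStateTemplate maps a ProxyConfiguration resource to
// requests for the ProxyStateTemplates of the workloads it selects, and tracks the
// configuration's proxy IDs in the proxy configuration cache.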
func (m *Mapper) MapProxyConfigurationToProxyStateTemplate(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
var proxyConfig pbmesh.ProxyConfiguration
err := res.Data.UnmarshalTo(&proxyConfig)
if err != nil {
return nil, err
}
var proxyIDs []resource.ReferenceOrID
requests, err := mapSelectorToProxyStateTemplates(ctx, rt.Client, proxyConfig.Workloads, res.Id.Tenancy, func(id *pbresource.ID) {
proxyIDs = append(proxyIDs, id)
})
if err != nil {
return nil, err
}
m.proxyCfgCache.TrackProxyConfiguration(res.Id, proxyIDs)
return requests, nil
}

View File

@ -1,78 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"testing"
"github.com/stretchr/testify/require"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestProxyConfigurationMapper(t *testing.T) {
client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
// Create some workloads.
// For this test, we don't care about the workload data, so we will re-use
// the same data for all workloads.
workloadData := &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "127.0.0.1"}},
Ports: map[string]*pbcatalog.WorkloadPort{"p1": {Port: 8080}},
}
w1 := resourcetest.Resource(pbcatalog.WorkloadType, "w1").
WithData(t, workloadData).
Write(t, client).Id
w2 := resourcetest.Resource(pbcatalog.WorkloadType, "w2").
WithData(t, workloadData).
Write(t, client).Id
// Create proxy configuration.
proxyCfgData := &pbmesh.ProxyConfiguration{
Workloads: &pbcatalog.WorkloadSelector{
Names: []string{"w1", "w2"},
},
}
pCfg := resourcetest.Resource(pbmesh.ProxyConfigurationType, "proxy-config").
WithData(t, proxyCfgData).
Write(t, client)
m := Mapper{proxyCfgCache: sidecarproxycache.NewProxyConfigurationCache()}
reqs, err := m.MapProxyConfigurationToProxyStateTemplate(context.Background(), controller.Runtime{
Client: client,
}, pCfg)
require.NoError(t, err)
p1 := resource.ReplaceType(pbmesh.ProxyStateTemplateType, w1)
p2 := resource.ReplaceType(pbmesh.ProxyStateTemplateType, w2)
expReqs := []controller.Request{
{ID: p1},
{ID: p2},
}
prototest.AssertElementsMatch(t, expReqs, reqs)
// Check that the cache is populated.
// Clean out UID as we don't care about it in the cache.
pCfg.Id.Uid = ""
prototest.AssertElementsMatch(t,
[]*pbresource.ID{pCfg.Id},
m.proxyCfgCache.ProxyConfigurationsByProxyID(p1))
prototest.AssertElementsMatch(t,
[]*pbresource.ID{pCfg.Id},
m.proxyCfgCache.ProxyConfigurationsByProxyID(p2))
}

View File

@ -1,71 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// MapServiceEndpointsToProxyStateTemplate maps catalog.ServiceEndpoints resources to the IDs of
// the ProxyStateTemplates that need to be reconciled.
func (m *Mapper) MapServiceEndpointsToProxyStateTemplate(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
// This mapper has two jobs:
//
// 1. It needs to look up workload IDs from service endpoints and replace
// them with ProxyStateTemplate type. We do this so we don't need to watch
// Workloads to discover them, since ProxyStateTemplates are name-aligned
// with Workloads.
//
// 2. It needs to find any PST that needs to DISCOVER endpoints for this
// service as a part of mesh configuration and traffic routing.
serviceEndpoints, err := resource.Decode[*pbcatalog.ServiceEndpoints](res)
if err != nil {
return nil, err
}
var result []controller.Request
// (1) First, we need to generate requests from workloads this "endpoints"
// points to so that we can re-generate proxy state for the sidecar proxy.
for _, endpoint := range serviceEndpoints.Data.Endpoints {
// Convert the reference to a workload to a ProxyStateTemplate ID.
// Because these resources are name and tenancy aligned, we only need to change the type.
// Skip service endpoints without target refs. These resources would typically be created for
// services external to Consul, and we don't need to reconcile those as they don't have
// associated workloads.
if endpoint.TargetRef != nil {
result = append(result, controller.Request{
ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, endpoint.TargetRef),
})
}
}
// (2) Now walk the mesh configuration information backwards.
// ServiceEndpoints -> Service
targetServiceRef := resource.ReplaceType(pbcatalog.ServiceType, res.Id)
// Find all ComputedRoutes that reference this service.
routeIDs := m.computedRoutesCache.ComputedRoutesByService(targetServiceRef)
for _, routeID := range routeIDs {
// Find all Upstreams that reference a Service aligned with this ComputedRoutes.
// Afterwards, find all Workloads selected by the Upstreams, and align a PST with those.
reqs, err := m.mapComputedRoutesToProxyStateTemplate(ctx, rt, routeID)
if err != nil {
return nil, err
}
result = append(result, reqs...)
}
return result, nil
}
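// Illustrative sketch (not part of this change): ProxyStateTemplate is name- and
// tenancy-aligned with Workload, so the mapping above only needs to swap the type.
// The workload name below is a made-up example:
//
//	workloadID := &pbresource.ID{Type: pbcatalog.WorkloadType, Name: "web-abc", Tenancy: resource.DefaultNamespacedTenancy()}
//	pstID := resource.ReplaceType(pbmesh.ProxyStateTemplateType, workloadID)
//	// pstID keeps the same Name and Tenancy as workloadID; only the Type differs.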

View File

@ -1,95 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/storage"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
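// MapServiceToProxyStateTemplate maps a Service resource to requests for the
// ProxyStateTemplates of proxies that reference it as an explicit destination,
// plus (temporarily, for transparent proxy) all proxy state templates in the partition.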
func (m *Mapper) MapServiceToProxyStateTemplate(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
serviceRef := resource.Reference(res.Id, "")
ids, err := m.mapServiceThroughDestinationsToProxyStateTemplates(ctx, rt, serviceRef)
if err != nil {
return nil, err
}
return controller.MakeRequests(pbmesh.ProxyStateTemplateType, ids), nil
}
// mapServiceThroughDestinationsToProxyStateTemplates takes an explicit
// Service and traverses back through Destinations to Workloads to
// ProxyStateTemplates.
//
// This is in a separate function so it can be chained for more complicated
// relationships.
func (m *Mapper) mapServiceThroughDestinationsToProxyStateTemplates(
ctx context.Context,
rt controller.Runtime,
serviceRef *pbresource.Reference,
) ([]*pbresource.ID, error) {
// The relationship is:
//
// - PST (replace type) Workload
// - Workload (selected by) Upstreams
// - Upstream (contains) Service
//
// When we wake up for Service we should:
//
// - look for Service in all Destinations(upstreams)
// - follow selectors backwards to Workloads
// - rewrite types to PST
var pstIDs []*pbresource.ID
destinations := m.destinationsCache.ReadDestinationsByServiceAllPorts(serviceRef)
for _, destination := range destinations {
for refKey := range destination.SourceProxies {
pstIDs = append(pstIDs, refKey.ToID())
}
}
// TODO(v2): remove this after we can do proper performant implicit upstream determination
//
// TODO(rb): shouldn't this instead list all Workloads that have a mesh port?
allIDs, err := m.listAllProxyStateTemplatesTemporarily(ctx, rt, serviceRef.Tenancy)
if err != nil {
return nil, err
}
pstIDs = append(pstIDs, allIDs...)
return pstIDs, nil
}
func (m *Mapper) listAllProxyStateTemplatesTemporarily(ctx context.Context, rt controller.Runtime, tenancy *pbresource.Tenancy) ([]*pbresource.ID, error) {
// todo (ishustava): this is a stub for now until we implement implicit destinations.
// For tproxy, we generate requests for all proxy states in the cluster.
// This will generate duplicate events for proxies already added above,
// however, we expect that the controller runtime will de-dup for us.
rsp, err := rt.Client.List(ctx, &pbresource.ListRequest{
Type: pbmesh.ProxyStateTemplateType,
Tenancy: &pbresource.Tenancy{
Namespace: storage.Wildcard,
Partition: tenancy.Partition,
PeerName: tenancy.PeerName,
},
})
if err != nil {
return nil, err
}
result := make([]*pbresource.ID, 0, len(rsp.Resources))
for _, r := range rsp.Resources {
result = append(result, r.Id)
}
return result, nil
}

View File

@ -1,34 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
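// MapComputedTrafficPermissionsToProxyStateTemplate maps a ComputedTrafficPermissions
// resource to requests for the ProxyStateTemplates of proxies associated with the
// corresponding workload identity, as tracked by the identities cache.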
func (m *Mapper) MapComputedTrafficPermissionsToProxyStateTemplate(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
var ctp pbauth.ComputedTrafficPermissions
err := res.Data.UnmarshalTo(&ctp)
if err != nil {
return nil, err
}
pid := resource.ReplaceType(pbauth.WorkloadIdentityType, res.Id)
ids := m.identitiesCache.ProxyIDsByWorkloadIdentity(pid)
requests := make([]controller.Request, 0, len(ids))
for _, id := range ids {
requests = append(requests, controller.Request{
ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, id)},
)
}
return requests, nil
}

View File

@ -1,63 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"testing"
"github.com/stretchr/testify/require"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestMapComputedTrafficPermissionsToProxyStateTemplate(t *testing.T) {
client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
ctp := resourcetest.Resource(pbauth.ComputedTrafficPermissionsType, "workload-identity-1").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbauth.ComputedTrafficPermissions{}).
Build()
i := sidecarproxycache.NewIdentitiesCache()
mapper := &Mapper{identitiesCache: i}
// Empty results when the cache isn't populated.
requests, err := mapper.MapComputedTrafficPermissionsToProxyStateTemplate(context.Background(), controller.Runtime{Client: client}, ctp)
require.NoError(t, err)
require.Len(t, requests, 0)
identityID1 := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxyID1 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxyID2 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
i.TrackPair(identityID1, proxyID1)
// One request is returned once the identity is tracked against the first proxy.
requests, err = mapper.MapComputedTrafficPermissionsToProxyStateTemplate(context.Background(), controller.Runtime{Client: client}, ctp)
require.NoError(t, err)
prototest.AssertElementsMatch(t, []controller.Request{{ID: proxyID1}}, requests)
i.TrackPair(identityID1, proxyID2)
// Requests for both proxies are returned once the second pair is tracked.
requests, err = mapper.MapComputedTrafficPermissionsToProxyStateTemplate(context.Background(), controller.Runtime{Client: client}, ctp)
require.NoError(t, err)
prototest.AssertElementsMatch(t, []controller.Request{
{ID: proxyID1},
{ID: proxyID2},
}, requests)
}

View File

@ -1,404 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"testing"
"github.com/stretchr/testify/require"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/routestest"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestUnified_AllMappingsToProxyStateTemplate(t *testing.T) {
var (
destCache = sidecarproxycache.NewDestinationsCache()
// proxyCfgCache = sidecarproxycache.NewProxyConfigurationCache()
routesCache = sidecarproxycache.NewComputedRoutesCache()
mapper = New(destCache, nil, routesCache, nil)
client = svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
)
anyServiceData := &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
{
TargetPort: "tcp1",
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
},
{
TargetPort: "tcp2",
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
},
{
TargetPort: "mesh",
Protocol: pbcatalog.Protocol_PROTOCOL_MESH,
},
},
}
anyWorkloadPorts := map[string]*pbcatalog.WorkloadPort{
"tcp1": {Port: 8080},
"tcp2": {Port: 8081},
"mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
}
// The thing we link through Destinations.
destService := resourcetest.Resource(pbcatalog.ServiceType, "web").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, anyServiceData).
Build()
destServiceRef := resource.Reference(destService.Id, "")
// The thing we reach through the mesh config.
targetService := resourcetest.Resource(pbcatalog.ServiceType, "db").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, anyServiceData).
Build()
targetServiceRef := resource.Reference(targetService.Id, "")
backupTargetService := resourcetest.Resource(pbcatalog.ServiceType, "db-backup").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, anyServiceData).
Build()
backupTargetServiceRef := resource.Reference(backupTargetService.Id, "")
// The way we make 'web' actually route traffic to 'db'.
tcpRoute := resourcetest.Resource(pbmesh.TCPRouteType, "tcp-route").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbmesh.TCPRoute{
ParentRefs: []*pbmesh.ParentReference{{
Ref: destServiceRef,
}},
Rules: []*pbmesh.TCPRouteRule{{
BackendRefs: []*pbmesh.TCPBackendRef{{
BackendRef: &pbmesh.BackendReference{
Ref: targetServiceRef,
},
}},
}},
}).
Build()
failoverPolicy := resourcetest.ResourceID(resource.ReplaceType(pbcatalog.FailoverPolicyType, targetService.Id)).
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.FailoverPolicy{
Config: &pbcatalog.FailoverConfig{
Destinations: []*pbcatalog.FailoverDestination{{
Ref: backupTargetServiceRef,
}},
},
}).
Build()
webRoutes := routestest.BuildComputedRoutes(t, resource.ReplaceType(pbmesh.ComputedRoutesType, destService.Id),
resourcetest.MustDecode[*pbcatalog.Service](t, destService),
resourcetest.MustDecode[*pbcatalog.Service](t, targetService),
resourcetest.MustDecode[*pbcatalog.Service](t, backupTargetService),
resourcetest.MustDecode[*pbmesh.TCPRoute](t, tcpRoute),
resourcetest.MustDecode[*pbcatalog.FailoverPolicy](t, failoverPolicy),
)
var (
destWorkload1 = newID(pbcatalog.WorkloadType, "dest-workload-1")
destWorkload2 = newID(pbcatalog.WorkloadType, "dest-workload-2")
destProxy1 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, destWorkload1)
destProxy2 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, destWorkload2)
)
// Endpoints for original destination
destEndpoints := resourcetest.ResourceID(resource.ReplaceType(pbcatalog.ServiceEndpointsType, destService.Id)).
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
{
TargetRef: destWorkload1,
Ports: anyWorkloadPorts,
},
{
TargetRef: destWorkload2,
Ports: anyWorkloadPorts,
},
},
}).
Build()
var (
targetWorkload1 = newID(pbcatalog.WorkloadType, "target-workload-1")
targetWorkload2 = newID(pbcatalog.WorkloadType, "target-workload-2")
targetWorkload3 = newID(pbcatalog.WorkloadType, "target-workload-3")
backupTargetWorkload1 = newID(pbcatalog.WorkloadType, "backup-target-workload-1")
backupTargetWorkload2 = newID(pbcatalog.WorkloadType, "backup-target-workload-2")
backupTargetWorkload3 = newID(pbcatalog.WorkloadType, "backup-target-workload-3")
targetProxy1 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, targetWorkload1)
targetProxy2 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, targetWorkload2)
targetProxy3 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, targetWorkload3)
backupTargetProxy1 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, backupTargetWorkload1)
backupTargetProxy2 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, backupTargetWorkload2)
backupTargetProxy3 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, backupTargetWorkload3)
)
// Endpoints for actual destination
targetEndpoints := resourcetest.ResourceID(resource.ReplaceType(pbcatalog.ServiceEndpointsType, targetService.Id)).
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
{
TargetRef: targetWorkload1,
Ports: anyWorkloadPorts,
},
{
TargetRef: targetWorkload2,
Ports: anyWorkloadPorts,
},
{
TargetRef: targetWorkload3,
Ports: anyWorkloadPorts,
},
},
}).
Build()
backupTargetEndpoints := resourcetest.ResourceID(resource.ReplaceType(pbcatalog.ServiceEndpointsType, backupTargetService.Id)).
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
{
TargetRef: backupTargetWorkload1,
Ports: anyWorkloadPorts,
},
{
TargetRef: backupTargetWorkload2,
Ports: anyWorkloadPorts,
},
{
TargetRef: backupTargetWorkload3,
Ports: anyWorkloadPorts,
},
},
}).
Build()
var (
sourceProxy1 = newID(pbmesh.ProxyStateTemplateType, "src-workload-1")
sourceProxy2 = newID(pbmesh.ProxyStateTemplateType, "src-workload-2")
sourceProxy3 = newID(pbmesh.ProxyStateTemplateType, "src-workload-3")
sourceProxy4 = newID(pbmesh.ProxyStateTemplateType, "src-workload-4")
sourceProxy5 = newID(pbmesh.ProxyStateTemplateType, "src-workload-5")
sourceProxy6 = newID(pbmesh.ProxyStateTemplateType, "src-workload-6")
)
destination1 := intermediate.CombinedDestinationRef{
ServiceRef: destServiceRef,
Port: "tcp1",
SourceProxies: map[resource.ReferenceKey]struct{}{
resource.NewReferenceKey(sourceProxy1): {},
resource.NewReferenceKey(sourceProxy2): {},
},
}
destination2 := intermediate.CombinedDestinationRef{
ServiceRef: destServiceRef,
Port: "tcp2",
SourceProxies: map[resource.ReferenceKey]struct{}{
resource.NewReferenceKey(sourceProxy3): {},
resource.NewReferenceKey(sourceProxy4): {},
},
}
destination3 := intermediate.CombinedDestinationRef{
ServiceRef: destServiceRef,
Port: "mesh",
SourceProxies: map[resource.ReferenceKey]struct{}{
resource.NewReferenceKey(sourceProxy5): {},
resource.NewReferenceKey(sourceProxy6): {},
},
}
routesCache.TrackComputedRoutes(webRoutes)
destCache.WriteDestination(destination1)
destCache.WriteDestination(destination2)
destCache.WriteDestination(destination3)
t.Run("ServiceEndpoints", func(t *testing.T) {
t.Run("map dest endpoints", func(t *testing.T) {
requests, err := mapper.MapServiceEndpointsToProxyStateTemplate(
context.Background(),
controller.Runtime{Client: client},
destEndpoints,
)
require.NoError(t, err)
expRequests := []controller.Request{
// Just wake up proxies for these workloads.
{ID: destProxy1},
{ID: destProxy2},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
t.Run("map target endpoints (TCPRoute)", func(t *testing.T) {
requests, err := mapper.MapServiceEndpointsToProxyStateTemplate(
context.Background(),
controller.Runtime{Client: client},
targetEndpoints,
)
require.NoError(t, err)
requests = testDeduplicateRequests(requests)
expRequests := []controller.Request{
// Wake up proxies for these workloads.
{ID: targetProxy1},
{ID: targetProxy2},
{ID: targetProxy3},
// Also wake up things that have destService as a destination b/c of the TCPRoute reference.
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
t.Run("map backup target endpoints (FailoverPolicy)", func(t *testing.T) {
requests, err := mapper.MapServiceEndpointsToProxyStateTemplate(
context.Background(),
controller.Runtime{Client: client},
backupTargetEndpoints,
)
require.NoError(t, err)
requests = testDeduplicateRequests(requests)
expRequests := []controller.Request{
// Wake up proxies for these workloads.
{ID: backupTargetProxy1},
{ID: backupTargetProxy2},
{ID: backupTargetProxy3},
// Also wake up things that have destService as a destination b/c of the FailoverPolicy reference.
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
})
t.Run("Service", func(t *testing.T) {
t.Run("map dest service", func(t *testing.T) {
requests, err := mapper.MapServiceToProxyStateTemplate(
context.Background(),
controller.Runtime{Client: client},
destService,
)
require.NoError(t, err)
// Only wake up things with dest as an upstream.
expRequests := []controller.Request{
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
t.Run("map target endpoints (TCPRoute)", func(t *testing.T) {
requests, err := mapper.MapServiceToProxyStateTemplate(
context.Background(),
controller.Runtime{Client: client},
targetService,
)
require.NoError(t, err)
requests = testDeduplicateRequests(requests)
// No upstream refers to the target service directly.
expRequests := []controller.Request{}
prototest.AssertElementsMatch(t, expRequests, requests)
})
})
t.Run("ComputedRoutes", func(t *testing.T) {
t.Run("map web routes", func(t *testing.T) {
requests, err := mapper.MapComputedRoutesToProxyStateTemplate(
context.Background(),
controller.Runtime{Client: client},
webRoutes.Resource,
)
require.NoError(t, err)
// Only wake up things with dest as an upstream.
expRequests := []controller.Request{
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
})
}
func newRef(typ *pbresource.Type, name string) *pbresource.Reference {
return resource.Reference(newID(typ, name), "")
}
func newID(typ *pbresource.Type, name string) *pbresource.ID {
return &pbresource.ID{
Type: typ,
Tenancy: resource.DefaultNamespacedTenancy(),
Name: name,
}
}
func testDeduplicateRequests(reqs []controller.Request) []controller.Request {
type resID struct {
resource.ReferenceKey
UID string
}
out := make([]controller.Request, 0, len(reqs))
seen := make(map[resID]struct{})
for _, req := range reqs {
rid := resID{
ReferenceKey: resource.NewReferenceKey(req.ID),
UID: req.ID.Uid,
}
if _, ok := seen[rid]; !ok {
out = append(out, req)
seen[rid] = struct{}{}
}
}
return out
}

View File

@ -22,6 +22,8 @@ type (
DecodedServiceEndpoints = resource.DecodedResource[*pbcatalog.ServiceEndpoints]
DecodedWorkload = resource.DecodedResource[*pbcatalog.Workload]
DecodedProxyConfiguration = resource.DecodedResource[*pbmesh.ProxyConfiguration]
+DecodedComputedProxyConfiguration = resource.DecodedResource[*pbmesh.ComputedProxyConfiguration]
DecodedDestinations = resource.DecodedResource[*pbmesh.Destinations]
+DecodedComputedDestinations = resource.DecodedResource[*pbmesh.ComputedExplicitDestinations]
DecodedProxyStateTemplate = resource.DecodedResource[*pbmesh.ProxyStateTemplate]
)

View File

@ -5,28 +5,10 @@ package intermediate
import (
"github.com/hashicorp/consul/internal/mesh/internal/types"
-"github.com/hashicorp/consul/internal/resource"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
-// CombinedDestinationRef contains all references we need for a specific
-// destination on the mesh.
-type CombinedDestinationRef struct {
-// ServiceRef is the reference to the destination service.
-ServiceRef *pbresource.Reference
-// Port is the port name for this destination.
-Port string
-// SourceProxies are the reference keys of source proxy state template resources.
-SourceProxies map[resource.ReferenceKey]struct{}
-// ExplicitDestinationsID is the id of the pbmesh.Destinations resource. For implicit destinations,
-// this should be nil.
-ExplicitDestinationsID *pbresource.ID
-}
type Destination struct {
Explicit *pbmesh.Destination
Service *types.DecodedService // for the name of this destination

View File

@ -6,6 +6,8 @@ package types
import (
"github.com/hashicorp/consul/internal/resource"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
+"github.com/hashicorp/consul/proto-public/pbresource"
+"github.com/hashicorp/consul/sdk/iptables"
)
func RegisterProxyConfiguration(r resource.Registry) {
@ -15,5 +17,35 @@ func RegisterProxyConfiguration(r resource.Registry) {
Scope: resource.ScopeNamespace,
// TODO(rb): add validation for proxy configuration
Validate: nil,
+Mutate: MutateProxyConfiguration,
})
}
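// MutateProxyConfiguration is the write-time mutation hook registered above: when
// transparent proxy mode is enabled, it defaults the outbound listener port to
// iptables.DefaultTProxyOutboundPort if no port has been set.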
func MutateProxyConfiguration(res *pbresource.Resource) error {
var proxyCfg pbmesh.ProxyConfiguration
err := res.Data.UnmarshalTo(&proxyCfg)
if err != nil {
return resource.NewErrDataParse(&proxyCfg, err)
}
changed := false
// Default the tproxy outbound port.
if proxyCfg.IsTransparentProxy() {
if proxyCfg.GetDynamicConfig().GetTransparentProxy() == nil {
proxyCfg.DynamicConfig.TransparentProxy = &pbmesh.TransparentProxy{
OutboundListenerPort: iptables.DefaultTProxyOutboundPort,
}
changed = true
} else if proxyCfg.GetDynamicConfig().GetTransparentProxy().OutboundListenerPort == 0 {
proxyCfg.DynamicConfig.TransparentProxy.OutboundListenerPort = iptables.DefaultTProxyOutboundPort
changed = true
}
}
if !changed {
return nil
}
return res.Data.MarshalFrom(&proxyCfg)
}

View File

@ -0,0 +1,84 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package types
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto/private/prototest"
"github.com/hashicorp/consul/sdk/iptables"
)
func TestMutateProxyConfiguration(t *testing.T) {
cases := map[string]struct {
data *pbmesh.ProxyConfiguration
expData *pbmesh.ProxyConfiguration
}{
"tproxy disabled": {
data: &pbmesh.ProxyConfiguration{},
expData: &pbmesh.ProxyConfiguration{},
},
"tproxy disabled explicitly": {
data: &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_DIRECT,
},
},
expData: &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_DIRECT,
},
},
},
"tproxy enabled and tproxy config is nil": {
data: &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
},
},
expData: &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
TransparentProxy: &pbmesh.TransparentProxy{
OutboundListenerPort: iptables.DefaultTProxyOutboundPort,
},
},
},
},
"tproxy enabled and tproxy config is empty": {
data: &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
TransparentProxy: &pbmesh.TransparentProxy{},
},
},
expData: &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
TransparentProxy: &pbmesh.TransparentProxy{
OutboundListenerPort: iptables.DefaultTProxyOutboundPort,
},
},
},
},
}
for name, c := range cases {
t.Run(name, func(t *testing.T) {
res := resourcetest.Resource(pbmesh.ProxyConfigurationType, "test").
WithData(t, c.data).
Build()
err := MutateProxyConfiguration(res)
require.NoError(t, err)
got := resourcetest.MustDecode[*pbmesh.ProxyConfiguration](t, res)
prototest.AssertDeepEqual(t, c.expData, got.GetData())
})
}
}

View File

@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestIsMeshEnabled(t *testing.T) { func TestServiceIsMeshEnabled(t *testing.T) {
cases := map[string]struct { cases := map[string]struct {
service *Service service *Service
exp bool exp bool

View File

@ -0,0 +1,26 @@
package catalogv2beta1
import (
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
// GetIdentities returns a sorted list of unique identities that these service endpoints point to.
func (s *ServiceEndpoints) GetIdentities() []string {
uniqueIdentities := make(map[string]struct{})
for _, ep := range s.GetEndpoints() {
if ep.GetIdentity() != "" {
uniqueIdentities[ep.GetIdentity()] = struct{}{}
}
}
if len(uniqueIdentities) == 0 {
return nil
}
identities := maps.Keys(uniqueIdentities)
slices.Sort(identities)
return identities
}
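// Illustrative usage (the identity names are made-up example values):
//
//	se := &ServiceEndpoints{Endpoints: []*Endpoint{{Identity: "web"}, {Identity: "api"}, {Identity: "web"}}}
//	se.GetIdentities() // returns []string{"api", "web"}: de-duplicated and sorted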

View File

@ -0,0 +1,49 @@
package catalogv2beta1
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestServiceEndpoints_GetIdentities(t *testing.T) {
cases := map[string]struct {
endpoints []*Endpoint
expIdentities []string
}{
"no endpoints": {
endpoints: nil,
expIdentities: nil,
},
"no identities": {
endpoints: []*Endpoint{
{},
{},
},
expIdentities: nil,
},
"single identity": {
endpoints: []*Endpoint{
{Identity: "foo"},
{Identity: "foo"},
{Identity: "foo"},
},
expIdentities: []string{"foo"},
},
"multiple identities": {
endpoints: []*Endpoint{
{Identity: "foo"},
{Identity: "foo"},
{Identity: "bar"},
},
expIdentities: []string{"bar", "foo"},
},
}
for name, c := range cases {
t.Run(name, func(t *testing.T) {
se := &ServiceEndpoints{Endpoints: c.endpoints}
require.Equal(t, c.expIdentities, se.GetIdentities())
})
}
}

View File

@ -51,7 +51,7 @@ func TestGetMeshPort(t *testing.T) {
}
}
-func TestIsMeshEnabled(t *testing.T) {
+func TestWorkloadIsMeshEnabled(t *testing.T) {
cases := map[string]struct {
ports map[string]*WorkloadPort
exp bool

View File

@ -1,5 +1,10 @@
package meshv2beta1
+func (p *ComputedProxyConfiguration) IsTransparentProxy() bool {
+return p.GetDynamicConfig() != nil &&
+p.DynamicConfig.Mode == ProxyMode_PROXY_MODE_TRANSPARENT
+}
func (p *ProxyConfiguration) IsTransparentProxy() bool {
return p.GetDynamicConfig() != nil &&
p.DynamicConfig.Mode == ProxyMode_PROXY_MODE_TRANSPARENT

View File

@ -6,44 +6,45 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestIsTransprentProxy(t *testing.T) { func TestIsTransparentProxy(t *testing.T) {
cases := map[string]struct { cases := map[string]struct {
proxyCfg *ProxyConfiguration dynamicConfig *DynamicConfig
exp bool exp bool
}{ }{
"nil dynamic config": { "nil dynamic config": {
proxyCfg: &ProxyConfiguration{}, dynamicConfig: nil,
exp: false, exp: false,
}, },
"default mode": { "default mode": {
proxyCfg: &ProxyConfiguration{ dynamicConfig: &DynamicConfig{
DynamicConfig: &DynamicConfig{
Mode: ProxyMode_PROXY_MODE_DEFAULT, Mode: ProxyMode_PROXY_MODE_DEFAULT,
}, },
},
exp: false, exp: false,
}, },
"direct mode": { "direct mode": {
proxyCfg: &ProxyConfiguration{ dynamicConfig: &DynamicConfig{
DynamicConfig: &DynamicConfig{
Mode: ProxyMode_PROXY_MODE_DEFAULT, Mode: ProxyMode_PROXY_MODE_DEFAULT,
}, },
},
exp: false, exp: false,
}, },
"transparent mode": { "transparent mode": {
proxyCfg: &ProxyConfiguration{ dynamicConfig: &DynamicConfig{
DynamicConfig: &DynamicConfig{
Mode: ProxyMode_PROXY_MODE_TRANSPARENT, Mode: ProxyMode_PROXY_MODE_TRANSPARENT,
}, },
},
exp: true, exp: true,
}, },
} }
for name, c := range cases { for name, c := range cases {
t.Run(name, func(t *testing.T) { t.Run(name, func(t *testing.T) {
require.Equal(t, c.exp, c.proxyCfg.IsTransparentProxy()) proxyCfg := &ProxyConfiguration{
DynamicConfig: c.dynamicConfig,
}
compProxyCfg := &ComputedProxyConfiguration{
DynamicConfig: c.dynamicConfig,
}
require.Equal(t, c.exp, proxyCfg.IsTransparentProxy())
require.Equal(t, c.exp, compProxyCfg.IsTransparentProxy())
}) })
} }
} }