mesh: sidecar proxy controller improvements (#19083)

This change builds on #19043 and #19067 and updates the sidecar proxy controller to use the computed resources introduced there. This has several benefits:

   * The cache is now simpler, which fixes earlier bugs (for example, multiple Upstreams/Destinations targeting the same service would overwrite each other).
   * We no longer need the proxy config cache.
   * We no longer need to merge proxy configs as part of the controller logic.
   * Controller watches are simpler because we no longer need complex cache-based mapping and can instead use the simple ReplaceType mapper (see the sketch just below this list).
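
As a rough illustration of that last point, here is a minimal, hedged sketch of a ReplaceType-style dependency mapper. It reuses the mapper signature and the resource.ReplaceType helper that appear in the cache code further down in this diff; it is not a verbatim excerpt from the commit, and the real controller may use a shared helper instead.

package example

import (
	"context"

	"github.com/hashicorp/consul/internal/controller"
	"github.com/hashicorp/consul/internal/resource"
	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

// mapToProxyStateTemplate maps a watched, name-aligned resource (for example a
// Workload or ComputedExplicitDestinations) to a reconcile request for the
// ProxyStateTemplate with the same name and tenancy by swapping the type.
func mapToProxyStateTemplate(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
	return []controller.Request{
		{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, res.Id)},
	}, nil
}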

It also makes several other improvements/refactors:

   * Unifies all caches into one. Originally the caches were fairly independent, but now that they need to interact with each other it makes sense to unify them: the sidecar proxy controller uses a single cache with three bimappers.
   * Unifies the cache and the mappers. The mapper already needed all of the caches, so with the cache unified it makes sense for the cache to do the mapping as well.
   * Gets rid of the service endpoints watches. Those watches were only needed so that, when a service's identities changed, we could update the SPIFFE IDs for that service's destinations in the proxy state template. However, they also generated a lot of reconcile requests for this controller, because service endpoints objects change frequently (they carry workload health status). Instead, the service endpoints controller now records a "bound identities" status condition on the service object. Because the sidecar proxy controller already watches service objects, updating the service's status gives it the notifications it needs (see the sketch after this list).
   * Adds a watch for workloads. We need it so that we get updates when a workload's ports change. It also ensures that we update cached identities when a workload's identity changes.
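
A condensed, hedged sketch of that bound-identities flow, stitched together from the service-endpoints controller hunks shown later in this diff. It assumes the helpers added there (ConditionManaged, ConditionIdentitiesFound, ConditionIdentitiesNotFound, and ServiceEndpoints.GetIdentities); the surrounding reconcile plumbing is elided.

package example

import (
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

// serviceStatusWithBoundIdentities builds the status that the service-endpoints
// controller writes onto the Service resource after recomputing its endpoints.
// ConditionManaged, ConditionIdentitiesFound and ConditionIdentitiesNotFound are
// the condition helpers added in this diff.
func serviceStatusWithBoundIdentities(svc *pbresource.Resource, latest *pbcatalog.ServiceEndpoints) *pbresource.Status {
	conditions := []*pbresource.Condition{ConditionManaged}

	// Record which workload identities are currently bound to this service,
	// e.g. ConditionIdentitiesFound([]string{"api"}).
	if identities := latest.GetIdentities(); len(identities) > 0 {
		conditions = append(conditions, ConditionIdentitiesFound(identities))
	} else {
		conditions = append(conditions, ConditionIdentitiesNotFound)
	}

	// Writing this status onto the Service (under StatusKey,
	// "consul.io/endpoint-manager") is what wakes up the sidecar proxy
	// controller: it already watches Service resources, so it no longer needs
	// its own ServiceEndpoints watch.
	return &pbresource.Status{
		ObservedGeneration: svc.Generation,
		Conditions:         conditions,
	}
}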
Iryna Shustava 2023-10-12 13:20:13 -06:00 committed by GitHub
parent ad06c96456
commit 54a12ab3c9
66 changed files with 1426 additions and 3135 deletions

View File

@ -34,6 +34,9 @@ var (
EndpointsStatusConditionEndpointsManaged = endpoints.StatusConditionEndpointsManaged
EndpointsStatusConditionManaged = endpoints.ConditionManaged
EndpointsStatusConditionUnmanaged = endpoints.ConditionUnmanaged
StatusConditionBoundIdentities = endpoints.StatusConditionBoundIdentities
StatusReasonWorkloadIdentitiesFound = endpoints.StatusReasonWorkloadIdentitiesFound
StatusReasonNoWorkloadIdentitiesFound = endpoints.StatusReasonNoWorkloadIdentitiesFound
FailoverStatusKey = failover.StatusKey
FailoverStatusConditionAccepted = failover.StatusConditionAccepted

View File

@ -105,12 +105,12 @@ func (r *serviceEndpointsReconciler) Reconcile(ctx context.Context, rt controlle
return err
}
var status *pbresource.Condition
var statusConditions []*pbresource.Condition
if serviceUnderManagement(serviceData.service) {
rt.Logger.Trace("service is enabled for automatic endpoint management")
// This service should have its endpoints automatically managed
status = ConditionManaged
statusConditions = append(statusConditions, ConditionManaged)
// Inform the WorkloadMapper to track this service and its selectors. So
// future workload updates that would be matched by the services selectors
@ -133,6 +133,12 @@ func (r *serviceEndpointsReconciler) Reconcile(ctx context.Context, rt controlle
// Calculate the latest endpoints from the already gathered workloads
latestEndpoints := workloadsToEndpoints(serviceData.service, workloadData)
// Add status
if endpointsData != nil {
statusConditions = append(statusConditions,
workloadIdentityStatusFromEndpoints(latestEndpoints))
}
// Before writing the endpoints actually check to see if they are changed
if endpointsData == nil || !proto.Equal(endpointsData.endpoints, latestEndpoints) {
rt.Logger.Trace("endpoints have changed")
@ -168,7 +174,7 @@ func (r *serviceEndpointsReconciler) Reconcile(ctx context.Context, rt controlle
} else {
rt.Logger.Trace("endpoints are not being automatically managed")
// This service is not having its endpoints automatically managed
status = ConditionUnmanaged
statusConditions = append(statusConditions, ConditionUnmanaged)
// Inform the WorkloadMapper that it no longer needs to track this service
// as it is no longer under endpoint management
@ -203,9 +209,7 @@ func (r *serviceEndpointsReconciler) Reconcile(ctx context.Context, rt controlle
// for that object existing or not.
newStatus := &pbresource.Status{
ObservedGeneration: serviceData.resource.Generation,
Conditions: []*pbresource.Condition{
status,
},
Conditions: statusConditions,
}
// If the status is unchanged then we should return and avoid the unnecessary write
if resource.EqualStatus(serviceData.resource.Status[StatusKey], newStatus, false) {
@ -388,3 +392,13 @@ func workloadToEndpoint(svc *pbcatalog.Service, data *workloadData) *pbcatalog.E
Identity: data.workload.Identity,
}
}
func workloadIdentityStatusFromEndpoints(endpoints *pbcatalog.ServiceEndpoints) *pbresource.Condition {
identities := endpoints.GetIdentities()
if len(identities) > 0 {
return ConditionIdentitiesFound(identities)
}
return ConditionIdentitiesNotFound
}

View File

@ -388,6 +388,50 @@ func TestDetermineWorkloadHealth(t *testing.T) {
}
}
func TestWorkloadIdentityStatusFromEndpoints(t *testing.T) {
cases := map[string]struct {
endpoints *pbcatalog.ServiceEndpoints
expStatus *pbresource.Condition
}{
"endpoints are nil": {
expStatus: ConditionIdentitiesNotFound,
},
"endpoints without identities": {
endpoints: &pbcatalog.ServiceEndpoints{},
expStatus: ConditionIdentitiesNotFound,
},
"endpoints with identities": {
endpoints: &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
{
Identity: "foo",
},
},
},
expStatus: ConditionIdentitiesFound([]string{"foo"}),
},
"endpoints with multiple identities": {
endpoints: &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
{
Identity: "foo",
},
{
Identity: "bar",
},
},
},
expStatus: ConditionIdentitiesFound([]string{"bar", "foo"}),
},
}
for name, c := range cases {
t.Run(name, func(t *testing.T) {
prototest.AssertDeepEqual(t, c.expStatus, workloadIdentityStatusFromEndpoints(c.endpoints))
})
}
}
type controllerSuite struct {
suite.Suite
@ -646,6 +690,7 @@ func (suite *controllerSuite) TestController() {
res := suite.client.WaitForReconciliation(suite.T(), service.Id, StatusKey)
// Check that the services status was updated accordingly
rtest.RequireStatusCondition(suite.T(), res, StatusKey, ConditionManaged)
rtest.RequireStatusCondition(suite.T(), res, StatusKey, ConditionIdentitiesNotFound)
// Check that the endpoints resource exists and contains 0 endpoints
endpointsID := rtest.Resource(pbcatalog.ServiceEndpointsType, "api").ID()
@ -665,6 +710,9 @@ func (suite *controllerSuite) TestController() {
}).
Write(suite.T(), suite.client)
suite.client.WaitForStatusCondition(suite.T(), service.Id, StatusKey,
ConditionIdentitiesFound([]string{"api"}))
// Wait for the endpoints to be regenerated
endpoints = suite.client.WaitForNewVersion(suite.T(), endpointsID, endpoints.Version)
@ -713,6 +761,34 @@ func (suite *controllerSuite) TestController() {
Identity: "api",
})
// Update workload identity and check that the status on the service is updated
workload = rtest.Resource(pbcatalog.WorkloadType, "api-1").
WithData(suite.T(), &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "127.0.0.1"}},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
"grpc": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_GRPC},
},
Identity: "endpoints-api-identity",
}).
Write(suite.T(), suite.client)
suite.client.WaitForStatusCondition(suite.T(), service.Id, StatusKey, ConditionIdentitiesFound([]string{"endpoints-api-identity"}))
// Verify that the generated endpoints now contain the workload
endpoints = suite.client.WaitForNewVersion(suite.T(), endpointsID, endpoints.Version)
suite.requireEndpoints(endpoints, &pbcatalog.Endpoint{
TargetRef: workload.Id,
Addresses: []*pbcatalog.WorkloadAddress{
{Host: "127.0.0.1", Ports: []string{"http"}},
},
Ports: map[string]*pbcatalog.WorkloadPort{
"http": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
},
HealthStatus: pbcatalog.Health_HEALTH_PASSING,
Identity: "endpoints-api-identity",
})
// rewrite the service to add more selection criteria. This should trigger
// reconciliation but shouldn't result in updating the endpoints because
// the actual list of currently selected workloads has not changed

View File

@ -3,7 +3,12 @@
package endpoints
import "github.com/hashicorp/consul/proto-public/pbresource"
import (
"fmt"
"strings"
"github.com/hashicorp/consul/proto-public/pbresource"
)
const (
StatusKey = "consul.io/endpoint-manager"
@ -12,8 +17,16 @@ const (
StatusReasonSelectorNotFound = "SelectorNotFound"
StatusReasonSelectorFound = "SelectorFound"
SelectorFoundMessage = "A valid workload selector is present within the service."
SelectorNotFoundMessage = "Either the workload selector was not present or contained no selection criteria."
selectorFoundMessage = "A valid workload selector is present within the service."
selectorNotFoundMessage = "Either the workload selector was not present or contained no selection criteria."
StatusConditionBoundIdentities = "BoundIdentities"
StatusReasonWorkloadIdentitiesFound = "WorkloadIdentitiesFound"
StatusReasonNoWorkloadIdentitiesFound = "NoWorkloadIdentitiesFound"
identitiesFoundMessageFormat = "Found workload identities associated with this service: %q."
identitiesNotFoundChangedMessage = "No associated workload identities found."
)
var (
@ -21,13 +34,29 @@ var (
Type: StatusConditionEndpointsManaged,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonSelectorFound,
Message: SelectorFoundMessage,
Message: selectorFoundMessage,
}
ConditionUnmanaged = &pbresource.Condition{
Type: StatusConditionEndpointsManaged,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonSelectorNotFound,
Message: SelectorNotFoundMessage,
Message: selectorNotFoundMessage,
}
ConditionIdentitiesNotFound = &pbresource.Condition{
Type: StatusConditionBoundIdentities,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonNoWorkloadIdentitiesFound,
Message: identitiesNotFoundChangedMessage,
}
)
func ConditionIdentitiesFound(identities []string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionBoundIdentities,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonWorkloadIdentitiesFound,
Message: fmt.Sprintf(identitiesFoundMessageFormat, strings.Join(identities, ",")),
}
}

View File

@ -8,7 +8,6 @@ import (
"github.com/hashicorp/consul/internal/mesh/internal/controllers"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
)
@ -16,16 +15,6 @@ import (
var (
// Controller statuses.
// Sidecar-proxy controller.
SidecarProxyStatusKey = sidecarproxy.ControllerName
SidecarProxyStatusConditionMeshDestination = status.StatusConditionDestinationAccepted
SidecarProxyStatusReasonNonMeshDestination = status.StatusReasonMeshProtocolNotFound
SidecarProxyStatusReasonMeshDestination = status.StatusReasonMeshProtocolFound
SidecarProxyStatusReasonDestinationServiceNotFound = status.StatusReasonDestinationServiceNotFound
SidecarProxyStatusReasonDestinationServiceFound = status.StatusReasonDestinationServiceFound
SidecarProxyStatusReasonMeshProtocolDestinationPort = status.StatusReasonMeshProtocolDestinationPort
SidecarProxyStatusReasonNonMeshProtocolDestinationPort = status.StatusReasonNonMeshProtocolDestinationPort
// Routes controller
RoutesStatusKey = routes.StatusKey
RoutesStatusConditionAccepted = routes.StatusConditionAccepted

View File

@ -1,43 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/mappers/bimapper"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
type ComputedRoutesCache struct {
mapper *bimapper.Mapper
}
func NewComputedRoutesCache() *ComputedRoutesCache {
return &ComputedRoutesCache{
mapper: bimapper.New(pbmesh.ComputedRoutesType, pbcatalog.ServiceType),
}
}
func (c *ComputedRoutesCache) TrackComputedRoutes(computedRoutes *types.DecodedComputedRoutes) {
var serviceRefs []resource.ReferenceOrID
for _, pcr := range computedRoutes.Data.PortedConfigs {
for _, details := range pcr.Targets {
serviceRefs = append(serviceRefs, details.BackendRef.Ref)
}
}
c.mapper.TrackItem(computedRoutes.Resource.Id, serviceRefs)
}
func (c *ComputedRoutesCache) UntrackComputedRoutes(computedRoutesID *pbresource.ID) {
c.mapper.UntrackItem(computedRoutesID)
}
func (c *ComputedRoutesCache) ComputedRoutesByService(id resource.ReferenceOrID) []*pbresource.ID {
return c.mapper.ItemIDsForLink(id)
}

View File

@ -1,248 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"sync"
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// DestinationsCache stores information needed for the sidecar-proxy controller to reconcile efficiently.
// This currently means storing a list of all destinations for easy look up
// as well as indices of source proxies where those destinations are referenced.
//
// It is the responsibility of the controller and its subcomponents (like mapper and data fetcher)
// to keep this cache up-to-date as we're observing new data.
type DestinationsCache struct {
lock sync.RWMutex
// store is a map from destination service reference and port as a reference key
// to the object representing destination reference.
store map[ReferenceKeyWithPort]intermediate.CombinedDestinationRef
storedPorts map[resource.ReferenceKey]map[string]struct{}
// sourceProxiesIndex stores a map from a reference key of source proxy IDs
// to the keys in the store map.
sourceProxiesIndex map[resource.ReferenceKey]storeKeys
}
type storeKeys map[ReferenceKeyWithPort]struct{}
func NewDestinationsCache() *DestinationsCache {
return &DestinationsCache{
store: make(map[ReferenceKeyWithPort]intermediate.CombinedDestinationRef),
storedPorts: make(map[resource.ReferenceKey]map[string]struct{}),
sourceProxiesIndex: make(map[resource.ReferenceKey]storeKeys),
}
}
type ReferenceKeyWithPort struct {
resource.ReferenceKey
port string
}
func KeyFromRefAndPort(ref *pbresource.Reference, port string) ReferenceKeyWithPort {
refKey := resource.NewReferenceKey(ref)
return ReferenceKeyWithPort{refKey, port}
}
// WriteDestination adds destination reference to the cache.
func (c *DestinationsCache) WriteDestination(d intermediate.CombinedDestinationRef) {
// Check that reference is a catalog.Service type.
if !resource.EqualType(pbcatalog.ServiceType, d.ServiceRef.Type) {
panic("ref must of type catalog.Service")
}
// Also, check that explicit destination reference is a mesh.Upstreams type.
if d.ExplicitDestinationsID != nil &&
!resource.EqualType(pbmesh.DestinationsType, d.ExplicitDestinationsID.Type) {
panic("ExplicitDestinationsID must be of type mesh.Upstreams")
}
c.lock.Lock()
defer c.lock.Unlock()
c.deleteLocked(d.ServiceRef, d.Port)
c.addLocked(d)
}
// DeleteDestination deletes a given destination reference and port from cache.
func (c *DestinationsCache) DeleteDestination(ref *pbresource.Reference, port string) {
// Check that reference is a catalog.Service type.
if !resource.EqualType(pbcatalog.ServiceType, ref.Type) {
panic("ref must of type catalog.Service")
}
c.lock.Lock()
defer c.lock.Unlock()
c.deleteLocked(ref, port)
}
func (c *DestinationsCache) addLocked(d intermediate.CombinedDestinationRef) {
key := KeyFromRefAndPort(d.ServiceRef, d.Port)
c.store[key] = d
c.addPortLocked(d.ServiceRef, d.Port)
// Update source proxies index.
for proxyRef := range d.SourceProxies {
_, ok := c.sourceProxiesIndex[proxyRef]
if !ok {
c.sourceProxiesIndex[proxyRef] = make(storeKeys)
}
c.sourceProxiesIndex[proxyRef][key] = struct{}{}
}
}
func (c *DestinationsCache) addPortLocked(ref *pbresource.Reference, port string) {
rk := resource.NewReferenceKey(ref)
m, ok := c.storedPorts[rk]
if !ok {
m = make(map[string]struct{})
c.storedPorts[rk] = m
}
m[port] = struct{}{}
}
func (c *DestinationsCache) deleteLocked(ref *pbresource.Reference, port string) {
key := KeyFromRefAndPort(ref, port)
// First get it from the store.
dest, ok := c.store[key]
if !ok {
// If it's not there, return as there's nothing for us to do.
return
}
// Update source proxies indices.
for proxyRef := range dest.SourceProxies {
// Delete our destination key from this source proxy.
delete(c.sourceProxiesIndex[proxyRef], key)
}
// Finally, delete this destination from the store.
delete(c.store, key)
c.deletePortLocked(ref, port)
}
func (c *DestinationsCache) deletePortLocked(ref *pbresource.Reference, port string) {
rk := resource.NewReferenceKey(ref)
m, ok := c.storedPorts[rk]
if !ok {
return
}
delete(m, port)
if len(m) == 0 {
delete(c.storedPorts, rk)
}
}
// DeleteSourceProxy deletes the source proxy given by id from the cache.
func (c *DestinationsCache) DeleteSourceProxy(id *pbresource.ID) {
// Check that id is the ProxyStateTemplate type.
if !resource.EqualType(pbmesh.ProxyStateTemplateType, id.Type) {
panic("id must of type mesh.ProxyStateTemplate")
}
c.lock.Lock()
defer c.lock.Unlock()
proxyIDKey := resource.NewReferenceKey(id)
// Get all destination keys.
destKeys := c.sourceProxiesIndex[proxyIDKey]
for destKey := range destKeys {
// Read destination.
dest, ok := c.store[destKey]
if !ok {
// If there's no destination with that key, skip it as there's nothing for us to do.
continue
}
// Delete the source proxy ID.
delete(dest.SourceProxies, proxyIDKey)
}
// Finally, delete the index for this proxy.
delete(c.sourceProxiesIndex, proxyIDKey)
}
// ReadDestination returns a destination reference for the given service reference and port.
func (c *DestinationsCache) ReadDestination(ref *pbresource.Reference, port string) (intermediate.CombinedDestinationRef, bool) {
// Check that reference is a catalog.Service type.
if !resource.EqualType(pbcatalog.ServiceType, ref.Type) {
panic("ref must of type catalog.Service")
}
c.lock.RLock()
defer c.lock.RUnlock()
key := KeyFromRefAndPort(ref, port)
d, found := c.store[key]
return d, found
}
func (c *DestinationsCache) ReadDestinationsByServiceAllPorts(ref *pbresource.Reference) []intermediate.CombinedDestinationRef {
// Check that reference is a catalog.Service type.
if !resource.EqualType(pbcatalog.ServiceType, ref.Type) {
panic("ref must of type catalog.Service")
}
c.lock.RLock()
defer c.lock.RUnlock()
rk := resource.NewReferenceKey(ref)
ports, ok := c.storedPorts[rk]
if !ok {
return nil
}
var destinations []intermediate.CombinedDestinationRef
for port := range ports {
key := KeyFromRefAndPort(ref, port)
d, found := c.store[key]
if found {
destinations = append(destinations, d)
}
}
return destinations
}
// DestinationsBySourceProxy returns all destinations that are a referenced by the given source proxy id.
func (c *DestinationsCache) DestinationsBySourceProxy(id *pbresource.ID) []intermediate.CombinedDestinationRef {
// Check that id is the ProxyStateTemplate type.
if !resource.EqualType(pbmesh.ProxyStateTemplateType, id.Type) {
panic("id must of type mesh.ProxyStateTemplate")
}
c.lock.RLock()
defer c.lock.RUnlock()
var destinations []intermediate.CombinedDestinationRef
proxyIDKey := resource.NewReferenceKey(id)
for destKey := range c.sourceProxiesIndex[proxyIDKey] {
destinations = append(destinations, c.store[destKey])
}
return destinations
}

View File

@ -1,242 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestWrite_Create(t *testing.T) {
cache := NewDestinationsCache()
proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
destination := testDestination(proxyID)
cache.WriteDestination(destination)
destKey := KeyFromRefAndPort(destination.ServiceRef, destination.Port)
require.Equal(t, destination, cache.store[destKey])
actualSourceProxies := cache.sourceProxiesIndex
expectedSourceProxies := map[resource.ReferenceKey]storeKeys{
resource.NewReferenceKey(proxyID): {destKey: struct{}{}},
}
require.Equal(t, expectedSourceProxies, actualSourceProxies)
// Check that we can read back the destination successfully.
actualDestination, found := cache.ReadDestination(destination.ServiceRef, destination.Port)
require.True(t, found)
require.Equal(t, destination, actualDestination)
}
func TestWrite_Update(t *testing.T) {
cache := NewDestinationsCache()
proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
destination1 := testDestination(proxyID)
cache.WriteDestination(destination1)
// Add another destination for the same proxy ID.
destination2 := testDestination(proxyID)
destination2.ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "test-service-2").ReferenceNoSection()
cache.WriteDestination(destination2)
// Check that the source proxies are updated.
actualSourceProxies := cache.sourceProxiesIndex
expectedSourceProxies := map[resource.ReferenceKey]storeKeys{
resource.NewReferenceKey(proxyID): {
KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{},
KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{},
},
}
require.Equal(t, expectedSourceProxies, actualSourceProxies)
// Add another destination for a different proxy.
anotherProxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-def").ID()
destination3 := testDestination(anotherProxyID)
destination3.ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "test-service-3").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection()
cache.WriteDestination(destination3)
actualSourceProxies = cache.sourceProxiesIndex
expectedSourceProxies = map[resource.ReferenceKey]storeKeys{
resource.NewReferenceKey(proxyID): {
KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{},
KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{},
},
resource.NewReferenceKey(anotherProxyID): {
KeyFromRefAndPort(destination3.ServiceRef, destination3.Port): struct{}{},
},
}
require.Equal(t, expectedSourceProxies, actualSourceProxies)
// Overwrite the proxy id completely.
destination1.SourceProxies = map[resource.ReferenceKey]struct{}{resource.NewReferenceKey(anotherProxyID): {}}
cache.WriteDestination(destination1)
actualSourceProxies = cache.sourceProxiesIndex
expectedSourceProxies = map[resource.ReferenceKey]storeKeys{
resource.NewReferenceKey(proxyID): {
KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{},
},
resource.NewReferenceKey(anotherProxyID): {
KeyFromRefAndPort(destination1.ServiceRef, destination1.Port): struct{}{},
KeyFromRefAndPort(destination3.ServiceRef, destination3.Port): struct{}{},
},
}
require.Equal(t, expectedSourceProxies, actualSourceProxies)
}
func TestWrite_Delete(t *testing.T) {
cache := NewDestinationsCache()
proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
destination1 := testDestination(proxyID)
cache.WriteDestination(destination1)
// Add another destination for the same proxy ID.
destination2 := testDestination(proxyID)
destination2.ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "test-service-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection()
cache.WriteDestination(destination2)
cache.DeleteDestination(destination1.ServiceRef, destination1.Port)
require.NotContains(t, cache.store, KeyFromRefAndPort(destination1.ServiceRef, destination1.Port))
// Check that the source proxies are updated.
actualSourceProxies := cache.sourceProxiesIndex
expectedSourceProxies := map[resource.ReferenceKey]storeKeys{
resource.NewReferenceKey(proxyID): {
KeyFromRefAndPort(destination2.ServiceRef, destination2.Port): struct{}{},
},
}
require.Equal(t, expectedSourceProxies, actualSourceProxies)
// Try to delete non-existing destination and check that nothing has changed.
cache.DeleteDestination(
resourcetest.Resource(pbcatalog.ServiceType, "does-not-exist").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection(),
"doesn't-matter")
require.Contains(t, cache.store, KeyFromRefAndPort(destination2.ServiceRef, destination2.Port))
require.Equal(t, expectedSourceProxies, cache.sourceProxiesIndex)
}
func TestDeleteSourceProxy(t *testing.T) {
cache := NewDestinationsCache()
proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
destination1 := testDestination(proxyID)
cache.WriteDestination(destination1)
// Add another destination for the same proxy ID.
destination2 := testDestination(proxyID)
destination2.ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "test-service-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection()
cache.WriteDestination(destination2)
cache.DeleteSourceProxy(proxyID)
// Check that source proxy index is gone.
proxyKey := resource.NewReferenceKey(proxyID)
require.NotContains(t, cache.sourceProxiesIndex, proxyKey)
// Check that the destinations no longer have this proxy as the source.
require.NotContains(t, destination1.SourceProxies, proxyKey)
require.NotContains(t, destination2.SourceProxies, proxyKey)
// Try to add a non-existent key to source proxy index
cache.sourceProxiesIndex[proxyKey] = map[ReferenceKeyWithPort]struct{}{
{port: "doesn't-matter"}: {}}
cache.DeleteSourceProxy(proxyID)
// Check that source proxy index is gone.
require.NotContains(t, cache.sourceProxiesIndex, proxyKey)
// Check that the destinations no longer have this proxy as the source.
require.NotContains(t, destination1.SourceProxies, proxyKey)
require.NotContains(t, destination2.SourceProxies, proxyKey)
}
func TestDestinationsBySourceProxy(t *testing.T) {
cache := NewDestinationsCache()
proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
destination1 := testDestination(proxyID)
cache.WriteDestination(destination1)
// Add another destination for the same proxy ID.
destination2 := testDestination(proxyID)
destination2.ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "test-service-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection()
cache.WriteDestination(destination2)
actualDestinations := cache.DestinationsBySourceProxy(proxyID)
expectedDestinations := []intermediate.CombinedDestinationRef{destination1, destination2}
require.ElementsMatch(t, expectedDestinations, actualDestinations)
}
func TestReadDestinationsByServiceAllPorts(t *testing.T) {
cache := NewDestinationsCache()
proxyID := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
// test-service@tcp
destination1 := testDestination(proxyID)
cache.WriteDestination(destination1)
// test-service@tcp2
destination2 := testDestination(proxyID)
destination2.Port = "tcp2"
cache.WriteDestination(destination2)
// other-service@tcp
destination3 := testDestination(proxyID)
destination3.ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "other-service").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection()
cache.WriteDestination(destination3)
t.Run("test-service referenced by two ports", func(t *testing.T) {
dests := cache.ReadDestinationsByServiceAllPorts(destination1.ServiceRef)
require.Len(t, dests, 2)
prototest.AssertElementsMatch(t, []intermediate.CombinedDestinationRef{
destination1, destination2,
}, dests)
})
t.Run("other-service referenced by one port", func(t *testing.T) {
dests := cache.ReadDestinationsByServiceAllPorts(destination3.ServiceRef)
require.Len(t, dests, 1)
prototest.AssertElementsMatch(t, []intermediate.CombinedDestinationRef{
destination3,
}, dests)
})
}
func testDestination(proxyID *pbresource.ID) intermediate.CombinedDestinationRef {
return intermediate.CombinedDestinationRef{
ServiceRef: resourcetest.Resource(pbcatalog.ServiceType, "test-service").
WithTenancy(resource.DefaultNamespacedTenancy()).ReferenceNoSection(),
Port: "tcp",
ExplicitDestinationsID: resourcetest.Resource(pbmesh.DestinationsType, "test-servicedestinations").
WithTenancy(resource.DefaultNamespacedTenancy()).ID(),
SourceProxies: map[resource.ReferenceKey]struct{}{
resource.NewReferenceKey(proxyID): {},
},
}
}

View File

@ -1,38 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/mappers/bimapper"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// IdentitiesCache tracks mappings between workload identities and proxy IDs
// that a configuration applies to. It is the responsibility of the controller to
// keep this cache up-to-date.
type IdentitiesCache struct {
mapper *bimapper.Mapper
}
func NewIdentitiesCache() *IdentitiesCache {
return &IdentitiesCache{
mapper: bimapper.New(pbmesh.ProxyStateTemplateType, pbauth.WorkloadIdentityType),
}
}
func (c *IdentitiesCache) ProxyIDsByWorkloadIdentity(id *pbresource.ID) []*pbresource.ID {
return c.mapper.ItemIDsForLink(id)
}
func (c *IdentitiesCache) TrackPair(identityID *pbresource.ID, proxyID *pbresource.ID) {
c.mapper.TrackItem(proxyID, []resource.ReferenceOrID{identityID})
}
// UntrackProxyID removes tracking for the given proxy state template ID.
func (c *IdentitiesCache) UntrackProxyID(proxyID *pbresource.ID) {
c.mapper.UntrackItem(proxyID)
}

View File

@ -1,59 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
func TestIdentitiesCache(t *testing.T) {
cache := NewIdentitiesCache()
identityID1 := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
identityID2 := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxyID1 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxyID2 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
// Empty cache
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID1))
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID2))
// Insert value and fetch it.
cache.TrackPair(identityID1, proxyID1)
require.Equal(t, []*pbresource.ID{proxyID1}, cache.ProxyIDsByWorkloadIdentity(identityID1))
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID2))
// Insert another value referencing the same identity.
cache.TrackPair(identityID1, proxyID2)
require.ElementsMatch(t, []*pbresource.ID{proxyID1, proxyID2}, cache.ProxyIDsByWorkloadIdentity(identityID1))
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID2))
// Now proxy 1 uses identity 2
cache.TrackPair(identityID2, proxyID1)
require.Equal(t, []*pbresource.ID{proxyID1}, cache.ProxyIDsByWorkloadIdentity(identityID2))
require.Equal(t, []*pbresource.ID{proxyID2}, cache.ProxyIDsByWorkloadIdentity(identityID1))
// Untrack proxy 2
cache.UntrackProxyID(proxyID2)
require.Equal(t, []*pbresource.ID{proxyID1}, cache.ProxyIDsByWorkloadIdentity(identityID2))
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID1))
// Untrack proxy 1
cache.UntrackProxyID(proxyID1)
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID2))
require.Nil(t, cache.ProxyIDsByWorkloadIdentity(identityID1))
}

View File

@ -1,44 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/mappers/bimapper"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// ProxyConfigurationCache tracks mappings between proxy configurations and proxy IDs
// that a configuration applies to. It is the responsibility of the controller to
// keep this cache up-to-date.
type ProxyConfigurationCache struct {
mapper *bimapper.Mapper
}
func NewProxyConfigurationCache() *ProxyConfigurationCache {
return &ProxyConfigurationCache{
mapper: bimapper.New(pbmesh.ProxyConfigurationType, pbmesh.ProxyStateTemplateType),
}
}
// ProxyConfigurationsByProxyID returns proxy configuration IDs given the id of the proxy state template.
func (c *ProxyConfigurationCache) ProxyConfigurationsByProxyID(id *pbresource.ID) []*pbresource.ID {
return c.mapper.ItemIDsForLink(id)
}
// TrackProxyConfiguration tracks given proxy configuration ID and the linked proxy state template IDs.
func (c *ProxyConfigurationCache) TrackProxyConfiguration(proxyCfgID *pbresource.ID, proxyIDs []resource.ReferenceOrID) {
c.mapper.TrackItem(proxyCfgID, proxyIDs)
}
// UntrackProxyConfiguration removes tracking for the given proxy configuration ID.
func (c *ProxyConfigurationCache) UntrackProxyConfiguration(proxyCfgID *pbresource.ID) {
c.mapper.UntrackItem(proxyCfgID)
}
// UntrackProxyID removes tracking for the given proxy state template ID.
func (c *ProxyConfigurationCache) UntrackProxyID(proxyID *pbresource.ID) {
c.mapper.UntrackLink(proxyID)
}

View File

@ -1,80 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxycache
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestProxyConfigurationCache(t *testing.T) {
cache := NewProxyConfigurationCache()
// Create some proxy configurations.
proxyCfg1 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "test-cfg-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxyCfg2 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "test-cfg-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxyCfg3 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "test-cfg-3").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
// Create some proxy state templates.
p1 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "w-111").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
p2 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "w-222").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
p3 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "w-333").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
p4 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "w-444").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
p5 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "w-555").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
// Track these and make sure there's some overlap.
cache.TrackProxyConfiguration(proxyCfg1, []resource.ReferenceOrID{p1, p2, p4})
cache.TrackProxyConfiguration(proxyCfg2, []resource.ReferenceOrID{p3, p4, p5})
cache.TrackProxyConfiguration(proxyCfg3, []resource.ReferenceOrID{p1, p3})
// Read proxy configurations by proxy.
requireProxyConfigurations(t, cache, p1, proxyCfg1, proxyCfg3)
requireProxyConfigurations(t, cache, p2, proxyCfg1)
requireProxyConfigurations(t, cache, p3, proxyCfg2, proxyCfg3)
requireProxyConfigurations(t, cache, p4, proxyCfg1, proxyCfg2)
requireProxyConfigurations(t, cache, p5, proxyCfg2)
// Untrack some proxy IDs.
cache.UntrackProxyID(p1)
requireProxyConfigurations(t, cache, p1)
// Untrack some proxy IDs.
cache.UntrackProxyID(p3)
requireProxyConfigurations(t, cache, p3)
// Untrack proxy cfg.
cache.UntrackProxyConfiguration(proxyCfg1)
requireProxyConfigurations(t, cache, p1) // no-op because we untracked it earlier
requireProxyConfigurations(t, cache, p2)
requireProxyConfigurations(t, cache, p3) // no-op because we untracked it earlier
requireProxyConfigurations(t, cache, p4, proxyCfg2)
requireProxyConfigurations(t, cache, p5, proxyCfg2)
}
func requireProxyConfigurations(t *testing.T, cache *ProxyConfigurationCache, proxyID *pbresource.ID, proxyCfgs ...*pbresource.ID) {
t.Helper()
actualProxyCfgs := cache.ProxyConfigurationsByProxyID(proxyID)
require.Len(t, actualProxyCfgs, len(proxyCfgs))
prototest.AssertElementsMatch(t, proxyCfgs, actualProxyCfgs)
}

View File

@ -24,6 +24,7 @@ import (
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
"github.com/hashicorp/consul/sdk/iptables"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
)
@ -102,7 +103,8 @@ func (suite *controllerTestSuite) SetupTest() {
suite.expComputedProxyCfg = &pbmesh.ComputedProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
TransparentProxy: &pbmesh.TransparentProxy{OutboundListenerPort: iptables.DefaultTProxyOutboundPort},
LocalConnection: map[string]*pbmesh.ConnectionConfig{
"tcp": {ConnectTimeout: durationpb.New(2 * time.Second)},
},
@ -264,7 +266,8 @@ func (suite *controllerTestSuite) TestController() {
// The "test-workload" computed proxy configurations should now be updated to use only proxy cfg 1 and 3.
expProxyCfg := &pbmesh.ComputedProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
TransparentProxy: &pbmesh.TransparentProxy{OutboundListenerPort: iptables.DefaultTProxyOutboundPort},
},
BootstrapConfig: &pbmesh.BootstrapConfig{
PrometheusBindAddr: "0.0.0.0:9000",

View File

@ -8,14 +8,13 @@ import (
"github.com/hashicorp/consul/agent/leafcert"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/explicitdestinations"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/explicitdestinations/mapper"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/proxyconfiguration"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/xds"
"github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper"
"github.com/hashicorp/consul/internal/mesh/internal/mappers/workloadselectionmapper"
"github.com/hashicorp/consul/internal/resource/mappers/bimapper"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
@ -41,15 +40,8 @@ func Register(mgr *controller.Manager, deps Dependencies) {
}
mgr.Register(xds.Controller(endpointsMapper, deps.ProxyUpdater, deps.TrustBundleFetcher, deps.LeafCertManager, leafMapper, leafCancels, deps.LocalDatacenter))
var (
destinationsCache = sidecarproxycache.NewDestinationsCache()
proxyCfgCache = sidecarproxycache.NewProxyConfigurationCache()
computedRoutesCache = sidecarproxycache.NewComputedRoutesCache()
identitiesCache = sidecarproxycache.NewIdentitiesCache()
m = sidecarproxymapper.New(destinationsCache, proxyCfgCache, computedRoutesCache, identitiesCache)
)
mgr.Register(
sidecarproxy.Controller(destinationsCache, proxyCfgCache, computedRoutesCache, identitiesCache, m, deps.TrustDomainFetcher, deps.LocalDatacenter, deps.DefaultAllow),
sidecarproxy.Controller(cache.New(), deps.TrustDomainFetcher, deps.LocalDatacenter, deps.DefaultAllow),
)
mgr.Register(routes.Controller())

View File

@ -4,6 +4,10 @@
package builder
import (
"fmt"
"github.com/hashicorp/consul/internal/resource"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbmesh/v2beta1/pbproxystate"
"github.com/hashicorp/consul/proto-public/pbresource"
@ -13,7 +17,7 @@ import (
type Builder struct {
id *pbresource.ID
proxyStateTemplate *pbmesh.ProxyStateTemplate
proxyCfg *pbmesh.ProxyConfiguration
proxyCfg *pbmesh.ComputedProxyConfiguration
trustDomain string
localDatacenter string
defaultAllow bool
@ -25,8 +29,15 @@ func New(
trustDomain string,
dc string,
defaultAllow bool,
proxyCfg *pbmesh.ProxyConfiguration,
proxyCfg *pbmesh.ComputedProxyConfiguration,
) *Builder {
if !resource.EqualType(pbmesh.ProxyStateTemplateType, id.GetType()) {
panic(fmt.Sprintf("wrong type: expected pbmesh.ProxyStateTemplate, but got %T", id.Type))
}
if !resource.EqualType(pbauth.WorkloadIdentityType, identity.GetType()) {
panic(fmt.Sprintf("wrong type: expected pbauth.WorkloadIdentityType, but got %T", identity.Type))
}
return &Builder{
id: id,
trustDomain: trustDomain,

View File

@ -32,7 +32,7 @@ func TestBuildMultiportImplicitDestinations(t *testing.T) {
trustDomain = "foo.consul"
datacenter = "dc1"
)
proxyCfg := &pbmesh.ProxyConfiguration{
proxyCfg := &pbmesh.ComputedProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
TransparentProxy: &pbmesh.TransparentProxy{

View File

@ -497,7 +497,7 @@ func TestBuildImplicitDestinations(t *testing.T) {
)
require.NotNil(t, api2ComputedRoutes)
proxyCfg := &pbmesh.ProxyConfiguration{
proxyCfg := &pbmesh.ComputedProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
TransparentProxy: &pbmesh.TransparentProxy{

View File

@ -17,21 +17,21 @@ import (
func TestBuildExposePaths_NilChecks(t *testing.T) {
testutil.RunStep(t, "proxy cfg is nil", func(t *testing.T) {
b := New(nil, nil, "foo.consul", "dc1", true, nil)
b := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul", "dc1", true, nil)
require.NotPanics(t, func() {
b.buildExposePaths(nil)
})
})
testutil.RunStep(t, "dynamic cfg is nil", func(t *testing.T) {
b := New(nil, nil, "foo.consul", "dc1", true, &pbmesh.ProxyConfiguration{})
b := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul", "dc1", true, &pbmesh.ComputedProxyConfiguration{})
require.NotPanics(t, func() {
b.buildExposePaths(nil)
})
})
testutil.RunStep(t, "expose cfg is nil", func(t *testing.T) {
b := New(nil, nil, "foo.consul", "dc1", true, &pbmesh.ProxyConfiguration{
b := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul", "dc1", true, &pbmesh.ComputedProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{},
})
require.NotPanics(t, func() {
@ -51,7 +51,7 @@ func TestBuildExposePaths_NoExternalMeshWorkloadAddress(t *testing.T) {
},
}
proxycfg := &pbmesh.ProxyConfiguration{
proxycfg := &pbmesh.ComputedProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
ExposeConfig: &pbmesh.ExposeConfig{
ExposePaths: []*pbmesh.ExposePath{
@ -65,7 +65,7 @@ func TestBuildExposePaths_NoExternalMeshWorkloadAddress(t *testing.T) {
},
}
b := New(nil, nil, "foo.consul", "dc1", true, proxycfg)
b := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul", "dc1", true, proxycfg)
b.buildExposePaths(workload)
require.Empty(t, b.proxyStateTemplate.ProxyState.Listeners)
}
@ -81,7 +81,7 @@ func TestBuildExposePaths_InvalidProtocol(t *testing.T) {
},
}
proxycfg := &pbmesh.ProxyConfiguration{
proxycfg := &pbmesh.ComputedProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
ExposeConfig: &pbmesh.ExposeConfig{
ExposePaths: []*pbmesh.ExposePath{
@ -96,7 +96,7 @@ func TestBuildExposePaths_InvalidProtocol(t *testing.T) {
},
}
b := New(nil, nil, "foo.consul", "dc1", true, proxycfg)
b := New(testProxyStateTemplateID(), testIdentityRef(), "foo.consul", "dc1", true, proxycfg)
require.PanicsWithValue(t, "unsupported expose paths protocol", func() {
b.buildExposePaths(workload)
})

View File

@ -104,7 +104,7 @@ func TestBuildLocalApp(t *testing.T) {
func TestBuildLocalApp_WithProxyConfiguration(t *testing.T) {
cases := map[string]struct {
workload *pbcatalog.Workload
proxyCfg *pbmesh.ProxyConfiguration
proxyCfg *pbmesh.ComputedProxyConfiguration
}{
"source/l7-expose-paths": {
workload: &pbcatalog.Workload{
@ -118,7 +118,7 @@ func TestBuildLocalApp_WithProxyConfiguration(t *testing.T) {
"port2": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
},
},
proxyCfg: &pbmesh.ProxyConfiguration{
proxyCfg: &pbmesh.ComputedProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
ExposeConfig: &pbmesh.ExposeConfig{
ExposePaths: []*pbmesh.ExposePath{
@ -488,5 +488,6 @@ func testIdentityRef() *pbresource.Reference {
Partition: "default",
PeerName: "local",
},
Type: pbauth.WorkloadIdentityType,
}
}

View File

@ -72,6 +72,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -126,6 +126,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -72,6 +72,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -72,6 +72,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -35,6 +35,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -45,6 +45,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -158,6 +158,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -180,6 +180,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -99,6 +99,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -99,6 +99,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -26,6 +26,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -26,6 +26,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -26,6 +26,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -58,6 +58,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -42,6 +42,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -42,6 +42,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -42,6 +42,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -14,6 +14,11 @@
"namespace": "default",
"partition": "default",
"peerName": "local"
},
"type": {
"group": "auth",
"groupVersion": "v2beta1",
"kind": "WorkloadIdentity"
}
},
"listeners": [

View File

@ -0,0 +1,221 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cache
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/mappers/bimapper"
"github.com/hashicorp/consul/internal/storage"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
type Cache struct {
computedRoutes *bimapper.Mapper
identities *bimapper.Mapper
computedDestinations *bimapper.Mapper
}
func New() *Cache {
return &Cache{
computedRoutes: bimapper.New(pbmesh.ComputedRoutesType, pbcatalog.ServiceType),
identities: bimapper.New(pbcatalog.WorkloadType, pbauth.WorkloadIdentityType),
computedDestinations: bimapper.New(pbmesh.ComputedExplicitDestinationsType, pbcatalog.ServiceType),
}
}
func (c *Cache) TrackComputedDestinations(computedDestinations *types.DecodedComputedDestinations) {
var serviceRefs []resource.ReferenceOrID
for _, dest := range computedDestinations.Data.Destinations {
serviceRefs = append(serviceRefs, dest.DestinationRef)
}
c.computedDestinations.TrackItem(computedDestinations.Resource.Id, serviceRefs)
}
func (c *Cache) UntrackComputedDestinations(computedDestinationsID *pbresource.ID) {
c.computedDestinations.UntrackItem(computedDestinationsID)
}
func (c *Cache) UntrackComputedRoutes(computedRoutesID *pbresource.ID) {
c.computedRoutes.UntrackItem(computedRoutesID)
}
func (c *Cache) TrackWorkload(workload *types.DecodedWorkload) {
identityID := &pbresource.ID{
Name: workload.GetData().Identity,
Tenancy: workload.GetResource().Id.Tenancy,
Type: pbauth.WorkloadIdentityType,
}
c.identities.TrackItem(workload.GetResource().GetId(), []resource.ReferenceOrID{identityID})
}
// UntrackWorkload removes tracking for the given workload ID.
func (c *Cache) UntrackWorkload(wID *pbresource.ID) {
c.identities.UntrackItem(wID)
}
func (c *Cache) ComputedDestinationsByService(id resource.ReferenceOrID) []*pbresource.ID {
return c.computedDestinations.ItemIDsForLink(id)
}
func (c *Cache) trackComputedRoutes(computedRoutes *types.DecodedComputedRoutes) {
var serviceRefs []resource.ReferenceOrID
for _, pcr := range computedRoutes.Data.PortedConfigs {
for _, details := range pcr.Targets {
serviceRefs = append(serviceRefs, details.BackendRef.Ref)
}
}
c.computedRoutes.TrackItem(computedRoutes.Resource.Id, serviceRefs)
}
func (c *Cache) computedRoutesByService(id resource.ReferenceOrID) []*pbresource.ID {
return c.computedRoutes.ItemIDsForLink(id)
}
func (c *Cache) WorkloadsByWorkloadIdentity(id *pbresource.ID) []*pbresource.ID {
return c.identities.ItemIDsForLink(id)
}
func (c *Cache) MapComputedRoutes(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
computedRoutes, err := resource.Decode[*pbmesh.ComputedRoutes](res)
if err != nil {
return nil, err
}
ids, err := c.mapComputedRoutesToProxyStateTemplate(ctx, rt, res.Id)
if err != nil {
return nil, err
}
c.trackComputedRoutes(computedRoutes)
return controller.MakeRequests(pbmesh.ProxyStateTemplateType, ids), nil
}
func (c *Cache) mapComputedRoutesToProxyStateTemplate(ctx context.Context, rt controller.Runtime, computedRoutesID *pbresource.ID) ([]*pbresource.ID, error) {
// Each Destination gets a single ComputedRoutes.
serviceID := resource.ReplaceType(pbcatalog.ServiceType, computedRoutesID)
serviceRef := resource.Reference(serviceID, "")
return c.mapServiceThroughDestinations(ctx, rt, serviceRef)
}
func (c *Cache) MapService(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
serviceRef := resource.Reference(res.Id, "")
pstIDs, err := c.mapServiceThroughDestinations(ctx, rt, serviceRef)
if err != nil {
return nil, err
}
// Now walk the mesh configuration information backwards because
// we need to find any PST that needs to DISCOVER endpoints for this
// service as a part of mesh configuration and traffic routing.
// Find all ComputedRoutes that reference this service.
routeIDs := c.computedRoutesByService(serviceRef)
for _, routeID := range routeIDs {
// Find all Upstreams that reference a Service aligned with this ComputedRoutes.
// Afterwards, find all Workloads selected by the Upstreams, and align a PST with those.
ids, err := c.mapComputedRoutesToProxyStateTemplate(ctx, rt, routeID)
if err != nil {
return nil, err
}
pstIDs = append(pstIDs, ids...)
}
return controller.MakeRequests(pbmesh.ProxyStateTemplateType, pstIDs), nil
}
// mapServiceThroughDestinations takes an explicit
// Service and traverses back through Destinations to Workloads to
// ProxyStateTemplates.
//
// This is in a separate function so it can be chained for more complicated
// relationships.
func (c *Cache) mapServiceThroughDestinations(
ctx context.Context,
rt controller.Runtime,
serviceRef *pbresource.Reference,
) ([]*pbresource.ID, error) {
// The relationship is:
//
// - PST (replace type) Workload
// - Workload (name-aligned) ComputedDestinations
// - ComputedDestinations (contains) Service
//
// When we wake up for Service we should:
//
// - look up computed destinations for the service
// - rewrite computed destination types to PST
var pstIDs []*pbresource.ID
// Get all source proxies if they're referenced in any explicit destinations from computed destinations (name-aligned with workload/PST).
sources := c.ComputedDestinationsByService(serviceRef)
for _, cdID := range sources {
pstIDs = append(pstIDs, resource.ReplaceType(pbmesh.ProxyStateTemplateType, cdID))
}
// TODO(v2): remove this after we can do proper performant implicit upstream determination
//
// TODO(rb): shouldn't this instead list all Workloads that have a mesh port?
allIDs, err := c.listAllProxyStateTemplatesTemporarily(ctx, rt, serviceRef.Tenancy)
if err != nil {
return nil, err
}
pstIDs = append(pstIDs, allIDs...)
return pstIDs, nil
}
func (c *Cache) listAllProxyStateTemplatesTemporarily(ctx context.Context, rt controller.Runtime, tenancy *pbresource.Tenancy) ([]*pbresource.ID, error) {
// todo (ishustava): this is a stub for now until we implement implicit destinations.
// For tproxy, we generate requests for all proxy states in the cluster.
// This will generate duplicate events for proxies already added above,
// however, we expect that the controller runtime will de-dup for us.
rsp, err := rt.Client.List(ctx, &pbresource.ListRequest{
Type: pbmesh.ProxyStateTemplateType,
Tenancy: &pbresource.Tenancy{
Namespace: storage.Wildcard,
Partition: tenancy.Partition,
PeerName: tenancy.PeerName,
},
})
if err != nil {
return nil, err
}
result := make([]*pbresource.ID, 0, len(rsp.Resources))
for _, r := range rsp.Resources {
result = append(result, r.Id)
}
return result, nil
}
func (c *Cache) MapComputedTrafficPermissions(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
var ctp pbauth.ComputedTrafficPermissions
err := res.Data.UnmarshalTo(&ctp)
if err != nil {
return nil, err
}
workloadIdentityID := resource.ReplaceType(pbauth.WorkloadIdentityType, res.Id)
ids := c.WorkloadsByWorkloadIdentity(workloadIdentityID)
return controller.MakeRequests(pbmesh.ProxyStateTemplateType, ids), nil
}

View File

@ -0,0 +1,420 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cache
import (
"context"
"testing"
"github.com/stretchr/testify/require"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/routestest"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestIdentities(t *testing.T) {
cache := New()
identityID1 := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
identityID2 := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
w1 := resourcetest.Resource(pbcatalog.WorkloadType, "service-workload-1").
WithData(t, &pbcatalog.Workload{
Identity: identityID1.Name,
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
decW1 := resourcetest.MustDecode[*pbcatalog.Workload](t, w1)
w2 := resourcetest.Resource(pbcatalog.WorkloadType, "service-workload-2").
WithData(t, &pbcatalog.Workload{
Identity: identityID2.Name,
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
decW2 := resourcetest.MustDecode[*pbcatalog.Workload](t, w2)
// Empty cache
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID1))
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID2))
// Insert value and fetch it.
cache.TrackWorkload(decW1)
require.Equal(t, []*pbresource.ID{w1.Id}, cache.WorkloadsByWorkloadIdentity(identityID1))
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID2))
// Insert another value referencing the same identity.
decW2.GetData().Identity = identityID1.Name
cache.TrackWorkload(decW2)
require.ElementsMatch(t, []*pbresource.ID{w1.Id, w2.Id}, cache.WorkloadsByWorkloadIdentity(identityID1))
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID2))
// Now workload 1 uses identity 2
decW1.GetData().Identity = identityID2.Name
cache.TrackWorkload(decW1)
require.Equal(t, []*pbresource.ID{w1.Id}, cache.WorkloadsByWorkloadIdentity(identityID2))
require.Equal(t, []*pbresource.ID{w2.Id}, cache.WorkloadsByWorkloadIdentity(identityID1))
// Untrack workload 2
cache.UntrackWorkload(w2.Id)
require.Equal(t, []*pbresource.ID{w1.Id}, cache.WorkloadsByWorkloadIdentity(identityID2))
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID1))
// Untrack workload 1
cache.UntrackWorkload(w1.Id)
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID2))
require.Nil(t, cache.WorkloadsByWorkloadIdentity(identityID1))
}
func TestMapComputedTrafficPermissions(t *testing.T) {
client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
ctp := resourcetest.Resource(pbauth.ComputedTrafficPermissionsType, "workload-identity-1").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbauth.ComputedTrafficPermissions{}).
Build()
c := New()
// Empty results when the cache isn't populated.
requests, err := c.MapComputedTrafficPermissions(context.Background(), controller.Runtime{Client: client}, ctp)
require.NoError(t, err)
require.Len(t, requests, 0)
identityID1 := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
w1 := resourcetest.Resource(pbcatalog.WorkloadType, "service-workload-1").
WithData(t, &pbcatalog.Workload{
Identity: identityID1.Name,
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
decW1 := resourcetest.MustDecode[*pbcatalog.Workload](t, w1)
w2 := resourcetest.Resource(pbcatalog.WorkloadType, "service-workload-2").
WithData(t, &pbcatalog.Workload{
Identity: identityID1.Name,
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
decW2 := resourcetest.MustDecode[*pbcatalog.Workload](t, w2)
c.TrackWorkload(decW1)
// After tracking the first workload, expect a request for its proxy state template.
requests, err = c.MapComputedTrafficPermissions(context.Background(), controller.Runtime{Client: client}, ctp)
require.NoError(t, err)
prototest.AssertElementsMatch(t,
[]controller.Request{{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w1.Id)}}, requests)
c.TrackWorkload(decW2)
// After tracking the second workload, expect requests for both proxy state templates.
requests, err = c.MapComputedTrafficPermissions(context.Background(), controller.Runtime{Client: client}, ctp)
require.NoError(t, err)
prototest.AssertElementsMatch(t, []controller.Request{
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w1.Id)},
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w2.Id)},
}, requests)
}
func TestUnified_AllMappingsToProxyStateTemplate(t *testing.T) {
var (
cache = New()
client = svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
)
anyServiceData := &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
{
TargetPort: "tcp1",
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
},
{
TargetPort: "tcp2",
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
},
{
TargetPort: "mesh",
Protocol: pbcatalog.Protocol_PROTOCOL_MESH,
},
},
}
// The thing we link through Destinations.
destService := resourcetest.Resource(pbcatalog.ServiceType, "web").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, anyServiceData).
Build()
destServiceRef := resource.Reference(destService.Id, "")
// The thing we reach through the mesh config.
targetService := resourcetest.Resource(pbcatalog.ServiceType, "db").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, anyServiceData).
Build()
targetServiceRef := resource.Reference(targetService.Id, "")
backupTargetService := resourcetest.Resource(pbcatalog.ServiceType, "db-backup").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, anyServiceData).
Build()
backupTargetServiceRef := resource.Reference(backupTargetService.Id, "")
// The way we make 'web' actually route traffic to 'db'.
tcpRoute := resourcetest.Resource(pbmesh.TCPRouteType, "tcp-route").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbmesh.TCPRoute{
ParentRefs: []*pbmesh.ParentReference{{
Ref: destServiceRef,
}},
Rules: []*pbmesh.TCPRouteRule{{
BackendRefs: []*pbmesh.TCPBackendRef{{
BackendRef: &pbmesh.BackendReference{
Ref: targetServiceRef,
},
}},
}},
}).
Build()
failoverPolicy := resourcetest.ResourceID(resource.ReplaceType(pbcatalog.FailoverPolicyType, targetService.Id)).
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.FailoverPolicy{
Config: &pbcatalog.FailoverConfig{
Destinations: []*pbcatalog.FailoverDestination{{
Ref: backupTargetServiceRef,
}},
},
}).
Build()
webRoutes := routestest.BuildComputedRoutes(t, resource.ReplaceType(pbmesh.ComputedRoutesType, destService.Id),
resourcetest.MustDecode[*pbcatalog.Service](t, destService),
resourcetest.MustDecode[*pbcatalog.Service](t, targetService),
resourcetest.MustDecode[*pbcatalog.Service](t, backupTargetService),
resourcetest.MustDecode[*pbmesh.TCPRoute](t, tcpRoute),
resourcetest.MustDecode[*pbcatalog.FailoverPolicy](t, failoverPolicy),
)
var (
sourceProxy1 = newID(pbmesh.ProxyStateTemplateType, "src-workload-1")
sourceProxy2 = newID(pbmesh.ProxyStateTemplateType, "src-workload-2")
sourceProxy3 = newID(pbmesh.ProxyStateTemplateType, "src-workload-3")
sourceProxy4 = newID(pbmesh.ProxyStateTemplateType, "src-workload-4")
sourceProxy5 = newID(pbmesh.ProxyStateTemplateType, "src-workload-5")
sourceProxy6 = newID(pbmesh.ProxyStateTemplateType, "src-workload-6")
)
compDestProxy1 := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, sourceProxy1.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: destServiceRef,
DestinationPort: "tcp1",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
compDestProxy2 := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, sourceProxy2.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: destServiceRef,
DestinationPort: "tcp1",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
compDestProxy3 := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, sourceProxy3.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: destServiceRef,
DestinationPort: "tcp2",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
compDestProxy4 := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, sourceProxy4.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: destServiceRef,
DestinationPort: "tcp2",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
compDestProxy5 := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, sourceProxy5.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: destServiceRef,
DestinationPort: "mesh",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
compDestProxy6 := resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, sourceProxy6.Name).
WithData(t, &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: destServiceRef,
DestinationPort: "mesh",
},
},
}).
WithTenancy(resource.DefaultNamespacedTenancy()).
Build()
cache.trackComputedRoutes(webRoutes)
cache.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDestProxy1))
cache.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDestProxy2))
cache.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDestProxy3))
cache.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDestProxy4))
cache.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDestProxy5))
cache.TrackComputedDestinations(resourcetest.MustDecode[*pbmesh.ComputedExplicitDestinations](t, compDestProxy6))
t.Run("Service", func(t *testing.T) {
t.Run("map dest service", func(t *testing.T) {
requests, err := cache.MapService(
context.Background(),
controller.Runtime{Client: client},
destService,
)
require.NoError(t, err)
// Only wake up things with dest as an upstream.
expRequests := []controller.Request{
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
t.Run("map target endpoints (TCPRoute)", func(t *testing.T) {
requests, err := cache.MapService(
context.Background(),
controller.Runtime{Client: client},
targetService,
)
require.NoError(t, err)
requests = testDeduplicateRequests(requests)
expRequests := []controller.Request{
// Wake up everything with destService as a destination, because this target service is referenced by destService's TCPRoute.
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
t.Run("map backup target endpoints (FailoverPolicy)", func(t *testing.T) {
requests, err := cache.MapService(
context.Background(),
controller.Runtime{Client: client},
backupTargetService,
)
require.NoError(t, err)
requests = testDeduplicateRequests(requests)
expRequests := []controller.Request{
// Wake up everything with destService as a destination, because this backup service is referenced via the FailoverPolicy.
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
})
t.Run("ComputedRoutes", func(t *testing.T) {
t.Run("map web routes", func(t *testing.T) {
requests, err := cache.MapComputedRoutes(
context.Background(),
controller.Runtime{Client: client},
webRoutes.Resource,
)
require.NoError(t, err)
// Only wake up things with dest as an upstream.
expRequests := []controller.Request{
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
})
}
func newID(typ *pbresource.Type, name string) *pbresource.ID {
return &pbresource.ID{
Type: typ,
Tenancy: resource.DefaultNamespacedTenancy(),
Name: name,
}
}
func testDeduplicateRequests(reqs []controller.Request) []controller.Request {
type resID struct {
resource.ReferenceKey
UID string
}
out := make([]controller.Request, 0, len(reqs))
seen := make(map[resID]struct{})
for _, req := range reqs {
rid := resID{
ReferenceKey: resource.NewReferenceKey(req.ID),
UID: req.ID.Uid,
}
if _, ok := seen[rid]; !ok {
out = append(out, req)
seen[rid] = struct{}{}
}
}
return out
}

View File

@ -10,10 +10,9 @@ import (
"google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/builder"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/fetcher"
"github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
@ -28,90 +27,82 @@ const ControllerName = "consul.io/sidecar-proxy-controller"
type TrustDomainFetcher func() (string, error)
func Controller(
destinationsCache *sidecarproxycache.DestinationsCache,
proxyCfgCache *sidecarproxycache.ProxyConfigurationCache,
computedRoutesCache *sidecarproxycache.ComputedRoutesCache,
identitiesCache *sidecarproxycache.IdentitiesCache,
mapper *sidecarproxymapper.Mapper,
cache *cache.Cache,
trustDomainFetcher TrustDomainFetcher,
dc string,
defaultAllow bool,
) controller.Controller {
if destinationsCache == nil || proxyCfgCache == nil || computedRoutesCache == nil || identitiesCache == nil || mapper == nil || trustDomainFetcher == nil {
panic("destinations cache, proxy configuration cache, computed routes cache, identities cache, mapper, and trust domain fetcher are required")
if cache == nil || trustDomainFetcher == nil {
panic("cache and trust domain fetcher are required")
}
/*
Workload <align> PST
Upstreams <select> PST(==Workload)
Upstreams <contain> Service(upstream)
ProxyConfiguration <select> PST(==Workload)
ComputedRoutes <align> Service(upstream)
ComputedRoutes <contain> Service(disco)
ServiceEndpoints <align> Service(disco)
Workload <align> PST
ComputedDestinations <align> PST(==Workload)
ComputedDestinations <contain> Service(destinations)
ComputedProxyConfiguration <align> PST(==Workload)
ComputedRoutes <align> Service(upstream)
ComputedRoutes <contain> Service(disco)
ComputedTrafficPermissions <align> WorkloadIdentity
Workload <contain> WorkloadIdentity
These relationships then dicate the following reconcile logic.
These relationships then dictate the following reconcile logic.
controller: read workload for PST
controller: read previous PST
controller: read ProxyConfiguration for Workload
controller: use cached Upstreams data to walk explicit upstreams
<EXPLICIT-for-each>
fetcher: read Upstreams to find single Upstream
fetcher: read Service(upstream)
fetcher: read ComputedRoutes
<TARGET-for-each>
fetcher: read ServiceEndpoints
</TARGET-for-each>
</EXPLICIT-for-each>
<IMPLICIT>
fetcher: list ALL ComputedRoutes
<CR-for-each>
fetcher: read Service(upstream)
controller: read workload for PST
controller: read previous PST
controller: read ComputedProxyConfiguration for Workload
controller: read ComputedDestinations for workload to walk explicit upstreams
controller: read ComputedTrafficPermissions for workload using workload.identity field.
<EXPLICIT-for-each>
fetcher: read Service(Destination)
fetcher: read ComputedRoutes
<TARGET-for-each>
fetcher: read ServiceEndpoints
</TARGET-for-each>
</CR-for-each>
</IMPLICIT>
</EXPLICIT-for-each>
<IMPLICIT>
fetcher: list ALL ComputedRoutes
<CR-for-each>
fetcher: read Service(upstream)
<TARGET-for-each>
fetcher: read ServiceEndpoints
</TARGET-for-each>
</CR-for-each>
</IMPLICIT>
*/
/*
Which means for equivalence, the following mapper relationships should exist:
Which means for equivalence, the following mapper relationships should exist:
Service: find upstreams with Service; Recurse(Upstreams)
ServiceEndpoints: ServiceEndpoints=>Service; find ComputedRoutes with this in a Target or FailoverConfig; Recurse(ComputedRoutes)
Upstreams: use selector to select workloads; workloads=>PST
ProxyConfiguration: use selector to select workloads; workloads=>PST
ComputedRoutes: CR=>Service; find upstreams with Service; Recurse(Upstreams)
[implicit/temp]: trigger all
Service: find destinations with Service; Recurse(ComputedDestinations);
find ComputedRoutes with this in a Target or FailoverConfig; Recurse(ComputedRoutes)
ComputedDestinations: replace type CED=>PST
ComputedProxyConfiguration: replace type CPC=>PST
ComputedRoutes: CR=>Service; find destinations with Service; Recurse(Destinations)
[implicit/temp]: trigger all
ComputedTrafficPermissions: CTP is name-aligned with WorkloadIdentity; find cached workloads using that identity; workloads=>PST reconcile requests
*/
return controller.ForType(pbmesh.ProxyStateTemplateType).
WithWatch(pbcatalog.ServiceType, mapper.MapServiceToProxyStateTemplate).
WithWatch(pbcatalog.ServiceEndpointsType, mapper.MapServiceEndpointsToProxyStateTemplate).
WithWatch(pbmesh.DestinationsType, mapper.MapDestinationsToProxyStateTemplate).
WithWatch(pbmesh.ProxyConfigurationType, mapper.MapProxyConfigurationToProxyStateTemplate).
WithWatch(pbmesh.ComputedRoutesType, mapper.MapComputedRoutesToProxyStateTemplate).
WithWatch(pbauth.ComputedTrafficPermissionsType, mapper.MapComputedTrafficPermissionsToProxyStateTemplate).
WithWatch(pbcatalog.ServiceType, cache.MapService).
WithWatch(pbcatalog.WorkloadType, controller.ReplaceType(pbmesh.ProxyStateTemplateType)).
WithWatch(pbmesh.ComputedExplicitDestinationsType, controller.ReplaceType(pbmesh.ProxyStateTemplateType)).
WithWatch(pbmesh.ComputedProxyConfigurationType, controller.ReplaceType(pbmesh.ProxyStateTemplateType)).
WithWatch(pbmesh.ComputedRoutesType, cache.MapComputedRoutes).
WithWatch(pbauth.ComputedTrafficPermissionsType, cache.MapComputedTrafficPermissions).
WithReconciler(&reconciler{
destinationsCache: destinationsCache,
proxyCfgCache: proxyCfgCache,
computedRoutesCache: computedRoutesCache,
identitiesCache: identitiesCache,
getTrustDomain: trustDomainFetcher,
dc: dc,
defaultAllow: defaultAllow,
cache: cache,
getTrustDomain: trustDomainFetcher,
dc: dc,
defaultAllow: defaultAllow,
})
}
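For the simple name-aligned watches above, the commit relies on controller.ReplaceType rather than a custom mapper. As a rough, hedged sketch (assuming the same imports already present in this file, and not the actual internal/controller implementation), a replace-type dependency mapper boils down to:
// replaceTypeMapper is an illustrative sketch of what a replace-type watch mapper does:
// any event on the watched resource becomes one reconcile request whose ID keeps the
// name and tenancy but swaps in the desired type (ProxyStateTemplate in the watches above).
func replaceTypeMapper(desiredType *pbresource.Type) func(context.Context, controller.Runtime, *pbresource.Resource) ([]controller.Request, error) {
	return func(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
		return []controller.Request{
			{ID: resource.ReplaceType(desiredType, res.Id)},
		}, nil
	}
}
This suffices for Workload, ComputedExplicitDestinations, and ComputedProxyConfiguration because all three are name-aligned with the ProxyStateTemplate they affect; only Service, ComputedRoutes, and ComputedTrafficPermissions need the cache-backed mappers.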
type reconciler struct {
destinationsCache *sidecarproxycache.DestinationsCache
proxyCfgCache *sidecarproxycache.ProxyConfigurationCache
computedRoutesCache *sidecarproxycache.ComputedRoutesCache
identitiesCache *sidecarproxycache.IdentitiesCache
getTrustDomain TrustDomainFetcher
defaultAllow bool
dc string
cache *cache.Cache
getTrustDomain TrustDomainFetcher
defaultAllow bool
dc string
}
func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error {
@ -120,13 +111,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
rt.Logger.Trace("reconciling proxy state template")
// Instantiate a data fetcher to fetch all reconciliation data.
dataFetcher := fetcher.New(
rt.Client,
r.destinationsCache,
r.proxyCfgCache,
r.computedRoutesCache,
r.identitiesCache,
)
dataFetcher := fetcher.New(rt.Client, r.cache)
// Check if the workload exists.
workloadID := resource.ReplaceType(pbcatalog.WorkloadType, req.ID)
@ -153,7 +138,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
rt.Logger.Trace("proxy state template for this workload doesn't yet exist; generating a new one")
}
if !fetcher.IsWorkloadMeshEnabled(workload.Data.Ports) {
if !workload.GetData().IsMeshEnabled() {
// Skip non-mesh workloads.
// If there's existing proxy state template, delete it.
@ -164,9 +149,6 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
rt.Logger.Error("error deleting existing proxy state template", "error", err)
return err
}
// Remove it from destinationsCache.
r.destinationsCache.DeleteSourceProxy(req.ID)
}
rt.Logger.Trace("skipping proxy state template generation because workload is not on the mesh", "workload", workload.Resource.Id)
return nil
@ -180,7 +162,7 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
}
// Fetch proxy configuration.
proxyCfg, err := dataFetcher.FetchAndMergeProxyConfigurations(ctx, req.ID)
proxyCfg, err := dataFetcher.FetchComputedProxyConfiguration(ctx, req.ID)
if err != nil {
rt.Logger.Error("error fetching proxy and merging proxy configurations", "error", err)
return err
@ -197,23 +179,18 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
ctp = trafficPermissions.Data
}
b := builder.New(req.ID, identityRefFromWorkload(workload), trustDomain, r.dc, r.defaultAllow, proxyCfg).
b := builder.New(req.ID, identityRefFromWorkload(workload), trustDomain, r.dc, r.defaultAllow, proxyCfg.GetData()).
BuildLocalApp(workload.Data, ctp)
// Get all destinationsData.
destinationsRefs := r.destinationsCache.DestinationsBySourceProxy(req.ID)
if len(destinationsRefs) > 0 {
rt.Logger.Trace("found destinations for this proxy", "id", req.ID, "destination_refs", destinationsRefs)
} else {
rt.Logger.Trace("did not find any destinations for this proxy", "id", req.ID)
}
destinationsData, statuses, err := dataFetcher.FetchExplicitDestinationsData(ctx, destinationsRefs)
destinationsData, err := dataFetcher.FetchExplicitDestinationsData(ctx, req.ID)
if err != nil {
rt.Logger.Error("error fetching explicit destinations for this proxy", "error", err)
return err
}
if proxyCfg.IsTransparentProxy() {
if proxyCfg.GetData() != nil && proxyCfg.GetData().IsTransparentProxy() {
rt.Logger.Trace("transparent proxy is enabled; fetching implicit destinations")
destinationsData, err = dataFetcher.FetchImplicitDestinationsData(ctx, req.ID, destinationsData)
if err != nil {
rt.Logger.Error("error fetching implicit destinations for this proxy", "error", err)
@ -250,26 +227,6 @@ func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req c
rt.Logger.Trace("proxy state template data has not changed, skipping update")
}
// Update any statuses.
for _, status := range statuses {
updatedStatus := &pbresource.Status{
ObservedGeneration: status.Generation,
}
updatedStatus.Conditions = status.Conditions
// If the status is unchanged then we should return and avoid the unnecessary write
if !resource.EqualStatus(status.OldStatus[ControllerName], updatedStatus, false) {
rt.Logger.Trace("updating status", "id", status.ID)
_, err = rt.Client.WriteStatus(ctx, &pbresource.WriteStatusRequest{
Id: status.ID,
Key: ControllerName,
Status: updatedStatus,
})
if err != nil {
rt.Logger.Error("error writing new status", "id", status.ID, "error", err)
return err
}
}
}
return nil
}
@ -277,6 +234,7 @@ func identityRefFromWorkload(w *types.DecodedWorkload) *pbresource.Reference {
return &pbresource.Reference{
Name: w.Data.Identity,
Tenancy: w.Resource.Id.Tenancy,
Type: pbauth.WorkloadIdentityType,
}
}

View File

@ -16,11 +16,9 @@ import (
"github.com/hashicorp/consul/internal/auth"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/routestest"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/builder"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status"
"github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
@ -69,9 +67,7 @@ func (suite *meshControllerTestSuite) SetupTest() {
suite.ctx = testutil.TestContext(suite.T())
suite.ctl = &reconciler{
destinationsCache: sidecarproxycache.NewDestinationsCache(),
proxyCfgCache: sidecarproxycache.NewProxyConfigurationCache(),
identitiesCache: sidecarproxycache.NewIdentitiesCache(),
cache: cache.New(),
getTrustDomain: func() (string, error) {
return "test.consul", nil
},
@ -229,9 +225,11 @@ func (suite *meshControllerTestSuite) SetupTest() {
identityRef := &pbresource.Reference{
Name: suite.apiWorkload.Identity,
Tenancy: suite.apiWorkloadID.Tenancy,
Type: pbauth.WorkloadIdentityType,
}
suite.proxyStateTemplate = builder.New(suite.apiWorkloadID, identityRef, "test.consul", "dc1", false, nil).
suite.proxyStateTemplate = builder.New(resource.ReplaceType(pbmesh.ProxyStateTemplateType, suite.apiWorkloadID),
identityRef, "test.consul", "dc1", false, nil).
BuildLocalApp(suite.apiWorkload, suite.apiComputedTrafficPermissionsData).
Build()
}
@ -353,16 +351,10 @@ func (suite *meshControllerTestSuite) TestController() {
mgr := controller.NewManager(suite.client, suite.runtime.Logger)
// Initialize controller dependencies.
var (
destinationsCache = sidecarproxycache.NewDestinationsCache()
proxyCfgCache = sidecarproxycache.NewProxyConfigurationCache()
computedRoutesCache = sidecarproxycache.NewComputedRoutesCache()
identitiesCache = sidecarproxycache.NewIdentitiesCache()
m = sidecarproxymapper.New(destinationsCache, proxyCfgCache, computedRoutesCache, identitiesCache)
)
c := cache.New()
trustDomainFetcher := func() (string, error) { return "test.consul", nil }
mgr.Register(Controller(destinationsCache, proxyCfgCache, computedRoutesCache, identitiesCache, m, trustDomainFetcher, "dc1", false))
mgr.Register(Controller(c, trustDomainFetcher, "dc1", false))
mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx)
@ -374,9 +366,9 @@ func (suite *meshControllerTestSuite) TestController() {
apiComputedRoutesID = resource.ReplaceType(pbmesh.ComputedRoutesType, suite.apiService.Id)
dbComputedRoutesID = resource.ReplaceType(pbmesh.ComputedRoutesType, suite.dbService.Id)
apiProxyStateTemplate *pbresource.Resource
webProxyStateTemplate *pbresource.Resource
webDestinations *pbresource.Resource
apiProxyStateTemplate *pbresource.Resource
webProxyStateTemplate *pbresource.Resource
webComputedDestinations *pbresource.Resource
)
testutil.RunStep(suite.T(), "proxy state template generation", func(t *testing.T) {
@ -394,9 +386,8 @@ func (suite *meshControllerTestSuite) TestController() {
)
// Add a source service and check that a new proxy state is generated.
webDestinations = resourcetest.Resource(pbmesh.DestinationsType, "web-destinations").
WithData(suite.T(), &pbmesh.Destinations{
Workloads: &pbcatalog.WorkloadSelector{Names: []string{"web-def"}},
webComputedDestinations = resourcetest.Resource(pbmesh.ComputedExplicitDestinationsType, suite.webWorkload.Id.Name).
WithData(suite.T(), &pbmesh.ComputedExplicitDestinations{
Destinations: []*pbmesh.Destination{
{
DestinationRef: resource.Reference(suite.apiService.Id, ""),
@ -471,11 +462,6 @@ func (suite *meshControllerTestSuite) TestController() {
suite.client.RequireResourceNotFound(r, apiProxyStateTemplateID)
})
// Check status on the pbmesh.Destinations resource.
serviceRef := resource.ReferenceToString(resource.Reference(suite.apiService.Id, ""))
suite.client.WaitForStatusCondition(t, webDestinations.Id, ControllerName,
status.ConditionMeshProtocolNotFound(serviceRef))
// We should get a new web proxy template resource because this destination should be removed.
webProxyStateTemplate = suite.client.WaitForNewVersion(t, webProxyStateTemplateID, webProxyStateTemplate.Version)
@ -504,10 +490,6 @@ func (suite *meshControllerTestSuite) TestController() {
resourcetest.MustDecode[*pbcatalog.Service](t, suite.apiService),
)
serviceRef := resource.ReferenceToString(resource.Reference(suite.apiService.Id, ""))
suite.client.WaitForStatusCondition(t, webDestinations.Id, ControllerName,
status.ConditionMeshProtocolFound(serviceRef))
// We should also get a new web proxy template resource as this destination should be added again.
webProxyStateTemplate = suite.client.WaitForNewVersion(t, webProxyStateTemplateID, webProxyStateTemplate.Version)
@ -527,10 +509,10 @@ func (suite *meshControllerTestSuite) TestController() {
testutil.RunStep(suite.T(), "add implicit upstream and enable tproxy", func(t *testing.T) {
// Delete explicit destinations resource.
suite.runtime.Logger.Trace("deleting web destinations")
_, err := suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: webDestinations.Id})
_, err := suite.client.Delete(suite.ctx, &pbresource.DeleteRequest{Id: webComputedDestinations.Id})
require.NoError(t, err)
webProxyStateTemplate = suite.client.WaitForNewVersion(suite.T(), webProxyStateTemplateID, webProxyStateTemplate.Version)
webProxyStateTemplate = suite.client.WaitForNewVersion(t, webProxyStateTemplateID, webProxyStateTemplate.Version)
// Write a default ComputedRoutes for db, so it's eligible.
dbCR := routestest.ReconcileComputedRoutes(suite.T(), suite.client, dbComputedRoutesID,
@ -539,11 +521,8 @@ func (suite *meshControllerTestSuite) TestController() {
require.NotNil(t, dbCR)
// Enable transparent proxy for the web proxy.
resourcetest.Resource(pbmesh.ProxyConfigurationType, "proxy-config").
WithData(t, &pbmesh.ProxyConfiguration{
Workloads: &pbcatalog.WorkloadSelector{
Prefixes: []string{"web"},
},
resourcetest.Resource(pbmesh.ComputedProxyConfigurationType, suite.webWorkload.Id.Name).
WithData(t, &pbmesh.ComputedProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
TransparentProxy: &pbmesh.TransparentProxy{
@ -637,16 +616,10 @@ func (suite *meshControllerTestSuite) TestControllerDefaultAllow() {
mgr := controller.NewManager(suite.client, suite.runtime.Logger)
// Initialize controller dependencies.
var (
destinationsCache = sidecarproxycache.NewDestinationsCache()
proxyCfgCache = sidecarproxycache.NewProxyConfigurationCache()
computedRoutesCache = sidecarproxycache.NewComputedRoutesCache()
identitiesCache = sidecarproxycache.NewIdentitiesCache()
m = sidecarproxymapper.New(destinationsCache, proxyCfgCache, computedRoutesCache, identitiesCache)
)
c := cache.New()
trustDomainFetcher := func() (string, error) { return "test.consul", nil }
mgr.Register(Controller(destinationsCache, proxyCfgCache, computedRoutesCache, identitiesCache, m, trustDomainFetcher, "dc1", true))
mgr.Register(Controller(c, trustDomainFetcher, "dc1", true))
mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx)

View File

@ -8,12 +8,9 @@ import (
"fmt"
"strings"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
ctrlStatus "github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/cache"
"github.com/hashicorp/consul/internal/mesh/internal/types"
intermediateTypes "github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
"github.com/hashicorp/consul/internal/resource"
@ -25,72 +22,50 @@ import (
)
type Fetcher struct {
Client pbresource.ResourceServiceClient
DestinationsCache *sidecarproxycache.DestinationsCache
ProxyCfgCache *sidecarproxycache.ProxyConfigurationCache
ComputedRoutesCache *sidecarproxycache.ComputedRoutesCache
IdentitiesCache *sidecarproxycache.IdentitiesCache
client pbresource.ResourceServiceClient
cache *cache.Cache
}
func New(
client pbresource.ResourceServiceClient,
dCache *sidecarproxycache.DestinationsCache,
pcfgCache *sidecarproxycache.ProxyConfigurationCache,
computedRoutesCache *sidecarproxycache.ComputedRoutesCache,
iCache *sidecarproxycache.IdentitiesCache,
) *Fetcher {
func New(client pbresource.ResourceServiceClient, cache *cache.Cache) *Fetcher {
return &Fetcher{
Client: client,
DestinationsCache: dCache,
ProxyCfgCache: pcfgCache,
ComputedRoutesCache: computedRoutesCache,
IdentitiesCache: iCache,
client: client,
cache: cache,
}
}
func (f *Fetcher) FetchWorkload(ctx context.Context, id *pbresource.ID) (*types.DecodedWorkload, error) {
proxyID := resource.ReplaceType(pbmesh.ProxyStateTemplateType, id)
dec, err := resource.GetDecodedResource[*pbcatalog.Workload](ctx, f.Client, id)
dec, err := resource.GetDecodedResource[*pbcatalog.Workload](ctx, f.client, id)
if err != nil {
return nil, err
} else if dec == nil {
// We also need to make sure to delete the associated proxy from cache.
// We are ignoring errors from cache here as this deletion is best effort.
f.DestinationsCache.DeleteSourceProxy(proxyID)
f.ProxyCfgCache.UntrackProxyID(proxyID)
f.IdentitiesCache.UntrackProxyID(proxyID)
f.cache.UntrackWorkload(id)
return nil, nil
}
identityID := &pbresource.ID{
Name: dec.Data.Identity,
Tenancy: dec.Resource.Id.Tenancy,
Type: pbauth.WorkloadIdentityType,
}
f.IdentitiesCache.TrackPair(identityID, proxyID)
f.cache.TrackWorkload(dec)
return dec, err
}
func (f *Fetcher) FetchProxyStateTemplate(ctx context.Context, id *pbresource.ID) (*types.DecodedProxyStateTemplate, error) {
return resource.GetDecodedResource[*pbmesh.ProxyStateTemplate](ctx, f.Client, id)
return resource.GetDecodedResource[*pbmesh.ProxyStateTemplate](ctx, f.client, id)
}
func (f *Fetcher) FetchComputedTrafficPermissions(ctx context.Context, id *pbresource.ID) (*types.DecodedComputedTrafficPermissions, error) {
return resource.GetDecodedResource[*pbauth.ComputedTrafficPermissions](ctx, f.Client, id)
return resource.GetDecodedResource[*pbauth.ComputedTrafficPermissions](ctx, f.client, id)
}
func (f *Fetcher) FetchServiceEndpoints(ctx context.Context, id *pbresource.ID) (*types.DecodedServiceEndpoints, error) {
return resource.GetDecodedResource[*pbcatalog.ServiceEndpoints](ctx, f.Client, id)
return resource.GetDecodedResource[*pbcatalog.ServiceEndpoints](ctx, f.client, id)
}
func (f *Fetcher) FetchService(ctx context.Context, id *pbresource.ID) (*types.DecodedService, error) {
return resource.GetDecodedResource[*pbcatalog.Service](ctx, f.Client, id)
return resource.GetDecodedResource[*pbcatalog.Service](ctx, f.client, id)
}
func (f *Fetcher) FetchDestinations(ctx context.Context, id *pbresource.ID) (*types.DecodedDestinations, error) {
return resource.GetDecodedResource[*pbmesh.Destinations](ctx, f.Client, id)
return resource.GetDecodedResource[*pbmesh.Destinations](ctx, f.client, id)
}
func (f *Fetcher) FetchComputedRoutes(ctx context.Context, id *pbresource.ID) (*types.DecodedComputedRoutes, error) {
@ -98,11 +73,11 @@ func (f *Fetcher) FetchComputedRoutes(ctx context.Context, id *pbresource.ID) (*
return nil, fmt.Errorf("id must be a ComputedRoutes type")
}
dec, err := resource.GetDecodedResource[*pbmesh.ComputedRoutes](ctx, f.Client, id)
dec, err := resource.GetDecodedResource[*pbmesh.ComputedRoutes](ctx, f.client, id)
if err != nil {
return nil, err
} else if dec == nil {
f.ComputedRoutesCache.UntrackComputedRoutes(id)
f.cache.UntrackComputedRoutes(id)
}
return dec, err
@ -110,120 +85,84 @@ func (f *Fetcher) FetchComputedRoutes(ctx context.Context, id *pbresource.ID) (*
func (f *Fetcher) FetchExplicitDestinationsData(
ctx context.Context,
explDestRefs []intermediateTypes.CombinedDestinationRef,
) ([]*intermediateTypes.Destination, map[string]*intermediateTypes.Status, error) {
var (
destinations []*intermediateTypes.Destination
statuses = make(map[string]*intermediateTypes.Status)
)
proxyID *pbresource.ID,
) ([]*intermediateTypes.Destination, error) {
for _, dest := range explDestRefs {
// Fetch Destinations resource if there is one.
us, err := f.FetchDestinations(ctx, dest.ExplicitDestinationsID)
if err != nil {
// If there's an error, return and force another reconcile instead of computing
// partial proxy state.
return nil, statuses, err
}
var destinations []*intermediateTypes.Destination
if us == nil {
// If the Destinations resource is not found, then we should delete it from cache and continue.
f.DestinationsCache.DeleteDestination(dest.ServiceRef, dest.Port)
continue
}
// Fetch computed explicit destinations first.
cdID := resource.ReplaceType(pbmesh.ComputedExplicitDestinationsType, proxyID)
cd, err := resource.GetDecodedResource[*pbmesh.ComputedExplicitDestinations](ctx, f.client, cdID)
if err != nil {
return nil, err
}
if cd == nil {
f.cache.UntrackComputedDestinations(cdID)
return nil, nil
}
// Otherwise, track this resource in the destinations cache.
f.cache.TrackComputedDestinations(cd)
for _, dest := range cd.GetData().GetDestinations() {
d := &intermediateTypes.Destination{}
var (
serviceID = resource.IDFromReference(dest.ServiceRef)
serviceRef = resource.ReferenceToString(dest.ServiceRef)
upstreamsRef = resource.IDToString(us.Resource.Id)
serviceID = resource.IDFromReference(dest.DestinationRef)
)
// Fetch Service
svc, err := f.FetchService(ctx, serviceID)
if err != nil {
return nil, statuses, err
return nil, err
}
if svc == nil {
// If the Service resource is not found, then we update the status
// of the Upstreams resource but don't remove it from cache in case
// it comes back.
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionDestinationServiceNotFound(serviceRef))
// If the Service resource is not found, skip this destination.
continue
} else {
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionDestinationServiceFound(serviceRef))
}
d.Service = svc
// Check if this endpoints is mesh-enabled. If not, remove it from cache and return an error.
if !IsMeshEnabled(svc.Data.Ports) {
// Add invalid status but don't remove from cache. If this state changes,
// we want to be able to detect this change.
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshProtocolNotFound(serviceRef))
// Check if this service is mesh-enabled. If not, skip this destination.
if !svc.GetData().IsMeshEnabled() {
// This error should not cause the execution to stop, as we want to make sure that this non-mesh destination
// gets removed from the proxy state.
// service gets removed from the proxy state.
continue
}
// Check if the desired port exists on the service and skip it if it doesn't.
if svc.GetData().FindServicePort(dest.DestinationPort) == nil {
continue
} else {
// If everything was successful, add an empty condition so that we can remove any existing statuses.
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshProtocolFound(serviceRef))
}
// No destination port should point to a port with "mesh" protocol,
// so check if destination port has the mesh protocol and update the status.
if isServicePortMeshProtocol(svc.Data.Ports, dest.Port) {
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionMeshProtocolDestinationPort(serviceRef, dest.Port))
// so check if destination port has the mesh protocol and skip it if it does.
if svc.GetData().FindServicePort(dest.DestinationPort).GetProtocol() == pbcatalog.Protocol_PROTOCOL_MESH {
continue
} else {
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation, ctrlStatus.ConditionNonMeshProtocolDestinationPort(serviceRef, dest.Port))
}
// Fetch ComputedRoutes.
cr, err := f.FetchComputedRoutes(ctx, resource.ReplaceType(pbmesh.ComputedRoutesType, serviceID))
if err != nil {
return nil, statuses, err
return nil, err
} else if cr == nil {
// This is required, so wait until it exists.
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation,
ctrlStatus.ConditionDestinationComputedRoutesNotFound(serviceRef))
continue
} else {
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation,
ctrlStatus.ConditionDestinationComputedRoutesFound(serviceRef))
}
portConfig, ok := cr.Data.PortedConfigs[dest.Port]
portConfig, ok := cr.Data.PortedConfigs[dest.DestinationPort]
if !ok {
// This is required, so wait until it exists.
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation,
ctrlStatus.ConditionDestinationComputedRoutesPortNotFound(serviceRef, dest.Port))
continue
} else {
updateStatusCondition(statuses, upstreamsRef, dest.ExplicitDestinationsID,
us.Resource.Status, us.Resource.Generation,
ctrlStatus.ConditionDestinationComputedRoutesPortFound(serviceRef, dest.Port))
}
// Copy this so we can mutate the targets.
d.ComputedPortRoutes = proto.Clone(portConfig).(*pbmesh.ComputedPortRoutes)
// As Destinations resource contains a list of destinations,
// we need to find the one that references our service and port.
d.Explicit = findDestination(dest.ServiceRef, dest.Port, us.Data)
if d.Explicit == nil {
continue // the cache is out of sync
}
d.Explicit = dest
// NOTE: we collect both DIRECT and INDIRECT target information here.
for _, routeTarget := range d.ComputedPortRoutes.Targets {
@ -232,7 +171,7 @@ func (f *Fetcher) FetchExplicitDestinationsData(
// Fetch ServiceEndpoints.
se, err := f.FetchServiceEndpoints(ctx, resource.ReplaceType(pbcatalog.ServiceEndpointsType, targetServiceID))
if err != nil {
return nil, statuses, err
return nil, err
}
if se != nil {
@ -241,9 +180,9 @@ func (f *Fetcher) FetchExplicitDestinationsData(
// Gather all identities.
var identities []*pbresource.Reference
for _, ep := range se.Data.Endpoints {
for _, identity := range se.GetData().GetIdentities() {
identities = append(identities, &pbresource.Reference{
Name: ep.Identity,
Name: identity,
Tenancy: se.Resource.Id.Tenancy,
})
}
@ -254,7 +193,7 @@ func (f *Fetcher) FetchExplicitDestinationsData(
destinations = append(destinations, d)
}
return destinations, statuses, nil
return destinations, nil
}
type PortReferenceKey struct {
@ -284,7 +223,7 @@ func (f *Fetcher) FetchImplicitDestinationsData(
}
// For now we need to look up all computed routes within a partition.
rsp, err := f.Client.List(ctx, &pbresource.ListRequest{
rsp, err := f.client.List(ctx, &pbresource.ListRequest{
Type: pbmesh.ComputedRoutesType,
Tenancy: &pbresource.Tenancy{
Namespace: storage.Wildcard,
@ -408,115 +347,12 @@ func (f *Fetcher) FetchImplicitDestinationsData(
return addToDestinations, err
}
// FetchAndMergeProxyConfigurations fetches proxy configurations for the proxy state template provided by id
// FetchComputedProxyConfiguration fetches the computed proxy configuration
// for the proxy state template provided by id.
func (f *Fetcher) FetchAndMergeProxyConfigurations(ctx context.Context, id *pbresource.ID) (*pbmesh.ProxyConfiguration, error) {
proxyCfgRefs := f.ProxyCfgCache.ProxyConfigurationsByProxyID(id)
func (f *Fetcher) FetchComputedProxyConfiguration(ctx context.Context, id *pbresource.ID) (*types.DecodedComputedProxyConfiguration, error) {
compProxyCfgID := resource.ReplaceType(pbmesh.ComputedProxyConfigurationType, id)
result := &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{},
}
for _, ref := range proxyCfgRefs {
proxyCfgID := &pbresource.ID{
Name: ref.GetName(),
Type: ref.GetType(),
Tenancy: ref.GetTenancy(),
}
rsp, err := f.Client.Read(ctx, &pbresource.ReadRequest{
Id: proxyCfgID,
})
switch {
case status.Code(err) == codes.NotFound:
f.ProxyCfgCache.UntrackProxyConfiguration(proxyCfgID)
return nil, nil
case err != nil:
return nil, err
}
var proxyCfg pbmesh.ProxyConfiguration
err = rsp.Resource.Data.UnmarshalTo(&proxyCfg)
if err != nil {
return nil, err
}
// Note that we only care about dynamic config as bootstrap config
// will not be updated dynamically by this controller.
// todo (ishustava): do sorting etc.
proto.Merge(result.DynamicConfig, proxyCfg.DynamicConfig)
}
// Default the outbound listener port. If we don't do the nil check here, then BuildDestinations will panic creating
// the outbound listener.
if result.DynamicConfig.TransparentProxy == nil {
result.DynamicConfig.TransparentProxy = &pbmesh.TransparentProxy{OutboundListenerPort: 15001}
}
return result, nil
}
// IsWorkloadMeshEnabled returns true if the workload or service endpoints port
// contain a port with the "mesh" protocol.
func IsWorkloadMeshEnabled(ports map[string]*pbcatalog.WorkloadPort) bool {
for _, port := range ports {
if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
return true
}
}
return false
}
// IsMeshEnabled returns true if the service ports contain a port with the
// "mesh" protocol.
func IsMeshEnabled(ports []*pbcatalog.ServicePort) bool {
for _, port := range ports {
if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
return true
}
}
return false
}
func isServicePortMeshProtocol(ports []*pbcatalog.ServicePort, name string) bool {
sp := findServicePort(ports, name)
return sp != nil && sp.Protocol == pbcatalog.Protocol_PROTOCOL_MESH
}
func findServicePort(ports []*pbcatalog.ServicePort, name string) *pbcatalog.ServicePort {
for _, port := range ports {
if port.TargetPort == name {
return port
}
}
return nil
}
func findDestination(ref *pbresource.Reference, port string, destinations *pbmesh.Destinations) *pbmesh.Destination {
for _, destination := range destinations.Destinations {
if resource.EqualReference(ref, destination.DestinationRef) &&
port == destination.DestinationPort {
return destination
}
}
return nil
}
func updateStatusCondition(
statuses map[string]*intermediateTypes.Status,
key string,
id *pbresource.ID,
oldStatus map[string]*pbresource.Status,
generation string,
condition *pbresource.Condition) {
if _, ok := statuses[key]; ok {
statuses[key].Conditions = append(statuses[key].Conditions, condition)
} else {
statuses[key] = &intermediateTypes.Status{
ID: id,
Generation: generation,
Conditions: []*pbresource.Condition{condition},
OldStatus: oldStatus,
}
}
return resource.GetDecodedResource[*pbmesh.ComputedProxyConfiguration](ctx, f.client, compProxyCfgID)
}
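Callers of FetchComputedProxyConfiguration no longer merge anything; they read a single name-aligned resource that may not exist, and the reconciler above leans on nil-safe GetData()-style getters (proxyCfg.GetData() != nil && proxyCfg.GetData().IsTransparentProxy()). A minimal, hedged illustration of that nil-receiver pattern with hypothetical types (not Consul APIs):
// nilsafe_sketch.go - illustrative only. Mirrors the protoc-generated getters' habit of
// tolerating nil receivers, which is what keeps the transparent-proxy check above from
// panicking when no ComputedProxyConfiguration exists for the workload.
package main
import "fmt"
type dynamicConfig struct{ transparent bool }
type computedProxyConfiguration struct{ dynamic *dynamicConfig }
// GetDynamic returns nil when called on a nil receiver, like generated Get* methods.
func (c *computedProxyConfiguration) GetDynamic() *dynamicConfig {
	if c == nil {
		return nil
	}
	return c.dynamic
}
// IsTransparent is nil-safe as well, so missing config simply reads as "not transparent".
func (d *dynamicConfig) IsTransparent() bool {
	return d != nil && d.transparent
}
func main() {
	var missing *computedProxyConfiguration // resource not found: decoded data is nil
	fmt.Println(missing.GetDynamic().IsTransparent()) // false, no panic
	present := &computedProxyConfiguration{dynamic: &dynamicConfig{transparent: true}}
	fmt.Println(present.GetDynamic().IsTransparent()) // true
}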
func isPartOfService(workloadID *pbresource.ID, svc *types.DecodedService) bool {

View File

@ -1,119 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package status
import (
"fmt"
"github.com/hashicorp/consul/proto-public/pbresource"
)
const (
StatusConditionDestinationAccepted = "DestinationAccepted"
StatusReasonMeshProtocolNotFound = "MeshPortProtocolNotFound"
StatusReasonMeshProtocolFound = "MeshPortProtocolFound"
StatusReasonMeshProtocolDestinationPort = "DestinationWithMeshPortProtocol"
StatusReasonNonMeshProtocolDestinationPort = "DestinationWithNonMeshPortProtocol"
StatusReasonDestinationServiceNotFound = "ServiceNotFound"
StatusReasonDestinationServiceFound = "ServiceFound"
StatusReasonDestinationComputedRoutesNotFound = "ComputedRoutesNotFound"
StatusReasonDestinationComputedRoutesFound = "ComputedRoutesFound"
StatusReasonDestinationComputedRoutesPortNotFound = "ComputedRoutesPortNotFound"
StatusReasonDestinationComputedRoutesPortFound = "ComputedRoutesPortFound"
)
func ConditionMeshProtocolNotFound(serviceRef string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonMeshProtocolNotFound,
Message: fmt.Sprintf("service %q cannot be referenced as a Destination because it's not mesh-enabled.", serviceRef),
}
}
func ConditionMeshProtocolFound(serviceRef string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonMeshProtocolFound,
Message: fmt.Sprintf("service %q is on the mesh.", serviceRef),
}
}
func ConditionDestinationServiceNotFound(serviceRef string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonDestinationServiceNotFound,
Message: fmt.Sprintf("service %q does not exist.", serviceRef),
}
}
func ConditionDestinationServiceFound(serviceRef string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonDestinationServiceFound,
Message: fmt.Sprintf("service %q exists.", serviceRef),
}
}
func ConditionMeshProtocolDestinationPort(serviceRef, port string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonMeshProtocolDestinationPort,
Message: fmt.Sprintf("destination port %q for service %q has PROTOCOL_MESH which is unsupported for destination services", port, serviceRef),
}
}
func ConditionNonMeshProtocolDestinationPort(serviceRef, port string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonNonMeshProtocolDestinationPort,
Message: fmt.Sprintf("destination port %q for service %q has a non-mesh protocol", port, serviceRef),
}
}
func ConditionDestinationComputedRoutesNotFound(serviceRef string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonDestinationComputedRoutesNotFound,
Message: fmt.Sprintf("computed routes %q does not exist.", serviceRef),
}
}
func ConditionDestinationComputedRoutesFound(serviceRef string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonDestinationComputedRoutesNotFound,
Message: fmt.Sprintf("computed routes %q exists.", serviceRef),
}
}
func ConditionDestinationComputedRoutesPortNotFound(serviceRef, port string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonDestinationComputedRoutesPortNotFound,
Message: fmt.Sprintf("computed routes %q does not exist for port %q.", serviceRef, port),
}
}
func ConditionDestinationComputedRoutesPortFound(serviceRef, port string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionDestinationAccepted,
State: pbresource.Condition_STATE_TRUE,
Reason: StatusReasonDestinationComputedRoutesPortNotFound,
Message: fmt.Sprintf("computed routes %q exists for port %q.", serviceRef, port),
}
}

View File

@ -1,43 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
func (m *Mapper) MapComputedRoutesToProxyStateTemplate(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
computedRoutes, err := resource.Decode[*pbmesh.ComputedRoutes](res)
if err != nil {
return nil, err
}
reqs, err := m.mapComputedRoutesToProxyStateTemplate(ctx, rt, res.Id)
if err != nil {
return nil, err
}
m.computedRoutesCache.TrackComputedRoutes(computedRoutes)
return reqs, nil
}
func (m *Mapper) mapComputedRoutesToProxyStateTemplate(ctx context.Context, rt controller.Runtime, computedRoutesID *pbresource.ID) ([]controller.Request, error) {
// Each Destination gets a single ComputedRoutes.
serviceID := resource.ReplaceType(pbcatalog.ServiceType, computedRoutesID)
serviceRef := resource.Reference(serviceID, "")
ids, err := m.mapServiceThroughDestinationsToProxyStateTemplates(ctx, rt, serviceRef)
if err != nil {
return nil, err
}
return controller.MakeRequests(pbmesh.ProxyStateTemplateType, ids), nil
}

View File

@ -1,44 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
"github.com/hashicorp/consul/internal/resource"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
func (m *Mapper) MapDestinationsToProxyStateTemplate(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
destinations, err := resource.Decode[*pbmesh.Destinations](res)
if err != nil {
return nil, err
}
// Look up workloads for this destinations.
sourceProxyIDs := make(map[resource.ReferenceKey]struct{})
requests, err := mapSelectorToProxyStateTemplates(ctx, rt.Client, destinations.Data.Workloads, res.Id.Tenancy, func(id *pbresource.ID) {
sourceProxyIDs[resource.NewReferenceKey(id)] = struct{}{}
})
if err != nil {
return nil, err
}
// Add this destination to destinationsCache.
for _, destination := range destinations.Data.Destinations {
destinationRef := intermediate.CombinedDestinationRef{
ServiceRef: destination.DestinationRef,
Port: destination.DestinationPort,
ExplicitDestinationsID: res.Id,
SourceProxies: sourceProxyIDs,
}
m.destinationsCache.WriteDestination(destinationRef)
}
return requests, nil
}

View File

@ -1,120 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"testing"
"github.com/stretchr/testify/require"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestMapDestinationsToProxyStateTemplate(t *testing.T) {
client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
webWorkload1 := resourcetest.Resource(pbcatalog.WorkloadType, "web-abc").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.1"}},
Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}},
}).
Write(t, client)
webWorkload2 := resourcetest.Resource(pbcatalog.WorkloadType, "web-def").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.2"}},
Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}},
}).
Write(t, client)
webWorkload3 := resourcetest.Resource(pbcatalog.WorkloadType, "non-prefix-web").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "10.0.0.3"}},
Ports: map[string]*pbcatalog.WorkloadPort{"tcp": {Port: 8081, Protocol: pbcatalog.Protocol_PROTOCOL_TCP}},
}).
Write(t, client)
var (
api1ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "api-1").
WithTenancy(resource.DefaultNamespacedTenancy()).
ReferenceNoSection()
api2ServiceRef = resourcetest.Resource(pbcatalog.ServiceType, "api-2").
WithTenancy(resource.DefaultNamespacedTenancy()).
ReferenceNoSection()
)
webDestinationsData := &pbmesh.Destinations{
Workloads: &pbcatalog.WorkloadSelector{
Names: []string{"non-prefix-web"},
Prefixes: []string{"web"},
},
Destinations: []*pbmesh.Destination{
{
DestinationRef: api1ServiceRef,
DestinationPort: "tcp",
},
{
DestinationRef: api2ServiceRef,
DestinationPort: "tcp1",
},
{
DestinationRef: api2ServiceRef,
DestinationPort: "tcp2",
},
},
}
webDestinations := resourcetest.Resource(pbmesh.DestinationsType, "web-destinations").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, webDestinationsData).
Write(t, client)
c := sidecarproxycache.NewDestinationsCache()
mapper := &Mapper{destinationsCache: c}
expRequests := []controller.Request{
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, webWorkload1.Id)},
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, webWorkload2.Id)},
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, webWorkload3.Id)},
}
requests, err := mapper.MapDestinationsToProxyStateTemplate(context.Background(), controller.Runtime{Client: client}, webDestinations)
require.NoError(t, err)
prototest.AssertElementsMatch(t, expRequests, requests)
var (
proxy1ID = resourcetest.Resource(pbmesh.ProxyStateTemplateType, webWorkload1.Id.Name).
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxy2ID = resourcetest.Resource(pbmesh.ProxyStateTemplateType, webWorkload2.Id.Name).
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxy3ID = resourcetest.Resource(pbmesh.ProxyStateTemplateType, webWorkload3.Id.Name).
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
)
for _, u := range webDestinationsData.Destinations {
expDestination := intermediate.CombinedDestinationRef{
ServiceRef: u.DestinationRef,
Port: u.DestinationPort,
ExplicitDestinationsID: webDestinations.Id,
SourceProxies: map[resource.ReferenceKey]struct{}{
resource.NewReferenceKey(proxy1ID): {},
resource.NewReferenceKey(proxy2ID): {},
resource.NewReferenceKey(proxy3ID): {},
},
}
actualDestination, found := c.ReadDestination(u.DestinationRef, u.DestinationPort)
require.True(t, found)
prototest.AssertDeepEqual(t, expDestination, actualDestination)
}
}

View File

@ -1,82 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"fmt"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
type Mapper struct {
destinationsCache *sidecarproxycache.DestinationsCache
proxyCfgCache *sidecarproxycache.ProxyConfigurationCache
computedRoutesCache *sidecarproxycache.ComputedRoutesCache
identitiesCache *sidecarproxycache.IdentitiesCache
}
func New(
destinationsCache *sidecarproxycache.DestinationsCache,
proxyCfgCache *sidecarproxycache.ProxyConfigurationCache,
computedRoutesCache *sidecarproxycache.ComputedRoutesCache,
identitiesCache *sidecarproxycache.IdentitiesCache,
) *Mapper {
return &Mapper{
destinationsCache: destinationsCache,
proxyCfgCache: proxyCfgCache,
computedRoutesCache: computedRoutesCache,
identitiesCache: identitiesCache,
}
}
// mapSelectorToProxyStateTemplates returns ProxyStateTemplate requests given a workload
// selector and tenancy. The cacheFunc is invoked for each resulting ID so callers can cache them if needed.
func mapSelectorToProxyStateTemplates(ctx context.Context,
client pbresource.ResourceServiceClient,
selector *pbcatalog.WorkloadSelector,
tenancy *pbresource.Tenancy,
cacheFunc func(id *pbresource.ID)) ([]controller.Request, error) {
var result []controller.Request
for _, prefix := range selector.Prefixes {
resp, err := client.List(ctx, &pbresource.ListRequest{
Type: pbcatalog.WorkloadType,
Tenancy: tenancy,
NamePrefix: prefix,
})
if err != nil {
return nil, err
}
if len(resp.Resources) == 0 {
return nil, fmt.Errorf("no workloads found")
}
for _, r := range resp.Resources {
id := resource.ReplaceType(pbmesh.ProxyStateTemplateType, r.Id)
result = append(result, controller.Request{
ID: id,
})
cacheFunc(id)
}
}
for _, name := range selector.Names {
id := &pbresource.ID{
Name: name,
Tenancy: tenancy,
Type: pbmesh.ProxyStateTemplateType,
}
result = append(result, controller.Request{
ID: id,
})
cacheFunc(id)
}
return result, nil
}
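
Both mappers in this package use the cacheFunc callback to record the generated ProxyStateTemplate IDs as a side effect of building the reconcile requests. A minimal sketch of that pattern (illustrative only; ctx, rt, res, and selector are assumed to be in scope inside a mapper method):

var cachedIDs []*pbresource.ID
reqs, err := mapSelectorToProxyStateTemplates(ctx, rt.Client, selector, res.Id.Tenancy, func(id *pbresource.ID) {
	// Record each generated ProxyStateTemplate ID so the caller can cache it.
	cachedIDs = append(cachedIDs, id)
})
if err != nil {
	return nil, err
}
// reqs are handed back to the controller runtime as reconcile requests.
return reqs, nil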

View File

@ -1,80 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"testing"
"github.com/stretchr/testify/require"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestMapWorkloadsBySelector(t *testing.T) {
client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
// Create some workloads.
// For this test, we don't care about the workload data, so we will re-use
// the same data for all workloads.
workloadData := &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "127.0.0.1"}},
Ports: map[string]*pbcatalog.WorkloadPort{"p1": {Port: 8080}},
}
w1 := resourcetest.Resource(pbcatalog.WorkloadType, "w1").
WithData(t, workloadData).
Write(t, client).Id
w2 := resourcetest.Resource(pbcatalog.WorkloadType, "w2").
WithData(t, workloadData).
Write(t, client).Id
w3 := resourcetest.Resource(pbcatalog.WorkloadType, "prefix-w3").
WithData(t, workloadData).
Write(t, client).Id
w4 := resourcetest.Resource(pbcatalog.WorkloadType, "prefix-w4").
WithData(t, workloadData).
Write(t, client).Id
// This workload should not be used as it's not selected by the workload selector.
resourcetest.Resource(pbcatalog.WorkloadType, "not-selected-workload").
WithData(t, workloadData).
Write(t, client)
selector := &pbcatalog.WorkloadSelector{
Names: []string{"w1", "w2"},
Prefixes: []string{"prefix"},
}
expReqs := []controller.Request{
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w1)},
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w2)},
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w3)},
{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w4)},
}
var cachedReqs []controller.Request
reqs, err := mapSelectorToProxyStateTemplates(context.Background(), client, selector, defaultTenancy(), func(id *pbresource.ID) {
// save IDs to check that the cache func is called
cachedReqs = append(cachedReqs, controller.Request{ID: id})
})
require.NoError(t, err)
require.Len(t, reqs, len(expReqs))
prototest.AssertElementsMatch(t, expReqs, reqs)
prototest.AssertElementsMatch(t, expReqs, cachedReqs)
}
func defaultTenancy() *pbresource.Tenancy {
return &pbresource.Tenancy{
Namespace: "default",
Partition: "default",
PeerName: "local",
}
}

View File

@ -1,34 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
func (m *Mapper) MapProxyConfigurationToProxyStateTemplate(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
var proxyConfig pbmesh.ProxyConfiguration
err := res.Data.UnmarshalTo(&proxyConfig)
if err != nil {
return nil, err
}
var proxyIDs []resource.ReferenceOrID
requests, err := mapSelectorToProxyStateTemplates(ctx, rt.Client, proxyConfig.Workloads, res.Id.Tenancy, func(id *pbresource.ID) {
proxyIDs = append(proxyIDs, id)
})
if err != nil {
return nil, err
}
m.proxyCfgCache.TrackProxyConfiguration(res.Id, proxyIDs)
return requests, nil
}

View File

@ -1,78 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"testing"
"github.com/stretchr/testify/require"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestProxyConfigurationMapper(t *testing.T) {
client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
// Create some workloads.
// For this test, we don't care about the workload data, so we will re-use
// the same data for all workloads.
workloadData := &pbcatalog.Workload{
Addresses: []*pbcatalog.WorkloadAddress{{Host: "127.0.0.1"}},
Ports: map[string]*pbcatalog.WorkloadPort{"p1": {Port: 8080}},
}
w1 := resourcetest.Resource(pbcatalog.WorkloadType, "w1").
WithData(t, workloadData).
Write(t, client).Id
w2 := resourcetest.Resource(pbcatalog.WorkloadType, "w2").
WithData(t, workloadData).
Write(t, client).Id
// Create proxy configuration.
proxyCfgData := &pbmesh.ProxyConfiguration{
Workloads: &pbcatalog.WorkloadSelector{
Names: []string{"w1", "w2"},
},
}
pCfg := resourcetest.Resource(pbmesh.ProxyConfigurationType, "proxy-config").
WithData(t, proxyCfgData).
Write(t, client)
m := Mapper{proxyCfgCache: sidecarproxycache.NewProxyConfigurationCache()}
reqs, err := m.MapProxyConfigurationToProxyStateTemplate(context.Background(), controller.Runtime{
Client: client,
}, pCfg)
require.NoError(t, err)
p1 := resource.ReplaceType(pbmesh.ProxyStateTemplateType, w1)
p2 := resource.ReplaceType(pbmesh.ProxyStateTemplateType, w2)
expReqs := []controller.Request{
{ID: p1},
{ID: p2},
}
prototest.AssertElementsMatch(t, expReqs, reqs)
// Check that the cache is populated.
// Clean out UID as we don't care about it in the cache.
pCfg.Id.Uid = ""
prototest.AssertElementsMatch(t,
[]*pbresource.ID{pCfg.Id},
m.proxyCfgCache.ProxyConfigurationsByProxyID(p1))
prototest.AssertElementsMatch(t,
[]*pbresource.ID{pCfg.Id},
m.proxyCfgCache.ProxyConfigurationsByProxyID(p2))
}

View File

@ -1,71 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// MapServiceEndpointsToProxyStateTemplate maps catalog.ServiceEndpoints objects to the IDs of
// ProxyStateTemplate.
func (m *Mapper) MapServiceEndpointsToProxyStateTemplate(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
// This mapper has two jobs:
//
// 1. It needs to look up workload IDs from service endpoints and replace
// them with ProxyStateTemplate type. We do this so we don't need to watch
// Workloads to discover them, since ProxyStateTemplates are name-aligned
// with Workloads.
//
// 2. It needs to find any PST that needs to DISCOVER endpoints for this
// service as a part of mesh configuration and traffic routing.
serviceEndpoints, err := resource.Decode[*pbcatalog.ServiceEndpoints](res)
if err != nil {
return nil, err
}
var result []controller.Request
// (1) First, we need to generate requests for the workloads that these endpoints
// point to so that we can re-generate proxy state for their sidecar proxies.
for _, endpoint := range serviceEndpoints.Data.Endpoints {
// Convert the reference to a workload to a ProxyStateTemplate ID.
// Because these resources are name and tenancy aligned, we only need to change the type.
// Skip service endpoints without target refs. These resources would typically be created for
// services external to Consul, and we don't need to reconcile those as they don't have
// associated workloads.
if endpoint.TargetRef != nil {
result = append(result, controller.Request{
ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, endpoint.TargetRef),
})
}
}
// (2) Now walk the mesh configuration information backwards.
// ServiceEndpoints -> Service
targetServiceRef := resource.ReplaceType(pbcatalog.ServiceType, res.Id)
// Find all ComputedRoutes that reference this service.
routeIDs := m.computedRoutesCache.ComputedRoutesByService(targetServiceRef)
for _, routeID := range routeIDs {
// Find all Upstreams that reference a Service aligned with this ComputedRoutes.
// Afterwards, find all Workloads selected by the Upstreams, and align a PST with those.
reqs, err := m.mapComputedRoutesToProxyStateTemplate(ctx, rt, routeID)
if err != nil {
return nil, err
}
result = append(result, reqs...)
}
return result, nil
}
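
Because ProxyStateTemplate resources are name- and tenancy-aligned with Workloads, swapping the type on a workload reference is all the mapping above needs. A small illustrative sketch (the ID values are made up):

workloadID := &pbresource.ID{
	Type:    pbcatalog.WorkloadType,
	Name:    "web-abc",
	Tenancy: resource.DefaultNamespacedTenancy(),
}
// Only the type changes; the name and tenancy carry over unchanged.
pstID := resource.ReplaceType(pbmesh.ProxyStateTemplateType, workloadID)
_ = pstID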

View File

@ -1,95 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/storage"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
func (m *Mapper) MapServiceToProxyStateTemplate(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
serviceRef := resource.Reference(res.Id, "")
ids, err := m.mapServiceThroughDestinationsToProxyStateTemplates(ctx, rt, serviceRef)
if err != nil {
return nil, err
}
return controller.MakeRequests(pbmesh.ProxyStateTemplateType, ids), nil
}
// mapServiceThroughDestinationsToProxyStateTemplates takes an explicit
// Service and traverses back through Destinations to Workloads to
// ProxyStateTemplates.
//
// This is in a separate function so it can be chained for more complicated
// relationships.
func (m *Mapper) mapServiceThroughDestinationsToProxyStateTemplates(
ctx context.Context,
rt controller.Runtime,
serviceRef *pbresource.Reference,
) ([]*pbresource.ID, error) {
// The relationship is:
//
// - PST (replace type) Workload
// - Workload (selected by) Upstreams
// - Upstream (contains) Service
//
// When we wake up for Service we should:
//
// - look for Service in all Destinations (upstreams)
// - follow selectors backwards to Workloads
// - rewrite types to PST
var pstIDs []*pbresource.ID
destinations := m.destinationsCache.ReadDestinationsByServiceAllPorts(serviceRef)
for _, destination := range destinations {
for refKey := range destination.SourceProxies {
pstIDs = append(pstIDs, refKey.ToID())
}
}
// TODO(v2): remove this after we can do proper performant implicit upstream determination
//
// TODO(rb): shouldn't this instead list all Workloads that have a mesh port?
allIDs, err := m.listAllProxyStateTemplatesTemporarily(ctx, rt, serviceRef.Tenancy)
if err != nil {
return nil, err
}
pstIDs = append(pstIDs, allIDs...)
return pstIDs, nil
}
func (m *Mapper) listAllProxyStateTemplatesTemporarily(ctx context.Context, rt controller.Runtime, tenancy *pbresource.Tenancy) ([]*pbresource.ID, error) {
// todo (ishustava): this is a stub for now until we implement implicit destinations.
// For tproxy, we generate requests for all proxy states in the cluster.
// This will generate duplicate events for proxies already added above;
// however, we expect the controller runtime to de-dup them for us.
rsp, err := rt.Client.List(ctx, &pbresource.ListRequest{
Type: pbmesh.ProxyStateTemplateType,
Tenancy: &pbresource.Tenancy{
Namespace: storage.Wildcard,
Partition: tenancy.Partition,
PeerName: tenancy.PeerName,
},
})
if err != nil {
return nil, err
}
result := make([]*pbresource.ID, 0, len(rsp.Resources))
for _, r := range rsp.Resources {
result = append(result, r.Id)
}
return result, nil
}

View File

@ -1,34 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
func (m *Mapper) MapComputedTrafficPermissionsToProxyStateTemplate(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
var ctp pbauth.ComputedTrafficPermissions
err := res.Data.UnmarshalTo(&ctp)
if err != nil {
return nil, err
}
pid := resource.ReplaceType(pbauth.WorkloadIdentityType, res.Id)
ids := m.identitiesCache.ProxyIDsByWorkloadIdentity(pid)
requests := make([]controller.Request, 0, len(ids))
for _, id := range ids {
requests = append(requests, controller.Request{
ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, id)},
)
}
return requests, nil
}

View File

@ -1,63 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"testing"
"github.com/stretchr/testify/require"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestMapComputedTrafficPermissionsToProxyStateTemplate(t *testing.T) {
client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
ctp := resourcetest.Resource(pbauth.ComputedTrafficPermissionsType, "workload-identity-1").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbauth.ComputedTrafficPermissions{}).
Build()
i := sidecarproxycache.NewIdentitiesCache()
mapper := &Mapper{identitiesCache: i}
// Empty results when the cache isn't populated.
requests, err := mapper.MapComputedTrafficPermissionsToProxyStateTemplate(context.Background(), controller.Runtime{Client: client}, ctp)
require.NoError(t, err)
require.Len(t, requests, 0)
identityID1 := resourcetest.Resource(pbauth.WorkloadIdentityType, "workload-identity-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxyID1 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-1").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
proxyID2 := resourcetest.Resource(pbmesh.ProxyStateTemplateType, "service-workload-2").
WithTenancy(resource.DefaultNamespacedTenancy()).ID()
i.TrackPair(identityID1, proxyID1)
// Now that the cache is populated, the mapper should return the tracked proxy.
requests, err = mapper.MapComputedTrafficPermissionsToProxyStateTemplate(context.Background(), controller.Runtime{Client: client}, ctp)
require.NoError(t, err)
prototest.AssertElementsMatch(t, []controller.Request{{ID: proxyID1}}, requests)
i.TrackPair(identityID1, proxyID2)
// After tracking the second pair, the mapper should return both proxies.
requests, err = mapper.MapComputedTrafficPermissionsToProxyStateTemplate(context.Background(), controller.Runtime{Client: client}, ctp)
require.NoError(t, err)
prototest.AssertElementsMatch(t, []controller.Request{
{ID: proxyID1},
{ID: proxyID2},
}, requests)
}

View File

@ -1,404 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sidecarproxymapper
import (
"context"
"testing"
"github.com/stretchr/testify/require"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/routestest"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/mesh/internal/types/intermediate"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
func TestUnified_AllMappingsToProxyStateTemplate(t *testing.T) {
var (
destCache = sidecarproxycache.NewDestinationsCache()
// proxyCfgCache = sidecarproxycache.NewProxyConfigurationCache()
routesCache = sidecarproxycache.NewComputedRoutesCache()
mapper = New(destCache, nil, routesCache, nil)
client = svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
)
anyServiceData := &pbcatalog.Service{
Ports: []*pbcatalog.ServicePort{
{
TargetPort: "tcp1",
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
},
{
TargetPort: "tcp2",
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
},
{
TargetPort: "mesh",
Protocol: pbcatalog.Protocol_PROTOCOL_MESH,
},
},
}
anyWorkloadPorts := map[string]*pbcatalog.WorkloadPort{
"tcp1": {Port: 8080},
"tcp2": {Port: 8081},
"mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
}
// The thing we link through Destinations.
destService := resourcetest.Resource(pbcatalog.ServiceType, "web").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, anyServiceData).
Build()
destServiceRef := resource.Reference(destService.Id, "")
// The thing we reach through the mesh config.
targetService := resourcetest.Resource(pbcatalog.ServiceType, "db").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, anyServiceData).
Build()
targetServiceRef := resource.Reference(targetService.Id, "")
backupTargetService := resourcetest.Resource(pbcatalog.ServiceType, "db-backup").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, anyServiceData).
Build()
backupTargetServiceRef := resource.Reference(backupTargetService.Id, "")
// The way we make 'web' actually route traffic to 'db'.
tcpRoute := resourcetest.Resource(pbmesh.TCPRouteType, "tcp-route").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbmesh.TCPRoute{
ParentRefs: []*pbmesh.ParentReference{{
Ref: destServiceRef,
}},
Rules: []*pbmesh.TCPRouteRule{{
BackendRefs: []*pbmesh.TCPBackendRef{{
BackendRef: &pbmesh.BackendReference{
Ref: targetServiceRef,
},
}},
}},
}).
Build()
failoverPolicy := resourcetest.ResourceID(resource.ReplaceType(pbcatalog.FailoverPolicyType, targetService.Id)).
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.FailoverPolicy{
Config: &pbcatalog.FailoverConfig{
Destinations: []*pbcatalog.FailoverDestination{{
Ref: backupTargetServiceRef,
}},
},
}).
Build()
webRoutes := routestest.BuildComputedRoutes(t, resource.ReplaceType(pbmesh.ComputedRoutesType, destService.Id),
resourcetest.MustDecode[*pbcatalog.Service](t, destService),
resourcetest.MustDecode[*pbcatalog.Service](t, targetService),
resourcetest.MustDecode[*pbcatalog.Service](t, backupTargetService),
resourcetest.MustDecode[*pbmesh.TCPRoute](t, tcpRoute),
resourcetest.MustDecode[*pbcatalog.FailoverPolicy](t, failoverPolicy),
)
var (
destWorkload1 = newID(pbcatalog.WorkloadType, "dest-workload-1")
destWorkload2 = newID(pbcatalog.WorkloadType, "dest-workload-2")
destProxy1 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, destWorkload1)
destProxy2 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, destWorkload2)
)
// Endpoints for original destination
destEndpoints := resourcetest.ResourceID(resource.ReplaceType(pbcatalog.ServiceEndpointsType, destService.Id)).
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
{
TargetRef: destWorkload1,
Ports: anyWorkloadPorts,
},
{
TargetRef: destWorkload2,
Ports: anyWorkloadPorts,
},
},
}).
Build()
var (
targetWorkload1 = newID(pbcatalog.WorkloadType, "target-workload-1")
targetWorkload2 = newID(pbcatalog.WorkloadType, "target-workload-2")
targetWorkload3 = newID(pbcatalog.WorkloadType, "target-workload-3")
backupTargetWorkload1 = newID(pbcatalog.WorkloadType, "backup-target-workload-1")
backupTargetWorkload2 = newID(pbcatalog.WorkloadType, "backup-target-workload-2")
backupTargetWorkload3 = newID(pbcatalog.WorkloadType, "backup-target-workload-3")
targetProxy1 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, targetWorkload1)
targetProxy2 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, targetWorkload2)
targetProxy3 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, targetWorkload3)
backupTargetProxy1 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, backupTargetWorkload1)
backupTargetProxy2 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, backupTargetWorkload2)
backupTargetProxy3 = resource.ReplaceType(pbmesh.ProxyStateTemplateType, backupTargetWorkload3)
)
// Endpoints for actual destination
targetEndpoints := resourcetest.ResourceID(resource.ReplaceType(pbcatalog.ServiceEndpointsType, targetService.Id)).
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
{
TargetRef: targetWorkload1,
Ports: anyWorkloadPorts,
},
{
TargetRef: targetWorkload2,
Ports: anyWorkloadPorts,
},
{
TargetRef: targetWorkload3,
Ports: anyWorkloadPorts,
},
},
}).
Build()
backupTargetEndpoints := resourcetest.ResourceID(resource.ReplaceType(pbcatalog.ServiceEndpointsType, backupTargetService.Id)).
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbcatalog.ServiceEndpoints{
Endpoints: []*pbcatalog.Endpoint{
{
TargetRef: backupTargetWorkload1,
Ports: anyWorkloadPorts,
},
{
TargetRef: backupTargetWorkload2,
Ports: anyWorkloadPorts,
},
{
TargetRef: backupTargetWorkload3,
Ports: anyWorkloadPorts,
},
},
}).
Build()
var (
sourceProxy1 = newID(pbmesh.ProxyStateTemplateType, "src-workload-1")
sourceProxy2 = newID(pbmesh.ProxyStateTemplateType, "src-workload-2")
sourceProxy3 = newID(pbmesh.ProxyStateTemplateType, "src-workload-3")
sourceProxy4 = newID(pbmesh.ProxyStateTemplateType, "src-workload-4")
sourceProxy5 = newID(pbmesh.ProxyStateTemplateType, "src-workload-5")
sourceProxy6 = newID(pbmesh.ProxyStateTemplateType, "src-workload-6")
)
destination1 := intermediate.CombinedDestinationRef{
ServiceRef: destServiceRef,
Port: "tcp1",
SourceProxies: map[resource.ReferenceKey]struct{}{
resource.NewReferenceKey(sourceProxy1): {},
resource.NewReferenceKey(sourceProxy2): {},
},
}
destination2 := intermediate.CombinedDestinationRef{
ServiceRef: destServiceRef,
Port: "tcp2",
SourceProxies: map[resource.ReferenceKey]struct{}{
resource.NewReferenceKey(sourceProxy3): {},
resource.NewReferenceKey(sourceProxy4): {},
},
}
destination3 := intermediate.CombinedDestinationRef{
ServiceRef: destServiceRef,
Port: "mesh",
SourceProxies: map[resource.ReferenceKey]struct{}{
resource.NewReferenceKey(sourceProxy5): {},
resource.NewReferenceKey(sourceProxy6): {},
},
}
routesCache.TrackComputedRoutes(webRoutes)
destCache.WriteDestination(destination1)
destCache.WriteDestination(destination2)
destCache.WriteDestination(destination3)
t.Run("ServiceEndpoints", func(t *testing.T) {
t.Run("map dest endpoints", func(t *testing.T) {
requests, err := mapper.MapServiceEndpointsToProxyStateTemplate(
context.Background(),
controller.Runtime{Client: client},
destEndpoints,
)
require.NoError(t, err)
expRequests := []controller.Request{
// Just wakeup proxies for these workloads.
{ID: destProxy1},
{ID: destProxy2},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
t.Run("map target endpoints (TCPRoute)", func(t *testing.T) {
requests, err := mapper.MapServiceEndpointsToProxyStateTemplate(
context.Background(),
controller.Runtime{Client: client},
targetEndpoints,
)
require.NoError(t, err)
requests = testDeduplicateRequests(requests)
expRequests := []controller.Request{
// Wakeup proxies for these workloads.
{ID: targetProxy1},
{ID: targetProxy2},
{ID: targetProxy3},
// Also wakeup things that have destService as a destination b/c of the TCPRoute reference.
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
t.Run("map backup target endpoints (FailoverPolicy)", func(t *testing.T) {
requests, err := mapper.MapServiceEndpointsToProxyStateTemplate(
context.Background(),
controller.Runtime{Client: client},
backupTargetEndpoints,
)
require.NoError(t, err)
requests = testDeduplicateRequests(requests)
expRequests := []controller.Request{
// Wakeup proxies for these workloads.
{ID: backupTargetProxy1},
{ID: backupTargetProxy2},
{ID: backupTargetProxy3},
// Also wakeup things that have destService as a destination b/c of the FailoverPolicy reference.
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
})
t.Run("Service", func(t *testing.T) {
t.Run("map dest service", func(t *testing.T) {
requests, err := mapper.MapServiceToProxyStateTemplate(
context.Background(),
controller.Runtime{Client: client},
destService,
)
require.NoError(t, err)
// Only wake up things with dest as an upstream.
expRequests := []controller.Request{
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
t.Run("map target endpoints (TCPRoute)", func(t *testing.T) {
requests, err := mapper.MapServiceToProxyStateTemplate(
context.Background(),
controller.Runtime{Client: client},
targetService,
)
require.NoError(t, err)
requests = testDeduplicateRequests(requests)
// No upstream refers to the target service directly.
expRequests := []controller.Request{}
prototest.AssertElementsMatch(t, expRequests, requests)
})
})
t.Run("ComputedRoutes", func(t *testing.T) {
t.Run("map web routes", func(t *testing.T) {
requests, err := mapper.MapComputedRoutesToProxyStateTemplate(
context.Background(),
controller.Runtime{Client: client},
webRoutes.Resource,
)
require.NoError(t, err)
// Only wake up things with dest as an upstream.
expRequests := []controller.Request{
{ID: sourceProxy1},
{ID: sourceProxy2},
{ID: sourceProxy3},
{ID: sourceProxy4},
{ID: sourceProxy5},
{ID: sourceProxy6},
}
prototest.AssertElementsMatch(t, expRequests, requests)
})
})
}
func newRef(typ *pbresource.Type, name string) *pbresource.Reference {
return resource.Reference(newID(typ, name), "")
}
func newID(typ *pbresource.Type, name string) *pbresource.ID {
return &pbresource.ID{
Type: typ,
Tenancy: resource.DefaultNamespacedTenancy(),
Name: name,
}
}
func testDeduplicateRequests(reqs []controller.Request) []controller.Request {
type resID struct {
resource.ReferenceKey
UID string
}
out := make([]controller.Request, 0, len(reqs))
seen := make(map[resID]struct{})
for _, req := range reqs {
rid := resID{
ReferenceKey: resource.NewReferenceKey(req.ID),
UID: req.ID.Uid,
}
if _, ok := seen[rid]; !ok {
out = append(out, req)
seen[rid] = struct{}{}
}
}
return out
}

View File

@ -22,6 +22,8 @@ type (
DecodedServiceEndpoints = resource.DecodedResource[*pbcatalog.ServiceEndpoints]
DecodedWorkload = resource.DecodedResource[*pbcatalog.Workload]
DecodedProxyConfiguration = resource.DecodedResource[*pbmesh.ProxyConfiguration]
DecodedComputedProxyConfiguration = resource.DecodedResource[*pbmesh.ComputedProxyConfiguration]
DecodedDestinations = resource.DecodedResource[*pbmesh.Destinations]
DecodedComputedDestinations = resource.DecodedResource[*pbmesh.ComputedExplicitDestinations]
DecodedProxyStateTemplate = resource.DecodedResource[*pbmesh.ProxyStateTemplate]
)
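
These aliases are shorthand for resource.DecodedResource over the corresponding protobuf payloads. A rough sketch of how one is typically produced (assuming res is a *pbresource.Resource holding computed destinations data):

dec, err := resource.Decode[*pbmesh.ComputedExplicitDestinations](res)
if err != nil {
	// handle the decode error
}
// dec is a DecodedComputedDestinations: dec.Resource is the raw resource and
// dec.Data is the unmarshalled ComputedExplicitDestinations payload.
_ = dec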

View File

@ -5,28 +5,10 @@ package intermediate
import (
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// CombinedDestinationRef contains all references we need for a specific
// destination on the mesh.
type CombinedDestinationRef struct {
// ServiceRef is the reference to the destination service.
ServiceRef *pbresource.Reference
// Port is the port name for this destination.
Port string
// SourceProxies are the reference keys of source proxy state template resources.
SourceProxies map[resource.ReferenceKey]struct{}
// ExplicitDestinationsID is the id of the pbmesh.Destinations resource. For implicit destinations,
// this should be nil.
ExplicitDestinationsID *pbresource.ID
}
type Destination struct {
Explicit *pbmesh.Destination
Service *types.DecodedService // for the name of this destination

View File

@ -6,6 +6,8 @@ package types
import (
"github.com/hashicorp/consul/internal/resource"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/iptables"
)
func RegisterProxyConfiguration(r resource.Registry) {
@ -15,5 +17,35 @@ func RegisterProxyConfiguration(r resource.Registry) {
Scope: resource.ScopeNamespace,
// TODO(rb): add validation for proxy configuration
Validate: nil,
Mutate: MutateProxyConfiguration,
})
}
func MutateProxyConfiguration(res *pbresource.Resource) error {
var proxyCfg pbmesh.ProxyConfiguration
err := res.Data.UnmarshalTo(&proxyCfg)
if err != nil {
return resource.NewErrDataParse(&proxyCfg, err)
}
changed := false
// Default the tproxy outbound port.
if proxyCfg.IsTransparentProxy() {
if proxyCfg.GetDynamicConfig().GetTransparentProxy() == nil {
proxyCfg.DynamicConfig.TransparentProxy = &pbmesh.TransparentProxy{
OutboundListenerPort: iptables.DefaultTProxyOutboundPort,
}
changed = true
} else if proxyCfg.GetDynamicConfig().GetTransparentProxy().OutboundListenerPort == 0 {
proxyCfg.DynamicConfig.TransparentProxy.OutboundListenerPort = iptables.DefaultTProxyOutboundPort
changed = true
}
}
if !changed {
return nil
}
return res.Data.MarshalFrom(&proxyCfg)
}
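
As a rough usage sketch (not part of this change; assumes the anypb package from google.golang.org/protobuf is imported), a transparent-proxy ProxyConfiguration written without an explicit outbound port gets the default filled in by this hook:

cfg := &pbmesh.ProxyConfiguration{
	DynamicConfig: &pbmesh.DynamicConfig{
		Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
	},
}
anyData, err := anypb.New(cfg)
if err != nil {
	// handle the marshalling error
}
res := &pbresource.Resource{Data: anyData}
if err := MutateProxyConfiguration(res); err == nil {
	// res.Data now carries a TransparentProxy config with OutboundListenerPort
	// defaulted to iptables.DefaultTProxyOutboundPort.
}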

View File

@ -0,0 +1,84 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package types
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto/private/prototest"
"github.com/hashicorp/consul/sdk/iptables"
)
func TestMutateProxyConfiguration(t *testing.T) {
cases := map[string]struct {
data *pbmesh.ProxyConfiguration
expData *pbmesh.ProxyConfiguration
}{
"tproxy disabled": {
data: &pbmesh.ProxyConfiguration{},
expData: &pbmesh.ProxyConfiguration{},
},
"tproxy disabled explicitly": {
data: &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_DIRECT,
},
},
expData: &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_DIRECT,
},
},
},
"tproxy enabled and tproxy config is nil": {
data: &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
},
},
expData: &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
TransparentProxy: &pbmesh.TransparentProxy{
OutboundListenerPort: iptables.DefaultTProxyOutboundPort,
},
},
},
},
"tproxy enabled and tproxy config is empty": {
data: &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
TransparentProxy: &pbmesh.TransparentProxy{},
},
},
expData: &pbmesh.ProxyConfiguration{
DynamicConfig: &pbmesh.DynamicConfig{
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
TransparentProxy: &pbmesh.TransparentProxy{
OutboundListenerPort: iptables.DefaultTProxyOutboundPort,
},
},
},
},
}
for name, c := range cases {
t.Run(name, func(t *testing.T) {
res := resourcetest.Resource(pbmesh.ProxyConfigurationType, "test").
WithData(t, c.data).
Build()
err := MutateProxyConfiguration(res)
require.NoError(t, err)
got := resourcetest.MustDecode[*pbmesh.ProxyConfiguration](t, res)
prototest.AssertDeepEqual(t, c.expData, got.GetData())
})
}
}

View File

@ -6,7 +6,7 @@ import (
"github.com/stretchr/testify/require"
)
func TestIsMeshEnabled(t *testing.T) {
func TestServiceIsMeshEnabled(t *testing.T) {
cases := map[string]struct {
service *Service
exp bool

View File

@ -0,0 +1,26 @@
package catalogv2beta1
import (
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
// GetIdentities returns a sorted list of the unique identities that these service endpoints point to.
func (s *ServiceEndpoints) GetIdentities() []string {
uniqueIdentities := make(map[string]struct{})
for _, ep := range s.GetEndpoints() {
if ep.GetIdentity() != "" {
uniqueIdentities[ep.GetIdentity()] = struct{}{}
}
}
if len(uniqueIdentities) == 0 {
return nil
}
identities := maps.Keys(uniqueIdentities)
slices.Sort(identities)
return identities
}
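
For illustration, the helper de-duplicates identities across endpoints and returns them in sorted order (a minimal sketch):

se := &ServiceEndpoints{
	Endpoints: []*Endpoint{
		{Identity: "api"},
		{Identity: "api"},
		{Identity: "web"},
	},
}
// Returns the unique identities in sorted order: ["api", "web"].
ids := se.GetIdentities()
_ = ids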

View File

@ -0,0 +1,49 @@
package catalogv2beta1
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestServiceEndpoints_GetIdentities(t *testing.T) {
cases := map[string]struct {
endpoints []*Endpoint
expIdentities []string
}{
"no endpoints": {
endpoints: nil,
expIdentities: nil,
},
"no identities": {
endpoints: []*Endpoint{
{},
{},
},
expIdentities: nil,
},
"single identity": {
endpoints: []*Endpoint{
{Identity: "foo"},
{Identity: "foo"},
{Identity: "foo"},
},
expIdentities: []string{"foo"},
},
"multiple identities": {
endpoints: []*Endpoint{
{Identity: "foo"},
{Identity: "foo"},
{Identity: "bar"},
},
expIdentities: []string{"bar", "foo"},
},
}
for name, c := range cases {
t.Run(name, func(t *testing.T) {
se := &ServiceEndpoints{Endpoints: c.endpoints}
require.Equal(t, c.expIdentities, se.GetIdentities())
})
}
}

View File

@ -51,7 +51,7 @@ func TestGetMeshPort(t *testing.T) {
}
}
func TestIsMeshEnabled(t *testing.T) {
func TestWorkloadIsMeshEnabled(t *testing.T) {
cases := map[string]struct {
ports map[string]*WorkloadPort
exp bool

View File

@ -1,5 +1,10 @@
package meshv2beta1
func (p *ComputedProxyConfiguration) IsTransparentProxy() bool {
return p.GetDynamicConfig() != nil &&
p.DynamicConfig.Mode == ProxyMode_PROXY_MODE_TRANSPARENT
}
func (p *ProxyConfiguration) IsTransparentProxy() bool {
return p.GetDynamicConfig() != nil &&
p.DynamicConfig.Mode == ProxyMode_PROXY_MODE_TRANSPARENT

View File

@ -6,36 +6,30 @@ import (
"github.com/stretchr/testify/require"
)
func TestIsTransprentProxy(t *testing.T) {
func TestIsTransparentProxy(t *testing.T) {
cases := map[string]struct {
proxyCfg *ProxyConfiguration
exp bool
dynamicConfig *DynamicConfig
exp bool
}{
"nil dynamic config": {
proxyCfg: &ProxyConfiguration{},
exp: false,
dynamicConfig: nil,
exp: false,
},
"default mode": {
proxyCfg: &ProxyConfiguration{
DynamicConfig: &DynamicConfig{
Mode: ProxyMode_PROXY_MODE_DEFAULT,
},
dynamicConfig: &DynamicConfig{
Mode: ProxyMode_PROXY_MODE_DEFAULT,
},
exp: false,
},
"direct mode": {
proxyCfg: &ProxyConfiguration{
DynamicConfig: &DynamicConfig{
Mode: ProxyMode_PROXY_MODE_DEFAULT,
},
dynamicConfig: &DynamicConfig{
Mode: ProxyMode_PROXY_MODE_DEFAULT,
},
exp: false,
},
"transparent mode": {
proxyCfg: &ProxyConfiguration{
DynamicConfig: &DynamicConfig{
Mode: ProxyMode_PROXY_MODE_TRANSPARENT,
},
dynamicConfig: &DynamicConfig{
Mode: ProxyMode_PROXY_MODE_TRANSPARENT,
},
exp: true,
},
@ -43,7 +37,14 @@ func TestIsTransprentProxy(t *testing.T) {
for name, c := range cases {
t.Run(name, func(t *testing.T) {
require.Equal(t, c.exp, c.proxyCfg.IsTransparentProxy())
proxyCfg := &ProxyConfiguration{
DynamicConfig: c.dynamicConfig,
}
compProxyCfg := &ComputedProxyConfiguration{
DynamicConfig: c.dynamicConfig,
}
require.Equal(t, c.exp, proxyCfg.IsTransparentProxy())
require.Equal(t, c.exp, compProxyCfg.IsTransparentProxy())
})
}
}