mesh: Add ComputedProxyConfiguration and a controller that computes it. (#19043)

* Introduce a new type `ComputedProxyConfiguration` and add a controller for it. This is needed for two reasons. The first one is that external integrations like kubernetes may need to read the fully computed and sorted proxy configuration per workload. The second reason is that it makes sidecar-proxy controller logic quite a bit simpler, as it no longer needs to do this.
* Generalize workload selection mapper and fix a bug where it would delete IDs from the tree if only one is left after a removal is done.
This commit is contained in:
Iryna Shustava 2023-10-10 17:34:53 -06:00 committed by GitHub
parent 679b0f650f
commit c35df12c95
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 1826 additions and 326 deletions

View File

@ -11,10 +11,10 @@ import (
"github.com/hashicorp/consul/internal/catalog/internal/controllers/workloadhealth"
"github.com/hashicorp/consul/internal/catalog/internal/mappers/failovermapper"
"github.com/hashicorp/consul/internal/catalog/internal/mappers/nodemapper"
"github.com/hashicorp/consul/internal/catalog/internal/mappers/selectiontracker"
"github.com/hashicorp/consul/internal/catalog/internal/types"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/mappers/selectiontracker"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)

View File

@ -12,9 +12,9 @@ import (
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog/internal/controllers/workloadhealth"
"github.com/hashicorp/consul/internal/catalog/internal/mappers/selectiontracker"
"github.com/hashicorp/consul/internal/catalog/internal/types"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource/mappers/selectiontracker"
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"

View File

@ -1,278 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package selectiontracker
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/radix"
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
var (
	// workloadData is the minimal workload payload shared by all fixtures in
	// this file: a single address and one HTTP port.
	workloadData = &pbcatalog.Workload{
		Addresses: []*pbcatalog.WorkloadAddress{
			{
				Host: "198.18.0.1",
			},
		},
		Ports: map[string]*pbcatalog.WorkloadPort{
			"http": {
				Port:     8080,
				Protocol: pbcatalog.Protocol_PROTOCOL_HTTP,
			},
		},
	}
)
// TestRemoveIDFromTreeAtPaths exercises every slice-modification branch of
// removeIDFromTreeAtPaths: removal from the beginning, middle, and end of a
// request list, removal of the only element (which must delete the tree node
// entirely), and paths whose lists do not contain the ID at all.
func TestRemoveIDFromTreeAtPaths(t *testing.T) {
	tree := radix.New[[]controller.Request]()

	toRemove := rtest.Resource(pbcatalog.ServiceEndpointsType, "blah").ID()
	other1 := rtest.Resource(pbcatalog.ServiceEndpointsType, "other1").ID()
	// BUG FIX: this previously reused the name "other1", making other1 and
	// other2 IDs of the same resource; the second ID must be distinct for the
	// test to exercise lists of two different non-matching IDs.
	other2 := rtest.Resource(pbcatalog.ServiceEndpointsType, "other2").ID()

	// we are trying to create a tree such that removal of the toRemove id causes a
	// few things to happen.
	//
	// * All the slice modification conditions are executed
	//   - removal from beginning of the list
	//   - removal from the end of the list
	//   - removal of only element in the list
	//   - removal from middle of the list
	// * Paths without matching ids are ignored

	notMatching := []controller.Request{
		{ID: other1},
		{ID: other2},
	}

	matchAtBeginning := []controller.Request{
		{ID: toRemove},
		{ID: other1},
		{ID: other2},
	}

	matchAtEnd := []controller.Request{
		{ID: other1},
		{ID: other2},
		{ID: toRemove},
	}

	matchInMiddle := []controller.Request{
		{ID: other1},
		{ID: toRemove},
		{ID: other2},
	}

	matchOnly := []controller.Request{
		{ID: toRemove},
	}

	tree.Insert("no-match", notMatching)
	tree.Insert("match-beginning", matchAtBeginning)
	tree.Insert("match-end", matchAtEnd)
	tree.Insert("match-middle", matchInMiddle)
	tree.Insert("match-only", matchOnly)

	removeIDFromTreeAtPaths(tree, toRemove, []string{
		"no-match",
		"match-beginning",
		"match-end",
		"match-middle",
		"match-only",
	})

	// After removal, every list that contained toRemove should be reduced to
	// exactly {other1, other2} — i.e. equal to notMatching.
	reqs, found := tree.Get("no-match")
	require.True(t, found)
	require.Equal(t, notMatching, reqs)

	reqs, found = tree.Get("match-beginning")
	require.True(t, found)
	require.Equal(t, notMatching, reqs)

	reqs, found = tree.Get("match-end")
	require.True(t, found)
	require.Equal(t, notMatching, reqs)

	reqs, found = tree.Get("match-middle")
	require.True(t, found)
	require.Equal(t, notMatching, reqs)

	// The last tracked request should cause removal from the tree
	_, found = tree.Get("match-only")
	require.False(t, found)
}
// selectionTrackerSuite holds shared fixtures for WorkloadSelectionTracker
// tests: a tracker, two workload resources built from workloadData, and two
// ServiceEndpoints IDs whose selectors get tracked.
type selectionTrackerSuite struct {
	suite.Suite

	rt      controller.Runtime
	tracker *WorkloadSelectionTracker

	// Fixture workloads; both share the same data payload, only names differ.
	workloadAPI1 *pbresource.Resource
	workloadWeb1 *pbresource.Resource

	// IDs of the resources tracked against the workloads above.
	endpointsFoo *pbresource.ID
	endpointsBar *pbresource.ID
}
// SetupTest builds a fresh tracker and fixture resources before each test.
func (suite *selectionTrackerSuite) SetupTest() {
	t := suite.T()

	suite.tracker = New()

	// Two workloads sharing the same payload but with distinct names so that
	// exact-name and prefix selection can be told apart.
	suite.workloadAPI1 = rtest.Resource(pbcatalog.WorkloadType, "api-1").WithData(t, workloadData).Build()
	suite.workloadWeb1 = rtest.Resource(pbcatalog.WorkloadType, "web-1").WithData(t, workloadData).Build()

	// IDs of the resources whose selectors will be tracked.
	suite.endpointsFoo = rtest.Resource(pbcatalog.ServiceEndpointsType, "foo").ID()
	suite.endpointsBar = rtest.Resource(pbcatalog.ServiceEndpointsType, "bar").ID()
}
// requireMappedIDs asserts that mapping the given workload produces exactly
// one request per expected ID, in any order.
func (suite *selectionTrackerSuite) requireMappedIDs(workload *pbresource.Resource, ids ...*pbresource.ID) {
	t := suite.T()
	t.Helper()

	reqs, err := suite.tracker.MapWorkload(context.Background(), suite.rt, workload)
	require.NoError(t, err)
	require.Len(t, reqs, len(ids))
	for _, expected := range ids {
		prototest.AssertContainsElement(t, reqs, controller.Request{ID: expected})
	}
}
// TestMapWorkload_Empty verifies the default mapping behavior when nothing
// is tracked yet.
func (suite *selectionTrackerSuite) TestMapWorkload_Empty() {
	// If we aren't tracking anything then the default mapping behavior
	// should be to return an empty list of requests.
	suite.requireMappedIDs(suite.workloadAPI1)
}
// TestUntrackID_Empty verifies that untracking an ID that was never tracked
// is a safe no-op.
func (suite *selectionTrackerSuite) TestUntrackID_Empty() {
	// this test has no assertions but mainly is here to prove that things
	// dont explode if this is attempted.
	suite.tracker.UntrackID(suite.endpointsFoo)
}
// TestTrackAndMap_SingleResource_MultipleWorkloadMappings proves that once a
// single resource's selector is tracked, multiple distinct workloads map back
// to that one resource — via exact-name criteria as well as prefix criteria.
func (suite *selectionTrackerSuite) TestTrackAndMap_SingleResource_MultipleWorkloadMappings() {
	// Track foo with a selector combining exact names and a prefix.
	selector := &pbcatalog.WorkloadSelector{
		Names:    []string{"bar", "api", "web-1"},
		Prefixes: []string{"api-"},
	}
	suite.tracker.TrackIDForSelector(suite.endpointsFoo, selector)

	// api-1 matches via the "api-" prefix.
	suite.requireMappedIDs(suite.workloadAPI1, suite.endpointsFoo)

	// web-1 matches via its exact name.
	suite.requireMappedIDs(suite.workloadWeb1, suite.endpointsFoo)
}
// TestTrackAndMap_MultiResource_SingleWorkloadMapping proves that when
// multiple resources select the same workload, mapping that workload yields
// a request for each of them.
func (suite *selectionTrackerSuite) TestTrackAndMap_MultiResource_SingleWorkloadMapping() {
	// foo selects api-1 by prefix, bar selects it by exact name.
	byPrefix := &pbcatalog.WorkloadSelector{Prefixes: []string{"api-"}}
	byName := &pbcatalog.WorkloadSelector{Names: []string{"api-1"}}

	suite.tracker.TrackIDForSelector(suite.endpointsFoo, byPrefix)
	suite.tracker.TrackIDForSelector(suite.endpointsBar, byName)

	// Mapping the workload must yield a request for each selecting resource.
	suite.requireMappedIDs(suite.workloadAPI1, suite.endpointsFoo, suite.endpointsBar)
}
// TestDuplicateTracking proves that tracking the same ID with identical
// selection criteria multiple times does not produce duplicate requests.
func (suite *selectionTrackerSuite) TestDuplicateTracking() {
	// Track foo three times without changing the selection criteria; the
	// second and third calls should be no-ops.
	for i := 0; i < 3; i++ {
		suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
			Prefixes: []string{"api-"},
		})
	}

	// Regardless of the number of times tracked we should only see a single request.
	suite.requireMappedIDs(suite.workloadAPI1, suite.endpointsFoo)
}
// TestModifyTracking proves that changing a resource's selection criteria
// leaves the tracker in a consistent state: newly added criteria begin
// matching, removed criteria stop matching, and retained criteria are
// unaffected.
func (suite *selectionTrackerSuite) TestModifyTracking() {
	// Start by selecting only web-1 by exact name.
	suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
		Names: []string{"web-1"},
	})

	// api-1 must not be mapped yet; web-1 must be.
	suite.requireMappedIDs(suite.workloadAPI1)
	suite.requireMappedIDs(suite.workloadWeb1, suite.endpointsFoo)

	// Widen the selector to additionally cover the "api-" prefix.
	suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
		Names:    []string{"web-1"},
		Prefixes: []string{"api-"},
	})

	// Both workloads are now mapped.
	suite.requireMappedIDs(suite.workloadAPI1, suite.endpointsFoo)
	suite.requireMappedIDs(suite.workloadWeb1, suite.endpointsFoo)

	// Narrow the selector again by dropping the exact-name criterion.
	suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
		Prefixes: []string{"api-"},
	})

	// Only api-1 remains mapped.
	suite.requireMappedIDs(suite.workloadAPI1, suite.endpointsFoo)
	suite.requireMappedIDs(suite.workloadWeb1)
}
// TestRemove proves that untracking a resource stops subsequent mapping
// calls from returning it.
func (suite *selectionTrackerSuite) TestRemove() {
	// Track web-1 and confirm it maps to foo.
	suite.tracker.TrackIDForSelector(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
		Names: []string{"web-1"},
	})
	suite.requireMappedIDs(suite.workloadWeb1, suite.endpointsFoo)

	// After untracking, the workload must no longer map to the resource.
	suite.tracker.UntrackID(suite.endpointsFoo)
	suite.requireMappedIDs(suite.workloadWeb1)
}
// TestWorkloadSelectionSuite runs the selection tracker test suite.
func TestWorkloadSelectionSuite(t *testing.T) {
	suite.Run(t, new(selectionTrackerSuite))
}

View File

@ -10,7 +10,7 @@ import (
// MakeRequests accepts a list of pbresource.ID and pbresource.Reference items,
// and mirrors them into a slice of []controller.Request items where the Type
// of of the items has replaced by 'typ'.
// of the items has replaced by 'typ'.
func MakeRequests[V resource.ReferenceOrID](
typ *pbresource.Type,
refs []V,
@ -21,6 +21,10 @@ func MakeRequests[V resource.ReferenceOrID](
out := make([]Request, 0, len(refs))
for _, ref := range refs {
// if type is not provided, we will use the type in the ID or reference.
if typ == nil {
typ = ref.GetType()
}
out = append(out, Request{
ID: &pbresource.ID{
Type: typ,

View File

@ -0,0 +1,174 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package proxyconfiguration
import (
"context"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/proxyconfiguration/mapper"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// ControllerName is the name of this controller, used to tag log output.
const ControllerName = "consul.io/proxy-configuration-controller"
// Controller returns a controller that maintains ComputedProxyConfiguration
// resources. It panics when proxyConfigMapper is nil because both the watch
// mapping and the reconciler depend on it.
func Controller(proxyConfigMapper *mapper.Mapper) controller.Controller {
	if proxyConfigMapper == nil {
		panic("proxy config mapper is required")
	}

	r := &reconciler{proxyConfigMapper: proxyConfigMapper}

	// Reconcile whenever a ProxyConfiguration changes (mapped through the
	// selector cache) or the name-aligned Workload itself changes.
	return controller.ForType(pbmesh.ComputedProxyConfigurationType).
		WithWatch(pbmesh.ProxyConfigurationType, proxyConfigMapper.MapProxyConfiguration).
		WithWatch(pbcatalog.WorkloadType, controller.ReplaceType(pbmesh.ComputedProxyConfigurationType)).
		WithReconciler(r)
}
// reconciler computes ComputedProxyConfiguration resources for workloads,
// using the mapper's selector cache to find matching ProxyConfigurations.
type reconciler struct {
	proxyConfigMapper *mapper.Mapper
}
// Reconcile computes the ComputedProxyConfiguration for the workload that is
// name-aligned with req.ID by merging, in precedence order, all
// ProxyConfiguration resources whose selectors match that workload.
func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error {
	rt.Logger = rt.Logger.With("controller", ControllerName, "id", req.ID)

	// Look up the associated workload.
	workloadID := resource.ReplaceType(pbcatalog.WorkloadType, req.ID)
	workload, err := resource.GetDecodedResource[*pbcatalog.Workload](ctx, rt.Client, workloadID)
	if err != nil {
		rt.Logger.Error("error fetching workload", "error", err)
		return err
	}

	// If workload is not found, the decoded resource will be nil.
	if workload == nil {
		// When workload is not there, we don't need to manually delete the resource
		// because it is owned by the workload. In this case, we skip reconcile
		// because there's nothing for us to do.
		rt.Logger.Trace("the corresponding workload does not exist", "id", workloadID)
		return nil
	}

	// Get existing ComputedProxyConfiguration resource (if any).
	cpc, err := resource.GetDecodedResource[*pbmesh.ComputedProxyConfiguration](ctx, rt.Client, req.ID)
	if err != nil {
		rt.Logger.Error("error fetching ComputedProxyConfiguration", "error", err)
		return err
	}
	cpcExists := cpc != nil

	// If workload is not on the mesh, we need to delete the resource and return
	// as for non-mesh workloads there should be no proxy configuration.
	if !workload.GetData().IsMeshEnabled() {
		rt.Logger.Trace("workload is not on the mesh, skipping reconcile and deleting any corresponding ComputedProxyConfiguration", "id", workloadID)
		return r.deleteCPC(ctx, rt, req.ID, cpcExists)
	}

	// Now get any proxy configuration IDs that we have in the cache that have
	// selectors matching the name of this CPC (name-aligned with the workload).
	proxyCfgIDs := r.proxyConfigMapper.ProxyConfigurationsForWorkload(req.ID)
	rt.Logger.Trace("cached proxy cfg IDs", "ids", proxyCfgIDs)

	decodedProxyCfgs, err := r.fetchProxyConfigs(ctx, rt.Client, proxyCfgIDs)
	if err != nil {
		rt.Logger.Error("error fetching proxy configurations", "error", err)
		return err
	}

	// If after fetching, we don't have any proxy configs, we need to skip
	// reconcile and delete the resource.
	if len(decodedProxyCfgs) == 0 {
		rt.Logger.Trace("found no proxy configurations associated with this workload")
		return r.deleteCPC(ctx, rt, req.ID, cpcExists)
	}

	// Next, we need to sort configs so that we can resolve conflicts.
	sortedProxyCfgs := SortProxyConfigurations(decodedProxyCfgs, req.ID.GetName())

	mergedProxyCfg := &pbmesh.ProxyConfiguration{}
	// Walk sorted configs in reverse order so that the ones that take precedence
	// do not overwrite the ones that don't.
	for i := len(sortedProxyCfgs) - 1; i >= 0; i-- {
		proto.Merge(mergedProxyCfg, sortedProxyCfgs[i].GetData())
	}

	newCPCData := &pbmesh.ComputedProxyConfiguration{
		DynamicConfig:   mergedProxyCfg.GetDynamicConfig(),
		BootstrapConfig: mergedProxyCfg.GetBootstrapConfig(),
	}

	// Lastly, write the resource, but only when it doesn't exist yet or its
	// data actually changed.
	if cpc == nil || !proto.Equal(cpc.GetData(), newCPCData) {
		rt.Logger.Trace("writing new ComputedProxyConfiguration")

		// First encode the computed proxy configuration data as an Any type.
		cpcDataAsAny, err := anypb.New(newCPCData)
		if err != nil {
			rt.Logger.Error("error marshalling latest ComputedProxyConfiguration", "error", err)
			return err
		}

		_, err = rt.Client.Write(ctx, &pbresource.WriteRequest{
			Resource: &pbresource.Resource{
				Id:    req.ID,
				Owner: workloadID,
				Data:  cpcDataAsAny,
			},
		})
		if err != nil {
			rt.Logger.Error("error writing latest ComputedProxyConfiguration", "error", err)
			return err
		}
	}

	return nil
}

// deleteCPC deletes the ComputedProxyConfiguration with the given ID when it
// currently exists. It is shared by the non-mesh-workload and
// no-matching-proxy-configs paths of Reconcile, which previously duplicated
// this logic verbatim.
func (r *reconciler) deleteCPC(ctx context.Context, rt controller.Runtime, id *pbresource.ID, exists bool) error {
	// Delete CPC only if it exists; otherwise there's nothing for us to do.
	if !exists {
		return nil
	}
	if _, err := rt.Client.Delete(ctx, &pbresource.DeleteRequest{Id: id}); err != nil {
		// If there's an error deleting CPC, we want to re-trigger reconcile again.
		rt.Logger.Error("error deleting ComputedProxyConfiguration", "error", err)
		return err
	}
	return nil
}
// fetchProxyConfigs fetches and decodes each of the given ProxyConfiguration
// IDs. IDs that can no longer be found are untracked in the mapper and
// skipped; the returned slice contains only successfully decoded resources.
func (r *reconciler) fetchProxyConfigs(
	ctx context.Context,
	client pbresource.ResourceServiceClient,
	proxyCfgIds []*pbresource.ID) ([]*types.DecodedProxyConfiguration, error) {

	var decoded []*types.DecodedProxyConfiguration
	for _, cfgID := range proxyCfgIds {
		res, err := resource.GetDecodedResource[*pbmesh.ProxyConfiguration](ctx, client, cfgID)
		if err != nil {
			return nil, err
		}

		// A missing or undecodable resource means it was deleted out from
		// under the cache; stop tracking it rather than returning it.
		if res == nil || res.GetResource() == nil || res.GetData() == nil {
			r.proxyConfigMapper.UntrackProxyConfiguration(cfgID)
			continue
		}

		decoded = append(decoded, res)
	}

	return decoded, nil
}

View File

@ -0,0 +1,306 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package proxyconfiguration
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/durationpb"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/proxyconfiguration/mapper"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
)
// controllerTestSuite holds shared fixtures for the proxy-configuration
// controller tests: a resource client, a reconciler, a mesh-enabled workload,
// three proxy configurations, and the expected merged result.
type controllerTestSuite struct {
	suite.Suite

	client  *resourcetest.Client
	runtime controller.Runtime
	ctl     *reconciler
	ctx     context.Context

	// The fixture workload and its written resource.
	workload    *pbcatalog.Workload
	workloadRes *pbresource.Resource

	// Three proxy configurations whose selectors all match the workload.
	proxyCfg1 *pbmesh.ProxyConfiguration
	proxyCfg2 *pbmesh.ProxyConfiguration
	proxyCfg3 *pbmesh.ProxyConfiguration

	// The expected result of merging proxyCfg1..3.
	expComputedProxyCfg *pbmesh.ComputedProxyConfiguration
}
// SetupTest creates a fresh resource service, reconciler, and fixture data
// before each test.
func (suite *controllerTestSuite) SetupTest() {
	resourceClient := svctest.RunResourceService(suite.T(), types.Register, catalog.RegisterTypes)
	suite.client = resourcetest.NewClient(resourceClient)
	suite.runtime = controller.Runtime{Client: resourceClient, Logger: testutil.Logger(suite.T())}
	suite.ctx = testutil.TestContext(suite.T())

	suite.ctl = &reconciler{
		proxyConfigMapper: mapper.New(),
	}

	// A mesh-enabled workload (it has a "mesh" port) named "test-workload".
	suite.workload = &pbcatalog.Workload{
		Addresses: []*pbcatalog.WorkloadAddress{{Host: "1.1.1.1"}},
		Ports: map[string]*pbcatalog.WorkloadPort{
			"tcp":  {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
			"mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
		},
		Identity: "test",
	}

	suite.workloadRes = resourcetest.Resource(pbcatalog.WorkloadType, "test-workload").
		WithData(suite.T(), suite.workload).
		Write(suite.T(), suite.client)

	// Selects the workload by exact name.
	suite.proxyCfg1 = &pbmesh.ProxyConfiguration{
		Workloads: &pbcatalog.WorkloadSelector{
			Names: []string{suite.workloadRes.Id.Name},
		},
		DynamicConfig: &pbmesh.DynamicConfig{
			Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
		},
	}

	// Selects the workload by the shorter prefix "test-".
	suite.proxyCfg2 = &pbmesh.ProxyConfiguration{
		Workloads: &pbcatalog.WorkloadSelector{
			Prefixes: []string{"test-"},
		},
		DynamicConfig: &pbmesh.DynamicConfig{
			Mode: pbmesh.ProxyMode_PROXY_MODE_DIRECT, // this setting should be overridden by proxycfg1
			LocalConnection: map[string]*pbmesh.ConnectionConfig{
				"tcp": {ConnectTimeout: durationpb.New(2 * time.Second)},
			},
		},
	}

	// Selects the workload by the longer prefix "test-wor".
	suite.proxyCfg3 = &pbmesh.ProxyConfiguration{
		Workloads: &pbcatalog.WorkloadSelector{
			Prefixes: []string{"test-wor"},
		},
		BootstrapConfig: &pbmesh.BootstrapConfig{
			PrometheusBindAddr: "0.0.0.0:9000",
		},
	}

	// The expected result of merging all three configurations above.
	suite.expComputedProxyCfg = &pbmesh.ComputedProxyConfiguration{
		DynamicConfig: &pbmesh.DynamicConfig{
			Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
			LocalConnection: map[string]*pbmesh.ConnectionConfig{
				"tcp": {ConnectTimeout: durationpb.New(2 * time.Second)},
			},
		},
		BootstrapConfig: &pbmesh.BootstrapConfig{
			PrometheusBindAddr: "0.0.0.0:9000",
		},
	}
}
// TestReconcile_NoWorkload verifies that reconciling a CPC whose name-aligned
// workload does not exist is a no-op.
func (suite *controllerTestSuite) TestReconcile_NoWorkload() {
	// This test ensures that removed workloads are ignored and don't result
	// in the creation of the computed proxy configuration.
	id := resourcetest.Resource(pbmesh.ComputedProxyConfigurationType, "not-found").ID()
	err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
		ID: id,
	})
	require.NoError(suite.T(), err)

	suite.client.RequireResourceNotFound(suite.T(), id)
}
// TestReconcile_NonMeshWorkload verifies that reconciling against a workload
// without a mesh port deletes any existing ComputedProxyConfiguration.
func (suite *controllerTestSuite) TestReconcile_NonMeshWorkload() {
	// A workload with only a TCP port — not on the mesh.
	nonMeshWorkload := &pbcatalog.Workload{
		Addresses: []*pbcatalog.WorkloadAddress{{Host: "1.1.1.1"}},
		Ports: map[string]*pbcatalog.WorkloadPort{
			"tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
		},
	}
	resourcetest.Resource(pbcatalog.WorkloadType, "non-mesh").
		WithData(suite.T(), nonMeshWorkload).
		Write(suite.T(), suite.client)

	// Pre-create a name-aligned CPC so we can observe it being deleted.
	cpcID := resourcetest.Resource(pbmesh.ComputedProxyConfigurationType, "non-mesh").
		Write(suite.T(), suite.client).Id
	err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
		ID: cpcID,
	})
	require.NoError(suite.T(), err)

	suite.client.RequireResourceNotFound(suite.T(), cpcID)
}
// TestReconcile_HappyPath writes all three proxy configurations, maps them so
// the reconciler's cache knows about them, and verifies the expected merged
// ComputedProxyConfiguration is produced.
func (suite *controllerTestSuite) TestReconcile_HappyPath() {
	// Write and map all three proxy cfgs.
	proxyCfgs := []struct {
		name string
		data *pbmesh.ProxyConfiguration
	}{
		{"cfg1", suite.proxyCfg1},
		{"cfg2", suite.proxyCfg2},
		{"cfg3", suite.proxyCfg3},
	}
	for _, c := range proxyCfgs {
		written := resourcetest.Resource(pbmesh.ProxyConfigurationType, c.name).
			WithData(suite.T(), c.data).
			Write(suite.T(), suite.client)
		_, err := suite.ctl.proxyConfigMapper.MapProxyConfiguration(suite.ctx, suite.runtime, written)
		require.NoError(suite.T(), err)
	}

	cpcID := resource.ReplaceType(pbmesh.ComputedProxyConfigurationType, suite.workloadRes.Id)
	err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
		ID: cpcID,
	})
	require.NoError(suite.T(), err)

	suite.requireComputedProxyConfiguration(suite.T(), cpcID)
}
// TestReconcile_NoProxyConfigs verifies that when the cached proxy
// configuration no longer exists in the resource service, reconcile deletes
// the stale ComputedProxyConfiguration.
func (suite *controllerTestSuite) TestReconcile_NoProxyConfigs() {
	// Build (but do NOT write) a proxy cfg and map it so it only exists in
	// the mapper's cache.
	unwrittenCfg := resourcetest.Resource(pbmesh.ProxyConfigurationType, "cfg1").
		WithData(suite.T(), suite.proxyCfg1).
		Build()
	_, err := suite.ctl.proxyConfigMapper.MapProxyConfiguration(suite.ctx, suite.runtime, unwrittenCfg)
	require.NoError(suite.T(), err)

	// Pre-create the CPC so we can observe it being deleted.
	cpcID := resourcetest.Resource(pbmesh.ComputedProxyConfigurationType, suite.workloadRes.Id.Name).
		Write(suite.T(), suite.client).Id
	err = suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
		ID: cpcID,
	})
	require.NoError(suite.T(), err)

	suite.client.RequireResourceNotFound(suite.T(), cpcID)
}
// TestController runs the controller end-to-end through the controller
// manager and verifies CPC generation, updates when selectors change, and
// cleanup when all proxy configurations are deleted.
func (suite *controllerTestSuite) TestController() {
	// Run the controller manager.
	mgr := controller.NewManager(suite.client, suite.runtime.Logger)
	m := mapper.New()
	mgr.Register(Controller(m))
	mgr.SetRaftLeader(true)
	go mgr.Run(suite.ctx)

	// Write proxy configs.
	pCfg1 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "cfg1").
		WithData(suite.T(), suite.proxyCfg1).
		Write(suite.T(), suite.client)

	pCfg2 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "cfg2").
		WithData(suite.T(), suite.proxyCfg2).
		Write(suite.T(), suite.client)

	pCfg3 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "cfg3").
		WithData(suite.T(), suite.proxyCfg3).
		Write(suite.T(), suite.client)

	cpcID := resource.ReplaceType(pbmesh.ComputedProxyConfigurationType, suite.workloadRes.Id)
	testutil.RunStep(suite.T(), "computed proxy config generation", func(t *testing.T) {
		retry.Run(t, func(r *retry.R) {
			suite.client.RequireResourceExists(r, cpcID)
			suite.requireComputedProxyConfiguration(r, cpcID)
		})
	})

	testutil.RunStep(suite.T(), "add another workload", func(t *testing.T) {
		// Create another workload that will match only proxyCfg2.
		matchingWorkload := resourcetest.Resource(pbcatalog.WorkloadType, "test-extra-workload").
			WithData(t, suite.workload).
			Write(t, suite.client)
		matchingWorkloadCPCID := resource.ReplaceType(pbmesh.ComputedProxyConfigurationType, matchingWorkload.Id)
		retry.Run(t, func(r *retry.R) {
			suite.client.RequireResourceExists(r, cpcID)
			suite.requireComputedProxyConfiguration(r, cpcID)

			matchingWorkloadCPC := suite.client.RequireResourceExists(r, matchingWorkloadCPCID)
			dec := resourcetest.MustDecode[*pbmesh.ComputedProxyConfiguration](r, matchingWorkloadCPC)
			prototest.AssertDeepEqual(r, suite.proxyCfg2.GetDynamicConfig(), dec.GetData().GetDynamicConfig())
			prototest.AssertDeepEqual(r, suite.proxyCfg2.GetBootstrapConfig(), dec.GetData().GetBootstrapConfig())
		})
	})

	testutil.RunStep(suite.T(), "update proxy config selector", func(t *testing.T) {
		t.Log("running update proxy config selector")
		// Update proxy config selector to no longer select "test-workload".
		updatedProxyCfg := proto.Clone(suite.proxyCfg2).(*pbmesh.ProxyConfiguration)
		updatedProxyCfg.Workloads = &pbcatalog.WorkloadSelector{
			Names: []string{"test-extra-workload"},
		}

		matchingWorkload := resourcetest.Resource(pbcatalog.WorkloadType, "test-extra-workload").
			WithData(t, suite.workload).
			Write(t, suite.client)
		matchingWorkloadCPCID := resource.ReplaceType(pbmesh.ComputedProxyConfigurationType, matchingWorkload.Id)
		resourcetest.Resource(pbmesh.ProxyConfigurationType, "cfg2").
			WithData(suite.T(), updatedProxyCfg).
			Write(suite.T(), suite.client)

		retry.Run(t, func(r *retry.R) {
			res := suite.client.RequireResourceExists(r, cpcID)

			// The "test-workload" CPC should now be computed from only proxy cfg 1 and 3.
			expProxyCfg := &pbmesh.ComputedProxyConfiguration{
				DynamicConfig: &pbmesh.DynamicConfig{
					Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
				},
				BootstrapConfig: &pbmesh.BootstrapConfig{
					PrometheusBindAddr: "0.0.0.0:9000",
				},
			}

			// BUG FIX: decode with the retry handle r (not the outer t) so a
			// transient mismatch here is retried like every other assertion
			// in this closure, instead of failing the test immediately.
			dec := resourcetest.MustDecode[*pbmesh.ComputedProxyConfiguration](r, res)
			prototest.AssertDeepEqual(r, expProxyCfg.GetDynamicConfig(), dec.GetData().GetDynamicConfig())
			prototest.AssertDeepEqual(r, expProxyCfg.GetBootstrapConfig(), dec.GetData().GetBootstrapConfig())

			matchingWorkloadCPC := suite.client.RequireResourceExists(r, matchingWorkloadCPCID)
			dec = resourcetest.MustDecode[*pbmesh.ComputedProxyConfiguration](r, matchingWorkloadCPC)
			prototest.AssertDeepEqual(r, suite.proxyCfg2.GetDynamicConfig(), dec.GetData().GetDynamicConfig())
			prototest.AssertDeepEqual(r, suite.proxyCfg2.GetBootstrapConfig(), dec.GetData().GetBootstrapConfig())
		})
	})

	// Delete all proxy cfgs.
	suite.client.MustDelete(suite.T(), pCfg1.Id)
	suite.client.MustDelete(suite.T(), pCfg2.Id)
	suite.client.MustDelete(suite.T(), pCfg3.Id)

	testutil.RunStep(suite.T(), "all proxy configs are deleted", func(t *testing.T) {
		retry.Run(t, func(r *retry.R) {
			suite.client.RequireResourceNotFound(r, cpcID)
		})
	})
}
// TestControllerSuite runs the proxy-configuration controller test suite.
func TestControllerSuite(t *testing.T) {
	suite.Run(t, new(controllerTestSuite))
}
// requireComputedProxyConfiguration asserts that the resource with the given
// ID exists, decodes to the expected merged configuration, and is owned by
// the name-aligned workload.
func (suite *controllerTestSuite) requireComputedProxyConfiguration(t resourcetest.T, id *pbresource.ID) {
	res := suite.client.RequireResourceExists(t, id)
	dec := resourcetest.MustDecode[*pbmesh.ComputedProxyConfiguration](t, res)
	prototest.AssertDeepEqual(t, suite.expComputedProxyCfg, dec.Data)
	resourcetest.RequireOwner(t, res, resource.ReplaceType(pbcatalog.WorkloadType, id), true)
}

View File

@ -0,0 +1,71 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package mapper
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/mappers/common"
"github.com/hashicorp/consul/internal/resource/mappers/selectiontracker"
"github.com/hashicorp/consul/lib/stringslice"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// Mapper tracks ProxyConfiguration workload selectors so that workloads can
// be mapped back to the ProxyConfigurations that select them.
type Mapper struct {
	workloadSelectionTracker *selectiontracker.WorkloadSelectionTracker
}
// New returns a Mapper with an empty workload selection tracker.
func New() *Mapper {
	return &Mapper{
		workloadSelectionTracker: selectiontracker.New(),
	}
}
// MapProxyConfiguration is responsible for mapping ProxyConfiguration resources to the corresponding ComputedProxyConfiguration
// resource which are name-aligned with the workload.
// MapProxyConfiguration is responsible for mapping ProxyConfiguration resources to the corresponding ComputedProxyConfiguration
// resource which are name-aligned with the workload. It also records the
// configuration's selector in the tracker so that later workload events can
// be mapped back to it.
func (m *Mapper) MapProxyConfiguration(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
	var proxyConfig pbmesh.ProxyConfiguration
	err := res.Data.UnmarshalTo(&proxyConfig)
	if err != nil {
		return nil, err
	}

	// First, we return any existing workloads that this proxy configuration selects.
	// The number of selected workloads may change in the future, but for this event we
	// only need to care about triggering reconcile requests for the current ones.
	requests, err := common.MapSelector(ctx, rt.Client, pbmesh.ComputedProxyConfigurationType,
		proxyConfig.GetWorkloads(), res.Id.Tenancy)
	if err != nil {
		return nil, err
	}

	// Then, generate requests for any previously selected workloads, so that
	// workloads this configuration no longer selects also get reconciled.
	prevSelector := m.workloadSelectionTracker.GetSelector(res.GetId())

	if !(stringslice.Equal(prevSelector.GetNames(), proxyConfig.GetWorkloads().GetNames()) &&
		stringslice.Equal(prevSelector.GetPrefixes(), proxyConfig.GetWorkloads().GetPrefixes())) {
		// the selector is different, so we need to map those selectors as well.
		requestsForPrevSelector, err := common.MapSelector(ctx, rt.Client, pbmesh.ComputedProxyConfigurationType,
			prevSelector, res.Id.Tenancy)
		if err != nil {
			return nil, err
		}
		requests = append(requests, requestsForPrevSelector...)
	}

	// Finally, we track this proxy configuration's selector and ID in the tracker.
	m.workloadSelectionTracker.TrackIDForSelector(res.Id, proxyConfig.GetWorkloads())

	return requests, nil
}
// ProxyConfigurationsForWorkload returns the IDs of all tracked
// ProxyConfigurations whose selectors match the given workload ID.
func (m *Mapper) ProxyConfigurationsForWorkload(id *pbresource.ID) []*pbresource.ID {
	return m.workloadSelectionTracker.GetIDsForWorkload(id)
}
// UntrackProxyConfiguration removes the given ProxyConfiguration ID and its
// selector from the tracker.
func (m *Mapper) UntrackProxyConfiguration(id *pbresource.ID) {
	m.workloadSelectionTracker.UntrackID(id)
}

View File

@ -0,0 +1,142 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package mapper
import (
"context"
"testing"
"github.com/stretchr/testify/require"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
// TestMapProxyConfiguration exercises Mapper.MapProxyConfiguration end to end:
// mapping a proxy configuration yields ComputedProxyConfiguration requests for
// every selected workload; re-mapping after a selector change also yields
// requests for the previously selected workloads; and untracking a proxy
// configuration removes it from the per-workload mapper state.
func TestMapProxyConfiguration(t *testing.T) {
	resourceClient := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
	mapper := New()

	// All workloads share the same data; only their names matter for selection.
	workloadData := &pbcatalog.Workload{
		Addresses: []*pbcatalog.WorkloadAddress{{Host: "1.1.1.1"}},
		Ports: map[string]*pbcatalog.WorkloadPort{
			"tcp":  {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
			"mesh": {Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
		},
		Identity: "test",
	}
	wID1 := resourcetest.Resource(pbcatalog.WorkloadType, "api-1").
		WithData(t, workloadData).
		Write(t, resourceClient).GetId()
	wID2 := resourcetest.Resource(pbcatalog.WorkloadType, "api-2").
		WithData(t, workloadData).
		Write(t, resourceClient).GetId()
	wID3 := resourcetest.Resource(pbcatalog.WorkloadType, "api-abc").
		WithData(t, workloadData).
		Write(t, resourceClient).GetId()
	wID4 := resourcetest.Resource(pbcatalog.WorkloadType, "foo").
		WithData(t, workloadData).
		Write(t, resourceClient).GetId()

	// cfg1 selects "foo" and "api-1" by name plus the "api-a" prefix,
	// i.e. wID1, wID3, and wID4.
	pCfg1 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "cfg1").
		WithTenancy(resource.DefaultNamespacedTenancy()).
		WithData(t, &pbmesh.ProxyConfiguration{
			Workloads: &pbcatalog.WorkloadSelector{
				Names:    []string{"foo", "api-1"},
				Prefixes: []string{"api-a"},
			},
			DynamicConfig: &pbmesh.DynamicConfig{
				Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
			},
		}).Build()
	reqs, err := mapper.MapProxyConfiguration(context.Background(), controller.Runtime{Client: resourceClient}, pCfg1)
	require.NoError(t, err)
	prototest.AssertElementsMatch(t,
		controller.MakeRequests(pbmesh.ComputedProxyConfigurationType, []*pbresource.ID{wID1, wID3, wID4}),
		reqs)

	// cfg2 selects all four workloads: "api-" prefix plus "foo" by name.
	pCfg2 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "cfg2").
		WithTenancy(resource.DefaultNamespacedTenancy()).
		WithData(t, &pbmesh.ProxyConfiguration{
			Workloads: &pbcatalog.WorkloadSelector{
				Prefixes: []string{"api-"},
				Names:    []string{"foo"},
			},
			BootstrapConfig: &pbmesh.BootstrapConfig{
				PrometheusBindAddr: "0.0.0.0:9000",
			},
		}).Build()
	reqs, err = mapper.MapProxyConfiguration(context.Background(), controller.Runtime{Client: resourceClient}, pCfg2)
	require.NoError(t, err)
	prototest.AssertElementsMatch(t,
		controller.MakeRequests(pbmesh.ComputedProxyConfigurationType, []*pbresource.ID{wID1, wID2, wID3, wID4}),
		reqs)

	// Check mapper state for each workload.
	ids := mapper.ProxyConfigurationsForWorkload(wID1)
	prototest.AssertElementsMatch(t, []*pbresource.ID{pCfg1.Id, pCfg2.Id}, ids)
	ids = mapper.ProxyConfigurationsForWorkload(wID2)
	prototest.AssertElementsMatch(t, []*pbresource.ID{pCfg2.Id}, ids)
	ids = mapper.ProxyConfigurationsForWorkload(wID3)
	prototest.AssertElementsMatch(t, []*pbresource.ID{pCfg1.Id, pCfg2.Id}, ids)
	ids = mapper.ProxyConfigurationsForWorkload(wID4)
	prototest.AssertElementsMatch(t, []*pbresource.ID{pCfg1.Id, pCfg2.Id}, ids)

	// Update pCfg2's selector and check that we generate requests for previous and new selector.
	// Note that requests are not deduplicated: "foo" (wID4) appears once from
	// the new selector and once from the previous selector.
	pCfg2 = resourcetest.Resource(pbmesh.ProxyConfigurationType, "cfg2").
		WithTenancy(resource.DefaultNamespacedTenancy()).
		WithData(t, &pbmesh.ProxyConfiguration{
			Workloads: &pbcatalog.WorkloadSelector{
				Names: []string{"foo"},
			},
			BootstrapConfig: &pbmesh.BootstrapConfig{
				PrometheusBindAddr: "0.0.0.0:9000",
			},
		}).Build()
	reqs, err = mapper.MapProxyConfiguration(context.Background(), controller.Runtime{Client: resourceClient}, pCfg2)
	require.NoError(t, err)
	prototest.AssertElementsMatch(t,
		controller.MakeRequests(pbmesh.ComputedProxyConfigurationType, []*pbresource.ID{wID4, wID1, wID2, wID3, wID4}),
		reqs)

	// Check mapper state for each workload.
	ids = mapper.ProxyConfigurationsForWorkload(wID1)
	prototest.AssertElementsMatch(t, []*pbresource.ID{pCfg1.Id}, ids)
	ids = mapper.ProxyConfigurationsForWorkload(wID2)
	prototest.AssertElementsMatch(t, []*pbresource.ID{}, ids)
	ids = mapper.ProxyConfigurationsForWorkload(wID3)
	prototest.AssertElementsMatch(t, []*pbresource.ID{pCfg1.Id}, ids)
	ids = mapper.ProxyConfigurationsForWorkload(wID4)
	prototest.AssertElementsMatch(t, []*pbresource.ID{pCfg1.Id, pCfg2.Id}, ids)

	// Untrack one of the proxy cfgs and check that mapper is updated.
	mapper.UntrackProxyConfiguration(pCfg1.Id)
	ids = mapper.ProxyConfigurationsForWorkload(wID1)
	prototest.AssertElementsMatch(t, []*pbresource.ID{}, ids)
	ids = mapper.ProxyConfigurationsForWorkload(wID2)
	prototest.AssertElementsMatch(t, []*pbresource.ID{}, ids)
	ids = mapper.ProxyConfigurationsForWorkload(wID3)
	prototest.AssertElementsMatch(t, []*pbresource.ID{}, ids)
	ids = mapper.ProxyConfigurationsForWorkload(wID4)
	prototest.AssertElementsMatch(t, []*pbresource.ID{pCfg2.Id}, ids)
}

View File

@ -0,0 +1,108 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package proxyconfiguration
import (
"sort"
"strings"
"github.com/oklog/ulid/v2"
"golang.org/x/exp/slices"
"github.com/hashicorp/consul/internal/mesh/internal/types"
)
// SortProxyConfigurations returns the given proxy configurations ordered by
// precedence with respect to workloadName, highest precedence first:
//
//  1. A more specific selector wins. Selection by exact name beats selection
//     by prefix, and between two conflicting prefix selections the longer
//     matching prefix wins.
//  2. Otherwise, the proxy configuration created first (earlier timestamp) wins.
//  3. If creation timestamps are equal, the conflict is resolved by
//     lexicographic order of the resource names.
func SortProxyConfigurations(proxyCfgs []*types.DecodedProxyConfiguration, workloadName string) []*types.DecodedProxyConfiguration {
	// Sort a shallow copy so the caller's slice order is left untouched.
	sorted := make([]*types.DecodedProxyConfiguration, len(proxyCfgs))
	copy(sorted, proxyCfgs)

	sort.Sort(proxyCfgsSorter{
		proxyCfgs:    sorted,
		workloadName: workloadName,
	})
	return sorted
}
// proxyCfgsSorter implements sort.Interface to order proxy configurations by
// selector specificity, creation time, and name relative to a single workload.
type proxyCfgsSorter struct {
	proxyCfgs []*types.DecodedProxyConfiguration
	// workloadName is the workload these configurations are being ranked
	// against; it determines how specifically each selector matches.
	workloadName string
}
// Len reports how many proxy configurations are being sorted.
func (p proxyCfgsSorter) Len() int {
	return len(p.proxyCfgs)
}
// Less reports whether the proxy configuration at index i takes precedence
// over the one at index j. Precedence, in order: an exact-name match beats a
// prefix match; between prefix matches the longer matching prefix wins; then
// the earlier creation timestamp (taken from the resource ULID) wins; finally
// names are compared lexicographically.
func (p proxyCfgsSorter) Less(i, j int) bool {
	iByName := p.matchesByName(i)
	jByName := p.matchesByName(j)

	// Exact-name selection is more specific than any prefix selection, so if
	// exactly one of the two matches by name, that one comes first.
	if iByName != jByName {
		return iByName
	}

	// Neither matches by name: compare prefix specificity. (When both match
	// by name, prefixes are irrelevant and we fall through to the tie-break.)
	if !iByName {
		iPrefix := p.findLongestPrefixMatch(i)
		jPrefix := p.findLongestPrefixMatch(j)
		if len(iPrefix) != len(jPrefix) {
			// The longer matching prefix wins.
			return len(iPrefix) > len(jPrefix)
		}
	}

	// Equal specificity: the earlier creation time (encoded in the ULID) wins.
	iULID := ulid.MustParse(p.proxyCfgs[i].Resource.Id.Uid)
	jULID := ulid.MustParse(p.proxyCfgs[j].Resource.Id.Uid)
	if iULID.Time() != jULID.Time() {
		return iULID.Time() < jULID.Time()
	}

	// It's impossible for names to be equal, so lexicographic order of names
	// breaks any remaining tie.
	return p.proxyCfgs[i].GetResource().GetId().GetName() < p.proxyCfgs[j].GetResource().GetId().GetName()
}
// Swap exchanges the proxy configurations at indices i and j.
func (p proxyCfgsSorter) Swap(i, j int) {
	cfgs := p.proxyCfgs
	cfgs[i], cfgs[j] = cfgs[j], cfgs[i]
}
// matchesByName reports whether the proxy configuration at idx selects the
// workload by exact name.
func (p proxyCfgsSorter) matchesByName(idx int) bool {
	for _, name := range p.proxyCfgs[idx].GetData().GetWorkloads().GetNames() {
		if name == p.workloadName {
			return true
		}
	}
	return false
}
// findLongestPrefixMatch returns the longest selector prefix of the proxy
// configuration at idx that the workload name actually starts with, or the
// empty string if none of its prefixes match.
func (p proxyCfgsSorter) findLongestPrefixMatch(idx int) string {
	var prefixMatch string
	for _, prefix := range p.proxyCfgs[idx].GetData().GetWorkloads().GetPrefixes() {
		// Use HasPrefix rather than Contains: workload selection is by name
		// prefix, so a selector string that merely occurs mid-name must not
		// count towards specificity when ranking configurations.
		if strings.HasPrefix(p.workloadName, prefix) &&
			len(prefix) > len(prefixMatch) {
			prefixMatch = prefix
		}
	}
	return prefixMatch
}

View File

@ -0,0 +1,157 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package proxyconfiguration
import (
"fmt"
"testing"
"time"
"github.com/oklog/ulid/v2"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto/private/prototest"
)
// TestSortProxyConfigurations verifies the specificity-based ordering rules:
// name matches beat prefix matches, longer prefixes beat shorter ones, and
// ties fall back to creation order. Each case lists selectors in creation
// order and the expected indices after sorting (most specific first).
func TestSortProxyConfigurations(t *testing.T) {
	workloadName := "foo-123"

	cases := map[string]struct {
		selectors        []*pbcatalog.WorkloadSelector
		expSortedIndices []int
	}{
		"first matched by name, second by prefix": {
			selectors: []*pbcatalog.WorkloadSelector{
				{
					Names: []string{workloadName},
				},
				{
					Prefixes: []string{"foo-"},
				},
			},
			expSortedIndices: []int{0, 1},
		},
		"first matched by prefix, second by name": {
			selectors: []*pbcatalog.WorkloadSelector{
				{
					Prefixes: []string{"foo-"},
				},
				{
					Names: []string{workloadName},
				},
			},
			expSortedIndices: []int{1, 0},
		},
		"both matched by name (sorted order should match the order of creation)": {
			selectors: []*pbcatalog.WorkloadSelector{
				{
					Names: []string{workloadName},
				},
				{
					Names: []string{workloadName},
				},
			},
			expSortedIndices: []int{0, 1},
		},
		"both matched by different prefix": {
			selectors: []*pbcatalog.WorkloadSelector{
				{
					Prefixes: []string{"foo"},
				},
				{
					Prefixes: []string{"foo-"},
				},
			},
			expSortedIndices: []int{1, 0},
		},
		"both matched by the same prefix": {
			selectors: []*pbcatalog.WorkloadSelector{
				{
					Prefixes: []string{"foo-"},
				},
				{
					Prefixes: []string{"foo-"},
				},
			},
			expSortedIndices: []int{0, 1},
		},
		"both matched by the multiple different prefixes": {
			selectors: []*pbcatalog.WorkloadSelector{
				{
					Prefixes: []string{"foo-1", "foo-"},
				},
				{
					Prefixes: []string{"foo-1", "foo-12"},
				},
			},
			expSortedIndices: []int{1, 0},
		},
	}

	for name, c := range cases {
		t.Run(name, func(t *testing.T) {
			resourceClient := svctest.RunResourceService(t, types.Register)

			var decProxyCfgs []*types.DecodedProxyConfiguration
			for i, ws := range c.selectors {
				proxyCfg := &pbmesh.ProxyConfiguration{
					Workloads: ws,
				}
				resName := fmt.Sprintf("cfg-%d", i)
				proxyCfgRes := resourcetest.Resource(pbmesh.ProxyConfigurationType, resName).
					WithData(t, proxyCfg).
					// We need to run it through resource service so that ULIDs are set.
					Write(t, resourceClient)
				decProxyCfgs = append(decProxyCfgs, resourcetest.MustDecode[*pbmesh.ProxyConfiguration](t, proxyCfgRes))

				// Wait for a few milliseconds so that creation timestamp will always be different between resources.
				// ULID timestamps have millisecond resolution, so without this
				// two resources could share the same creation time.
				time.Sleep(2 * time.Millisecond)
			}

			sortedCfgs := SortProxyConfigurations(decProxyCfgs, workloadName)
			for i, idx := range c.expSortedIndices {
				prototest.AssertDeepEqual(t, decProxyCfgs[idx], sortedCfgs[i])
			}
		})
	}
}
// TestSortProxyConfigurations_SameCreationTime verifies the final tie-break:
// when two configurations have identical ULID timestamps, the one whose name
// sorts first lexicographically takes precedence.
func TestSortProxyConfigurations_SameCreationTime(t *testing.T) {
	var decProxyCfgs []*types.DecodedProxyConfiguration

	proxyCfg := &pbmesh.ProxyConfiguration{
		Workloads: &pbcatalog.WorkloadSelector{
			Names: []string{"foo-123"},
		},
	}

	// Make cfg1 name such that it should appear after cfg2 lexicographically.
	cfg1 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "def-cfg-1").
		WithData(t, proxyCfg).
		Build()
	// Explicitly set ulid. For the first one, we'll just use the current timestamp.
	cfg1.Id.Uid = ulid.Make().String()
	decProxyCfgs = append(decProxyCfgs, resourcetest.MustDecode[*pbmesh.ProxyConfiguration](t, cfg1))

	cfg2 := resourcetest.Resource(pbmesh.ProxyConfigurationType, "abc-cfg-2").
		WithData(t, proxyCfg).
		Build()
	// Explicitly set ulid. For the second one, we'll use the timestamp of the first one
	// so that both ULIDs carry the same creation time.
	parsedCfg1Ulid := ulid.MustParse(cfg1.Id.Uid)
	cfg2.Id.Uid = ulid.MustNew(parsedCfg1Ulid.Time(), ulid.DefaultEntropy()).String()
	decProxyCfgs = append(decProxyCfgs, resourcetest.MustDecode[*pbmesh.ProxyConfiguration](t, cfg2))

	sortedCfgs := SortProxyConfigurations(decProxyCfgs, "foo-123")

	// We expect that given the same creation timestamp, the second proxy cfg should be first
	// in the sorted order because of its name.
	prototest.AssertDeepEqual(t, decProxyCfgs[0], sortedCfgs[1])
	prototest.AssertDeepEqual(t, decProxyCfgs[1], sortedCfgs[0])
}

View File

@ -9,6 +9,8 @@ import (
"github.com/hashicorp/consul/agent/leafcert"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/proxyconfiguration"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/proxyconfiguration/mapper"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/xds"
@ -49,4 +51,6 @@ func Register(mgr *controller.Manager, deps Dependencies) {
)
mgr.Register(routes.Controller())
mgr.Register(proxyconfiguration.Controller(mapper.New()))
}

View File

@ -0,0 +1,58 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package common
import (
"context"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// MapSelector returns requests of the provided type given a workload
// selector and tenancy. The type has to be name-aligned with the workload.
// A nil selector yields no requests.
func MapSelector(ctx context.Context,
	client pbresource.ResourceServiceClient,
	typ *pbresource.Type,
	selector *pbcatalog.WorkloadSelector,
	tenancy *pbresource.Tenancy) ([]controller.Request, error) {

	if selector == nil {
		return nil, nil
	}

	var requests []controller.Request

	// For each prefix, list the currently existing workloads that match it and
	// produce one request per workload, re-typed to the target type.
	for _, prefix := range selector.GetPrefixes() {
		resp, err := client.List(ctx, &pbresource.ListRequest{
			Type:       pbcatalog.WorkloadType,
			Tenancy:    tenancy,
			NamePrefix: prefix,
		})
		if err != nil {
			return nil, err
		}
		for _, r := range resp.Resources {
			requests = append(requests, controller.Request{
				ID: resource.ReplaceType(typ, r.Id),
			})
		}
	}

	// Exact names are mapped without a lookup; verifying that the workload
	// actually exists is deferred to the controller's reconcile.
	for _, name := range selector.GetNames() {
		requests = append(requests, controller.Request{
			ID: &pbresource.ID{
				Name:    name,
				Tenancy: tenancy,
				Type:    typ,
			},
		})
	}

	return requests, nil
}

View File

@ -0,0 +1,66 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package common
import (
"context"
"testing"
"github.com/stretchr/testify/require"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/types"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto/private/prototest"
)
// TestMapSelector verifies that MapSelector produces one request per selected
// workload — prefix selections via resource-service listing, exact names
// without a lookup — and that unselected workloads yield no requests.
func TestMapSelector(t *testing.T) {
	client := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)

	// Create some workloads.
	// For this test, we don't care about the workload data, so we will re-use
	// the same data for all workloads.
	workloadData := &pbcatalog.Workload{
		Addresses: []*pbcatalog.WorkloadAddress{{Host: "127.0.0.1"}},
		Ports:     map[string]*pbcatalog.WorkloadPort{"p1": {Port: 8080}},
	}
	w1 := resourcetest.Resource(pbcatalog.WorkloadType, "w1").
		WithData(t, workloadData).
		Write(t, client).Id
	w2 := resourcetest.Resource(pbcatalog.WorkloadType, "w2").
		WithData(t, workloadData).
		Write(t, client).Id
	w3 := resourcetest.Resource(pbcatalog.WorkloadType, "prefix-w3").
		WithData(t, workloadData).
		Write(t, client).Id
	w4 := resourcetest.Resource(pbcatalog.WorkloadType, "prefix-w4").
		WithData(t, workloadData).
		Write(t, client).Id
	// This workload should not be used as it's not selected by the workload selector.
	resourcetest.Resource(pbcatalog.WorkloadType, "not-selected-workload").
		WithData(t, workloadData).
		Write(t, client)

	selector := &pbcatalog.WorkloadSelector{
		Names:    []string{"w1", "w2"},
		Prefixes: []string{"prefix"},
	}

	// Requests are name-aligned with the workloads but carry the target type.
	expReqs := []controller.Request{
		{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w1)},
		{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w2)},
		{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w3)},
		{ID: resource.ReplaceType(pbmesh.ProxyStateTemplateType, w4)},
	}

	reqs, err := MapSelector(context.Background(), client,
		pbmesh.ProxyStateTemplateType, selector, resource.DefaultNamespacedTenancy())
	require.NoError(t, err)
	require.Len(t, reqs, len(expReqs))
	prototest.AssertElementsMatch(t, expReqs, reqs)
}

View File

@ -0,0 +1,17 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package types
import (
"github.com/hashicorp/consul/internal/resource"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
)
// RegisterComputedProxyConfiguration registers the namespace-scoped
// ComputedProxyConfiguration resource type with the given registry.
func RegisterComputedProxyConfiguration(r resource.Registry) {
	r.Register(resource.Registration{
		Type:  pbmesh.ComputedProxyConfigurationType,
		Proto: &pbmesh.ComputedProxyConfiguration{},
		Scope: resource.ScopeNamespace,
	})
}

View File

@ -9,6 +9,7 @@ import (
func Register(r resource.Registry) {
RegisterProxyConfiguration(r)
RegisterComputedProxyConfiguration(r)
RegisterUpstreams(r)
RegisterUpstreamsConfiguration(r)
RegisterProxyStateTemplate(r)

View File

@ -5,8 +5,11 @@ package selectiontracker
import (
"context"
"fmt"
"sync"
"golang.org/x/exp/slices"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/radix"
"github.com/hashicorp/consul/internal/resource"
@ -17,45 +20,53 @@ import (
type WorkloadSelectionTracker struct {
lock sync.Mutex
prefixes *radix.Tree[[]controller.Request]
exact *radix.Tree[[]controller.Request]
prefixes *radix.Tree[[]*pbresource.ID]
exact *radix.Tree[[]*pbresource.ID]
// workloadSelectors contains a map keyed on resource names with values
// workloadSelectors contains a map keyed on resource references with values
// being the selector that resource is currently associated with. This map
// is kept mainly to make tracking removal operations more efficient.
// Generally any operation that could take advantage of knowing where
// in the trees the resource id is referenced can use this to prevent
// needing to search the whole tree.
workloadSelectors map[string]*pbcatalog.WorkloadSelector
workloadSelectors map[resource.ReferenceKey]*pbcatalog.WorkloadSelector
}
func New() *WorkloadSelectionTracker {
return &WorkloadSelectionTracker{
prefixes: radix.New[[]controller.Request](),
exact: radix.New[[]controller.Request](),
workloadSelectors: make(map[string]*pbcatalog.WorkloadSelector),
prefixes: radix.New[[]*pbresource.ID](),
exact: radix.New[[]*pbresource.ID](),
workloadSelectors: make(map[resource.ReferenceKey]*pbcatalog.WorkloadSelector),
}
}
// MapWorkload will return a slice of controller.Requests with 1 resource for
// each resource that selects the specified Workload resource.
func (t *WorkloadSelectionTracker) MapWorkload(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
resIds := t.GetIDsForWorkload(res.Id)
return controller.MakeRequests(nil, resIds), nil
}
func (t *WorkloadSelectionTracker) GetIDsForWorkload(id *pbresource.ID) []*pbresource.ID {
t.lock.Lock()
defer t.lock.Unlock()
var reqs []controller.Request
var result []*pbresource.ID
workloadTreeKey := treePathFromNameOrPrefix(id.GetTenancy(), id.GetName())
// gather the list of all resources that select the specified workload using a prefix match
t.prefixes.WalkPath(res.Id.Name, func(path string, requests []controller.Request) bool {
reqs = append(reqs, requests...)
t.prefixes.WalkPath(workloadTreeKey, func(path string, ids []*pbresource.ID) bool {
result = append(result, ids...)
return false
})
// gather the list of all resources that select the specified workload using an exact match
exactReqs, _ := t.exact.Get(res.Id.Name)
exactReqs, _ := t.exact.Get(workloadTreeKey)
// return the combined list of all resources that select the specified workload
return append(reqs, exactReqs...), nil
return append(result, exactReqs...)
}
// TrackIDForSelector will associate workloads matching the specified workload
@ -64,7 +75,8 @@ func (t *WorkloadSelectionTracker) TrackIDForSelector(id *pbresource.ID, selecto
t.lock.Lock()
defer t.lock.Unlock()
if previousSelector, found := t.workloadSelectors[id.Name]; found {
ref := resource.NewReferenceKey(id)
if previousSelector, found := t.workloadSelectors[ref]; found {
if stringslice.Equal(previousSelector.Names, selector.Names) &&
stringslice.Equal(previousSelector.Prefixes, selector.Prefixes) {
// the selector is unchanged so do nothing
@ -81,24 +93,28 @@ func (t *WorkloadSelectionTracker) TrackIDForSelector(id *pbresource.ID, selecto
// loop over all the exact matching rules and associate those workload names
// with the given resource id
for _, name := range selector.GetNames() {
key := treePathFromNameOrPrefix(id.GetTenancy(), name)
// lookup any resource id associations for the given workload name
leaf, _ := t.exact.Get(name)
leaf, _ := t.exact.Get(key)
// append the ID to the existing request list
t.exact.Insert(name, append(leaf, controller.Request{ID: id}))
t.exact.Insert(key, append(leaf, id))
}
// loop over all the prefix matching rules and associate those prefixes
// with the given resource id.
for _, prefix := range selector.GetPrefixes() {
key := treePathFromNameOrPrefix(id.GetTenancy(), prefix)
// lookup any resource id associations for the given workload name prefix
leaf, _ := t.prefixes.Get(prefix)
leaf, _ := t.prefixes.Get(key)
// append the new resource ID to the existing request list
t.prefixes.Insert(prefix, append(leaf, controller.Request{ID: id}))
t.prefixes.Insert(key, append(leaf, id))
}
t.workloadSelectors[id.Name] = selector
t.workloadSelectors[ref] = selector
}
// UntrackID causes the tracker to stop tracking the given resource ID
@ -108,56 +124,82 @@ func (t *WorkloadSelectionTracker) UntrackID(id *pbresource.ID) {
t.untrackID(id)
}
// GetSelector returns the currently stored selector for the given ID, or nil
// if the ID is not being tracked.
func (t *WorkloadSelectionTracker) GetSelector(id *pbresource.ID) *pbcatalog.WorkloadSelector {
	t.lock.Lock()
	defer t.lock.Unlock()

	key := resource.NewReferenceKey(id)
	return t.workloadSelectors[key]
}
// untrackID should be called to stop tracking a resource ID.
// This method assumes the lock is already held. Besides modifying
// the prefix & name trees to not reference this ID, it will also
// delete any corresponding entry within the workloadSelectors map
func (t *WorkloadSelectionTracker) untrackID(id *pbresource.ID) {
selector, found := t.workloadSelectors[id.Name]
ref := resource.NewReferenceKey(id)
selector, found := t.workloadSelectors[ref]
if !found {
return
}
removeIDFromTreeAtPaths(t.exact, id, selector.Names)
removeIDFromTreeAtPaths(t.prefixes, id, selector.Prefixes)
exactTreePaths := make([]string, len(selector.GetNames()))
for i, name := range selector.GetNames() {
exactTreePaths[i] = treePathFromNameOrPrefix(id.GetTenancy(), name)
}
prefixTreePaths := make([]string, len(selector.GetPrefixes()))
for i, prefix := range selector.GetPrefixes() {
prefixTreePaths[i] = treePathFromNameOrPrefix(id.GetTenancy(), prefix)
}
removeIDFromTreeAtPaths(t.exact, id, exactTreePaths)
removeIDFromTreeAtPaths(t.prefixes, id, prefixTreePaths)
// If we don't do this deletion then reinsertion of the id for
// tracking in the future could prevent selection criteria from
// being properly inserted into the radix trees.
delete(t.workloadSelectors, id.Name)
delete(t.workloadSelectors, ref)
}
// removeIDFromTree will remove the given resource ID from all leaf nodes in the radix tree.
func removeIDFromTreeAtPaths(t *radix.Tree[[]controller.Request], id *pbresource.ID, paths []string) {
func removeIDFromTreeAtPaths(t *radix.Tree[[]*pbresource.ID], id *pbresource.ID, paths []string) {
for _, path := range paths {
requests, _ := t.Get(path)
ids, _ := t.Get(path)
foundIdx := -1
for idx, req := range requests {
if resource.EqualID(req.ID, id) {
for idx, resID := range ids {
if resource.EqualID(resID, id) {
foundIdx = idx
break
}
}
if foundIdx != -1 {
l := len(requests)
l := len(ids)
if l == 1 {
requests = nil
} else if foundIdx == l-1 {
requests = requests[:foundIdx]
} else if foundIdx == 0 {
requests = requests[1:]
if foundIdx == l-1 {
ids = ids[:foundIdx]
} else {
requests = append(requests[:foundIdx], requests[foundIdx+1:]...)
ids = slices.Delete(ids, foundIdx, foundIdx+1)
}
if len(requests) > 1 {
t.Insert(path, requests)
if len(ids) > 0 {
t.Insert(path, ids)
} else {
t.Delete(path)
}
}
}
}
// treePathFromNameOrPrefix computes the radix tree key for a selector name or
// prefix within the given tenancy. Keys have the form:
// <partition>/<peer>/<namespace>/<name or prefix>.
func treePathFromNameOrPrefix(tenancy *pbresource.Tenancy, nameOrPrefix string) string {
	return tenancy.GetPartition() + "/" +
		tenancy.GetPeerName() + "/" +
		tenancy.GetNamespace() + "/" +
		nameOrPrefix
}

View File

@ -0,0 +1,383 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package selectiontracker
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/radix"
"github.com/hashicorp/consul/internal/resource"
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/prototest"
)
var (
	// workloadData is a minimal valid workload payload shared by all test
	// workloads; the tracker only cares about workload names, not data.
	workloadData = &pbcatalog.Workload{
		Addresses: []*pbcatalog.WorkloadAddress{
			{
				Host: "198.18.0.1",
			},
		},
		Ports: map[string]*pbcatalog.WorkloadPort{
			"http": {
				Port:     8080,
				Protocol: pbcatalog.Protocol_PROTOCOL_HTTP,
			},
		},
	}

	// tenancyCases enumerates the tenancy combinations the suite exercises so
	// tracking is verified to be isolated per partition/namespace/peer.
	tenancyCases = map[string]*pbresource.Tenancy{
		"default": resource.DefaultNamespacedTenancy(),
		"bar ns, default partition, local peer": {
			Partition: "default",
			Namespace: "bar",
			PeerName:  "local",
		},
		"default ns, baz partition, local peer": {
			Partition: "baz",
			Namespace: "default",
			PeerName:  "local",
		},
		"bar ns, baz partition, local peer": {
			Partition: "baz",
			Namespace: "bar",
			PeerName:  "local",
		},
		"bar ns, baz partition, non-local peer": {
			Partition: "baz",
			Namespace: "bar",
			PeerName:  "non-local",
		},
	}
)
// TestRemoveIDFromTreeAtPaths covers every slice-modification branch of
// removeIDFromTreeAtPaths (remove from beginning, middle, end, sole element)
// plus the no-match case, and verifies that a leaf emptied of IDs is deleted
// from the tree entirely.
func TestRemoveIDFromTreeAtPaths(t *testing.T) {
	tree := radix.New[[]*pbresource.ID]()

	tenancy := resource.DefaultNamespacedTenancy()
	toRemove := rtest.Resource(pbcatalog.ServiceEndpointsType, "blah").WithTenancy(tenancy).ID()
	other1 := rtest.Resource(pbcatalog.ServiceEndpointsType, "other1").WithTenancy(tenancy).ID()
	other2 := rtest.Resource(pbcatalog.ServiceEndpointsType, "other2").WithTenancy(tenancy).ID()

	// we are trying to create a tree such that removal of the toRemove id causes a
	// few things to happen.
	//
	// * All the slice modification conditions are executed
	//   - removal from beginning of the list
	//   - removal from the end of the list
	//   - removal of only element in the list
	//   - removal from middle of the list
	// * Paths without matching ids are ignored

	notMatching := []*pbresource.ID{
		other1,
		other2,
	}

	matchAtBeginning := []*pbresource.ID{
		toRemove,
		other1,
		other2,
	}

	// For this case, we only add one other not matching to test that we don't remove
	// non-matching ones.
	matchAtEnd := []*pbresource.ID{
		other1,
		toRemove,
	}

	matchInMiddle := []*pbresource.ID{
		other1,
		toRemove,
		other2,
	}

	matchOnly := []*pbresource.ID{
		toRemove,
	}

	noMatchKey := treePathFromNameOrPrefix(tenancy, "no-match")
	matchBeginningKey := treePathFromNameOrPrefix(tenancy, "match-beginning")
	matchEndKey := treePathFromNameOrPrefix(tenancy, "match-end")
	matchMiddleKey := treePathFromNameOrPrefix(tenancy, "match-middle")
	matchOnlyKey := treePathFromNameOrPrefix(tenancy, "match-only")

	tree.Insert(noMatchKey, notMatching)
	tree.Insert(matchBeginningKey, matchAtBeginning)
	tree.Insert(matchEndKey, matchAtEnd)
	tree.Insert(matchMiddleKey, matchInMiddle)
	tree.Insert(matchOnlyKey, matchOnly)

	removeIDFromTreeAtPaths(tree, toRemove, []string{
		noMatchKey,
		matchBeginningKey,
		matchEndKey,
		matchMiddleKey,
		matchOnlyKey,
	})

	// The no-match leaf must be left untouched.
	reqs, found := tree.Get(noMatchKey)
	require.True(t, found)
	require.Equal(t, notMatching, reqs)

	// After removal, the remaining IDs equal the notMatching pair.
	reqs, found = tree.Get(matchBeginningKey)
	require.True(t, found)
	require.Equal(t, notMatching, reqs)

	reqs, found = tree.Get(matchEndKey)
	require.True(t, found)
	require.Equal(t, []*pbresource.ID{other1}, reqs)

	reqs, found = tree.Get(matchMiddleKey)
	require.True(t, found)
	require.Equal(t, notMatching, reqs)

	// The last tracked request should cause removal from the tree
	_, found = tree.Get(matchOnlyKey)
	require.False(t, found)
}
// selectionTrackerSuite holds a fresh tracker plus per-tenancy fixture
// resources (endpoints IDs to track, workloads to map) for each test.
type selectionTrackerSuite struct {
	suite.Suite

	rt      controller.Runtime
	tracker *WorkloadSelectionTracker

	// The test setup adds resources with various tenancy settings. Because tenancies are stored in a map,
	// it adds "free" order randomization, and so we need to remember the order in which those tenancy were executed.
	executedTenancies []*pbresource.Tenancy
	// The i-th entry of each slice below belongs to executedTenancies[i].
	endpointsFoo      []*pbresource.ID
	endpointsBar      []*pbresource.ID
	workloadsAPI      []*pbresource.Resource
	workloadsWeb      []*pbresource.Resource
}
// SetupTest creates a fresh tracker and, for every tenancy case, builds the
// "foo"/"bar" endpoints IDs and "api-1"/"web-1" workloads, recording the
// iteration order in executedTenancies so later assertions can index by it.
func (suite *selectionTrackerSuite) SetupTest() {
	suite.tracker = New()

	for _, tenancy := range tenancyCases {
		suite.executedTenancies = append(suite.executedTenancies, tenancy)

		endpointsFooID := rtest.Resource(pbcatalog.ServiceEndpointsType, "foo").
			WithTenancy(tenancy).ID()
		suite.endpointsFoo = append(suite.endpointsFoo, endpointsFooID)

		endpointsBarID := rtest.Resource(pbcatalog.ServiceEndpointsType, "bar").
			WithTenancy(tenancy).ID()
		suite.endpointsBar = append(suite.endpointsBar, endpointsBarID)

		suite.workloadsAPI = append(suite.workloadsAPI, rtest.Resource(pbcatalog.WorkloadType, "api-1").
			WithData(suite.T(), workloadData).
			WithTenancy(tenancy).
			Build())
		suite.workloadsWeb = append(suite.workloadsWeb, rtest.Resource(pbcatalog.WorkloadType, "web-1").
			WithData(suite.T(), workloadData).
			WithTenancy(tenancy).
			Build())
	}
}
// TearDownTest clears all per-test fixture slices so the next test starts
// from a clean slate.
func (suite *selectionTrackerSuite) TearDownTest() {
	suite.endpointsFoo = nil
	suite.endpointsBar = nil
	suite.workloadsAPI = nil
	suite.workloadsWeb = nil
	suite.executedTenancies = nil
}
// requireMappedIDs asserts that mapping the given workload through the
// tracker yields exactly one request per expected ID and nothing else.
func (suite *selectionTrackerSuite) requireMappedIDs(t *testing.T, workload *pbresource.Resource, ids ...*pbresource.ID) {
	t.Helper()

	reqs, err := suite.tracker.MapWorkload(context.Background(), suite.rt, workload)
	// Assert via the passed-in *testing.T (not suite.T()) so that failures
	// inside subtests are attributed correctly, consistent with the other
	// assertions in this helper.
	require.NoError(t, err)
	require.Len(t, reqs, len(ids))
	for _, id := range ids {
		prototest.AssertContainsElement(t, reqs, controller.Request{ID: id})
	}
}
// requireMappedIDsAllTenancies asserts, for every tenancy exercised in
// SetupTest, that mapping the i-th workload yields exactly the i-th element
// of each expected per-tenancy ID slice.
func (suite *selectionTrackerSuite) requireMappedIDsAllTenancies(t *testing.T, workloads []*pbresource.Resource, ids ...[]*pbresource.ID) {
	t.Helper()

	for i := range suite.executedTenancies {
		reqs, err := suite.tracker.MapWorkload(context.Background(), suite.rt, workloads[i])
		// Assert via the passed-in *testing.T (not suite.T()) so failures in
		// subtests are attributed correctly, consistent with the assertions
		// below.
		require.NoError(t, err)
		require.Len(t, reqs, len(ids))
		for _, id := range ids {
			prototest.AssertContainsElement(t, reqs, controller.Request{ID: id[i]})
		}
	}
}
// trackIDForSelectorInAllTenancies associates the selector with the
// per-tenancy resource ID for every tenancy exercised in SetupTest.
func (suite *selectionTrackerSuite) trackIDForSelectorInAllTenancies(ids []*pbresource.ID, selector *pbcatalog.WorkloadSelector) {
	suite.T().Helper()

	for i := 0; i < len(suite.executedTenancies); i++ {
		suite.tracker.TrackIDForSelector(ids[i], selector)
	}
}
// TestMapWorkload_Empty verifies that mapping a workload while nothing is
// tracked yields no requests.
func (suite *selectionTrackerSuite) TestMapWorkload_Empty() {
	workload := rtest.Resource(pbcatalog.WorkloadType, "api-1").
		WithData(suite.T(), workloadData).
		Build()
	suite.requireMappedIDs(suite.T(), workload)
}
// TestUntrackID_Empty proves that untracking an ID that was never tracked is
// a safe no-op — there are no assertions, we only care that it doesn't panic.
func (suite *selectionTrackerSuite) TestUntrackID_Empty() {
	id := rtest.Resource(pbcatalog.ServiceEndpointsType, "foo").ID()
	suite.tracker.UntrackID(id)
}
// TestTrackAndMap_SingleResource_MultipleWorkloadMappings verifies that one
// tracked resource is mapped back from multiple distinct workloads, via both
// exact-name ("web-1") and prefix ("api-") selection, in every tenancy.
func (suite *selectionTrackerSuite) TestTrackAndMap_SingleResource_MultipleWorkloadMappings() {
	// This test aims to prove that tracking a resources workload selector and
	// then mapping a workload back to that resource works as expected when the
	// result set is a single resource. This test will ensure that both prefix
	// and exact match criteria are handle correctly and that one resource
	// can be mapped from multiple distinct workloads.

	// Create resources for the test and track endpoints.
	suite.trackIDForSelectorInAllTenancies(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
		Names:    []string{"bar", "api", "web-1"},
		Prefixes: []string{"api-"},
	})

	// "api-1" maps via the "api-" prefix; "web-1" maps via its exact name.
	suite.requireMappedIDsAllTenancies(suite.T(), suite.workloadsAPI, suite.endpointsFoo)
	suite.requireMappedIDsAllTenancies(suite.T(), suite.workloadsWeb, suite.endpointsFoo)
}
// TestTrackAndMap_MultiResource_SingleWorkloadMapping proves that when two
// resources select the same workload, mapping that workload produces one
// request per selecting resource.
func (suite *selectionTrackerSuite) TestTrackAndMap_MultiResource_SingleWorkloadMapping() {
	selectors := map[string]*pbcatalog.WorkloadSelector{
		"names":    {Names: []string{"api-1"}},
		"prefixes": {Prefixes: []string{"api"}},
	}

	for name, selector := range selectors {
		suite.T().Run(name, func(t *testing.T) {
			for i := range suite.executedTenancies {
				// Register both endpoints resources with identical criteria.
				suite.tracker.TrackIDForSelector(suite.endpointsFoo[i], selector)
				suite.tracker.TrackIDForSelector(suite.endpointsBar[i], selector)
			}

			// Mapping the workload should now surface both resource IDs.
			suite.requireMappedIDsAllTenancies(t, suite.workloadsAPI, suite.endpointsFoo, suite.endpointsBar)
		})
	}
}
// TestDuplicateTracking proves that tracking identical selection criteria for
// the same ID multiple times does not inflate the mapping: the workload still
// maps to exactly one request.
func (suite *selectionTrackerSuite) TestDuplicateTracking() {
	workload := rtest.Resource(pbcatalog.WorkloadType, "api-1").
		WithTenancy(resource.DefaultNamespacedTenancy()).
		WithData(suite.T(), workloadData).
		Build()

	endpointsID := rtest.Resource(pbcatalog.ServiceEndpointsType, "foo").
		WithTenancy(resource.DefaultNamespacedTenancy()).
		ID()

	// Track the same criteria three times; after the first call the remaining
	// two should be no-ops. A fresh selector value is built each iteration so
	// we don't rely on pointer identity.
	for i := 0; i < 3; i++ {
		suite.tracker.TrackIDForSelector(endpointsID, &pbcatalog.WorkloadSelector{
			Prefixes: []string{"api-"},
		})
	}

	// Regardless of how many times we tracked, only a single request appears.
	suite.requireMappedIDs(suite.T(), workload, endpointsID)
}
// TestModifyTracking proves that re-tracking a resource with different
// selection criteria leaves the tracker in a consistent state: newly added
// criteria start matching, removed criteria stop matching, and criteria that
// remain are unaffected. The steps below are order-dependent.
func (suite *selectionTrackerSuite) TestModifyTracking() {
	// This test aims to prove that modifying selection criteria for a resource
	// works as expected. Adding new criteria results in all being tracked.
	// Removal of some criteria doesn't result in removal of all etc. More or
	// less we want to ensure that updating selection criteria leaves the
	// tracker in a consistent/expected state.

	// Step 1: select only web-1 by exact name.
	suite.trackIDForSelectorInAllTenancies(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
		Names: []string{"web-1"},
	})
	// ensure that api-1 isn't mapped but web-1 is
	suite.requireMappedIDsAllTenancies(suite.T(), suite.workloadsAPI)
	suite.requireMappedIDsAllTenancies(suite.T(), suite.workloadsWeb, suite.endpointsFoo)

	// Step 2: now also track the api- prefix (web-1 remains selected).
	suite.trackIDForSelectorInAllTenancies(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
		Names:    []string{"web-1"},
		Prefixes: []string{"api-"},
	})
	// ensure that both workloads are mapped appropriately
	suite.requireMappedIDsAllTenancies(suite.T(), suite.workloadsAPI, suite.endpointsFoo)
	suite.requireMappedIDsAllTenancies(suite.T(), suite.workloadsWeb, suite.endpointsFoo)

	// Step 3: now remove the web tracking, keeping only the api- prefix.
	suite.trackIDForSelectorInAllTenancies(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
		Prefixes: []string{"api-"},
	})
	// ensure that only api-1 is mapped
	suite.requireMappedIDsAllTenancies(suite.T(), suite.workloadsAPI, suite.endpointsFoo)
	suite.requireMappedIDsAllTenancies(suite.T(), suite.workloadsWeb)
}
// TestRemove proves that once a resource is untracked, mapping a workload it
// previously selected no longer returns that resource's ID.
func (suite *selectionTrackerSuite) TestRemove() {
	// Select the web-1 workload by exact name.
	suite.trackIDForSelectorInAllTenancies(suite.endpointsFoo, &pbcatalog.WorkloadSelector{
		Names: []string{"web-1"},
	})

	// Sanity check: web-1 maps to the resource while it is tracked.
	suite.requireMappedIDsAllTenancies(suite.T(), suite.workloadsWeb, suite.endpointsFoo)

	// Untrack the resource in every tenancy...
	for i := range suite.executedTenancies {
		suite.tracker.UntrackID(suite.endpointsFoo[i])
	}

	// ...and confirm the mapping is gone.
	suite.requireMappedIDsAllTenancies(suite.T(), suite.workloadsWeb)
}
// TestWorkloadSelectionSuite wires selectionTrackerSuite into the standard
// `go test` runner via testify's suite package.
func TestWorkloadSelectionSuite(t *testing.T) {
	suite.Run(t, new(selectionTrackerSuite))
}

View File

@ -0,0 +1,18 @@
// Code generated by protoc-gen-go-binary. DO NOT EDIT.
// source: pbmesh/v2beta1/computed_proxy_configuration.proto
package meshv2beta1
import (
"google.golang.org/protobuf/proto"
)
// MarshalBinary implements encoding.BinaryMarshaler
// by delegating to the protobuf wire-format encoder.
// NOTE(review): this file is generated by protoc-gen-go-binary; regenerate
// rather than hand-editing.
func (msg *ComputedProxyConfiguration) MarshalBinary() ([]byte, error) {
	return proto.Marshal(msg)
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler
// by decoding the protobuf wire format into the receiver in place.
func (msg *ComputedProxyConfiguration) UnmarshalBinary(b []byte) error {
	return proto.Unmarshal(b, msg)
}

View File

@ -0,0 +1,199 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.30.0
// protoc (unknown)
// source: pbmesh/v2beta1/computed_proxy_configuration.proto
package meshv2beta1
import (
_ "github.com/hashicorp/consul/proto-public/pbresource"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
// Compile-time guards: fail the build if this generated code and the linked
// protoimpl runtime have drifted too far apart in either direction.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// ComputedProxyConfiguration is the generated Go type for the
// hashicorp.consul.mesh.v2beta1.ComputedProxyConfiguration message.
// NOTE(review): generated by protoc-gen-go — regenerate instead of editing.
type ComputedProxyConfiguration struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// dynamic_config is the configuration that could be changed
	// dynamically (i.e. without needing restart).
	DynamicConfig *DynamicConfig `protobuf:"bytes,2,opt,name=dynamic_config,json=dynamicConfig,proto3" json:"dynamic_config,omitempty"`
	// bootstrap_config is the configuration that requires proxies
	// to be restarted to be applied.
	BootstrapConfig *BootstrapConfig `protobuf:"bytes,3,opt,name=bootstrap_config,json=bootstrapConfig,proto3" json:"bootstrap_config,omitempty"`
}
// Reset restores the message to its zero value.
func (x *ComputedProxyConfiguration) Reset() {
	*x = ComputedProxyConfiguration{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pbmesh_v2beta1_computed_proxy_configuration_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message using the runtime's default text formatter.
func (x *ComputedProxyConfiguration) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks this type as a protobuf message.
func (*ComputedProxyConfiguration) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *ComputedProxyConfiguration) ProtoReflect() protoreflect.Message {
	mi := &file_pbmesh_v2beta1_computed_proxy_configuration_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ComputedProxyConfiguration.ProtoReflect.Descriptor instead.
func (*ComputedProxyConfiguration) Descriptor() ([]byte, []int) {
	return file_pbmesh_v2beta1_computed_proxy_configuration_proto_rawDescGZIP(), []int{0}
}

// GetDynamicConfig returns the dynamic_config field; nil-receiver safe.
func (x *ComputedProxyConfiguration) GetDynamicConfig() *DynamicConfig {
	if x != nil {
		return x.DynamicConfig
	}
	return nil
}

// GetBootstrapConfig returns the bootstrap_config field; nil-receiver safe.
func (x *ComputedProxyConfiguration) GetBootstrapConfig() *BootstrapConfig {
	if x != nil {
		return x.BootstrapConfig
	}
	return nil
}
var File_pbmesh_v2beta1_computed_proxy_configuration_proto protoreflect.FileDescriptor

// rawDesc is the serialized FileDescriptorProto for
// computed_proxy_configuration.proto, emitted by protoc. Do not hand-edit.
var file_pbmesh_v2beta1_computed_proxy_configuration_proto_rawDesc = []byte{
	0x0a, 0x31, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31,
	0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f,
	0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63,
	0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74,
	0x61, 0x31, 0x1a, 0x28, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x32, 0x62, 0x65, 0x74,
	0x61, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x70, 0x62,
	0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd4, 0x01, 0x0a, 0x1a, 0x43,
	0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66,
	0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x0e, 0x64, 0x79, 0x6e,
	0x61, 0x6d, 0x69, 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28,
	0x0b, 0x32, 0x2c, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f,
	0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61,
	0x31, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
	0x0d, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x59,
	0x0a, 0x10, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
	0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69,
	0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68,
	0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
	0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
	0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08,
	0x03, 0x42, 0xa0, 0x02, 0x0a, 0x21, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63,
	0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e,
	0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x1f, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65,
	0x64, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74,
	0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x43, 0x67, 0x69, 0x74, 0x68,
	0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
	0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75,
	0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x32, 0x62, 0x65,
	0x74, 0x61, 0x31, 0x3b, 0x6d, 0x65, 0x73, 0x68, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xa2,
	0x02, 0x03, 0x48, 0x43, 0x4d, 0xaa, 0x02, 0x1d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
	0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x2e, 0x56, 0x32,
	0x62, 0x65, 0x74, 0x61, 0x31, 0xca, 0x02, 0x1d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
	0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, 0x56, 0x32,
	0x62, 0x65, 0x74, 0x61, 0x31, 0xe2, 0x02, 0x29, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72,
	0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, 0x68, 0x5c, 0x56, 0x32,
	0x62, 0x65, 0x74, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
	0x61, 0xea, 0x02, 0x20, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a, 0x43,
	0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x4d, 0x65, 0x73, 0x68, 0x3a, 0x3a, 0x56, 0x32, 0x62,
	0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_pbmesh_v2beta1_computed_proxy_configuration_proto_rawDescOnce sync.Once
	file_pbmesh_v2beta1_computed_proxy_configuration_proto_rawDescData = file_pbmesh_v2beta1_computed_proxy_configuration_proto_rawDesc
)

// rawDescGZIP lazily compresses the raw descriptor exactly once and returns
// the compressed form used by the legacy Descriptor() accessor.
func file_pbmesh_v2beta1_computed_proxy_configuration_proto_rawDescGZIP() []byte {
	file_pbmesh_v2beta1_computed_proxy_configuration_proto_rawDescOnce.Do(func() {
		file_pbmesh_v2beta1_computed_proxy_configuration_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbmesh_v2beta1_computed_proxy_configuration_proto_rawDescData)
	})
	return file_pbmesh_v2beta1_computed_proxy_configuration_proto_rawDescData
}
var file_pbmesh_v2beta1_computed_proxy_configuration_proto_msgTypes = make([]protoimpl.MessageInfo, 1)

// goTypes lists the Go types for every message referenced by this file, in
// descriptor order; depIdxs below indexes into it.
var file_pbmesh_v2beta1_computed_proxy_configuration_proto_goTypes = []interface{}{
	(*ComputedProxyConfiguration)(nil), // 0: hashicorp.consul.mesh.v2beta1.ComputedProxyConfiguration
	(*DynamicConfig)(nil),              // 1: hashicorp.consul.mesh.v2beta1.DynamicConfig
	(*BootstrapConfig)(nil),            // 2: hashicorp.consul.mesh.v2beta1.BootstrapConfig
}
var file_pbmesh_v2beta1_computed_proxy_configuration_proto_depIdxs = []int32{
	1, // 0: hashicorp.consul.mesh.v2beta1.ComputedProxyConfiguration.dynamic_config:type_name -> hashicorp.consul.mesh.v2beta1.DynamicConfig
	2, // 1: hashicorp.consul.mesh.v2beta1.ComputedProxyConfiguration.bootstrap_config:type_name -> hashicorp.consul.mesh.v2beta1.BootstrapConfig
	2, // [2:2] is the sub-list for method output_type
	2, // [2:2] is the sub-list for method input_type
	2, // [2:2] is the sub-list for extension type_name
	2, // [2:2] is the sub-list for extension extendee
	0, // [0:2] is the sub-list for field type_name
}
func init() { file_pbmesh_v2beta1_computed_proxy_configuration_proto_init() }

// file_pbmesh_v2beta1_computed_proxy_configuration_proto_init builds this
// file's descriptor and message type info exactly once; repeat calls return
// immediately.
func file_pbmesh_v2beta1_computed_proxy_configuration_proto_init() {
	if File_pbmesh_v2beta1_computed_proxy_configuration_proto != nil {
		return
	}
	// Messages here reference types from proxy_configuration.proto, so that
	// file must be initialized first.
	file_pbmesh_v2beta1_proxy_configuration_proto_init()
	if !protoimpl.UnsafeEnabled {
		file_pbmesh_v2beta1_computed_proxy_configuration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ComputedProxyConfiguration); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_pbmesh_v2beta1_computed_proxy_configuration_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   1,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_pbmesh_v2beta1_computed_proxy_configuration_proto_goTypes,
		DependencyIndexes: file_pbmesh_v2beta1_computed_proxy_configuration_proto_depIdxs,
		MessageInfos:      file_pbmesh_v2beta1_computed_proxy_configuration_proto_msgTypes,
	}.Build()
	File_pbmesh_v2beta1_computed_proxy_configuration_proto = out.File
	// Release the builder inputs; the runtime now owns the built state.
	file_pbmesh_v2beta1_computed_proxy_configuration_proto_rawDesc = nil
	file_pbmesh_v2beta1_computed_proxy_configuration_proto_goTypes = nil
	file_pbmesh_v2beta1_computed_proxy_configuration_proto_depIdxs = nil
}

View File

@ -0,0 +1,21 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
syntax = "proto3";
package hashicorp.consul.mesh.v2beta1;
import "pbmesh/v2beta1/proxy_configuration.proto";
import "pbresource/annotations.proto";
// ComputedProxyConfiguration holds the fully computed proxy configuration
// for a workload, produced by merging the ProxyConfiguration resources that
// select it.
message ComputedProxyConfiguration {
  option (hashicorp.consul.resource.spec) = {scope: SCOPE_NAMESPACE};

  // NOTE(review): field number 1 is intentionally unused here — presumably it
  // mirrors ProxyConfiguration, whose field 1 is the workload selector that a
  // computed resource no longer needs; confirm before ever reusing number 1.

  // dynamic_config is the configuration that could be changed
  // dynamically (i.e. without needing restart).
  DynamicConfig dynamic_config = 2;

  // bootstrap_config is the configuration that requires proxies
  // to be restarted to be applied.
  BootstrapConfig bootstrap_config = 3;
}

View File

@ -10,18 +10,25 @@ const (
GroupName = "mesh"
Version = "v2beta1"
ComputedRoutesKind = "ComputedRoutes"
DestinationPolicyKind = "DestinationPolicy"
DestinationsKind = "Destinations"
DestinationsConfigurationKind = "DestinationsConfiguration"
GRPCRouteKind = "GRPCRoute"
HTTPRouteKind = "HTTPRoute"
ProxyConfigurationKind = "ProxyConfiguration"
ProxyStateTemplateKind = "ProxyStateTemplate"
TCPRouteKind = "TCPRoute"
ComputedProxyConfigurationKind = "ComputedProxyConfiguration"
ComputedRoutesKind = "ComputedRoutes"
DestinationPolicyKind = "DestinationPolicy"
DestinationsKind = "Destinations"
DestinationsConfigurationKind = "DestinationsConfiguration"
GRPCRouteKind = "GRPCRoute"
HTTPRouteKind = "HTTPRoute"
ProxyConfigurationKind = "ProxyConfiguration"
ProxyStateTemplateKind = "ProxyStateTemplate"
TCPRouteKind = "TCPRoute"
)
var (
ComputedProxyConfigurationType = &pbresource.Type{
Group: GroupName,
GroupVersion: Version,
Kind: ComputedProxyConfigurationKind,
}
ComputedRoutesType = &pbresource.Type{
Group: GroupName,
GroupVersion: Version,