diff --git a/docs/v2-architecture/controller-architecture/controllers.md b/docs/v2-architecture/controller-architecture/controllers.md index b0fe269630..e521a07208 100644 --- a/docs/v2-architecture/controller-architecture/controllers.md +++ b/docs/v2-architecture/controller-architecture/controllers.md @@ -191,6 +191,57 @@ a `Baz` resource gets updated to no longer have a value, it should not be repres 3. Update the dependency mappers to query the cache index *in addition to* looking at the current state of the dependent resource. In our example above the `Baz` dependency mapper could use the [`MultiMapper`] to combine querying the cache for `Baz` types that currently should be associated with a `ComputedBaz` and querying the index added in step 2 for previous references. +#### Footgun: Needing Bound References + +When an interior (mutable) foreign key pointer on watched data is used to +determine the resources's applicability in a dependency mapper, it is subject +to the "orphaned computed resource" problem. + +(An example of this would be a ParentRef on an xRoute, or the Destination field +of a TrafficPermission.) + +When you edit the mutable pointer to point elsewhere, the DependencyMapper will +only witness the NEW value and will trigger reconciles for things derived from +the NEW pointer, but side effects from a prior reconcile using the OLD pointer +will be orphaned until some other event triggers that reconcile (if ever). + +This applies equally to all varieties of controller: + +- creates computed resources +- only updates status conditions on existing resources +- has other external side effects (xDS controller writes envoy config over a stream) + +To solve this we need to collect the list of bound references that were +"ingredients" into a computed resource's output and persist them on the newly +written resource. Then we load them up and index them such that we can use them +to AUGMENT a mapper event with additional maps using the OLD data as well. + +We have only actively worked to solve this for the computed resource flavor of +controller: + +1. The top level of the resource data protobuf needs a + `BoundReferences []*pbresource.Reference` field. + +2. Use a `*resource.BoundReferenceCollector` to capture any resource during + `Reconcile` that directly contributes to the final output resource data + payload. + +3. Call `brc.List()` on the above and set it to the `BoundReferences` field on + the computed resource before persisting. + +4. Use `indexers.BoundRefsIndex` to index this field on the primary type of the + controller. + +5. Create `boundRefsMapper := dependency.CacheListMapper(ZZZ, boundRefsIndex.Name())` + +6. For each watched type, wrap its DependencyMapper with + `dependency.MultiMapper(boundRefsMapper, ZZZ)` + +7. That's it. + +This will cause each reconcile to index the prior list of inputs and augment +the results of future mapper events with historical references. 
+ ### Custom Watches In some cases, we may want to trigger reconciles for events that aren't generated from CRUD operations on resources, for example diff --git a/internal/catalog/exports.go b/internal/catalog/exports.go index 4d8f69f670..322df4adcd 100644 --- a/internal/catalog/exports.go +++ b/internal/catalog/exports.go @@ -96,6 +96,10 @@ func ValidateSelector(sel *pbcatalog.WorkloadSelector, allowEmpty bool) error { return types.ValidateSelector(sel, allowEmpty) } +func ValidatePortName(id string) error { + return types.ValidatePortName(id) +} + func ValidateServicePortID(id string) error { return types.ValidateServicePortID(id) } diff --git a/internal/controller/controller.go b/internal/controller/controller.go index 4515b5d686..8d4297e00c 100644 --- a/internal/controller/controller.go +++ b/internal/controller/controller.go @@ -9,11 +9,12 @@ import ( "strings" "time" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/consul/internal/controller/cache" "github.com/hashicorp/consul/internal/controller/cache/index" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/go-hclog" ) // DependencyMapper is called when a dependency watched via WithWatch is changed @@ -189,6 +190,27 @@ func (ctl *Controller) buildCache() cache.Cache { return c } +// dryRunMapper will trigger the appropriate DependencyMapper for an update of +// the provided type and return the requested reconciles. +// +// This is mainly to be used by the TestController. +func (ctl *Controller) dryRunMapper( + ctx context.Context, + rt Runtime, + res *pbresource.Resource, +) ([]Request, error) { + if resource.EqualType(ctl.managedTypeWatch.watchedType, res.Id.Type) { + return nil, nil // no-op + } + + for _, w := range ctl.watches { + if resource.EqualType(w.watchedType, res.Id.Type) { + return w.mapper(ctx, rt, res) + } + } + return nil, fmt.Errorf("no mapper for type: %s", resource.TypeToString(res.Id.Type)) +} + // String returns a textual description of the controller, useful for debugging. 
func (ctl *Controller) String() string { watchedTypes := make([]string, 0, len(ctl.watches)) diff --git a/internal/controller/dependency/simple.go b/internal/controller/dependency/simple.go index b154487e29..93284ac5df 100644 --- a/internal/controller/dependency/simple.go +++ b/internal/controller/dependency/simple.go @@ -6,6 +6,8 @@ package dependency import ( "context" + "google.golang.org/protobuf/proto" + "github.com/hashicorp/consul/internal/controller" "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/proto-public/pbresource" @@ -52,3 +54,15 @@ func ReplaceType(desiredType *pbresource.Type) controller.DependencyMapper { }, nil } } + +type DecodedDependencyMapper[T proto.Message] func(context.Context, controller.Runtime, *resource.DecodedResource[T]) ([]controller.Request, error) + +func MapDecoded[T proto.Message](mapper DecodedDependencyMapper[T]) controller.DependencyMapper { + return func(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) { + decoded, err := resource.Decode[T](res) + if err != nil { + return nil, err + } + return mapper(ctx, rt, decoded) + } +} diff --git a/internal/controller/dependency/simple_test.go b/internal/controller/dependency/simple_test.go index c5c260d4a1..b5a671a5cb 100644 --- a/internal/controller/dependency/simple_test.go +++ b/internal/controller/dependency/simple_test.go @@ -8,10 +8,15 @@ import ( "testing" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/consul/internal/controller" - "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/consul/proto/private/prototest" "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/resourcetest" + "github.com/hashicorp/consul/proto-public/pbresource" + pbdemo "github.com/hashicorp/consul/proto/private/pbdemo/v1" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/hashicorp/consul/sdk/testutil" ) func resourceID(group string, version string, kind string, name string) *pbresource.ID { @@ -137,3 +142,51 @@ func TestReplaceType(t *testing.T) { } prototest.AssertDeepEqual(t, expected, reqs[0].ID) } + +func TestMapDecoded(t *testing.T) { + mapper := MapDecoded[*pbdemo.Artist](func(_ context.Context, _ controller.Runtime, res *resource.DecodedResource[*pbdemo.Artist]) ([]controller.Request, error) { + return []controller.Request{ + { + ID: &pbresource.ID{ + Type: res.Id.Type, + Tenancy: res.Id.Tenancy, + // not realistic for how the Artist's Name is intended but we just want to pull + // some data out of the decoded portion and return it. + Name: res.Data.Name, + }, + }, + }, nil + }) + + for _, tenancy := range resourcetest.TestTenancies() { + t.Run(resourcetest.AppendTenancyInfo(t.Name(), tenancy), func(t *testing.T) { + ctx := testutil.TestContext(t) + + res1 := resourcetest.Resource(pbdemo.ArtistType, "foo"). + WithTenancy(tenancy). + WithData(t, &pbdemo.Artist{Name: "something"}). + Build() + + res2 := resourcetest.Resource(pbdemo.ArtistType, "foo"). + WithTenancy(tenancy). + // Wrong data type here to force an error in the outer decoder + WithData(t, &pbdemo.Album{Name: "else"}). 
+ Build() + + reqs, err := mapper(ctx, controller.Runtime{}, res1) + require.NoError(t, err) + require.Len(t, reqs, 1) + + expected := &pbresource.ID{ + Type: res1.Id.Type, + Tenancy: res1.Id.Tenancy, + Name: "something", + } + prototest.AssertDeepEqual(t, expected, reqs[0].ID) + + reqs, err = mapper(ctx, controller.Runtime{}, res2) + require.Nil(t, reqs) + require.Error(t, err) + }) + } +} diff --git a/internal/controller/testing.go b/internal/controller/testing.go index 3b8ecac61f..1f7fb9ce34 100644 --- a/internal/controller/testing.go +++ b/internal/controller/testing.go @@ -6,9 +6,10 @@ package controller import ( "context" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/consul/internal/controller/cache" "github.com/hashicorp/consul/proto-public/pbresource" - "github.com/hashicorp/go-hclog" ) // TestController is most useful when writing unit tests for a controller where @@ -66,3 +67,13 @@ func (tc *TestController) Runtime() Runtime { Cache: tc.cache, } } + +// DryRunMapper will trigger the appropriate DependencyMapper for an update of +// the provided type and return the requested reconciles. +// +// Useful for testing just the DependencyMapper+Cache interactions for chains +// that are more complicated than just a full controller interaction test would +// be able to easily verify. +func (tc *TestController) DryRunMapper(ctx context.Context, res *pbresource.Resource) ([]Request, error) { + return tc.c.dryRunMapper(ctx, tc.Runtime(), res) +} diff --git a/internal/mesh/internal/controllers/implicitdestinations/auth_helper_test.go b/internal/mesh/internal/controllers/implicitdestinations/auth_helper_test.go new file mode 100644 index 0000000000..50ca93e6d6 --- /dev/null +++ b/internal/mesh/internal/controllers/implicitdestinations/auth_helper_test.go @@ -0,0 +1,88 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package implicitdestinations + +import ( + "strings" + "testing" + + "github.com/oklog/ulid/v2" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/internal/auth" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/resourcetest" + rtest "github.com/hashicorp/consul/internal/resource/resourcetest" + pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +// TODO: do this properly and export it from internal/auth/exports.go +// This is a crude approximation suitable for this test. +func ReconcileComputedTrafficPermissions( + t *testing.T, + client *rtest.Client, + id *pbresource.ID, + tpList ...*pbauth.TrafficPermissions, +) *types.DecodedComputedTrafficPermissions { + // TODO: allow this to take a nil client and still execute all of the proper validations etc. + + require.True(t, resource.EqualType(pbauth.ComputedTrafficPermissionsType, id.GetType())) + + registry := resource.NewRegistry() + auth.RegisterTypes(registry) + + merged := &pbauth.ComputedTrafficPermissions{} + added := false + for _, tp := range tpList { + name := strings.ToLower(ulid.Make().String()) + + // Default to request aligned. + if tp.Destination == nil { + tp.Destination = &pbauth.Destination{} + } + if tp.Destination.IdentityName == "" { + tp.Destination.IdentityName = id.Name + } + require.Equal(t, id.Name, tp.Destination.IdentityName) + + res := rtest.Resource(pbauth.TrafficPermissionsType, name). + WithTenancy(id.Tenancy). + WithData(t, tp). 
+ Build() + resourcetest.ValidateAndNormalize(t, registry, res) + + dec := rtest.MustDecode[*pbauth.TrafficPermissions](t, res) + + added = true + + switch dec.Data.Action { + case pbauth.Action_ACTION_ALLOW: + merged.AllowPermissions = append(merged.AllowPermissions, dec.Data.Permissions...) + case pbauth.Action_ACTION_DENY: + merged.DenyPermissions = append(merged.DenyPermissions, dec.Data.Permissions...) + default: + t.Fatalf("Unexpected action: %v", dec.Data.Action) + } + } + + if !added { + merged.IsDefault = true + } + + var res *pbresource.Resource + if client != nil { + res = rtest.ResourceID(id). + WithData(t, merged). + Write(t, client) + } else { + res = rtest.ResourceID(id). + WithData(t, merged). + Build() + resourcetest.ValidateAndNormalize(t, registry, res) + } + + return rtest.MustDecode[*pbauth.ComputedTrafficPermissions](t, res) +} diff --git a/internal/mesh/internal/controllers/implicitdestinations/controller.go b/internal/mesh/internal/controllers/implicitdestinations/controller.go new file mode 100644 index 0000000000..0c5016fb80 --- /dev/null +++ b/internal/mesh/internal/controllers/implicitdestinations/controller.go @@ -0,0 +1,314 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package implicitdestinations + +import ( + "context" + "sort" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/controller/cache" + "github.com/hashicorp/consul/internal/controller/cache/index" + "github.com/hashicorp/consul/internal/controller/dependency" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/storage" + pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +// Future work: this can be optimized to omit: +// +// - destinations denied due to DENY TP +// - only ports exposed by CR or CTP explicitly + +/* +Data Relationships: + +Reconcile: +- read WI[source] (ignore) +- list CTPs by WI[source] + - turn CTP.id -> WI[backend].id + - list SVCs by WI[backend] + - list CRs by SVC[backend] + - turn CR.id -> SVC[dest].id + - emit SVC[dest] + +DepMappers: +- CR SVC[backend] WI[backend] CTP WI[source] CID +- SVC[backend] WI[backend] CTP WI[source] CID +- CTP WI[source] CID +- bound refs for all + +*/ + +func Controller(globalDefaultAllow bool) *controller.Controller { + m := &mapAndTransformer{globalDefaultAllow: globalDefaultAllow} + + boundRefsMapper := dependency.CacheListMapper(pbmesh.ComputedImplicitDestinationsType, boundRefsIndex.Name()) + + return controller.NewController(ControllerID, + pbmesh.ComputedImplicitDestinationsType, + boundRefsIndex, + ). + WithWatch(pbauth.WorkloadIdentityType, + // BoundRefs: none + dependency.ReplaceType(pbmesh.ComputedImplicitDestinationsType), + ). + WithWatch(pbauth.ComputedTrafficPermissionsType, + // BoundRefs: the WI source refs are interior up-pointers and may change. + dependency.MultiMapper(boundRefsMapper, m.MapComputedTrafficPermissions), + ctpBySourceWorkloadIdentityIndex, + ctpByWildcardSourceIndexCreator(globalDefaultAllow), + ). + WithWatch(pbcatalog.ServiceType, + // BoundRefs: the WI slice in the status conds is an interior up-pointer and may change. 
+ dependency.MultiMapper(boundRefsMapper, m.MapService), + serviceByWorkloadIdentityIndex, + ). + WithWatch(pbmesh.ComputedRoutesType, + // BoundRefs: the backend services are interior up-pointers and may change. + dependency.MultiMapper(boundRefsMapper, m.MapComputedRoutes), + computedRoutesByBackendServiceIndex, + ). + WithReconciler(&reconciler{ + defaultAllow: globalDefaultAllow, + }) +} + +type reconciler struct { + defaultAllow bool +} + +func (r *reconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error { + rt.Logger = rt.Logger.With("resource-id", req.ID, "controller", ControllerID) + + wi := resource.ReplaceType(pbauth.WorkloadIdentityType, req.ID) + + workloadIdentity, err := cache.GetDecoded[*pbauth.WorkloadIdentity](rt.Cache, pbauth.WorkloadIdentityType, "id", wi) + if err != nil { + rt.Logger.Error("error retrieving corresponding Workload Identity", "error", err) + return err + } else if workloadIdentity == nil { + rt.Logger.Trace("workload identity has been deleted") + return nil + } + + // generate new CID and compare, if different, write new one, if not return without doing anything + newData, err := r.generateComputedImplicitDestinations(rt, wi) + if err != nil { + rt.Logger.Error("error generating computed implicit destinations", "error", err) + // TODO: update the workload identity with this error as a status condition? + return err + } + + oldData, err := resource.GetDecodedResource[*pbmesh.ComputedImplicitDestinations](ctx, rt.Client, req.ID) + if err != nil { + rt.Logger.Error("error retrieving computed implicit destinations", "error", err) + return err + } + if oldData != nil && proto.Equal(oldData.Data, newData) { + rt.Logger.Trace("computed implicit destinations have not changed") + // there are no changes, and we can return early + return nil + } + rt.Logger.Trace("computed implicit destinations have changed") + + newCID, err := anypb.New(newData) + if err != nil { + rt.Logger.Error("error marshalling implicit destination data", "error", err) + return err + } + rt.Logger.Trace("writing computed implicit destinations") + + _, err = rt.Client.Write(ctx, &pbresource.WriteRequest{ + Resource: &pbresource.Resource{ + Id: req.ID, + Data: newCID, + Owner: workloadIdentity.Resource.Id, + }, + }) + if err != nil { + rt.Logger.Error("error writing new computed implicit destinations", "error", err) + return err + } + rt.Logger.Trace("new computed implicit destinations were successfully written") + + return nil +} + +// generateComputedImplicitDestinations will use all associated Traffic Permissions to create new ComputedImplicitDestinations data +func (r *reconciler) generateComputedImplicitDestinations(rt controller.Runtime, cid *pbresource.ID) (*pbmesh.ComputedImplicitDestinations, error) { + wiID := resource.ReplaceType(pbauth.WorkloadIdentityType, cid) + + // Summary: list CTPs by WI[source] + ctps, err := rt.Cache.List( + pbauth.ComputedTrafficPermissionsType, + ctpBySourceWorkloadIdentityIndex.Name(), + wiID, + ) + if err != nil { + return nil, err + } + + // This covers a foo.bar.* wildcard. + wildNameCTPs, err := rt.Cache.List( + pbauth.ComputedTrafficPermissionsType, + ctpByWildcardSourceIndexName, + tenantedName{ + Partition: wiID.GetTenancy().GetPartition(), + Namespace: wiID.GetTenancy().GetNamespace(), + Name: storage.Wildcard, + }, + ) + if err != nil { + return nil, err + } + ctps = append(ctps, wildNameCTPs...) + + // This covers a foo.*.* wildcard. 
+ wildNamespaceCTPs, err := rt.Cache.List( + pbauth.ComputedTrafficPermissionsType, + ctpByWildcardSourceIndexName, + tenantedName{ + Partition: wiID.GetTenancy().GetPartition(), + Namespace: storage.Wildcard, + Name: storage.Wildcard, + }, + ) + if err != nil { + return nil, err + } + ctps = append(ctps, wildNamespaceCTPs...) + + // This covers the default-allow + default-CTP option. + wildPartitionCTPs, err := rt.Cache.List( + pbauth.ComputedTrafficPermissionsType, + ctpByWildcardSourceIndexName, + tenantedName{ + Partition: storage.Wildcard, + Namespace: storage.Wildcard, + Name: storage.Wildcard, + }, + ) + if err != nil { + return nil, err + } + ctps = append(ctps, wildPartitionCTPs...) + + var ( + out = &pbmesh.ComputedImplicitDestinations{} + seenDest = make(map[resource.ReferenceKey]struct{}) + boundRefCollector = resource.NewBoundReferenceCollector() + ) + for _, ctp := range ctps { + // CTP is name aligned with WI[backend]. + backendWorkloadID := resource.ReplaceType(pbauth.WorkloadIdentityType, ctp.Id) + + // Find all services that can reach this WI. + svcList, err := cache.ListDecoded[*pbcatalog.Service]( + rt.Cache, + pbcatalog.ServiceType, + serviceByWorkloadIdentityIndex.Name(), + backendWorkloadID, + ) + if err != nil { + return nil, err + } + + for _, svc := range svcList { + // Find all computed routes that have at least one backend target of this service. + crList, err := rt.Cache.List( + pbmesh.ComputedRoutesType, + computedRoutesByBackendServiceIndex.Name(), + svc.Id, + ) + if err != nil { + return nil, err + } + + // These are name-aligned with the service name that should go + // directly into the implicit destination list. + for _, cr := range crList { + implDestSvcRef := resource.ReplaceType(pbcatalog.ServiceType, cr.Id) + + rk := resource.NewReferenceKey(implDestSvcRef) + if _, seen := seenDest[rk]; seen { + continue + } + + // TODO: populate just the ports allowed by the underlying TPs. + implDest := &pbmesh.ImplicitDestination{ + DestinationRef: resource.Reference(implDestSvcRef, ""), + } + + implDestSvc, err := cache.GetDecoded[*pbcatalog.Service](rt.Cache, pbcatalog.ServiceType, "id", implDestSvcRef) + if err != nil { + return nil, err + } else if implDestSvc == nil { + continue // skip + } + + inMesh := false + for _, port := range implDestSvc.Data.Ports { + if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH { + inMesh = true + continue // skip + } + implDest.DestinationPorts = append(implDest.DestinationPorts, port.TargetPort) + } + if !inMesh { + continue // skip + } + + // Add entire bound-ref lineage at once, since they're only + // bound if they materially affect the computed resource. + boundRefCollector.AddRefOrID(ctp.Id) + boundRefCollector.AddRefOrID(svc.Id) + boundRefCollector.AddRefOrID(cr.Id) + boundRefCollector.AddRefOrID(implDestSvcRef) + + sort.Strings(implDest.DestinationPorts) + + out.Destinations = append(out.Destinations, implDest) + seenDest[rk] = struct{}{} + } + } + } + + // Ensure determinstic sort so we don't get into infinite-reconcile + sort.Slice(out.Destinations, func(i, j int) bool { + a, b := out.Destinations[i], out.Destinations[j] + return resource.LessReference(a.DestinationRef, b.DestinationRef) + }) + + out.BoundReferences = boundRefCollector.List() + + return out, nil +} + +func listAllWorkloadIdentities( + cache cache.ReadOnlyCache, + tenancy *pbresource.Tenancy, +) ([]*pbresource.Reference, error) { + // This is the same logic used by the sidecar controller to interpret CTPs. 
Here we + // carry it to its logical conclusion and simply include all possible identities. + iter, err := cache.ListIterator(pbauth.WorkloadIdentityType, "id", &pbresource.Reference{ + Type: pbauth.WorkloadIdentityType, + Tenancy: tenancy, + }, index.IndexQueryOptions{Prefix: true}) + if err != nil { + return nil, err + } + + var out []*pbresource.Reference + for res := iter.Next(); res != nil; res = iter.Next() { + out = append(out, resource.Reference(res.Id, "")) + } + return out, nil +} diff --git a/internal/mesh/internal/controllers/implicitdestinations/controller_test.go b/internal/mesh/internal/controllers/implicitdestinations/controller_test.go new file mode 100644 index 0000000000..60f3e333ce --- /dev/null +++ b/internal/mesh/internal/controllers/implicitdestinations/controller_test.go @@ -0,0 +1,1573 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package implicitdestinations + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing" + "github.com/hashicorp/consul/internal/auth" + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/controller/controllertest" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/routestest" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/resourcetest" + rtest "github.com/hashicorp/consul/internal/resource/resourcetest" + pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/consul/version/versiontest" +) + +type controllerSuite struct { + suite.Suite + + ctx context.Context + client *rtest.Client + rt controller.Runtime + + ctl *controller.TestController + tenancies []*pbresource.Tenancy + isEnterprise bool + + // Variants of the above for the few tests that need to verify + // default-allow mode. + rtDefaultAllow controller.Runtime + ctlDefaultAllow *controller.TestController +} + +func (suite *controllerSuite) SetupTest() { + suite.isEnterprise = versiontest.IsEnterprise() + suite.tenancies = resourcetest.TestTenancies() + registerTenancies := resourcetest.TestTenancies() + if suite.isEnterprise { + registerTenancies = append(registerTenancies, + rtest.Tenancy("wild.aaa"), + rtest.Tenancy("wild.bbb"), + rtest.Tenancy("default.fixed"), + ) + } + + suite.ctx = testutil.TestContext(suite.T()) + client := svctest.NewResourceServiceBuilder(). + WithRegisterFns(types.Register, catalog.RegisterTypes, auth.RegisterTypes). + WithTenancies(registerTenancies...). + Run(suite.T()) + + // The normal one we do most tests with is default-deny. + suite.ctl = controller.NewTestController(Controller(false), client). + WithLogger(testutil.Logger(suite.T())) + suite.rt = suite.ctl.Runtime() + suite.client = rtest.NewClient(suite.rt.Client) + + // Also create one for default-allow. 
(we pass the derived caching client + // from the first TestController in here so we can get both sets of caches + // to update in unison. + suite.ctlDefaultAllow = controller.NewTestController(Controller(true), suite.client). + WithLogger(testutil.Logger(suite.T())) + suite.rtDefaultAllow = suite.ctlDefaultAllow.Runtime() + // One true client. + suite.client = rtest.NewClient(suite.rtDefaultAllow.Client) +} + +func (suite *controllerSuite) requireCID(resource *pbresource.Resource, expected *pbmesh.ComputedImplicitDestinations) { + suite.T().Helper() + dec := rtest.MustDecode[*pbmesh.ComputedImplicitDestinations](suite.T(), resource) + prototest.AssertDeepEqual(suite.T(), expected, dec.Data) +} + +func (suite *controllerSuite) createWorkloadIdentities(names []string, tenancy *pbresource.Tenancy) []*pbresource.Resource { + return createWorkloadIdentities(suite.T(), suite.client, names, tenancy) +} + +func createWorkloadIdentities( + t *testing.T, + client *rtest.Client, + names []string, + tenancy *pbresource.Tenancy, +) []*pbresource.Resource { + var rs []*pbresource.Resource + for _, n := range names { + r := rtest.Resource(pbauth.WorkloadIdentityType, n). + WithTenancy(tenancy). + Write(t, client) + rs = append(rs, r) + } + return rs +} + +// TODO: have the CTP controller export an in-mem reconcile function like the routestest package +// that would help with the jumble of mutate+validate of TP + assembly into CTP +func (suite *controllerSuite) createTrafficPermissions( + names []string, + defaults []string, + tenancy *pbresource.Tenancy, +) { + suite.T().Helper() + var ( + destinationName string + sources []*pbauth.Source + + tpByDest = make(map[string][]*pbauth.TrafficPermissions) + ) + + for _, n := range names { + switch n { + case "d-wi1-s-wi2": + destinationName = "wi1" + sources = []*pbauth.Source{{ + IdentityName: "wi2", + }} + case "d-wi1-s-wi3": + destinationName = "wi1" + sources = []*pbauth.Source{{ + IdentityName: "wi3", + }} + case "d-wi2-s-wi1": + destinationName = "wi2" + sources = []*pbauth.Source{{ + IdentityName: "wi1", + }} + case "d-wi2-s-wi3": + destinationName = "wi2" + sources = []*pbauth.Source{{ + IdentityName: "wi3", + }} + case "d-wi4-s-wi5": + destinationName = "wi4" + sources = []*pbauth.Source{{ + IdentityName: "wi5", + }} + case "d-wi3-s-wild-name": + destinationName = "wi3" + sources = []*pbauth.Source{{ + IdentityName: "", + }} + default: + suite.T().Fatalf("unknown type of workload identity template: %s", n) + } + + // Write it just so we get the mutate+validate part + tp0 := rtest.Resource(pbauth.TrafficPermissionsType, "ignore"). + WithData(suite.T(), &pbauth.TrafficPermissions{ + Destination: &pbauth.Destination{ + IdentityName: destinationName, + }, + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: sources, + }}, + }). + WithTenancy(tenancy). + Build() + + decTP0 := rtest.MustDecode[*pbauth.TrafficPermissions](suite.T(), tp0) + + tpByDest[destinationName] = append(tpByDest[destinationName], decTP0.Data) + } + + // Insert just enough to get the default one made for free. + for _, n := range defaults { + some := tpByDest[n] + require.Empty(suite.T(), some) + tpByDest[n] = nil + } + + for destinationName, tpList := range tpByDest { + id := rtest.Resource(pbauth.ComputedTrafficPermissionsType, destinationName). + WithTenancy(tenancy). 
+ ID() + + ReconcileComputedTrafficPermissions( + suite.T(), + suite.client, + id, + tpList..., + ) + } +} + +type serviceFixture struct { + *pbresource.Resource + StatusUpdate func() *pbresource.Resource +} + +func (suite *controllerSuite) createServices(names []string, tenancy *pbresource.Tenancy, withWIDs bool) []*serviceFixture { + return createServices(suite.T(), suite.client, names, tenancy, withWIDs) +} + +func createServices( + t *testing.T, + client *rtest.Client, + names []string, + tenancy *pbresource.Tenancy, + withWIDs bool, +) []*serviceFixture { + var rs []*serviceFixture + for _, n := range names { + var ( + workloads []string + ids []string + ) + switch n { + case "s1": + workloads = []string{"w1"} + ids = []string{"wi1"} + case "s2": + workloads = []string{"w2"} + ids = []string{"wi2"} + case "s3": + workloads = []string{"w3"} + ids = []string{"wi3"} + case "s4": + workloads = []string{"w4"} + ids = []string{"wi4"} + case "s5": + workloads = []string{"w5"} + ids = []string{"wi5"} + case "s1-2": + workloads = []string{"w1", "w2"} + ids = []string{"wi1", "wi2"} + case "s11-2": + workloads = []string{"w1-1", "w2"} + ids = []string{"wi1", "wi2"} + } + + // TODO: export this helper from the catalog package for testing. + var status *pbresource.Status + if withWIDs { + status = &pbresource.Status{ + Conditions: []*pbresource.Condition{{ + Type: catalog.StatusConditionBoundIdentities, + State: pbresource.Condition_STATE_TRUE, + Message: strings.Join(ids, ","), + }}, + } + } else { + status = &pbresource.Status{ + Conditions: []*pbresource.Condition{{ + Type: catalog.StatusConditionBoundIdentities, + State: pbresource.Condition_STATE_TRUE, + Message: "", + }}, + } + } + r := rtest.Resource(pbcatalog.ServiceType, n). + WithData(t, &pbcatalog.Service{ + Workloads: &pbcatalog.WorkloadSelector{ + Names: workloads, + }, + Ports: []*pbcatalog.ServicePort{ + {TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH}, + {TargetPort: "grpc", Protocol: pbcatalog.Protocol_PROTOCOL_GRPC}, + }, + }). + WithTenancy(tenancy). + WithStatus(catalog.EndpointsStatusKey, status). + Write(t, client) + + var statusUpdate = func() *pbresource.Resource { return r } + if !withWIDs { + statusUpdate = func() *pbresource.Resource { + ctx := client.Context(t) + + status := &pbresource.Status{ + ObservedGeneration: r.Generation, + Conditions: []*pbresource.Condition{{ + Type: catalog.StatusConditionBoundIdentities, + State: pbresource.Condition_STATE_TRUE, + Message: strings.Join(ids, ","), + }}, + } + resp, err := client.WriteStatus(ctx, &pbresource.WriteStatusRequest{ + Id: r.Id, + Key: catalog.EndpointsStatusKey, + Status: status, + }) + require.NoError(t, err) + return resp.Resource + } + } + + sf := &serviceFixture{ + Resource: r, + StatusUpdate: statusUpdate, + } + + rs = append(rs, sf) + } + return rs +} + +func (suite *controllerSuite) createComputedRoutes(svc *pbresource.Resource, decResList ...any) *types.DecodedComputedRoutes { + return createComputedRoutes(suite.T(), suite.client, svc, decResList...) +} + +func createComputedRoutes(t *testing.T, client *rtest.Client, svc *pbresource.Resource, decResList ...any) *types.DecodedComputedRoutes { + resList := make([]any, 0, len(decResList)+1) + resList = append(resList, + resourcetest.MustDecode[*pbcatalog.Service](t, svc), + ) + resList = append(resList, decResList...) + crID := resource.ReplaceType(pbmesh.ComputedRoutesType, svc.Id) + cr := routestest.ReconcileComputedRoutes(t, client, crID, resList...) 
+ require.NotNil(t, cr) + return cr +} + +func (suite *controllerSuite) TestReconcile_CIDCreate_NoReferencingTrafficPermissionsExist() { + suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) { + wi := rtest.Resource(pbauth.WorkloadIdentityType, "wi1"). + WithTenancy(tenancy). + Write(suite.T(), suite.client) + + id := rtest.Resource(pbmesh.ComputedImplicitDestinationsType, wi.Id.Name). + WithTenancy(tenancy). + ID() + + suite.reconcileOnce(id) + + // Ensure that the CID was created + cid := suite.client.RequireResourceExists(suite.T(), id) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{}, + BoundReferences: nil, + }) + }) +} + +func (suite *controllerSuite) TestReconcile_CIDCreate_ReferencingResourcesExist() { + suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) { + suite.createTrafficPermissions([]string{"d-wi1-s-wi2"}, []string{"wi2"}, tenancy) + resID := &pbresource.ID{ + Name: "wi2", + Type: pbmesh.ComputedImplicitDestinationsType, + Tenancy: tenancy, + } + // create the workload identity for the source + wi := suite.createWorkloadIdentities([]string{"wi1", "wi2"}, tenancy) + svc := suite.createServices([]string{"s1-2"}, tenancy, true) + + // Write a default ComputedRoutes for s1-2. + cr := suite.createComputedRoutes(svc[0].Resource) + + ctpID := resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi[0].Id) + + suite.reconcileOnce(resID) + + // Ensure that the CID was created + cid := suite.client.RequireResourceExists(suite.T(), resID) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{{ + DestinationRef: refFromID(svc[0].Id), + DestinationPorts: []string{"grpc"}, + }}, + BoundReferences: []*pbresource.Reference{ + refFromID(ctpID), + refFromID(svc[0].Id), + refFromID(cr.Id), + }, + }) + rtest.RequireOwner(suite.T(), cid, wi[1].Id, true) + }) +} + +const ( + omitCTP = "computed-traffic-permissions" + omitWorkloadIdentity = "workload-identity" + omitService = "service" + omitWIOnService = "wi-on-service" + omitComputedRoutes = "computed-routes" +) + +func (suite *controllerSuite) TestReconcile_CIDCreate_IncrementalConstruction_ComputedTrafficPermissions() { + suite.testReconcile_CIDCreate_IncrementalConstruction(omitCTP) +} + +func (suite *controllerSuite) TestReconcile_CIDCreate_IncrementalConstruction_WorkloadIdentity() { + suite.testReconcile_CIDCreate_IncrementalConstruction(omitWorkloadIdentity) +} + +func (suite *controllerSuite) TestReconcile_CIDCreate_IncrementalConstruction_Service() { + suite.testReconcile_CIDCreate_IncrementalConstruction(omitService) +} + +func (suite *controllerSuite) TestReconcile_CIDCreate_IncrementalConstruction_WorkloadIdentitiesOnService() { + suite.testReconcile_CIDCreate_IncrementalConstruction(omitWIOnService) +} + +func (suite *controllerSuite) TestReconcile_CIDCreate_IncrementalConstruction_ComputedRoutes() { + suite.testReconcile_CIDCreate_IncrementalConstruction(omitComputedRoutes) +} + +func (suite *controllerSuite) testReconcile_CIDCreate_IncrementalConstruction(omit string) { + // There are 5 major ingredients to assemble a CID that this test machinery cares about: + // + // - Workload Identities + // - Computed Traffic Permissions + // - Services + // - Workload Identity data-bearing status cond on Services + // - Computed Routes + // + // For each of these possible ingredients, we execute the test in 4 chunks: + // + // 1. Build everything *except* one ingredient. + // 2. 
Reconcile and assert what the CID should look like without it. + // 3. Build the omitted ingredient. + // 4. Reconcile and assert that the CID looks the same in all cases. + // + // NOTEs: + // + // - CRs are owned by Services, so skipping the services will also skip the CRs. + // - status conds live on the service, so late-adding them is different + // than creating them the first time. + suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) { + resID := &pbresource.ID{ + Name: "wi2", + Type: pbmesh.ComputedImplicitDestinationsType, + Tenancy: tenancy, + } + + var ( + wi []*pbresource.Resource + svc []*serviceFixture + cr *types.DecodedComputedRoutes + ) + + if omit != omitWorkloadIdentity { + wi = suite.createWorkloadIdentities([]string{"wi1", "wi2"}, tenancy) + } + if omit != omitCTP { + suite.createTrafficPermissions([]string{"d-wi1-s-wi2"}, []string{"wi2"}, tenancy) + } + if omit != omitService { + svc = suite.createServices([]string{"s1-2"}, tenancy, (omit != omitWIOnService)) + if omit != omitComputedRoutes { + // Write a default ComputedRoutes for s1-2. + cr = suite.createComputedRoutes(svc[0].Resource) + } + } + + // Reconcile the first time with one omission. + suite.reconcileOnce(resID) + + switch omit { + case omitWorkloadIdentity: + // Ensure that no CID was created + suite.client.RequireResourceNotFound(suite.T(), resID) + case omitCTP: + // no bound resources at all + cid := suite.client.RequireResourceExists(suite.T(), resID) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{}) + rtest.RequireOwner(suite.T(), cid, wi[1].Id, true) + case omitService, omitWIOnService: + // no linking workloads, no implicit destinations + cid := suite.client.RequireResourceExists(suite.T(), resID) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{}) + rtest.RequireOwner(suite.T(), cid, wi[1].Id, true) + case omitComputedRoutes: + // no linking workloads, no implicit destinations + cid := suite.client.RequireResourceExists(suite.T(), resID) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{}) + rtest.RequireOwner(suite.T(), cid, wi[1].Id, true) + default: + suite.T().Fatalf("omit=%s case not handled yet", omit) + } + + // Create WI and ensure CID is created + switch omit { + case omitWorkloadIdentity: + wi = suite.createWorkloadIdentities([]string{"wi1", "wi2"}, tenancy) + case omitCTP: + suite.createTrafficPermissions([]string{"d-wi1-s-wi2"}, []string{"wi2"}, tenancy) + case omitService: + svc = suite.createServices([]string{"s1-2"}, tenancy, true) + // Write a default ComputedRoutes for s1-2. + cr = suite.createComputedRoutes(svc[0].Resource) + case omitWIOnService: + // update the special bound WI status cond after the fact + svc[0].StatusUpdate() + case omitComputedRoutes: + // Write a default ComputedRoutes for s1-2. 
+ cr = suite.createComputedRoutes(svc[0].Resource) + default: + suite.T().Fatalf("omit=%s case not handled yet", omit) + } + + suite.reconcileOnce(resID) + + ctpID := resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi[0].Id) + + cid := suite.client.RequireResourceExists(suite.T(), resID) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{{ + DestinationRef: refFromID(svc[0].Id), + DestinationPorts: []string{"grpc"}, + }}, + BoundReferences: []*pbresource.Reference{ + refFromID(ctpID), + refFromID(svc[0].Id), + refFromID(cr.Id), + }, + }) + rtest.RequireOwner(suite.T(), cid, wi[1].Id, true) + }) +} + +func (suite *controllerSuite) TestReconcile_CIDUpdate_Multiple_Workloads_Services_TrafficPermissions() { + suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) { + wi := suite.createWorkloadIdentities([]string{ + "wi1", "wi2", "wi3", "wi4", "wi5", + }, tenancy) + + svc := suite.createServices([]string{ + "s1", + "s2", + "s3", + "s4", + "s1-2", + "s11-2", + }, tenancy, true) + + suite.createTrafficPermissions([]string{ + "d-wi1-s-wi3", + "d-wi2-s-wi3", + "d-wi1-s-wi2", + "d-wi4-s-wi5", + }, []string{"wi3", "wi5"}, tenancy) + + var ( + wi1 = wi[0] + wi2 = wi[1] + wi3 = wi[2] + wi4 = wi[3] + wi5 = wi[4] + + svc1 = svc[0] + svc2 = svc[1] + svc3 = svc[2] + svc4 = svc[3] + svc1_2 = svc[4] + svc11_2 = svc[5] + ) + + var ( + cr1 = suite.createComputedRoutes(svc1.Resource) + cr2 = suite.createComputedRoutes(svc2.Resource) + cr3 = suite.createComputedRoutes(svc3.Resource, + resourcetest.MustDecode[*pbmesh.GRPCRoute](suite.T(), rtest.Resource(pbmesh.GRPCRouteType, "grpc-route"). + WithTenancy(tenancy). + WithData(suite.T(), &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{{ + Ref: refFromID(svc3.Id), + }}, + Rules: []*pbmesh.GRPCRouteRule{{ + BackendRefs: []*pbmesh.GRPCBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: refFromID(svc3.Id), + }, + Weight: 50, + }, + { + BackendRef: &pbmesh.BackendReference{ + Ref: refFromID(svc4.Id), + }, + Weight: 50, + }, + }, + }}, + }). 
+ Build()), + resourcetest.MustDecode[*pbcatalog.Service](suite.T(), svc4.Resource), + ) + cr4 = suite.createComputedRoutes(svc4.Resource) + cr1_2 = suite.createComputedRoutes(svc1_2.Resource) + cr11_2 = suite.createComputedRoutes(svc11_2.Resource) + + ctpID1 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi1.Id) + ctpID2 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi2.Id) + // ctpID3 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi3.Id) + ctpID4 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi4.Id) + // ctpID5 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi5.Id) + + cidWI1 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi1.Id) + cidWI2 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi2.Id) + cidWI3 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi3.Id) + cidWI4 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi4.Id) + cidWI5 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi5.Id) + ) + + suite.reconcileOnce(cidWI1) + suite.reconcileOnce(cidWI2) + suite.reconcileOnce(cidWI3) + suite.reconcileOnce(cidWI4) + suite.reconcileOnce(cidWI5) + + /* + CTPs: + wi3->wi1 + wi3->wi2 + wi2->wi1 + wi5->wi4 + + WIs by SOURCE: + wi1: [] + wi2: [wi1] + wi3: [wi1, wi2] + wi4: [] + wi5: [wi4] + + SVCs: + s1: [wi1] + s2: [wi2] + s3: [wi3] + s4: [wi4] + s1-2: [wi1, wi2] + s11-2: [wi1, wi2] + + CRs: + s1: [s1] + s2: [s2] + s3: [s3, s4] + s4: [s4] + s1-2: [s1-2] + s11-2: [s11-2] + + WIs by SOURCE + SVC+WI: + wi1: [] + wi2: [s1, s1-2, s11-2] + wi3: [s1, s2, s1-2, s11-2] + wi4: [] + wi5: [s4] + + EXPECT CIDs: + + wi1: [] + wi2: [s1, s1-2, s11-2] + wi3: [s1, s2, s1-2, s11-2] + wi4: [] + wi5: [s3, s4] + + */ + + // Ensure that the CIDs were created + suite.Run("cid for wi1", func() { + cid := suite.client.RequireResourceExists(suite.T(), cidWI1) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{}) + rtest.RequireOwner(suite.T(), cid, wi1.Id, true) + }) + + suite.Run("cid for wi2", func() { + cid := suite.client.RequireResourceExists(suite.T(), cidWI2) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + // { s1, refs: wi1 }, { s1-2, refs: wi1 }, { s11-2, refs: wi1 } + {DestinationRef: refFromID(svc1.Id), DestinationPorts: []string{"grpc"}}, + {DestinationRef: refFromID(svc1_2.Id), DestinationPorts: []string{"grpc"}}, + {DestinationRef: refFromID(svc11_2.Id), DestinationPorts: []string{"grpc"}}, + }, + BoundReferences: []*pbresource.Reference{ + refFromID(ctpID1), + + refFromID(svc1.Id), + refFromID(svc1_2.Id), + refFromID(svc11_2.Id), + + refFromID(cr1.Id), + refFromID(cr1_2.Id), + refFromID(cr11_2.Id), + }, + }) + rtest.RequireOwner(suite.T(), cid, wi2.Id, true) + }) + + suite.Run("cid for wi3", func() { + cid := suite.client.RequireResourceExists(suite.T(), cidWI3) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + // { s1, refs: wi1 }, { s2, refs: wi2 }, { s1-2, refs: wi1, wi2 }, { s11-2, refs: wi1, wi2 } + {DestinationRef: refFromID(svc1.Id), DestinationPorts: []string{"grpc"}}, + {DestinationRef: refFromID(svc1_2.Id), DestinationPorts: []string{"grpc"}}, + {DestinationRef: refFromID(svc11_2.Id), DestinationPorts: []string{"grpc"}}, + {DestinationRef: refFromID(svc2.Id), DestinationPorts: []string{"grpc"}}, + }, + BoundReferences: []*pbresource.Reference{ + refFromID(ctpID1), + refFromID(ctpID2), + + refFromID(svc1.Id), + 
refFromID(svc1_2.Id), + refFromID(svc11_2.Id), + refFromID(svc2.Id), + + refFromID(cr1.Id), + refFromID(cr1_2.Id), + refFromID(cr11_2.Id), + refFromID(cr2.Id), + }, + }) + rtest.RequireOwner(suite.T(), cid, wi3.Id, true) + }) + + suite.Run("cid for wi4", func() { + cid := suite.client.RequireResourceExists(suite.T(), cidWI4) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{}) + rtest.RequireOwner(suite.T(), cid, wi4.Id, true) + }) + + suite.Run("cid for wi5", func() { + cid := suite.client.RequireResourceExists(suite.T(), cidWI5) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + // { s4, refs: wi4 }, { s3, refs: wi3 } + {DestinationRef: refFromID(svc3.Id), DestinationPorts: []string{"grpc"}}, + {DestinationRef: refFromID(svc4.Id), DestinationPorts: []string{"grpc"}}, + }, + BoundReferences: []*pbresource.Reference{ + refFromID(ctpID4), + + refFromID(svc3.Id), + refFromID(svc4.Id), + + refFromID(cr3.Id), + refFromID(cr4.Id), + }, + }) + rtest.RequireOwner(suite.T(), cid, wi5.Id, true) + }) + }) +} + +func (suite *controllerSuite) reconcileOnce(id *pbresource.ID) { + err := suite.ctl.Reconcile(suite.ctx, controller.Request{ID: id}) + require.NoError(suite.T(), err) + suite.T().Cleanup(func() { + suite.client.CleanupDelete(suite.T(), id) + }) +} + +func (suite *controllerSuite) TestReconcile_CIDUpdate_TrafficPermissions_WildcardName() { + if !suite.isEnterprise { + suite.T().Skip("test only applies in enterprise as written") + } + fixedTenancy := rtest.Tenancy("default.fixed") + + suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) { + wi := suite.createWorkloadIdentities([]string{ + "wi1", "wi2", "wi3", + }, tenancy) + + wiFixed := suite.createWorkloadIdentities([]string{ + "wi4", "wi5", + }, fixedTenancy) + + svc := suite.createServices([]string{"s1", "s2", "s3"}, tenancy, true) + + var ( + wi1 = wi[0] + wi2 = wi[1] + wi3 = wi[2] + + wiFixed4 = wiFixed[0] + wiFixed5 = wiFixed[1] + + svc1 = svc[0] + svc2 = svc[1] + svc3 = svc[2] + ) + + var ( + _ = suite.createComputedRoutes(svc1.Resource) + _ = suite.createComputedRoutes(svc2.Resource) + cr3 = suite.createComputedRoutes(svc3.Resource) + ) + + // Create some stub CTPs. + suite.createTrafficPermissions([]string{}, []string{"wi1", "wi2"}, tenancy) + suite.createTrafficPermissions([]string{}, []string{"wi4", "wi5"}, fixedTenancy) + + // Create a wildcard "all names in a namespace" TP. + ctpID3 := ReconcileComputedTrafficPermissions( + suite.T(), + suite.client, + resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi3.Id), + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{{ + IdentityName: "", + Namespace: fixedTenancy.Namespace, + Partition: fixedTenancy.Partition, + }}, + }}, + }, + ).Id + + // These DO NOT match the wildcard. + for _, wiID := range []*pbresource.ID{wi1.Id, wi2.Id, wi3.Id} { + cidWI := resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wiID) + + suite.reconcileOnce(cidWI) + + suite.Run("empty cid for "+resource.IDToString(wiID), func() { + cid := suite.client.RequireResourceExists(suite.T(), cidWI) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{}) + rtest.RequireOwner(suite.T(), cid, wiID, true) + }) + } + + // These DO match the wildcard. 
+ for _, wiID := range []*pbresource.ID{wiFixed4.Id, wiFixed5.Id} { + cidWI := resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wiID) + + suite.reconcileOnce(cidWI) + + suite.Run("cid for "+resource.IDToString(wiID), func() { + cid := suite.client.RequireResourceExists(suite.T(), cidWI) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{{ + DestinationRef: refFromID(svc3.Id), + DestinationPorts: []string{"grpc"}, + }}, + BoundReferences: []*pbresource.Reference{ + refFromID(ctpID3), + refFromID(svc3.Id), + refFromID(cr3.Id), + }, + }) + rtest.RequireOwner(suite.T(), cid, wiID, true) + }) + } + }) +} + +func (suite *controllerSuite) TestReconcile_CIDUpdate_TrafficPermissions_WildcardNamespace() { + if !suite.isEnterprise { + suite.T().Skip("test only applies in enterprise as written") + } + + wildTenancy1 := rtest.Tenancy("wild.aaa") + wildTenancy2 := rtest.Tenancy("wild.bbb") + + suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) { + wi := suite.createWorkloadIdentities([]string{ + "wi1", "wi2", "wi3", + }, tenancy) + + wiWild4 := suite.createWorkloadIdentities([]string{ + "wi4", + }, wildTenancy1)[0] + + wiWild5 := suite.createWorkloadIdentities([]string{ + "wi5", + }, wildTenancy2)[0] + + svc := suite.createServices([]string{"s1", "s2", "s3"}, tenancy, true) + + var ( + wi1 = wi[0] + wi2 = wi[1] + wi3 = wi[2] + + svc1 = svc[0] + svc2 = svc[1] + svc3 = svc[2] + ) + + var ( + _ = suite.createComputedRoutes(svc1.Resource) + _ = suite.createComputedRoutes(svc2.Resource) + cr3 = suite.createComputedRoutes(svc3.Resource) + ) + + // Create some stub CTPs. + suite.createTrafficPermissions([]string{}, []string{"wi1", "wi2"}, tenancy) + suite.createTrafficPermissions([]string{}, []string{"wi4"}, wildTenancy1) + suite.createTrafficPermissions([]string{}, []string{"wi5"}, wildTenancy2) + + // Create a wildcard "all namespaces in a partition" TP. + ctpID3 := ReconcileComputedTrafficPermissions( + suite.T(), + suite.client, + resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi3.Id), + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{{ + IdentityName: "", + Namespace: "", + Partition: "wild", + }}, + }}, + }, + ).Id + + // These DO NOT match the wildcard. + for _, wiID := range []*pbresource.ID{wi1.Id, wi2.Id, wi3.Id} { + cidWI := resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wiID) + + suite.reconcileOnce(cidWI) + + suite.Run("empty cid for "+resource.IDToString(wiID), func() { + cid := suite.client.RequireResourceExists(suite.T(), cidWI) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{}) + rtest.RequireOwner(suite.T(), cid, wiID, true) + }) + } + + // These DO match the wildcard. 
+ for _, wiID := range []*pbresource.ID{wiWild4.Id, wiWild5.Id} { + cidWI := resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wiID) + + suite.reconcileOnce(cidWI) + + suite.Run("cid for "+resource.IDToString(wiID), func() { + cid := suite.client.RequireResourceExists(suite.T(), cidWI) + suite.requireCID(cid, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{{ + DestinationRef: refFromID(svc3.Id), + DestinationPorts: []string{"grpc"}, + }}, + BoundReferences: []*pbresource.Reference{ + refFromID(ctpID3), + refFromID(svc3.Id), + refFromID(cr3.Id), + }, + }) + rtest.RequireOwner(suite.T(), cid, wiID, true) + }) + } + }) +} + +// TODO: test a bound references dep mapper loop + +func (suite *controllerSuite) TestMapping() { + suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) { + //////////////////////////////////// + // Creating a WI triggers aligned reconcile. + wi := suite.createWorkloadIdentities([]string{ + "wi1", "wi2", "wi3", "wi4", + }, tenancy) + var ( + wi1 = wi[0] + wi2 = wi[1] + wi3 = wi[2] + wi4 = wi[3] + + cidWI1 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi1.Id) + cidWI2 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi2.Id) + cidWI3 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi3.Id) + cidWI4 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi4.Id) + + ctpID1 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi1.Id) + ctpID2 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi2.Id) + ctpID3 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi3.Id) + ctpID4 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi4.Id) + ) + suite.assertMapperDefaultDeny(wi1, cidWI1) + suite.assertMapperDefaultDeny(wi2, cidWI2) + suite.assertMapperDefaultDeny(wi3, cidWI3) + suite.assertMapperDefaultDeny(wi4, cidWI4) + + suite.assertMapperDefaultAllow(wi1, cidWI1) + suite.assertMapperDefaultAllow(wi2, cidWI2) + suite.assertMapperDefaultAllow(wi3, cidWI3) + suite.assertMapperDefaultAllow(wi4, cidWI4) + + //////////////////////////////////// + // Creating a CTP that references a wi as a source triggers. + suite.createTrafficPermissions([]string{ + "d-wi2-s-wi1", + "d-wi1-s-wi2", + }, []string{"wi4"}, tenancy) + + // Create a wildcard "all names in a namespace" TP. + ReconcileComputedTrafficPermissions( + suite.T(), + suite.client, + ctpID3, + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{{ + IdentityName: "", + // Namespace: tenancy.Namespace, + // Partition: tenancy.Partition, + }}, + }}, + }, + ) + + var ( + ctp1 = suite.client.RequireResourceExists(suite.T(), ctpID1) + ctp2 = suite.client.RequireResourceExists(suite.T(), ctpID2) + ctp3 = suite.client.RequireResourceExists(suite.T(), ctpID3) + ctp4 = suite.client.RequireResourceExists(suite.T(), ctpID4) + ) + suite.assertMapperDefaultDeny(ctp1, cidWI2) + suite.assertMapperDefaultDeny(ctp2, cidWI1) + suite.assertMapperDefaultDeny(ctp3, cidWI1, cidWI2, cidWI3, cidWI4) + suite.assertMapperDefaultDeny(ctp4) + + suite.assertMapperDefaultAllow(ctp1, cidWI2) + suite.assertMapperDefaultAllow(ctp2, cidWI1) + suite.assertMapperDefaultAllow(ctp3, cidWI1, cidWI2, cidWI3, cidWI4) + // wi4 has a default CTP in default allow so it allows all traffic to it. 
+ suite.assertMapperDefaultAllow(ctp4, cidWI1, cidWI2, cidWI3, cidWI4) + + //////////////////////////////////// + // Creating a Service alone does nothing. + svc := suite.createServices([]string{ + "s1", "s2", "s3", "s4", + }, tenancy, false) + var ( + svc1 = svc[0] + svc2 = svc[1] + svc3 = svc[2] + svc4 = svc[3] + ) + suite.assertMapperDefaultDeny(svc1.Resource) + suite.assertMapperDefaultDeny(svc2.Resource) + suite.assertMapperDefaultDeny(svc3.Resource) + suite.assertMapperDefaultDeny(svc4.Resource) + + suite.assertMapperDefaultAllow(svc1.Resource) + suite.assertMapperDefaultAllow(svc2.Resource) + suite.assertMapperDefaultAllow(svc3.Resource) + suite.assertMapperDefaultAllow(svc4.Resource) + + //////////////////////////////////// + // Have to update the special status condition first. + svc1.Resource = svc1.StatusUpdate() + svc2.Resource = svc2.StatusUpdate() + svc3.Resource = svc3.StatusUpdate() + svc4.Resource = svc4.StatusUpdate() + suite.assertMapperDefaultDeny(svc1.Resource, cidWI2) + suite.assertMapperDefaultDeny(svc2.Resource, cidWI1) + suite.assertMapperDefaultDeny(svc3.Resource, cidWI1, cidWI2, cidWI3, cidWI4) + suite.assertMapperDefaultDeny(svc4.Resource) + + suite.assertMapperDefaultAllow(svc1.Resource, cidWI2) + suite.assertMapperDefaultAllow(svc2.Resource, cidWI1) + suite.assertMapperDefaultAllow(svc3.Resource, cidWI1, cidWI2, cidWI3, cidWI4) + // s4 maps to wi4 which has a default CTP in default allow so it allows all traffic to it. + suite.assertMapperDefaultAllow(svc4.Resource, cidWI1, cidWI2, cidWI3, cidWI4) + + //////////////////////////////////// + // Add a computed routes that provides another alias for the workloads. + cr1 := suite.createComputedRoutes(svc1.Resource) + cr2 := suite.createComputedRoutes(svc2.Resource) + cr3 := suite.createComputedRoutes(svc3.Resource, + resourcetest.MustDecode[*pbmesh.GRPCRoute](suite.T(), rtest.Resource(pbmesh.GRPCRouteType, "grpc-route"). + WithTenancy(tenancy). + WithData(suite.T(), &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{{ + Ref: refFromID(svc3.Id), + }}, + Rules: []*pbmesh.GRPCRouteRule{{ + BackendRefs: []*pbmesh.GRPCBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: refFromID(svc1.Id), + }, + Weight: 50, + }, + { + BackendRef: &pbmesh.BackendReference{ + Ref: refFromID(svc2.Id), + }, + Weight: 50, + }, + }, + }}, + }). + Build()), + resourcetest.MustDecode[*pbcatalog.Service](suite.T(), svc1.Resource), + resourcetest.MustDecode[*pbcatalog.Service](suite.T(), svc2.Resource), + ) + cr4 := suite.createComputedRoutes(svc4.Resource) + suite.assertMapperDefaultDeny(cr1.Resource, cidWI2) + suite.assertMapperDefaultDeny(cr2.Resource, cidWI1) + suite.assertMapperDefaultDeny(cr3.Resource, cidWI1, cidWI2) + suite.assertMapperDefaultDeny(cr4.Resource) + + suite.assertMapperDefaultAllow(cr1.Resource, cidWI2) + suite.assertMapperDefaultAllow(cr2.Resource, cidWI1) + suite.assertMapperDefaultAllow(cr3.Resource, cidWI1, cidWI2) + // cr4 aligns to s4 which maps to wi4 which has a default CTP in default allow so it allows all traffic to it. 
+ suite.assertMapperDefaultAllow(cr4.Resource, cidWI1, cidWI2, cidWI3, cidWI4) + }) +} + +func (suite *controllerSuite) TestMapping_WildcardNamesAndNamespaces() { + if !suite.isEnterprise { + suite.T().Skip("test only applies in enterprise as written") + } + + fixedTenancy := rtest.Tenancy("default.fixed") // for wildcard name + wildTenancy1 := rtest.Tenancy("wild.aaa") // for wildcard ns + wildTenancy2 := rtest.Tenancy("wild.bbb") // for wildcard ns + + suite.runTestCaseWithTenancies(func(tenancy *pbresource.Tenancy) { + // Creating a WI triggers aligned reconcile. + wi := suite.createWorkloadIdentities([]string{ + "wi1", "wi2", "wi3", + }, tenancy) + + wiWild4 := suite.createWorkloadIdentities([]string{ + "wi4", + }, wildTenancy1)[0] + + wiWild5 := suite.createWorkloadIdentities([]string{ + "wi5", + }, wildTenancy2)[0] + + wiFixed := suite.createWorkloadIdentities([]string{ + "wi4", "wi5", + }, fixedTenancy) + + svc := suite.createServices([]string{"s1", "s2", "s3"}, tenancy, true) + + var ( + wi1 = wi[0] + wi2 = wi[1] + wi3 = wi[2] + + wiFixed4 = wiFixed[0] + wiFixed5 = wiFixed[1] + + svc1 = svc[0] + svc2 = svc[1] + svc3 = svc[2] + + cidWI1 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi1.Id) + cidWI2 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi2.Id) + cidWI3 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi3.Id) + + cidWIWild4 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wiWild4.Id) + cidWIWild5 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wiWild5.Id) + + cidWIFixed4 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wiFixed4.Id) + cidWIFixed5 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wiFixed5.Id) + ) + + var ( + cr1 = suite.createComputedRoutes(svc1.Resource) + cr2 = suite.createComputedRoutes(svc2.Resource) + cr3 = suite.createComputedRoutes(svc3.Resource) + ) + + // Create some stub CTPs. + suite.createTrafficPermissions([]string{}, []string{"wi1"}, tenancy) + suite.createTrafficPermissions([]string{}, []string{"wi4"}, wildTenancy1) + suite.createTrafficPermissions([]string{}, []string{"wi5"}, wildTenancy2) + + var ( + ctpID1 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi1.Id) + ) + + // Create a wildcard "all names in a namespace" TP. + ctpID2 := ReconcileComputedTrafficPermissions( + suite.T(), + suite.client, + resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi2.Id), + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{{ + IdentityName: "", + Namespace: fixedTenancy.Namespace, + Partition: fixedTenancy.Partition, + }}, + }}, + }, + ).Id + + // Create a wildcard "all namespaces in a partition" TP. 
+ ctpID3 := ReconcileComputedTrafficPermissions( + suite.T(), + suite.client, + resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi3.Id), + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{{ + IdentityName: "", + Namespace: "", + Partition: "wild", + }}, + }}, + }, + ).Id + + var ( + ctp1 = suite.client.RequireResourceExists(suite.T(), ctpID1) + ctp2 = suite.client.RequireResourceExists(suite.T(), ctpID2) + ctp3 = suite.client.RequireResourceExists(suite.T(), ctpID3) + ) + + //////////////////////////////////// + // Workload Identities + // (nothing interesting here) + + suite.assertMapperDefaultDeny(wi1, cidWI1) + suite.assertMapperDefaultDeny(wi2, cidWI2) + suite.assertMapperDefaultDeny(wi3, cidWI3) + + //////////////////////////////////// + // CTPs + // (ctp2 aligns with wi2 which has the default.fixed.* allow) + // (ctp3 aligns with wi3 which has the wild.*.* allow) + + suite.assertMapperDefaultDeny(ctp1) + suite.assertMapperDefaultDeny(ctp2, cidWIFixed4, cidWIFixed5) + suite.assertMapperDefaultDeny(ctp3, cidWIWild4, cidWIWild5) + + //////////////////////////////////// + // Services + // (s2 encompasses wi2 which has the default.fixed.* allow) + // (s3 encompasses wi3 which has the wild.*.* allow) + + suite.assertMapperDefaultDeny(svc1.Resource) + suite.assertMapperDefaultDeny(svc2.Resource, cidWIFixed4, cidWIFixed5) + suite.assertMapperDefaultDeny(svc3.Resource, cidWIWild4, cidWIWild5) + + //////////////////////////////////// + // Computed Routes + // (cr2 aligns with s2 which encompasses wi2 which has the default.fixed.* allow) + // (cr3 aligns with s3 which encompasses wi3 which has the wild.*.* allow) + + suite.assertMapperDefaultDeny(cr1.Resource) + suite.assertMapperDefaultDeny(cr2.Resource, cidWIFixed4, cidWIFixed5) + suite.assertMapperDefaultDeny(cr3.Resource, cidWIWild4, cidWIWild5) + }) +} + +func TestController_DefaultDeny(t *testing.T) { + // This test's purpose is to exercise the controller in a halfway realistic + // way. Generally we are trying to go through the whole lifecycle of the + // controller. + // + // This isn't a full integration test as that would require also executing + // various other controllers. + + clientRaw := controllertest.NewControllerTestBuilder(). + WithTenancies(resourcetest.TestTenancies()...). + WithResourceRegisterFns(types.Register, catalog.RegisterTypes, auth.RegisterTypes). + WithControllerRegisterFns(func(mgr *controller.Manager) { + mgr.Register(Controller(false)) + }). + Run(t) + + client := rtest.NewClient(clientRaw) + + for _, tenancy := range resourcetest.TestTenancies() { + t.Run(tenancySubTestName(tenancy), func(t *testing.T) { + tenancy := tenancy + + // Add some workload identities and services. 
+ wi := createWorkloadIdentities(t, client, []string{ + "wi1", "wi2", "wi3", + }, tenancy) + var ( + wi1 = wi[0] + wi2 = wi[1] + wi3 = wi[2] + + // ctpID1 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi1.Id) + ctpID2 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi2.Id) + ctpID3 = resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wi3.Id) + + cidID1 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi1.Id) + cidID2 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi2.Id) + cidID3 = resource.ReplaceType(pbmesh.ComputedImplicitDestinationsType, wi3.Id) + ) + svc := createServices(t, client, []string{"s1", "s2", "s3"}, tenancy, true) + var ( + _ = svc[0] + svc2 = svc[1] + svc3 = svc[2] + + crID2 = resource.ReplaceType(pbmesh.ComputedRoutesType, svc2.Id) + crID3 = resource.ReplaceType(pbmesh.ComputedRoutesType, svc3.Id) + ) + + // Wait for the empty stub resources to be created. + cidVersion1 := requireNewCIDVersion(t, client, cidID1, "", &pbmesh.ComputedImplicitDestinations{}) + _ = requireNewCIDVersion(t, client, cidID2, "", &pbmesh.ComputedImplicitDestinations{}) + _ = requireNewCIDVersion(t, client, cidID3, "", &pbmesh.ComputedImplicitDestinations{}) + + // Add some other required resources. + + ReconcileComputedTrafficPermissions(t, client, ctpID2, &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{{ + IdentityName: wi1.Id.Name, + }}, + }}, + }) + createComputedRoutes(t, client, svc2.Resource) + + testutil.RunStep(t, "wi1 can reach wi2", func(t *testing.T) { + cidVersion1 = requireNewCIDVersion(t, client, cidID1, cidVersion1, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{{ + DestinationRef: resource.Reference(svc2.Id, ""), + DestinationPorts: []string{"grpc"}, + }}, + BoundReferences: []*pbresource.Reference{ + resource.Reference(ctpID2, ""), + resource.Reference(svc2.Id, ""), + resource.Reference(crID2, ""), + }, + }) + }) + + ReconcileComputedTrafficPermissions(t, client, ctpID3, &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{{ + IdentityName: wi1.Id.Name, + }}, + }}, + }) + createComputedRoutes(t, client, svc3.Resource) + + testutil.RunStep(t, "wi1 can reach wi2 and wi3", func(t *testing.T) { + cidVersion1 = requireNewCIDVersion(t, client, cidID1, cidVersion1, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + { + DestinationRef: resource.Reference(svc2.Id, ""), + DestinationPorts: []string{"grpc"}, + }, + { + DestinationRef: resource.Reference(svc3.Id, ""), + DestinationPorts: []string{"grpc"}, + }, + }, + BoundReferences: []*pbresource.Reference{ + resource.Reference(ctpID2, ""), + resource.Reference(ctpID3, ""), + resource.Reference(svc2.Id, ""), + resource.Reference(svc3.Id, ""), + resource.Reference(crID2, ""), + resource.Reference(crID3, ""), + }, + }) + }) + + // Remove a route. 
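+			// Deleting the ComputedRoutes for s2 should drop s2, along with its
+			// CTP and CR references, from wi1's computed implicit destinations
+			// (verified in the next step).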
+ client.MustDelete(t, crID2) + + testutil.RunStep(t, "removing a ComputedRoutes should remove that service from any CID", func(t *testing.T) { + cidVersion1 = requireNewCIDVersion(t, client, cidID1, cidVersion1, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + { + DestinationRef: resource.Reference(svc3.Id, ""), + DestinationPorts: []string{"grpc"}, + }, + }, + BoundReferences: []*pbresource.Reference{ + resource.Reference(ctpID3, ""), + resource.Reference(svc3.Id, ""), + resource.Reference(crID3, ""), + }, + }) + }) + + // Put it back. + createComputedRoutes(t, client, svc2.Resource) + + testutil.RunStep(t, "put the ComputedRoutes back", func(t *testing.T) { + cidVersion1 = requireNewCIDVersion(t, client, cidID1, cidVersion1, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + { + DestinationRef: resource.Reference(svc2.Id, ""), + DestinationPorts: []string{"grpc"}, + }, + { + DestinationRef: resource.Reference(svc3.Id, ""), + DestinationPorts: []string{"grpc"}, + }, + }, + BoundReferences: []*pbresource.Reference{ + resource.Reference(ctpID2, ""), + resource.Reference(ctpID3, ""), + resource.Reference(svc2.Id, ""), + resource.Reference(svc3.Id, ""), + resource.Reference(crID2, ""), + resource.Reference(crID3, ""), + }, + }) + }) + + // Remove traffic access to wi3. + client.MustDelete(t, ctpID3) + + testutil.RunStep(t, "removing a CTP should remove those services only exposing that WI", func(t *testing.T) { + cidVersion1 = requireNewCIDVersion(t, client, cidID1, cidVersion1, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + { + DestinationRef: resource.Reference(svc2.Id, ""), + DestinationPorts: []string{"grpc"}, + }, + }, + BoundReferences: []*pbresource.Reference{ + resource.Reference(ctpID2, ""), + resource.Reference(svc2.Id, ""), + resource.Reference(crID2, ""), + }, + }) + }) + + // Edit the route on top of svc3 to also split to svc2, which will + // cause it to re-manifest as an implicit destination due to half of + // the traffic possibly going to wi3. + grpcRoute := rtest.Resource(pbmesh.GRPCRouteType, "grpc-route-3"). + WithTenancy(tenancy). + WithData(t, &pbmesh.GRPCRoute{ + ParentRefs: []*pbmesh.ParentReference{{ + Ref: resource.Reference(svc3.Id, ""), + Port: "grpc", + }}, + Rules: []*pbmesh.GRPCRouteRule{{ + BackendRefs: []*pbmesh.GRPCBackendRef{ + { + BackendRef: &pbmesh.BackendReference{ + Ref: resource.Reference(svc2.Id, ""), + }, + Weight: 50, + }, + { + BackendRef: &pbmesh.BackendReference{ + Ref: resource.Reference(svc3.Id, ""), + }, + Weight: 50, + }, + }, + }}, + }). 
+ Write(t, client) + createComputedRoutes(t, client, svc3.Resource, + rtest.MustDecode[*pbmesh.GRPCRoute](t, grpcRoute), + rtest.MustDecode[*pbcatalog.Service](t, svc2.Resource), + ) + + testutil.RunStep(t, "a workload reachable by one branch of a computed routes still is implicit", func(t *testing.T) { + cidVersion1 = requireNewCIDVersion(t, client, cidID1, cidVersion1, &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + { + DestinationRef: resource.Reference(svc2.Id, ""), + DestinationPorts: []string{"grpc"}, + }, + { + DestinationRef: resource.Reference(svc3.Id, ""), + DestinationPorts: []string{"grpc"}, + }, + }, + BoundReferences: []*pbresource.Reference{ + resource.Reference(ctpID2, ""), + // no contribution to ctpID3, b/c it is deleted + resource.Reference(svc2.Id, ""), + resource.Reference(svc3.Id, ""), + resource.Reference(crID2, ""), + resource.Reference(crID3, ""), + }, + }) + }) + }) + } +} + +func (suite *controllerSuite) runStep(name string, fn func()) { + suite.T().Helper() + require.True(suite.T(), suite.Run(name, fn)) +} + +func requireNewCIDVersion( + t *testing.T, + client *rtest.Client, + id *pbresource.ID, + version string, + expected *pbmesh.ComputedImplicitDestinations, +) string { + t.Helper() + + var nextVersion string + retry.Run(t, func(r *retry.R) { + res := client.WaitForNewVersion(r, id, version) + + cid := rtest.MustDecode[*pbmesh.ComputedImplicitDestinations](r, res) + + prototest.AssertDeepEqual(r, expected, cid.Data) + + nextVersion = res.Version + }) + return nextVersion +} + +func (suite *controllerSuite) runTestCaseWithTenancies(testFunc func(*pbresource.Tenancy)) { + for _, tenancy := range suite.tenancies { + suite.Run(suite.appendTenancyInfo(tenancy), func() { + testFunc(tenancy) + }) + } +} + +func (suite *controllerSuite) appendTenancyInfo(tenancy *pbresource.Tenancy) string { + return tenancySubTestName(tenancy) +} + +func tenancySubTestName(tenancy *pbresource.Tenancy) string { + return fmt.Sprintf("%s_Namespace_%s_Partition", tenancy.Namespace, tenancy.Partition) +} + +func TestController(t *testing.T) { + suite.Run(t, new(controllerSuite)) +} + +func refFromID(id *pbresource.ID) *pbresource.Reference { + return resource.Reference(id, "") +} + +func (suite *controllerSuite) assertMapperDefaultDeny(res *pbresource.Resource, expect ...*pbresource.ID) { + suite.T().Helper() + suite.assertMapper(suite.ctl, res, expect...) +} + +func (suite *controllerSuite) assertMapperDefaultAllow(res *pbresource.Resource, expect ...*pbresource.ID) { + suite.T().Helper() + suite.assertMapper(suite.ctlDefaultAllow, res, expect...) +} + +func (suite *controllerSuite) assertMapper( + ctl *controller.TestController, + res *pbresource.Resource, + expect ...*pbresource.ID, +) { + suite.T().Helper() + reqs, err := ctl.DryRunMapper(suite.ctx, res) + require.NoError(suite.T(), err) + + var got []*pbresource.ID + for _, req := range reqs { + got = append(got, req.ID) + } + + prototest.AssertElementsMatch(suite.T(), expect, got) +} diff --git a/internal/mesh/internal/controllers/implicitdestinations/index.go b/internal/mesh/internal/controllers/implicitdestinations/index.go new file mode 100644 index 0000000000..1fabf666ad --- /dev/null +++ b/internal/mesh/internal/controllers/implicitdestinations/index.go @@ -0,0 +1,236 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package implicitdestinations
+
+import (
+	"golang.org/x/exp/maps"
+
+	"github.com/hashicorp/consul/internal/catalog"
+	"github.com/hashicorp/consul/internal/controller/cache/index"
+	"github.com/hashicorp/consul/internal/controller/cache/indexers"
+	"github.com/hashicorp/consul/internal/mesh/internal/types"
+	"github.com/hashicorp/consul/internal/resource"
+	"github.com/hashicorp/consul/internal/storage"
+	pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
+	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
+	"github.com/hashicorp/consul/proto-public/pbresource"
+)
+
+// When an interior (mutable) foreign key pointer on watched data is used to
+// determine the resource's applicability in a dependency mapper, it is
+// subject to the "orphaned computed resource" problem. When you edit the
+// mutable pointer to point elsewhere, the mapper will only witness the NEW
+// value and will trigger reconciles for things derived from the NEW pointer,
+// but side effects from a prior reconcile using the OLD pointer will be
+// orphaned until some other event triggers that reconcile (if ever).
+//
+// To solve this we need to collect the list of bound references that were
+// "ingredients" into a computed resource's output and persist them on the
+// newly written resource. Then we load them up and index them such that we can
+// use them to AUGMENT a mapper event with additional maps using the OLD data
+// as well.
+var boundRefsIndex = indexers.BoundRefsIndex[*pbmesh.ComputedImplicitDestinations]("bound-references")
+
+// Cache: reverse SVC[*] => WI[*]
+var serviceByWorkloadIdentityIndex = indexers.RefOrIDIndex(
+	"service-by-workload-identity",
+	func(svc *types.DecodedService) []*pbresource.Reference {
+		return getWorkloadIdentitiesFromService(svc.Resource)
+	},
+)
+
+// Cache: reverse CTP => WI[source]
+var ctpBySourceWorkloadIdentityIndex = indexers.RefOrIDIndex(
+	"ctp-by-source-workload-identity",
+	func(ctp *types.DecodedComputedTrafficPermissions) []*pbresource.Reference {
+		// We ignore wildcards for this index.
+		exact, _, _ := getSourceWorkloadIdentitiesFromCTP(ctp)
+		return maps.Values(exact)
+	},
+)
+
+const ctpByWildcardSourceIndexName = "ctp-by-wildcard-source"
+
+func ctpByWildcardSourceIndexCreator(globalDefaultAllow bool) *index.Index {
+	return indexers.DecodedMultiIndexer(
+		ctpByWildcardSourceIndexName,
+		index.SingleValueFromArgs(func(tn tenantedName) ([]byte, error) {
+			return indexFromTenantedName(tn), nil
+		}),
+		func(r *types.DecodedComputedTrafficPermissions) (bool, [][]byte, error) {
+			var vals [][]byte
+
+			if r.Data.IsDefault && globalDefaultAllow {
+				// Literally everything can reach it.
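+				// Index a fully wildcarded key (partition, namespace, and name all
+				// wildcards) so that every identity maps back to this default CTP.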
+				vals = append(vals, indexFromTenantedName(tenantedName{
+					Partition: storage.Wildcard,
+					Namespace: storage.Wildcard,
+					Name:      storage.Wildcard,
+				}))
+				return true, vals, nil
+			}
+
+			_, wildNameInNS, wildNSInPartition := getSourceWorkloadIdentitiesFromCTP(r)
+			for _, tenancy := range wildNameInNS {
+				// wildcard name
+				vals = append(vals, indexFromTenantedName(tenantedName{
+					Partition: tenancy.Partition,
+					Namespace: tenancy.Namespace,
+					Name:      storage.Wildcard,
+				}))
+			}
+			for _, partition := range wildNSInPartition {
+				// wildcard name+ns
+				vals = append(vals, indexFromTenantedName(tenantedName{
+					Partition: partition,
+					Namespace: storage.Wildcard,
+					Name:      storage.Wildcard,
+				}))
+			}
+
+			return true, vals, nil
+		},
+	)
+}
+
+type tenantedName struct {
+	Partition string
+	Namespace string
+	Name      string
+}
+
+func indexFromTenantedName(tn tenantedName) []byte {
+	var b index.Builder
+	b.String(tn.Partition)
+	b.String(tn.Namespace)
+	b.String(tn.Name)
+	return b.Bytes()
+}
+
+// Cache: reverse CR => SVC[backend]
+var computedRoutesByBackendServiceIndex = indexers.RefOrIDIndex(
+	"computed-routes-by-backend-service",
+	func(cr *types.DecodedComputedRoutes) []*pbresource.Reference {
+		return getBackendServiceRefsFromComputedRoutes(cr)
+	},
+)
+
+func getWorkloadIdentitiesFromService(svc *pbresource.Resource) []*pbresource.Reference {
+	ids := catalog.GetBoundIdentities(svc)
+
+	out := make([]*pbresource.Reference, 0, len(ids))
+	for _, id := range ids {
+		out = append(out, &pbresource.Reference{
+			Type:    pbauth.WorkloadIdentityType,
+			Name:    id,
+			Tenancy: svc.Id.Tenancy,
+		})
+	}
+	return out
+}
+
+func getSourceWorkloadIdentitiesFromCTP(
+	ctp *types.DecodedComputedTrafficPermissions,
+) (exact map[resource.ReferenceKey]*pbresource.Reference, wildNames []*pbresource.Tenancy, wildNS []string) {
+	var (
+		out               = make(map[resource.ReferenceKey]*pbresource.Reference)
+		wildNameInNS      = make(map[string]*pbresource.Tenancy)
+		wildNSInPartition = make(map[string]struct{})
+	)
+
+	for _, perm := range ctp.Data.AllowPermissions {
+		for _, src := range perm.Sources {
+			srcType := determineSourceType(src)
+			if srcType != sourceTypeLocal {
+				// Partition / Peer / SamenessGroup are mutually exclusive.
+				continue // Ignore these for now.
+			}
+			// It is assumed that src.Partition != "" at this point.
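+			// Each local source below is bucketed as an exact identity name, a
+			// wildcard name within a namespace, or a wildcard namespace within a
+			// partition.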
+ + if src.IdentityName != "" { + // exact + ref := &pbresource.Reference{ + Type: pbauth.WorkloadIdentityType, + Name: src.IdentityName, + Tenancy: &pbresource.Tenancy{ + Partition: src.Partition, + Namespace: src.Namespace, + }, + } + + rk := resource.NewReferenceKey(ref) + if _, ok := out[rk]; !ok { + out[rk] = ref + } + } else if src.Namespace != "" { + // wildcard name + tenancy := pbauth.SourceToTenancy(src) + tenancyStr := resource.TenancyToString(tenancy) + if _, ok := wildNameInNS[tenancyStr]; !ok { + wildNameInNS[tenancyStr] = tenancy + } + continue + } else { + // wildcard name+ns + if _, ok := wildNSInPartition[src.Partition]; !ok { + wildNSInPartition[src.Partition] = struct{}{} + } + continue + } + } + } + + var ( + sliceWildNameInNS []*pbresource.Tenancy + sliceWildNSInPartition []string + ) + if len(wildNameInNS) > 0 { + sliceWildNameInNS = maps.Values(wildNameInNS) + } + if len(wildNSInPartition) > 0 { + sliceWildNSInPartition = maps.Keys(wildNSInPartition) + } + + return out, sliceWildNameInNS, sliceWildNSInPartition +} + +func getBackendServiceRefsFromComputedRoutes(cr *types.DecodedComputedRoutes) []*pbresource.Reference { + var ( + out []*pbresource.Reference + seen = make(map[resource.ReferenceKey]struct{}) + ) + for _, pc := range cr.Data.PortedConfigs { + for _, target := range pc.Targets { + ref := target.BackendRef.Ref + rk := resource.NewReferenceKey(ref) + if _, ok := seen[rk]; !ok { + out = append(out, ref) + seen[rk] = struct{}{} + } + } + } + return out +} + +type sourceType int + +const ( + sourceTypeLocal sourceType = iota + sourceTypePeer + sourceTypeSamenessGroup +) + +// These rules also exist in internal/auth/internal/types during TP validation. +func determineSourceType(src *pbauth.Source) sourceType { + srcPeer := src.GetPeer() + + switch { + case srcPeer != "" && srcPeer != "local": + return sourceTypePeer + case src.GetSamenessGroup() != "": + return sourceTypeSamenessGroup + default: + return sourceTypeLocal + } +} diff --git a/internal/mesh/internal/controllers/implicitdestinations/index_test.go b/internal/mesh/internal/controllers/implicitdestinations/index_test.go new file mode 100644 index 0000000000..21e1d83236 --- /dev/null +++ b/internal/mesh/internal/controllers/implicitdestinations/index_test.go @@ -0,0 +1,408 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package implicitdestinations + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/internal/auth" + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/resourcetest" + rtest "github.com/hashicorp/consul/internal/resource/resourcetest" + pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto/private/prototest" +) + +func TestGetWorkloadIdentitiesFromService(t *testing.T) { + tenancy := resource.DefaultNamespacedTenancy() + + build := func(conds ...*pbresource.Condition) *pbresource.Resource { + b := rtest.Resource(pbcatalog.ServiceType, "web"). + WithTenancy(tenancy). 
+ WithData(t, &pbcatalog.Service{}) + if len(conds) > 0 { + b.WithStatus(catalog.EndpointsStatusKey, &pbresource.Status{ + Conditions: conds, + }) + } + return b.Build() + } + + fooRef := &pbresource.Reference{ + Type: pbauth.WorkloadIdentityType, + Tenancy: tenancy, + Name: "foo", + } + barRef := &pbresource.Reference{ + Type: pbauth.WorkloadIdentityType, + Tenancy: tenancy, + Name: "bar", + } + + makeRefs := func(refs ...*pbresource.Reference) []*pbresource.Reference { + return refs + } + + run := getWorkloadIdentitiesFromService + + require.Empty(t, run(build(nil))) + require.Empty(t, run(build(&pbresource.Condition{ + Type: catalog.StatusConditionBoundIdentities, + State: pbresource.Condition_STATE_TRUE, + Message: "", + }))) + prototest.AssertDeepEqual(t, makeRefs(fooRef), run(build(&pbresource.Condition{ + Type: catalog.StatusConditionBoundIdentities, + State: pbresource.Condition_STATE_TRUE, + Message: "foo", + }))) + require.Empty(t, run(build(&pbresource.Condition{ + Type: catalog.StatusConditionBoundIdentities, + State: pbresource.Condition_STATE_FALSE, + Message: "foo", + }))) + prototest.AssertDeepEqual(t, makeRefs(barRef, fooRef), run(build(&pbresource.Condition{ + Type: catalog.StatusConditionBoundIdentities, + State: pbresource.Condition_STATE_TRUE, + Message: "bar,foo", // proper order + }))) + prototest.AssertDeepEqual(t, makeRefs(barRef, fooRef), run(build(&pbresource.Condition{ + Type: catalog.StatusConditionBoundIdentities, + State: pbresource.Condition_STATE_TRUE, + Message: "foo,bar", // incorrect order gets fixed + }))) +} + +func TestGetSourceWorkloadIdentitiesFromCTP(t *testing.T) { + registry := resource.NewRegistry() + types.Register(registry) + auth.RegisterTypes(registry) + catalog.RegisterTypes(registry) + + type testcase struct { + ctp *types.DecodedComputedTrafficPermissions + expectExact []*pbresource.Reference + expectWildNameInNS []*pbresource.Tenancy + expectWildNSInPartition []string + } + + run := func(t *testing.T, tc testcase) { + expectExactMap := make(map[resource.ReferenceKey]*pbresource.Reference) + for _, ref := range tc.expectExact { + rk := resource.NewReferenceKey(ref) + expectExactMap[rk] = ref + } + + gotExact, gotWildNameInNS, gotWildNSInPartition := getSourceWorkloadIdentitiesFromCTP(tc.ctp) + prototest.AssertDeepEqual(t, expectExactMap, gotExact) + prototest.AssertElementsMatch(t, tc.expectWildNameInNS, gotWildNameInNS) + require.ElementsMatch(t, tc.expectWildNSInPartition, gotWildNSInPartition) + } + + tenancy := resource.DefaultNamespacedTenancy() + + ctpID := &pbresource.ID{ + Type: pbauth.ComputedTrafficPermissionsType, + Tenancy: tenancy, + Name: "ctp1", + } + + newRef := func(name string) *pbresource.Reference { + return &pbresource.Reference{ + Type: pbauth.WorkloadIdentityType, + Tenancy: tenancy, + Name: name, + } + } + + cases := map[string]testcase{ + "empty": { + ctp: ReconcileComputedTrafficPermissions(t, nil, ctpID), + }, + "single include": { + ctp: ReconcileComputedTrafficPermissions(t, nil, ctpID, + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{ + {IdentityName: "foo"}, + }, + }}, + }, + ), + expectExact: []*pbresource.Reference{ + newRef("foo"), + }, + }, + "multiple includes (1)": { + ctp: ReconcileComputedTrafficPermissions(t, nil, ctpID, + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{ + {IdentityName: "foo"}, + {IdentityName: "bar"}, + }, + }}, + 
}, + ), + expectExact: []*pbresource.Reference{ + newRef("foo"), + newRef("bar"), + }, + }, + "multiple includes (2)": { + ctp: ReconcileComputedTrafficPermissions(t, nil, ctpID, + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + {Sources: []*pbauth.Source{{IdentityName: "foo"}}}, + {Sources: []*pbauth.Source{{IdentityName: "bar"}}}, + }, + }, + ), + expectExact: []*pbresource.Reference{ + newRef("foo"), + newRef("bar"), + }, + }, + "default ns wildcard (1) / excludes ignored": { + ctp: ReconcileComputedTrafficPermissions(t, nil, ctpID, + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{{ + Exclude: []*pbauth.ExcludeSource{{ + IdentityName: "bar", + }}, + }}, + }}, + }, + ), + expectWildNSInPartition: []string{"default"}, + }, + "default ns wildcard (2)": { + ctp: ReconcileComputedTrafficPermissions(t, nil, ctpID, + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{ + {Partition: "default"}, + }, + }}, + }, + ), + expectWildNSInPartition: []string{"default"}, + }, + "multiple ns wildcards": { + ctp: ReconcileComputedTrafficPermissions(t, nil, ctpID, + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{ + {Partition: "foo"}, + {Partition: "bar"}, + }, + }}, + }, + ), + expectWildNSInPartition: []string{"bar", "foo"}, + }, + "multiple ns wildcards deduped": { + ctp: ReconcileComputedTrafficPermissions(t, nil, ctpID, + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{ + {Partition: "bar"}, + {Partition: "bar"}, + }, + }}, + }, + ), + expectWildNSInPartition: []string{"bar"}, + }, + "name wildcard": { + ctp: ReconcileComputedTrafficPermissions(t, nil, ctpID, + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{ + {Partition: "default", Namespace: "zim"}, + }, + }}, + }, + ), + expectWildNameInNS: []*pbresource.Tenancy{ + {Partition: "default", Namespace: "zim"}, + }, + }, + "multiple name wildcards": { + ctp: ReconcileComputedTrafficPermissions(t, nil, ctpID, + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{ + {Partition: "foo", Namespace: "zim"}, + {Partition: "bar", Namespace: "gir"}, + }, + }}, + }, + ), + expectWildNameInNS: []*pbresource.Tenancy{ + {Partition: "foo", Namespace: "zim"}, + {Partition: "bar", Namespace: "gir"}, + }, + }, + "multiple name wildcards deduped": { + ctp: ReconcileComputedTrafficPermissions(t, nil, ctpID, + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{{ + Sources: []*pbauth.Source{ + {Partition: "foo", Namespace: "zim"}, + {Partition: "foo", Namespace: "zim"}, + }, + }}, + }, + ), + expectWildNameInNS: []*pbresource.Tenancy{ + {Partition: "foo", Namespace: "zim"}, + }, + }, + "some of each": { + ctp: ReconcileComputedTrafficPermissions(t, nil, ctpID, + &pbauth.TrafficPermissions{ + Action: pbauth.Action_ACTION_ALLOW, + Permissions: []*pbauth.Permission{ + { + Sources: []*pbauth.Source{ + {Partition: "foo", Namespace: "zim"}, + {Partition: "bar", Namespace: "gir"}, + {IdentityName: "dib"}, + }, + }, + { + Sources: []*pbauth.Source{ + {Partition: "foo"}, + {Partition: 
"bar"}, + {IdentityName: "gaz"}, + }, + }, + }, + }, + ), + expectWildNameInNS: []*pbresource.Tenancy{ + {Partition: "foo", Namespace: "zim"}, + {Partition: "bar", Namespace: "gir"}, + }, + expectWildNSInPartition: []string{"bar", "foo"}, + expectExact: []*pbresource.Reference{ + newRef("dib"), + newRef("gaz"), + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestGetBackendServiceRefsFromComputedRoutes(t *testing.T) { + type testcase struct { + cr *types.DecodedComputedRoutes + expect []*pbresource.Reference + } + + run := func(t *testing.T, tc testcase) { + got := getBackendServiceRefsFromComputedRoutes(tc.cr) + prototest.AssertElementsMatch(t, tc.expect, got) + } + + tenancy := resource.DefaultNamespacedTenancy() + + newRef := func(name string) *pbresource.Reference { + return &pbresource.Reference{ + Type: pbcatalog.ServiceType, + Tenancy: tenancy, + Name: name, + } + } + + cr1 := resourcetest.Resource(pbmesh.ComputedRoutesType, "cr1"). + WithTenancy(tenancy). + WithData(t, &pbmesh.ComputedRoutes{ + PortedConfigs: map[string]*pbmesh.ComputedPortRoutes{ + "http": { + Targets: map[string]*pbmesh.BackendTargetDetails{ + "opaque1": { + BackendRef: &pbmesh.BackendReference{Ref: newRef("aaa")}, + }, + }, + }, + }, + }). + Build() + + cr2 := resourcetest.Resource(pbmesh.ComputedRoutesType, "cr2"). + WithTenancy(tenancy). + WithData(t, &pbmesh.ComputedRoutes{ + PortedConfigs: map[string]*pbmesh.ComputedPortRoutes{ + "http": { + Targets: map[string]*pbmesh.BackendTargetDetails{ + "opaque1": { + BackendRef: &pbmesh.BackendReference{Ref: newRef("aaa")}, + }, + "opaque2": { + BackendRef: &pbmesh.BackendReference{Ref: newRef("bbb")}, + }, + }, + }, + "grpc": { + Targets: map[string]*pbmesh.BackendTargetDetails{ + "opaque2": { + BackendRef: &pbmesh.BackendReference{Ref: newRef("bbb")}, + }, + "opaque3": { + BackendRef: &pbmesh.BackendReference{Ref: newRef("ccc")}, + }, + }, + }, + }, + }). + Build() + + cases := map[string]testcase{ + "one": { + cr: resourcetest.MustDecode[*pbmesh.ComputedRoutes](t, cr1), + expect: []*pbresource.Reference{ + newRef("aaa"), + }, + }, + "two": { + cr: resourcetest.MustDecode[*pbmesh.ComputedRoutes](t, cr2), + expect: []*pbresource.Reference{ + newRef("aaa"), + newRef("bbb"), + newRef("ccc"), + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} diff --git a/internal/mesh/internal/controllers/implicitdestinations/mapper.go b/internal/mesh/internal/controllers/implicitdestinations/mapper.go new file mode 100644 index 0000000000..22d648ba56 --- /dev/null +++ b/internal/mesh/internal/controllers/implicitdestinations/mapper.go @@ -0,0 +1,170 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package implicitdestinations + +import ( + "context" + + "golang.org/x/exp/maps" + + "github.com/hashicorp/consul/internal/controller" + "github.com/hashicorp/consul/internal/controller/cache" + "github.com/hashicorp/consul/internal/controller/dependency" + "github.com/hashicorp/consul/internal/mesh/internal/types" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/storage" + pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +type mapAndTransformer struct { + globalDefaultAllow bool +} + +// Note: these MapZZZ functions ignore the bound refs. + +func (m *mapAndTransformer) MapComputedTrafficPermissions(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) { + // Summary: CTP WI[source] CID + + dm := dependency.MapDecoded[*pbauth.ComputedTrafficPermissions]( + // (1) turn CTP -> WI[source] + m.mapComputedTrafficPermissionsToSourceWorkloadIdentities, + ) + return dependency.WrapAndReplaceType(pbmesh.ComputedImplicitDestinationsType, dm)(ctx, rt, res) +} + +func (m *mapAndTransformer) MapService(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) { + // Summary: SVC[backend] WI[backend] CTP WI[source] CID + + dm := dependency.MapperWithTransform( + // (2) turn WI[backend] -> CTP -> WI[source] + m.mapBackendWorkloadIdentityToSourceWorkloadIdentity, + // (1) turn SVC[backend] => WI[backend] + m.transformServiceToWorkloadIdentities, + ) + return dependency.WrapAndReplaceType(pbmesh.ComputedImplicitDestinationsType, dm)(ctx, rt, res) +} + +func (m *mapAndTransformer) MapComputedRoutes(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) { + // Summary: CR SVC[backend] WI[backend] CTP WI[source] CID + + dm := dependency.MapperWithTransform( + // (3) turn WI[backend] -> CTP -> WI[source] + m.mapBackendWorkloadIdentityToSourceWorkloadIdentity, + dependency.TransformChain( + // (1) Turn CR -> SVC[backend] + m.transformComputedRoutesToBackendServiceRefs, + // (2) Turn SVC[backend] -> WI[backend] + m.transformServiceToWorkloadIdentities, + ), + ) + return dependency.WrapAndReplaceType(pbmesh.ComputedImplicitDestinationsType, dm)(ctx, rt, res) +} + +func (m *mapAndTransformer) mapComputedTrafficPermissionsToSourceWorkloadIdentities(ctx context.Context, rt controller.Runtime, ctp *types.DecodedComputedTrafficPermissions) ([]controller.Request, error) { + refs, err := m.getSourceWorkloadIdentitiesFromCTPWithWildcardExpansion(rt.Cache, ctp) + if err != nil { + return nil, err + } + return controller.MakeRequests(pbauth.WorkloadIdentityType, refs), nil +} + +func (m *mapAndTransformer) getSourceWorkloadIdentitiesFromCTPWithWildcardExpansion( + cache cache.ReadOnlyCache, + ctp *types.DecodedComputedTrafficPermissions, +) ([]*pbresource.Reference, error) { + if ctp.Data.IsDefault && m.globalDefaultAllow { + return listAllWorkloadIdentities(cache, &pbresource.Tenancy{ + Partition: storage.Wildcard, + Namespace: storage.Wildcard, + }) + } + + exact, wildNames, wildNS := getSourceWorkloadIdentitiesFromCTP(ctp) + + for _, wildTenancy := range wildNames { + got, err := listAllWorkloadIdentities(cache, wildTenancy) + if err != nil { + return nil, err + } + for _, ref := range got { + rk 
:= resource.NewReferenceKey(ref) + if _, ok := exact[rk]; !ok { + exact[rk] = ref + } + } + } + + for _, wildPartition := range wildNS { + got, err := listAllWorkloadIdentities(cache, &pbresource.Tenancy{ + Partition: wildPartition, + Namespace: storage.Wildcard, + }) + if err != nil { + return nil, err + } + for _, ref := range got { + rk := resource.NewReferenceKey(ref) + if _, ok := exact[rk]; !ok { + exact[rk] = ref + } + } + } + + return maps.Values(exact), nil +} + +func (m *mapAndTransformer) mapBackendWorkloadIdentityToSourceWorkloadIdentity(ctx context.Context, rt controller.Runtime, wiRes *pbresource.Resource) ([]controller.Request, error) { + ctpID := resource.ReplaceType(pbauth.ComputedTrafficPermissionsType, wiRes.Id) + + ctp, err := cache.GetDecoded[*pbauth.ComputedTrafficPermissions](rt.Cache, pbauth.ComputedTrafficPermissionsType, "id", ctpID) + if err != nil { + return nil, err + } else if ctp == nil { + return nil, nil + } + + return m.mapComputedTrafficPermissionsToSourceWorkloadIdentities(ctx, rt, ctp) +} + +func (m *mapAndTransformer) transformServiceToWorkloadIdentities(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]*pbresource.Resource, error) { + // This is deliberately thin b/c WI's have no body, and we'll pass this to + // another transformer immediately anyway, so it's largely an opaque + // carrier for the WI name string only. + + wiIDs := getWorkloadIdentitiesFromService(res) + + out := make([]*pbresource.Resource, 0, len(wiIDs)) + for _, wiID := range wiIDs { + wiLite := &pbresource.Resource{ + Id: resource.IDFromReference(wiID), + } + out = append(out, wiLite) + } + + return out, nil +} + +func (m *mapAndTransformer) transformComputedRoutesToBackendServiceRefs(ctx context.Context, rt controller.Runtime, res *pbresource.Resource) ([]*pbresource.Resource, error) { + cr, err := resource.Decode[*pbmesh.ComputedRoutes](res) + if err != nil { + return nil, err + } + + svcRefs := getBackendServiceRefsFromComputedRoutes(cr) + + out := make([]*pbresource.Resource, 0, len(svcRefs)) + for _, svcRef := range svcRefs { + svc, err := rt.Cache.Get(pbcatalog.ServiceType, "id", svcRef) + if err != nil { + return nil, err + } + if svc != nil { + out = append(out, svc) + } + } + return out, nil +} diff --git a/internal/mesh/internal/controllers/implicitdestinations/status.go b/internal/mesh/internal/controllers/implicitdestinations/status.go new file mode 100644 index 0000000000..376b7bc167 --- /dev/null +++ b/internal/mesh/internal/controllers/implicitdestinations/status.go @@ -0,0 +1,7 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 +package implicitdestinations + +const ( + ControllerID = "consul.io/implicit-destinations" +) diff --git a/internal/mesh/internal/controllers/register.go b/internal/mesh/internal/controllers/register.go index 46cab2f7c1..d4aa1270c1 100644 --- a/internal/mesh/internal/controllers/register.go +++ b/internal/mesh/internal/controllers/register.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/consul/internal/mesh/internal/controllers/apigateways" "github.com/hashicorp/consul/internal/mesh/internal/controllers/gatewayproxy" + "github.com/hashicorp/consul/internal/mesh/internal/controllers/implicitdestinations" "github.com/hashicorp/consul/internal/mesh/internal/controllers/meshconfiguration" "github.com/hashicorp/consul/agent/leafcert" @@ -55,6 +56,7 @@ func Register(mgr *controller.Manager, deps Dependencies) { mgr.Register(proxyconfiguration.Controller(workloadselectionmapper.New[*pbmesh.ProxyConfiguration](pbmesh.ComputedProxyConfigurationType))) mgr.Register(explicitdestinations.Controller(mapper.New())) + mgr.Register(implicitdestinations.Controller(deps.DefaultAllow)) mgr.Register(meshgateways.Controller()) mgr.Register(meshconfiguration.Controller()) diff --git a/internal/mesh/internal/controllers/routes/generate.go b/internal/mesh/internal/controllers/routes/generate.go index d85e47ab68..88458dcd06 100644 --- a/internal/mesh/internal/controllers/routes/generate.go +++ b/internal/mesh/internal/controllers/routes/generate.go @@ -78,7 +78,7 @@ func compile( // future we could use the others, but for now they are harmless to include // in the produced resource and is beneficial from an audit/debugging // perspective to know all of the inputs that produced this output. - boundRefCollector := NewBoundReferenceCollector() + boundRefCollector := resource.NewBoundReferenceCollector() parentServiceDec := related.GetService(parentServiceID) if parentServiceDec == nil { @@ -441,7 +441,7 @@ func compileFailoverConfig( related *loader.RelatedResources, failoverConfig *pbcatalog.FailoverConfig, targets map[string]*pbmesh.BackendTargetDetails, - brc *BoundReferenceCollector, + brc *resource.BoundReferenceCollector, ) *pbmesh.ComputedFailoverConfig { if failoverConfig == nil { return nil @@ -522,7 +522,7 @@ func compileHTTPRouteNode( res *pbresource.Resource, route *pbmesh.HTTPRoute, serviceGetter serviceGetter, - brc *BoundReferenceCollector, + brc *resource.BoundReferenceCollector, ) *inputRouteNode { route = protoClone(route) node := newInputRouteNode(port) @@ -599,7 +599,7 @@ func compileGRPCRouteNode( res *pbresource.Resource, route *pbmesh.GRPCRoute, serviceGetter serviceGetter, - brc *BoundReferenceCollector, + brc *resource.BoundReferenceCollector, ) *inputRouteNode { route = protoClone(route) @@ -669,7 +669,7 @@ func compileTCPRouteNode( res *pbresource.Resource, route *pbmesh.TCPRoute, serviceGetter serviceGetter, - brc *BoundReferenceCollector, + brc *resource.BoundReferenceCollector, ) *inputRouteNode { route = protoClone(route) diff --git a/internal/mesh/internal/types/computed_implicit_destinations.go b/internal/mesh/internal/types/computed_implicit_destinations.go new file mode 100644 index 0000000000..d127d1b35a --- /dev/null +++ b/internal/mesh/internal/types/computed_implicit_destinations.go @@ -0,0 +1,102 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package types + +import ( + "github.com/hashicorp/go-multierror" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/internal/catalog" + "github.com/hashicorp/consul/internal/resource" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" +) + +type DecodedComputedImplicitDestinations = resource.DecodedResource[*pbmesh.ComputedImplicitDestinations] + +func RegisterComputedImplicitDestinations(r resource.Registry) { + r.Register(resource.Registration{ + Type: pbmesh.ComputedImplicitDestinationsType, + Proto: &pbmesh.ComputedImplicitDestinations{}, + ACLs: &resource.ACLHooks{ + Read: aclReadHookComputedImplicitDestinations, + Write: aclWriteHookComputedImplicitDestinations, + List: resource.NoOpACLListHook, + }, + Validate: ValidateComputedImplicitDestinations, + Scope: resource.ScopeNamespace, + }) +} + +var ValidateComputedImplicitDestinations = resource.DecodeAndValidate(validateComputedImplicitDestinations) + +func validateComputedImplicitDestinations(res *DecodedComputedImplicitDestinations) error { + var merr error + for i, implDest := range res.Data.Destinations { + wrapErr := func(err error) error { + return resource.ErrInvalidListElement{ + Name: "destinations", + Index: i, + Wrapped: err, + } + } + if err := validateImplicitDestination(implDest, wrapErr); err != nil { + merr = multierror.Append(merr, err) + } + } + return merr +} + +func validateImplicitDestination(p *pbmesh.ImplicitDestination, wrapErr func(error) error) error { + var merr error + + wrapRefErr := func(err error) error { + return wrapErr(resource.ErrInvalidField{ + Name: "destination_ref", + Wrapped: err, + }) + } + + if refErr := catalog.ValidateLocalServiceRefNoSection(p.DestinationRef, wrapRefErr); refErr != nil { + merr = multierror.Append(merr, refErr) + } + + if len(p.DestinationPorts) == 0 { + merr = multierror.Append(merr, wrapErr(resource.ErrInvalidField{ + Name: "destination_ports", + Wrapped: resource.ErrEmpty, + })) + } else { + for i, port := range p.DestinationPorts { + if portErr := catalog.ValidatePortName(port); portErr != nil { + merr = multierror.Append(merr, wrapErr(resource.ErrInvalidListElement{ + Name: "destination_ports", + Index: i, + Wrapped: portErr, + })) + } + } + } + + return merr +} + +func aclReadHookComputedImplicitDestinations( + authorizer acl.Authorizer, + authzCtx *acl.AuthorizerContext, + id *pbresource.ID, + res *pbresource.Resource, +) error { + if id != nil { + return authorizer.ToAllowAuthorizer().IdentityReadAllowed(id.Name, authzCtx) + } + if res != nil { + return authorizer.ToAllowAuthorizer().IdentityReadAllowed(res.Id.Name, authzCtx) + } + return resource.ErrNeedResource +} + +func aclWriteHookComputedImplicitDestinations(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, res *pbresource.Resource) error { + return authorizer.ToAllowAuthorizer().OperatorWriteAllowed(authzContext) +} diff --git a/internal/mesh/internal/types/computed_implicit_destinations_test.go b/internal/mesh/internal/types/computed_implicit_destinations_test.go new file mode 100644 index 0000000000..bc2e1aaaee --- /dev/null +++ b/internal/mesh/internal/types/computed_implicit_destinations_test.go @@ -0,0 +1,268 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package types + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/internal/resource" + "github.com/hashicorp/consul/internal/resource/resourcetest" + pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1" + pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1" + "github.com/hashicorp/consul/proto-public/pbresource" + "github.com/hashicorp/consul/proto/private/prototest" + "github.com/hashicorp/consul/sdk/testutil" +) + +func TestValidateComputedImplicitDestinations(t *testing.T) { + type testcase struct { + data *pbmesh.ComputedImplicitDestinations + expectErr string + } + run := func(t *testing.T, tc testcase) { + res := resourcetest.Resource(pbmesh.ComputedImplicitDestinationsType, "api"). + WithTenancy(resource.DefaultNamespacedTenancy()). + WithData(t, tc.data). + Build() + + err := ValidateComputedImplicitDestinations(res) + + // Verify that validate didn't actually change the object. + got := resourcetest.MustDecode[*pbmesh.ComputedImplicitDestinations](t, res) + prototest.AssertDeepEqual(t, tc.data, got.Data) + + if tc.expectErr == "" { + require.NoError(t, err) + } else { + testutil.RequireErrorContains(t, err, tc.expectErr) + } + } + + cases := map[string]testcase{ + // emptiness + "empty": { + data: &pbmesh.ComputedImplicitDestinations{}, + }, + "svc/nil ref": { + data: &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + {DestinationRef: nil}, + }, + }, + expectErr: `invalid element at index 0 of list "destinations": invalid "destination_ref" field: missing required field`, + }, + "svc/bad type": { + data: &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + {DestinationRef: newRefWithTenancy(pbcatalog.WorkloadType, "default.default", "api")}, + }, + }, + expectErr: `invalid element at index 0 of list "destinations": invalid "destination_ref" field: invalid "type" field: reference must have type catalog.v2beta1.Service`, + }, + "svc/nil tenancy": { + data: &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + {DestinationRef: &pbresource.Reference{Type: pbcatalog.ServiceType, Name: "api"}}, + }, + }, + expectErr: `invalid element at index 0 of list "destinations": invalid "destination_ref" field: invalid "tenancy" field: missing required field`, + }, + "svc/bad dest tenancy/partition": { + data: &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + {DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, ".bar", "api")}, + }, + }, + expectErr: `invalid element at index 0 of list "destinations": invalid "destination_ref" field: invalid "tenancy" field: invalid "partition" field: cannot be empty`, + }, + "svc/bad dest tenancy/namespace": { + data: &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + {DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo", "api")}, + }, + }, + expectErr: `invalid element at index 0 of list "destinations": invalid "destination_ref" field: invalid "tenancy" field: invalid "namespace" field: cannot be empty`, + }, + "no ports": { + data: &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + }, + }, + }, + expectErr: `invalid element at index 0 of list 
"destinations": invalid "destination_ports" field: cannot be empty`, + }, + "bad port/empty": { + data: &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + DestinationPorts: []string{""}, + }, + }, + }, + expectErr: `invalid element at index 0 of list "destinations": invalid element at index 0 of list "destination_ports": cannot be empty`, + }, + "bad port/no letters": { + data: &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + DestinationPorts: []string{"1234"}, + }, + }, + }, + expectErr: `invalid element at index 0 of list "destinations": invalid element at index 0 of list "destination_ports": value must be 1-15 characters`, + }, + "bad port/too long": { + data: &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + DestinationPorts: []string{strings.Repeat("a", 16)}, + }, + }, + }, + expectErr: `invalid element at index 0 of list "destinations": invalid element at index 0 of list "destination_ports": value must be 1-15 characters`, + }, + "normal": { + data: &pbmesh.ComputedImplicitDestinations{ + Destinations: []*pbmesh.ImplicitDestination{ + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.bar", "api"), + DestinationPorts: []string{"p1"}, + }, + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "foo.zim", "api"), + DestinationPorts: []string{"p2"}, + }, + { + DestinationRef: newRefWithTenancy(pbcatalog.ServiceType, "gir.zim", "api"), + DestinationPorts: []string{"p3"}, + }, + }, + }, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestComputedImplicitDestinationsACLs(t *testing.T) { + // Wire up a registry to generically invoke hooks + registry := resource.NewRegistry() + Register(registry) + + type testcase struct { + rules string + check func(t *testing.T, authz acl.Authorizer, res *pbresource.Resource) + readOK string + writeOK string + listOK string + } + + const ( + DENY = "deny" + ALLOW = "allow" + DEFAULT = "default" + ) + + checkF := func(t *testing.T, expect string, got error) { + switch expect { + case ALLOW: + if acl.IsErrPermissionDenied(got) { + t.Fatal("should be allowed") + } + case DENY: + if !acl.IsErrPermissionDenied(got) { + t.Fatal("should be denied") + } + case DEFAULT: + require.Nil(t, got, "expected fallthrough decision") + default: + t.Fatalf("unexpected expectation: %q", expect) + } + } + + reg, ok := registry.Resolve(pbmesh.ComputedImplicitDestinationsType) + require.True(t, ok) + + run := func(t *testing.T, tc testcase) { + cidData := &pbmesh.ComputedImplicitDestinations{} + res := resourcetest.Resource(pbmesh.ComputedImplicitDestinationsType, "wi1"). + WithTenancy(resource.DefaultNamespacedTenancy()). + WithData(t, cidData). 
+ Build() + resourcetest.ValidateAndNormalize(t, registry, res) + + config := acl.Config{ + WildcardName: structs.WildcardSpecifier, + } + authz, err := acl.NewAuthorizerFromRules(tc.rules, &config, nil) + require.NoError(t, err) + authz = acl.NewChainedAuthorizer([]acl.Authorizer{authz, acl.DenyAll()}) + + t.Run("read", func(t *testing.T) { + err := reg.ACLs.Read(authz, &acl.AuthorizerContext{}, res.Id, res) + checkF(t, tc.readOK, err) + }) + t.Run("write", func(t *testing.T) { + err := reg.ACLs.Write(authz, &acl.AuthorizerContext{}, res) + checkF(t, tc.writeOK, err) + }) + t.Run("list", func(t *testing.T) { + err := reg.ACLs.List(authz, &acl.AuthorizerContext{}) + checkF(t, tc.listOK, err) + }) + } + + cases := map[string]testcase{ + "no rules": { + rules: ``, + readOK: DENY, + writeOK: DENY, + listOK: DEFAULT, + }, + "operator read": { + rules: `operator = "read" `, + readOK: DENY, + writeOK: DENY, + listOK: DEFAULT, + }, + "operator write": { + rules: `operator = "write" `, + readOK: DENY, + writeOK: ALLOW, + listOK: DEFAULT, + }, + "workload identity w1 read": { + rules: `identity "wi1" { policy = "read" }`, + readOK: ALLOW, + writeOK: DENY, + listOK: DEFAULT, + }, + "workload identity w1 write": { + rules: `identity "wi1" { policy = "write" }`, + readOK: ALLOW, + writeOK: DENY, + listOK: DEFAULT, + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + run(t, tc) + }) + } +} diff --git a/internal/mesh/internal/types/decoded.go b/internal/mesh/internal/types/decoded.go index 6fa406f755..a0f8d3f97e 100644 --- a/internal/mesh/internal/types/decoded.go +++ b/internal/mesh/internal/types/decoded.go @@ -19,7 +19,9 @@ type ( DecodedDestinationsConfiguration = resource.DecodedResource[*pbmesh.DestinationsConfiguration] DecodedComputedRoutes = resource.DecodedResource[*pbmesh.ComputedRoutes] DecodedComputedTrafficPermissions = resource.DecodedResource[*pbauth.ComputedTrafficPermissions] + DecodedTrafficPermissions = resource.DecodedResource[*pbauth.TrafficPermissions] DecodedComputedFailoverPolicy = resource.DecodedResource[*pbcatalog.ComputedFailoverPolicy] + DecodedFailoverPolicy = resource.DecodedResource[*pbcatalog.FailoverPolicy] DecodedService = resource.DecodedResource[*pbcatalog.Service] DecodedServiceEndpoints = resource.DecodedResource[*pbcatalog.ServiceEndpoints] DecodedWorkload = resource.DecodedResource[*pbcatalog.Workload] diff --git a/internal/mesh/internal/types/types.go b/internal/mesh/internal/types/types.go index 5eba4b8e89..aab7f27765 100644 --- a/internal/mesh/internal/types/types.go +++ b/internal/mesh/internal/types/types.go @@ -12,6 +12,7 @@ func Register(r resource.Registry) { RegisterComputedProxyConfiguration(r) RegisterDestinations(r) RegisterComputedExplicitDestinations(r) + RegisterComputedImplicitDestinations(r) RegisterProxyStateTemplate(r) RegisterHTTPRoute(r) RegisterTCPRoute(r) diff --git a/internal/mesh/internal/controllers/routes/bound_refs.go b/internal/resource/bound_refs.go similarity index 75% rename from internal/mesh/internal/controllers/routes/bound_refs.go rename to internal/resource/bound_refs.go index afdac8460b..039c77543f 100644 --- a/internal/mesh/internal/controllers/routes/bound_refs.go +++ b/internal/resource/bound_refs.go @@ -1,17 +1,16 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: BUSL-1.1 -package routes +package resource import ( "sort" - "github.com/hashicorp/consul/internal/resource" "github.com/hashicorp/consul/proto-public/pbresource" ) type sectionRefKey struct { - resource.ReferenceKey + ReferenceKey Section string } @@ -36,17 +35,17 @@ func (c *BoundReferenceCollector) List() []*pbresource.Reference { } sort.Slice(out, func(i, j int) bool { - return resource.LessReference(out[i], out[j]) + return LessReference(out[i], out[j]) }) return out } -func (c *BoundReferenceCollector) AddRefOrID(ref resource.ReferenceOrID) { +func (c *BoundReferenceCollector) AddRefOrID(ref ReferenceOrID) { if c == nil { return } - c.AddRef(resource.ReferenceFromReferenceOrID(ref)) + c.AddRef(ReferenceFromReferenceOrID(ref)) } func (c *BoundReferenceCollector) AddRef(ref *pbresource.Reference) { @@ -54,7 +53,7 @@ func (c *BoundReferenceCollector) AddRef(ref *pbresource.Reference) { return } srk := sectionRefKey{ - ReferenceKey: resource.NewReferenceKey(ref), + ReferenceKey: NewReferenceKey(ref), Section: ref.Section, } diff --git a/proto-public/pbmesh/v2beta1/computed_implicit_destinations.pb.binary.go b/proto-public/pbmesh/v2beta1/computed_implicit_destinations.pb.binary.go new file mode 100644 index 0000000000..7bb1a0147e --- /dev/null +++ b/proto-public/pbmesh/v2beta1/computed_implicit_destinations.pb.binary.go @@ -0,0 +1,28 @@ +// Code generated by protoc-gen-go-binary. DO NOT EDIT. +// source: pbmesh/v2beta1/computed_implicit_destinations.proto + +package meshv2beta1 + +import ( + "google.golang.org/protobuf/proto" +) + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ComputedImplicitDestinations) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ComputedImplicitDestinations) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ImplicitDestination) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ImplicitDestination) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto-public/pbmesh/v2beta1/computed_implicit_destinations.pb.go b/proto-public/pbmesh/v2beta1/computed_implicit_destinations.pb.go new file mode 100644 index 0000000000..245797377c --- /dev/null +++ b/proto-public/pbmesh/v2beta1/computed_implicit_destinations.pb.go @@ -0,0 +1,273 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: pbmesh/v2beta1/computed_implicit_destinations.proto + +package meshv2beta1 + +import ( + pbresource "github.com/hashicorp/consul/proto-public/pbresource" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// ImplicitDestinations tracks destination services for a given workload identity. +type ComputedImplicitDestinations struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // destinations is the list of destinations. 
+ Destinations []*ImplicitDestination `protobuf:"bytes,1,rep,name=destinations,proto3" json:"destinations,omitempty"` + // BoundReferences is a slice of mixed type references of resources that were + // involved in the formulation of this resource. + BoundReferences []*pbresource.Reference `protobuf:"bytes,2,rep,name=bound_references,json=boundReferences,proto3" json:"bound_references,omitempty"` +} + +func (x *ComputedImplicitDestinations) Reset() { + *x = ComputedImplicitDestinations{} + if protoimpl.UnsafeEnabled { + mi := &file_pbmesh_v2beta1_computed_implicit_destinations_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ComputedImplicitDestinations) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ComputedImplicitDestinations) ProtoMessage() {} + +func (x *ComputedImplicitDestinations) ProtoReflect() protoreflect.Message { + mi := &file_pbmesh_v2beta1_computed_implicit_destinations_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ComputedImplicitDestinations.ProtoReflect.Descriptor instead. +func (*ComputedImplicitDestinations) Descriptor() ([]byte, []int) { + return file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDescGZIP(), []int{0} +} + +func (x *ComputedImplicitDestinations) GetDestinations() []*ImplicitDestination { + if x != nil { + return x.Destinations + } + return nil +} + +func (x *ComputedImplicitDestinations) GetBoundReferences() []*pbresource.Reference { + if x != nil { + return x.BoundReferences + } + return nil +} + +// ImplicitDestination contains a reference to a catalog service and a list of +// port names that are allowed by TrafficPermissions. +type ImplicitDestination struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DestinationRef *pbresource.Reference `protobuf:"bytes,1,opt,name=destination_ref,json=destinationRef,proto3" json:"destination_ref,omitempty"` + DestinationPorts []string `protobuf:"bytes,2,rep,name=destination_ports,json=destinationPorts,proto3" json:"destination_ports,omitempty"` +} + +func (x *ImplicitDestination) Reset() { + *x = ImplicitDestination{} + if protoimpl.UnsafeEnabled { + mi := &file_pbmesh_v2beta1_computed_implicit_destinations_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImplicitDestination) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImplicitDestination) ProtoMessage() {} + +func (x *ImplicitDestination) ProtoReflect() protoreflect.Message { + mi := &file_pbmesh_v2beta1_computed_implicit_destinations_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImplicitDestination.ProtoReflect.Descriptor instead. 
+func (*ImplicitDestination) Descriptor() ([]byte, []int) { + return file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDescGZIP(), []int{1} +} + +func (x *ImplicitDestination) GetDestinationRef() *pbresource.Reference { + if x != nil { + return x.DestinationRef + } + return nil +} + +func (x *ImplicitDestination) GetDestinationPorts() []string { + if x != nil { + return x.DestinationPorts + } + return nil +} + +var File_pbmesh_v2beta1_computed_implicit_destinations_proto protoreflect.FileDescriptor + +var file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, + 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x63, + 0x69, 0x74, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x32, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x1a, 0x1c, 0x70, 0x62, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x19, 0x70, 0x62, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x01, + 0x0a, 0x1c, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, + 0x69, 0x74, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x56, + 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, 0x68, 0x2e, 0x76, 0x32, 0x62, + 0x65, 0x74, 0x61, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x44, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x10, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x3a, 0x06, 0xa2, 0x93, 0x04, 0x02, 0x08, 0x03, 0x22, + 0x91, 0x01, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x44, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 
0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, + 0x72, 0x74, 0x73, 0x42, 0xa2, 0x02, 0x0a, 0x21, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x6d, 0x65, 0x73, + 0x68, 0x2e, 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x42, 0x21, 0x43, 0x6f, 0x6d, 0x70, 0x75, + 0x74, 0x65, 0x64, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x44, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x43, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x6d, 0x65, 0x73, 0x68, 0x2f, + 0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x3b, 0x6d, 0x65, 0x73, 0x68, 0x76, 0x32, 0x62, 0x65, + 0x74, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x48, 0x43, 0x4d, 0xaa, 0x02, 0x1d, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x4d, 0x65, 0x73, + 0x68, 0x2e, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xca, 0x02, 0x1d, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, + 0x68, 0x5c, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0xe2, 0x02, 0x29, 0x48, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x4d, 0x65, 0x73, + 0x68, 0x5c, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x20, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, + 0x70, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x4d, 0x65, 0x73, 0x68, 0x3a, + 0x3a, 0x56, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDescOnce sync.Once + file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDescData = file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDesc +) + +func file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDescGZIP() []byte { + file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDescOnce.Do(func() { + file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDescData = protoimpl.X.CompressGZIP(file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDescData) + }) + return file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDescData +} + +var file_pbmesh_v2beta1_computed_implicit_destinations_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_pbmesh_v2beta1_computed_implicit_destinations_proto_goTypes = []interface{}{ + (*ComputedImplicitDestinations)(nil), // 0: hashicorp.consul.mesh.v2beta1.ComputedImplicitDestinations + (*ImplicitDestination)(nil), // 1: hashicorp.consul.mesh.v2beta1.ImplicitDestination + (*pbresource.Reference)(nil), // 2: hashicorp.consul.resource.Reference +} +var file_pbmesh_v2beta1_computed_implicit_destinations_proto_depIdxs = []int32{ + 1, // 0: hashicorp.consul.mesh.v2beta1.ComputedImplicitDestinations.destinations:type_name -> hashicorp.consul.mesh.v2beta1.ImplicitDestination + 2, // 1: hashicorp.consul.mesh.v2beta1.ComputedImplicitDestinations.bound_references:type_name -> hashicorp.consul.resource.Reference + 2, // 2: hashicorp.consul.mesh.v2beta1.ImplicitDestination.destination_ref:type_name -> 
hashicorp.consul.resource.Reference + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_pbmesh_v2beta1_computed_implicit_destinations_proto_init() } +func file_pbmesh_v2beta1_computed_implicit_destinations_proto_init() { + if File_pbmesh_v2beta1_computed_implicit_destinations_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pbmesh_v2beta1_computed_implicit_destinations_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ComputedImplicitDestinations); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pbmesh_v2beta1_computed_implicit_destinations_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImplicitDestination); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pbmesh_v2beta1_computed_implicit_destinations_proto_goTypes, + DependencyIndexes: file_pbmesh_v2beta1_computed_implicit_destinations_proto_depIdxs, + MessageInfos: file_pbmesh_v2beta1_computed_implicit_destinations_proto_msgTypes, + }.Build() + File_pbmesh_v2beta1_computed_implicit_destinations_proto = out.File + file_pbmesh_v2beta1_computed_implicit_destinations_proto_rawDesc = nil + file_pbmesh_v2beta1_computed_implicit_destinations_proto_goTypes = nil + file_pbmesh_v2beta1_computed_implicit_destinations_proto_depIdxs = nil +} diff --git a/proto-public/pbmesh/v2beta1/computed_implicit_destinations.proto b/proto-public/pbmesh/v2beta1/computed_implicit_destinations.proto new file mode 100644 index 0000000000..91ed1c0cf4 --- /dev/null +++ b/proto-public/pbmesh/v2beta1/computed_implicit_destinations.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package hashicorp.consul.mesh.v2beta1; + +import "pbresource/annotations.proto"; +import "pbresource/resource.proto"; + +// ImplicitDestinations tracks destination services for a given workload identity. +message ComputedImplicitDestinations { + option (hashicorp.consul.resource.spec) = {scope: SCOPE_NAMESPACE}; + // destinations is the list of destinations. + repeated ImplicitDestination destinations = 1; + + // BoundReferences is a slice of mixed type references of resources that were + // involved in the formulation of this resource. + repeated hashicorp.consul.resource.Reference bound_references = 2; +} + +// ImplicitDestination contains a reference to a catalog service and a list of +// port names that are allowed by TrafficPermissions. 
+message ImplicitDestination { + hashicorp.consul.resource.Reference destination_ref = 1; + repeated string destination_ports = 2; +} diff --git a/proto-public/pbmesh/v2beta1/computed_implicit_destinations_deepcopy.gen.go b/proto-public/pbmesh/v2beta1/computed_implicit_destinations_deepcopy.gen.go new file mode 100644 index 0000000000..da22077209 --- /dev/null +++ b/proto-public/pbmesh/v2beta1/computed_implicit_destinations_deepcopy.gen.go @@ -0,0 +1,48 @@ +// Code generated by protoc-gen-deepcopy. DO NOT EDIT. +package meshv2beta1 + +import ( + proto "google.golang.org/protobuf/proto" +) + +// DeepCopyInto supports using ComputedImplicitDestinations within kubernetes types, where deepcopy-gen is used. +func (in *ComputedImplicitDestinations) DeepCopyInto(out *ComputedImplicitDestinations) { + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComputedImplicitDestinations. Required by controller-gen. +func (in *ComputedImplicitDestinations) DeepCopy() *ComputedImplicitDestinations { + if in == nil { + return nil + } + out := new(ComputedImplicitDestinations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ComputedImplicitDestinations. Required by controller-gen. +func (in *ComputedImplicitDestinations) DeepCopyInterface() interface{} { + return in.DeepCopy() +} + +// DeepCopyInto supports using ImplicitDestination within kubernetes types, where deepcopy-gen is used. +func (in *ImplicitDestination) DeepCopyInto(out *ImplicitDestination) { + proto.Reset(out) + proto.Merge(out, proto.Clone(in)) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImplicitDestination. Required by controller-gen. +func (in *ImplicitDestination) DeepCopy() *ImplicitDestination { + if in == nil { + return nil + } + out := new(ImplicitDestination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ImplicitDestination. Required by controller-gen. +func (in *ImplicitDestination) DeepCopyInterface() interface{} { + return in.DeepCopy() +} diff --git a/proto-public/pbmesh/v2beta1/computed_implicit_destinations_json.gen.go b/proto-public/pbmesh/v2beta1/computed_implicit_destinations_json.gen.go new file mode 100644 index 0000000000..6fb6ee01df --- /dev/null +++ b/proto-public/pbmesh/v2beta1/computed_implicit_destinations_json.gen.go @@ -0,0 +1,33 @@ +// Code generated by protoc-json-shim. DO NOT EDIT. 
+package meshv2beta1 + +import ( + protojson "google.golang.org/protobuf/encoding/protojson" +) + +// MarshalJSON is a custom marshaler for ComputedImplicitDestinations +func (this *ComputedImplicitDestinations) MarshalJSON() ([]byte, error) { + str, err := ComputedImplicitDestinationsMarshaler.Marshal(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for ComputedImplicitDestinations +func (this *ComputedImplicitDestinations) UnmarshalJSON(b []byte) error { + return ComputedImplicitDestinationsUnmarshaler.Unmarshal(b, this) +} + +// MarshalJSON is a custom marshaler for ImplicitDestination +func (this *ImplicitDestination) MarshalJSON() ([]byte, error) { + str, err := ComputedImplicitDestinationsMarshaler.Marshal(this) + return []byte(str), err +} + +// UnmarshalJSON is a custom unmarshaler for ImplicitDestination +func (this *ImplicitDestination) UnmarshalJSON(b []byte) error { + return ComputedImplicitDestinationsUnmarshaler.Unmarshal(b, this) +} + +var ( + ComputedImplicitDestinationsMarshaler = &protojson.MarshalOptions{} + ComputedImplicitDestinationsUnmarshaler = &protojson.UnmarshalOptions{DiscardUnknown: false} +) diff --git a/proto-public/pbmesh/v2beta1/resources.rtypes.go b/proto-public/pbmesh/v2beta1/resources.rtypes.go index 845d27bf1a..9a65379a4f 100644 --- a/proto-public/pbmesh/v2beta1/resources.rtypes.go +++ b/proto-public/pbmesh/v2beta1/resources.rtypes.go @@ -12,6 +12,7 @@ const ( APIGatewayKind = "APIGateway" ComputedExplicitDestinationsKind = "ComputedExplicitDestinations" + ComputedImplicitDestinationsKind = "ComputedImplicitDestinations" ComputedProxyConfigurationKind = "ComputedProxyConfiguration" ComputedRoutesKind = "ComputedRoutes" DestinationPolicyKind = "DestinationPolicy" @@ -39,6 +40,12 @@ var ( Kind: ComputedExplicitDestinationsKind, } + ComputedImplicitDestinationsType = &pbresource.Type{ + Group: GroupName, + GroupVersion: Version, + Kind: ComputedImplicitDestinationsKind, + } + ComputedProxyConfigurationType = &pbresource.Type{ Group: GroupName, GroupVersion: Version,