mirror of https://github.com/status-im/consul.git
mesh: create new routes-controller to reconcile xRoute types into a ComputedRoutes resource (#18460)
This new controller produces an intermediate output (ComputedRoutes) that summarizes all relevant xRoutes and related mesh configuration in an easier-to-use format for downstream construction of the ProxyStateTemplate. It also applies status updates to the xRoute resource types to indicate whether they are themselves semantically valid inputs.
This commit is contained in:
parent 4724a4e169
commit 89e6725eee
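For orientation before the diff itself: a minimal sketch (not part of this commit) of how a downstream reconciler might read the ComputedRoutes output when building a ProxyStateTemplate. The decode helper and the ComputedRoutes/ComputedPortRoutes field names are taken from the new code below; the surrounding package and function are assumed for illustration.

package example // hypothetical consumer, illustration only

import (
    "context"

    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/resource"
    pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

func readComputedRoutes(ctx context.Context, rt controller.Runtime, id *pbresource.ID) error {
    // The routes controller keeps one ComputedRoutes resource per service,
    // named the same as the service, so lookup is by name alignment.
    cr, err := resource.GetDecodedResource[*pbmesh.ComputedRoutes](ctx, rt.Client, id)
    if err != nil {
        return err
    }
    if cr == nil {
        return nil // no computed routes: the service is absent or not in the mesh
    }
    for port, portRoutes := range cr.Data.PortedConfigs {
        _ = port
        _ = portRoutes.Targets // backend target details (service data, failover, destination policy)
        switch cfg := portRoutes.Config.(type) {
        case *pbmesh.ComputedPortRoutes_Http:
            _ = cfg.Http.Rules // compiled HTTP rules, including synthetic defaults
        case *pbmesh.ComputedPortRoutes_Grpc:
            _ = cfg.Grpc.Rules
        case *pbmesh.ComputedPortRoutes_Tcp:
            _ = cfg.Tcp.Rules
        }
    }
    return nil
}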
go.mod (2 changed lines)

@@ -88,6 +88,7 @@ require (
 	github.com/mitchellh/pointerstructure v1.2.1
 	github.com/mitchellh/reflectwalk v1.0.2
 	github.com/natefinch/npipe v0.0.0-20160621034901-c1b8fa8bdcce
+	github.com/oklog/ulid v1.3.1
 	github.com/oklog/ulid/v2 v2.1.0
 	github.com/olekukonko/tablewriter v0.0.4
 	github.com/patrickmn/go-cache v2.1.0+incompatible
@@ -219,7 +220,6 @@ require (
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 // indirect
 	github.com/oklog/run v1.0.0 // indirect
-	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect
 	github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c // indirect
 	github.com/pierrec/lz4 v2.5.2+incompatible // indirect
@@ -0,0 +1,34 @@ (new file)
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package controller

import (
    "github.com/hashicorp/consul/internal/resource"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

// MakeRequests accepts a list of pbresource.ID and pbresource.Reference items,
// and mirrors them into a slice of []controller.Request items where the Type
// of the items has been replaced by 'typ'.
func MakeRequests[V resource.ReferenceOrID](
    typ *pbresource.Type,
    refs []V,
) []Request {
    if len(refs) == 0 {
        return nil
    }

    out := make([]Request, 0, len(refs))
    for _, ref := range refs {
        out = append(out, Request{
            ID: &pbresource.ID{
                Type:    typ,
                Tenancy: ref.GetTenancy(),
                Name:    ref.GetName(),
            },
        })
    }

    return out
}
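A brief usage sketch (hypothetical, not part of this commit): a dependency mapper can use MakeRequests to re-key parent service references as reconcile requests for a different type — here the ComputedRoutes type, which shares tenancy and name with the service. Only MakeRequests, Request, and the referenced types come from this repository; the package and function around them are illustrative.

package example // illustrative caller of controller.MakeRequests

import (
    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/mesh/internal/types"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

// mapParentsToComputedRoutes re-keys each parent service reference of an
// xRoute as a reconcile request for its ComputedRoutes resource.
func mapParentsToComputedRoutes(parentRefs []*pbresource.Reference) []controller.Request {
    return controller.MakeRequests(types.ComputedRoutesType, parentRefs)
}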
@@ -0,0 +1,77 @@ (new file)
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package controller

import (
    "testing"

    "github.com/stretchr/testify/require"

    "github.com/hashicorp/consul/internal/resource"
    "github.com/hashicorp/consul/proto-public/pbresource"
    "github.com/hashicorp/consul/proto/private/prototest"
)

func TestMakeRequests(t *testing.T) {
    redType := &pbresource.Type{
        Group:        "colors",
        GroupVersion: "vfake",
        Kind:         "red",
    }
    blueType := &pbresource.Type{
        Group:        "colors",
        GroupVersion: "vfake",
        Kind:         "blue",
    }

    casparID := &pbresource.ID{
        Type:    redType,
        Tenancy: resource.DefaultNamespacedTenancy(),
        Name:    "caspar",
        Uid:     "ignored",
    }
    babypantsID := &pbresource.ID{
        Type:    redType,
        Tenancy: resource.DefaultNamespacedTenancy(),
        Name:    "babypants",
        Uid:     "ignored",
    }
    zimRef := &pbresource.Reference{
        Type:    redType,
        Tenancy: resource.DefaultNamespacedTenancy(),
        Name:    "zim",
        Section: "ignored",
    }
    girRef := &pbresource.Reference{
        Type:    redType,
        Tenancy: resource.DefaultNamespacedTenancy(),
        Name:    "gir",
        Section: "ignored",
    }

    newBlueReq := func(name string) Request {
        return Request{
            ID: &pbresource.ID{
                Type:    blueType,
                Tenancy: resource.DefaultNamespacedTenancy(),
                Name:    name,
            },
        }
    }

    require.Nil(t, MakeRequests[*pbresource.ID](blueType, nil))
    require.Nil(t, MakeRequests[*pbresource.Reference](blueType, nil))

    prototest.AssertElementsMatch(t, []Request{
        newBlueReq("caspar"), newBlueReq("babypants"),
    }, MakeRequests[*pbresource.ID](blueType, []*pbresource.ID{
        casparID, babypantsID,
    }))

    prototest.AssertElementsMatch(t, []Request{
        newBlueReq("gir"), newBlueReq("zim"),
    }, MakeRequests[*pbresource.Reference](blueType, []*pbresource.Reference{
        girRef, zimRef,
    }))
}
@@ -6,6 +6,7 @@ package mesh
 import (
 	"github.com/hashicorp/consul/internal/controller"
 	"github.com/hashicorp/consul/internal/mesh/internal/controllers"
+	"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes"
 	"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy"
 	"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy/status"
 	"github.com/hashicorp/consul/internal/mesh/internal/types"
@@ -46,15 +47,16 @@ var (

 	// Resource Types for the latest version.

 	ProxyConfigurationType              = types.ProxyConfigurationType
 	UpstreamsType                       = types.UpstreamsType
 	UpstreamsConfigurationType          = types.UpstreamsConfigurationType
 	ProxyStateTemplateType              = types.ProxyStateTemplateType
+	ProxyStateTemplateConfigurationType = types.ProxyStateTemplateType
 	HTTPRouteType                       = types.HTTPRouteType
 	GRPCRouteType                       = types.GRPCRouteType
 	TCPRouteType                        = types.TCPRouteType
 	DestinationPolicyType               = types.DestinationPolicyType
 	ComputedRoutesType                  = types.ComputedRoutesType

 	// Controller statuses.

@@ -67,6 +69,25 @@ var (
 	SidecarProxyStatusReasonDestinationServiceFound        = status.StatusReasonDestinationServiceFound
 	SidecarProxyStatusReasonMeshProtocolDestinationPort    = status.StatusReasonMeshProtocolDestinationPort
 	SidecarProxyStatusReasonNonMeshProtocolDestinationPort = status.StatusReasonNonMeshProtocolDestinationPort
+
+	// Routes controller
+	RoutesStatusKey                                                = routes.StatusKey
+	RoutesStatusConditionAccepted                                  = routes.StatusConditionAccepted
+	RoutesStatusConditionAcceptedMissingParentRefReason            = routes.MissingParentRefReason
+	RoutesStatusConditionAcceptedMissingBackendRefReason           = routes.MissingBackendRefReason
+	RoutesStatusConditionAcceptedParentRefOutsideMeshReason        = routes.ParentRefOutsideMeshReason
+	RoutesStatusConditionAcceptedBackendRefOutsideMeshReason       = routes.BackendRefOutsideMeshReason
+	RoutesStatusConditionAcceptedParentRefUsingMeshPortReason      = routes.ParentRefUsingMeshPortReason
+	RoutesStatusConditionAcceptedBackendRefUsingMeshPortReason     = routes.BackendRefUsingMeshPortReason
+	RoutesStatusConditionAcceptedUnknownParentRefPortReason        = routes.UnknownParentRefPortReason
+	RoutesStatusConditionAcceptedUnknownBackendRefPortReason       = routes.UnknownBackendRefPortReason
+	RoutesStatusConditionAcceptedConflictNotBoundToParentRefReason = routes.ConflictNotBoundToParentRefReason
+)
+
+const (
+	// Important constants
+
+	NullRouteBackend = types.NullRouteBackend
 )

 // RegisterTypes adds all resource types within the "mesh" API group
@@ -7,6 +7,7 @@ import (
 	"github.com/hashicorp/consul/internal/catalog"
 	"github.com/hashicorp/consul/internal/controller"
 	"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
+	"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes"
 	"github.com/hashicorp/consul/internal/mesh/internal/controllers/sidecarproxy"
 	"github.com/hashicorp/consul/internal/mesh/internal/controllers/xds"
 	"github.com/hashicorp/consul/internal/mesh/internal/mappers/sidecarproxymapper"
@@ -31,4 +32,6 @@ func Register(mgr *controller.Manager, deps Dependencies) {
 	mgr.Register(
 		sidecarproxy.Controller(destinationsCache, proxyCfgCache, m, deps.TrustDomainFetcher, deps.LocalDatacenter),
 	)
+
+	mgr.Register(routes.Controller())
 }
@@ -0,0 +1,178 @@ (new file)
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package routes

import (
    "context"

    "github.com/hashicorp/go-hclog"
    "google.golang.org/protobuf/proto"
    "google.golang.org/protobuf/types/known/anypb"

    "github.com/hashicorp/consul/internal/catalog"
    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/loader"
    "github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/xroutemapper"
    "github.com/hashicorp/consul/internal/mesh/internal/types"
    "github.com/hashicorp/consul/internal/resource"
    pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

func Controller() controller.Controller {
    mapper := xroutemapper.New()

    r := &routesReconciler{
        mapper: mapper,
    }
    return controller.ForType(types.ComputedRoutesType).
        WithWatch(types.HTTPRouteType, mapper.MapHTTPRoute).
        WithWatch(types.GRPCRouteType, mapper.MapGRPCRoute).
        WithWatch(types.TCPRouteType, mapper.MapTCPRoute).
        WithWatch(types.DestinationPolicyType, mapper.MapDestinationPolicy).
        WithWatch(catalog.FailoverPolicyType, mapper.MapFailoverPolicy).
        WithWatch(catalog.ServiceType, mapper.MapService).
        WithReconciler(r)
}

type routesReconciler struct {
    mapper *xroutemapper.Mapper
}

func (r *routesReconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error {
    // Notably don't inject the resource-id here into the logger, since we have
    // to do a fan-out to multiple resources due to xRoutes having multiple
    // parent refs.
    rt.Logger = rt.Logger.With("controller", StatusKey)

    rt.Logger.Trace("reconciling computed routes")

    loggerFor := func(id *pbresource.ID) hclog.Logger {
        return rt.Logger.With("resource-id", id)
    }
    related, err := loader.LoadResourcesForComputedRoutes(ctx, loggerFor, rt.Client, r.mapper, req.ID)
    if err != nil {
        rt.Logger.Error("error loading relevant resources", "error", err)
        return err
    }

    pending := make(PendingStatuses)

    ValidateXRouteReferences(related, pending)

    generatedResults := GenerateComputedRoutes(related, pending)

    if err := UpdatePendingStatuses(ctx, rt, pending); err != nil {
        rt.Logger.Error("error updating statuses for affected relevant resources", "error", err)
        return err
    }

    for _, result := range generatedResults {
        computedRoutesID := result.ID

        logger := rt.Logger.With("resource-id", computedRoutesID)

        prev, err := resource.GetDecodedResource[*pbmesh.ComputedRoutes](ctx, rt.Client, computedRoutesID)
        if err != nil {
            logger.Error("error loading previous computed routes", "error", err)
            return err
        }

        if err := ensureComputedRoutesIsSynced(ctx, logger, rt.Client, result, prev); err != nil {
            return err
        }
    }

    return nil
}

func ensureComputedRoutesIsSynced(
    ctx context.Context,
    logger hclog.Logger,
    client pbresource.ResourceServiceClient,
    result *ComputedRoutesResult,
    prev *types.DecodedComputedRoutes,
) error {
    if result.Data == nil {
        return deleteComputedRoutes(ctx, logger, client, prev)
    }

    // Upsert the resource if changed.
    if prev != nil {
        if proto.Equal(prev.Data, result.Data) {
            return nil // no change
        }
        result.ID = prev.Resource.Id
    }

    return upsertComputedRoutes(ctx, logger, client, result.ID, result.OwnerID, result.Data)
}

func upsertComputedRoutes(
    ctx context.Context,
    logger hclog.Logger,
    client pbresource.ResourceServiceClient,
    id *pbresource.ID,
    ownerID *pbresource.ID,
    data *pbmesh.ComputedRoutes,
) error {
    mcData, err := anypb.New(data)
    if err != nil {
        logger.Error("error marshalling new computed routes payload", "error", err)
        return err
    }

    // Now perform the write. The computed routes resource should be owned
    // by the service so that it will automatically be deleted upon service
    // deletion.

    _, err = client.Write(ctx, &pbresource.WriteRequest{
        Resource: &pbresource.Resource{
            Id:    id,
            Owner: ownerID,
            Data:  mcData,
        },
    })
    if err != nil {
        logger.Error("error writing computed routes", "error", err)
        return err
    }

    logger.Trace("updated computed routes resource was successfully written")

    return nil
}

func deleteComputedRoutes(
    ctx context.Context,
    logger hclog.Logger,
    client pbresource.ResourceServiceClient,
    prev *types.DecodedComputedRoutes,
) error {
    if prev == nil {
        return nil
    }

    // The service the computed routes controls no longer participates in the
    // mesh at all.

    logger.Trace("removing previous computed routes")

    // This performs a CAS deletion.
    _, err := client.Delete(ctx, &pbresource.DeleteRequest{
        Id:      prev.Resource.Id,
        Version: prev.Resource.Version,
    })
    // Potentially we could look for CAS failures by checking if the gRPC
    // status code is Aborted. However, it's an edge case and there could
    // possibly be other reasons why the gRPC status code would be aborted
    // besides CAS version mismatches. The simplest thing to do is to just
    // propagate the error and retry reconciliation later.
    if err != nil {
        logger.Error("error deleting previous computed routes resource", "error", err)
        return err
    }

    return nil
}
(File diff suppressed because it is too large.)
@@ -0,0 +1,602 @@ (new file)
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package routes

import (
    "fmt"

    "github.com/hashicorp/consul/internal/catalog"
    "github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/loader"
    "github.com/hashicorp/consul/internal/mesh/internal/types"
    "github.com/hashicorp/consul/internal/resource"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
    pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

// GenerateComputedRoutes walks a set of related resources and assembles them
// into one effective easy-to-consume ComputedRoutes result.
//
// Any status conditions generated during traversal can be queued for
// persistence using the PendingStatuses map.
//
// This should not internally generate, nor return any errors.
func GenerateComputedRoutes(
    related *loader.RelatedResources,
    pending PendingStatuses,
) []*ComputedRoutesResult {
    out := make([]*ComputedRoutesResult, 0, len(related.ComputedRoutesList))
    for _, computedRoutesID := range related.ComputedRoutesList {
        out = append(out, compile(related, computedRoutesID, pending))
    }
    return out
}

type ComputedRoutesResult struct {
    // ID is always required.
    ID *pbresource.ID
    // OwnerID is only required on upserts.
    OwnerID *pbresource.ID
    // Data being empty means delete if exists.
    Data *pbmesh.ComputedRoutes
}

func compile(
    related *loader.RelatedResources,
    computedRoutesID *pbresource.ID,
    pending PendingStatuses,
) *ComputedRoutesResult {
    // There is one computed routes resource for the entire service (perfect name alignment).
    //
    // All ports are embedded within.

    parentServiceID := &pbresource.ID{
        Type:    catalog.ServiceType,
        Tenancy: computedRoutesID.Tenancy,
        Name:    computedRoutesID.Name,
    }

    parentServiceRef := resource.Reference(parentServiceID, "")

    parentServiceDec := related.GetService(parentServiceID)
    if parentServiceDec == nil {
        return &ComputedRoutesResult{
            ID:   computedRoutesID,
            Data: nil, // returning nil signals a delete is requested
        }
    }
    parentServiceID = parentServiceDec.Resource.Id // get ULID out of it

    var (
        inMesh               = false
        allowedPortProtocols = make(map[string]pbcatalog.Protocol)
    )
    for _, port := range parentServiceDec.Data.Ports {
        if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
            inMesh = true
            continue // skip
        }
        allowedPortProtocols[port.TargetPort] = port.Protocol
    }

    if !inMesh {
        return &ComputedRoutesResult{
            ID:   computedRoutesID,
            Data: nil, // returning nil signals a delete is requested
        }
    }

    computedRoutes := &pbmesh.ComputedRoutes{
        PortedConfigs: make(map[string]*pbmesh.ComputedPortRoutes),
    }

    // Visit all of the routes relevant to this computed routes.
    routeNodesByPort := make(map[string][]*inputRouteNode)
    related.WalkRoutesForParentRef(parentServiceRef, func(
        rk resource.ReferenceKey,
        res *pbresource.Resource,
        xroute types.XRouteData,
    ) {
        var (
            ports          []string
            wildcardedPort bool
        )
        for _, ref := range xroute.GetParentRefs() {
            if resource.ReferenceOrIDMatch(ref.Ref, parentServiceRef) {
                if ref.Port == "" {
                    wildcardedPort = true
                    break
                }
                if _, ok := allowedPortProtocols[ref.Port]; ok {
                    ports = append(ports, ref.Port)
                }
            }
        }

        // Do a port explosion.
        if wildcardedPort {
            ports = nil
            for port := range allowedPortProtocols {
                ports = append(ports, port)
            }
        }

        if len(ports) == 0 {
            return // not relevant to this computed routes
        }

        for _, port := range ports {
            if port == "" {
                panic("impossible to have an empty port here")
            }

            var node *inputRouteNode
            switch route := xroute.(type) {
            case *pbmesh.HTTPRoute:
                node = compileHTTPRouteNode(port, res, route, related)
            case *pbmesh.GRPCRoute:
                node = compileGRPCRouteNode(port, res, route, related)
            case *pbmesh.TCPRoute:
                node = compileTCPRouteNode(port, res, route, related)
            default:
                panic(fmt.Sprintf("unexpected xroute type: %T", xroute))
            }

            routeNodesByPort[node.ParentPort] = append(routeNodesByPort[node.ParentPort], node)
        }
    })

    // Fill in defaults where there was no xroute defined at all.
    for port, protocol := range allowedPortProtocols {
        if _, ok := routeNodesByPort[port]; !ok {
            var typ *pbresource.Type

            // enumcover:pbcatalog.Protocol
            switch protocol {
            case pbcatalog.Protocol_PROTOCOL_HTTP2:
                typ = types.HTTPRouteType
            case pbcatalog.Protocol_PROTOCOL_HTTP:
                typ = types.HTTPRouteType
            case pbcatalog.Protocol_PROTOCOL_GRPC:
                typ = types.GRPCRouteType
            case pbcatalog.Protocol_PROTOCOL_TCP:
                typ = types.TCPRouteType
            case pbcatalog.Protocol_PROTOCOL_MESH:
                fallthrough // to default
            case pbcatalog.Protocol_PROTOCOL_UNSPECIFIED:
                fallthrough // to default
            default:
                continue // not possible
            }

            routeNode := createDefaultRouteNode(parentServiceRef, port, typ)

            routeNodesByPort[port] = append(routeNodesByPort[port], routeNode)
        }
    }

    // First sort the input routes by the final criteria, so we can let the
    // stable sort take care of the ultimate tiebreakers.
    for port, routeNodes := range routeNodesByPort {
        gammaInitialSortWrappedRoutes(routeNodes)

        // Now that they are sorted by age and name, we can figure out which
        // xRoute should apply to each port (the first).
        var top *inputRouteNode
        for i, routeNode := range routeNodes {
            if i == 0 {
                top = routeNode
                continue
            }
            if top.RouteType != routeNode.RouteType {
                // This should only happen with user-provided ones, since we
                // would never have two synthetic default routes at once.
                res := routeNode.OriginalResource
                if res != nil {
                    pending.AddConditions(resource.NewReferenceKey(res.Id), res, []*pbresource.Condition{
                        ConditionConflictNotBoundToParentRef(
                            parentServiceRef,
                            port,
                            top.RouteType,
                        ),
                    })
                }
                continue
            }
            top.AppendRulesFrom(routeNode)
            top.AddTargetsFrom(routeNode)
        }

        // Clear this field as it's no longer used and doesn't make sense once
        // this represents multiple xRoutes or defaults.
        top.OriginalResource = nil

        // Now we can do the big sort.
        gammaSortRouteRules(top)

        // Inject catch-all rules to ensure stray requests will explicitly be blackholed.
        if !top.Default {
            if !types.IsTCPRouteType(top.RouteType) {
                // There are no match criteria on a TCPRoute, so never a need
                // to add a catch-all.
                appendDefaultRouteNode(top, types.NullRouteBackend)
            }
        }

        mc := &pbmesh.ComputedPortRoutes{
            UsingDefaultConfig: top.Default,
            Targets:            top.NewTargets,
        }
        parentRef := &pbmesh.ParentReference{
            Ref:  parentServiceRef,
            Port: port,
        }

        switch {
        case resource.EqualType(top.RouteType, types.HTTPRouteType):
            mc.Config = &pbmesh.ComputedPortRoutes_Http{
                Http: &pbmesh.ComputedHTTPRoute{
                    ParentRef: parentRef,
                    Rules:     top.HTTPRules,
                },
            }
        case resource.EqualType(top.RouteType, types.GRPCRouteType):
            mc.Config = &pbmesh.ComputedPortRoutes_Grpc{
                Grpc: &pbmesh.ComputedGRPCRoute{
                    ParentRef: parentRef,
                    Rules:     top.GRPCRules,
                },
            }
        case resource.EqualType(top.RouteType, types.TCPRouteType):
            mc.Config = &pbmesh.ComputedPortRoutes_Tcp{
                Tcp: &pbmesh.ComputedTCPRoute{
                    ParentRef: parentRef,
                    Rules:     top.TCPRules,
                },
            }
        default:
            panic("impossible")
        }

        computedRoutes.PortedConfigs[port] = mc

        for _, details := range mc.Targets {
            svcRef := details.BackendRef.Ref

            svc := related.GetService(svcRef)
            failoverPolicy := related.GetFailoverPolicyForService(svcRef)
            destConfig := related.GetDestinationPolicyForService(svcRef)

            if svc == nil {
                panic("impossible at this point; should already have been handled before getting here")
            }
            details.Service = svc.Data

            if failoverPolicy != nil {
                details.FailoverPolicy = catalog.SimplifyFailoverPolicy(
                    svc.Data,
                    failoverPolicy.Data,
                )
            }
            if destConfig != nil {
                details.DestinationPolicy = destConfig.Data
            }
        }

        computedRoutes.PortedConfigs[port] = mc
    }

    return &ComputedRoutesResult{
        ID:      computedRoutesID,
        OwnerID: parentServiceID,
        Data:    computedRoutes,
    }
}

func compileHTTPRouteNode(
    port string,
    res *pbresource.Resource,
    route *pbmesh.HTTPRoute,
    serviceGetter serviceGetter,
) *inputRouteNode {
    route = protoClone(route)
    node := newInputRouteNode(port)

    node.RouteType = types.HTTPRouteType
    node.OriginalResource = res
    node.HTTPRules = make([]*pbmesh.ComputedHTTPRouteRule, 0, len(route.Rules))
    for _, rule := range route.Rules {
        irule := &pbmesh.ComputedHTTPRouteRule{
            Matches:     protoSliceClone(rule.Matches),
            Filters:     protoSliceClone(rule.Filters),
            BackendRefs: make([]*pbmesh.ComputedHTTPBackendRef, 0, len(rule.BackendRefs)),
            Timeouts:    rule.Timeouts,
            Retries:     rule.Retries,
        }

        // https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1beta1.HTTPRouteRule
        //
        // If no matches are specified, the default is a prefix path match
        // on “/”, which has the effect of matching every HTTP request.
        if len(irule.Matches) == 0 {
            irule.Matches = defaultHTTPRouteMatches()
        }
        for _, match := range irule.Matches {
            if match.Path == nil {
                // Path specifies a HTTP request path matcher. If this
                // field is not specified, a default prefix match on
                // the “/” path is provided.
                match.Path = defaultHTTPPathMatch()
            }
        }

        for _, backendRef := range rule.BackendRefs {
            // Infer port name from parent ref.
            if backendRef.BackendRef.Port == "" {
                backendRef.BackendRef.Port = port
            }

            var (
                backendTarget string
                backendSvc    = serviceGetter.GetService(backendRef.BackendRef.Ref)
            )
            if shouldRouteTrafficToBackend(backendSvc, backendRef.BackendRef) {
                details := &pbmesh.BackendTargetDetails{
                    BackendRef: backendRef.BackendRef,
                }
                backendTarget = node.AddTarget(backendRef.BackendRef, details)
            } else {
                backendTarget = types.NullRouteBackend
            }
            ibr := &pbmesh.ComputedHTTPBackendRef{
                BackendTarget: backendTarget,
                Weight:        backendRef.Weight,
                Filters:       backendRef.Filters,
            }
            irule.BackendRefs = append(irule.BackendRefs, ibr)
        }

        node.HTTPRules = append(node.HTTPRules, irule)
    }

    return node
}

func compileGRPCRouteNode(
    port string,
    res *pbresource.Resource,
    route *pbmesh.GRPCRoute,
    serviceGetter serviceGetter,
) *inputRouteNode {
    route = protoClone(route)

    node := newInputRouteNode(port)

    node.RouteType = types.GRPCRouteType
    node.OriginalResource = res
    node.GRPCRules = make([]*pbmesh.ComputedGRPCRouteRule, 0, len(route.Rules))
    for _, rule := range route.Rules {
        irule := &pbmesh.ComputedGRPCRouteRule{
            Matches:     protoSliceClone(rule.Matches),
            Filters:     protoSliceClone(rule.Filters),
            BackendRefs: make([]*pbmesh.ComputedGRPCBackendRef, 0, len(rule.BackendRefs)),
            Timeouts:    rule.Timeouts,
            Retries:     rule.Retries,
        }

        // https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.GRPCRouteRule
        //
        // If no matches are specified, the implementation MUST match every gRPC request.
        if len(irule.Matches) == 0 {
            irule.Matches = defaultGRPCRouteMatches()
        }

        for _, backendRef := range rule.BackendRefs {
            // Infer port name from parent ref.
            if backendRef.BackendRef.Port == "" {
                backendRef.BackendRef.Port = port
            }

            var (
                backendTarget string
                backendSvc    = serviceGetter.GetService(backendRef.BackendRef.Ref)
            )
            if shouldRouteTrafficToBackend(backendSvc, backendRef.BackendRef) {
                details := &pbmesh.BackendTargetDetails{
                    BackendRef: backendRef.BackendRef,
                }
                backendTarget = node.AddTarget(backendRef.BackendRef, details)
            } else {
                backendTarget = types.NullRouteBackend
            }

            ibr := &pbmesh.ComputedGRPCBackendRef{
                BackendTarget: backendTarget,
                Weight:        backendRef.Weight,
                Filters:       backendRef.Filters,
            }
            irule.BackendRefs = append(irule.BackendRefs, ibr)
        }

        node.GRPCRules = append(node.GRPCRules, irule)
    }

    return node
}

func compileTCPRouteNode(
    port string,
    res *pbresource.Resource,
    route *pbmesh.TCPRoute,
    serviceGetter serviceGetter,
) *inputRouteNode {
    route = protoClone(route)

    node := newInputRouteNode(port)

    node.RouteType = types.TCPRouteType
    node.OriginalResource = res
    node.TCPRules = make([]*pbmesh.ComputedTCPRouteRule, 0, len(route.Rules))
    for _, rule := range route.Rules {
        irule := &pbmesh.ComputedTCPRouteRule{
            BackendRefs: make([]*pbmesh.ComputedTCPBackendRef, 0, len(rule.BackendRefs)),
        }

        // https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io/v1alpha2.TCPRoute

        for _, backendRef := range rule.BackendRefs {
            // Infer port name from parent ref.
            if backendRef.BackendRef.Port == "" {
                backendRef.BackendRef.Port = port
            }

            var (
                backendTarget string
                backendSvc    = serviceGetter.GetService(backendRef.BackendRef.Ref)
            )
            if shouldRouteTrafficToBackend(backendSvc, backendRef.BackendRef) {
                details := &pbmesh.BackendTargetDetails{
                    BackendRef: backendRef.BackendRef,
                }
                backendTarget = node.AddTarget(backendRef.BackendRef, details)
            } else {
                backendTarget = types.NullRouteBackend
            }

            ibr := &pbmesh.ComputedTCPBackendRef{
                BackendTarget: backendTarget,
                Weight:        backendRef.Weight,
            }
            irule.BackendRefs = append(irule.BackendRefs, ibr)
        }

        node.TCPRules = append(node.TCPRules, irule)
    }

    return node
}

func shouldRouteTrafficToBackend(backendSvc *types.DecodedService, backendRef *pbmesh.BackendReference) bool {
    if backendSvc == nil {
        return false
    }

    var (
        found  = false
        inMesh = false
    )
    for _, port := range backendSvc.Data.Ports {
        if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
            inMesh = true
            continue
        }
        if port.TargetPort == backendRef.Port {
            found = true
        }
    }

    return inMesh && found
}

func createDefaultRouteNode(
    parentServiceRef *pbresource.Reference,
    port string,
    typ *pbresource.Type,
) *inputRouteNode {
    // always add the parent as a possible target due to catch-all
    defaultBackendRef := &pbmesh.BackendReference{
        Ref:  parentServiceRef,
        Port: port,
    }

    routeNode := newInputRouteNode(port)

    defaultBackendTarget := routeNode.AddTarget(defaultBackendRef, &pbmesh.BackendTargetDetails{
        BackendRef: defaultBackendRef,
    })
    switch {
    case resource.EqualType(types.HTTPRouteType, typ):
        routeNode.RouteType = types.HTTPRouteType
        appendDefaultHTTPRouteRule(routeNode, defaultBackendTarget)
    case resource.EqualType(types.GRPCRouteType, typ):
        routeNode.RouteType = types.GRPCRouteType
        appendDefaultGRPCRouteRule(routeNode, defaultBackendTarget)
    case resource.EqualType(types.TCPRouteType, typ):
        fallthrough
    default:
        routeNode.RouteType = types.TCPRouteType
        appendDefaultTCPRouteRule(routeNode, defaultBackendTarget)
    }

    routeNode.Default = true
    return routeNode
}

func appendDefaultRouteNode(
    routeNode *inputRouteNode,
    defaultBackendTarget string,
) {
    switch {
    case resource.EqualType(types.HTTPRouteType, routeNode.RouteType):
        appendDefaultHTTPRouteRule(routeNode, defaultBackendTarget)
    case resource.EqualType(types.GRPCRouteType, routeNode.RouteType):
        appendDefaultGRPCRouteRule(routeNode, defaultBackendTarget)
    case resource.EqualType(types.TCPRouteType, routeNode.RouteType):
        fallthrough
    default:
        appendDefaultTCPRouteRule(routeNode, defaultBackendTarget)
    }
}

func appendDefaultHTTPRouteRule(
    routeNode *inputRouteNode,
    backendTarget string,
) {
    routeNode.HTTPRules = append(routeNode.HTTPRules, &pbmesh.ComputedHTTPRouteRule{
        Matches: defaultHTTPRouteMatches(),
        BackendRefs: []*pbmesh.ComputedHTTPBackendRef{{
            BackendTarget: backendTarget,
        }},
    })
}

func appendDefaultGRPCRouteRule(
    routeNode *inputRouteNode,
    backendTarget string,
) {
    routeNode.GRPCRules = append(routeNode.GRPCRules, &pbmesh.ComputedGRPCRouteRule{
        Matches: defaultGRPCRouteMatches(),
        BackendRefs: []*pbmesh.ComputedGRPCBackendRef{{
            BackendTarget: backendTarget,
        }},
    })
}

func appendDefaultTCPRouteRule(
    routeNode *inputRouteNode,
    backendTarget string,
) {
    routeNode.TCPRules = append(routeNode.TCPRules, &pbmesh.ComputedTCPRouteRule{
        BackendRefs: []*pbmesh.ComputedTCPBackendRef{{
            BackendTarget: backendTarget,
        }},
    })
}

func defaultHTTPRouteMatches() []*pbmesh.HTTPRouteMatch {
    return []*pbmesh.HTTPRouteMatch{{
        Path: defaultHTTPPathMatch(),
    }}
}

func defaultHTTPPathMatch() *pbmesh.HTTPPathMatch {
    return &pbmesh.HTTPPathMatch{
        Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX,
        Value: "/",
    }
}

func defaultGRPCRouteMatches() []*pbmesh.GRPCRouteMatch {
    return []*pbmesh.GRPCRouteMatch{{}}
}
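Illustrative only (not part of the diff): roughly the shape that compile() above produces for a service with a single "http" port and no user-defined xRoutes — a synthetic default rule that prefix-matches "/" and routes back to the parent service. Field names and the target-key helper come from the code above; the surrounding example package, function, and literal values are assumptions, and the real output additionally carries service, failover, and destination-policy data on the target details.

package example // illustrative only

import (
    "github.com/hashicorp/consul/internal/mesh/internal/types"
    pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

func exampleDefaultComputedRoutes(apiServiceRef *pbresource.Reference) *pbmesh.ComputedRoutes {
    backendRef := &pbmesh.BackendReference{
        Ref:  apiServiceRef, // the parent service doubles as the default backend
        Port: "http",
    }
    targetKey := types.BackendRefToComputedRoutesTarget(backendRef)

    return &pbmesh.ComputedRoutes{
        PortedConfigs: map[string]*pbmesh.ComputedPortRoutes{
            "http": {
                UsingDefaultConfig: true,
                Targets: map[string]*pbmesh.BackendTargetDetails{
                    targetKey: {BackendRef: backendRef},
                },
                Config: &pbmesh.ComputedPortRoutes_Http{
                    Http: &pbmesh.ComputedHTTPRoute{
                        ParentRef: &pbmesh.ParentReference{Ref: apiServiceRef, Port: "http"},
                        Rules: []*pbmesh.ComputedHTTPRouteRule{{
                            // default rule: prefix match on "/" routing back to the service
                            Matches: []*pbmesh.HTTPRouteMatch{{
                                Path: &pbmesh.HTTPPathMatch{
                                    Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX,
                                    Value: "/",
                                },
                            }},
                            BackendRefs: []*pbmesh.ComputedHTTPBackendRef{{
                                BackendTarget: targetKey,
                            }},
                        }},
                    },
                },
            },
        },
    }
}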
(File diff suppressed because it is too large.)
@@ -0,0 +1,72 @@ (new file)
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package routes

import (
    "github.com/hashicorp/consul/internal/mesh/internal/types"
    "github.com/hashicorp/consul/internal/resource"
    pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

// inputRouteNode is a dressed up version of an XRoute meant as working state
// for one pass of the compilation procedure.
type inputRouteNode struct {
    ParentPort string // always set

    RouteType *pbresource.Type
    Default   bool

    // only one of these can be set to non-empty
    HTTPRules []*pbmesh.ComputedHTTPRouteRule
    GRPCRules []*pbmesh.ComputedGRPCRouteRule
    TCPRules  []*pbmesh.ComputedTCPRouteRule

    NewTargets map[string]*pbmesh.BackendTargetDetails

    // This field is non-nil for nodes based on a single xRoute, and nil for
    // composite nodes or default nodes.
    OriginalResource *pbresource.Resource
}

func newInputRouteNode(port string) *inputRouteNode {
    return &inputRouteNode{
        ParentPort: port,
        NewTargets: make(map[string]*pbmesh.BackendTargetDetails),
    }
}

func (n *inputRouteNode) AddTarget(backendRef *pbmesh.BackendReference, data *pbmesh.BackendTargetDetails) string {
    n.Default = false
    key := types.BackendRefToComputedRoutesTarget(backendRef)

    if _, ok := n.NewTargets[key]; !ok {
        n.NewTargets[key] = data
    }

    return key
}

func (n *inputRouteNode) AddTargetsFrom(next *inputRouteNode) {
    n.Default = false
    for key, details := range next.NewTargets {
        if _, ok := n.NewTargets[key]; !ok {
            n.NewTargets[key] = details // add if not already there
        }
    }
}

func (n *inputRouteNode) AppendRulesFrom(next *inputRouteNode) {
    n.Default = false
    switch {
    case resource.EqualType(n.RouteType, types.HTTPRouteType):
        n.HTTPRules = append(n.HTTPRules, next.HTTPRules...)
    case resource.EqualType(n.RouteType, types.GRPCRouteType):
        n.GRPCRules = append(n.GRPCRules, next.GRPCRules...)
    case resource.EqualType(n.RouteType, types.TCPRouteType):
        n.TCPRules = append(n.TCPRules, next.TCPRules...)
    default:
        panic("impossible")
    }
}
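A small merge sketch (hypothetical, assumed to sit alongside this package since inputRouteNode is unexported): how compile() above folds a younger route node into the oldest one bound to the same parent port. The rule contents are placeholders; only the types and methods used here come from this commit.

// mergeExample shows the merge step used during compilation: when both nodes
// share a route type, rules concatenate and backend target sets union.
func mergeExample(backendRef *pbmesh.BackendReference) *inputRouteNode {
    oldest := newInputRouteNode("http")
    oldest.RouteType = types.HTTPRouteType
    oldest.HTTPRules = []*pbmesh.ComputedHTTPRouteRule{{ /* rules from the oldest HTTPRoute */ }}

    younger := newInputRouteNode("http")
    younger.RouteType = types.HTTPRouteType
    younger.HTTPRules = []*pbmesh.ComputedHTTPRouteRule{{ /* rules from a newer HTTPRoute */ }}
    younger.AddTarget(backendRef, &pbmesh.BackendTargetDetails{BackendRef: backendRef})

    // The oldest route "wins" the port and absorbs the rest, as in compile().
    oldest.AppendRulesFrom(younger)
    oldest.AddTargetsFrom(younger)
    return oldest
}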
@@ -0,0 +1,307 @@ (new file)
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package loader

import (
    "context"
    "fmt"

    "github.com/hashicorp/go-hclog"

    "github.com/hashicorp/consul/internal/catalog"
    "github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/xroutemapper"
    "github.com/hashicorp/consul/internal/mesh/internal/types"
    "github.com/hashicorp/consul/internal/resource"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

type loader struct {
    mapper *xroutemapper.Mapper

    mem *memoizingLoader

    // output var
    out *RelatedResources

    // working state
    mcToLoad map[resource.ReferenceKey]struct{}
    mcDone   map[resource.ReferenceKey]struct{}
}

func LoadResourcesForComputedRoutes(
    ctx context.Context,
    loggerFor func(*pbresource.ID) hclog.Logger,
    client pbresource.ResourceServiceClient,
    mapper *xroutemapper.Mapper,
    computedRoutesID *pbresource.ID,
) (*RelatedResources, error) {
    if loggerFor == nil {
        loggerFor = func(_ *pbresource.ID) hclog.Logger {
            return hclog.NewNullLogger()
        }
    }
    loader := &loader{
        mapper:   mapper,
        mem:      newMemoizingLoader(client),
        mcToLoad: make(map[resource.ReferenceKey]struct{}),
        mcDone:   make(map[resource.ReferenceKey]struct{}),
    }

    if err := loader.load(ctx, loggerFor, computedRoutesID); err != nil {
        return nil, err
    }

    return loader.out, nil
}

func (l *loader) requestLoad(computedRoutesID *pbresource.ID) {
    if !resource.EqualType(computedRoutesID.Type, types.ComputedRoutesType) {
        panic("input must be a ComputedRoutes type")
    }
    rk := resource.NewReferenceKey(computedRoutesID)

    if _, done := l.mcDone[rk]; done {
        return
    }
    l.mcToLoad[rk] = struct{}{}
}

func (l *loader) markLoaded(computedRoutesID *pbresource.ID) {
    if !resource.EqualType(computedRoutesID.Type, types.ComputedRoutesType) {
        panic("input must be a ComputedRoutes type")
    }
    rk := resource.NewReferenceKey(computedRoutesID)

    l.mcDone[rk] = struct{}{}
    delete(l.mcToLoad, rk)
}

func (l *loader) nextRequested() *pbresource.ID {
    for rk := range l.mcToLoad {
        return rk.ToID()
    }
    return nil
}

func (l *loader) load(
    ctx context.Context,
    loggerFor func(*pbresource.ID) hclog.Logger,
    computedRoutesID *pbresource.ID,
) error {
    l.out = NewRelatedResources()

    // Seed the graph fetch for our starting position.
    l.requestLoad(computedRoutesID)

    for {
        mcID := l.nextRequested()
        if mcID == nil {
            break
        }

        if err := l.loadOne(ctx, loggerFor, mcID); err != nil {
            return err
        }
    }

    return nil
}

func (l *loader) loadOne(
    ctx context.Context,
    loggerFor func(*pbresource.ID) hclog.Logger,
    computedRoutesID *pbresource.ID,
) error {
    logger := loggerFor(computedRoutesID)

    // There is one computed routes for the entire service (perfect name alignment).
    //
    // All ports are embedded within.

    parentServiceID := changeResourceType(computedRoutesID, catalog.ServiceType)
    parentServiceRef := resource.Reference(parentServiceID, "")

    if err := l.loadUpstreamService(ctx, logger, parentServiceID); err != nil {
        return err
    }

    if err := l.gatherXRoutesAsInput(ctx, logger, parentServiceRef); err != nil {
        return err
    }

    l.out.AddComputedRoutesIDs(computedRoutesID)

    l.markLoaded(computedRoutesID)

    return nil
}

func (l *loader) gatherXRoutesAsInput(
    ctx context.Context,
    logger hclog.Logger,
    parentServiceRef *pbresource.Reference,
) error {
    routeIDs := l.mapper.RouteIDsByParentServiceRef(parentServiceRef)

    // read the xRoutes
    for _, routeID := range routeIDs {
        switch {
        case resource.EqualType(routeID.Type, types.HTTPRouteType):
            route, err := l.mem.GetHTTPRoute(ctx, routeID)
            if err != nil {
                return fmt.Errorf("the resource service has returned an unexpected error loading %s: %w", routeID, err)
            }
            var routeData types.XRouteData
            if route != nil {
                routeData = route.Data
            }
            err = l.gatherSingleXRouteAsInput(ctx, logger, routeID, routeData, func() {
                l.out.AddHTTPRoute(route)
            })
            if err != nil {
                return fmt.Errorf("the resource service has returned an unexpected error loading %s: %w", routeID, err)
            }
        case resource.EqualType(routeID.Type, types.GRPCRouteType):
            route, err := l.mem.GetGRPCRoute(ctx, routeID)
            if err != nil {
                return fmt.Errorf("the resource service has returned an unexpected error loading %s: %w", routeID, err)
            }
            var routeData types.XRouteData
            if route != nil {
                routeData = route.Data
            }
            err = l.gatherSingleXRouteAsInput(ctx, logger, routeID, routeData, func() {
                l.out.AddGRPCRoute(route)
            })
            if err != nil {
                return fmt.Errorf("the resource service has returned an unexpected error loading %s: %w", routeID, err)
            }
        case resource.EqualType(routeID.Type, types.TCPRouteType):
            route, err := l.mem.GetTCPRoute(ctx, routeID)
            if err != nil {
                return fmt.Errorf("the resource service has returned an unexpected error loading %s: %w", routeID, err)
            }
            var routeData types.XRouteData
            if route != nil {
                routeData = route.Data
            }
            err = l.gatherSingleXRouteAsInput(ctx, logger, routeID, routeData, func() {
                l.out.AddTCPRoute(route)
            })
            if err != nil {
                return fmt.Errorf("the resource service has returned an unexpected error loading %s: %w", routeID, err)
            }
        default:
            logger.Warn("skipping xRoute reference of unknown type", "ID", resource.IDToString(routeID))
            continue
        }
    }

    return nil
}

func (l *loader) loadUpstreamService(
    ctx context.Context,
    logger hclog.Logger,
    svcID *pbresource.ID,
) error {
    logger = logger.With("service-id", resource.IDToString(svcID))

    service, err := l.mem.GetService(ctx, svcID)
    if err != nil {
        logger.Error("error retrieving the service", "serviceID", svcID, "error", err)
        return err
    }
    if service != nil {
        l.out.AddService(service)

        failoverPolicyID := changeResourceType(svcID, catalog.FailoverPolicyType)
        failoverPolicy, err := l.mem.GetFailoverPolicy(ctx, failoverPolicyID)
        if err != nil {
            logger.Error("error retrieving the failover policy", "failoverPolicyID", failoverPolicyID, "error", err)
            return err
        }
        if failoverPolicy != nil {
            l.mapper.TrackFailoverPolicy(failoverPolicy)
            l.out.AddFailoverPolicy(failoverPolicy)

            destRefs := failoverPolicy.Data.GetUnderlyingDestinationRefs()
            for _, destRef := range destRefs {
                destID := resource.IDFromReference(destRef)

                failService, err := l.mem.GetService(ctx, destID)
                if err != nil {
                    logger.Error("error retrieving a failover destination service",
                        "serviceID", destID, "error", err)
                    return err
                }
                if failService != nil {
                    l.out.AddService(failService)
                }
            }
        } else {
            l.mapper.UntrackFailoverPolicy(failoverPolicyID)
        }

        destPolicyID := changeResourceType(svcID, types.DestinationPolicyType)
        destPolicy, err := l.mem.GetDestinationPolicy(ctx, destPolicyID)
        if err != nil {
            logger.Error("error retrieving the destination config", "destPolicyID", destPolicyID, "error", err)
            return err
        }
        if destPolicy != nil {
            l.out.AddDestinationPolicy(destPolicy)
        }
    }

    return nil
}

func (l *loader) gatherSingleXRouteAsInput(
    ctx context.Context,
    logger hclog.Logger,
    routeID *pbresource.ID,
    route types.XRouteData,
    relatedRouteCaptureFn func(),
) error {
    if route == nil {
        logger.Trace("XRoute has been deleted")
        l.mapper.UntrackXRoute(routeID)
        return nil
    }
    l.mapper.TrackXRoute(routeID, route)

    relatedRouteCaptureFn()

    for _, parentRef := range route.GetParentRefs() {
        if types.IsServiceType(parentRef.Ref.Type) {
            parentComputedRoutesID := &pbresource.ID{
                Type:    types.ComputedRoutesType,
                Tenancy: parentRef.Ref.Tenancy,
                Name:    parentRef.Ref.Name,
            }
            // Note: this will only schedule things to load that have not already been loaded
            l.requestLoad(parentComputedRoutesID)
        }
    }

    for _, backendRef := range route.GetUnderlyingBackendRefs() {
        if types.IsServiceType(backendRef.Ref.Type) {
            svcID := resource.IDFromReference(backendRef.Ref)
            if err := l.loadUpstreamService(ctx, logger, svcID); err != nil {
                return err
            }
        }
    }

    return nil
}

func changeResourceType(id *pbresource.ID, newType *pbresource.Type) *pbresource.ID {
    return &pbresource.ID{
        Type:    newType,
        Tenancy: id.Tenancy,
        Name:    id.Name,
    }
}
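Usage note (hypothetical sketch mirroring the controller above): loggerFor may be nil, in which case the loader falls back to a no-op logger, so a minimal standalone call can look like the following. The caller-supplied ctx, client, mapper, and id are assumptions here.

package example // illustrative caller of the loader

import (
    "context"

    "github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/loader"
    "github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/xroutemapper"
    "github.com/hashicorp/consul/proto-public/pbresource"
)

func loadExample(ctx context.Context, client pbresource.ResourceServiceClient, mapper *xroutemapper.Mapper, id *pbresource.ID) error {
    // Passing nil for loggerFor is fine; the loader substitutes hclog.NewNullLogger().
    related, err := loader.LoadResourcesForComputedRoutes(ctx, nil, client, mapper, id)
    if err != nil {
        return err
    }
    _ = related.ComputedRoutesList // every ComputedRoutes ID reached by the graph walk
    return nil
}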
@@ -0,0 +1,404 @@ (new file)
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package loader

import (
    "testing"
    "time"

    "github.com/hashicorp/go-hclog"
    "github.com/stretchr/testify/require"
    "google.golang.org/protobuf/types/known/durationpb"

    svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
    "github.com/hashicorp/consul/internal/catalog"
    "github.com/hashicorp/consul/internal/controller"
    "github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/xroutemapper"
    "github.com/hashicorp/consul/internal/mesh/internal/types"
    "github.com/hashicorp/consul/internal/resource"
    rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
    pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
    pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
    "github.com/hashicorp/consul/proto-public/pbresource"
    "github.com/hashicorp/consul/proto/private/prototest"
    "github.com/hashicorp/consul/sdk/testutil"
)

func TestLoadResourcesForComputedRoutes(t *testing.T) {
    ctx := testutil.TestContext(t)
    rclient := svctest.RunResourceService(t, types.Register, catalog.RegisterTypes)
    rt := controller.Runtime{
        Client: rclient,
        Logger: testutil.Logger(t),
    }
    client := rtest.NewClient(rclient)

    loggerFor := func(id *pbresource.ID) hclog.Logger {
        return rt.Logger.With("resource-id", id)
    }

    mapper := xroutemapper.New()

    deleteRes := func(id *pbresource.ID, untrack bool) {
        client.MustDelete(t, id)
        if untrack {
            switch {
            case types.IsRouteType(id.Type):
                mapper.UntrackXRoute(id)
            case types.IsFailoverPolicyType(id.Type):
                mapper.UntrackFailoverPolicy(id)
            }
        }
    }

    writeHTTP := func(name string, data *pbmesh.HTTPRoute) *types.DecodedHTTPRoute {
        res := rtest.Resource(types.HTTPRouteType, name).
            WithTenancy(resource.DefaultNamespacedTenancy()).
            WithData(t, data).
            Write(t, client)
        mapper.TrackXRoute(res.Id, data)
        dec, err := resource.Decode[*pbmesh.HTTPRoute](res)
        require.NoError(t, err)
        return dec
    }

    writeGRPC := func(name string, data *pbmesh.GRPCRoute) *types.DecodedGRPCRoute {
        res := rtest.Resource(types.GRPCRouteType, name).
            WithTenancy(resource.DefaultNamespacedTenancy()).
            WithData(t, data).
            Write(t, client)
        mapper.TrackXRoute(res.Id, data)
        dec, err := resource.Decode[*pbmesh.GRPCRoute](res)
        require.NoError(t, err)
        return dec
    }
    _ = writeGRPC // TODO

    writeTCP := func(name string, data *pbmesh.TCPRoute) *types.DecodedTCPRoute {
        res := rtest.Resource(types.TCPRouteType, name).
            WithTenancy(resource.DefaultNamespacedTenancy()).
            WithData(t, data).
            Write(t, client)
        mapper.TrackXRoute(res.Id, data)
        dec, err := resource.Decode[*pbmesh.TCPRoute](res)
        require.NoError(t, err)
        return dec
    }
    _ = writeTCP // TODO

    writeDestPolicy := func(name string, data *pbmesh.DestinationPolicy) *types.DecodedDestinationPolicy {
        res := rtest.Resource(types.DestinationPolicyType, name).
            WithTenancy(resource.DefaultNamespacedTenancy()).
            WithData(t, data).
            Write(t, client)
        dec, err := resource.Decode[*pbmesh.DestinationPolicy](res)
        require.NoError(t, err)
        return dec
    }

    writeFailover := func(name string, data *pbcatalog.FailoverPolicy) *types.DecodedFailoverPolicy {
        res := rtest.Resource(catalog.FailoverPolicyType, name).
            WithTenancy(resource.DefaultNamespacedTenancy()).
            WithData(t, data).
            Write(t, client)
        dec, err := resource.Decode[*pbcatalog.FailoverPolicy](res)
        require.NoError(t, err)
        return dec
    }

    writeService := func(name string, data *pbcatalog.Service) *types.DecodedService {
        res := rtest.Resource(catalog.ServiceType, name).
            WithTenancy(resource.DefaultNamespacedTenancy()).
            WithData(t, data).
            Write(t, client)
        dec, err := resource.Decode[*pbcatalog.Service](res)
        require.NoError(t, err)
        return dec
    }

    /////////////////////////////////////

    // Init some port-aligned services.
    apiSvc := writeService("api", &pbcatalog.Service{
        Ports: []*pbcatalog.ServicePort{
            {TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
            {TargetPort: "tcp", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
            {TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
        },
    })
    adminSvc := writeService("admin", &pbcatalog.Service{
        Ports: []*pbcatalog.ServicePort{
            {TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
            {TargetPort: "tcp", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
            {TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
        },
    })
    fooSvc := writeService("foo", &pbcatalog.Service{
|
||||||
|
Ports: []*pbcatalog.ServicePort{
|
||||||
|
{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||||
|
{TargetPort: "tcp", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||||
|
{TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
barSvc := writeService("bar", &pbcatalog.Service{
|
||||||
|
Ports: []*pbcatalog.ServicePort{
|
||||||
|
{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
|
||||||
|
{TargetPort: "tcp", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||||
|
{TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
apiRoutesID := &pbresource.ID{
|
||||||
|
Type: types.ComputedRoutesType,
|
||||||
|
Tenancy: resource.DefaultNamespacedTenancy(),
|
||||||
|
Name: "api",
|
||||||
|
}
|
||||||
|
adminRoutesID := &pbresource.ID{
|
||||||
|
Type: types.ComputedRoutesType,
|
||||||
|
Tenancy: resource.DefaultNamespacedTenancy(),
|
||||||
|
Name: "admin",
|
||||||
|
}
|
||||||
|
|
||||||
|
testutil.RunStep(t, "only service", func(t *testing.T) {
|
||||||
|
out, err := LoadResourcesForComputedRoutes(ctx, loggerFor, rt.Client, mapper, apiRoutesID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
prototest.AssertDeepEqual(t, NewRelatedResources().AddResources(
|
||||||
|
apiSvc,
|
||||||
|
).AddComputedRoutesIDs(apiRoutesID), out)
|
||||||
|
require.Equal(t, doubleMap(t /* empty */), out.RoutesByParentRef)
|
||||||
|
})
|
||||||
|
|
||||||
|
// Write one silly http route
|
||||||
|
route1 := writeHTTP("api-route1", &pbmesh.HTTPRoute{
|
||||||
|
ParentRefs: []*pbmesh.ParentReference{{
|
||||||
|
Ref: newRef(catalog.ServiceType, "api"),
|
||||||
|
// all ports
|
||||||
|
}},
|
||||||
|
})
|
||||||
|
|
||||||
|
testutil.RunStep(t, "one silly route", func(t *testing.T) {
|
||||||
|
out, err := LoadResourcesForComputedRoutes(ctx, loggerFor, rt.Client, mapper, apiRoutesID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
prototest.AssertDeepEqual(t, NewRelatedResources().AddResources(
|
||||||
|
apiSvc,
|
||||||
|
route1,
|
||||||
|
).AddComputedRoutesIDs(apiRoutesID), out)
|
||||||
|
require.Equal(t, doubleMap(t,
|
||||||
|
apiSvc, route1,
|
||||||
|
), out.RoutesByParentRef)
|
||||||
|
})
|
||||||
|
|
||||||
|
// add a second route that is more interesting and is TCP
|
||||||
|
route2 := writeTCP("api-route2", &pbmesh.TCPRoute{
|
||||||
|
ParentRefs: []*pbmesh.ParentReference{{
|
||||||
|
Ref: newRef(catalog.ServiceType, "api"),
|
||||||
|
// all ports
|
||||||
|
}},
|
||||||
|
Rules: []*pbmesh.TCPRouteRule{{
|
||||||
|
BackendRefs: []*pbmesh.TCPBackendRef{
|
||||||
|
{
|
||||||
|
BackendRef: &pbmesh.BackendReference{
|
||||||
|
Ref: newRef(catalog.ServiceType, "foo"),
|
||||||
|
},
|
||||||
|
Weight: 30,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
BackendRef: &pbmesh.BackendReference{
|
||||||
|
Ref: newRef(catalog.ServiceType, "bar"),
|
||||||
|
},
|
||||||
|
Weight: 70,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}},
|
||||||
|
})
|
||||||
|
|
||||||
|
testutil.RunStep(t, "two routes", func(t *testing.T) {
|
||||||
|
out, err := LoadResourcesForComputedRoutes(ctx, loggerFor, rt.Client, mapper, apiRoutesID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
prototest.AssertDeepEqual(t, NewRelatedResources().AddResources(
|
||||||
|
apiSvc,
|
||||||
|
fooSvc,
|
||||||
|
barSvc,
|
||||||
|
route1,
|
||||||
|
route2,
|
||||||
|
).AddComputedRoutesIDs(apiRoutesID), out)
|
||||||
|
require.Equal(t, doubleMap(t,
|
||||||
|
apiSvc, route1,
|
||||||
|
apiSvc, route2,
|
||||||
|
), out.RoutesByParentRef)
|
||||||
|
})
|
||||||
|
|
||||||
|
// update the first to overlap with the second
|
||||||
|
route1 = writeHTTP("api-route1", &pbmesh.HTTPRoute{
|
||||||
|
ParentRefs: []*pbmesh.ParentReference{
|
||||||
|
{
|
||||||
|
Ref: newRef(catalog.ServiceType, "api"),
|
||||||
|
// all ports
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Ref: newRef(catalog.ServiceType, "admin"),
|
||||||
|
// all ports
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
testutil.RunStep(t, "two overlapping computed routes resources", func(t *testing.T) {
|
||||||
|
out, err := LoadResourcesForComputedRoutes(ctx, loggerFor, rt.Client, mapper, apiRoutesID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
prototest.AssertDeepEqual(t, NewRelatedResources().AddResources(
|
||||||
|
apiSvc,
|
||||||
|
fooSvc,
|
||||||
|
barSvc,
|
||||||
|
adminSvc,
|
||||||
|
route1,
|
||||||
|
route2,
|
||||||
|
).AddComputedRoutesIDs(apiRoutesID, adminRoutesID), out)
|
||||||
|
require.Equal(t, doubleMap(t,
|
||||||
|
apiSvc, route1,
|
||||||
|
apiSvc, route2,
|
||||||
|
adminSvc, route1,
|
||||||
|
), out.RoutesByParentRef)
|
||||||
|
})
|
||||||
|
|
||||||
|
// add a third (GRPC) that overlaps them both
|
||||||
|
|
||||||
|
route3 := writeGRPC("api-route3", &pbmesh.GRPCRoute{
|
||||||
|
ParentRefs: []*pbmesh.ParentReference{
|
||||||
|
{
|
||||||
|
Ref: newRef(catalog.ServiceType, "api"),
|
||||||
|
// all ports
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Ref: newRef(catalog.ServiceType, "admin"),
|
||||||
|
// all ports
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
testutil.RunStep(t, "three overlapping computed routes resources", func(t *testing.T) {
|
||||||
|
out, err := LoadResourcesForComputedRoutes(ctx, loggerFor, rt.Client, mapper, apiRoutesID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
prototest.AssertDeepEqual(t, NewRelatedResources().AddResources(
|
||||||
|
apiSvc,
|
||||||
|
fooSvc,
|
||||||
|
barSvc,
|
||||||
|
adminSvc,
|
||||||
|
route1,
|
||||||
|
route2,
|
||||||
|
route3,
|
||||||
|
).AddComputedRoutesIDs(apiRoutesID, adminRoutesID), out)
|
||||||
|
require.Equal(t, doubleMap(t,
|
||||||
|
apiSvc, route1,
|
||||||
|
apiSvc, route2,
|
||||||
|
apiSvc, route3,
|
||||||
|
adminSvc, route1,
|
||||||
|
adminSvc, route3,
|
||||||
|
), out.RoutesByParentRef)
|
||||||
|
})
|
||||||
|
|
||||||
|
// We untrack the first, but we let the third one be a dangling reference
|
||||||
|
// so that the loader has to fix it up.
|
||||||
|
deleteRes(route1.Resource.Id, true)
|
||||||
|
deleteRes(route3.Resource.Id, false)
|
||||||
|
|
||||||
|
testutil.RunStep(t, "delete first and third route", func(t *testing.T) {
|
||||||
|
out, err := LoadResourcesForComputedRoutes(ctx, loggerFor, rt.Client, mapper, apiRoutesID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
prototest.AssertDeepEqual(t, NewRelatedResources().AddResources(
|
||||||
|
apiSvc,
|
||||||
|
fooSvc,
|
||||||
|
barSvc,
|
||||||
|
route2,
|
||||||
|
).AddComputedRoutesIDs(apiRoutesID), out)
|
||||||
|
require.Equal(t, doubleMap(t,
|
||||||
|
apiSvc, route2,
|
||||||
|
), out.RoutesByParentRef)
|
||||||
|
})
|
||||||
|
|
||||||
|
barFailover := writeFailover("bar", &pbcatalog.FailoverPolicy{
|
||||||
|
Config: &pbcatalog.FailoverConfig{
|
||||||
|
Destinations: []*pbcatalog.FailoverDestination{{
|
||||||
|
Ref: newRef(catalog.ServiceType, "admin"),
|
||||||
|
}},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
testutil.RunStep(t, "add a failover", func(t *testing.T) {
|
||||||
|
out, err := LoadResourcesForComputedRoutes(ctx, loggerFor, rt.Client, mapper, apiRoutesID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
prototest.AssertDeepEqual(t, NewRelatedResources().AddResources(
|
||||||
|
apiSvc,
|
||||||
|
fooSvc,
|
||||||
|
barSvc,
|
||||||
|
adminSvc,
|
||||||
|
route2,
|
||||||
|
barFailover,
|
||||||
|
).AddComputedRoutesIDs(apiRoutesID), out)
|
||||||
|
require.Equal(t, doubleMap(t,
|
||||||
|
apiSvc, route2,
|
||||||
|
), out.RoutesByParentRef)
|
||||||
|
})
|
||||||
|
|
||||||
|
fooDestPolicy := writeDestPolicy("foo", &pbmesh.DestinationPolicy{
|
||||||
|
PortConfigs: map[string]*pbmesh.DestinationConfig{
|
||||||
|
"www": {
|
||||||
|
ConnectTimeout: durationpb.New(55 * time.Second),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
testutil.RunStep(t, "add a dest policy", func(t *testing.T) {
|
||||||
|
out, err := LoadResourcesForComputedRoutes(ctx, loggerFor, rt.Client, mapper, apiRoutesID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
prototest.AssertDeepEqual(t, NewRelatedResources().AddResources(
|
||||||
|
apiSvc,
|
||||||
|
fooSvc,
|
||||||
|
barSvc,
|
||||||
|
adminSvc,
|
||||||
|
route2,
|
||||||
|
barFailover,
|
||||||
|
fooDestPolicy,
|
||||||
|
).AddComputedRoutesIDs(apiRoutesID), out)
|
||||||
|
require.Equal(t, doubleMap(t,
|
||||||
|
apiSvc, route2,
|
||||||
|
), out.RoutesByParentRef)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRef(typ *pbresource.Type, name string) *pbresource.Reference {
|
||||||
|
return rtest.Resource(typ, name).
|
||||||
|
WithTenancy(resource.DefaultNamespacedTenancy()).
|
||||||
|
Reference("")
|
||||||
|
}
|
||||||
|
|
||||||
|
type resourceGetter interface {
|
||||||
|
GetResource() *pbresource.Resource
|
||||||
|
}
|
||||||
|
|
||||||
|
func doubleMap(t *testing.T, list ...resourceGetter) map[resource.ReferenceKey]map[resource.ReferenceKey]struct{} {
|
||||||
|
if len(list)%2 != 0 {
|
||||||
|
t.Fatalf("list must have an even number of references")
|
||||||
|
}
|
||||||
|
out := make(map[resource.ReferenceKey]map[resource.ReferenceKey]struct{})
|
||||||
|
for i := 0; i < len(list); i += 2 {
|
||||||
|
svcRK := resource.NewReferenceKey(list[i].GetResource().Id)
|
||||||
|
routeRK := resource.NewReferenceKey(list[i+1].GetResource().Id)
|
||||||
|
|
||||||
|
m, ok := out[svcRK]
|
||||||
|
if !ok {
|
||||||
|
m = make(map[resource.ReferenceKey]struct{})
|
||||||
|
out[svcRK] = m
|
||||||
|
}
|
||||||
|
m[routeRK] = struct{}{}
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
|
@ -0,0 +1,94 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: BUSL-1.1
|
||||||
|
|
||||||
|
package loader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/internal/catalog"
|
||||||
|
"github.com/hashicorp/consul/internal/mesh/internal/types"
|
||||||
|
"github.com/hashicorp/consul/internal/resource"
|
||||||
|
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
|
||||||
|
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
|
||||||
|
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||||
|
)
|
||||||
|
|
||||||
|
type memoizingLoader struct {
|
||||||
|
client pbresource.ResourceServiceClient
|
||||||
|
|
||||||
|
mapHTTPRoute map[resource.ReferenceKey]*types.DecodedHTTPRoute
|
||||||
|
mapGRPCRoute map[resource.ReferenceKey]*types.DecodedGRPCRoute
|
||||||
|
mapTCPRoute map[resource.ReferenceKey]*types.DecodedTCPRoute
|
||||||
|
mapDestinationPolicy map[resource.ReferenceKey]*types.DecodedDestinationPolicy
|
||||||
|
mapFailoverPolicy map[resource.ReferenceKey]*types.DecodedFailoverPolicy
|
||||||
|
mapService map[resource.ReferenceKey]*types.DecodedService
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMemoizingLoader(client pbresource.ResourceServiceClient) *memoizingLoader {
|
||||||
|
if client == nil {
|
||||||
|
panic("client is required")
|
||||||
|
}
|
||||||
|
return &memoizingLoader{
|
||||||
|
client: client,
|
||||||
|
mapHTTPRoute: make(map[resource.ReferenceKey]*types.DecodedHTTPRoute),
|
||||||
|
mapGRPCRoute: make(map[resource.ReferenceKey]*types.DecodedGRPCRoute),
|
||||||
|
mapTCPRoute: make(map[resource.ReferenceKey]*types.DecodedTCPRoute),
|
||||||
|
mapDestinationPolicy: make(map[resource.ReferenceKey]*types.DecodedDestinationPolicy),
|
||||||
|
mapFailoverPolicy: make(map[resource.ReferenceKey]*types.DecodedFailoverPolicy),
|
||||||
|
mapService: make(map[resource.ReferenceKey]*types.DecodedService),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoizingLoader) GetHTTPRoute(ctx context.Context, id *pbresource.ID) (*types.DecodedHTTPRoute, error) {
|
||||||
|
return getOrCacheResource[*pbmesh.HTTPRoute](ctx, m.client, m.mapHTTPRoute, types.HTTPRouteType, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoizingLoader) GetGRPCRoute(ctx context.Context, id *pbresource.ID) (*types.DecodedGRPCRoute, error) {
|
||||||
|
return getOrCacheResource[*pbmesh.GRPCRoute](ctx, m.client, m.mapGRPCRoute, types.GRPCRouteType, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoizingLoader) GetTCPRoute(ctx context.Context, id *pbresource.ID) (*types.DecodedTCPRoute, error) {
|
||||||
|
return getOrCacheResource[*pbmesh.TCPRoute](ctx, m.client, m.mapTCPRoute, types.TCPRouteType, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoizingLoader) GetDestinationPolicy(ctx context.Context, id *pbresource.ID) (*types.DecodedDestinationPolicy, error) {
|
||||||
|
return getOrCacheResource[*pbmesh.DestinationPolicy](ctx, m.client, m.mapDestinationPolicy, types.DestinationPolicyType, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoizingLoader) GetFailoverPolicy(ctx context.Context, id *pbresource.ID) (*types.DecodedFailoverPolicy, error) {
|
||||||
|
return getOrCacheResource[*pbcatalog.FailoverPolicy](ctx, m.client, m.mapFailoverPolicy, catalog.FailoverPolicyType, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *memoizingLoader) GetService(ctx context.Context, id *pbresource.ID) (*types.DecodedService, error) {
|
||||||
|
return getOrCacheResource[*pbcatalog.Service](ctx, m.client, m.mapService, catalog.ServiceType, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getOrCacheResource[T proto.Message](
|
||||||
|
ctx context.Context,
|
||||||
|
client pbresource.ResourceServiceClient,
|
||||||
|
cache map[resource.ReferenceKey]*resource.DecodedResource[T],
|
||||||
|
typ *pbresource.Type,
|
||||||
|
id *pbresource.ID,
|
||||||
|
) (*resource.DecodedResource[T], error) {
|
||||||
|
if !resource.EqualType(id.Type, typ) {
|
||||||
|
return nil, fmt.Errorf("expected %s not %s", resource.TypeToString(typ), resource.TypeToString(id.Type))
|
||||||
|
}
|
||||||
|
|
||||||
|
rk := resource.NewReferenceKey(id)
|
||||||
|
|
||||||
|
if cached, ok := cache[rk]; ok {
|
||||||
|
return cached, nil // cached value may be nil
|
||||||
|
}
|
||||||
|
|
||||||
|
dec, err := resource.GetDecodedResource[T](ctx, client, id)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
cache[rk] = dec
|
||||||
|
return dec, nil
|
||||||
|
}
|
|
@ -0,0 +1,232 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: BUSL-1.1
|
||||||
|
|
||||||
|
package loader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/internal/catalog"
|
||||||
|
"github.com/hashicorp/consul/internal/mesh/internal/types"
|
||||||
|
"github.com/hashicorp/consul/internal/resource"
|
||||||
|
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RelatedResources is a spiritual successor of *configentry.DiscoveryChainSet
|
||||||
|
type RelatedResources struct {
|
||||||
|
ComputedRoutesList []*pbresource.ID
|
||||||
|
// RoutesByParentRef is a map of a parent Service to the xRoutes that compose it.
|
||||||
|
RoutesByParentRef map[resource.ReferenceKey]map[resource.ReferenceKey]struct{}
|
||||||
|
HTTPRoutes map[resource.ReferenceKey]*types.DecodedHTTPRoute
|
||||||
|
GRPCRoutes map[resource.ReferenceKey]*types.DecodedGRPCRoute
|
||||||
|
TCPRoutes map[resource.ReferenceKey]*types.DecodedTCPRoute
|
||||||
|
Services map[resource.ReferenceKey]*types.DecodedService
|
||||||
|
FailoverPolicies map[resource.ReferenceKey]*types.DecodedFailoverPolicy
|
||||||
|
DestinationPolicies map[resource.ReferenceKey]*types.DecodedDestinationPolicy
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewRelatedResources() *RelatedResources {
|
||||||
|
return &RelatedResources{
|
||||||
|
RoutesByParentRef: make(map[resource.ReferenceKey]map[resource.ReferenceKey]struct{}),
|
||||||
|
HTTPRoutes: make(map[resource.ReferenceKey]*types.DecodedHTTPRoute),
|
||||||
|
GRPCRoutes: make(map[resource.ReferenceKey]*types.DecodedGRPCRoute),
|
||||||
|
TCPRoutes: make(map[resource.ReferenceKey]*types.DecodedTCPRoute),
|
||||||
|
Services: make(map[resource.ReferenceKey]*types.DecodedService),
|
||||||
|
FailoverPolicies: make(map[resource.ReferenceKey]*types.DecodedFailoverPolicy),
|
||||||
|
DestinationPolicies: make(map[resource.ReferenceKey]*types.DecodedDestinationPolicy),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) AddComputedRoutesIDs(list ...*pbresource.ID) *RelatedResources {
|
||||||
|
for _, id := range list {
|
||||||
|
r.AddComputedRoutesID(id)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) AddComputedRoutesID(id *pbresource.ID) *RelatedResources {
|
||||||
|
if !resource.EqualType(id.Type, types.ComputedRoutesType) {
|
||||||
|
panic(fmt.Sprintf("expected *mesh.ComputedRoutes, not %s", resource.TypeToString(id.Type)))
|
||||||
|
}
|
||||||
|
r.ComputedRoutesList = append(r.ComputedRoutesList, id)
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddResources must only be called with valid *resource.DecodedResource[T]
|
||||||
|
// types.
|
||||||
|
//
|
||||||
|
// This is provided as a testing convenience. Non-test code should call the
|
||||||
|
// type-specific adder.
|
||||||
|
func (r *RelatedResources) AddResources(list ...any) *RelatedResources {
|
||||||
|
for _, res := range list {
|
||||||
|
r.AddResource(res)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddResource must only be called with valid *resource.DecodedResource[T] types.
|
||||||
|
//
|
||||||
|
// This is provided as a testing convenience. Non-test code should call the
|
||||||
|
// type-specific adder.
|
||||||
|
func (r *RelatedResources) AddResource(res any) {
|
||||||
|
if res == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
switch dec := res.(type) {
|
||||||
|
case *types.DecodedHTTPRoute:
|
||||||
|
r.AddHTTPRoute(dec)
|
||||||
|
case *types.DecodedGRPCRoute:
|
||||||
|
r.AddGRPCRoute(dec)
|
||||||
|
case *types.DecodedTCPRoute:
|
||||||
|
r.AddTCPRoute(dec)
|
||||||
|
case *types.DecodedDestinationPolicy:
|
||||||
|
r.AddDestinationPolicy(dec)
|
||||||
|
case *types.DecodedService:
|
||||||
|
r.AddService(dec)
|
||||||
|
case *types.DecodedFailoverPolicy:
|
||||||
|
r.AddFailoverPolicy(dec)
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("unknown decoded resource type: %T", res))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) AddHTTPRoute(dec *types.DecodedHTTPRoute) {
|
||||||
|
r.addRouteSetEntries(dec.Resource, dec.Data)
|
||||||
|
addResource(dec.Resource.Id, dec, r.HTTPRoutes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) AddGRPCRoute(dec *types.DecodedGRPCRoute) {
|
||||||
|
r.addRouteSetEntries(dec.Resource, dec.Data)
|
||||||
|
addResource(dec.Resource.Id, dec, r.GRPCRoutes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) AddTCPRoute(dec *types.DecodedTCPRoute) {
|
||||||
|
r.addRouteSetEntries(dec.Resource, dec.Data)
|
||||||
|
addResource(dec.Resource.Id, dec, r.TCPRoutes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) AddDestinationPolicy(dec *types.DecodedDestinationPolicy) {
|
||||||
|
addResource(dec.Resource.Id, dec, r.DestinationPolicies)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) AddService(dec *types.DecodedService) {
|
||||||
|
addResource(dec.Resource.Id, dec, r.Services)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) AddFailoverPolicy(dec *types.DecodedFailoverPolicy) {
|
||||||
|
addResource(dec.Resource.Id, dec, r.FailoverPolicies)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) addRouteSetEntries(
|
||||||
|
res *pbresource.Resource,
|
||||||
|
xroute types.XRouteData,
|
||||||
|
) {
|
||||||
|
if res == nil || xroute == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
routeRK := resource.NewReferenceKey(res.Id)
|
||||||
|
|
||||||
|
for _, parentRef := range xroute.GetParentRefs() {
|
||||||
|
if parentRef.Ref == nil || !types.IsServiceType(parentRef.Ref.Type) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
svcRK := resource.NewReferenceKey(parentRef.Ref)
|
||||||
|
|
||||||
|
r.addRouteByParentRef(svcRK, routeRK)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) addRouteByParentRef(svcRK, xRouteRK resource.ReferenceKey) {
|
||||||
|
m, ok := r.RoutesByParentRef[svcRK]
|
||||||
|
if !ok {
|
||||||
|
m = make(map[resource.ReferenceKey]struct{})
|
||||||
|
r.RoutesByParentRef[svcRK] = m
|
||||||
|
}
|
||||||
|
m[xRouteRK] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type RouteWalkFunc func(
|
||||||
|
rk resource.ReferenceKey,
|
||||||
|
res *pbresource.Resource,
|
||||||
|
route types.XRouteData,
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *RelatedResources) WalkRoutes(fn RouteWalkFunc) {
|
||||||
|
for rk, route := range r.HTTPRoutes {
|
||||||
|
fn(rk, route.Resource, route.Data)
|
||||||
|
}
|
||||||
|
for rk, route := range r.GRPCRoutes {
|
||||||
|
fn(rk, route.Resource, route.Data)
|
||||||
|
}
|
||||||
|
for rk, route := range r.TCPRoutes {
|
||||||
|
fn(rk, route.Resource, route.Data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) WalkRoutesForParentRef(parentRef *pbresource.Reference, fn RouteWalkFunc) {
|
||||||
|
if !resource.EqualType(parentRef.Type, catalog.ServiceType) {
|
||||||
|
panic(fmt.Sprintf("expected *catalog.Service, not %s", resource.TypeToString(parentRef.Type)))
|
||||||
|
}
|
||||||
|
routeMap := r.RoutesByParentRef[resource.NewReferenceKey(parentRef)]
|
||||||
|
if len(routeMap) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for rk := range routeMap {
|
||||||
|
if route, ok := r.HTTPRoutes[rk]; ok {
|
||||||
|
fn(rk, route.Resource, route.Data)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if route, ok := r.GRPCRoutes[rk]; ok {
|
||||||
|
fn(rk, route.Resource, route.Data)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if route, ok := r.TCPRoutes[rk]; ok {
|
||||||
|
fn(rk, route.Resource, route.Data)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) GetService(ref resource.ReferenceOrID) *types.DecodedService {
|
||||||
|
return r.Services[resource.NewReferenceKey(ref)]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) GetFailoverPolicy(ref resource.ReferenceOrID) *types.DecodedFailoverPolicy {
|
||||||
|
return r.FailoverPolicies[resource.NewReferenceKey(ref)]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) GetFailoverPolicyForService(ref resource.ReferenceOrID) *types.DecodedFailoverPolicy {
|
||||||
|
failRef := &pbresource.Reference{
|
||||||
|
Type: catalog.FailoverPolicyType,
|
||||||
|
Tenancy: ref.GetTenancy(),
|
||||||
|
Name: ref.GetName(),
|
||||||
|
}
|
||||||
|
return r.GetFailoverPolicy(failRef)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) GetDestinationPolicy(ref resource.ReferenceOrID) *types.DecodedDestinationPolicy {
|
||||||
|
return r.DestinationPolicies[resource.NewReferenceKey(ref)]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RelatedResources) GetDestinationPolicyForService(ref resource.ReferenceOrID) *types.DecodedDestinationPolicy {
|
||||||
|
destRef := &pbresource.Reference{
|
||||||
|
Type: types.DestinationPolicyType,
|
||||||
|
Tenancy: ref.GetTenancy(),
|
||||||
|
Name: ref.GetName(),
|
||||||
|
}
|
||||||
|
return r.GetDestinationPolicy(destRef)
|
||||||
|
}
|
||||||
|
|
||||||
|
func addResource[V any](id *pbresource.ID, res *V, m map[resource.ReferenceKey]*V) {
|
||||||
|
if res == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
rk := resource.NewReferenceKey(id)
|
||||||
|
if _, ok := m[rk]; !ok {
|
||||||
|
m[rk] = res
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,92 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: BUSL-1.1
|
||||||
|
|
||||||
|
package routes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/internal/controller"
|
||||||
|
"github.com/hashicorp/consul/internal/resource"
|
||||||
|
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||||
|
)
|
||||||
|
|
||||||
|
type PendingResourceStatusUpdate struct {
|
||||||
|
ID *pbresource.ID
|
||||||
|
Generation string
|
||||||
|
CurrStatus *pbresource.Status
|
||||||
|
|
||||||
|
NewConditions []*pbresource.Condition
|
||||||
|
}
|
||||||
|
|
||||||
|
type PendingStatuses map[resource.ReferenceKey]*PendingResourceStatusUpdate
|
||||||
|
|
||||||
|
func (p PendingStatuses) AddConditions(
|
||||||
|
rk resource.ReferenceKey,
|
||||||
|
res *pbresource.Resource,
|
||||||
|
newConditions []*pbresource.Condition,
|
||||||
|
) {
|
||||||
|
state, ok := p[rk]
|
||||||
|
if !ok {
|
||||||
|
state = &PendingResourceStatusUpdate{
|
||||||
|
ID: res.Id,
|
||||||
|
Generation: res.Generation,
|
||||||
|
CurrStatus: res.Status[StatusKey],
|
||||||
|
}
|
||||||
|
p[rk] = state
|
||||||
|
}
|
||||||
|
|
||||||
|
state.NewConditions = append(state.NewConditions, newConditions...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func UpdatePendingStatuses(
|
||||||
|
ctx context.Context,
|
||||||
|
rt controller.Runtime,
|
||||||
|
pending PendingStatuses,
|
||||||
|
) error {
|
||||||
|
for _, state := range pending {
|
||||||
|
logger := rt.Logger.With("resource", resource.IDToString(state.ID))
|
||||||
|
|
||||||
|
var newStatus *pbresource.Status
|
||||||
|
if len(state.NewConditions) > 0 {
|
||||||
|
newStatus = &pbresource.Status{
|
||||||
|
ObservedGeneration: state.Generation,
|
||||||
|
Conditions: state.NewConditions,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
newStatus = &pbresource.Status{
|
||||||
|
ObservedGeneration: state.Generation,
|
||||||
|
Conditions: []*pbresource.Condition{
|
||||||
|
ConditionXRouteOK,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if resource.EqualStatus(state.CurrStatus, newStatus, false) {
|
||||||
|
logger.Trace(
|
||||||
|
"resource's status is unchanged",
|
||||||
|
"conditions", newStatus.Conditions,
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
_, err := rt.Client.WriteStatus(ctx, &pbresource.WriteStatusRequest{
|
||||||
|
Id: state.ID,
|
||||||
|
Key: StatusKey,
|
||||||
|
Status: newStatus,
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
logger.Error(
|
||||||
|
"error encountered when attempting to update the resource's status",
|
||||||
|
"error", err,
|
||||||
|
)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Trace(
|
||||||
|
"resource's status was updated",
|
||||||
|
"conditions", newStatus.Conditions,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -0,0 +1,121 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: BUSL-1.1
|
||||||
|
|
||||||
|
package routes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/hashicorp/consul/internal/catalog"
|
||||||
|
"github.com/hashicorp/consul/internal/mesh/internal/controllers/routes/loader"
|
||||||
|
"github.com/hashicorp/consul/internal/mesh/internal/types"
|
||||||
|
"github.com/hashicorp/consul/internal/resource"
|
||||||
|
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
|
||||||
|
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
|
||||||
|
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ValidateXRouteReferences examines all of the ParentRefs and BackendRefs of
|
||||||
|
// xRoutes provided and issues status conditions if anything is unacceptable.
|
||||||
|
func ValidateXRouteReferences(related *loader.RelatedResources, pending PendingStatuses) {
|
||||||
|
related.WalkRoutes(func(
|
||||||
|
rk resource.ReferenceKey,
|
||||||
|
res *pbresource.Resource,
|
||||||
|
route types.XRouteData,
|
||||||
|
) {
|
||||||
|
parentRefs := route.GetParentRefs()
|
||||||
|
backendRefs := route.GetUnderlyingBackendRefs()
|
||||||
|
|
||||||
|
conditions := computeNewRouteRefConditions(related, parentRefs, backendRefs)
|
||||||
|
|
||||||
|
pending.AddConditions(rk, res, conditions)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
type serviceGetter interface {
|
||||||
|
GetService(ref resource.ReferenceOrID) *types.DecodedService
|
||||||
|
}
|
||||||
|
|
||||||
|
func computeNewRouteRefConditions(
|
||||||
|
related serviceGetter,
|
||||||
|
parentRefs []*pbmesh.ParentReference,
|
||||||
|
backendRefs []*pbmesh.BackendReference,
|
||||||
|
) []*pbresource.Condition {
|
||||||
|
var conditions []*pbresource.Condition
|
||||||
|
|
||||||
|
// TODO(rb): handle port numbers here too if we are allowing those instead of the name?
|
||||||
|
|
||||||
|
for _, parentRef := range parentRefs {
|
||||||
|
if parentRef.Ref == nil || !resource.EqualType(parentRef.Ref.Type, catalog.ServiceType) {
|
||||||
|
continue // not possible due to xRoute validation
|
||||||
|
}
|
||||||
|
if parentRef.Ref.Section != "" {
|
||||||
|
continue // not possible due to xRoute validation
|
||||||
|
}
|
||||||
|
if svc := related.GetService(parentRef.Ref); svc != nil {
|
||||||
|
found := false
|
||||||
|
usingMesh := false
|
||||||
|
hasMesh := false
|
||||||
|
for _, port := range svc.Data.Ports {
|
||||||
|
if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
|
||||||
|
hasMesh = true
|
||||||
|
}
|
||||||
|
if port.TargetPort == parentRef.Port {
|
||||||
|
found = true
|
||||||
|
if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
|
||||||
|
usingMesh = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case !hasMesh:
|
||||||
|
conditions = append(conditions, ConditionParentRefOutsideMesh(parentRef.Ref))
|
||||||
|
case !found:
|
||||||
|
if parentRef.Port != "" {
|
||||||
|
conditions = append(conditions, ConditionUnknownParentRefPort(parentRef.Ref, parentRef.Port))
|
||||||
|
}
|
||||||
|
case usingMesh:
|
||||||
|
conditions = append(conditions, ConditionParentRefUsingMeshPort(parentRef.Ref, parentRef.Port))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
conditions = append(conditions, ConditionMissingParentRef(parentRef.Ref))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, backendRef := range backendRefs {
|
||||||
|
if backendRef.Ref == nil || !resource.EqualType(backendRef.Ref.Type, catalog.ServiceType) {
|
||||||
|
continue // not possible due to xRoute validation
|
||||||
|
}
|
||||||
|
if backendRef.Ref.Section != "" {
|
||||||
|
continue // not possible due to xRoute validation
|
||||||
|
}
|
||||||
|
if svc := related.GetService(backendRef.Ref); svc != nil {
|
||||||
|
found := false
|
||||||
|
usingMesh := false
|
||||||
|
hasMesh := false
|
||||||
|
for _, port := range svc.Data.Ports {
|
||||||
|
if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
|
||||||
|
hasMesh = true
|
||||||
|
}
|
||||||
|
if port.TargetPort == backendRef.Port {
|
||||||
|
found = true
|
||||||
|
if port.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
|
||||||
|
usingMesh = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case !hasMesh:
|
||||||
|
conditions = append(conditions, ConditionBackendRefOutsideMesh(backendRef.Ref))
|
||||||
|
case !found:
|
||||||
|
if backendRef.Port != "" {
|
||||||
|
conditions = append(conditions, ConditionUnknownBackendRefPort(backendRef.Ref, backendRef.Port))
|
||||||
|
}
|
||||||
|
case usingMesh:
|
||||||
|
conditions = append(conditions, ConditionBackendRefUsingMeshPort(backendRef.Ref, backendRef.Port))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
conditions = append(conditions, ConditionMissingBackendRef(backendRef.Ref))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return conditions
|
||||||
|
}
|
|
@ -0,0 +1,232 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: BUSL-1.1
|
||||||
|
|
||||||
|
package routes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/internal/catalog"
|
||||||
|
"github.com/hashicorp/consul/internal/mesh/internal/types"
|
||||||
|
"github.com/hashicorp/consul/internal/resource"
|
||||||
|
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||||
|
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
|
||||||
|
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
|
||||||
|
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||||
|
"github.com/hashicorp/consul/proto/private/prototest"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestComputeNewRouteRefConditions(t *testing.T) {
|
||||||
|
registry := resource.NewRegistry()
|
||||||
|
types.Register(registry)
|
||||||
|
catalog.RegisterTypes(registry)
|
||||||
|
|
||||||
|
newService := func(name string, ports map[string]pbcatalog.Protocol) *types.DecodedService {
|
||||||
|
var portSlice []*pbcatalog.ServicePort
|
||||||
|
for name, proto := range ports {
|
||||||
|
portSlice = append(portSlice, &pbcatalog.ServicePort{
|
||||||
|
TargetPort: name,
|
||||||
|
Protocol: proto,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
svc := rtest.Resource(catalog.ServiceType, name).
|
||||||
|
WithData(t, &pbcatalog.Service{Ports: portSlice}).
|
||||||
|
Build()
|
||||||
|
rtest.ValidateAndNormalize(t, registry, svc)
|
||||||
|
|
||||||
|
dec, err := resource.Decode[*pbcatalog.Service](svc)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return dec
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("no refs", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter()
|
||||||
|
got := computeNewRouteRefConditions(sg, nil, nil)
|
||||||
|
require.Empty(t, got)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("parent refs", func(t *testing.T) {
|
||||||
|
t.Run("with no service", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter()
|
||||||
|
got := computeNewRouteRefConditions(sg, []*pbmesh.ParentReference{
|
||||||
|
newParentRef(newRef(catalog.ServiceType, "api"), ""),
|
||||||
|
}, nil)
|
||||||
|
require.Len(t, got, 1)
|
||||||
|
prototest.AssertContainsElement(t, got, ConditionMissingParentRef(
|
||||||
|
newRef(catalog.ServiceType, "api"),
|
||||||
|
))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with service but no mesh port", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter(newService("api", map[string]pbcatalog.Protocol{
|
||||||
|
"http": pbcatalog.Protocol_PROTOCOL_HTTP,
|
||||||
|
}))
|
||||||
|
got := computeNewRouteRefConditions(sg, []*pbmesh.ParentReference{
|
||||||
|
newParentRef(newRef(catalog.ServiceType, "api"), ""),
|
||||||
|
}, nil)
|
||||||
|
require.Len(t, got, 1)
|
||||||
|
prototest.AssertContainsElement(t, got, ConditionParentRefOutsideMesh(
|
||||||
|
newRef(catalog.ServiceType, "api"),
|
||||||
|
))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with service but using mesh port", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter(newService("api", map[string]pbcatalog.Protocol{
|
||||||
|
"http": pbcatalog.Protocol_PROTOCOL_HTTP,
|
||||||
|
"mesh": pbcatalog.Protocol_PROTOCOL_MESH,
|
||||||
|
}))
|
||||||
|
got := computeNewRouteRefConditions(sg, []*pbmesh.ParentReference{
|
||||||
|
newParentRef(newRef(catalog.ServiceType, "api"), "mesh"),
|
||||||
|
}, nil)
|
||||||
|
require.Len(t, got, 1)
|
||||||
|
prototest.AssertContainsElement(t, got, ConditionParentRefUsingMeshPort(
|
||||||
|
newRef(catalog.ServiceType, "api"),
|
||||||
|
"mesh",
|
||||||
|
))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with service and using missing port", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter(newService("api", map[string]pbcatalog.Protocol{
|
||||||
|
"http": pbcatalog.Protocol_PROTOCOL_HTTP,
|
||||||
|
"mesh": pbcatalog.Protocol_PROTOCOL_MESH,
|
||||||
|
}))
|
||||||
|
got := computeNewRouteRefConditions(sg, []*pbmesh.ParentReference{
|
||||||
|
newParentRef(newRef(catalog.ServiceType, "api"), "web"),
|
||||||
|
}, nil)
|
||||||
|
require.Len(t, got, 1)
|
||||||
|
prototest.AssertContainsElement(t, got, ConditionUnknownParentRefPort(
|
||||||
|
newRef(catalog.ServiceType, "api"),
|
||||||
|
"web",
|
||||||
|
))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with service and using empty port", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter(newService("api", map[string]pbcatalog.Protocol{
|
||||||
|
"http": pbcatalog.Protocol_PROTOCOL_HTTP,
|
||||||
|
"mesh": pbcatalog.Protocol_PROTOCOL_MESH,
|
||||||
|
}))
|
||||||
|
got := computeNewRouteRefConditions(sg, []*pbmesh.ParentReference{
|
||||||
|
newParentRef(newRef(catalog.ServiceType, "api"), ""),
|
||||||
|
}, nil)
|
||||||
|
require.Empty(t, got)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with service and using correct port", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter(newService("api", map[string]pbcatalog.Protocol{
|
||||||
|
"http": pbcatalog.Protocol_PROTOCOL_HTTP,
|
||||||
|
"mesh": pbcatalog.Protocol_PROTOCOL_MESH,
|
||||||
|
}))
|
||||||
|
got := computeNewRouteRefConditions(sg, []*pbmesh.ParentReference{
|
||||||
|
newParentRef(newRef(catalog.ServiceType, "api"), "http"),
|
||||||
|
}, nil)
|
||||||
|
require.Empty(t, got)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("backend refs", func(t *testing.T) {
|
||||||
|
t.Run("with no service", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter()
|
||||||
|
got := computeNewRouteRefConditions(sg, nil, []*pbmesh.BackendReference{
|
||||||
|
newBackendRef(newRef(catalog.ServiceType, "api"), "", ""),
|
||||||
|
})
|
||||||
|
require.Len(t, got, 1)
|
||||||
|
prototest.AssertContainsElement(t, got, ConditionMissingBackendRef(
|
||||||
|
newRef(catalog.ServiceType, "api"),
|
||||||
|
))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with service but no mesh port", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter(newService("api", map[string]pbcatalog.Protocol{
|
||||||
|
"http": pbcatalog.Protocol_PROTOCOL_HTTP,
|
||||||
|
}))
|
||||||
|
got := computeNewRouteRefConditions(sg, nil, []*pbmesh.BackendReference{
|
||||||
|
newBackendRef(newRef(catalog.ServiceType, "api"), "", ""),
|
||||||
|
})
|
||||||
|
require.Len(t, got, 1)
|
||||||
|
prototest.AssertContainsElement(t, got, ConditionBackendRefOutsideMesh(
|
||||||
|
newRef(catalog.ServiceType, "api"),
|
||||||
|
))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with service but using mesh port", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter(newService("api", map[string]pbcatalog.Protocol{
|
||||||
|
"http": pbcatalog.Protocol_PROTOCOL_HTTP,
|
||||||
|
"mesh": pbcatalog.Protocol_PROTOCOL_MESH,
|
||||||
|
}))
|
||||||
|
got := computeNewRouteRefConditions(sg, nil, []*pbmesh.BackendReference{
|
||||||
|
newBackendRef(newRef(catalog.ServiceType, "api"), "mesh", ""),
|
||||||
|
})
|
||||||
|
require.Len(t, got, 1)
|
||||||
|
prototest.AssertContainsElement(t, got, ConditionBackendRefUsingMeshPort(
|
||||||
|
newRef(catalog.ServiceType, "api"),
|
||||||
|
"mesh",
|
||||||
|
))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with service and using missing port", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter(newService("api", map[string]pbcatalog.Protocol{
|
||||||
|
"http": pbcatalog.Protocol_PROTOCOL_HTTP,
|
||||||
|
"mesh": pbcatalog.Protocol_PROTOCOL_MESH,
|
||||||
|
}))
|
||||||
|
got := computeNewRouteRefConditions(sg, nil, []*pbmesh.BackendReference{
|
||||||
|
newBackendRef(newRef(catalog.ServiceType, "api"), "web", ""),
|
||||||
|
})
|
||||||
|
require.Len(t, got, 1)
|
||||||
|
prototest.AssertContainsElement(t, got, ConditionUnknownBackendRefPort(
|
||||||
|
newRef(catalog.ServiceType, "api"),
|
||||||
|
"web",
|
||||||
|
))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with service and using empty port", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter(newService("api", map[string]pbcatalog.Protocol{
|
||||||
|
"http": pbcatalog.Protocol_PROTOCOL_HTTP,
|
||||||
|
"mesh": pbcatalog.Protocol_PROTOCOL_MESH,
|
||||||
|
}))
|
||||||
|
got := computeNewRouteRefConditions(sg, nil, []*pbmesh.BackendReference{
|
||||||
|
newBackendRef(newRef(catalog.ServiceType, "api"), "", ""),
|
||||||
|
})
|
||||||
|
require.Empty(t, got)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with service and using correct port", func(t *testing.T) {
|
||||||
|
sg := newTestServiceGetter(newService("api", map[string]pbcatalog.Protocol{
|
||||||
|
"http": pbcatalog.Protocol_PROTOCOL_HTTP,
|
||||||
|
"mesh": pbcatalog.Protocol_PROTOCOL_MESH,
|
||||||
|
}))
|
||||||
|
got := computeNewRouteRefConditions(sg, nil, []*pbmesh.BackendReference{
|
||||||
|
newBackendRef(newRef(catalog.ServiceType, "api"), "http", ""),
|
||||||
|
})
|
||||||
|
require.Empty(t, got)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRef(typ *pbresource.Type, name string) *pbresource.Reference {
|
||||||
|
return rtest.Resource(typ, name).
|
||||||
|
WithTenancy(resource.DefaultNamespacedTenancy()).
|
||||||
|
Reference("")
|
||||||
|
}
|
||||||
|
|
||||||
|
type testServiceGetter struct {
|
||||||
|
services map[resource.ReferenceKey]*types.DecodedService
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTestServiceGetter(svcs ...*types.DecodedService) serviceGetter {
|
||||||
|
g := &testServiceGetter{
|
||||||
|
services: make(map[resource.ReferenceKey]*types.DecodedService),
|
||||||
|
}
|
||||||
|
for _, svc := range svcs {
|
||||||
|
g.services[resource.NewReferenceKey(svc.Resource.Id)] = svc
|
||||||
|
}
|
||||||
|
return g
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *testServiceGetter) GetService(ref resource.ReferenceOrID) *types.DecodedService {
|
||||||
|
if g.services == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return g.services[resource.NewReferenceKey(ref)]
|
||||||
|
}
|
|
@ -0,0 +1,231 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: BUSL-1.1
|
||||||
|
|
||||||
|
package routes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/oklog/ulid"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/internal/mesh/internal/types"
|
||||||
|
"github.com/hashicorp/consul/internal/resource"
|
||||||
|
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
|
||||||
|
)
|
||||||
|
|
||||||
|
func gammaSortRouteRules(node *inputRouteNode) {
|
||||||
|
switch {
|
||||||
|
case resource.EqualType(node.RouteType, types.HTTPRouteType):
|
||||||
|
gammaSortHTTPRouteRules(node.HTTPRules)
|
||||||
|
case resource.EqualType(node.RouteType, types.GRPCRouteType):
|
||||||
|
// TODO(rb): do a determinstic sort of something
|
||||||
|
case resource.EqualType(node.RouteType, types.TCPRouteType):
|
||||||
|
// TODO(rb): do a determinstic sort of something
|
||||||
|
default:
|
||||||
|
panic("impossible")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func gammaSortHTTPRouteRules(rules []*pbmesh.ComputedHTTPRouteRule) {
|
||||||
|
// First generate a parallel slice.
|
||||||
|
sortable := &sortableHTTPRouteRules{
|
||||||
|
rules: rules,
|
||||||
|
derivedInfo: make([]*derivedHTTPRouteRuleInfo, 0, len(rules)),
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, rule := range rules {
|
||||||
|
var sr derivedHTTPRouteRuleInfo
|
||||||
|
for _, m := range rule.Matches {
|
||||||
|
if m.Path != nil {
|
||||||
|
switch m.Path.Type {
|
||||||
|
case pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT:
|
||||||
|
sr.hasPathExact = true
|
||||||
|
case pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX:
|
||||||
|
sr.hasPathPrefix = true
|
||||||
|
v := len(m.Path.Value)
|
||||||
|
if v > sr.pathPrefixLength {
|
||||||
|
sr.pathPrefixLength = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Method != "" {
|
||||||
|
sr.hasMethod = true
|
||||||
|
}
|
||||||
|
if v := len(m.Headers); v > sr.numHeaders {
|
||||||
|
sr.numHeaders = v
|
||||||
|
}
|
||||||
|
if v := len(m.QueryParams); v > sr.numQueryParams {
|
||||||
|
sr.numQueryParams = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sortable.derivedInfo = append(sortable.derivedInfo, &sr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Similar to
|
||||||
|
// "agent/consul/discoverychain/gateway_httproute.go"
|
||||||
|
// compareHTTPRules
|
||||||
|
|
||||||
|
// Sort this by the GAMMA spec. We assume the caller has pre-sorted this
|
||||||
|
// for tiebreakers based on resource metadata.
|
||||||
|
|
||||||
|
sort.Stable(sortable)
|
||||||
|
}
|
||||||
|
|
||||||
|
type derivedHTTPRouteRuleInfo struct {
|
||||||
|
// sortable fields extracted from route
|
||||||
|
hasPathExact bool
|
||||||
|
hasPathPrefix bool
|
||||||
|
pathPrefixLength int
|
||||||
|
hasMethod bool
|
||||||
|
numHeaders int
|
||||||
|
numQueryParams int
|
||||||
|
}
|
||||||
|
|
||||||
|
type sortableHTTPRouteRules struct {
|
||||||
|
rules []*pbmesh.ComputedHTTPRouteRule
|
||||||
|
derivedInfo []*derivedHTTPRouteRuleInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ sort.Interface = (*sortableHTTPRouteRules)(nil)
|
||||||
|
|
||||||
|
func (r *sortableHTTPRouteRules) Len() int { return len(r.rules) }
|
||||||
|
|
||||||
|
func (r *sortableHTTPRouteRules) Swap(i, j int) {
|
||||||
|
r.rules[i], r.rules[j] = r.rules[j], r.rules[i]
|
||||||
|
r.derivedInfo[i], r.derivedInfo[j] = r.derivedInfo[j], r.derivedInfo[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *sortableHTTPRouteRules) Less(i, j int) bool {
|
||||||
|
a := r.derivedInfo[i]
|
||||||
|
b := r.derivedInfo[j]
|
||||||
|
|
||||||
|
// (1) “Exact” path match.
|
||||||
|
switch {
|
||||||
|
case a.hasPathExact && b.hasPathExact:
|
||||||
|
// NEXT TIE BREAK
|
||||||
|
case a.hasPathExact && !b.hasPathExact:
|
||||||
|
return true
|
||||||
|
case !a.hasPathExact && b.hasPathExact:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// (2) “Prefix” path match with largest number of characters.
|
||||||
|
switch {
|
||||||
|
case a.hasPathPrefix && b.hasPathPrefix:
|
||||||
|
if a.pathPrefixLength != b.pathPrefixLength {
|
||||||
|
return a.pathPrefixLength > b.pathPrefixLength
|
||||||
|
}
|
||||||
|
// NEXT TIE BREAK
|
||||||
|
case a.hasPathPrefix && !b.hasPathPrefix:
|
||||||
|
return true
|
||||||
|
case !a.hasPathPrefix && b.hasPathPrefix:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// (3) Method match.
|
||||||
|
switch {
|
||||||
|
case a.hasMethod && b.hasMethod:
|
||||||
|
// NEXT TIE BREAK
|
||||||
|
case a.hasMethod && !b.hasMethod:
|
||||||
|
return true
|
||||||
|
case !a.hasMethod && b.hasMethod:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// (4) Largest number of header matches.
|
||||||
|
switch {
|
||||||
|
case a.numHeaders == b.numHeaders:
|
||||||
|
// NEXT TIE BREAK
|
||||||
|
case a.numHeaders > b.numHeaders:
|
||||||
|
return true
|
||||||
|
case a.numHeaders < b.numHeaders:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// (5) Largest number of query param matches.
|
||||||
|
return a.numQueryParams > b.numQueryParams
|
||||||
|
}
|
||||||
|
|
||||||
|
// gammaInitialSortWrappedRoutes will sort the original inputs by the
|
||||||
|
// resource-envelope-only fields before we further stable sort by type-specific
|
||||||
|
// fields.
|
||||||
|
//
|
||||||
|
// If more than 1 route is provided the OriginalResource field must be set on
|
||||||
|
// all inputs (i.e. no synthetic default routes should be processed here).
|
||||||
|
func gammaInitialSortWrappedRoutes(routes []*inputRouteNode) {
|
||||||
|
if len(routes) < 2 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// First sort the input routes by the final criteria, so we can let the
|
||||||
|
// stable sort take care of the ultimate tiebreakers.
|
||||||
|
sort.Slice(routes, func(i, j int) bool {
|
||||||
|
var (
|
||||||
|
resA = routes[i].OriginalResource
|
||||||
|
resB = routes[j].OriginalResource
|
||||||
|
)
|
||||||
|
|
||||||
|
if resA == nil || resB == nil {
|
||||||
|
panic("some provided nodes lacked original resources")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
genA = resA.Generation
|
||||||
|
genB = resB.Generation
|
||||||
|
)
|
||||||
|
|
||||||
|
// (END-1) The oldest Route based on creation timestamp.
|
||||||
|
//
|
||||||
|
// Because these are ULIDs, we should be able to lexicographically sort
|
||||||
|
// them to determine the oldest, but we also need to have a further
|
||||||
|
// tiebreaker AFTER per-gamma so we cannot.
|
||||||
|
aULID, aErr := ulid.Parse(genA)
|
||||||
|
bULID, bErr := ulid.Parse(genB)
|
||||||
|
if aErr == nil && bErr == nil {
|
||||||
|
aTime := aULID.Time()
|
||||||
|
bTime := bULID.Time()
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case aTime < bTime:
|
||||||
|
return true
|
||||||
|
case aTime > bTime:
|
||||||
|
return false
|
||||||
|
default:
|
||||||
|
// NEXT TIE BREAK
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// (END-2) The Route appearing first in alphabetical order by “{namespace}/{name}”.
|
||||||
|
var (
|
||||||
|
tenancyA = resA.Id.Tenancy
|
||||||
|
tenancyB = resB.Id.Tenancy
|
||||||
|
|
||||||
|
nsA = tenancyA.Namespace
|
||||||
|
nsB = tenancyB.Namespace
|
||||||
|
)
|
||||||
|
|
||||||
|
if nsA == "" {
|
||||||
|
nsA = "default"
|
||||||
|
}
|
||||||
|
if nsB == "" {
|
||||||
|
nsB = "default"
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case nsA < nsB:
|
||||||
|
return true
|
||||||
|
case nsA > nsB:
|
||||||
|
return false
|
||||||
|
default:
|
||||||
|
// NEXT TIE BREAK
|
||||||
|
}
|
||||||
|
|
||||||
|
return resA.Id.Name < resB.Id.Name
|
||||||
|
|
||||||
|
// We get this for free b/c of the stable sort.
|
||||||
|
//
|
||||||
|
// If ties still exist within an HTTPRoute, matching precedence MUST
|
||||||
|
// be granted to the FIRST matching rule (in list order) with a match
|
||||||
|
// meeting the above criteria.
|
||||||
|
})
|
||||||
|
}
|
|
@ -0,0 +1,494 @@
|
||||||
|
// Copyright (c) HashiCorp, Inc.
|
||||||
|
// SPDX-License-Identifier: BUSL-1.1
|
||||||
|
|
||||||
|
package routes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"sort"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"golang.org/x/exp/slices"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/internal/mesh/internal/types"
|
||||||
|
"github.com/hashicorp/consul/internal/resource"
|
||||||
|
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||||
|
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
|
||||||
|
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGammaInitialSortWrappedRoutes(t *testing.T) {
|
||||||
|
// These generations were created with ulid.Make().String() at 1 second
|
||||||
|
// intervals. They are in ascending order.
|
||||||
|
generations := []string{
|
||||||
|
"01H8HGKHXCAJY7TRNJ06JGEZP9",
|
||||||
|
"01H8HGKJWMEYWKZATG44QZF0G7",
|
||||||
|
"01H8HGKKVW2339KFHAFXMEFRHP",
|
||||||
|
"01H8HGKMV45XAC2W1KCQ7KJ00J",
|
||||||
|
"01H8HGKNTCB3ZYN16AZ2KSXN54",
|
||||||
|
"01H8HGKPSN9V3QQXTQVZ1EQM2T",
|
||||||
|
"01H8HGKQRXMR8NY662AC520CDE",
|
||||||
|
"01H8HGKRR5C41RHGXY7H3N0JYX",
|
||||||
|
"01H8HGKSQDNSQ54VN86SBTR149",
|
||||||
|
"01H8HGKTPPRWFXWHKV90M2WW5R",
|
||||||
|
}
|
||||||
|
require.True(t, sort.StringsAreSorted(generations))
|
||||||
|
|
||||||
|
nodeID := func(node *inputRouteNode) string {
|
||||||
|
return resource.IDToString(node.OriginalResource.Id) + ":" + node.OriginalResource.Generation
|
||||||
|
}
|
||||||
|
|
||||||
|
nodeIDSlice := func(nodes []*inputRouteNode) []string {
|
||||||
|
var out []string
|
||||||
|
for _, node := range nodes {
|
||||||
|
out = append(out, nodeID(node))
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// newNode will only populate the fields that the sorting function cares
|
||||||
|
// about.
|
||||||
|
newNode := func(tenancy *pbresource.Tenancy, name string, gen string) *inputRouteNode {
|
||||||
|
id := rtest.Resource(types.HTTPRouteType, name).
|
||||||
|
WithTenancy(tenancy).
|
||||||
|
ID()
|
||||||
|
|
||||||
|
res := rtest.ResourceID(id).
|
||||||
|
WithGeneration(gen).
|
||||||
|
WithData(t, &pbmesh.HTTPRoute{}).
|
||||||
|
Build()
|
||||||
|
|
||||||
|
return &inputRouteNode{
|
||||||
|
OriginalResource: res,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type testcase struct {
|
||||||
|
routes []*inputRouteNode
|
||||||
|
}
|
||||||
|
|
||||||
|
run := func(t *testing.T, tc testcase) {
|
||||||
|
expect := nodeIDSlice(tc.routes)
|
||||||
|
|
||||||
|
if len(tc.routes) > 1 {
|
||||||
|
// Randomly permute it
|
||||||
|
in := tc.routes
|
||||||
|
for {
|
||||||
|
rand.Shuffle(len(in), func(i, j int) {
|
||||||
|
in[i], in[j] = in[j], in[i]
|
||||||
|
})
|
||||||
|
curr := nodeIDSlice(tc.routes)
|
||||||
|
|
||||||
|
if slices.Equal(expect, curr) {
|
||||||
|
// Loop until the shuffle was actually different.
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
gammaInitialSortWrappedRoutes(tc.routes)
|
||||||
|
|
||||||
|
got := nodeIDSlice(tc.routes)
|
||||||
|
|
||||||
|
require.Equal(t, expect, got)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order:
|
||||||
|
// 1. generation older first
|
||||||
|
// 2. tenancy namespace A first
|
||||||
|
// 3. object name A first
	cases := map[string]testcase{
		"empty": {},
		"one": {
			routes: []*inputRouteNode{
				newNode(defaultTenancy(), "foo", generations[0]),
			},
		},
		"two: by generation": {
			routes: []*inputRouteNode{
				newNode(defaultTenancy(), "foo", generations[0]),
				newNode(defaultTenancy(), "foo", generations[1]),
			},
		},
		"two: by namespace": {
			routes: []*inputRouteNode{
				newNode(&pbresource.Tenancy{Namespace: "aaa"}, "foo", generations[0]),
				newNode(&pbresource.Tenancy{Namespace: "bbb"}, "foo", generations[0]),
			},
		},
		"two: by name": {
			routes: []*inputRouteNode{
				newNode(defaultTenancy(), "bar", generations[0]),
				newNode(defaultTenancy(), "foo", generations[0]),
			},
		},
		"two: by name with empty namespace": {
			routes: []*inputRouteNode{
				newNode(nsTenancy(""), "bar", generations[0]),
				newNode(nsTenancy(""), "foo", generations[0]),
			},
		},
		"four: by generation": {
			routes: []*inputRouteNode{
				newNode(defaultTenancy(), "foo", generations[0]),
				newNode(defaultTenancy(), "foo", generations[1]),
				newNode(defaultTenancy(), "foo", generations[2]),
				newNode(defaultTenancy(), "foo", generations[3]),
			},
		},
		"four: by name with some empty namespaces": {
			routes: []*inputRouteNode{
				newNode(nsTenancy("aaa"), "foo", generations[0]),
				newNode(nsTenancy("bbb"), "foo", generations[0]),
				newNode(&pbresource.Tenancy{}, "bar", generations[0]),
				newNode(&pbresource.Tenancy{}, "foo", generations[0]),
			},
		},
		"mixed": {
			// Seed this with data such that each later sort criteria should
			// want to be more to the top than an earlier criteria would allow,
			// to prove that the sort is going to have to shake out the way the
			// algorithm wants it to for maximum algorithm exercise.
			routes: []*inputRouteNode{
				// gen beats name
				newNode(defaultTenancy(), "zzz", generations[0]),
				newNode(defaultTenancy(), "aaa", generations[1]),
				// gen beats ns
				newNode(nsTenancy("zzz"), "foo", generations[2]),
				newNode(nsTenancy("aaa"), "foo", generations[3]),
				// ns beats name
				newNode(nsTenancy("aaa"), "zzz", generations[4]),
				newNode(nsTenancy("bbb"), "aaa", generations[5]),
			},
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			run(t, tc)
		})
	}
}

func TestGammaSortHTTPRouteRules(t *testing.T) {
	type testcase struct {
		rules []*pbmesh.ComputedHTTPRouteRule
	}

	// In this test we will use the 'backend target' field to track the rule
	// identity to make assertions easy.

	targetSlice := func(rules []*pbmesh.ComputedHTTPRouteRule) []string {
		var out []string
		for _, rule := range rules {
			out = append(out, rule.BackendRefs[0].BackendTarget)
		}
		return out
	}

	run := func(t *testing.T, tc testcase) {
		expect := targetSlice(tc.rules)

		if len(tc.rules) > 1 {
			// Randomly permute it
			in := tc.rules
			for {
				rand.Shuffle(len(in), func(i, j int) {
					in[i], in[j] = in[j], in[i]
				})
				curr := targetSlice(tc.rules)

				if slices.Equal(expect, curr) {
					// Loop until the shuffle was actually different.
				} else {
					break
				}

			}
		}

		gammaSortHTTPRouteRules(tc.rules)

		got := targetSlice(tc.rules)

		require.Equal(t, expect, got)
	}

	newRule := func(target string, m ...*pbmesh.HTTPRouteMatch) *pbmesh.ComputedHTTPRouteRule {
		return &pbmesh.ComputedHTTPRouteRule{
			Matches: m,
			BackendRefs: []*pbmesh.ComputedHTTPBackendRef{{
				BackendTarget: target,
			}},
		}
	}

	// Rules:
	// 1. exact path match exists
	// 2. prefix match exists
	// 3. prefix match has lots of characters
	// 4. has method match
	// 5. has lots of header matches
	// 6. has lots of query param matches
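	//
	// This precedence appears to mirror the Gateway API HTTPRouteRule match
	// ordering: for example, a rule with an exact match on "/healthz" should
	// sort ahead of a rule with a prefix match on "/api/v1", which in turn
	// sorts ahead of a rule that only matches on a header or query parameter.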
	cases := map[string]testcase{
		"empty": {},
		"one": {
			rules: []*pbmesh.ComputedHTTPRouteRule{
				newRule("r1", &pbmesh.HTTPRouteMatch{
					//
				}),
			},
		},
		"two: by exact path match exists": {
			rules: []*pbmesh.ComputedHTTPRouteRule{
				newRule("r1", &pbmesh.HTTPRouteMatch{
					Path: &pbmesh.HTTPPathMatch{
						Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT,
						Value: "/",
					},
				}),
				newRule("r2", &pbmesh.HTTPRouteMatch{
					Path: &pbmesh.HTTPPathMatch{
						Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX,
						Value: "/",
					},
				}),
			},
		},
		"two: by prefix path match exists": {
			rules: []*pbmesh.ComputedHTTPRouteRule{
				newRule("r1", &pbmesh.HTTPRouteMatch{
					Path: &pbmesh.HTTPPathMatch{
						Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX,
						Value: "/",
					},
				}),
				newRule("r2", &pbmesh.HTTPRouteMatch{
					Path: &pbmesh.HTTPPathMatch{
						Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_REGEX,
						Value: "/[a]",
					},
				}),
			},
		},
		"two: by prefix path match length": {
			rules: []*pbmesh.ComputedHTTPRouteRule{
				newRule("r1", &pbmesh.HTTPRouteMatch{
					Path: &pbmesh.HTTPPathMatch{
						Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX,
						Value: "/longer",
					},
				}),
				newRule("r2", &pbmesh.HTTPRouteMatch{
					Path: &pbmesh.HTTPPathMatch{
						Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX,
						Value: "/short",
					},
				}),
			},
		},
		"two: by method match exists": {
			rules: []*pbmesh.ComputedHTTPRouteRule{
				newRule("r1", &pbmesh.HTTPRouteMatch{
					Method: "GET",
				}),
				newRule("r2", &pbmesh.HTTPRouteMatch{
					Headers: []*pbmesh.HTTPHeaderMatch{{
						Type:  pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT,
						Name:  "x-blah",
						Value: "foo",
					}},
				}),
			},
		},
		"two: by header match quantity": {
			rules: []*pbmesh.ComputedHTTPRouteRule{
				newRule("r1", &pbmesh.HTTPRouteMatch{
					Headers: []*pbmesh.HTTPHeaderMatch{
						{
							Type:  pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT,
							Name:  "x-blah",
							Value: "foo",
						},
						{
							Type:  pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT,
							Name:  "x-other",
							Value: "bar",
						},
					},
				}),
				newRule("r2", &pbmesh.HTTPRouteMatch{
					Headers: []*pbmesh.HTTPHeaderMatch{{
						Type:  pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT,
						Name:  "x-blah",
						Value: "foo",
					}},
				}),
			},
		},
		"two: by query param match quantity": {
			rules: []*pbmesh.ComputedHTTPRouteRule{
				newRule("r1", &pbmesh.HTTPRouteMatch{
					QueryParams: []*pbmesh.HTTPQueryParamMatch{
						{
							Type:  pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_EXACT,
							Name:  "foo",
							Value: "1",
						},
						{
							Type:  pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_EXACT,
							Name:  "bar",
							Value: "1",
						},
					},
				}),
				newRule("r2", &pbmesh.HTTPRouteMatch{
					QueryParams: []*pbmesh.HTTPQueryParamMatch{{
						Type:  pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_EXACT,
						Name:  "foo",
						Value: "1",
					}},
				}),
			},
		},
		"mixed: has path exact beats has path prefix when both are present": {
			rules: []*pbmesh.ComputedHTTPRouteRule{
				newRule("r1",
					&pbmesh.HTTPRouteMatch{
						Path: &pbmesh.HTTPPathMatch{
							Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_EXACT,
							Value: "/",
						},
					},
					&pbmesh.HTTPRouteMatch{
						Path: &pbmesh.HTTPPathMatch{
							Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX,
							Value: "/short",
						},
					},
				),
				newRule("r2", &pbmesh.HTTPRouteMatch{
					Path: &pbmesh.HTTPPathMatch{
						Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX,
						Value: "/longer",
					},
				}),
			},
		},
		"mixed: longer path prefix beats shorter when both are present": {
			rules: []*pbmesh.ComputedHTTPRouteRule{
				newRule("r1",
					&pbmesh.HTTPRouteMatch{
						Path: &pbmesh.HTTPPathMatch{
							Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX,
							Value: "/longer",
						},
					},
					&pbmesh.HTTPRouteMatch{
						Method: "GET",
					},
				),
				newRule("r2", &pbmesh.HTTPRouteMatch{
					Path: &pbmesh.HTTPPathMatch{
						Type:  pbmesh.PathMatchType_PATH_MATCH_TYPE_PREFIX,
						Value: "/short",
					},
				}),
			},
		},
		"mixed: has method match beats header match when both are present": {
			rules: []*pbmesh.ComputedHTTPRouteRule{
				newRule("r1",
					&pbmesh.HTTPRouteMatch{
						Method: "GET",
					},
					&pbmesh.HTTPRouteMatch{
						Headers: []*pbmesh.HTTPHeaderMatch{{
							Type:  pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT,
							Name:  "x-blah",
							Value: "foo",
						}},
					},
				),
				newRule("r2", &pbmesh.HTTPRouteMatch{
					Headers: []*pbmesh.HTTPHeaderMatch{
						{
							Type:  pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT,
							Name:  "x-blah",
							Value: "foo",
						},
						{
							Type:  pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT,
							Name:  "x-other",
							Value: "bar",
						},
					},
				}),
			},
		},
		"mixed: header match beats query param match when both are present": {
			rules: []*pbmesh.ComputedHTTPRouteRule{
				newRule("r1", &pbmesh.HTTPRouteMatch{
					Headers: []*pbmesh.HTTPHeaderMatch{
						{
							Type:  pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT,
							Name:  "x-blah",
							Value: "foo",
						},
						{
							Type:  pbmesh.HeaderMatchType_HEADER_MATCH_TYPE_EXACT,
							Name:  "x-other",
							Value: "bar",
						},
					},
					QueryParams: []*pbmesh.HTTPQueryParamMatch{{
						Type:  pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_EXACT,
						Name:  "foo",
						Value: "1",
					}},
				}),
				newRule("r2", &pbmesh.HTTPRouteMatch{
					QueryParams: []*pbmesh.HTTPQueryParamMatch{
						{
							Type:  pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_EXACT,
							Name:  "foo",
							Value: "1",
						},
						{
							Type:  pbmesh.QueryParamMatchType_QUERY_PARAM_MATCH_TYPE_EXACT,
							Name:  "bar",
							Value: "1",
						},
					},
				}),
			},
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			run(t, tc)
		})
	}
}

func newID(typ *pbresource.Type, tenancy *pbresource.Tenancy, name string) *pbresource.ID {
	return rtest.Resource(typ, name).
		WithTenancy(tenancy).
		ID()
}

func nsTenancy(ns string) *pbresource.Tenancy {
	return &pbresource.Tenancy{
		Partition: "default",
		Namespace: ns,
		PeerName:  "local",
	}
}

func defaultTenancy() *pbresource.Tenancy {
	return nsTenancy("default")
}
@@ -0,0 +1,168 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package routes

import (
	"fmt"

	"github.com/hashicorp/consul/internal/resource"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

const (
	StatusKey               = "consul.io/routes-controller"
	StatusConditionAccepted = "accepted"

	// conditions on xRoutes

	XRouteOKReason  = "Ok"
	XRouteOKMessage = "xRoute was accepted"

	MissingParentRefReason  = "MissingParentRef"
	MissingBackendRefReason = "MissingBackendRef"

	ParentRefOutsideMeshReason  = "ParentRefOutsideMesh"
	BackendRefOutsideMeshReason = "BackendRefOutsideMesh"

	ParentRefUsingMeshPortReason  = "ParentRefUsingMeshPort"
	BackendRefUsingMeshPortReason = "BackendRefUsingMeshPort"

	UnknownParentRefPortReason  = "UnknownParentRefPort"
	UnknownBackendRefPortReason = "UnknownBackendRefPort"

	ConflictNotBoundToParentRefReason = "ConflictNotBoundToParentRef"
)
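
// Each failure reason above is paired with a Condition* helper below that
// renders a human-readable message. As a rough illustration, a missing
// backend ref yields a condition shaped like:
//
//	&pbresource.Condition{
//		Type:    StatusConditionAccepted,
//		State:   pbresource.Condition_STATE_FALSE,
//		Reason:  MissingBackendRefReason,
//		Message: `service for backend ref "..." does not exist`,
//	}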

var (
	ConditionXRouteOK = &pbresource.Condition{
		Type:    StatusConditionAccepted,
		State:   pbresource.Condition_STATE_TRUE,
		Reason:  XRouteOKReason,
		Message: XRouteOKMessage,
	}
)

func ConditionParentRefUsingMeshPort(ref *pbresource.Reference, port string) *pbresource.Condition {
	return conditionRefUsingMeshPort(ref, port, false)
}

func ConditionBackendRefUsingMeshPort(ref *pbresource.Reference, port string) *pbresource.Condition {
	return conditionRefUsingMeshPort(ref, port, true)
}

func conditionRefUsingMeshPort(ref *pbresource.Reference, port string, forBackend bool) *pbresource.Condition {
	reason := ParentRefUsingMeshPortReason
	short := "parent"
	if forBackend {
		reason = BackendRefUsingMeshPortReason
		short = "backend"
	}
	return &pbresource.Condition{
		Type:   StatusConditionAccepted,
		State:  pbresource.Condition_STATE_FALSE,
		Reason: reason,
		Message: fmt.Sprintf(
			"service for %s ref %q uses port %q which is a special unroutable mesh port",
			short,
			resource.ReferenceToString(ref),
			port,
		),
	}
}

func ConditionMissingParentRef(ref *pbresource.Reference) *pbresource.Condition {
	return conditionMissingRef(ref, false)
}

func ConditionMissingBackendRef(ref *pbresource.Reference) *pbresource.Condition {
	return conditionMissingRef(ref, true)
}

func conditionMissingRef(ref *pbresource.Reference, forBackend bool) *pbresource.Condition {
	reason := MissingParentRefReason
	short := "parent"
	if forBackend {
		reason = MissingBackendRefReason
		short = "backend"
	}
	return &pbresource.Condition{
		Type:   StatusConditionAccepted,
		State:  pbresource.Condition_STATE_FALSE,
		Reason: reason,
		Message: fmt.Sprintf(
			"service for %s ref %q does not exist",
			short,
			resource.ReferenceToString(ref),
		),
	}
}

func ConditionParentRefOutsideMesh(ref *pbresource.Reference) *pbresource.Condition {
	return conditionRefOutsideMesh(ref, false)
}

func ConditionBackendRefOutsideMesh(ref *pbresource.Reference) *pbresource.Condition {
	return conditionRefOutsideMesh(ref, true)
}

func conditionRefOutsideMesh(ref *pbresource.Reference, forBackend bool) *pbresource.Condition {
	reason := ParentRefOutsideMeshReason
	short := "parent"
	if forBackend {
		reason = BackendRefOutsideMeshReason
		short = "backend"
	}
	return &pbresource.Condition{
		Type:   StatusConditionAccepted,
		State:  pbresource.Condition_STATE_FALSE,
		Reason: reason,
		Message: fmt.Sprintf(
			"service for %s ref %q does not expose a mesh port",
			short,
			resource.ReferenceToString(ref),
		),
	}
}

func ConditionUnknownParentRefPort(ref *pbresource.Reference, port string) *pbresource.Condition {
	return conditionUnknownRefPort(ref, port, false)
}

func ConditionUnknownBackendRefPort(ref *pbresource.Reference, port string) *pbresource.Condition {
	return conditionUnknownRefPort(ref, port, true)
}

func conditionUnknownRefPort(ref *pbresource.Reference, port string, forBackend bool) *pbresource.Condition {
	reason := UnknownParentRefPortReason
	short := "parent"
	if forBackend {
		reason = UnknownBackendRefPortReason
		short = "backend"
	}
	return &pbresource.Condition{
		Type:   StatusConditionAccepted,
		State:  pbresource.Condition_STATE_FALSE,
		Reason: reason,
		Message: fmt.Sprintf(
			"service for %s ref %q does not expose port %q",
			short,
			resource.ReferenceToString(ref),
			port,
		),
	}
}

func ConditionConflictNotBoundToParentRef(ref *pbresource.Reference, port string, realType *pbresource.Type) *pbresource.Condition {
	return &pbresource.Condition{
		Type:   StatusConditionAccepted,
		State:  pbresource.Condition_STATE_FALSE,
		Reason: ConflictNotBoundToParentRefReason,
		Message: fmt.Sprintf(
			"Existing routes of type %q are bound to parent ref %q on port %q preventing this from binding",
			resource.TypeToString(realType),
			resource.ReferenceToString(ref),
			port,
		),
	}
}
@@ -0,0 +1,23 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package routes

import (
	"google.golang.org/protobuf/proto"
)

func protoClone[T proto.Message](v T) T {
	return proto.Clone(v).(T)
}
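
// For example (an illustrative sketch only), cloning before mutating avoids
// aliasing data that may still be referenced elsewhere:
//
//	dup := protoClone(route) // e.g. a *pbmesh.HTTPRoute in yields a *pbmesh.HTTPRoute out
//	dup.Rules = nil          // the original route is left untouched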

func protoSliceClone[T proto.Message](in []T) []T {
	if in == nil {
		return nil
	}
	out := make([]T, 0, len(in))
	for _, v := range in {
		out = append(out, protoClone[T](v))
	}
	return out
}
@@ -0,0 +1,47 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package xroutemapper

import (
	"github.com/hashicorp/consul/internal/resource"
	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

func parentRefSliceToRefSlice(parentRefs []*pbmesh.ParentReference) []resource.ReferenceOrID {
	if parentRefs == nil {
		return nil
	}
	parents := make([]resource.ReferenceOrID, 0, len(parentRefs))
	for _, parentRef := range parentRefs {
		if parentRef.Ref != nil {
			parents = append(parents, parentRef.Ref)
		}
	}
	return parents
}

func backendRefSliceToRefSlice(backendRefs []*pbmesh.BackendReference) []resource.ReferenceOrID {
	if backendRefs == nil {
		return nil
	}
	backends := make([]resource.ReferenceOrID, 0, len(backendRefs))
	for _, backendRef := range backendRefs {
		if backendRef.Ref != nil {
			backends = append(backends, backendRef.Ref)
		}
	}
	return backends
}
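
// sliceReplaceType returns a copy of the list with each ID's type swapped to
// typ; for example, MapService below uses it to turn FailoverPolicy IDs into
// their name-aligned Service IDs.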
func sliceReplaceType(list []*pbresource.ID, typ *pbresource.Type) []*pbresource.ID {
	if list == nil {
		return nil
	}
	out := make([]*pbresource.ID, 0, len(list))
	for _, id := range list {
		out = append(out, resource.ReplaceType(typ, id))
	}
	return out
}
@@ -0,0 +1,301 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package xroutemapper

import (
	"context"
	"fmt"

	"github.com/hashicorp/consul/internal/catalog"
	"github.com/hashicorp/consul/internal/controller"
	"github.com/hashicorp/consul/internal/mesh/internal/types"
	"github.com/hashicorp/consul/internal/resource"
	"github.com/hashicorp/consul/internal/resource/mappers/bimapper"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

// Mapper tracks the following relationships:
//
// - xRoute <-> ParentRef Service
// - xRoute <-> BackendRef Service
// - FailoverPolicy <-> DestRef Service
//
// It is the job of the controller, loader, and mapper to keep the mappings up
// to date whenever new data is loaded. Notably because the dep mapper events
// do not signal when data is deleted, it is the job of the reconcile load of
// the data causing the event to notice something has been deleted and to
// untrack it here.
type Mapper struct {
	httpRouteParentMapper *bimapper.Mapper
	grpcRouteParentMapper *bimapper.Mapper
	tcpRouteParentMapper  *bimapper.Mapper

	httpRouteBackendMapper *bimapper.Mapper
	grpcRouteBackendMapper *bimapper.Mapper
	tcpRouteBackendMapper  *bimapper.Mapper

	failMapper catalog.FailoverPolicyMapper
}

// New creates a new Mapper.
func New() *Mapper {
	return &Mapper{
		httpRouteParentMapper: bimapper.New(types.HTTPRouteType, catalog.ServiceType),
		grpcRouteParentMapper: bimapper.New(types.GRPCRouteType, catalog.ServiceType),
		tcpRouteParentMapper:  bimapper.New(types.TCPRouteType, catalog.ServiceType),

		httpRouteBackendMapper: bimapper.New(types.HTTPRouteType, catalog.ServiceType),
		grpcRouteBackendMapper: bimapper.New(types.GRPCRouteType, catalog.ServiceType),
		tcpRouteBackendMapper:  bimapper.New(types.TCPRouteType, catalog.ServiceType),

		failMapper: catalog.NewFailoverPolicyMapper(),
	}
}
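
// A controller would typically hand the Map* methods below to its watches as
// dependency mappers. A rough sketch of that wiring (a hypothetical sketch;
// the exact builder calls may differ) might look like:
//
//	m := xroutemapper.New()
//	ctrl := controller.ForType(types.ComputedRoutesType).
//		WithWatch(types.HTTPRouteType, m.MapHTTPRoute).
//		WithWatch(types.GRPCRouteType, m.MapGRPCRoute).
//		WithWatch(types.TCPRouteType, m.MapTCPRoute).
//		WithWatch(catalog.ServiceType, m.MapService)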

func (m *Mapper) getRouteBiMappers(typ *pbresource.Type) (parent, backend *bimapper.Mapper) {
	switch {
	case resource.EqualType(types.HTTPRouteType, typ):
		return m.httpRouteParentMapper, m.httpRouteBackendMapper
	case resource.EqualType(types.GRPCRouteType, typ):
		return m.grpcRouteParentMapper, m.grpcRouteBackendMapper
	case resource.EqualType(types.TCPRouteType, typ):
		return m.tcpRouteParentMapper, m.tcpRouteBackendMapper
	default:
		panic("unknown xroute type: " + resource.TypeToString(typ))
	}
}

func (m *Mapper) walkRouteParentBiMappers(fn func(bm *bimapper.Mapper)) {
	for _, bm := range []*bimapper.Mapper{
		m.httpRouteParentMapper,
		m.grpcRouteParentMapper,
		m.tcpRouteParentMapper,
	} {
		fn(bm)
	}
}

func (m *Mapper) walkRouteBackendBiMappers(fn func(bm *bimapper.Mapper)) {
	for _, bm := range []*bimapper.Mapper{
		m.httpRouteBackendMapper,
		m.grpcRouteBackendMapper,
		m.tcpRouteBackendMapper,
	} {
		fn(bm)
	}
}

// TrackXRoute indexes the xRoute->parentRefService and
// xRoute->backendRefService relationship.
func (m *Mapper) TrackXRoute(id *pbresource.ID, xroute types.XRouteData) {
	parent, backend := m.getRouteBiMappers(id.Type)
	if parent == nil || backend == nil {
		return
	}

	parentRefs := parentRefSliceToRefSlice(xroute.GetParentRefs())
	backendRefs := backendRefSliceToRefSlice(xroute.GetUnderlyingBackendRefs())

	parent.TrackItem(id, parentRefs)
	backend.TrackItem(id, backendRefs)
}

// UntrackXRoute undoes TrackXRoute.
func (m *Mapper) UntrackXRoute(id *pbresource.ID) {
	parent, backend := m.getRouteBiMappers(id.Type)
	if parent == nil || backend == nil {
		return
	}

	parent.UntrackItem(id)
	backend.UntrackItem(id)
}

// RouteIDsByParentServiceRef returns xRoute IDs that have a direct parentRef link to
// the provided service.
func (m *Mapper) RouteIDsByParentServiceRef(ref *pbresource.Reference) []*pbresource.ID {
	var out []*pbresource.ID
	m.walkRouteParentBiMappers(func(bm *bimapper.Mapper) {
		got := bm.ItemsForLink(resource.IDFromReference(ref))
		out = append(out, got...)
	})
	return out
}

// RouteIDsByBackendServiceRef returns xRoute IDs that have a direct backendRef
// link to the provided service.
func (m *Mapper) RouteIDsByBackendServiceRef(ref *pbresource.Reference) []*pbresource.ID {
	var out []*pbresource.ID
	m.walkRouteBackendBiMappers(func(bm *bimapper.Mapper) {
		got := bm.ItemsForLink(resource.IDFromReference(ref))
		out = append(out, got...)
	})
	return out
}

// ParentServiceRefsByRouteID is the opposite of RouteIDsByParentServiceRef.
func (m *Mapper) ParentServiceRefsByRouteID(item *pbresource.ID) []*pbresource.Reference {
	parent, _ := m.getRouteBiMappers(item.Type)
	if parent == nil {
		return nil
	}
	return parent.LinksForItem(item)
}

// BackendServiceRefsByRouteID is the opposite of RouteIDsByBackendServiceRef.
func (m *Mapper) BackendServiceRefsByRouteID(item *pbresource.ID) []*pbresource.Reference {
	_, backend := m.getRouteBiMappers(item.Type)
	if backend == nil {
		return nil
	}
	return backend.LinksForItem(item)
}

// MapHTTPRoute will map HTTPRoute changes to ComputedRoutes changes.
func (m *Mapper) MapHTTPRoute(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
	return mapXRouteToComputedRoutes[*pbmesh.HTTPRoute](res, m)
}

// MapGRPCRoute will map GRPCRoute changes to ComputedRoutes changes.
func (m *Mapper) MapGRPCRoute(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
	return mapXRouteToComputedRoutes[*pbmesh.GRPCRoute](res, m)
}

// MapTCPRoute will map TCPRoute changes to ComputedRoutes changes.
func (m *Mapper) MapTCPRoute(_ context.Context, _ controller.Runtime, res *pbresource.Resource) ([]controller.Request, error) {
	return mapXRouteToComputedRoutes[*pbmesh.TCPRoute](res, m)
}

// mapXRouteToComputedRoutes will map xRoute changes to ComputedRoutes changes.
func mapXRouteToComputedRoutes[T types.XRouteData](res *pbresource.Resource, m *Mapper) ([]controller.Request, error) {
	dec, err := resource.Decode[T](res)
	if err != nil {
		return nil, fmt.Errorf("error unmarshalling xRoute: %w", err)
	}

	route := dec.Data

	m.TrackXRoute(res.Id, route)

	return controller.MakeRequests(
		types.ComputedRoutesType,
		parentRefSliceToRefSlice(route.GetParentRefs()),
	), nil
}

func (m *Mapper) MapFailoverPolicy(
	_ context.Context,
	_ controller.Runtime,
	res *pbresource.Resource,
) ([]controller.Request, error) {
	if !types.IsFailoverPolicyType(res.Id.Type) {
		return nil, fmt.Errorf("type is not a failover policy type: %s", res.Id.Type)
	}

	dec, err := resource.Decode[*pbcatalog.FailoverPolicy](res)
	if err != nil {
		return nil, fmt.Errorf("error unmarshalling failover policy: %w", err)
	}

	m.failMapper.TrackFailover(dec)

	// Since this is name-aligned, just switch the type and find routes that
	// will route any traffic to this destination service.
	svcID := resource.ReplaceType(catalog.ServiceType, res.Id)

	return m.mapXRouteDirectServiceRefToComputedRoutesByID(svcID)
}

func (m *Mapper) TrackFailoverPolicy(failover *types.DecodedFailoverPolicy) {
	if failover != nil {
		m.failMapper.TrackFailover(failover)
	}
}

func (m *Mapper) UntrackFailoverPolicy(failoverPolicyID *pbresource.ID) {
	m.failMapper.UntrackFailover(failoverPolicyID)
}

func (m *Mapper) MapDestinationPolicy(
	_ context.Context,
	_ controller.Runtime,
	res *pbresource.Resource,
) ([]controller.Request, error) {
	if !types.IsDestinationPolicyType(res.Id.Type) {
		return nil, fmt.Errorf("type is not a destination policy type: %s", res.Id.Type)
	}

	// Since this is name-aligned, just switch the type and find routes that
	// will route any traffic to this destination service.
	svcID := resource.ReplaceType(catalog.ServiceType, res.Id)

	return m.mapXRouteDirectServiceRefToComputedRoutesByID(svcID)
}

func (m *Mapper) MapService(
	_ context.Context,
	_ controller.Runtime,
	res *pbresource.Resource,
) ([]controller.Request, error) {
	// Ultimately we want to wake up a ComputedRoutes if either of the
	// following exist:
	//
	// 1. xRoute[parentRef=OUTPUT_EVENT; backendRef=INPUT_EVENT]
	// 2. xRoute[parentRef=OUTPUT_EVENT; backendRef=SOMETHING], FailoverPolicy[name=SOMETHING, destRef=INPUT_EVENT]
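	//
	// As a concrete illustration (hypothetical names): if FailoverPolicy "api"
	// lists service "zim" as a destination, and an xRoute has parentRef "web"
	// with backendRef "api", then an event for service "zim" should request
	// ComputedRoutes "zim" (case 1, name-aligned) as well as "api" and "web"
	// (case 2, via the failover policy).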

	// (case 2) First find all failover policies that have a reference to our input service.
	failPolicyIDs := m.failMapper.FailoverIDsByService(res.Id)
	effectiveServiceIDs := sliceReplaceType(failPolicyIDs, catalog.ServiceType)

	// (case 1) Do the direct mapping also.
	effectiveServiceIDs = append(effectiveServiceIDs, res.Id)

	var reqs []controller.Request
	for _, svcID := range effectiveServiceIDs {
		got, err := m.mapXRouteDirectServiceRefToComputedRoutesByID(svcID)
		if err != nil {
			return nil, err
		}
		reqs = append(reqs, got...)
	}

	return reqs, nil
}

// NOTE: this function does not interrogate down into failover policies
func (m *Mapper) mapXRouteDirectServiceRefToComputedRoutesByID(svcID *pbresource.ID) ([]controller.Request, error) {
	if !types.IsServiceType(svcID.Type) {
		return nil, fmt.Errorf("type is not a service type: %s", svcID.Type)
	}

	// return 1 hit for the name aligned mesh config
	primaryReq := controller.Request{
		ID: resource.ReplaceType(types.ComputedRoutesType, svcID),
	}

	svcRef := resource.Reference(svcID, "")

	// Find all routes with an explicit backend ref to this service.
	//
	// the "name aligned" inclusion above should handle the implicit default
	// destination implied by a parent ref without us having to do much more.
	routeIDs := m.RouteIDsByBackendServiceRef(svcRef)

	out := make([]controller.Request, 0, 1+len(routeIDs)) // estimated
	out = append(out, primaryReq)

	for _, routeID := range routeIDs {
		// Find all parent refs of this route.
		svcRefs := m.ParentServiceRefsByRouteID(routeID)

		out = append(out, controller.MakeRequests(
			types.ComputedRoutesType,
			svcRefs,
		)...)
	}

	return out, nil
}
@@ -0,0 +1,617 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package xroutemapper

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"

	"github.com/hashicorp/consul/internal/catalog"
	"github.com/hashicorp/consul/internal/controller"
	"github.com/hashicorp/consul/internal/mesh/internal/types"
	"github.com/hashicorp/consul/internal/resource"
	rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v1alpha1"
	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
	"github.com/hashicorp/consul/proto-public/pbresource"
	"github.com/hashicorp/consul/proto/private/prototest"
	"github.com/hashicorp/consul/sdk/testutil"
)

func TestMapper_HTTPRoute_Tracking(t *testing.T) {
	testMapper_Tracking(t, types.HTTPRouteType, func(t *testing.T, parentRefs []*pbmesh.ParentReference, backendRefs []*pbmesh.BackendReference) proto.Message {
		route := &pbmesh.HTTPRoute{
			ParentRefs: parentRefs,
		}
		for _, backendRef := range backendRefs {
			route.Rules = append(route.Rules, &pbmesh.HTTPRouteRule{
				BackendRefs: []*pbmesh.HTTPBackendRef{
					{BackendRef: backendRef},
				},
			})
		}
		return route
	})
}

func TestMapper_GRPCRoute_Tracking(t *testing.T) {
	testMapper_Tracking(t, types.GRPCRouteType, func(t *testing.T, parentRefs []*pbmesh.ParentReference, backendRefs []*pbmesh.BackendReference) proto.Message {
		route := &pbmesh.GRPCRoute{
			ParentRefs: parentRefs,
		}
		for _, backendRef := range backendRefs {
			route.Rules = append(route.Rules, &pbmesh.GRPCRouteRule{
				BackendRefs: []*pbmesh.GRPCBackendRef{
					{BackendRef: backendRef},
				},
			})
		}
		return route
	})
}

func TestMapper_TCPRoute_Tracking(t *testing.T) {
	testMapper_Tracking(t, types.TCPRouteType, func(t *testing.T, parentRefs []*pbmesh.ParentReference, backendRefs []*pbmesh.BackendReference) proto.Message {
		route := &pbmesh.TCPRoute{
			ParentRefs: parentRefs,
		}
		for _, backendRef := range backendRefs {
			route.Rules = append(route.Rules, &pbmesh.TCPRouteRule{
				BackendRefs: []*pbmesh.TCPBackendRef{
					{BackendRef: backendRef},
				},
			})
		}
		return route
	})
}
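
// testMapper_Tracking drives a single xRoute type through the Mapper: it
// tracks services, destination policies, failover policies, and routes, then
// asserts which ComputedRoutes requests each input event should generate as
// the relationships change.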
func testMapper_Tracking(t *testing.T, typ *pbresource.Type, newRoute func(t *testing.T, parentRefs []*pbmesh.ParentReference, backendRefs []*pbmesh.BackendReference) proto.Message) {
	registry := resource.NewRegistry()
	types.Register(registry)
	catalog.RegisterTypes(registry)

	newService := func(name string) *pbresource.Resource {
		svc := rtest.Resource(catalog.ServiceType, name).
			WithTenancy(resource.DefaultNamespacedTenancy()).
			WithData(t, &pbcatalog.Service{}).
			Build()
		rtest.ValidateAndNormalize(t, registry, svc)
		return svc
	}

	newDestPolicy := func(name string, dur time.Duration) *pbresource.Resource {
		policy := rtest.Resource(types.DestinationPolicyType, name).
			WithTenancy(resource.DefaultNamespacedTenancy()).
			WithData(t, &pbmesh.DestinationPolicy{
				PortConfigs: map[string]*pbmesh.DestinationConfig{
					"http": {
						ConnectTimeout: durationpb.New(dur),
					},
				},
			}).Build()
		rtest.ValidateAndNormalize(t, registry, policy)
		return policy
	}

	newFailPolicy := func(name string, refs ...*pbresource.Reference) *pbresource.Resource {
		var dests []*pbcatalog.FailoverDestination
		for _, ref := range refs {
			dests = append(dests, &pbcatalog.FailoverDestination{
				Ref: ref,
			})
		}
		policy := rtest.Resource(catalog.FailoverPolicyType, name).
			WithTenancy(resource.DefaultNamespacedTenancy()).
			WithData(t, &pbcatalog.FailoverPolicy{
				Config: &pbcatalog.FailoverConfig{
					Destinations: dests,
				},
			}).Build()
		rtest.ValidateAndNormalize(t, registry, policy)
		return policy
	}

	apiComputedRoutes := newID(types.ComputedRoutesType, "api")
	wwwComputedRoutes := newID(types.ComputedRoutesType, "www")
	barComputedRoutes := newID(types.ComputedRoutesType, "bar")
	fooComputedRoutes := newID(types.ComputedRoutesType, "foo")
	zimComputedRoutes := newID(types.ComputedRoutesType, "zim")
	girComputedRoutes := newID(types.ComputedRoutesType, "gir")

	m := New()

	var (
		apiSvc = newService("api")
		wwwSvc = newService("www")
		barSvc = newService("bar")
		fooSvc = newService("foo")
		zimSvc = newService("zim")
		girSvc = newService("gir")

		apiSvcRef = resource.Reference(apiSvc.Id, "")
		wwwSvcRef = resource.Reference(wwwSvc.Id, "")
		barSvcRef = resource.Reference(barSvc.Id, "")
		fooSvcRef = resource.Reference(fooSvc.Id, "")
		zimSvcRef = resource.Reference(zimSvc.Id, "")
		girSvcRef = resource.Reference(girSvc.Id, "")

		apiDest = newDestPolicy("api", 55*time.Second)
		wwwDest = newDestPolicy("www", 123*time.Second)

		// Start out easy and don't have failover policies that reference other services.
		apiFail = newFailPolicy("api", newRef(catalog.ServiceType, "api"))
		wwwFail = newFailPolicy("www", newRef(catalog.ServiceType, "www"))
		barFail = newFailPolicy("bar", newRef(catalog.ServiceType, "bar"))
	)

	testutil.RunStep(t, "only name aligned defaults", func(t *testing.T) {
		requireTracking(t, m, apiSvc, apiComputedRoutes)
		requireTracking(t, m, wwwSvc, wwwComputedRoutes)
		requireTracking(t, m, barSvc, barComputedRoutes)
		requireTracking(t, m, fooSvc, fooComputedRoutes)
		requireTracking(t, m, zimSvc, zimComputedRoutes)
		requireTracking(t, m, girSvc, girComputedRoutes)

		requireTracking(t, m, apiDest, apiComputedRoutes)
		requireTracking(t, m, wwwDest, wwwComputedRoutes)

		// This will track the failover policies.
		requireTracking(t, m, apiFail, apiComputedRoutes)
		requireTracking(t, m, wwwFail, wwwComputedRoutes)
		requireTracking(t, m, barFail, barComputedRoutes)

		// verify other helper methods
		for _, ref := range []*pbresource.Reference{apiSvcRef, wwwSvcRef, barSvcRef, fooSvcRef, zimSvcRef, girSvcRef} {
			require.Empty(t, m.RouteIDsByBackendServiceRef(ref))
			require.Empty(t, m.RouteIDsByParentServiceRef(ref))
		}
	})

	var (
		route1 *pbresource.Resource
	)
	testutil.RunStep(t, "track a name-aligned xroute", func(t *testing.T) {
		// First route will also not cross any services.
		route1 := rtest.Resource(typ, "route-1").
			WithTenancy(resource.DefaultNamespacedTenancy()).
			WithData(t, newRoute(t,
				[]*pbmesh.ParentReference{
					{Ref: newRef(catalog.ServiceType, "api")},
				},
				[]*pbmesh.BackendReference{
					newBackendRef("api"),
				},
			)).Build()
		rtest.ValidateAndNormalize(t, registry, route1)

		requireTracking(t, m, route1, apiComputedRoutes)

		// Now 'api' references should trigger more, but be duplicate-suppressed.
		requireTracking(t, m, apiSvc, apiComputedRoutes)
		requireTracking(t, m, wwwSvc, wwwComputedRoutes)
		requireTracking(t, m, barSvc, barComputedRoutes)
		requireTracking(t, m, fooSvc, fooComputedRoutes)
		requireTracking(t, m, zimSvc, zimComputedRoutes)
		requireTracking(t, m, girSvc, girComputedRoutes)

		requireTracking(t, m, apiDest, apiComputedRoutes)
		requireTracking(t, m, wwwDest, wwwComputedRoutes)

		requireTracking(t, m, apiFail, apiComputedRoutes)
		requireTracking(t, m, wwwFail, wwwComputedRoutes)
		requireTracking(t, m, barFail, barComputedRoutes)

		// verify other helper methods
		prototest.AssertElementsMatch(t, []*pbresource.Reference{apiSvcRef}, m.BackendServiceRefsByRouteID(route1.Id))
		prototest.AssertElementsMatch(t, []*pbresource.Reference{apiSvcRef}, m.ParentServiceRefsByRouteID(route1.Id))

		prototest.AssertElementsMatch(t, []*pbresource.ID{route1.Id}, m.RouteIDsByBackendServiceRef(apiSvcRef))
		prototest.AssertElementsMatch(t, []*pbresource.ID{route1.Id}, m.RouteIDsByParentServiceRef(apiSvcRef))

		for _, ref := range []*pbresource.Reference{wwwSvcRef, barSvcRef, fooSvcRef, zimSvcRef, girSvcRef} {
			require.Empty(t, m.RouteIDsByBackendServiceRef(ref))
			require.Empty(t, m.RouteIDsByParentServiceRef(ref))
		}
	})

	testutil.RunStep(t, "make the route cross services", func(t *testing.T) {
		route1 = rtest.Resource(typ, "route-1").
			WithTenancy(resource.DefaultNamespacedTenancy()).
			WithData(t, newRoute(t,
				[]*pbmesh.ParentReference{
					{Ref: newRef(catalog.ServiceType, "api")},
				},
				[]*pbmesh.BackendReference{
					newBackendRef("www"),
				},
			)).Build()
		rtest.ValidateAndNormalize(t, registry, route1)

		// Now witness the update.
		requireTracking(t, m, route1, apiComputedRoutes)

		// Now 'api' references should trigger different things.
		requireTracking(t, m, apiSvc, apiComputedRoutes)
		requireTracking(t, m, wwwSvc, wwwComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, barSvc, barComputedRoutes)
		requireTracking(t, m, fooSvc, fooComputedRoutes)
		requireTracking(t, m, zimSvc, zimComputedRoutes)
		requireTracking(t, m, girSvc, girComputedRoutes)

		requireTracking(t, m, apiDest, apiComputedRoutes)
		requireTracking(t, m, wwwDest, wwwComputedRoutes, apiComputedRoutes)

		requireTracking(t, m, apiFail, apiComputedRoutes)
		requireTracking(t, m, wwwFail, wwwComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, barFail, barComputedRoutes)

		// verify other helper methods
		prototest.AssertElementsMatch(t, []*pbresource.Reference{wwwSvcRef}, m.BackendServiceRefsByRouteID(route1.Id))
		prototest.AssertElementsMatch(t, []*pbresource.Reference{apiSvcRef}, m.ParentServiceRefsByRouteID(route1.Id))

		require.Empty(t, m.RouteIDsByBackendServiceRef(apiSvcRef))
		prototest.AssertElementsMatch(t, []*pbresource.ID{route1.Id}, m.RouteIDsByParentServiceRef(apiSvcRef))

		prototest.AssertElementsMatch(t, []*pbresource.ID{route1.Id}, m.RouteIDsByBackendServiceRef(wwwSvcRef))
		require.Empty(t, m.RouteIDsByParentServiceRef(wwwSvcRef))

		for _, ref := range []*pbresource.Reference{barSvcRef, fooSvcRef, zimSvcRef, girSvcRef} {
			require.Empty(t, m.RouteIDsByBackendServiceRef(ref))
			require.Empty(t, m.RouteIDsByParentServiceRef(ref))
		}
	})

	var (
		route2 *pbresource.Resource
	)
	testutil.RunStep(t, "make another route sharing a parent with the first", func(t *testing.T) {
		route2 = rtest.Resource(typ, "route-2").
			WithTenancy(resource.DefaultNamespacedTenancy()).
			WithData(t, newRoute(t,
				[]*pbmesh.ParentReference{
					{Ref: newRef(catalog.ServiceType, "api")},
					{Ref: newRef(catalog.ServiceType, "foo")},
				},
				[]*pbmesh.BackendReference{
					newBackendRef("bar"),
				},
			)).Build()
		rtest.ValidateAndNormalize(t, registry, route2)

		// Now witness a route with multiple parents, overlapping the other route.
		requireTracking(t, m, route2, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, apiSvc, apiComputedRoutes)
		requireTracking(t, m, wwwSvc, wwwComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, barSvc, barComputedRoutes, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, fooSvc, fooComputedRoutes)
		requireTracking(t, m, zimSvc, zimComputedRoutes)
		requireTracking(t, m, girSvc, girComputedRoutes)

		requireTracking(t, m, apiDest, apiComputedRoutes)
		requireTracking(t, m, wwwDest, wwwComputedRoutes, apiComputedRoutes)

		requireTracking(t, m, apiFail, apiComputedRoutes)
		requireTracking(t, m, wwwFail, wwwComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, barFail, barComputedRoutes, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, route1, apiComputedRoutes)
		// skip re-verifying route2
		// requireTracking(t, m, route2, apiComputedRoutes, fooComputedRoutes)

		// verify other helper methods
		prototest.AssertElementsMatch(t, []*pbresource.Reference{wwwSvcRef}, m.BackendServiceRefsByRouteID(route1.Id))
		prototest.AssertElementsMatch(t, []*pbresource.Reference{apiSvcRef}, m.ParentServiceRefsByRouteID(route1.Id))

		prototest.AssertElementsMatch(t, []*pbresource.Reference{barSvcRef}, m.BackendServiceRefsByRouteID(route2.Id))
		prototest.AssertElementsMatch(t, []*pbresource.Reference{apiSvcRef, fooSvcRef}, m.ParentServiceRefsByRouteID(route2.Id))

		require.Empty(t, m.RouteIDsByBackendServiceRef(apiSvcRef))
		prototest.AssertElementsMatch(t, []*pbresource.ID{route1.Id, route2.Id}, m.RouteIDsByParentServiceRef(apiSvcRef))

		prototest.AssertElementsMatch(t, []*pbresource.ID{route1.Id}, m.RouteIDsByBackendServiceRef(wwwSvcRef))
		require.Empty(t, m.RouteIDsByParentServiceRef(wwwSvcRef))

		prototest.AssertElementsMatch(t, []*pbresource.ID{route2.Id}, m.RouteIDsByBackendServiceRef(barSvcRef))
		require.Empty(t, m.RouteIDsByParentServiceRef(barSvcRef))

		require.Empty(t, m.RouteIDsByBackendServiceRef(fooSvcRef))
		prototest.AssertElementsMatch(t, []*pbresource.ID{route2.Id}, m.RouteIDsByParentServiceRef(fooSvcRef))

		for _, ref := range []*pbresource.Reference{zimSvcRef, girSvcRef} {
			require.Empty(t, m.RouteIDsByBackendServiceRef(ref))
			require.Empty(t, m.RouteIDsByParentServiceRef(ref))
		}
	})

	testutil.RunStep(t, "update the failover policy to cross services", func(t *testing.T) {
		apiFail = newFailPolicy("api",
			newRef(catalog.ServiceType, "foo"),
			newRef(catalog.ServiceType, "zim"))
		requireTracking(t, m, apiFail, apiComputedRoutes)

		requireTracking(t, m, apiSvc, apiComputedRoutes)
		requireTracking(t, m, wwwSvc, wwwComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, barSvc, barComputedRoutes, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, fooSvc, fooComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, zimSvc, zimComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, girSvc, girComputedRoutes)

		requireTracking(t, m, apiDest, apiComputedRoutes)
		requireTracking(t, m, wwwDest, wwwComputedRoutes, apiComputedRoutes)

		// skipping verification of apiFail b/c it happened above already
		// requireTracking(t, m, apiFail, apiComputedRoutes)
		requireTracking(t, m, wwwFail, wwwComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, barFail, barComputedRoutes, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, route1, apiComputedRoutes)
		requireTracking(t, m, route2, apiComputedRoutes, fooComputedRoutes)

		// verify other helper methods
		prototest.AssertElementsMatch(t, []*pbresource.Reference{wwwSvcRef}, m.BackendServiceRefsByRouteID(route1.Id))
		prototest.AssertElementsMatch(t, []*pbresource.Reference{apiSvcRef}, m.ParentServiceRefsByRouteID(route1.Id))

		prototest.AssertElementsMatch(t, []*pbresource.Reference{barSvcRef}, m.BackendServiceRefsByRouteID(route2.Id))
		prototest.AssertElementsMatch(t, []*pbresource.Reference{apiSvcRef, fooSvcRef}, m.ParentServiceRefsByRouteID(route2.Id))

		require.Empty(t, m.RouteIDsByBackendServiceRef(apiSvcRef))
		prototest.AssertElementsMatch(t, []*pbresource.ID{route1.Id, route2.Id}, m.RouteIDsByParentServiceRef(apiSvcRef))

		prototest.AssertElementsMatch(t, []*pbresource.ID{route1.Id}, m.RouteIDsByBackendServiceRef(wwwSvcRef))
		require.Empty(t, m.RouteIDsByParentServiceRef(wwwSvcRef))

		prototest.AssertElementsMatch(t, []*pbresource.ID{route2.Id}, m.RouteIDsByBackendServiceRef(barSvcRef))
		require.Empty(t, m.RouteIDsByParentServiceRef(barSvcRef))

		require.Empty(t, m.RouteIDsByBackendServiceRef(fooSvcRef))
		prototest.AssertElementsMatch(t, []*pbresource.ID{route2.Id}, m.RouteIDsByParentServiceRef(fooSvcRef))

		for _, ref := range []*pbresource.Reference{zimSvcRef, girSvcRef} {
			require.Empty(t, m.RouteIDsByBackendServiceRef(ref))
			require.Empty(t, m.RouteIDsByParentServiceRef(ref))
		}
	})

	testutil.RunStep(t, "set a new failover policy for a service in route2", func(t *testing.T) {
		barFail = newFailPolicy("bar",
			newRef(catalog.ServiceType, "gir"))
		requireTracking(t, m, barFail, barComputedRoutes, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, apiSvc, apiComputedRoutes)
		requireTracking(t, m, wwwSvc, wwwComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, barSvc, barComputedRoutes, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, fooSvc, fooComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, zimSvc, zimComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, girSvc, girComputedRoutes, barComputedRoutes, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, apiDest, apiComputedRoutes)
		requireTracking(t, m, wwwDest, wwwComputedRoutes, apiComputedRoutes)

		requireTracking(t, m, apiFail, apiComputedRoutes)
		requireTracking(t, m, wwwFail, wwwComputedRoutes, apiComputedRoutes)
		// skipping verification of barFail b/c it happened above already
		// requireTracking(t, m, barFail, barComputedRoutes, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, route1, apiComputedRoutes)
		requireTracking(t, m, route2, apiComputedRoutes, fooComputedRoutes)

		// verify other helper methods
		prototest.AssertElementsMatch(t, []*pbresource.Reference{wwwSvcRef}, m.BackendServiceRefsByRouteID(route1.Id))
		prototest.AssertElementsMatch(t, []*pbresource.Reference{apiSvcRef}, m.ParentServiceRefsByRouteID(route1.Id))

		prototest.AssertElementsMatch(t, []*pbresource.Reference{barSvcRef}, m.BackendServiceRefsByRouteID(route2.Id))
		prototest.AssertElementsMatch(t, []*pbresource.Reference{apiSvcRef, fooSvcRef}, m.ParentServiceRefsByRouteID(route2.Id))

		require.Empty(t, m.RouteIDsByBackendServiceRef(apiSvcRef))
		prototest.AssertElementsMatch(t, []*pbresource.ID{route1.Id, route2.Id}, m.RouteIDsByParentServiceRef(apiSvcRef))

		prototest.AssertElementsMatch(t, []*pbresource.ID{route1.Id}, m.RouteIDsByBackendServiceRef(wwwSvcRef))
		require.Empty(t, m.RouteIDsByParentServiceRef(wwwSvcRef))

		prototest.AssertElementsMatch(t, []*pbresource.ID{route2.Id}, m.RouteIDsByBackendServiceRef(barSvcRef))
		require.Empty(t, m.RouteIDsByParentServiceRef(barSvcRef))

		require.Empty(t, m.RouteIDsByBackendServiceRef(fooSvcRef))
		prototest.AssertElementsMatch(t, []*pbresource.ID{route2.Id}, m.RouteIDsByParentServiceRef(fooSvcRef))

		for _, ref := range []*pbresource.Reference{zimSvcRef, girSvcRef} {
			require.Empty(t, m.RouteIDsByBackendServiceRef(ref))
			require.Empty(t, m.RouteIDsByParentServiceRef(ref))
		}
	})

	testutil.RunStep(t, "delete first route", func(t *testing.T) {
		m.UntrackXRoute(route1.Id)
		route1 = nil

		requireTracking(t, m, apiSvc, apiComputedRoutes)
		requireTracking(t, m, wwwSvc, wwwComputedRoutes)
		requireTracking(t, m, barSvc, barComputedRoutes, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, fooSvc, fooComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, zimSvc, zimComputedRoutes, apiComputedRoutes)
		requireTracking(t, m, girSvc, girComputedRoutes, barComputedRoutes, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, apiDest, apiComputedRoutes)
		requireTracking(t, m, wwwDest, wwwComputedRoutes)

		requireTracking(t, m, apiFail, apiComputedRoutes)
		requireTracking(t, m, wwwFail, wwwComputedRoutes)
		requireTracking(t, m, barFail, barComputedRoutes, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, route2, apiComputedRoutes, fooComputedRoutes)

		// verify other helper methods
		prototest.AssertElementsMatch(t, []*pbresource.Reference{barSvcRef}, m.BackendServiceRefsByRouteID(route2.Id))
		prototest.AssertElementsMatch(t, []*pbresource.Reference{apiSvcRef, fooSvcRef}, m.ParentServiceRefsByRouteID(route2.Id))

		require.Empty(t, m.RouteIDsByBackendServiceRef(apiSvcRef))
		prototest.AssertElementsMatch(t, []*pbresource.ID{route2.Id}, m.RouteIDsByParentServiceRef(apiSvcRef))

		prototest.AssertElementsMatch(t, []*pbresource.ID{route2.Id}, m.RouteIDsByBackendServiceRef(barSvcRef))
		require.Empty(t, m.RouteIDsByParentServiceRef(barSvcRef))

		require.Empty(t, m.RouteIDsByBackendServiceRef(fooSvcRef))
		prototest.AssertElementsMatch(t, []*pbresource.ID{route2.Id}, m.RouteIDsByParentServiceRef(fooSvcRef))

		for _, ref := range []*pbresource.Reference{wwwSvcRef, zimSvcRef, girSvcRef} {
			require.Empty(t, m.RouteIDsByBackendServiceRef(ref))
			require.Empty(t, m.RouteIDsByParentServiceRef(ref))
		}
	})

	testutil.RunStep(t, "delete all failover", func(t *testing.T) {
		m.UntrackFailoverPolicy(apiFail.Id)
		m.UntrackFailoverPolicy(wwwFail.Id)
		m.UntrackFailoverPolicy(barFail.Id)

		apiFail = nil
		wwwFail = nil
		barFail = nil

		requireTracking(t, m, apiSvc, apiComputedRoutes)
		requireTracking(t, m, wwwSvc, wwwComputedRoutes)
		requireTracking(t, m, barSvc, barComputedRoutes, apiComputedRoutes, fooComputedRoutes)

		requireTracking(t, m, fooSvc, fooComputedRoutes)
		requireTracking(t, m, zimSvc, zimComputedRoutes)
		requireTracking(t, m, girSvc, girComputedRoutes)

		requireTracking(t, m, apiDest, apiComputedRoutes)
		requireTracking(t, m, wwwDest, wwwComputedRoutes)

		requireTracking(t, m, route2, apiComputedRoutes, fooComputedRoutes)

		// verify other helper methods
		prototest.AssertElementsMatch(t, []*pbresource.Reference{barSvcRef}, m.BackendServiceRefsByRouteID(route2.Id))
		prototest.AssertElementsMatch(t, []*pbresource.Reference{apiSvcRef, fooSvcRef}, m.ParentServiceRefsByRouteID(route2.Id))

		require.Empty(t, m.RouteIDsByBackendServiceRef(apiSvcRef))
		prototest.AssertElementsMatch(t, []*pbresource.ID{route2.Id}, m.RouteIDsByParentServiceRef(apiSvcRef))

		prototest.AssertElementsMatch(t, []*pbresource.ID{route2.Id}, m.RouteIDsByBackendServiceRef(barSvcRef))
		require.Empty(t, m.RouteIDsByParentServiceRef(barSvcRef))

		require.Empty(t, m.RouteIDsByBackendServiceRef(fooSvcRef))
		prototest.AssertElementsMatch(t, []*pbresource.ID{route2.Id}, m.RouteIDsByParentServiceRef(fooSvcRef))

		for _, ref := range []*pbresource.Reference{wwwSvcRef, zimSvcRef, girSvcRef} {
|
||||||
|
require.Empty(t, m.RouteIDsByBackendServiceRef(ref))
|
||||||
|
require.Empty(t, m.RouteIDsByParentServiceRef(ref))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
testutil.RunStep(t, "delete second route", func(t *testing.T) {
|
||||||
|
m.UntrackXRoute(route2.Id)
|
||||||
|
route2 = nil
|
||||||
|
|
||||||
|
requireTracking(t, m, apiSvc, apiComputedRoutes)
|
||||||
|
requireTracking(t, m, wwwSvc, wwwComputedRoutes)
|
||||||
|
requireTracking(t, m, barSvc, barComputedRoutes)
|
||||||
|
|
||||||
|
requireTracking(t, m, fooSvc, fooComputedRoutes)
|
||||||
|
requireTracking(t, m, zimSvc, zimComputedRoutes)
|
||||||
|
requireTracking(t, m, girSvc, girComputedRoutes)
|
||||||
|
|
||||||
|
requireTracking(t, m, apiDest, apiComputedRoutes)
|
||||||
|
requireTracking(t, m, wwwDest, wwwComputedRoutes)
|
||||||
|
|
||||||
|
// verify other helper methods
|
||||||
|
for _, ref := range []*pbresource.Reference{apiSvcRef, wwwSvcRef, barSvcRef, fooSvcRef, zimSvcRef, girSvcRef} {
|
||||||
|
require.Empty(t, m.RouteIDsByBackendServiceRef(ref))
|
||||||
|
require.Empty(t, m.RouteIDsByParentServiceRef(ref))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func requireTracking(
|
||||||
|
t *testing.T,
|
||||||
|
mapper *Mapper,
|
||||||
|
res *pbresource.Resource,
|
||||||
|
computedRoutesIDs ...*pbresource.ID,
|
||||||
|
) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
require.NotNil(t, res)
|
||||||
|
|
||||||
|
var (
|
||||||
|
reqs []controller.Request
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
switch {
|
||||||
|
case resource.EqualType(types.HTTPRouteType, res.Id.Type):
|
||||||
|
reqs, err = mapper.MapHTTPRoute(context.Background(), controller.Runtime{}, res)
|
||||||
|
case resource.EqualType(types.GRPCRouteType, res.Id.Type):
|
||||||
|
reqs, err = mapper.MapGRPCRoute(context.Background(), controller.Runtime{}, res)
|
||||||
|
case resource.EqualType(types.TCPRouteType, res.Id.Type):
|
||||||
|
reqs, err = mapper.MapTCPRoute(context.Background(), controller.Runtime{}, res)
|
||||||
|
case resource.EqualType(types.DestinationPolicyType, res.Id.Type):
|
||||||
|
reqs, err = mapper.MapDestinationPolicy(context.Background(), controller.Runtime{}, res)
|
||||||
|
case resource.EqualType(catalog.FailoverPolicyType, res.Id.Type):
|
||||||
|
reqs, err = mapper.MapFailoverPolicy(context.Background(), controller.Runtime{}, res)
|
||||||
|
case resource.EqualType(catalog.ServiceType, res.Id.Type):
|
||||||
|
reqs, err = mapper.MapService(context.Background(), controller.Runtime{}, res)
|
||||||
|
default:
|
||||||
|
t.Fatalf("unhandled resource type: %s", resource.TypeToString(res.Id.Type))
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
reqs = testDeduplicateRequests(reqs)
|
||||||
|
require.Len(t, reqs, len(computedRoutesIDs))
|
||||||
|
for _, computedRoutesID := range computedRoutesIDs {
|
||||||
|
require.NotNil(t, computedRoutesID)
|
||||||
|
prototest.AssertContainsElement(t, reqs, controller.Request{ID: computedRoutesID})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBackendRef(name string) *pbmesh.BackendReference {
|
||||||
|
return &pbmesh.BackendReference{
|
||||||
|
Ref: newRef(catalog.ServiceType, name),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRef(typ *pbresource.Type, name string) *pbresource.Reference {
|
||||||
|
return rtest.Resource(typ, name).
|
||||||
|
WithTenancy(resource.DefaultNamespacedTenancy()).
|
||||||
|
Reference("")
|
||||||
|
}
|
||||||
|
|
||||||
|
func newID(typ *pbresource.Type, name string) *pbresource.ID {
|
||||||
|
return rtest.Resource(typ, name).
|
||||||
|
WithTenancy(resource.DefaultNamespacedTenancy()).
|
||||||
|
ID()
|
||||||
|
}
|
||||||
|
|
||||||
|
func testDeduplicateRequests(reqs []controller.Request) []controller.Request {
|
||||||
|
type resID struct {
|
||||||
|
resource.ReferenceKey
|
||||||
|
UID string
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]controller.Request, 0, len(reqs))
|
||||||
|
seen := make(map[resID]struct{})
|
||||||
|
|
||||||
|
for _, req := range reqs {
|
||||||
|
rid := resID{
|
||||||
|
ReferenceKey: resource.NewReferenceKey(req.ID),
|
||||||
|
UID: req.ID.Uid,
|
||||||
|
}
|
||||||
|
if _, ok := seen[rid]; !ok {
|
||||||
|
out = append(out, req)
|
||||||
|
seen[rid] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
|
@@ -13,6 +13,11 @@ import (

 const (
     ComputedRoutesKind = "ComputedRoutes"
+
+    // NullRouteBackend is the sentinel string used in ComputedRoutes backend
+    // targets to indicate that traffic arriving at this destination should
+    // fail in a protocol-specific way (i.e. HTTP is 5xx)
+    NullRouteBackend = "NULL-ROUTE"
 )

 var (
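For orientation only (not part of the diff): a minimal sketch, assuming the NullRouteBackend constant added above and the null-route contract described in the proto comments later in this commit, of how a consumer of ComputedRoutes backend targets might decide to null-route traffic. The isNullRouteTarget helper is hypothetical.

// Hypothetical helper, not in this commit: per the sentinel's contract, an
// empty backend target or the literal "NULL-ROUTE" value means traffic to
// that backend should fail in a protocol-appropriate way (e.g. HTTP 5xx).
func isNullRouteTarget(backendTarget string) bool {
    return backendTarget == "" || backendTarget == NullRouteBackend
}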
@@ -66,12 +71,9 @@ func ValidateComputedRoutes(res *pbresource.Resource) error {
                 Wrapped: resource.ErrEmpty,
             }))
         }
-        if len(pmc.Targets) == 0 {
-            merr = multierror.Append(merr, wrapErr(resource.ErrInvalidField{
-                Name:    "targets",
-                Wrapped: resource.ErrEmpty,
-            }))
-        }
+
+        // TODO(rb): do a deep inspection of the config to verify that all
+        // xRoute backends ultimately point to an item in the targets map.
     }

     return merr
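The removed empty-targets check is deferred to the TODO above. As a rough illustration only, here is a hedged sketch of what that deep inspection could look like for the TCP case, using the ComputedPortRoutes oneof wrapper, the Targets map, and the NULL-ROUTE sentinel that appear elsewhere in this commit. The checkBackendTargets helper, its wiring, and the exact generated Go field names are assumptions, not code from the change.

// Hypothetical sketch of the deferred deep inspection: every backend_target
// named by the computed config must either be the null-route sentinel (or
// empty) or resolve to a key in the targets map.
func checkBackendTargets(pmc *pbmesh.ComputedPortRoutes) error {
    var targets []string
    switch x := pmc.Config.(type) {
    case *pbmesh.ComputedPortRoutes_Tcp:
        for _, rule := range x.Tcp.Rules {
            for _, ref := range rule.BackendRefs {
                targets = append(targets, ref.BackendTarget)
            }
        }
    }
    // HTTP and gRPC configs would be walked the same way.
    for _, target := range targets {
        if target == "" || target == NullRouteBackend {
            continue // null-routed; nothing to resolve
        }
        if _, ok := pmc.Targets[target]; !ok {
            return fmt.Errorf("backend target %q is not present in targets", target)
        }
    }
    return nil
}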
@@ -48,32 +48,17 @@ func TestValidateComputedRoutes(t *testing.T) {
             PortedConfigs: map[string]*pbmesh.ComputedPortRoutes{
                 "http": {
                     Config: nil,
-                    Targets: map[string]*pbmesh.BackendTargetDetails{
-                        "foo": {},
-                    },
                 },
             },
         },
         expectErr: `invalid value of key "http" within ported_configs: invalid "config" field: cannot be empty`,
     },
-    "empty targets": {
-        routes: &pbmesh.ComputedRoutes{
-            PortedConfigs: map[string]*pbmesh.ComputedPortRoutes{
-                "http": {
-                    Config: &pbmesh.ComputedPortRoutes_Tcp{
-                        Tcp: &pbmesh.InterpretedTCPRoute{},
-                    },
-                },
-            },
-        },
-        expectErr: `invalid value of key "http" within ported_configs: invalid "targets" field: cannot be empty`,
-    },
     "valid": {
         routes: &pbmesh.ComputedRoutes{
             PortedConfigs: map[string]*pbmesh.ComputedPortRoutes{
                 "http": {
                     Config: &pbmesh.ComputedPortRoutes_Tcp{
-                        Tcp: &pbmesh.InterpretedTCPRoute{},
+                        Tcp: &pbmesh.ComputedTCPRoute{},
                     },
                     Targets: map[string]*pbmesh.BackendTargetDetails{
                         "foo": {},
@@ -4,16 +4,39 @@
 package types

 import (
+    "fmt"
+
     "github.com/hashicorp/consul/internal/catalog"
     "github.com/hashicorp/consul/internal/resource"
+    pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1"
     "github.com/hashicorp/consul/proto-public/pbresource"
 )

 func IsRouteType(typ *pbresource.Type) bool {
+    return IsHTTPRouteType(typ) ||
+        IsGRPCRouteType(typ) ||
+        IsTCPRouteType(typ)
+}
+
+func IsHTTPRouteType(typ *pbresource.Type) bool {
     switch {
-    case resource.EqualType(typ, HTTPRouteType),
-        resource.EqualType(typ, GRPCRouteType),
-        resource.EqualType(typ, TCPRouteType):
+    case resource.EqualType(typ, HTTPRouteType):
+        return true
+    }
+    return false
+}
+
+func IsGRPCRouteType(typ *pbresource.Type) bool {
+    switch {
+    case resource.EqualType(typ, GRPCRouteType):
+        return true
+    }
+    return false
+}
+
+func IsTCPRouteType(typ *pbresource.Type) bool {
+    switch {
+    case resource.EqualType(typ, TCPRouteType):
         return true
     }
     return false
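Purely illustrative, assuming the predicates added above: a tiny hypothetical wrapper showing how a caller might branch on the xRoute flavor of a resource type; routeKindLabel is not part of this change.

// Hypothetical example (not in this commit) of using the split predicates.
func routeKindLabel(typ *pbresource.Type) string {
    switch {
    case IsHTTPRouteType(typ):
        return "http"
    case IsGRPCRouteType(typ):
        return "grpc"
    case IsTCPRouteType(typ):
        return "tcp"
    default:
        return "not-a-route"
    }
}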
@@ -50,3 +73,28 @@ func IsComputedRoutesType(typ *pbresource.Type) bool {
     }
     return false
 }
+
+// BackendRefToComputedRoutesTarget turns the provided BackendReference into an
+// opaque string format suitable for use as a map key and reference in a
+// standalone object or reference.
+//
+// It is opaque in that the caller should not attempt to parse it, and there is
+// no implied storage or wire compatibility concern, since the data is treated
+// opaquely at use time.
+func BackendRefToComputedRoutesTarget(backendRef *pbmesh.BackendReference) string {
+    ref := backendRef.Ref
+
+    s := fmt.Sprintf(
+        "%s/%s/%s?port=%s",
+        resource.TypeToString(ref.Type),
+        resource.TenancyToString(ref.Tenancy),
+        ref.Name,
+        backendRef.Port,
+    )
+
+    if backendRef.Datacenter != "" {
+        s += "&dc=" + backendRef.Datacenter
+    }
+
+    return s
+}
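A hedged usage sketch of the new helper, assuming the catalog.ServiceType constant and resource.DefaultNamespacedTenancy helper used in this commit's tests are importable here; exampleTargetKey is hypothetical and the resulting string is meant to be treated as opaque.

// Hypothetical example: produce the opaque targets-map key for a service
// backend on port "http" in datacenter "dc2". Callers compare or store the
// key; they never parse it back.
func exampleTargetKey() string {
    backendRef := &pbmesh.BackendReference{
        Ref: &pbresource.Reference{
            Type:    catalog.ServiceType,
            Tenancy: resource.DefaultNamespacedTenancy(),
            Name:    "api",
        },
        Port:       "http",
        Datacenter: "dc2",
    }
    // Roughly "<type>/<tenancy>/api?port=http&dc=dc2", depending on how
    // TypeToString and TenancyToString render their parts.
    return BackendRefToComputedRoutesTarget(backendRef)
}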
@@ -81,6 +81,16 @@ func validateParentRefs(parentRefs []*pbmesh.ParentReference) error {
             ))
         }

+        if parent.Ref.Name == "" {
+            merr = multierror.Append(merr, resource.ErrInvalidField{
+                Name: "ref",
+                Wrapped: resource.ErrInvalidField{
+                    Name:    "name",
+                    Wrapped: resource.ErrMissing,
+                },
+            })
+        }
+
         prk := portedRefKey{
             Key:  resource.NewReferenceKey(parent.Ref),
             Port: parent.Port,
@@ -175,6 +185,16 @@ func validateBackendRef(backendRef *pbmesh.BackendReference) []error {
         })
     }

+    if backendRef.Ref.Name == "" {
+        errs = append(errs, resource.ErrInvalidField{
+            Name: "ref",
+            Wrapped: resource.ErrInvalidField{
+                Name:    "name",
+                Wrapped: resource.ErrMissing,
+            },
+        })
+    }
+
     if backendRef.Ref.Section != "" {
         errs = append(errs, resource.ErrInvalidField{
             Name: "ref",
@@ -28,92 +28,92 @@ func (msg *ComputedPortRoutes) UnmarshalBinary(b []byte) error {
 }

 // MarshalBinary implements encoding.BinaryMarshaler
-func (msg *InterpretedHTTPRoute) MarshalBinary() ([]byte, error) {
+func (msg *ComputedHTTPRoute) MarshalBinary() ([]byte, error) {
     return proto.Marshal(msg)
 }

 // UnmarshalBinary implements encoding.BinaryUnmarshaler
-func (msg *InterpretedHTTPRoute) UnmarshalBinary(b []byte) error {
+func (msg *ComputedHTTPRoute) UnmarshalBinary(b []byte) error {
     return proto.Unmarshal(b, msg)
 }

 // MarshalBinary implements encoding.BinaryMarshaler
-func (msg *InterpretedHTTPRouteRule) MarshalBinary() ([]byte, error) {
+func (msg *ComputedHTTPRouteRule) MarshalBinary() ([]byte, error) {
     return proto.Marshal(msg)
 }

 // UnmarshalBinary implements encoding.BinaryUnmarshaler
-func (msg *InterpretedHTTPRouteRule) UnmarshalBinary(b []byte) error {
+func (msg *ComputedHTTPRouteRule) UnmarshalBinary(b []byte) error {
     return proto.Unmarshal(b, msg)
 }

 // MarshalBinary implements encoding.BinaryMarshaler
-func (msg *InterpretedHTTPBackendRef) MarshalBinary() ([]byte, error) {
+func (msg *ComputedHTTPBackendRef) MarshalBinary() ([]byte, error) {
     return proto.Marshal(msg)
 }

 // UnmarshalBinary implements encoding.BinaryUnmarshaler
-func (msg *InterpretedHTTPBackendRef) UnmarshalBinary(b []byte) error {
+func (msg *ComputedHTTPBackendRef) UnmarshalBinary(b []byte) error {
     return proto.Unmarshal(b, msg)
 }

 // MarshalBinary implements encoding.BinaryMarshaler
-func (msg *InterpretedGRPCRoute) MarshalBinary() ([]byte, error) {
+func (msg *ComputedGRPCRoute) MarshalBinary() ([]byte, error) {
     return proto.Marshal(msg)
 }

 // UnmarshalBinary implements encoding.BinaryUnmarshaler
-func (msg *InterpretedGRPCRoute) UnmarshalBinary(b []byte) error {
+func (msg *ComputedGRPCRoute) UnmarshalBinary(b []byte) error {
     return proto.Unmarshal(b, msg)
 }

 // MarshalBinary implements encoding.BinaryMarshaler
-func (msg *InterpretedGRPCRouteRule) MarshalBinary() ([]byte, error) {
+func (msg *ComputedGRPCRouteRule) MarshalBinary() ([]byte, error) {
     return proto.Marshal(msg)
 }

 // UnmarshalBinary implements encoding.BinaryUnmarshaler
-func (msg *InterpretedGRPCRouteRule) UnmarshalBinary(b []byte) error {
+func (msg *ComputedGRPCRouteRule) UnmarshalBinary(b []byte) error {
     return proto.Unmarshal(b, msg)
 }

 // MarshalBinary implements encoding.BinaryMarshaler
-func (msg *InterpretedGRPCBackendRef) MarshalBinary() ([]byte, error) {
+func (msg *ComputedGRPCBackendRef) MarshalBinary() ([]byte, error) {
     return proto.Marshal(msg)
 }

 // UnmarshalBinary implements encoding.BinaryUnmarshaler
-func (msg *InterpretedGRPCBackendRef) UnmarshalBinary(b []byte) error {
+func (msg *ComputedGRPCBackendRef) UnmarshalBinary(b []byte) error {
     return proto.Unmarshal(b, msg)
 }

 // MarshalBinary implements encoding.BinaryMarshaler
-func (msg *InterpretedTCPRoute) MarshalBinary() ([]byte, error) {
+func (msg *ComputedTCPRoute) MarshalBinary() ([]byte, error) {
     return proto.Marshal(msg)
 }

 // UnmarshalBinary implements encoding.BinaryUnmarshaler
-func (msg *InterpretedTCPRoute) UnmarshalBinary(b []byte) error {
+func (msg *ComputedTCPRoute) UnmarshalBinary(b []byte) error {
     return proto.Unmarshal(b, msg)
 }

 // MarshalBinary implements encoding.BinaryMarshaler
-func (msg *InterpretedTCPRouteRule) MarshalBinary() ([]byte, error) {
+func (msg *ComputedTCPRouteRule) MarshalBinary() ([]byte, error) {
     return proto.Marshal(msg)
 }

 // UnmarshalBinary implements encoding.BinaryUnmarshaler
-func (msg *InterpretedTCPRouteRule) UnmarshalBinary(b []byte) error {
+func (msg *ComputedTCPRouteRule) UnmarshalBinary(b []byte) error {
     return proto.Unmarshal(b, msg)
 }

 // MarshalBinary implements encoding.BinaryMarshaler
-func (msg *InterpretedTCPBackendRef) MarshalBinary() ([]byte, error) {
+func (msg *ComputedTCPBackendRef) MarshalBinary() ([]byte, error) {
     return proto.Marshal(msg)
 }

 // UnmarshalBinary implements encoding.BinaryUnmarshaler
-func (msg *InterpretedTCPBackendRef) UnmarshalBinary(b []byte) error {
+func (msg *ComputedTCPBackendRef) UnmarshalBinary(b []byte) error {
     return proto.Unmarshal(b, msg)
 }
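As a sanity check on the renamed binary codecs, a hedged sketch follows; roundTripComputedTCPRoute is hypothetical, pbmesh refers to the proto-public pbmesh/v1alpha1 package imported elsewhere in this commit, and the Go field names are assumed to follow standard protoc-gen-go casing for the messages shown in the proto changes below.

// Hypothetical round trip through the renamed marshalers.
func roundTripComputedTCPRoute() (*pbmesh.ComputedTCPRoute, error) {
    in := &pbmesh.ComputedTCPRoute{
        Rules: []*pbmesh.ComputedTCPRouteRule{{
            BackendRefs: []*pbmesh.ComputedTCPBackendRef{{
                BackendTarget: "example-target-key",
                Weight:        1,
            }},
        }},
    }

    raw, err := in.MarshalBinary()
    if err != nil {
        return nil, err
    }

    out := &pbmesh.ComputedTCPRoute{}
    if err := out.UnmarshalBinary(raw); err != nil {
        return nil, err
    }
    return out, nil
}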
File diff suppressed because it is too large
@@ -21,9 +21,9 @@ message ComputedRoutes {

 message ComputedPortRoutes {
   oneof config {
-    InterpretedHTTPRoute http = 1;
-    InterpretedGRPCRoute grpc = 2;
-    InterpretedTCPRoute tcp = 3;
+    ComputedHTTPRoute http = 1;
+    ComputedGRPCRoute grpc = 2;
+    ComputedTCPRoute tcp = 3;
   }
   bool using_default_config = 4; // TODO

@@ -31,58 +31,79 @@ message ComputedPortRoutes {
   map<string, BackendTargetDetails> targets = 5;
 }

-message InterpretedHTTPRoute {
+message ComputedHTTPRoute {
   ParentReference parent_ref = 1;
   reserved 2; // hostnames
-  repeated InterpretedHTTPRouteRule rules = 3;
+  repeated ComputedHTTPRouteRule rules = 3;
 }

-message InterpretedHTTPRouteRule {
+message ComputedHTTPRouteRule {
   repeated HTTPRouteMatch matches = 1;
   repeated HTTPRouteFilter filters = 2;
-  repeated InterpretedHTTPBackendRef backend_refs = 3;
+  repeated ComputedHTTPBackendRef backend_refs = 3;
   HTTPRouteTimeouts timeouts = 4;
   HTTPRouteRetries retries = 5;
 }

-message InterpretedHTTPBackendRef {
+message ComputedHTTPBackendRef {
+  // BackendTarget indicates which key in the targets map provides
+  // the rest of the configuration.
+  //
+  // If this field is set to the empty string, or is the sentinel value
+  // "NULL-ROUTE" is an indication that all of the traffic destined for this
+  // backend reference should be null routed in a format appropriate for the
+  // protocol (i.e. for HTTP use 5xx).
   string backend_target = 1;
   uint32 weight = 2;
   repeated HTTPRouteFilter filters = 3;
 }

-message InterpretedGRPCRoute {
+message ComputedGRPCRoute {
   ParentReference parent_ref = 1;
   reserved 2; // hostnames
-  repeated InterpretedGRPCRouteRule rules = 3;
+  repeated ComputedGRPCRouteRule rules = 3;
 }

-message InterpretedGRPCRouteRule {
+message ComputedGRPCRouteRule {
   repeated GRPCRouteMatch matches = 1;
   repeated GRPCRouteFilter filters = 2;
-  repeated InterpretedGRPCBackendRef backend_refs = 3;
+  repeated ComputedGRPCBackendRef backend_refs = 3;
   HTTPRouteTimeouts timeouts = 4;
   HTTPRouteRetries retries = 5;
 }

-message InterpretedGRPCBackendRef {
+message ComputedGRPCBackendRef {
+  // BackendTarget indicates which key in the targets map provides
+  // the rest of the configuration.
+  //
+  // If this field is set to the empty string, or is the sentinel value
+  // "NULL-ROUTE" is an indication that all of the traffic destined for this
+  // backend reference should be null routed in a format appropriate for the
+  // protocol (i.e. for HTTP use 5xx).
   string backend_target = 1;
   uint32 weight = 2;
   repeated GRPCRouteFilter filters = 3;
 }

-message InterpretedTCPRoute {
+message ComputedTCPRoute {
   ParentReference parent_ref = 1;
-  repeated InterpretedTCPRouteRule rules = 2;
+  repeated ComputedTCPRouteRule rules = 2;
 }

-message InterpretedTCPRouteRule {
-  repeated InterpretedTCPBackendRef backend_refs = 1;
+message ComputedTCPRouteRule {
+  repeated ComputedTCPBackendRef backend_refs = 1;
 }

 // TODO: look into smuggling the target through a different typeURL, or just
 // skip in favor of letting the caller do their own lookups?
-message InterpretedTCPBackendRef {
+message ComputedTCPBackendRef {
+  // BackendTarget indicates which key in the targets map provides
+  // the rest of the configuration.
+  //
+  // If this field is set to the empty string, or is the sentinel value
+  // "NULL-ROUTE" is an indication that all of the traffic destined for this
+  // backend reference should be null routed in a format appropriate for the
+  // protocol (i.e. for HTTP use 5xx).
   string backend_target = 1;
   uint32 weight = 2;
 }
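To make the backend_target-to-targets relationship concrete, an illustrative Go sketch follows; the ComputedPortRoutes_Tcp wrapper and the Targets field names come from the test changes earlier in this commit, while exampleComputedPortRoutes itself and the key name are hypothetical.

// Hypothetical construction: the rule's backend_target names a key in the
// targets map; a "NULL-ROUTE" (or empty) target would instead mean
// protocol-specific failure rather than a lookup.
func exampleComputedPortRoutes() *pbmesh.ComputedPortRoutes {
    return &pbmesh.ComputedPortRoutes{
        Config: &pbmesh.ComputedPortRoutes_Tcp{
            Tcp: &pbmesh.ComputedTCPRoute{
                Rules: []*pbmesh.ComputedTCPRouteRule{{
                    BackendRefs: []*pbmesh.ComputedTCPBackendRef{{
                        BackendTarget: "api-target", // key into Targets below
                        Weight:        1,
                    }},
                }},
            },
        },
        Targets: map[string]*pbmesh.BackendTargetDetails{
            "api-target": {}, // backend_ref / service / failover details live here
        },
    }
}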
@@ -91,8 +112,6 @@ message BackendTargetDetails {
   // identity info
   BackendReference backend_ref = 1;

-  bool null_route_traffic = 2;
-
   hashicorp.consul.catalog.v1alpha1.Service service = 3;
   hashicorp.consul.catalog.v1alpha1.FailoverPolicy failover_policy = 4;
   DestinationPolicy destination_policy = 5;