mirror of https://github.com/status-im/consul.git
added computed failover controller (#20329)
* added computed failover controller
* removed some unnecessary changes
* removed unnecessary changes
* minor refactor
* minor refactor fmt
* added copyright
parent 0abf8f8426
commit 3446eb3b1b
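Before the diff: this change makes the catalog failover controller reconcile ComputedFailoverPolicy resources and take a sameness-group expander as a dependency. A minimal wiring sketch, assuming a *controller.Manager named mgr from the surrounding catalog registration code (import paths are the ones this commit touches; the function name registerFailover is hypothetical):

    import (
        "github.com/hashicorp/consul/internal/catalog/internal/controllers/failover"
        "github.com/hashicorp/consul/internal/catalog/internal/controllers/failover/expander"
        "github.com/hashicorp/consul/internal/controller"
    )

    // registerFailover mirrors the Register change in this commit: the controller
    // now requires a SamenessGroupExpander, and CE supplies a no-op implementation.
    func registerFailover(mgr *controller.Manager) {
        mgr.Register(failover.FailoverPolicyController(expander.GetSamenessGroupExpander()))
    }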
@@ -2,8 +2,9 @@
flowchart TD
auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/trafficpermissions
auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/workloadidentity
-catalog/v2beta1/computedfailoverpolicy
+catalog/v2beta1/computedfailoverpolicy --> catalog/v2beta1/failoverpolicy
-catalog/v2beta1/failoverpolicy --> catalog/v2beta1/service
+catalog/v2beta1/computedfailoverpolicy --> catalog/v2beta1/service
+catalog/v2beta1/failoverpolicy
catalog/v2beta1/healthstatus
catalog/v2beta1/node --> catalog/v2beta1/nodehealthstatus
catalog/v2beta1/nodehealthstatus
@@ -6,6 +6,7 @@ package failover
import (
"context"

+"github.com/hashicorp/consul/internal/catalog/internal/controllers/failover/expander"
"github.com/hashicorp/consul/internal/catalog/internal/types"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/controller/cache"
@@ -13,44 +14,53 @@ import (
"github.com/hashicorp/consul/internal/controller/dependency"
"github.com/hashicorp/consul/internal/resource"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
+pbmulticluster "github.com/hashicorp/consul/proto-public/pbmulticluster/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
+"google.golang.org/protobuf/proto"
+"google.golang.org/protobuf/types/known/anypb"
)

const (
destRefsIndexName = "destination-refs"
+boundRefsIndexName = "bound-refs"
)

-func FailoverPolicyController() *controller.Controller {
+func FailoverPolicyController(sgExpander expander.SamenessGroupExpander) *controller.Controller {
-return controller.NewController(
+ctrl := controller.NewController(
ControllerID,
-pbcatalog.FailoverPolicyType,
+pbcatalog.ComputedFailoverPolicyType,
-// We index the destination references of a failover policy so that when the
+indexers.BoundRefsIndex[*pbcatalog.ComputedFailoverPolicy](boundRefsIndexName),
-// Service watch fires we can find all FailoverPolicy resources that reference
+).
-// it to rereconcile them.
-indexers.RefOrIDIndex(
-destRefsIndexName,
-func(res *resource.DecodedResource[*pbcatalog.FailoverPolicy]) []*pbresource.Reference {
-return res.Data.GetUnderlyingDestinationRefs()
-},
-)).
WithWatch(
pbcatalog.ServiceType,
dependency.MultiMapper(
// FailoverPolicy is name-aligned with the Service it controls so always
// re-reconcile the corresponding FailoverPolicy when a Service changes.
-dependency.ReplaceType(pbcatalog.FailoverPolicyType),
+dependency.ReplaceType(pbcatalog.ComputedFailoverPolicyType),
-// Also check for all FailoverPolicy resources that have this service as a
+dependency.WrapAndReplaceType(
-// destination and re-reconcile those to check for port mapping conflicts.
+pbcatalog.ComputedFailoverPolicyType,
-dependency.CacheListMapper(pbcatalog.FailoverPolicyType, destRefsIndexName),
+dependency.CacheParentsMapper(pbcatalog.ComputedFailoverPolicyType, boundRefsIndexName),
+),
),
).
-WithReconciler(newFailoverPolicyReconciler())
+WithWatch(
+pbcatalog.FailoverPolicyType,
+dependency.ReplaceType(pbcatalog.ComputedFailoverPolicyType),
+sgExpander.GetSamenessGroupIndex(),
+).
+WithReconciler(newFailoverPolicyReconciler(sgExpander))

+return registerEnterpriseControllerWatchers(ctrl)
}

-type failoverPolicyReconciler struct{}
+type failoverPolicyReconciler struct {
+sgExpander expander.SamenessGroupExpander
+}

-func newFailoverPolicyReconciler() *failoverPolicyReconciler {
+func newFailoverPolicyReconciler(sgExpander expander.SamenessGroupExpander) *failoverPolicyReconciler {
-return &failoverPolicyReconciler{}
+return &failoverPolicyReconciler{
+sgExpander: sgExpander,
+}
}

func (r *failoverPolicyReconciler) Reconcile(ctx context.Context, rt controller.Runtime, req controller.Request) error {
@@ -58,18 +68,25 @@ func (r *failoverPolicyReconciler) Reconcile(ctx context.Context, rt controller.
// reconciliation request processing will not affect future invocations.
rt.Logger = rt.Logger.With("resource-id", req.ID)

-rt.Logger.Trace("reconciling failover policy")
+rt.Logger.Trace("reconciling computed failover policy")

-failoverPolicyID := req.ID

+computedFailoverPolicy, err := cache.GetDecoded[*pbcatalog.ComputedFailoverPolicy](rt.Cache, pbcatalog.ComputedFailoverPolicyType, "id", req.ID)
+if err != nil {
+rt.Logger.Error("error retrieving computed failover policy", "error", err)
+return err
+}
+failoverPolicyID := resource.ReplaceType(pbcatalog.FailoverPolicyType, req.ID)
failoverPolicy, err := cache.GetDecoded[*pbcatalog.FailoverPolicy](rt.Cache, pbcatalog.FailoverPolicyType, "id", failoverPolicyID)
if err != nil {
rt.Logger.Error("error retrieving failover policy", "error", err)
return err
}
if failoverPolicy == nil {
-// Either the failover policy was deleted, or it doesn't exist but an
+if err := deleteResource(ctx, rt, computedFailoverPolicy.GetResource()); err != nil {
-// update to a Service came through and we can ignore it.
+rt.Logger.Error("failed to delete computed failover policy", "error", err)
+return err
+}

return nil
}

@@ -85,81 +102,75 @@ func (r *failoverPolicyReconciler) Reconcile(ctx context.Context, rt controller.
rt.Logger.Error("error retrieving corresponding service", "error", err)
return err
}
-destServices := make(map[resource.ReferenceKey]*resource.DecodedResource[*pbcatalog.Service])
-if service != nil {
-destServices[resource.NewReferenceKey(serviceID)] = service
-}

-// Denormalize the ports and stuff. After this we have no empty ports.
+if service == nil {
-if service != nil {
+if err := deleteResource(ctx, rt, computedFailoverPolicy.GetResource()); err != nil {
-failoverPolicy.Data = types.SimplifyFailoverPolicy(
+rt.Logger.Error("failed to delete computed failover policy", "error", err)
-service.Data,
-failoverPolicy.Data,
-)
-}

-// Fetch services.
-for _, dest := range failoverPolicy.Data.GetUnderlyingDestinations() {
-if dest.Ref == nil || !isServiceType(dest.Ref.Type) || dest.Ref.Section != "" {
-continue // invalid, not possible due to validation hook
-}

-key := resource.NewReferenceKey(dest.Ref)

-if _, ok := destServices[key]; ok {
-continue
-}

-destID := resource.IDFromReference(dest.Ref)

-destService, err := cache.GetDecoded[*pbcatalog.Service](rt.Cache, pbcatalog.ServiceType, "id", destID)
-if err != nil {
-rt.Logger.Error("error retrieving destination service", "service", key, "error", err)
return err
}

-if destService != nil {
+conds := []*pbresource.Condition{ConditionMissingService}
-destServices[key] = destService
-}
-}

-newStatus := computeNewStatus(failoverPolicy, service, destServices)
+if err := writeStatus(ctx, rt, failoverPolicy.Resource, conds); err != nil {
+rt.Logger.Error("error encountered when attempting to update the resource's failover policy status", "error", err)
-if resource.EqualStatus(failoverPolicy.Resource.Status[ControllerID], newStatus, false) {
+return err
-rt.Logger.Trace("resource's failover policy status is unchanged",
+}
-"conditions", newStatus.Conditions)
+rt.Logger.Trace("resource's failover policy status was updated",
+"conditions", conds)
return nil
}

-_, err = rt.Client.WriteStatus(ctx, &pbresource.WriteStatusRequest{
+newComputedFailoverPolicy, destServices, missingSamenessGroups, err := makeComputedFailoverPolicy(ctx, rt, r.sgExpander, failoverPolicy, service)
-Id: failoverPolicy.Resource.Id,
-Key: ControllerID,
-Status: newStatus,
-})

if err != nil {
+return err
+}
+computedFailoverResource := computedFailoverPolicy.GetResource()
+
+if !proto.Equal(computedFailoverPolicy.GetData(), newComputedFailoverPolicy) {
+
+newCFPData, err := anypb.New(newComputedFailoverPolicy)
+if err != nil {
+rt.Logger.Error("error marshalling new computed failover policy", "error", err)
+return err
+}
+rt.Logger.Trace("writing computed failover policy")
+rsp, err := rt.Client.Write(ctx, &pbresource.WriteRequest{
+Resource: &pbresource.Resource{
+Id: req.ID,
+Data: newCFPData,
+},
+})
+if err != nil || rsp.Resource == nil {
+rt.Logger.Error("error writing new computed failover policy", "error", err)
+return err
+} else {
+rt.Logger.Trace("new computed failover policy was successfully written")
+computedFailoverResource = rsp.Resource
+}
+}

+conds := computeNewConditions(failoverPolicy.Resource, newComputedFailoverPolicy, service, destServices, missingSamenessGroups)
+if err := writeStatus(ctx, rt, failoverPolicy.Resource, conds); err != nil {
rt.Logger.Error("error encountered when attempting to update the resource's failover policy status", "error", err)
return err
}

-rt.Logger.Trace("resource's failover policy status was updated",
+conds = computeNewConditions(computedFailoverResource, newComputedFailoverPolicy, service, destServices, missingSamenessGroups)
-"conditions", newStatus.Conditions)
+if err := writeStatus(ctx, rt, computedFailoverResource, conds); err != nil {
+rt.Logger.Error("error encountered when attempting to update the resource's computed failover policy status", "error", err)
+return err
+}

return nil
}

-func computeNewStatus(
+func computeNewConditions(
-failoverPolicy *resource.DecodedResource[*pbcatalog.FailoverPolicy],
+fpRes *pbresource.Resource,
+fp *pbcatalog.ComputedFailoverPolicy,
service *resource.DecodedResource[*pbcatalog.Service],
destServices map[resource.ReferenceKey]*resource.DecodedResource[*pbcatalog.Service],
-) *pbresource.Status {
+missingSamenessGroups map[string]struct{},
-if service == nil {
+) []*pbresource.Condition {
-return &pbresource.Status{
-ObservedGeneration: failoverPolicy.Resource.Generation,
-Conditions: []*pbresource.Condition{
-ConditionMissingService,
-},
-}
-}

allowedPortProtocols := make(map[string]pbcatalog.Protocol)
for _, port := range service.Data.Ports {
@@ -171,25 +182,7 @@ func computeNewStatus(

var conditions []*pbresource.Condition

-if failoverPolicy.Data.Config != nil {
+for port, pc := range fp.GetPortConfigs() {
-for _, dest := range failoverPolicy.Data.Config.Destinations {
-// We know from validation that a Ref must be set, and the type it
-// points to is a Service.
-//
-// Rather than do additional validation, just do a quick
-// belt-and-suspenders check-and-skip if something looks weird.
-if dest.Ref == nil || !isServiceType(dest.Ref.Type) {
-continue
-}

-if cond := serviceHasPort(dest, destServices); cond != nil {
-conditions = append(conditions, cond)
-}
-}
-// TODO: validate that referenced sameness groups exist
-}

-for port, pc := range failoverPolicy.Data.PortConfigs {
if _, ok := allowedPortProtocols[port]; !ok {
conditions = append(conditions, ConditionUnknownPort(port))
}
@@ -208,23 +201,28 @@ func computeNewStatus(
conditions = append(conditions, cond)
}
}

-// TODO: validate that referenced sameness groups exist
}

-if len(conditions) > 0 {
+for destKey, svc := range destServices {
-return &pbresource.Status{
+if svc != nil {
-ObservedGeneration: failoverPolicy.Resource.Generation,
+continue
-Conditions: conditions,
}
+conditions = append(conditions, ConditionMissingDestinationService(destKey.ToReference()))
}

-return &pbresource.Status{
+for sg := range missingSamenessGroups {
-ObservedGeneration: failoverPolicy.Resource.Generation,
+ref := &pbresource.Reference{
-Conditions: []*pbresource.Condition{
+Type: pbmulticluster.SamenessGroupType,
-ConditionOK,
+Tenancy: &pbresource.Tenancy{
+Partition: fpRes.GetId().GetTenancy().GetPartition(),
+PeerName: resource.DefaultPeerName,
},
+Name: sg,
}
+conditions = append(conditions, ConditionMissingSamenessGroup(ref))
+}

+return conditions
}

func serviceHasPort(
@@ -233,8 +231,8 @@ func serviceHasPort(
) *pbresource.Condition {
key := resource.NewReferenceKey(dest.Ref)
destService, ok := destServices[key]
-if !ok {
+if !ok || destService == nil {
-return ConditionMissingDestinationService(dest.Ref)
+return nil
}

found := false
@@ -265,3 +263,139 @@ func isServiceType(typ *pbresource.Type) bool {
}
return false
}

+func deleteResource(ctx context.Context, rt controller.Runtime, resource *pbresource.Resource) error {
+if resource == nil {
+return nil
+}
+_, err := rt.Client.Delete(ctx, &pbresource.DeleteRequest{
+Id: resource.GetId(),
+Version: resource.GetVersion(),
+})
+if err != nil {
+return err
+}
+return nil
+}
+
+func makeComputedFailoverPolicy(ctx context.Context, rt controller.Runtime, sgExpander expander.SamenessGroupExpander, failoverPolicy *resource.DecodedResource[*pbcatalog.FailoverPolicy], service *resource.DecodedResource[*pbcatalog.Service]) (*pbcatalog.ComputedFailoverPolicy, map[resource.ReferenceKey]*resource.DecodedResource[*pbcatalog.Service], map[string]struct{}, error) {
+simplified := types.SimplifyFailoverPolicy(
+service.Data,
+failoverPolicy.Data,
+)
+cfp := &pbcatalog.ComputedFailoverPolicy{
+
+PortConfigs: simplified.PortConfigs,
+}
+missingSamenessGroups := make(map[string]struct{})
+destServices := map[resource.ReferenceKey]*resource.DecodedResource[*pbcatalog.Service]{
+resource.NewReferenceKey(service.Id): service,
+}
+
+// Expand sameness group
+for port, fc := range cfp.PortConfigs {
+if fc.GetSamenessGroup() == "" {
+continue
+}
+
+dests, missing, err := sgExpander.ComputeFailoverDestinationsFromSamenessGroup(rt, failoverPolicy.Id, fc.GetSamenessGroup(), port)
+if err != nil {
+return cfp, nil, missingSamenessGroups, err
+}
+
+if missing != "" {
+delete(cfp.PortConfigs, port)
+missingSamenessGroups[missing] = struct{}{}
+continue
+}
+
+if len(dests) == 0 {
+delete(cfp.PortConfigs, port)
+continue
+}
+
+fc.SamenessGroup = ""
+fc.Destinations = dests
+}
+
+// Filter missing destinations
+for port, fc := range cfp.PortConfigs {
+if len(fc.Destinations) == 0 {
+continue
+}
+
+var err error
+fc.Destinations, err = filterInvalidDests(ctx, rt, fc.Destinations, destServices)
+if err != nil {
+return nil, nil, nil, err
+}
+
+if len(fc.GetDestinations()) == 0 {
+delete(cfp.GetPortConfigs(), port)
+
+}
+}
+
+for ref := range destServices {
+cfp.BoundReferences = append(cfp.BoundReferences, ref.ToReference())
+}
+
+return cfp, destServices, missingSamenessGroups, nil
+}
+
+func filterInvalidDests(ctx context.Context, rt controller.Runtime, dests []*pbcatalog.FailoverDestination, destServices map[resource.ReferenceKey]*resource.DecodedResource[*pbcatalog.Service]) ([]*pbcatalog.FailoverDestination, error) {
+var out []*pbcatalog.FailoverDestination
+for _, dest := range dests {
+ref := resource.NewReferenceKey(dest.Ref)
+if svc, ok := destServices[ref]; ok {
+if svc != nil {
+out = append(out, dest)
+}
+continue
+}
+
+destService, err := resource.GetDecodedResource[*pbcatalog.Service](ctx, rt.Client, resource.IDFromReference(dest.Ref))
+if err != nil {
+rt.Logger.Error("error retrieving destination service while filtering", "service", dest, "error", err)
+return nil, err
+}
+if destService != nil {
+out = append(out, dest)
+}
+destServices[resource.NewReferenceKey(dest.Ref)] = destService
+}
+return out, nil
+}
+
+func writeStatus(ctx context.Context, rt controller.Runtime, res *pbresource.Resource, conditions []*pbresource.Condition) error {
+newStatus := &pbresource.Status{
+ObservedGeneration: res.GetGeneration(),
+Conditions: []*pbresource.Condition{
+ConditionOK,
+},
+}
+
+if len(conditions) > 0 {
+newStatus = &pbresource.Status{
+ObservedGeneration: res.GetGeneration(),
+Conditions: conditions,
+}
+}
+
+if !resource.EqualStatus(res.GetStatus()[ControllerID], newStatus, false) {
+
+_, err := rt.Client.WriteStatus(ctx, &pbresource.WriteStatusRequest{
+Id: res.Id,
+Key: ControllerID,
+Status: newStatus,
+})
+
+if err != nil {
+return err
+}
+rt.Logger.Trace("resource's status was updated",
+"conditions", newStatus.Conditions)
+
+}
+return nil
+}
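The reconciler above now materializes a ComputedFailoverPolicy resource in addition to writing status conditions. A rough sketch of the shape it writes, using only types from this diff; the apiServiceRef variable and the "http" port are illustrative values borrowed from the test that follows:

    // apiServiceRef is assumed to be a *pbresource.Reference to the name-aligned "api" Service.
    expected := &pbcatalog.ComputedFailoverPolicy{
        PortConfigs: map[string]*pbcatalog.FailoverConfig{
            "http": {
                Destinations: []*pbcatalog.FailoverDestination{{
                    Ref:  apiServiceRef,
                    Port: "http",
                }},
            },
        },
        // Every Service consulted while computing the policy is recorded here so the
        // bound-refs index can map later Service changes back to this resource.
        BoundReferences: []*pbresource.Reference{apiServiceRef},
    }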
@@ -7,14 +7,17 @@ import (
"fmt"
"testing"

+"github.com/hashicorp/consul/internal/catalog/internal/controllers/failover/expander"
"github.com/hashicorp/consul/internal/catalog/internal/types"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/controller/controllertest"
+"github.com/hashicorp/consul/internal/multicluster"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/resource/resourcetest"
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
+"github.com/hashicorp/consul/proto/private/prototest"
)

func TestController(t *testing.T) {
@@ -23,9 +26,9 @@ func TestController(t *testing.T) {

clientRaw := controllertest.NewControllerTestBuilder().
WithTenancies(resourcetest.TestTenancies()...).
-WithResourceRegisterFns(types.Register).
+WithResourceRegisterFns(types.Register, multicluster.RegisterTypes).
WithControllerRegisterFns(func(mgr *controller.Manager) {
-mgr.Register(FailoverPolicyController())
+mgr.Register(FailoverPolicyController(expander.GetSamenessGroupExpander()))
}).
Run(t)
@@ -54,7 +57,10 @@ func TestController(t *testing.T) {

t.Cleanup(func() { client.MustDelete(t, failover.Id) })

+var expectedComputedFP *pbcatalog.ComputedFailoverPolicy
+
client.WaitForStatusCondition(t, failover.Id, ControllerID, ConditionMissingService)
+client.RequireResourceNotFound(t, resource.ReplaceType(pbcatalog.ComputedFailoverPolicyType, failover.Id))
t.Logf("reconciled to missing service status")

// Provide the service.
@@ -72,7 +78,34 @@ func TestController(t *testing.T) {

t.Cleanup(func() { client.MustDelete(t, svc.Id) })

-client.WaitForStatusCondition(t, failover.Id, ControllerID, ConditionOK)
+expectedComputedFP = &pbcatalog.ComputedFailoverPolicy{
+PortConfigs: map[string]*pbcatalog.FailoverConfig{
+"http": {
+Destinations: []*pbcatalog.FailoverDestination{{
+Ref: apiServiceRef,
+Port: "http",
+}},
+},
+},
+BoundReferences: []*pbresource.Reference{apiServiceRef},
+}
+
+waitAndAssertComputedFailoverPolicy(t, client, failover.Id, expectedComputedFP, ConditionOK)
+
+t.Log("delete service")
+
+client.MustDelete(t, svc.Id)
+
+client.WaitForReconciliation(t, resource.ReplaceType(pbcatalog.ComputedFailoverPolicyType, failover.Id), ControllerID)
+client.WaitForStatusCondition(t, failover.Id, ControllerID, ConditionMissingService)
+client.RequireResourceNotFound(t, resource.ReplaceType(pbcatalog.ComputedFailoverPolicyType, failover.Id))
+
+// re add the service
+rtest.Resource(pbcatalog.ServiceType, "api").
+WithData(t, apiServiceData).
+WithTenancy(tenancy).
+Write(t, client)
+
t.Logf("reconciled to accepted")

// Update the failover to reference an unknown port
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
svc = rtest.Resource(pbcatalog.FailoverPolicyType, "api").
|
failover = rtest.Resource(pbcatalog.FailoverPolicyType, "api").
|
||||||
WithData(t, failoverData).
|
WithData(t, failoverData).
|
||||||
WithTenancy(tenancy).
|
WithTenancy(tenancy).
|
||||||
Write(t, client)
|
Write(t, client)
|
||||||
|
|
||||||
t.Cleanup(func() { client.MustDelete(t, svc.Id) })
|
t.Cleanup(func() { client.MustDelete(t, failover.Id) })
|
||||||
|
|
||||||
client.WaitForStatusCondition(t, failover.Id, ControllerID, ConditionUnknownPort("admin"))
|
expectedComputedFP = &pbcatalog.ComputedFailoverPolicy{
|
||||||
|
PortConfigs: failoverData.PortConfigs,
|
||||||
|
BoundReferences: []*pbresource.Reference{apiServiceRef},
|
||||||
|
}
|
||||||
|
waitAndAssertComputedFailoverPolicy(t, client, failover.Id, expectedComputedFP, ConditionUnknownPort("admin"))
|
||||||
t.Logf("reconciled to unknown admin port")
|
t.Logf("reconciled to unknown admin port")
|
||||||
|
|
||||||
// update the service to fix the stray reference, but point to a mesh port
|
// update the service to fix the stray reference, but point to a mesh port
|
||||||
|
@@ -123,7 +160,7 @@ func TestController(t *testing.T) {

t.Cleanup(func() { client.MustDelete(t, svc.Id) })

-client.WaitForStatusCondition(t, failover.Id, ControllerID, ConditionUsingMeshDestinationPort(apiServiceRef, "admin"))
+waitAndAssertComputedFailoverPolicy(t, client, failover.Id, expectedComputedFP, ConditionUsingMeshDestinationPort(apiServiceRef, "admin"))
t.Logf("reconciled to using mesh destination port")

// update the service to fix the stray reference to not be a mesh port
@@ -147,7 +184,7 @@ func TestController(t *testing.T) {

t.Cleanup(func() { client.MustDelete(t, svc.Id) })

-client.WaitForStatusCondition(t, failover.Id, ControllerID, ConditionOK)
+waitAndAssertComputedFailoverPolicy(t, client, failover.Id, expectedComputedFP, ConditionOK)
t.Logf("reconciled to accepted")

// change failover leg to point to missing service
@@ -167,14 +204,26 @@ func TestController(t *testing.T) {
},
},
}
-svc = rtest.Resource(pbcatalog.FailoverPolicyType, "api").
+failover = rtest.Resource(pbcatalog.FailoverPolicyType, "api").
WithData(t, failoverData).
WithTenancy(tenancy).
Write(t, client)

-t.Cleanup(func() { client.MustDelete(t, svc.Id) })
+t.Cleanup(func() { client.MustDelete(t, failover.Id) })

-client.WaitForStatusCondition(t, failover.Id, ControllerID, ConditionMissingDestinationService(otherServiceRef))
+expectedComputedFP = &pbcatalog.ComputedFailoverPolicy{
+PortConfigs: map[string]*pbcatalog.FailoverConfig{
+"http": {
+Destinations: []*pbcatalog.FailoverDestination{{
+Ref: apiServiceRef,
+Port: "http",
+}},
+},
+},
+BoundReferences: []*pbresource.Reference{apiServiceRef, otherServiceRef},
+}
+
+waitAndAssertComputedFailoverPolicy(t, client, failover.Id, expectedComputedFP, ConditionMissingDestinationService(otherServiceRef))
t.Logf("reconciled to missing dest service: other")

// Create the missing service, but forget the port.
@@ -192,7 +241,11 @@ func TestController(t *testing.T) {

t.Cleanup(func() { client.MustDelete(t, svc.Id) })

-client.WaitForStatusCondition(t, failover.Id, ControllerID, ConditionUnknownDestinationPort(otherServiceRef, "admin"))
+expectedComputedFP = &pbcatalog.ComputedFailoverPolicy{
+PortConfigs: failoverData.PortConfigs,
+BoundReferences: []*pbresource.Reference{apiServiceRef, otherServiceRef},
+}
+waitAndAssertComputedFailoverPolicy(t, client, failover.Id, expectedComputedFP, ConditionUnknownDestinationPort(otherServiceRef, "admin"))
t.Logf("reconciled to missing dest port other:admin")

// fix the destination leg's port
@@ -274,7 +327,24 @@ func TestController(t *testing.T) {

t.Cleanup(func() { client.MustDelete(t, failover.Id) })

-client.WaitForStatusCondition(t, failover.Id, ControllerID, ConditionUnknownDestinationPort(otherServiceRef, "bar"))
+expectedComputedFP = &pbcatalog.ComputedFailoverPolicy{
+PortConfigs: map[string]*pbcatalog.FailoverConfig{
+"foo": {
+Destinations: []*pbcatalog.FailoverDestination{{
+Ref: otherServiceRef,
+Port: "foo",
+}},
+},
+"bar": {
+Destinations: []*pbcatalog.FailoverDestination{{
+Ref: otherServiceRef,
+Port: "bar",
+}},
+},
+},
+BoundReferences: []*pbresource.Reference{apiServiceRef, otherServiceRef},
+}
+waitAndAssertComputedFailoverPolicy(t, client, failover.Id, expectedComputedFP, ConditionUnknownDestinationPort(otherServiceRef, "bar"))
t.Logf("reconciled to missing dest port other:bar")

// and fix it the silly way by removing it from api+failover
@@ -294,7 +364,18 @@ func TestController(t *testing.T) {

t.Cleanup(func() { client.MustDelete(t, svc.Id) })

-client.WaitForStatusCondition(t, failover.Id, ControllerID, ConditionOK)
+expectedComputedFP = &pbcatalog.ComputedFailoverPolicy{
+PortConfigs: map[string]*pbcatalog.FailoverConfig{
+"foo": {
+Destinations: []*pbcatalog.FailoverDestination{{
+Ref: otherServiceRef,
+Port: "foo",
+}},
+},
+},
+BoundReferences: []*pbresource.Reference{apiServiceRef, otherServiceRef},
+}
+waitAndAssertComputedFailoverPolicy(t, client, failover.Id, expectedComputedFP, ConditionOK)
t.Logf("reconciled to accepted")
})
}
@@ -303,3 +384,15 @@
func tenancySubTestName(tenancy *pbresource.Tenancy) string {
return fmt.Sprintf("%s_Namespace_%s_Partition", tenancy.Namespace, tenancy.Partition)
}
+
+func waitAndAssertComputedFailoverPolicy(t *testing.T, client *rtest.Client, failoverId *pbresource.ID, expectedComputedFP *pbcatalog.ComputedFailoverPolicy, cond *pbresource.Condition) {
+cfpID := resource.ReplaceType(pbcatalog.ComputedFailoverPolicyType, failoverId)
+client.WaitForReconciliation(t, cfpID, ControllerID)
+client.WaitForStatusCondition(t, failoverId, ControllerID, cond)
+client.WaitForStatusCondition(t, cfpID, ControllerID, cond)
+client.WaitForResourceState(t, cfpID, func(t rtest.T, r *pbresource.Resource) {
+computedFp := client.RequireResourceExists(t, cfpID)
+decodedComputedFp := rtest.MustDecode[*pbcatalog.ComputedFailoverPolicy](t, computedFp)
+prototest.AssertDeepEqual(t, expectedComputedFP, decodedComputedFp.Data)
+})
+}
@@ -0,0 +1,12 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+//go:build !consulent
+
+package expander
+
+import "github.com/hashicorp/consul/internal/catalog/internal/controllers/failover/expander/expander_ce"
+
+func GetSamenessGroupExpander() *expander_ce.SamenessGroupExpander {
+return expander_ce.New()
+}
@@ -0,0 +1,38 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package expander_ce
+
+import (
+"github.com/hashicorp/consul/internal/controller"
+"github.com/hashicorp/consul/internal/controller/cache/index"
+"github.com/hashicorp/consul/internal/controller/cache/indexers"
+"github.com/hashicorp/consul/internal/resource"
+pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
+"github.com/hashicorp/consul/proto-public/pbresource"
+)
+
+type SamenessGroupExpander struct{}
+
+func New() *SamenessGroupExpander {
+return &SamenessGroupExpander{}
+}
+
+func (sgE *SamenessGroupExpander) ComputeFailoverDestinationsFromSamenessGroup(rt controller.Runtime, id *pbresource.ID, sg string, port string) ([]*pbcatalog.FailoverDestination, string, error) {
+//no - op for CE
+return nil, "", nil
+}
+
+const sgIndexName = "samenessGroupIndex"
+
+func (sgE *SamenessGroupExpander) GetSamenessGroupIndex() *index.Index {
+return indexers.DecodedMultiIndexer(
+sgIndexName,
+index.ReferenceOrIDFromArgs,
+func(r *resource.DecodedResource[*pbcatalog.FailoverPolicy]) (bool, [][]byte, error) {
+//no - op for CE
+return false, nil, nil
+
+},
+)
+}
@@ -0,0 +1,67 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package expander_ce
+
+import (
+"context"
+"testing"
+
+"github.com/hashicorp/consul/internal/controller"
+"github.com/hashicorp/consul/internal/controller/cache"
+"github.com/hashicorp/consul/internal/resource"
+rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
+pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
+pbmulticluster "github.com/hashicorp/consul/proto-public/pbmulticluster/v2beta1"
+"github.com/hashicorp/consul/proto-public/pbresource"
+"github.com/hashicorp/consul/sdk/testutil"
+"github.com/stretchr/testify/require"
+"github.com/stretchr/testify/suite"
+)
+
+type expanderSuite struct {
+suite.Suite
+ctx context.Context
+cache cache.Cache
+rt controller.Runtime
+tenancies []*pbresource.Tenancy
+
+samenessGroupExpander *SamenessGroupExpander
+}
+
+func (suite *expanderSuite) SetupTest() {
+suite.ctx = testutil.TestContext(suite.T())
+suite.tenancies = rtest.TestTenancies()
+
+suite.samenessGroupExpander = New()
+
+suite.cache = cache.New()
+suite.cache.AddType(pbmulticluster.SamenessGroupType)
+suite.rt = controller.Runtime{
+Cache: suite.cache,
+Logger: testutil.Logger(suite.T()),
+}
+}
+
+func TestExpander(t *testing.T) {
+suite.Run(t, new(expanderSuite))
+}
+func (suite *expanderSuite) Test_ComputeFailoverDestinationsFromSamenessGroup() {
+fpData := &pbcatalog.FailoverPolicy{
+PortConfigs: map[string]*pbcatalog.FailoverConfig{
+"http": {
+SamenessGroup: "sg1",
+},
+},
+}
+fp := rtest.Resource(pbcatalog.FailoverPolicyType, "apisvc").
+WithData(suite.T(), fpData).
+WithTenancy(resource.DefaultNamespacedTenancy()).
+Build()
+decFp, err := resource.Decode[*pbcatalog.FailoverPolicy](fp)
+require.NoError(suite.T(), err)
+dests, sg, err := suite.samenessGroupExpander.ComputeFailoverDestinationsFromSamenessGroup(suite.rt, decFp.Id, "sg1", "http")
+require.NoError(suite.T(), err)
+require.Nil(suite.T(), dests)
+require.Equal(suite.T(), "", sg)
+}
@@ -0,0 +1,17 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package expander
+
+import (
+"github.com/hashicorp/consul/internal/controller"
+"github.com/hashicorp/consul/internal/controller/cache/index"
+pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
+"github.com/hashicorp/consul/proto-public/pbresource"
+)
+
+// SamenessgroupExpander is used to expand sameness group for a ComputedFailover resource
+type SamenessGroupExpander interface {
+ComputeFailoverDestinationsFromSamenessGroup(rt controller.Runtime, id *pbresource.ID, sg string, port string) ([]*pbcatalog.FailoverDestination, string, error)
+GetSamenessGroupIndex() *index.Index
+}
@@ -0,0 +1,14 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+//go:build !consulent
+
+package failover
+
+import (
+"github.com/hashicorp/consul/internal/controller"
+)
+
+func registerEnterpriseControllerWatchers(ctrl *controller.Controller) *controller.Controller {
+return ctrl
+}
@@ -29,6 +29,9 @@ const (

UsingMeshDestinationPortReason = "UsingMeshDestinationPort"
UsingMeshDestinationPortMessagePrefix = "port is a special unroutable mesh port on destination service: "

+MissingSamenessGroupReason = "MissingSamenessGroup"
+MissingSamenessGroupMessagePrefix = "referenced sameness group does not exist: "
)

var (
|
||||||
Message: UnknownDestinationPortMessagePrefix + port + " on " + resource.ReferenceToString(ref),
|
Message: UnknownDestinationPortMessagePrefix + port + " on " + resource.ReferenceToString(ref),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ConditionMissingSamenessGroup(ref *pbresource.Reference) *pbresource.Condition {
|
||||||
|
return &pbresource.Condition{
|
||||||
|
Type: StatusConditionAccepted,
|
||||||
|
State: pbresource.Condition_STATE_FALSE,
|
||||||
|
Reason: MissingSamenessGroupReason,
|
||||||
|
Message: MissingSamenessGroupMessagePrefix + resource.ReferenceToString(ref),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@@ -6,6 +6,7 @@ package controllers
import (
"github.com/hashicorp/consul/internal/catalog/internal/controllers/endpoints"
"github.com/hashicorp/consul/internal/catalog/internal/controllers/failover"
+"github.com/hashicorp/consul/internal/catalog/internal/controllers/failover/expander"
"github.com/hashicorp/consul/internal/catalog/internal/controllers/nodehealth"
"github.com/hashicorp/consul/internal/catalog/internal/controllers/workloadhealth"
"github.com/hashicorp/consul/internal/controller"
@@ -15,5 +16,5 @@ func Register(mgr *controller.Manager) {
mgr.Register(nodehealth.NodeHealthController())
mgr.Register(workloadhealth.WorkloadHealthController())
mgr.Register(endpoints.ServiceEndpointsController())
-mgr.Register(failover.FailoverPolicyController())
+mgr.Register(failover.FailoverPolicyController(expander.GetSamenessGroupExpander()))
}
@@ -30,26 +30,49 @@ func RegisterComputedFailoverPolicy(r resource.Registry) {
var ValidateComputedFailoverPolicy = resource.DecodeAndValidate(validateComputedFailoverPolicy)

func validateComputedFailoverPolicy(res *DecodedComputedFailoverPolicy) error {
-if res.Data.Config != nil && res.Data.Config.SamenessGroup != "" {
+if res.Data.Config != nil {
-return fmt.Errorf(`invalid "config" field: computed failover policy cannot have a sameness_group`)
+return fmt.Errorf(`invalid "config" field: computed failover policy cannot have a config`)
}
for _, fc := range res.Data.PortConfigs {
if fc.GetSamenessGroup() != "" {
return fmt.Errorf(`invalid "config" field: computed failover policy cannot have a sameness_group`)
}
}
-dfp := convertToDecodedFailoverPolicy(res)
+return validateCommonFailoverConfigs(&pbcatalog.FailoverPolicy{
-return validateFailoverPolicy(dfp)
+Config: res.Data.Config,
+PortConfigs: res.Data.PortConfigs,
+})
}

func aclWriteHookComputedFailoverPolicy(authorizer acl.Authorizer, authzContext *acl.AuthorizerContext, res *DecodedComputedFailoverPolicy) error {
-dfp := convertToDecodedFailoverPolicy(res)
+// FailoverPolicy is name-aligned with Service
-return aclWriteHookFailoverPolicy(authorizer, authzContext, dfp)
+serviceName := res.Id.Name
-}

-func convertToDecodedFailoverPolicy(res *DecodedComputedFailoverPolicy) *DecodedFailoverPolicy {
+// Check service:write permissions on the service this is controlling.
-dfp := &DecodedFailoverPolicy{}
+if err := authorizer.ToAllowAuthorizer().ServiceWriteAllowed(serviceName, authzContext); err != nil {
-dfp.Data = (*pbcatalog.FailoverPolicy)(res.GetData())
+return err
-dfp.Resource = res.GetResource()
+}
-return dfp
+// Ensure you have service:read on any destination that may be affected by
+// traffic FROM this config change.
+if res.Data.Config != nil {
+for _, dest := range res.Data.Config.Destinations {
+destAuthzContext := resource.AuthorizerContext(dest.Ref.GetTenancy())
+destServiceName := dest.Ref.GetName()
+if err := authorizer.ToAllowAuthorizer().ServiceReadAllowed(destServiceName, destAuthzContext); err != nil {
+return err
+}
+}
+}
+for _, pc := range res.Data.PortConfigs {
+for _, dest := range pc.Destinations {
+destAuthzContext := resource.AuthorizerContext(dest.Ref.GetTenancy())
+destServiceName := dest.Ref.GetName()
+if err := authorizer.ToAllowAuthorizer().ServiceReadAllowed(destServiceName, destAuthzContext); err != nil {
+return err
+}
+}
+}
+
+return nil
}
@@ -56,14 +56,14 @@ func TestValidateComputedFailoverPolicy(t *testing.T) {
testutil.RequireErrorContains(t, err, tc.expectErr)
}
}
-cases := convertToComputedFailverPolicyTestCases(getCommonTestCases())
+cases := convertToComputedFailverPolicyTestCases(getComputedFailoverCases())
-cases["plain config: sameness_group"] = computedFailoverTestcase{
+cases["plain config: config"] = computedFailoverTestcase{
failover: &pbcatalog.ComputedFailoverPolicy{
Config: &pbcatalog.FailoverConfig{
SamenessGroup: "test",
},
},
-expectErr: `invalid "config" field: computed failover policy cannot have a sameness_group`,
+expectErr: `invalid "config" field: computed failover policy cannot have a config`,
}
cases["ported config: sameness_group"] = computedFailoverTestcase{
failover: &pbcatalog.ComputedFailoverPolicy{
|
@ -109,7 +109,7 @@ func testFailOverPolicyAcls(t *testing.T, isComputedFailoverPolicy bool) {
|
||||||
if isComputedFailoverPolicy {
|
if isComputedFailoverPolicy {
|
||||||
typ = pbcatalog.ComputedFailoverPolicyType
|
typ = pbcatalog.ComputedFailoverPolicyType
|
||||||
cfgData = &pbcatalog.ComputedFailoverPolicy{
|
cfgData = &pbcatalog.ComputedFailoverPolicy{
|
||||||
Config: cfgDests,
|
PortConfigs: portedCfgDests,
|
||||||
}
|
}
|
||||||
portedCfgData = &pbcatalog.ComputedFailoverPolicy{
|
portedCfgData = &pbcatalog.ComputedFailoverPolicy{
|
||||||
PortConfigs: portedCfgDests,
|
PortConfigs: portedCfgDests,
|
||||||
|
|
|
@@ -115,19 +115,27 @@ func validateFailoverPolicy(res *DecodedFailoverPolicy) error {
})
}

-if res.Data.Config != nil {
+if err := validateCommonFailoverConfigs(res.Data); err != nil {
+merr = multierror.Append(merr, err)
+}
+return merr
+}
+
+func validateCommonFailoverConfigs(res *pbcatalog.FailoverPolicy) error {
+var merr error
+if res.Config != nil {
wrapConfigErr := func(err error) error {
return resource.ErrInvalidField{
Name: "config",
Wrapped: err,
}
}
-if cfgErr := validateFailoverConfig(res.Data.Config, false, wrapConfigErr); cfgErr != nil {
+if cfgErr := validateFailoverConfig(res.Config, false, wrapConfigErr); cfgErr != nil {
merr = multierror.Append(merr, cfgErr)
}
}

-for portName, pc := range res.Data.PortConfigs {
+for portName, pc := range res.PortConfigs {
wrapConfigErr := func(err error) error {
return resource.ErrInvalidMapValue{
Map: "port_configs",
|
||||||
merr = multierror.Append(merr, cfgErr)
|
merr = multierror.Append(merr, cfgErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: should sameness group be a ref once that's a resource?
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return merr
|
return merr
|
||||||
|
|
|
@@ -241,7 +241,112 @@ func addFailoverConfigSamenessGroupCases(fpcases map[string]failoverTestcase) {
}
}

-func getCommonTestCases() map[string]failoverTestcase {
+func getComputedFailoverCases() map[string]failoverTestcase {
+configCases := getCommonConfigCases()
+fpcases := getCommonFpCases()
+for name, tc := range configCases {
+fpcases["ported config: "+name] = failoverTestcase{
+failover: &pbcatalog.FailoverPolicy{
+PortConfigs: map[string]*pbcatalog.FailoverConfig{
+"http": proto.Clone(tc.config).(*pbcatalog.FailoverConfig),
+},
+},
+expectErr: maybeWrap(`invalid value of key "http" within port_configs: `, tc.expectErr),
+}
+}
+return fpcases
+}
+func getFailoverCases() map[string]failoverTestcase {
+configCases := getCommonConfigCases()
+fpcases := getCommonFpCases()
+fpcases["non-empty: some plain config but no port configs"] = failoverTestcase{
+failover: &pbcatalog.FailoverPolicy{
+Config: &pbcatalog.FailoverConfig{
+Destinations: []*pbcatalog.FailoverDestination{
+{Ref: newRef(pbcatalog.ServiceType, "api-backup")},
+},
+},
+},
+}
+// plain config
+fpcases["plain config: bad dest: any port name"] = failoverTestcase{
+failover: &pbcatalog.FailoverPolicy{
+Config: &pbcatalog.FailoverConfig{
+Destinations: []*pbcatalog.FailoverDestination{
+{Ref: newRef(pbcatalog.ServiceType, "api-backup"), Port: "web"},
+},
+},
+},
+expectErr: `invalid "config" field: invalid element at index 0 of list "destinations": invalid "port" field: ports cannot be specified explicitly for the general failover section since it relies upon port alignment`,
+}
+// emptiness
+fpcases["empty"] = failoverTestcase{
+failover: &pbcatalog.FailoverPolicy{},
+expectErr: `invalid "config" field: at least one of config or port_configs must be set`,
+}
+
+for name, tc := range configCases {
+fpcases["plain config: "+name] = failoverTestcase{
+failover: &pbcatalog.FailoverPolicy{
+Config: proto.Clone(tc.config).(*pbcatalog.FailoverConfig),
+},
+expectErr: maybeWrap(`invalid "config" field: `, tc.expectErr),
+}
+
+fpcases["ported config: "+name] = failoverTestcase{
+failover: &pbcatalog.FailoverPolicy{
+PortConfigs: map[string]*pbcatalog.FailoverConfig{
+"http": proto.Clone(tc.config).(*pbcatalog.FailoverConfig),
+},
+},
+expectErr: maybeWrap(`invalid value of key "http" within port_configs: `, tc.expectErr),
+}
+}
+addFailoverConfigSamenessGroupCases(fpcases)
+return fpcases
+}
+func getCommonFpCases() map[string]failoverTestcase {
+fpcases := map[string]failoverTestcase{
+"non-empty: one port config but no plain config": {
+failover: &pbcatalog.FailoverPolicy{
+PortConfigs: map[string]*pbcatalog.FailoverConfig{
+"http": {
+Destinations: []*pbcatalog.FailoverDestination{
+{Ref: newRef(pbcatalog.ServiceType, "api-backup")},
+},
+},
+},
+},
+},
+// ported config
+"ported config: bad dest: invalid port name": {
+failover: &pbcatalog.FailoverPolicy{
+PortConfigs: map[string]*pbcatalog.FailoverConfig{
+"http": {
+Destinations: []*pbcatalog.FailoverDestination{
+{Ref: newRef(pbcatalog.ServiceType, "api-backup"), Port: "$bad$"},
+},
+},
+},
+},
+expectErr: `invalid value of key "http" within port_configs: invalid element at index 0 of list "destinations": invalid "port" field: value must match regex: ^[a-z0-9]([a-z0-9\-_]*[a-z0-9])?$`,
+},
+"ported config: bad ported in map": {
+failover: &pbcatalog.FailoverPolicy{
+PortConfigs: map[string]*pbcatalog.FailoverConfig{
+"$bad$": {
+Destinations: []*pbcatalog.FailoverDestination{
+{Ref: newRef(pbcatalog.ServiceType, "api-backup"), Port: "http"},
+},
+},
+},
+},
+expectErr: `map port_configs contains an invalid key - "$bad$": value must match regex: ^[a-z0-9]([a-z0-9\-_]*[a-z0-9])?$`,
+},
+}
+return fpcases
+}
+func getCommonConfigCases() map[string]configTestcase {
configCases := map[string]configTestcase{
"dest without sameness": {
config: &pbcatalog.FailoverConfig{
@@ -323,89 +428,7 @@ func getCommonTestCases() map[string]failoverTestcase {
},
},
}
+return configCases
-fpcases := map[string]failoverTestcase{
-// emptiness
-"empty": {
-failover: &pbcatalog.FailoverPolicy{},
-expectErr: `invalid "config" field: at least one of config or port_configs must be set`,
-},
-"non-empty: one port config but no plain config": {
-failover: &pbcatalog.FailoverPolicy{
-PortConfigs: map[string]*pbcatalog.FailoverConfig{
-"http": {
-Destinations: []*pbcatalog.FailoverDestination{
-{Ref: newRef(pbcatalog.ServiceType, "api-backup")},
-},
-},
-},
-},
-},
-"non-empty: some plain config but no port configs": {
-failover: &pbcatalog.FailoverPolicy{
-Config: &pbcatalog.FailoverConfig{
-Destinations: []*pbcatalog.FailoverDestination{
-{Ref: newRef(pbcatalog.ServiceType, "api-backup")},
-},
-},
-},
-},
-// plain config
-"plain config: bad dest: any port name": {
-failover: &pbcatalog.FailoverPolicy{
-Config: &pbcatalog.FailoverConfig{
-Destinations: []*pbcatalog.FailoverDestination{
-{Ref: newRef(pbcatalog.ServiceType, "api-backup"), Port: "web"},
-},
-},
-},
-expectErr: `invalid "config" field: invalid element at index 0 of list "destinations": invalid "port" field: ports cannot be specified explicitly for the general failover section since it relies upon port alignment`,
-},
-// ported config
-"ported config: bad dest: invalid port name": {
-failover: &pbcatalog.FailoverPolicy{
-PortConfigs: map[string]*pbcatalog.FailoverConfig{
-"http": {
-Destinations: []*pbcatalog.FailoverDestination{
-{Ref: newRef(pbcatalog.ServiceType, "api-backup"), Port: "$bad$"},
-},
-},
-},
-},
-expectErr: `invalid value of key "http" within port_configs: invalid element at index 0 of list "destinations": invalid "port" field: value must match regex: ^[a-z0-9]([a-z0-9\-_]*[a-z0-9])?$`,
-},
-"ported config: bad ported in map": {
-failover: &pbcatalog.FailoverPolicy{
-PortConfigs: map[string]*pbcatalog.FailoverConfig{
-"$bad$": {
-Destinations: []*pbcatalog.FailoverDestination{
-{Ref: newRef(pbcatalog.ServiceType, "api-backup"), Port: "http"},
-},
-},
-},
-},
-expectErr: `map port_configs contains an invalid key - "$bad$": value must match regex: ^[a-z0-9]([a-z0-9\-_]*[a-z0-9])?$`,
-},
-}
-
-for name, tc := range configCases {
-fpcases["plain config: "+name] = failoverTestcase{
-failover: &pbcatalog.FailoverPolicy{
-Config: proto.Clone(tc.config).(*pbcatalog.FailoverConfig),
-},
-expectErr: maybeWrap(`invalid "config" field: `, tc.expectErr),
-}
-
-fpcases["ported config: "+name] = failoverTestcase{
-failover: &pbcatalog.FailoverPolicy{
-PortConfigs: map[string]*pbcatalog.FailoverConfig{
-"http": proto.Clone(tc.config).(*pbcatalog.FailoverConfig),
-},
-},
-expectErr: maybeWrap(`invalid value of key "http" within port_configs: `, tc.expectErr),
-}
-}
-return fpcases
}
|
}
|
||||||
|
|
||||||
 func TestValidateFailoverPolicy(t *testing.T) {
@@ -434,8 +457,7 @@ func TestValidateFailoverPolicy(t *testing.T) {
 		}
 	}

-	cases := getCommonTestCases()
+	cases := getFailoverCases()
-	addFailoverConfigSamenessGroupCases(cases)

 	for name, tc := range cases {
 		t.Run(name, func(t *testing.T) {
@@ -10,7 +10,7 @@
 package catalogv2beta1

 import (
-	_ "github.com/hashicorp/consul/proto-public/pbresource"
+	pbresource "github.com/hashicorp/consul/proto-public/pbresource"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
 	reflect "reflect"
@@ -35,6 +35,9 @@ type ComputedFailoverPolicy struct {
 	// PortConfigs defines failover for a specific port on this service and takes
 	// precedence over Config.
 	PortConfigs map[string]*FailoverConfig `protobuf:"bytes,2,rep,name=port_configs,json=portConfigs,proto3" json:"port_configs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// BoundReferences is a slice of mixed type references of resources that were
+	// involved in the formulation of this resource.
+	BoundReferences []*pbresource.Reference `protobuf:"bytes,3,rep,name=bound_references,json=boundReferences,proto3" json:"bound_references,omitempty"`
 }

 func (x *ComputedFailoverPolicy) Reset() {
@@ -83,6 +86,13 @@ func (x *ComputedFailoverPolicy) GetPortConfigs() map[string]*FailoverConfig {
 	return nil
 }

+func (x *ComputedFailoverPolicy) GetBoundReferences() []*pbresource.Reference {
+	if x != nil {
+		return x.BoundReferences
+	}
+	return nil
+}
+
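The generated getter keeps the usual nil-receiver guarantee, which callers of the computed type can lean on; a small illustrative snippet, assuming the pbcatalog import used elsewhere in this commit (the variable names are invented for the example):

// Both calls are safe even when the resource data has not been populated yet.
var cfp *pbcatalog.ComputedFailoverPolicy
refs := cfp.GetBoundReferences() // nil, no panic
cfp = &pbcatalog.ComputedFailoverPolicy{}
refs = append(refs, cfp.GetBoundReferences()...) // still empty, still safe
_ = refs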
 var File_pbcatalog_v2beta1_computed_failover_policy_proto protoreflect.FileDescriptor

 var file_pbcatalog_v2beta1_computed_failover_policy_proto_rawDesc = []byte{
@@ -95,48 +105,54 @@ var file_pbcatalog_v2beta1_computed_failover_policy_proto_rawDesc = []byte{
 	0x76, 0x32, 0x62, 0x65, 0x74, 0x61, 0x31, 0x2f, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72,
 	0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x70,
 	0x62, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
 	// ... raw descriptor bytes regenerated to import pbresource/resource.proto and encode the new bound_references field ...
 }

 var (
@@ -156,16 +172,18 @@ var file_pbcatalog_v2beta1_computed_failover_policy_proto_goTypes = []interface{
 	(*ComputedFailoverPolicy)(nil), // 0: hashicorp.consul.catalog.v2beta1.ComputedFailoverPolicy
 	nil,                            // 1: hashicorp.consul.catalog.v2beta1.ComputedFailoverPolicy.PortConfigsEntry
 	(*FailoverConfig)(nil),         // 2: hashicorp.consul.catalog.v2beta1.FailoverConfig
+	(*pbresource.Reference)(nil),   // 3: hashicorp.consul.resource.Reference
 }
 var file_pbcatalog_v2beta1_computed_failover_policy_proto_depIdxs = []int32{
 	2, // 0: hashicorp.consul.catalog.v2beta1.ComputedFailoverPolicy.config:type_name -> hashicorp.consul.catalog.v2beta1.FailoverConfig
 	1, // 1: hashicorp.consul.catalog.v2beta1.ComputedFailoverPolicy.port_configs:type_name -> hashicorp.consul.catalog.v2beta1.ComputedFailoverPolicy.PortConfigsEntry
-	2, // 2: hashicorp.consul.catalog.v2beta1.ComputedFailoverPolicy.PortConfigsEntry.value:type_name -> hashicorp.consul.catalog.v2beta1.FailoverConfig
-	3, // [3:3] is the sub-list for method output_type
-	3, // [3:3] is the sub-list for method input_type
-	3, // [3:3] is the sub-list for extension type_name
-	3, // [3:3] is the sub-list for extension extendee
-	0, // [0:3] is the sub-list for field type_name
+	3, // 2: hashicorp.consul.catalog.v2beta1.ComputedFailoverPolicy.bound_references:type_name -> hashicorp.consul.resource.Reference
+	2, // 3: hashicorp.consul.catalog.v2beta1.ComputedFailoverPolicy.PortConfigsEntry.value:type_name -> hashicorp.consul.catalog.v2beta1.FailoverConfig
+	4, // [4:4] is the sub-list for method output_type
+	4, // [4:4] is the sub-list for method input_type
+	4, // [4:4] is the sub-list for extension type_name
+	4, // [4:4] is the sub-list for extension extendee
+	0, // [0:4] is the sub-list for field type_name
 }

 func init() { file_pbcatalog_v2beta1_computed_failover_policy_proto_init() }
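One way to sanity-check the regenerated descriptor wiring above is to look the new field up through protoreflect. A sketch only, assuming the generated package import path shown earlier in this commit:

package main

import (
	"fmt"

	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
)

func main() {
	md := (&pbcatalog.ComputedFailoverPolicy{}).ProtoReflect().Descriptor()
	fd := md.Fields().ByName("bound_references")
	// Expected: a repeated message field pointing at hashicorp.consul.resource.Reference.
	fmt.Println(fd.Cardinality(), fd.Kind(), fd.Message().FullName())
}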
@@ -7,6 +7,7 @@ package hashicorp.consul.catalog.v2beta1;

 import "pbcatalog/v2beta1/failover_policy.proto";
 import "pbresource/annotations.proto";
+import "pbresource/resource.proto";

 // This is a Resource type.
 message ComputedFailoverPolicy {
@@ -18,4 +19,8 @@ message ComputedFailoverPolicy {
   // PortConfigs defines failover for a specific port on this service and takes
   // precedence over Config.
   map<string, FailoverConfig> port_configs = 2;
+
+  // BoundReferences is a slice of mixed type references of resources that were
+  // involved in the formulation of this resource.
+  repeated hashicorp.consul.resource.Reference bound_references = 3;
 }
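For orientation, a hedged sketch of what a populated ComputedFailoverPolicy could look like on the Go side once this field exists, assuming the pbcatalog and pbresource imports used elsewhere in this commit; the service name and port are invented for the example:

cfp := &pbcatalog.ComputedFailoverPolicy{
	PortConfigs: map[string]*pbcatalog.FailoverConfig{
		"http": {
			Destinations: []*pbcatalog.FailoverDestination{
				{
					Ref:  &pbresource.Reference{Type: pbcatalog.ServiceType, Name: "api-backup"},
					Port: "http",
				},
			},
		},
	},
	// BoundReferences records every resource consulted while computing the policy,
	// here just the destination service itself.
	BoundReferences: []*pbresource.Reference{
		{Type: pbcatalog.ServiceType, Name: "api-backup"},
	},
}
for _, ref := range cfp.GetBoundReferences() {
	_ = ref.GetName()
}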