mirror of https://github.com/status-im/consul.git
state: prohibit exported discovery chains from having cross-datacenter or cross-partition references (#13726)
Because peerings are pairwise between two (datacenter, partition) tuples, any exported reference via a discovery chain that crosses out of the peered datacenter or partition ultimately cannot work, for various reasons. The biggest one is that there is no way at the ultimate destination to configure an intention that allows an external SpiffeID to access a service. This PR ensures that a user simply cannot create such a configuration, so they won't run into weird situations like this.
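To make the new restriction concrete, here is a minimal sketch of a configuration pair that the state store now refuses to write, mirroring the "cannot peer export cross-dc redirect" test case added in this diff; the service, datacenter, and peer names ("main", "dc3", "my-peer") are purely illustrative.

```go
package example

import "github.com/hashicorp/consul/agent/structs"

// exampleRejectedConfig builds the two config entries from the new test case:
// a resolver that redirects "main" to another datacenter, and an
// exported-services entry that exports "main" to a peer. With this change,
// writing the exported-services entry fails validation with
// "contains cross-datacenter resolver redirect".
func exampleRejectedConfig() (structs.ConfigEntry, structs.ConfigEntry) {
	resolver := &structs.ServiceResolverConfigEntry{
		Kind: structs.ServiceResolver,
		Name: "main",
		Redirect: &structs.ServiceResolverRedirect{
			Datacenter: "dc3", // crosses out of the exporting datacenter
		},
	}
	export := &structs.ExportedServicesConfigEntry{
		Name: "default",
		Services: []structs.ExportedService{{
			Name:      "main",
			Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
		}},
	}
	return resolver, export
}
```

The same check also rejects cross-partition resolver redirects, cross-datacenter failover, and cross-partition route or split destinations, as exercised by the other new test cases in this diff.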
This commit is contained in:
parent 75768a2039
commit 2317f37b4d
@@ -138,7 +138,12 @@ func (s *Store) ConfigEntriesByKind(ws memdb.WatchSet, kind string, entMeta *acl
	return configEntriesByKindTxn(tx, ws, kind, entMeta)
}

func listDiscoveryChainNamesTxn(tx ReadTxn, ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []structs.ServiceName, error) {
func listDiscoveryChainNamesTxn(
	tx ReadTxn,
	ws memdb.WatchSet,
	overrides map[configentry.KindName]structs.ConfigEntry,
	entMeta acl.EnterpriseMeta,
) (uint64, []structs.ServiceName, error) {
	// Get the index and watch for updates
	idx := maxIndexWatchTxn(tx, ws, tableConfigEntries)
@@ -160,6 +165,15 @@ func listDiscoveryChainNamesTxn(tx ReadTxn, ws memdb.WatchSet, entMeta acl.Enter
			sn := structs.NewServiceName(entry.GetName(), entry.GetEnterpriseMeta())
			seen[sn] = struct{}{}
		}

		for kn, entry := range overrides {
			sn := structs.NewServiceName(kn.Name, &kn.EnterpriseMeta)
			if entry != nil {
				seen[sn] = struct{}{}
			} else {
				delete(seen, sn)
			}
		}
	}

	results := maps.SliceOfKeys(seen)
@@ -506,7 +520,7 @@ var serviceGraphKinds = []string{

// discoveryChainTargets will return a list of services listed as a target for the input's discovery chain
func (s *Store) discoveryChainTargetsTxn(tx ReadTxn, ws memdb.WatchSet, dc, service string, entMeta *acl.EnterpriseMeta) (uint64, []structs.ServiceName, error) {
	idx, targets, err := s.discoveryChainOriginalTargetsTxn(tx, ws, dc, service, entMeta)
	idx, targets, err := discoveryChainOriginalTargetsTxn(tx, ws, dc, service, entMeta)
	if err != nil {
		return 0, nil, err
	}
@@ -524,7 +538,12 @@ func (s *Store) discoveryChainTargetsTxn(tx ReadTxn, ws memdb.WatchSet, dc, serv
	return idx, resp, nil
}

func (s *Store) discoveryChainOriginalTargetsTxn(tx ReadTxn, ws memdb.WatchSet, dc, service string, entMeta *acl.EnterpriseMeta) (uint64, []*structs.DiscoveryTarget, error) {
func discoveryChainOriginalTargetsTxn(
	tx ReadTxn,
	ws memdb.WatchSet,
	dc, service string,
	entMeta *acl.EnterpriseMeta,
) (uint64, []*structs.DiscoveryTarget, error) {
	source := structs.NewServiceName(service, entMeta)
	req := discoverychain.CompileRequest{
		ServiceName: source.Name,
@@ -532,7 +551,7 @@ func (s *Store) discoveryChainOriginalTargetsTxn(tx ReadTxn, ws memdb.WatchSet,
		EvaluateInPartition:  source.PartitionOrDefault(),
		EvaluateInDatacenter: dc,
	}
	idx, chain, _, err := s.serviceDiscoveryChainTxn(tx, ws, source.Name, entMeta, req)
	idx, chain, _, err := serviceDiscoveryChainTxn(tx, ws, source.Name, entMeta, req)
	if err != nil {
		return 0, nil, fmt.Errorf("failed to fetch discovery chain for %q: %v", source.String(), err)
	}
@@ -579,7 +598,7 @@ func (s *Store) discoveryChainSourcesTxn(tx ReadTxn, ws memdb.WatchSet, dc strin
			EvaluateInPartition:  sn.PartitionOrDefault(),
			EvaluateInDatacenter: dc,
		}
		idx, chain, _, err := s.serviceDiscoveryChainTxn(tx, ws, sn.Name, &sn.EnterpriseMeta, req)
		idx, chain, _, err := serviceDiscoveryChainTxn(tx, ws, sn.Name, &sn.EnterpriseMeta, req)
		if err != nil {
			return 0, nil, fmt.Errorf("failed to fetch discovery chain for %q: %v", sn.String(), err)
		}
@@ -620,7 +639,28 @@ func validateProposedConfigEntryInServiceGraph(
	wildcardEntMeta := kindName.WithWildcardNamespace()

	switch kindName.Kind {
	case structs.ExportedServices, structs.MeshConfig:
	case structs.ExportedServices:
		// This is the case for deleting a config entry
		if newEntry == nil {
			return nil
		}

		entry := newEntry.(*structs.ExportedServicesConfigEntry)

		_, serviceList, err := listServicesExportedToAnyPeerByConfigEntry(nil, tx, entry, nil)
		if err != nil {
			return err
		}

		for _, sn := range serviceList {
			if err := validateChainIsPeerExportSafe(tx, sn, nil); err != nil {
				return err
			}
		}

		return nil

	case structs.MeshConfig:
		// Exported services and mesh config do not influence discovery chains.
		return nil
@@ -759,8 +799,9 @@ func validateProposedConfigEntryInServiceGraph(
	}

	var (
		svcProtocols   = make(map[structs.ServiceID]string)
		svcTopNodeType = make(map[structs.ServiceID]string)
		svcProtocols                = make(map[structs.ServiceID]string)
		svcTopNodeType              = make(map[structs.ServiceID]string)
		exportedServicesByPartition = make(map[string]map[structs.ServiceName]struct{})
	)
	for chain := range checkChains {
		protocol, topNode, err := testCompileDiscoveryChain(tx, chain.ID, overrides, &chain.EnterpriseMeta)
@@ -769,6 +810,27 @@ func validateProposedConfigEntryInServiceGraph(
		}
		svcProtocols[chain] = protocol
		svcTopNodeType[chain] = topNode.Type

		chainSvc := structs.NewServiceName(chain.ID, &chain.EnterpriseMeta)

		// Validate that we aren't adding a cross-datacenter or cross-partition
		// reference to a peer-exported service's discovery chain by this pending
		// edit.
		partition := chain.PartitionOrDefault()
		exportedServices, ok := exportedServicesByPartition[partition]
		if !ok {
			entMeta := structs.NodeEnterpriseMetaInPartition(partition)
			_, exportedServices, err = listAllExportedServices(nil, tx, overrides, *entMeta)
			if err != nil {
				return err
			}
			exportedServicesByPartition[partition] = exportedServices
		}
		if _, exported := exportedServices[chainSvc]; exported {
			if err := validateChainIsPeerExportSafe(tx, chainSvc, overrides); err != nil {
				return err
			}
		}
	}

	// Now validate all of our ingress gateways.
@@ -828,9 +890,75 @@ func validateProposedConfigEntryInServiceGraph(
	return nil
}

func validateChainIsPeerExportSafe(
	tx ReadTxn,
	exportedSvc structs.ServiceName,
	overrides map[configentry.KindName]structs.ConfigEntry,
) error {
	_, chainEntries, err := readDiscoveryChainConfigEntriesTxn(tx, nil, exportedSvc.Name, overrides, &exportedSvc.EnterpriseMeta)
	if err != nil {
		return fmt.Errorf("error reading discovery chain for %q during config entry validation: %w", exportedSvc, err)
	}

	emptyOrMatchesEntryPartition := func(entry structs.ConfigEntry, found string) bool {
		if found == "" {
			return true
		}
		return acl.EqualPartitions(entry.GetEnterpriseMeta().PartitionOrEmpty(), found)
	}

	for _, e := range chainEntries.Routers {
		for _, route := range e.Routes {
			if route.Destination == nil {
				continue
			}
			if !emptyOrMatchesEntryPartition(e, route.Destination.Partition) {
				return fmt.Errorf("peer exported service %q contains cross-partition route destination", exportedSvc)
			}
		}
	}

	for _, e := range chainEntries.Splitters {
		for _, split := range e.Splits {
			if !emptyOrMatchesEntryPartition(e, split.Partition) {
				return fmt.Errorf("peer exported service %q contains cross-partition split destination", exportedSvc)
			}
		}
	}

	for _, e := range chainEntries.Resolvers {
		if e.Redirect != nil {
			if e.Redirect.Datacenter != "" {
				return fmt.Errorf("peer exported service %q contains cross-datacenter resolver redirect", exportedSvc)
			}
			if !emptyOrMatchesEntryPartition(e, e.Redirect.Partition) {
				return fmt.Errorf("peer exported service %q contains cross-partition resolver redirect", exportedSvc)
			}
		}
		if e.Failover != nil {
			for _, failover := range e.Failover {
				if len(failover.Datacenters) > 0 {
					return fmt.Errorf("peer exported service %q contains cross-datacenter failover", exportedSvc)
				}
			}
		}
	}

	return nil
}

// testCompileDiscoveryChain speculatively compiles a discovery chain with
// pending modifications to see if it would be valid. Also returns the computed
// protocol and topmost discovery chain node.
//
// If provided, the overrides map will service reads of specific config entries
// instead of the state store if the config entry kind name is present in the
// map. A nil in the map implies that the config entry should be tombstoned
// during evaluation and treated as erased.
//
// The override map lets us speculatively compile a discovery chain to see if
// doing so would error, so we can ultimately block config entry writes from
// happening.
func testCompileDiscoveryChain(
	tx ReadTxn,
	chainName string,
@@ -871,10 +999,10 @@ func (s *Store) ServiceDiscoveryChain(
	tx := s.db.ReadTxn()
	defer tx.Abort()

	return s.serviceDiscoveryChainTxn(tx, ws, serviceName, entMeta, req)
	return serviceDiscoveryChainTxn(tx, ws, serviceName, entMeta, req)
}

func (s *Store) serviceDiscoveryChainTxn(
func serviceDiscoveryChainTxn(
	tx ReadTxn,
	ws memdb.WatchSet,
	serviceName string,
@@ -888,7 +1016,7 @@ func (s *Store) serviceDiscoveryChainTxn(
	}
	req.Entries = entries

	_, config, err := s.CAConfig(ws)
	_, config, err := caConfigTxn(tx, ws)
	if err != nil {
		return 0, nil, nil, err
	} else if config == nil {
@@ -1268,7 +1396,9 @@ func anyKey(m map[structs.ServiceID]struct{}) (structs.ServiceID, bool) {
// getProxyConfigEntryTxn is a convenience method for fetching a
// proxy-defaults kind of config entry.
//
// If an override is returned the index returned will be 0.
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getProxyConfigEntryTxn(
	tx ReadTxn,
	ws memdb.WatchSet,
@@ -1293,7 +1423,9 @@ func getProxyConfigEntryTxn(
// getServiceConfigEntryTxn is a convenience method for fetching a
// service-defaults kind of config entry.
//
// If an override is returned the index returned will be 0.
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getServiceConfigEntryTxn(
	tx ReadTxn,
	ws memdb.WatchSet,
@@ -1318,7 +1450,9 @@ func getServiceConfigEntryTxn(
// getRouterConfigEntryTxn is a convenience method for fetching a
// service-router kind of config entry.
//
// If an override is returned the index returned will be 0.
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getRouterConfigEntryTxn(
	tx ReadTxn,
	ws memdb.WatchSet,
@@ -1343,7 +1477,9 @@ func getRouterConfigEntryTxn(
// getSplitterConfigEntryTxn is a convenience method for fetching a
// service-splitter kind of config entry.
//
// If an override is returned the index returned will be 0.
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getSplitterConfigEntryTxn(
	tx ReadTxn,
	ws memdb.WatchSet,
@@ -1368,7 +1504,9 @@ func getSplitterConfigEntryTxn(
// getResolverConfigEntryTxn is a convenience method for fetching a
// service-resolver kind of config entry.
//
// If an override is returned the index returned will be 0.
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getResolverConfigEntryTxn(
	tx ReadTxn,
	ws memdb.WatchSet,
@@ -1393,7 +1531,9 @@ func getResolverConfigEntryTxn(
// getServiceIntentionsConfigEntryTxn is a convenience method for fetching a
// service-intentions kind of config entry.
//
// If an override is returned the index returned will be 0.
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getServiceIntentionsConfigEntryTxn(
	tx ReadTxn,
	ws memdb.WatchSet,
@@ -1415,6 +1555,32 @@ func getServiceIntentionsConfigEntryTxn(
	return idx, ixn, nil
}

// getExportedServicesConfigEntryTxn is a convenience method for fetching a
// exported-services kind of config entry.
//
// If an override KEY is present for the requested config entry, the index
// returned will be 0. Any override VALUE (nil or otherwise) will be returned
// if there is a KEY match.
func getExportedServicesConfigEntryTxn(
	tx ReadTxn,
	ws memdb.WatchSet,
	overrides map[configentry.KindName]structs.ConfigEntry,
	entMeta *acl.EnterpriseMeta,
) (uint64, *structs.ExportedServicesConfigEntry, error) {
	idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ExportedServices, entMeta.PartitionOrDefault(), overrides, entMeta)
	if err != nil {
		return 0, nil, err
	} else if entry == nil {
		return idx, nil, nil
	}

	export, ok := entry.(*structs.ExportedServicesConfigEntry)
	if !ok {
		return 0, nil, fmt.Errorf("invalid service config type %T", entry)
	}
	return idx, export, nil
}

func configEntryWithOverridesTxn(
	tx ReadTxn,
	ws memdb.WatchSet,
@@ -1443,12 +1609,12 @@ func protocolForService(
	svc structs.ServiceName,
) (uint64, string, error) {
	// Get the global proxy defaults (for default protocol)
	maxIdx, proxyConfig, err := configEntryTxn(tx, ws, structs.ProxyDefaults, structs.ProxyConfigGlobal, &svc.EnterpriseMeta)
	maxIdx, proxyConfig, err := getProxyConfigEntryTxn(tx, ws, structs.ProxyConfigGlobal, nil, &svc.EnterpriseMeta)
	if err != nil {
		return 0, "", err
	}

	idx, serviceDefaults, err := configEntryTxn(tx, ws, structs.ServiceDefaults, svc.Name, &svc.EnterpriseMeta)
	idx, serviceDefaults, err := getServiceConfigEntryTxn(tx, ws, svc.Name, nil, &svc.EnterpriseMeta)
	if err != nil {
		return 0, "", err
	}

@ -843,24 +843,75 @@ func TestStore_Service_TerminatingGateway_Kind_Service_Destination_Wildcard(t *t
|
|||
}
|
||||
|
||||
func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
||||
ensureConfigEntry := func(s *Store, idx uint64, entry structs.ConfigEntry) error {
|
||||
if err := entry.Normalize(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := entry.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
}
|
||||
|
||||
type tcase struct {
|
||||
entries []structs.ConfigEntry
|
||||
op func(t *testing.T, s *Store) error
|
||||
opAdd structs.ConfigEntry
|
||||
opDelete configentry.KindName
|
||||
expectErr string
|
||||
expectGraphErr bool
|
||||
}
|
||||
|
||||
EMPTY_KN := configentry.KindName{}
|
||||
|
||||
run := func(t *testing.T, tc tcase) {
|
||||
s := testConfigStateStore(t)
|
||||
for _, entry := range tc.entries {
|
||||
require.NoError(t, ensureConfigEntry(s, 0, entry))
|
||||
}
|
||||
|
||||
nOps := 0
|
||||
if tc.opAdd != nil {
|
||||
nOps++
|
||||
}
|
||||
if tc.opDelete != EMPTY_KN {
|
||||
nOps++
|
||||
}
|
||||
require.Equal(t, 1, nOps, "exactly one operation is required")
|
||||
|
||||
var err error
|
||||
switch {
|
||||
case tc.opAdd != nil:
|
||||
err = ensureConfigEntry(s, 0, tc.opAdd)
|
||||
case tc.opDelete != EMPTY_KN:
|
||||
kn := tc.opDelete
|
||||
err = s.DeleteConfigEntry(0, kn.Kind, kn.Name, &kn.EnterpriseMeta)
|
||||
default:
|
||||
t.Fatal("not possible")
|
||||
}
|
||||
|
||||
if tc.expectErr != "" {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tc.expectErr)
|
||||
_, ok := err.(*structs.ConfigEntryGraphError)
|
||||
if tc.expectGraphErr {
|
||||
require.True(t, ok, "%T is not a *ConfigEntryGraphError", err)
|
||||
} else {
|
||||
require.False(t, ok, "did not expect a *ConfigEntryGraphError here: %v", err)
|
||||
}
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
cases := map[string]tcase{
|
||||
"splitter fails without default protocol": {
|
||||
entries: []structs.ConfigEntry{},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceSplitterConfigEntry{
|
||||
Kind: structs.ServiceSplitter,
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
{Weight: 100},
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
opAdd: &structs.ServiceSplitterConfigEntry{
|
||||
Kind: structs.ServiceSplitter,
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
{Weight: 100},
|
||||
},
|
||||
},
|
||||
expectErr: "does not permit advanced routing or splitting behavior",
|
||||
expectGraphErr: true,
|
||||
|
@ -873,15 +924,12 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
Protocol: "tcp",
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceSplitterConfigEntry{
|
||||
Kind: structs.ServiceSplitter,
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
{Weight: 100},
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
opAdd: &structs.ServiceSplitterConfigEntry{
|
||||
Kind: structs.ServiceSplitter,
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
{Weight: 100},
|
||||
},
|
||||
},
|
||||
expectErr: "does not permit advanced routing or splitting behavior",
|
||||
expectGraphErr: true,
|
||||
|
@ -914,17 +962,14 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceSplitterConfigEntry{
|
||||
Kind: structs.ServiceSplitter,
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
{Weight: 90, ServiceSubset: "v1"},
|
||||
{Weight: 10, ServiceSubset: "v2"},
|
||||
},
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
opAdd: &structs.ServiceSplitterConfigEntry{
|
||||
Kind: structs.ServiceSplitter,
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
{Weight: 90, ServiceSubset: "v1"},
|
||||
{Weight: 10, ServiceSubset: "v2"},
|
||||
},
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
||||
},
|
||||
},
|
||||
"splitter works with http protocol (from proxy-defaults)": {
|
||||
|
@ -949,16 +994,13 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceSplitterConfigEntry{
|
||||
Kind: structs.ServiceSplitter,
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
{Weight: 90, ServiceSubset: "v1"},
|
||||
{Weight: 10, ServiceSubset: "v2"},
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
opAdd: &structs.ServiceSplitterConfigEntry{
|
||||
Kind: structs.ServiceSplitter,
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
{Weight: 90, ServiceSubset: "v1"},
|
||||
{Weight: 10, ServiceSubset: "v2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"router fails with tcp protocol": {
|
||||
|
@ -978,24 +1020,21 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceRouterConfigEntry{
|
||||
Kind: structs.ServiceRouter,
|
||||
Name: "main",
|
||||
Routes: []structs.ServiceRoute{
|
||||
{
|
||||
Match: &structs.ServiceRouteMatch{
|
||||
HTTP: &structs.ServiceRouteHTTPMatch{
|
||||
PathExact: "/other",
|
||||
},
|
||||
},
|
||||
Destination: &structs.ServiceRouteDestination{
|
||||
ServiceSubset: "other",
|
||||
opAdd: &structs.ServiceRouterConfigEntry{
|
||||
Kind: structs.ServiceRouter,
|
||||
Name: "main",
|
||||
Routes: []structs.ServiceRoute{
|
||||
{
|
||||
Match: &structs.ServiceRouteMatch{
|
||||
HTTP: &structs.ServiceRouteHTTPMatch{
|
||||
PathExact: "/other",
|
||||
},
|
||||
},
|
||||
Destination: &structs.ServiceRouteDestination{
|
||||
ServiceSubset: "other",
|
||||
},
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
},
|
||||
},
|
||||
expectErr: "does not permit advanced routing or splitting behavior",
|
||||
expectGraphErr: true,
|
||||
|
@ -1012,24 +1051,21 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceRouterConfigEntry{
|
||||
Kind: structs.ServiceRouter,
|
||||
Name: "main",
|
||||
Routes: []structs.ServiceRoute{
|
||||
{
|
||||
Match: &structs.ServiceRouteMatch{
|
||||
HTTP: &structs.ServiceRouteHTTPMatch{
|
||||
PathExact: "/other",
|
||||
},
|
||||
},
|
||||
Destination: &structs.ServiceRouteDestination{
|
||||
ServiceSubset: "other",
|
||||
opAdd: &structs.ServiceRouterConfigEntry{
|
||||
Kind: structs.ServiceRouter,
|
||||
Name: "main",
|
||||
Routes: []structs.ServiceRoute{
|
||||
{
|
||||
Match: &structs.ServiceRouteMatch{
|
||||
HTTP: &structs.ServiceRouteHTTPMatch{
|
||||
PathExact: "/other",
|
||||
},
|
||||
},
|
||||
Destination: &structs.ServiceRouteDestination{
|
||||
ServiceSubset: "other",
|
||||
},
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
},
|
||||
},
|
||||
expectErr: "does not permit advanced routing or splitting behavior",
|
||||
expectGraphErr: true,
|
||||
|
@ -1063,9 +1099,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
return s.DeleteConfigEntry(0, structs.ServiceDefaults, "main", nil)
|
||||
},
|
||||
opDelete: configentry.NewKindName(structs.ServiceDefaults, "main", nil),
|
||||
expectErr: "does not permit advanced routing or splitting behavior",
|
||||
expectGraphErr: true,
|
||||
},
|
||||
|
@ -1099,9 +1133,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
return s.DeleteConfigEntry(0, structs.ProxyDefaults, structs.ProxyConfigGlobal, nil)
|
||||
},
|
||||
opDelete: configentry.NewKindName(structs.ProxyDefaults, structs.ProxyConfigGlobal, nil),
|
||||
expectErr: "does not permit advanced routing or splitting behavior",
|
||||
expectGraphErr: true,
|
||||
},
|
||||
|
@ -1140,9 +1172,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
return s.DeleteConfigEntry(0, structs.ProxyDefaults, structs.ProxyConfigGlobal, nil)
|
||||
},
|
||||
opDelete: configentry.NewKindName(structs.ProxyDefaults, structs.ProxyConfigGlobal, nil),
|
||||
},
|
||||
"cannot change to tcp protocol after splitter created": {
|
||||
entries: []structs.ConfigEntry{
|
||||
|
@ -1172,13 +1202,10 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "main",
|
||||
Protocol: "tcp",
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
opAdd: &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "main",
|
||||
Protocol: "tcp",
|
||||
},
|
||||
expectErr: "does not permit advanced routing or splitting behavior",
|
||||
expectGraphErr: true,
|
||||
|
@ -1216,9 +1243,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
return s.DeleteConfigEntry(0, structs.ServiceDefaults, "main", nil)
|
||||
},
|
||||
opDelete: configentry.NewKindName(structs.ServiceDefaults, "main", nil),
|
||||
expectErr: "does not permit advanced routing or splitting behavior",
|
||||
expectGraphErr: true,
|
||||
},
|
||||
|
@ -1255,13 +1280,10 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "main",
|
||||
Protocol: "tcp",
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
opAdd: &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "main",
|
||||
Protocol: "tcp",
|
||||
},
|
||||
expectErr: "does not permit advanced routing or splitting behavior",
|
||||
expectGraphErr: true,
|
||||
|
@ -1280,16 +1302,13 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
Protocol: "tcp",
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceSplitterConfigEntry{
|
||||
Kind: structs.ServiceSplitter,
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
{Weight: 90},
|
||||
{Weight: 10, Service: "other"},
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
opAdd: &structs.ServiceSplitterConfigEntry{
|
||||
Kind: structs.ServiceSplitter,
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
{Weight: 90},
|
||||
{Weight: 10, Service: "other"},
|
||||
},
|
||||
},
|
||||
expectErr: "uses inconsistent protocols",
|
||||
expectGraphErr: true,
|
||||
|
@ -1307,24 +1326,21 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
Protocol: "tcp",
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceRouterConfigEntry{
|
||||
Kind: structs.ServiceRouter,
|
||||
Name: "main",
|
||||
Routes: []structs.ServiceRoute{
|
||||
{
|
||||
Match: &structs.ServiceRouteMatch{
|
||||
HTTP: &structs.ServiceRouteHTTPMatch{
|
||||
PathExact: "/other",
|
||||
},
|
||||
},
|
||||
Destination: &structs.ServiceRouteDestination{
|
||||
Service: "other",
|
||||
opAdd: &structs.ServiceRouterConfigEntry{
|
||||
Kind: structs.ServiceRouter,
|
||||
Name: "main",
|
||||
Routes: []structs.ServiceRoute{
|
||||
{
|
||||
Match: &structs.ServiceRouteMatch{
|
||||
HTTP: &structs.ServiceRouteHTTPMatch{
|
||||
PathExact: "/other",
|
||||
},
|
||||
},
|
||||
Destination: &structs.ServiceRouteDestination{
|
||||
Service: "other",
|
||||
},
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
},
|
||||
},
|
||||
expectErr: "uses inconsistent protocols",
|
||||
expectGraphErr: true,
|
||||
|
@ -1348,17 +1364,14 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
ConnectTimeout: 33 * time.Second,
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "main",
|
||||
Failover: map[string]structs.ServiceResolverFailover{
|
||||
"*": {
|
||||
Service: "other",
|
||||
},
|
||||
opAdd: &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "main",
|
||||
Failover: map[string]structs.ServiceResolverFailover{
|
||||
"*": {
|
||||
Service: "other",
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
},
|
||||
},
|
||||
expectErr: "uses inconsistent protocols",
|
||||
expectGraphErr: true,
|
||||
|
@ -1381,15 +1394,12 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
ConnectTimeout: 33 * time.Second,
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "main",
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Service: "other",
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
opAdd: &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "main",
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Service: "other",
|
||||
},
|
||||
},
|
||||
expectErr: "uses inconsistent protocols",
|
||||
expectGraphErr: true,
|
||||
|
@ -1408,16 +1418,13 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "main",
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Service: "other",
|
||||
ServiceSubset: "v1",
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
opAdd: &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "main",
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Service: "other",
|
||||
ServiceSubset: "v1",
|
||||
},
|
||||
},
|
||||
},
|
||||
"cannot redirect to a subset that does not exist": {
|
||||
|
@ -1428,16 +1435,13 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
ConnectTimeout: 33 * time.Second,
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "main",
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Service: "other",
|
||||
ServiceSubset: "v1",
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
opAdd: &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "main",
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Service: "other",
|
||||
ServiceSubset: "v1",
|
||||
},
|
||||
},
|
||||
expectErr: `does not have a subset named "v1"`,
|
||||
expectGraphErr: true,
|
||||
|
@ -1453,15 +1457,12 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "main",
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Service: "other",
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
opAdd: &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "main",
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Service: "other",
|
||||
},
|
||||
},
|
||||
expectErr: `detected circular resolver redirect`,
|
||||
expectGraphErr: true,
|
||||
|
@ -1483,45 +1484,102 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
op: func(t *testing.T, s *Store) error {
|
||||
entry := &structs.ServiceSplitterConfigEntry{
|
||||
Kind: "service-splitter",
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
{Weight: 100, Service: "other"},
|
||||
},
|
||||
}
|
||||
return s.EnsureConfigEntry(0, entry)
|
||||
opAdd: &structs.ServiceSplitterConfigEntry{
|
||||
Kind: "service-splitter",
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
{Weight: 100, Service: "other"},
|
||||
},
|
||||
},
|
||||
expectErr: `detected circular reference`,
|
||||
expectGraphErr: true,
|
||||
},
|
||||
/////////////////////////////////////////////////
|
||||
"cannot peer export cross-dc redirect": {
|
||||
entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: "service-resolver",
|
||||
Name: "main",
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Datacenter: "dc3",
|
||||
},
|
||||
},
|
||||
},
|
||||
opAdd: &structs.ExportedServicesConfigEntry{
|
||||
Name: "default",
|
||||
Services: []structs.ExportedService{{
|
||||
Name: "main",
|
||||
Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
|
||||
}},
|
||||
},
|
||||
expectErr: `contains cross-datacenter resolver redirect`,
|
||||
},
|
||||
"cannot peer export cross-dc redirect via wildcard": {
|
||||
entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: "service-resolver",
|
||||
Name: "main",
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Datacenter: "dc3",
|
||||
},
|
||||
},
|
||||
},
|
||||
opAdd: &structs.ExportedServicesConfigEntry{
|
||||
Name: "default",
|
||||
Services: []structs.ExportedService{{
|
||||
Name: "*",
|
||||
Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
|
||||
}},
|
||||
},
|
||||
expectErr: `contains cross-datacenter resolver redirect`,
|
||||
},
|
||||
"cannot peer export cross-dc failover": {
|
||||
entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: "service-resolver",
|
||||
Name: "main",
|
||||
Failover: map[string]structs.ServiceResolverFailover{
|
||||
"*": {
|
||||
Datacenters: []string{"dc3"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
opAdd: &structs.ExportedServicesConfigEntry{
|
||||
Name: "default",
|
||||
Services: []structs.ExportedService{{
|
||||
Name: "main",
|
||||
Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
|
||||
}},
|
||||
},
|
||||
expectErr: `contains cross-datacenter failover`,
|
||||
},
|
||||
"cannot peer export cross-dc failover via wildcard": {
|
||||
entries: []structs.ConfigEntry{
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: "service-resolver",
|
||||
Name: "main",
|
||||
Failover: map[string]structs.ServiceResolverFailover{
|
||||
"*": {
|
||||
Datacenters: []string{"dc3"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
opAdd: &structs.ExportedServicesConfigEntry{
|
||||
Name: "default",
|
||||
Services: []structs.ExportedService{{
|
||||
Name: "*",
|
||||
Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
|
||||
}},
|
||||
},
|
||||
expectErr: `contains cross-datacenter failover`,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range cases {
|
||||
name := name
|
||||
tc := tc
|
||||
|
||||
t.Run(name, func(t *testing.T) {
|
||||
s := testConfigStateStore(t)
|
||||
for _, entry := range tc.entries {
|
||||
require.NoError(t, entry.Normalize())
|
||||
require.NoError(t, s.EnsureConfigEntry(0, entry))
|
||||
}
|
||||
|
||||
err := tc.op(t, s)
|
||||
if tc.expectErr != "" {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), tc.expectErr)
|
||||
_, ok := err.(*structs.ConfigEntryGraphError)
|
||||
if tc.expectGraphErr {
|
||||
require.True(t, ok, "%T is not a *ConfigEntryGraphError", err)
|
||||
} else {
|
||||
require.False(t, ok, "did not expect a *ConfigEntryGraphError here: %v", err)
|
||||
}
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"github.com/hashicorp/go-memdb"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/configentry"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/lib/maps"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
|
@ -154,10 +155,10 @@ func peeringReadTxn(tx ReadTxn, ws memdb.WatchSet, q Query) (uint64, *pbpeering.
|
|||
func (s *Store) PeeringList(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.Peering, error) {
|
||||
tx := s.db.ReadTxn()
|
||||
defer tx.Abort()
|
||||
return s.peeringListTxn(ws, tx, entMeta)
|
||||
return peeringListTxn(ws, tx, entMeta)
|
||||
}
|
||||
|
||||
func (s *Store) peeringListTxn(ws memdb.WatchSet, tx ReadTxn, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.Peering, error) {
|
||||
func peeringListTxn(ws memdb.WatchSet, tx ReadTxn, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.Peering, error) {
|
||||
var (
|
||||
iter memdb.ResultIterator
|
||||
err error
|
||||
|
@ -322,21 +323,21 @@ func (s *Store) ExportedServicesForPeer(ws memdb.WatchSet, peerID string, dc str
|
|||
return 0, &structs.ExportedServiceList{}, nil
|
||||
}
|
||||
|
||||
return s.exportedServicesForPeerTxn(ws, tx, peering, dc)
|
||||
return exportedServicesForPeerTxn(ws, tx, peering, dc)
|
||||
}
|
||||
|
||||
func (s *Store) ExportedServicesForAllPeersByName(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, map[string]structs.ServiceList, error) {
|
||||
tx := s.db.ReadTxn()
|
||||
defer tx.Abort()
|
||||
|
||||
maxIdx, peerings, err := s.peeringListTxn(ws, tx, entMeta)
|
||||
maxIdx, peerings, err := peeringListTxn(ws, tx, entMeta)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to list peerings: %w", err)
|
||||
}
|
||||
|
||||
out := make(map[string]structs.ServiceList)
|
||||
for _, peering := range peerings {
|
||||
idx, list, err := s.exportedServicesForPeerTxn(ws, tx, peering, "")
|
||||
idx, list, err := exportedServicesForPeerTxn(ws, tx, peering, "")
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to list exported services for peer %q: %w", peering.ID, err)
|
||||
}
|
||||
|
@ -356,34 +357,31 @@ func (s *Store) ExportedServicesForAllPeersByName(ws memdb.WatchSet, entMeta acl
|
|||
// specific peering, and optionally include information about discovery chain
|
||||
// reachable targets for these exported services if the "dc" parameter is
|
||||
// specified.
|
||||
func (s *Store) exportedServicesForPeerTxn(ws memdb.WatchSet, tx ReadTxn, peering *pbpeering.Peering, dc string) (uint64, *structs.ExportedServiceList, error) {
|
||||
func exportedServicesForPeerTxn(
|
||||
ws memdb.WatchSet,
|
||||
tx ReadTxn,
|
||||
peering *pbpeering.Peering,
|
||||
dc string,
|
||||
) (uint64, *structs.ExportedServiceList, error) {
|
||||
maxIdx := peering.ModifyIndex
|
||||
|
||||
entMeta := structs.NodeEnterpriseMetaInPartition(peering.Partition)
|
||||
idx, raw, err := configEntryTxn(tx, ws, structs.ExportedServices, entMeta.PartitionOrDefault(), entMeta)
|
||||
idx, conf, err := getExportedServicesConfigEntryTxn(tx, ws, nil, entMeta)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to fetch exported-services config entry: %w", err)
|
||||
}
|
||||
if idx > maxIdx {
|
||||
maxIdx = idx
|
||||
}
|
||||
if raw == nil {
|
||||
if conf == nil {
|
||||
return maxIdx, &structs.ExportedServiceList{}, nil
|
||||
}
|
||||
|
||||
conf, ok := raw.(*structs.ExportedServicesConfigEntry)
|
||||
if !ok {
|
||||
return 0, nil, fmt.Errorf("expected type *structs.ExportedServicesConfigEntry, got %T", raw)
|
||||
}
|
||||
|
||||
var (
|
||||
normalSet = make(map[structs.ServiceName]struct{})
|
||||
discoSet = make(map[structs.ServiceName]struct{})
|
||||
)
|
||||
|
||||
// TODO(peering): filter the disco chain portion of the results to only be
|
||||
// things reachable over the mesh to avoid replicating some clutter.
|
||||
//
|
||||
// At least one of the following should be true for a name for it to
|
||||
// replicate:
|
||||
//
|
||||
|
@ -426,7 +424,7 @@ func (s *Store) exportedServicesForPeerTxn(ws memdb.WatchSet, tx ReadTxn, peerin
|
|||
}
|
||||
|
||||
// list all config entries of kind service-resolver, service-router, service-splitter?
|
||||
idx, discoChains, err := listDiscoveryChainNamesTxn(tx, ws, svcMeta)
|
||||
idx, discoChains, err := listDiscoveryChainNamesTxn(tx, ws, nil, svcMeta)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to get discovery chain names: %w", err)
|
||||
}
|
||||
|
@ -463,7 +461,7 @@ func (s *Store) exportedServicesForPeerTxn(ws memdb.WatchSet, tx ReadTxn, peerin
|
|||
if dc != "" && !structs.IsProtocolHTTPLike(protocol) {
|
||||
// We only need to populate the targets for replication purposes for L4 protocols, which
|
||||
// do not ultimately get intercepted by the mesh gateways.
|
||||
idx, targets, err := s.discoveryChainOriginalTargetsTxn(tx, ws, dc, svc.Name, &svc.EnterpriseMeta)
|
||||
idx, targets, err := discoveryChainOriginalTargetsTxn(tx, ws, dc, svc.Name, &svc.EnterpriseMeta)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get discovery chain targets for service %q: %w", svc, err)
|
||||
}
|
||||
|
@@ -504,6 +502,86 @@ func (s *Store) exportedServicesForPeerTxn(ws memdb.WatchSet, tx ReadTxn, peerin
	return maxIdx, list, nil
}

func listAllExportedServices(
	ws memdb.WatchSet,
	tx ReadTxn,
	overrides map[configentry.KindName]structs.ConfigEntry,
	entMeta acl.EnterpriseMeta,
) (uint64, map[structs.ServiceName]struct{}, error) {
	idx, export, err := getExportedServicesConfigEntryTxn(tx, ws, overrides, &entMeta)
	if err != nil {
		return 0, nil, err
	}

	found := make(map[structs.ServiceName]struct{})
	if export == nil {
		return idx, found, nil
	}

	_, services, err := listServicesExportedToAnyPeerByConfigEntry(ws, tx, export, overrides)
	if err != nil {
		return 0, nil, err
	}
	for _, svc := range services {
		found[svc] = struct{}{}
	}

	return idx, found, nil
}

func listServicesExportedToAnyPeerByConfigEntry(
	ws memdb.WatchSet,
	tx ReadTxn,
	conf *structs.ExportedServicesConfigEntry,
	overrides map[configentry.KindName]structs.ConfigEntry,
) (uint64, []structs.ServiceName, error) {
	var (
		entMeta = conf.GetEnterpriseMeta()
		found   = make(map[structs.ServiceName]struct{})
		maxIdx  uint64
	)

	for _, svc := range conf.Services {
		svcMeta := acl.NewEnterpriseMetaWithPartition(entMeta.PartitionOrDefault(), svc.Namespace)

		sawPeer := false
		for _, consumer := range svc.Consumers {
			if consumer.PeerName == "" {
				continue
			}
			sawPeer = true

			sn := structs.NewServiceName(svc.Name, &svcMeta)
			if _, ok := found[sn]; ok {
				continue
			}

			if svc.Name != structs.WildcardSpecifier {
				found[sn] = struct{}{}
			}
		}

		if sawPeer && svc.Name == structs.WildcardSpecifier {
			idx, discoChains, err := listDiscoveryChainNamesTxn(tx, ws, overrides, svcMeta)
			if err != nil {
				return 0, nil, fmt.Errorf("failed to get discovery chain names: %w", err)
			}
			if idx > maxIdx {
				maxIdx = idx
			}
			for _, sn := range discoChains {
				found[sn] = struct{}{}
			}
		}
	}

	foundKeys := maps.SliceOfKeys(found)

	structs.ServiceList(foundKeys).Sort()

	return maxIdx, foundKeys, nil
}

// PeeringsForService returns the list of peerings that are associated with the service name provided in the query.
// This is used to configure connect proxies for a given service. The result is generated by querying for exported
// service config entries and filtering for those that match the given service.

@ -289,8 +289,7 @@ func TestSubscriptionManager_RegisterDeregister(t *testing.T) {
|
|||
Name: "mysql",
|
||||
Failover: map[string]structs.ServiceResolverFailover{
|
||||
"*": {
|
||||
Service: "failover",
|
||||
Datacenters: []string{"dc2", "dc3"},
|
||||
Service: "failover",
|
||||
},
|
||||
},
|
||||
})
|
||||
|
@ -327,8 +326,7 @@ func TestSubscriptionManager_RegisterDeregister(t *testing.T) {
|
|||
},
|
||||
SpiffeID: []string{
|
||||
"spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/mysql",
|
||||
"spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/failover",
|
||||
"spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc3/svc/failover",
|
||||
"spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/failover",
|
||||
},
|
||||
Protocol: "tcp",
|
||||
},
|
||||
|
|
|
@ -548,10 +548,19 @@ func TestConfigSnapshotPeeredMeshGateway(t testing.T, variant string, nsFn func(
|
|||
},
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "api-dc2",
|
||||
Name: "api",
|
||||
Subsets: map[string]structs.ServiceResolverSubset{
|
||||
"v2": {
|
||||
Filter: "Service.Meta.version == v2",
|
||||
},
|
||||
},
|
||||
},
|
||||
&structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "api-v2",
|
||||
Redirect: &structs.ServiceResolverRedirect{
|
||||
Service: "api",
|
||||
Datacenter: "dc2",
|
||||
Service: "api",
|
||||
ServiceSubset: "v2",
|
||||
},
|
||||
},
|
||||
&structs.ServiceSplitterConfigEntry{
|
||||
|
@ -576,7 +585,7 @@ func TestConfigSnapshotPeeredMeshGateway(t testing.T, variant string, nsFn func(
|
|||
Match: httpMatch(&structs.ServiceRouteHTTPMatch{
|
||||
PathPrefix: "/api",
|
||||
}),
|
||||
Destination: toService("api-dc2"),
|
||||
Destination: toService("api-v2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -602,13 +611,7 @@ func TestConfigSnapshotPeeredMeshGateway(t testing.T, variant string, nsFn func(
|
|||
extraUpdates = append(extraUpdates,
|
||||
UpdateEvent{
|
||||
CorrelationID: datacentersWatchID,
|
||||
Result: &[]string{"dc1", "dc2"},
|
||||
},
|
||||
UpdateEvent{
|
||||
CorrelationID: "mesh-gateway:dc2",
|
||||
Result: &structs.IndexedNodesWithGateways{
|
||||
Nodes: TestGatewayNodesDC2(t),
|
||||
},
|
||||
Result: &[]string{"dc1"},
|
||||
},
|
||||
UpdateEvent{
|
||||
CorrelationID: exportedServiceListWatchID,
|
||||
|
|
|
@ -907,6 +907,14 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
|
|||
|
||||
target := chain.Targets[targetID]
|
||||
|
||||
if forMeshGateway && !cfgSnap.Locality.Matches(target.Datacenter, target.Partition) {
|
||||
s.Logger.Warn("ignoring discovery chain target that crosses a datacenter or partition boundary in a mesh gateway",
|
||||
"target", target,
|
||||
"gatewayLocality", cfgSnap.Locality,
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
// Determine if we have to generate the entire cluster differently.
|
||||
failoverThroughMeshGateway := chain.WillFailoverThroughMeshGateway(node) && !forMeshGateway
|
||||
|
||||
|
|
|
@ -177,7 +177,6 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C
|
|||
keys := cfgSnap.MeshGateway.GatewayKeys()
|
||||
resources := make([]proto.Message, 0, len(keys)+len(cfgSnap.MeshGateway.ServiceGroups))
|
||||
|
||||
endpointsPerRemoteGateway := make(map[string]structs.CheckServiceNodes)
|
||||
for _, key := range keys {
|
||||
if key.Matches(cfgSnap.Datacenter, cfgSnap.ProxyID.PartitionOrDefault()) {
|
||||
continue // skip local
|
||||
|
@ -194,8 +193,6 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C
|
|||
continue
|
||||
}
|
||||
|
||||
endpointsPerRemoteGateway[key.String()] = endpoints
|
||||
|
||||
{ // standard connect
|
||||
clusterName := connect.GatewaySNI(key.Datacenter, key.Partition, cfgSnap.Roots.TrustDomain)
|
||||
|
||||
|
@ -274,7 +271,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C
|
|||
resources = append(resources, e...)
|
||||
|
||||
// Generate the endpoints for exported discovery chain targets.
|
||||
e, err = s.makeExportedUpstreamEndpointsForMeshGateway(cfgSnap, endpointsPerRemoteGateway)
|
||||
e, err = s.makeExportedUpstreamEndpointsForMeshGateway(cfgSnap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -479,6 +476,7 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
|
|||
gatewayEndpoints,
|
||||
targetID,
|
||||
gatewayKey,
|
||||
forMeshGateway,
|
||||
)
|
||||
if !valid {
|
||||
continue // skip the cluster if we're still populating the snapshot
|
||||
|
@ -500,6 +498,7 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
|
|||
gatewayEndpoints,
|
||||
failTargetID,
|
||||
gatewayKey,
|
||||
forMeshGateway,
|
||||
)
|
||||
if !valid {
|
||||
continue // skip the failover target if we're still populating the snapshot
|
||||
|
@ -519,10 +518,7 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
|
|||
return resources, nil
|
||||
}
|
||||
|
||||
func (s *ResourceGenerator) makeExportedUpstreamEndpointsForMeshGateway(
|
||||
cfgSnap *proxycfg.ConfigSnapshot,
|
||||
endpointsPerRemoteGateway map[string]structs.CheckServiceNodes,
|
||||
) ([]proto.Message, error) {
|
||||
func (s *ResourceGenerator) makeExportedUpstreamEndpointsForMeshGateway(cfgSnap *proxycfg.ConfigSnapshot) ([]proto.Message, error) {
|
||||
var resources []proto.Message
|
||||
|
||||
populatedExportedClusters := make(map[string]struct{}) // key=clusterName
|
||||
|
@ -531,41 +527,38 @@ func (s *ResourceGenerator) makeExportedUpstreamEndpointsForMeshGateway(
|
|||
|
||||
chainEndpoints := make(map[string]structs.CheckServiceNodes)
|
||||
for _, target := range chain.Targets {
|
||||
if cfgSnap.Locality.Matches(target.Datacenter, target.Partition) {
|
||||
// served locally
|
||||
targetSvc := target.ServiceName()
|
||||
if !cfgSnap.Locality.Matches(target.Datacenter, target.Partition) {
|
||||
s.Logger.Warn("ignoring discovery chain target that crosses a datacenter or partition boundary in a mesh gateway",
|
||||
"target", target,
|
||||
"gatewayLocality", cfgSnap.Locality,
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
endpoints, ok := cfgSnap.MeshGateway.ServiceGroups[targetSvc]
|
||||
targetSvc := target.ServiceName()
|
||||
|
||||
endpoints, ok := cfgSnap.MeshGateway.ServiceGroups[targetSvc]
|
||||
if !ok {
|
||||
continue // ignore; not ready
|
||||
}
|
||||
|
||||
if target.ServiceSubset == "" {
|
||||
chainEndpoints[target.ID] = endpoints
|
||||
} else {
|
||||
resolver, ok := cfgSnap.MeshGateway.ServiceResolvers[targetSvc]
|
||||
if !ok {
|
||||
continue // ignore; not ready
|
||||
}
|
||||
subset, ok := resolver.Subsets[target.ServiceSubset]
|
||||
if !ok {
|
||||
continue // ignore; not ready
|
||||
}
|
||||
|
||||
if target.ServiceSubset == "" {
|
||||
chainEndpoints[target.ID] = endpoints
|
||||
} else {
|
||||
resolver, ok := cfgSnap.MeshGateway.ServiceResolvers[targetSvc]
|
||||
if !ok {
|
||||
continue // ignore; not ready
|
||||
}
|
||||
subset, ok := resolver.Subsets[target.ServiceSubset]
|
||||
if !ok {
|
||||
continue // ignore; not ready
|
||||
}
|
||||
|
||||
subsetEndpoints, err := s.filterSubsetEndpoints(&subset, endpoints)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chainEndpoints[target.ID] = subsetEndpoints
|
||||
subsetEndpoints, err := s.filterSubsetEndpoints(&subset, endpoints)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// serve remotely
|
||||
gk := proxycfg.GatewayKey{
|
||||
Datacenter: target.Datacenter,
|
||||
Partition: target.Partition,
|
||||
}
|
||||
// TODO(peering): handle hostname endpoints logic
|
||||
chainEndpoints[target.ID] = cfgSnap.GetMeshGatewayEndpoints(gk)
|
||||
chainEndpoints[target.ID] = subsetEndpoints
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -575,7 +568,7 @@ func (s *ResourceGenerator) makeExportedUpstreamEndpointsForMeshGateway(
|
|||
cfgSnap.Locality,
|
||||
nil,
|
||||
chainEndpoints,
|
||||
endpointsPerRemoteGateway,
|
||||
nil,
|
||||
true,
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -652,6 +645,7 @@ func makeLoadAssignmentEndpointGroup(
|
|||
gatewayHealth map[string]structs.CheckServiceNodes,
|
||||
targetID string,
|
||||
localKey proxycfg.GatewayKey,
|
||||
forMeshGateway bool,
|
||||
) (loadAssignmentEndpointGroup, bool) {
|
||||
realEndpoints, ok := targetHealth[targetID]
|
||||
if !ok {
|
||||
|
@ -666,12 +660,11 @@ func makeLoadAssignmentEndpointGroup(
|
|||
case structs.MeshGatewayModeRemote:
|
||||
gatewayKey.Datacenter = target.Datacenter
|
||||
gatewayKey.Partition = target.Partition
|
||||
|
||||
case structs.MeshGatewayModeLocal:
|
||||
gatewayKey = localKey
|
||||
}
|
||||
|
||||
if gatewayKey.IsEmpty() || localKey.Matches(target.Datacenter, target.Partition) {
|
||||
if forMeshGateway || gatewayKey.IsEmpty() || localKey.Matches(target.Datacenter, target.Partition) {
|
||||
// Gateways are not needed if the request isn't for a remote DC or partition.
|
||||
return loadAssignmentEndpointGroup{
|
||||
Endpoints: realEndpoints,
|
||||
|
|
|
@ -35,23 +35,6 @@
|
|||
|
||||
}
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
|
||||
"name": "dc2.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
"type": "EDS",
|
||||
"edsClusterConfig": {
|
||||
"edsConfig": {
|
||||
"ads": {
|
||||
|
||||
},
|
||||
"resourceApiVersion": "V3"
|
||||
}
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"outlierDetection": {
|
||||
|
||||
}
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
|
||||
"name": "exported~alt.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
|
||||
|
@@ -110,64 +93,6 @@
 }
 }
 },
-{
-"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
-"name": "exported~api.default.dc2.internal.11111111-2222-3333-4444-555555555555.consul",
-"altStatName": "exported~api.default.dc2.internal.11111111-2222-3333-4444-555555555555.consul",
-"type": "EDS",
-"edsClusterConfig": {
-"edsConfig": {
-"ads": {
-
-},
-"resourceApiVersion": "V3"
-}
-},
-"connectTimeout": "5s",
-"circuitBreakers": {
-
-},
-"outlierDetection": {
-
-},
-"commonLbConfig": {
-"healthyPanicThreshold": {
-
-}
-},
-"transportSocket": {
-"name": "tls",
-"typedConfig": {
-"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
-"commonTlsContext": {
-"tlsParams": {
-
-},
-"tlsCertificates": [
-{
-"certificateChain": {
-"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
-},
-"privateKey": {
-"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
-}
-}
-],
-"validationContext": {
-"trustedCa": {
-"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
-},
-"matchSubjectAltNames": [
-{
-"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc2/svc/api"
-}
-]
-}
-},
-"sni": "api.default.dc2.internal.11111111-2222-3333-4444-555555555555.consul"
-}
-}
-},
 {
 "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
 "name": "exported~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
@@ -225,6 +150,64 @@
 "sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
 }
 }
+},
+{
+"@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster",
+"name": "exported~v2.api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
+"altStatName": "exported~v2.api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
+"type": "EDS",
+"edsClusterConfig": {
+"edsConfig": {
+"ads": {
+
+},
+"resourceApiVersion": "V3"
+}
+},
+"connectTimeout": "5s",
+"circuitBreakers": {
+
+},
+"outlierDetection": {
+
+},
+"commonLbConfig": {
+"healthyPanicThreshold": {
+
+}
+},
+"transportSocket": {
+"name": "tls",
+"typedConfig": {
+"@type": "type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext",
+"commonTlsContext": {
+"tlsParams": {
+
+},
+"tlsCertificates": [
+{
+"certificateChain": {
+"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
+},
+"privateKey": {
+"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
+}
+}
+],
+"validationContext": {
+"trustedCa": {
+"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
+},
+"matchSubjectAltNames": [
+{
+"exact": "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/dc1/svc/api"
+}
+]
+}
+},
+"sni": "v2.api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
+}
+}
 }
 ],
 "typeUrl": "type.googleapis.com/envoy.config.cluster.v3.Cluster",

@@ -69,40 +69,6 @@
 }
 ]
 },
-{
-"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
-"clusterName": "dc2.internal.11111111-2222-3333-4444-555555555555.consul",
-"endpoints": [
-{
-"lbEndpoints": [
-{
-"endpoint": {
-"address": {
-"socketAddress": {
-"address": "198.18.1.1",
-"portValue": 443
-}
-}
-},
-"healthStatus": "HEALTHY",
-"loadBalancingWeight": 1
-},
-{
-"endpoint": {
-"address": {
-"socketAddress": {
-"address": "198.18.1.2",
-"portValue": 443
-}
-}
-},
-"healthStatus": "HEALTHY",
-"loadBalancingWeight": 1
-}
-]
-}
-]
-},
 {
 "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
 "clusterName": "exported~alt.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",
@@ -137,40 +103,6 @@
 }
 ]
 },
-{
-"@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
-"clusterName": "exported~api.default.dc2.internal.11111111-2222-3333-4444-555555555555.consul",
-"endpoints": [
-{
-"lbEndpoints": [
-{
-"endpoint": {
-"address": {
-"socketAddress": {
-"address": "198.18.1.1",
-"portValue": 443
-}
-}
-},
-"healthStatus": "HEALTHY",
-"loadBalancingWeight": 1
-},
-{
-"endpoint": {
-"address": {
-"socketAddress": {
-"address": "198.18.1.2",
-"portValue": 443
-}
-}
-},
-"healthStatus": "HEALTHY",
-"loadBalancingWeight": 1
-}
-]
-}
-]
-},
 {
 "@type": "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment",
 "clusterName": "exported~db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul",

@@ -101,23 +101,6 @@
 }
 }
 },
-{
-"filterChainMatch": {
-"serverNames": [
-"*.dc2.internal.11111111-2222-3333-4444-555555555555.consul"
-]
-},
-"filters": [
-{
-"name": "envoy.filters.network.tcp_proxy",
-"typedConfig": {
-"@type": "type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy",
-"statPrefix": "mesh_gateway_remote.default.dc2",
-"cluster": "dc2.internal.11111111-2222-3333-4444-555555555555.consul"
-}
-}
-]
-},
 {
 "filters": [
 {

@@ -36,7 +36,7 @@
 "prefix": "/api"
 },
 "route": {
-"cluster": "exported~api.default.dc2.internal.11111111-2222-3333-4444-555555555555.consul"
+"cluster": "exported~v2.api.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul"
 }
 },
 {