Merge pull request #11450 from hashicorp/ap/best-addr

Ensure calls to BestAddress consider partition
Freddy 2021-11-01 15:44:41 -06:00 committed by GitHub
commit 3f30afd26b
11 changed files with 219 additions and 62 deletions
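
In short: the proxy configuration snapshot now carries a Locality, a GatewayKey made up of datacenter plus admin partition, and every BestAddress call site derives its LAN-versus-WAN decision from that key instead of comparing datacenters alone. The sketch below is illustrative only, not code from this PR; it uses the GatewayKey.Matches and CheckServiceNode.BestAddress signatures visible in the diff, and the helper name is invented:

    package example

    import (
        "github.com/hashicorp/consul/agent/proxycfg"
        "github.com/hashicorp/consul/agent/structs"
    )

    // bestAddressForLocality is a hypothetical helper showing the pattern this
    // PR applies at each BestAddress call site.
    func bestAddressForLocality(local proxycfg.GatewayKey, node structs.CheckServiceNode) (string, int) {
        // Before: only a datacenter mismatch selected the WAN address, e.g.
        //   node.BestAddress(local.Datacenter != node.Node.Datacenter)
        // After: a node in another partition is also remote, even when it
        // lives in the same datacenter.
        remote := !local.Matches(node.Node.Datacenter, node.Node.PartitionOrDefault())
        return node.BestAddress(remote)
    }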

@@ -239,6 +239,7 @@ func TestManager_BasicLifecycle(t *testing.T) {
 IntentionsSet: true,
 },
 Datacenter: "dc1",
+Locality: GatewayKey{Datacenter: "dc1", Partition: structs.PartitionOrDefault("")},
 },
 },
 {
@@ -296,6 +297,7 @@ func TestManager_BasicLifecycle(t *testing.T) {
 IntentionsSet: true,
 },
 Datacenter: "dc1",
+Locality: GatewayKey{Datacenter: "dc1", Partition: structs.PartitionOrDefault("")},
 },
 },
 }

@@ -143,7 +143,10 @@ func (s *handlerMeshGateway) handleUpdate(ctx context.Context, u cache.UpdateEve
 for dc, nodes := range dcIndexedNodes.DatacenterNodes {
 snap.MeshGateway.HostnameDatacenters[dc] = hostnameEndpoints(
-s.logger.Named(logging.MeshGateway), snap.Datacenter, nodes)
+s.logger.Named(logging.MeshGateway),
+snap.Locality,
+nodes,
+)
 }
 for dc := range snap.MeshGateway.HostnameDatacenters {
@@ -323,7 +326,10 @@ func (s *handlerMeshGateway) handleUpdate(ctx context.Context, u cache.UpdateEve
 if len(resp.Nodes) > 0 {
 snap.MeshGateway.GatewayGroups[key] = resp.Nodes
 snap.MeshGateway.HostnameDatacenters[key] = hostnameEndpoints(
-s.logger.Named(logging.MeshGateway), snap.Datacenter, resp.Nodes)
+s.logger.Named(logging.MeshGateway),
+snap.Locality,
+resp.Nodes,
+)
 }
 default:

@@ -8,6 +8,7 @@ import (
 "github.com/mitchellh/copystructure"
+"github.com/hashicorp/consul/acl"
 "github.com/hashicorp/consul/agent/connect"
 "github.com/hashicorp/consul/agent/structs"
 )
@@ -61,7 +62,7 @@ type GatewayKey struct {
 func (k GatewayKey) String() string {
 resp := k.Datacenter
-if k.Partition != "" {
+if !structs.IsDefaultPartition(k.Partition) {
 resp = k.Partition + "." + resp
 }
 return resp
@@ -79,7 +80,7 @@ func gatewayKeyFromString(s string) GatewayKey {
 split := strings.SplitN(s, ".", 2)
 if len(split) == 1 {
-return GatewayKey{Datacenter: split[0]}
+return GatewayKey{Datacenter: split[0], Partition: acl.DefaultPartitionName}
 }
 return GatewayKey{Partition: split[0], Datacenter: split[1]}
 }
@@ -303,12 +304,13 @@ func (c *configSnapshotMeshGateway) GatewayKeys() []GatewayKey {
 }
 keys := make([]GatewayKey, 0, sz)
-for key := range c.GatewayGroups {
+for key := range c.FedStateGateways {
 keys = append(keys, gatewayKeyFromString(key))
 }
-for key := range c.FedStateGateways {
-if _, ok := c.GatewayGroups[key]; !ok {
-keys = append(keys, gatewayKeyFromString(key))
+for key := range c.GatewayGroups {
+gk := gatewayKeyFromString(key)
+if _, ok := c.FedStateGateways[gk.Datacenter]; !ok {
+keys = append(keys, gk)
 }
 }
@@ -408,6 +410,7 @@ type ConfigSnapshot struct {
 Proxy structs.ConnectProxyConfig
 Datacenter string
 IntentionDefaultAllow bool
+Locality GatewayKey
 ServerSNIFn ServerSNIFunc
 Roots *structs.IndexedCARoots
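
These keys index maps such as WatchedGateways and GatewayGroups, so the string form matters: String() now omits the default partition and gatewayKeyFromString restores it. A small round-trip sketch, assuming it lives inside the proxycfg package (gatewayKeyFromString is unexported) and using an invented partition name "ap1":

    package proxycfg

    import "fmt"

    // exampleGatewayKeyRoundTrip is a hypothetical illustration, not part of this PR.
    func exampleGatewayKeyRoundTrip() {
        k := gatewayKeyFromString("dc2")
        fmt.Println(k.String()) // "dc2": the default partition never appears in the key

        k = gatewayKeyFromString("ap1.dc2")
        // In builds where non-default partitions exist this prints "ap1.dc2";
        // otherwise the partition collapses to the default and "dc2" is printed.
        fmt.Println(k.String())
    }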

@@ -254,6 +254,7 @@ func newConfigSnapshotFromServiceInstance(s serviceInstance, config stateConfig)
 TaggedAddresses: s.taggedAddresses,
 Proxy: s.proxyCfg,
 Datacenter: config.source.Datacenter,
+Locality: GatewayKey{Datacenter: config.source.Datacenter, Partition: s.proxyID.PartitionOrDefault()},
 ServerSNIFn: config.serverSNIFn,
 IntentionDefaultAllow: config.intentionDefaultAllow,
 }
@@ -401,7 +402,7 @@ func (s *state) Changed(ns *structs.NodeService, token string) bool {
 // Envoy cannot resolve hostnames provided through EDS, so we exclusively use CDS for these clusters.
 // If there is a mix of hostnames and addresses we exclusively use the hostnames, since clusters cannot discover
 // services with both EDS and DNS.
-func hostnameEndpoints(logger hclog.Logger, localDC string, nodes structs.CheckServiceNodes) structs.CheckServiceNodes {
+func hostnameEndpoints(logger hclog.Logger, localKey GatewayKey, nodes structs.CheckServiceNodes) structs.CheckServiceNodes {
 var (
 hasIP bool
 hasHostname bool
@@ -409,7 +410,7 @@ func hostnameEndpoints(logger hclog.Logger, localDC string, nodes structs.CheckS
 )
 for _, n := range nodes {
-addr, _ := n.BestAddress(localDC != n.Node.Datacenter)
+addr, _ := n.BestAddress(!localKey.Matches(n.Node.Datacenter, n.Node.PartitionOrDefault()))
 if net.ParseIP(addr) != nil {
 hasIP = true
 continue

@@ -649,8 +649,8 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 "upstream-target:api-failover-remote.default.default.dc2:api-failover-remote?dc=dc2": genVerifyServiceWatch("api-failover-remote", "", "dc2", true),
 "upstream-target:api-failover-local.default.default.dc2:api-failover-local?dc=dc2": genVerifyServiceWatch("api-failover-local", "", "dc2", true),
 "upstream-target:api-failover-direct.default.default.dc2:api-failover-direct?dc=dc2": genVerifyServiceWatch("api-failover-direct", "", "dc2", true),
-"mesh-gateway:default.dc2:api-failover-remote?dc=dc2": genVerifyGatewayWatch("dc2"),
-"mesh-gateway:default.dc1:api-failover-local?dc=dc2": genVerifyGatewayWatch("dc1"),
+"mesh-gateway:dc2:api-failover-remote?dc=dc2": genVerifyGatewayWatch("dc2"),
+"mesh-gateway:dc1:api-failover-local?dc=dc2": genVerifyGatewayWatch("dc1"),
 },
 verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
 require.True(t, snap.Valid())
@@ -673,7 +673,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 }
 if meshGatewayProxyConfigValue == structs.MeshGatewayModeLocal {
-stage1.requiredWatches["mesh-gateway:default.dc1:api-dc2"] = genVerifyGatewayWatch("dc1")
+stage1.requiredWatches["mesh-gateway:dc1:api-dc2"] = genVerifyGatewayWatch("dc1")
 }
 return testCase{
@@ -2313,3 +2313,128 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 })
 }
 }
func Test_hostnameEndpoints(t *testing.T) {
type testCase struct {
name string
localKey GatewayKey
nodes structs.CheckServiceNodes
want structs.CheckServiceNodes
}
run := func(t *testing.T, tc testCase) {
logger := hclog.New(nil)
got := hostnameEndpoints(logger, tc.localKey, tc.nodes)
require.Equal(t, tc.want, got)
}
cases := []testCase{
{
name: "same locality and no LAN hostname endpoints",
localKey: GatewayKey{Datacenter: "dc1", Partition: structs.PartitionOrDefault("")},
nodes: structs.CheckServiceNodes{
{
Node: &structs.Node{
Node: "mesh-gateway",
Datacenter: "dc1",
},
Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
"10.0.1.1", 8443,
structs.ServiceAddress{},
structs.ServiceAddress{Address: "123.us-west-1.elb.notaws.com", Port: 443}),
},
{
Node: &structs.Node{
Node: "mesh-gateway",
Datacenter: "dc1",
},
Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
"10.0.2.2", 8443,
structs.ServiceAddress{},
structs.ServiceAddress{Address: "123.us-west-2.elb.notaws.com", Port: 443}),
},
},
want: nil,
},
{
name: "same locality and one LAN hostname endpoint",
localKey: GatewayKey{Datacenter: "dc1", Partition: structs.PartitionOrDefault("")},
nodes: structs.CheckServiceNodes{
{
Node: &structs.Node{
Node: "mesh-gateway",
Datacenter: "dc1",
},
Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
"gateway.mydomain", 8443,
structs.ServiceAddress{},
structs.ServiceAddress{Address: "123.us-west-1.elb.notaws.com", Port: 443}),
},
{
Node: &structs.Node{
Node: "mesh-gateway",
Datacenter: "dc1",
},
Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
"10.0.2.2", 8443,
structs.ServiceAddress{},
structs.ServiceAddress{Address: "123.us-west-2.elb.notaws.com", Port: 443}),
},
},
want: structs.CheckServiceNodes{
{
Node: &structs.Node{
Node: "mesh-gateway",
Datacenter: "dc1",
},
Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
"gateway.mydomain", 8443,
structs.ServiceAddress{},
structs.ServiceAddress{Address: "123.us-west-1.elb.notaws.com", Port: 443}),
},
},
},
{
name: "different locality and one WAN hostname endpoint",
localKey: GatewayKey{Datacenter: "dc2", Partition: structs.PartitionOrDefault("")},
nodes: structs.CheckServiceNodes{
{
Node: &structs.Node{
Node: "mesh-gateway",
Datacenter: "dc1",
},
Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
"gateway.mydomain", 8443,
structs.ServiceAddress{},
structs.ServiceAddress{Address: "8.8.8.8", Port: 443}),
},
{
Node: &structs.Node{
Node: "mesh-gateway",
Datacenter: "dc1",
},
Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
"10.0.2.2", 8443,
structs.ServiceAddress{},
structs.ServiceAddress{Address: "123.us-west-2.elb.notaws.com", Port: 443}),
},
},
want: structs.CheckServiceNodes{
{
Node: &structs.Node{
Node: "mesh-gateway",
Datacenter: "dc1",
},
Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
"10.0.2.2", 8443,
structs.ServiceAddress{},
structs.ServiceAddress{Address: "123.us-west-2.elb.notaws.com", Port: 443}),
},
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
run(t, c)
})
}
}

@@ -285,7 +285,10 @@ func (s *handlerTerminatingGateway) handleUpdate(ctx context.Context, u cache.Up
 if len(resp.Nodes) > 0 {
 snap.TerminatingGateway.ServiceGroups[sn] = resp.Nodes
 snap.TerminatingGateway.HostnameServices[sn] = hostnameEndpoints(
-s.logger, snap.Datacenter, resp.Nodes)
+s.logger,
+snap.Locality,
+resp.Nodes,
+)
 }
 // Store leaf cert for watched service

@@ -13,6 +13,7 @@ import (
 "github.com/mitchellh/go-testing-interface"
 "github.com/stretchr/testify/require"
+"github.com/hashicorp/consul/acl"
 "github.com/hashicorp/consul/agent/cache"
 cachetype "github.com/hashicorp/consul/agent/cache-types"
 "github.com/hashicorp/consul/agent/connect"
@@ -674,11 +675,12 @@ func TestConfigSnapshot(t testing.T) *ConfigSnapshot {
 upstreams := structs.TestUpstreams(t)
 return &ConfigSnapshot{
-Kind: structs.ServiceKindConnectProxy,
-Service: "web-sidecar-proxy",
-ProxyID: structs.NewServiceID("web-sidecar-proxy", nil),
-Address: "0.0.0.0",
-Port: 9999,
+Locality: GatewayKey{Datacenter: "dc1", Partition: acl.DefaultPartitionName},
+Kind: structs.ServiceKindConnectProxy,
+Service: "web-sidecar-proxy",
+ProxyID: structs.NewServiceID("web-sidecar-proxy", nil),
+Address: "0.0.0.0",
+Port: 9999,
 Proxy: structs.ConnectProxyConfig{
 DestinationServiceID: "web",
 DestinationServiceName: "web",
@@ -798,11 +800,12 @@ func testConfigSnapshotDiscoveryChain(t testing.T, variation string, additionalE
 roots, leaf := TestCerts(t)
 snap := &ConfigSnapshot{
-Kind: structs.ServiceKindConnectProxy,
-Service: "web-sidecar-proxy",
-ProxyID: structs.NewServiceID("web-sidecar-proxy", nil),
-Address: "0.0.0.0",
-Port: 9999,
+Locality: GatewayKey{Datacenter: "dc1", Partition: acl.DefaultPartitionName},
+Kind: structs.ServiceKindConnectProxy,
+Service: "web-sidecar-proxy",
+ProxyID: structs.NewServiceID("web-sidecar-proxy", nil),
+Address: "0.0.0.0",
+Port: 9999,
 Proxy: structs.ConnectProxyConfig{
 DestinationServiceID: "web",
 DestinationServiceName: "web",
@@ -1429,7 +1432,7 @@ func setupTestVariationConfigEntriesAndSnapshot(
 TestUpstreamNodesDC2(t)
 snap.WatchedGatewayEndpoints = map[string]map[string]structs.CheckServiceNodes{
 "db": {
-"default.dc2": TestGatewayNodesDC2(t),
+"dc2": TestGatewayNodesDC2(t),
 },
 }
 case "failover-through-double-remote-gateway-triggered":
@@ -1442,8 +1445,8 @@ func setupTestVariationConfigEntriesAndSnapshot(
 snap.WatchedUpstreamEndpoints["db"]["db.default.default.dc3"] = TestUpstreamNodesDC2(t)
 snap.WatchedGatewayEndpoints = map[string]map[string]structs.CheckServiceNodes{
 "db": {
-"default.dc2": TestGatewayNodesDC2(t),
-"default.dc3": TestGatewayNodesDC3(t),
+"dc2": TestGatewayNodesDC2(t),
+"dc3": TestGatewayNodesDC3(t),
 },
 }
 case "failover-through-local-gateway-triggered":
@@ -1455,7 +1458,7 @@ func setupTestVariationConfigEntriesAndSnapshot(
 TestUpstreamNodesDC2(t)
 snap.WatchedGatewayEndpoints = map[string]map[string]structs.CheckServiceNodes{
 "db": {
-"default.dc1": TestGatewayNodesDC1(t),
+"dc1": TestGatewayNodesDC1(t),
 },
 }
 case "failover-through-double-local-gateway-triggered":
@@ -1468,7 +1471,7 @@ func setupTestVariationConfigEntriesAndSnapshot(
 snap.WatchedUpstreamEndpoints["db"]["db.default.default.dc3"] = TestUpstreamNodesDC2(t)
 snap.WatchedGatewayEndpoints = map[string]map[string]structs.CheckServiceNodes{
 "db": {
-"default.dc1": TestGatewayNodesDC1(t),
+"dc1": TestGatewayNodesDC1(t),
 },
 }
 case "splitter-with-resolver-redirect-multidc":
@@ -1510,11 +1513,12 @@ func TestConfigSnapshotMeshGatewayNoServices(t testing.T) *ConfigSnapshot {
 func testConfigSnapshotMeshGateway(t testing.T, populateServices bool, useFederationStates bool) *ConfigSnapshot {
 roots, _ := TestCerts(t)
 snap := &ConfigSnapshot{
-Kind: structs.ServiceKindMeshGateway,
-Service: "mesh-gateway",
-ProxyID: structs.NewServiceID("mesh-gateway", nil),
-Address: "1.2.3.4",
-Port: 8443,
+Locality: GatewayKey{Datacenter: "dc1", Partition: acl.DefaultPartitionName},
+Kind: structs.ServiceKindMeshGateway,
+Service: "mesh-gateway",
+ProxyID: structs.NewServiceID("mesh-gateway", nil),
+Address: "1.2.3.4",
+Port: 8443,
 Proxy: structs.ConnectProxyConfig{
 Config: map[string]interface{}{},
 },
@@ -1543,7 +1547,7 @@ func testConfigSnapshotMeshGateway(t testing.T, populateServices bool, useFedera
 },
 WatchedServicesSet: true,
 WatchedGateways: map[string]context.CancelFunc{
-"default.dc2": nil,
+"dc2": nil,
 },
 ServiceGroups: map[structs.ServiceName]structs.CheckServiceNodes{
 structs.NewServiceName("foo", nil): TestGatewayServiceGroupFooDC1(t),
@@ -1721,6 +1725,7 @@ func testConfigSnapshotIngressGateway(
 roots, leaf := TestCerts(t)
 snap := &ConfigSnapshot{
+Locality: GatewayKey{Datacenter: "dc1", Partition: acl.DefaultPartitionName},
 Kind: structs.ServiceKindIngressGateway,
 Service: "ingress-gateway",
 ProxyID: structs.NewServiceID("ingress-gateway", nil),
@@ -1760,11 +1765,12 @@ func testConfigSnapshotIngressGateway(
 func TestConfigSnapshotExposeConfig(t testing.T) *ConfigSnapshot {
 return &ConfigSnapshot{
-Kind: structs.ServiceKindConnectProxy,
-Service: "web-proxy",
-ProxyID: structs.NewServiceID("web-proxy", nil),
-Address: "1.2.3.4",
-Port: 8080,
+Locality: GatewayKey{Datacenter: "dc1", Partition: acl.DefaultPartitionName},
+Kind: structs.ServiceKindConnectProxy,
+Service: "web-proxy",
+ProxyID: structs.NewServiceID("web-proxy", nil),
+Address: "1.2.3.4",
+Port: 8080,
 Proxy: structs.ConnectProxyConfig{
 DestinationServiceName: "web",
 DestinationServiceID: "web",
@@ -1801,10 +1807,11 @@ func testConfigSnapshotTerminatingGateway(t testing.T, populateServices bool) *C
 roots, _ := TestCerts(t)
 snap := &ConfigSnapshot{
-Kind: structs.ServiceKindTerminatingGateway,
-Service: "terminating-gateway",
-ProxyID: structs.NewServiceID("terminating-gateway", nil),
-Address: "1.2.3.4",
+Locality: GatewayKey{Datacenter: "dc1", Partition: acl.DefaultPartitionName},
+Kind: structs.ServiceKindTerminatingGateway,
+Service: "terminating-gateway",
+ProxyID: structs.NewServiceID("terminating-gateway", nil),
+Address: "1.2.3.4",
 TaggedAddresses: map[string]structs.ServiceAddress{
 structs.TaggedAddressWAN: {
 Address: "198.18.0.1",
@@ -2035,11 +2042,12 @@ func testConfigSnapshotTerminatingGateway(t testing.T, populateServices bool) *C
 func TestConfigSnapshotGRPCExposeHTTP1(t testing.T) *ConfigSnapshot {
 return &ConfigSnapshot{
-Kind: structs.ServiceKindConnectProxy,
-Service: "grpc-proxy",
-ProxyID: structs.NewServiceID("grpc-proxy", nil),
-Address: "1.2.3.4",
-Port: 8080,
+Locality: GatewayKey{Datacenter: "dc1", Partition: acl.DefaultPartitionName},
+Kind: structs.ServiceKindConnectProxy,
+Service: "grpc-proxy",
+ProxyID: structs.NewServiceID("grpc-proxy", nil),
+Address: "1.2.3.4",
+Port: 8080,
 Proxy: structs.ConnectProxyConfig{
 DestinationServiceName: "grpc",
 DestinationServiceID: "grpc",

@@ -107,6 +107,8 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u cache.Up
 Addrs: make(map[string]struct{}),
 }
 }
+// TODO(partitions) Update to account for upstream in remote partition once tproxy supports it
 addr, _ := node.BestAddress(false)
 upstreamsSnapshot.PassthroughUpstreams[svc.String()].Addrs[addr] = struct{}{}
 }

@@ -209,14 +209,14 @@ func (s *ResourceGenerator) clustersFromSnapshotMeshGateway(cfgSnap *proxycfg.Co
 // Generate the remote clusters
 for _, key := range keys {
-if key.Matches(cfgSnap.Datacenter, cfgSnap.ProxyID.PartitionOrEmpty()) {
+if key.Matches(cfgSnap.Datacenter, cfgSnap.ProxyID.PartitionOrDefault()) {
 continue // skip local
 }
 opts := gatewayClusterOpts{
 name: connect.GatewaySNI(key.Datacenter, key.Partition, cfgSnap.Roots.TrustDomain),
 hostnameEndpoints: cfgSnap.MeshGateway.HostnameDatacenters[key.String()],
-isRemote: key.Datacenter != cfgSnap.Datacenter,
+isRemote: true,
 }
 cluster := s.makeGatewayCluster(cfgSnap, opts)
 clusters = append(clusters, cluster)
@@ -238,7 +238,7 @@ func (s *ResourceGenerator) clustersFromSnapshotMeshGateway(cfgSnap *proxycfg.Co
 opts := gatewayClusterOpts{
 name: cfgSnap.ServerSNIFn(key.Datacenter, ""),
 hostnameEndpoints: hostnameEndpoints,
-isRemote: key.Datacenter != cfgSnap.Datacenter,
+isRemote: !key.Matches(cfgSnap.Datacenter, cfgSnap.ProxyID.PartitionOrDefault()),
 }
 cluster := s.makeGatewayCluster(cfgSnap, opts)
 clusters = append(clusters, cluster)
@@ -299,10 +299,16 @@ func (s *ResourceGenerator) makeGatewayServiceClusters(
 hostnameEndpoints = cfgSnap.TerminatingGateway.HostnameServices[svc]
 }
+var isRemote bool
+if len(services[svc]) > 0 {
+isRemote = !cfgSnap.Locality.Matches(services[svc][0].Node.Datacenter, services[svc][0].Node.PartitionOrDefault())
+}
 opts := gatewayClusterOpts{
 name: clusterName,
 hostnameEndpoints: hostnameEndpoints,
 connectTimeout: resolver.ConnectTimeout,
+isRemote: isRemote,
 }
 cluster := s.makeGatewayCluster(cfgSnap, opts)
@@ -323,6 +329,7 @@ func (s *ResourceGenerator) makeGatewayServiceClusters(
 hostnameEndpoints: subsetHostnameEndpoints,
 onlyPassing: subset.OnlyPassing,
 connectTimeout: resolver.ConnectTimeout,
+isRemote: isRemote,
 }
 cluster := s.makeGatewayCluster(cfgSnap, opts)

@@ -51,7 +51,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
 es := s.endpointsFromDiscoveryChain(
 id,
 chain,
-proxycfg.GatewayKey{Datacenter: cfgSnap.Datacenter, Partition: cfgSnap.ProxyID.PartitionOrDefault()},
+cfgSnap.Locality,
 cfgSnap.ConnectProxy.UpstreamConfig[id],
 cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[id],
 cfgSnap.ConnectProxy.WatchedGatewayEndpoints[id],
@@ -79,7 +79,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
 []loadAssignmentEndpointGroup{
 {Endpoints: endpoints},
 },
-cfgSnap.Datacenter,
+cfgSnap.Locality,
 )
 resources = append(resources, la)
 }
@@ -114,7 +114,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C
 resources := make([]proto.Message, 0, len(keys)+len(cfgSnap.MeshGateway.ServiceGroups))
 for _, key := range keys {
-if key.Matches(cfgSnap.Datacenter, cfgSnap.ProxyID.PartitionOrEmpty()) {
+if key.Matches(cfgSnap.Datacenter, cfgSnap.ProxyID.PartitionOrDefault()) {
 continue // skip local
 }
 // Also skip gateways with a hostname as their address. EDS cannot resolve hostnames,
@@ -140,7 +140,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C
 []loadAssignmentEndpointGroup{
 {Endpoints: endpoints},
 },
-cfgSnap.Datacenter,
+cfgSnap.Locality,
 )
 resources = append(resources, la)
 }
@@ -155,7 +155,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.C
 []loadAssignmentEndpointGroup{
 {Endpoints: endpoints},
 },
-cfgSnap.Datacenter,
+cfgSnap.Locality,
 )
 resources = append(resources, la)
 }
@@ -256,7 +256,7 @@ func (s *ResourceGenerator) endpointsFromServicesAndResolvers(
 la := makeLoadAssignment(
 clusterName,
 groups,
-cfgSnap.Datacenter,
+cfgSnap.Locality,
 )
 resources = append(resources, la)
 }
@@ -427,7 +427,7 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 la := makeLoadAssignment(
 clusterName,
 endpointGroups,
-gatewayKey.Datacenter,
+gatewayKey,
 )
 resources = append(resources, la)
 }
@@ -441,7 +441,7 @@ type loadAssignmentEndpointGroup struct {
 OverrideHealth envoy_core_v3.HealthStatus
 }
-func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpointGroup, localDatacenter string) *envoy_endpoint_v3.ClusterLoadAssignment {
+func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpointGroup, localKey proxycfg.GatewayKey) *envoy_endpoint_v3.ClusterLoadAssignment {
 cla := &envoy_endpoint_v3.ClusterLoadAssignment{
 ClusterName: clusterName,
 Endpoints: make([]*envoy_endpoint_v3.LocalityLbEndpoints, 0, len(endpointGroups)),
@@ -461,7 +461,7 @@ func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpo
 for _, ep := range endpoints {
 // TODO (mesh-gateway) - should we respect the translate_wan_addrs configuration here or just always use the wan for cross-dc?
-addr, port := ep.BestAddress(localDatacenter != ep.Node.Datacenter)
+addr, port := ep.BestAddress(!localKey.Matches(ep.Node.Datacenter, ep.Node.PartitionOrDefault()))
 healthStatus, weight := calculateEndpointHealthAndWeight(ep, endpointGroup.OnlyPassing)
 if endpointGroup.OverrideHealth != envoy_core_v3.HealthStatus_UNKNOWN {

@@ -209,7 +209,7 @@ func Test_makeLoadAssignment(t *testing.T) {
 got := makeLoadAssignment(
 tt.clusterName,
 tt.endpoints,
-"dc1",
+proxycfg.GatewayKey{Datacenter: "dc1"},
 )
 require.Equal(t, tt.want, got)
 })