mirror of https://github.com/status-im/consul.git
Implement Mesh Gateways
This includes both ingress and egress functionality.
This commit is contained in:
parent da8db83ddf
commit 8d953f5840
@ -195,8 +195,10 @@ func buildAgentService(s *structs.NodeService, proxies map[string]*local.Managed
if as.Meta == nil {
as.Meta = map[string]string{}
}
// Attach Unmanaged Proxy config if exists
if s.Kind == structs.ServiceKindConnectProxy {
// Attach Proxy config if exists
if s.Kind == structs.ServiceKindConnectProxy ||
s.Kind == structs.ServiceKindMeshGateway {

as.Proxy = s.Proxy.ToAPI()
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
// Also set the deprecated ProxyDestination
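For illustration, a minimal usage sketch (not part of this commit, and assuming the standard Go API client): registering a mesh gateway through the agent and reading its Proxy config back from /v1/agent/services, which the hunk above now populates for the mesh-gateway kind as well.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Hypothetical example; service names and ports are made up.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	reg := &api.AgentServiceRegistration{
		Kind: api.ServiceKindMeshGateway,
		ID:   "mg-dc1-01",
		Name: "mg-dc1",
		Port: 8443,
		Proxy: &api.AgentServiceConnectProxyConfig{
			Config: map[string]interface{}{"foo": "bar"},
		},
	}
	if err := client.Agent().ServiceRegister(reg); err != nil {
		log.Fatal(err)
	}

	// The agent endpoint now attaches Proxy for mesh gateways too.
	services, err := client.Agent().Services()
	if err != nil {
		log.Fatal(err)
	}
	mg := services["mg-dc1-01"]
	fmt.Println(mg.Kind, mg.Proxy.Config)
}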
@ -376,7 +378,9 @@ func (s *HTTPServer) AgentService(resp http.ResponseWriter, req *http.Request) (
}
}

if svc.Kind == structs.ServiceKindConnectProxy {
if svc.Kind == structs.ServiceKindConnectProxy ||
svc.Kind == structs.ServiceKindMeshGateway {

proxy = svc.Proxy.ToAPI()
}
@ -231,6 +231,38 @@ func TestAgent_Services_Sidecar(t *testing.T) {
|
|||
assert.NotContains(string(output), "locally_registered_as_sidecar")
|
||||
}
|
||||
|
||||
// This tests that a mesh gateway service is returned as expected.
|
||||
func TestAgent_Services_MeshGateway(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
a := NewTestAgent(t, t.Name(), "")
|
||||
defer a.Shutdown()
|
||||
|
||||
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||
srv1 := &structs.NodeService{
|
||||
Kind: structs.ServiceKindMeshGateway,
|
||||
ID: "mg-dc1-01",
|
||||
Service: "mg-dc1",
|
||||
Port: 8443,
|
||||
Proxy: structs.ConnectProxyConfig{
|
||||
Config: map[string]interface{}{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
}
|
||||
a.State.AddService(srv1, "")
|
||||
|
||||
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
|
||||
obj, err := a.srv.AgentServices(nil, req)
|
||||
require.NoError(t, err)
|
||||
val := obj.(map[string]*api.AgentService)
|
||||
require.Len(t, val, 1)
|
||||
actual := val["mg-dc1-01"]
|
||||
require.NotNil(t, actual)
|
||||
require.Equal(t, api.ServiceKindMeshGateway, actual.Kind)
|
||||
require.Equal(t, srv1.Proxy.ToAPI(), actual.Proxy)
|
||||
}
|
||||
|
||||
func TestAgent_Services_ACLFilter(t *testing.T) {
|
||||
t.Parallel()
|
||||
a := NewTestAgent(t, t.Name(), TestACLConfig())
|
||||
|
@ -624,7 +656,7 @@ func TestAgent_Service_DeprecatedManagedProxy(t *testing.T) {
|
|||
Service: "web-proxy",
|
||||
Port: 9999,
|
||||
Address: "10.10.10.10",
|
||||
ContentHash: "e24f099e42e88317",
|
||||
ContentHash: "245d12541a0e7e84",
|
||||
Proxy: &api.AgentServiceConnectProxyConfig{
|
||||
DestinationServiceID: "web",
|
||||
DestinationServiceName: "web",
|
||||
|
@ -5177,7 +5209,7 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) {
|
|||
ProxyServiceID: "test-proxy",
|
||||
TargetServiceID: "test",
|
||||
TargetServiceName: "test",
|
||||
ContentHash: "a7c93585b6d70445",
|
||||
ContentHash: "cd9fae3f744900f3",
|
||||
ExecMode: "daemon",
|
||||
Command: []string{"tubes.sh"},
|
||||
Config: map[string]interface{}{
|
||||
|
@ -5198,7 +5230,7 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) {
|
|||
ur, err := copystructure.Copy(expectedResponse)
|
||||
require.NoError(t, err)
|
||||
updatedResponse := ur.(*api.ConnectProxyConfig)
|
||||
updatedResponse.ContentHash = "aedc0ca0f3f7794e"
|
||||
updatedResponse.ContentHash = "59b052e51c1dada3"
|
||||
updatedResponse.Upstreams = append(updatedResponse.Upstreams, api.Upstream{
|
||||
DestinationType: "service",
|
||||
DestinationName: "cache",
|
||||
|
|
|
@ -1295,6 +1295,8 @@ func (b *Builder) serviceKindVal(v *string) structs.ServiceKind {
|
|||
switch *v {
|
||||
case string(structs.ServiceKindConnectProxy):
|
||||
return structs.ServiceKindConnectProxy
|
||||
case string(structs.ServiceKindMeshGateway):
|
||||
return structs.ServiceKindMeshGateway
|
||||
default:
|
||||
return structs.ServiceKindTypical
|
||||
}
|
||||
|
|
|
@ -3564,6 +3564,17 @@ func TestFullConfig(t *testing.T) {
|
|||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "kvVqbwSE",
|
||||
"kind": "mesh-gateway",
|
||||
"name": "gw-primary-dc",
|
||||
"port": 27147,
|
||||
"proxy": {
|
||||
"config": {
|
||||
"1CuJHVfw" : "Kzqsa7yc"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"session_ttl_min": "26627s",
|
||||
|
@ -4156,6 +4167,17 @@ func TestFullConfig(t *testing.T) {
|
|||
},
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
id = "kvVqbwSE"
|
||||
kind = "mesh-gateway"
|
||||
name = "gw-primary-dc"
|
||||
port = 27147
|
||||
proxy {
|
||||
config {
|
||||
"1CuJHVfw" = "Kzqsa7yc"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
session_ttl_min = "26627s"
|
||||
|
@ -4740,6 +4762,21 @@ func TestFullConfig(t *testing.T) {
|
|||
Warning: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: "kvVqbwSE",
|
||||
Kind: "mesh-gateway",
|
||||
Name: "gw-primary-dc",
|
||||
Port: 27147,
|
||||
Proxy: &structs.ConnectProxyConfig{
|
||||
Config: map[string]interface{}{
|
||||
"1CuJHVfw": "Kzqsa7yc",
|
||||
},
|
||||
},
|
||||
Weights: &structs.Weights{
|
||||
Passing: 1,
|
||||
Warning: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: "dLOXpSCI",
|
||||
Name: "o1ynPkp0",
|
||||
|
|
|
@ -182,6 +182,44 @@ func TestConfig_Apply(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestConfig_Apply_ProxyDefaultsMeshGateway(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
a := NewTestAgent(t, t.Name(), "")
|
||||
defer a.Shutdown()
|
||||
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
|
||||
|
||||
// Create some config entries.
|
||||
body := bytes.NewBuffer([]byte(`
|
||||
{
|
||||
"Kind": "proxy-defaults",
|
||||
"Name": "global",
|
||||
"MeshGateway": {
|
||||
"Mode": "local"
|
||||
}
|
||||
}`))
|
||||
|
||||
req, _ := http.NewRequest("PUT", "/v1/config", body)
|
||||
resp := httptest.NewRecorder()
|
||||
_, err := a.srv.ConfigApply(resp, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 200, resp.Code, "!200 Response Code: %s", resp.Body.String())
|
||||
|
||||
// Get the remaining entry.
|
||||
{
|
||||
args := structs.ConfigEntryQuery{
|
||||
Kind: structs.ProxyDefaults,
|
||||
Name: "global",
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
var out structs.ConfigEntryResponse
|
||||
require.NoError(t, a.RPC("ConfigEntry.Get", &args, &out))
|
||||
require.NotNil(t, out.Entry)
|
||||
entry := out.Entry.(*structs.ProxyConfigEntry)
|
||||
require.Equal(t, structs.MeshGatewayModeLocal, entry.MeshGateway.Mode)
|
||||
}
|
||||
}
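A hypothetical companion sketch (not in this commit) of how the merged mode can be observed through the ConfigEntry.ResolveServiceConfig RPC shown later in this diff; the rpc parameter mirrors the a.RPC signature used by the test above, and the field names assume structs.ServiceConfigRequest/Response as they appear elsewhere in this commit.

package example

import "github.com/hashicorp/consul/agent/structs"

// resolvedMode asks the servers for the effective service config of "web"
// and returns the mesh gateway mode merged from proxy-defaults and
// service-defaults (hypothetical helper, for illustration only).
func resolvedMode(rpc func(method string, args interface{}, reply interface{}) error) (structs.MeshGatewayMode, error) {
	args := structs.ServiceConfigRequest{
		Name:       "web",
		Datacenter: "dc1",
	}
	var reply structs.ServiceConfigResponse
	if err := rpc("ConfigEntry.ResolveServiceConfig", &args, &reply); err != nil {
		return structs.MeshGatewayModeDefault, err
	}
	return reply.MeshGateway.Mode, nil
}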
|
||||
|
||||
func TestConfig_Apply_CAS(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
|
|
|
@ -553,3 +553,9 @@ func (s *Server) getACLReplicationStatusRunningType() (structs.ACLReplicationTyp
|
|||
defer s.aclReplicationStatusLock.RUnlock()
|
||||
return s.aclReplicationStatus.ReplicationType, s.aclReplicationStatus.Running
|
||||
}
|
||||
|
||||
func (s *Server) getACLReplicationStatus() structs.ACLReplicationStatus {
|
||||
s.aclReplicationStatusLock.RLock()
|
||||
defer s.aclReplicationStatusLock.RUnlock()
|
||||
return s.aclReplicationStatus
|
||||
}
|
||||
|
|
|
@ -375,7 +375,7 @@ func TestACLReplication_LegacyTokens(t *testing.T) {
|
|||
// legacy replication isn't meddling.
|
||||
waitForNewACLs(t, s1)
|
||||
waitForNewACLs(t, s2)
|
||||
waitForNewACLReplication(t, s2, structs.ACLReplicateTokens)
|
||||
waitForNewACLReplication(t, s2, structs.ACLReplicateTokens, 1, 1, 0)
|
||||
|
||||
// Create a bunch of new tokens.
|
||||
var id string
|
||||
|
|
|
@ -326,7 +326,7 @@ func TestACLReplication_Tokens(t *testing.T) {
|
|||
// legacy replication isn't meddling.
|
||||
waitForNewACLs(t, s1)
|
||||
waitForNewACLs(t, s2)
|
||||
waitForNewACLReplication(t, s2, structs.ACLReplicateTokens)
|
||||
waitForNewACLReplication(t, s2, structs.ACLReplicateTokens, 1, 1, 0)
|
||||
|
||||
// Create a bunch of new tokens and policies
|
||||
var tokens structs.ACLTokens
|
||||
|
@ -508,7 +508,7 @@ func TestACLReplication_Policies(t *testing.T) {
|
|||
// legacy replication isn't meddling.
|
||||
waitForNewACLs(t, s1)
|
||||
waitForNewACLs(t, s2)
|
||||
waitForNewACLReplication(t, s2, structs.ACLReplicatePolicies)
|
||||
waitForNewACLReplication(t, s2, structs.ACLReplicatePolicies, 1, 0, 0)
|
||||
|
||||
// Create a bunch of new policies
|
||||
var policies structs.ACLPolicies
|
||||
|
@ -775,7 +775,7 @@ func TestACLReplication_AllTypes(t *testing.T) {
|
|||
// legacy replication isn't meddling.
|
||||
waitForNewACLs(t, s1)
|
||||
waitForNewACLs(t, s2)
|
||||
waitForNewACLReplication(t, s2, structs.ACLReplicateTokens)
|
||||
waitForNewACLReplication(t, s2, structs.ACLReplicateTokens, 1, 1, 0)
|
||||
|
||||
const (
|
||||
numItems = 50
|
||||
|
|
|
@ -231,6 +231,7 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
reply.MeshGateway.Mode = structs.MeshGatewayModeDefault
|
||||
// Pass the WatchSet to both the service and proxy config lookups. If either is updated
|
||||
// during the blocking query, this function will be rerun and these state store lookups
|
||||
// will both be current.
|
||||
|
@ -263,16 +264,22 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
|||
return fmt.Errorf("failed to copy global proxy-defaults: %v", err)
|
||||
}
|
||||
reply.ProxyConfig = mapCopy.(map[string]interface{})
|
||||
reply.MeshGateway = proxyConf.MeshGateway
|
||||
}
|
||||
|
||||
reply.Index = index
|
||||
|
||||
if serviceConf != nil && serviceConf.Protocol != "" {
|
||||
if serviceConf != nil {
|
||||
if serviceConf.MeshGateway.Mode != structs.MeshGatewayModeDefault {
|
||||
reply.MeshGateway.Mode = serviceConf.MeshGateway.Mode
|
||||
}
|
||||
if serviceConf.Protocol != "" {
|
||||
if reply.ProxyConfig == nil {
|
||||
reply.ProxyConfig = make(map[string]interface{})
|
||||
}
|
||||
reply.ProxyConfig["protocol"] = serviceConf.Protocol
|
||||
}
|
||||
}
|
||||
|
||||
// Apply the upstream protocols to the upstream configs
|
||||
for _, upstream := range args.Upstreams {
|
||||
|
|
|
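The hunk above establishes the precedence between the global proxy-defaults mode and a per-service service-defaults mode. A minimal standalone sketch of that rule (hypothetical helper, not in the commit):

package example

import "github.com/hashicorp/consul/agent/structs"

// resolveMeshGatewayMode mirrors the precedence implemented above: start from
// the global proxy-defaults mode, then let an explicitly set service-defaults
// mode override it. An unset (default) mode never overrides anything.
func resolveMeshGatewayMode(global, service structs.MeshGatewayConfig) structs.MeshGatewayMode {
	mode := global.Mode
	if service.Mode != structs.MeshGatewayModeDefault {
		mode = service.Mode
	}
	return mode
}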
@ -66,7 +66,36 @@ func TestConfigEntry_Apply(t *testing.T) {
|
|||
require.Equal("foo", serviceConf.Name)
|
||||
require.Equal("tcp", serviceConf.Protocol)
|
||||
require.Equal(structs.ServiceDefaults, serviceConf.Kind)
|
||||
}
|
||||
|
||||
func TestConfigEntry_ProxyDefaultsMeshGateway(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir1, s1 := testServer(t)
|
||||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
args := structs.ConfigEntryRequest{
|
||||
Datacenter: "dc1",
|
||||
Entry: &structs.ProxyConfigEntry{
|
||||
Kind: "proxy-defaults",
|
||||
Name: "global",
|
||||
MeshGateway: structs.MeshGatewayConfig{Mode: "local"},
|
||||
},
|
||||
}
|
||||
out := false
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &args, &out))
|
||||
require.True(t, out)
|
||||
|
||||
state := s1.fsm.State()
|
||||
_, entry, err := state.ConfigEntry(nil, structs.ProxyDefaults, "global")
|
||||
require.NoError(t, err)
|
||||
|
||||
proxyConf, ok := entry.(*structs.ProxyConfigEntry)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, structs.MeshGatewayModeLocal, proxyConf.MeshGateway.Mode)
|
||||
}
|
||||
|
||||
func TestConfigEntry_Apply_ACLDeny(t *testing.T) {
|
||||
|
|
|
@ -166,19 +166,15 @@ func waitForNewACLs(t *testing.T, server *Server) {
|
|||
require.False(t, server.UseLegacyACLs(), "Server cannot use new ACLs")
|
||||
}
|
||||
|
||||
func waitForNewACLReplication(t *testing.T, server *Server, expectedReplicationType structs.ACLReplicationType) {
|
||||
var (
|
||||
replTyp structs.ACLReplicationType
|
||||
running bool
|
||||
)
|
||||
func waitForNewACLReplication(t *testing.T, server *Server, expectedReplicationType structs.ACLReplicationType, minPolicyIndex, minTokenIndex, minRoleIndex uint64) {
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
replTyp, running = server.getACLReplicationStatusRunningType()
|
||||
require.Equal(r, expectedReplicationType, replTyp, "Server not running new replicator yet")
|
||||
require.True(r, running, "Server not running new replicator yet")
|
||||
status := server.getACLReplicationStatus()
|
||||
require.Equal(r, expectedReplicationType, status.ReplicationType, "Server not running new replicator yet")
|
||||
require.True(r, status.Running, "Server not running new replicator yet")
|
||||
require.True(r, status.ReplicatedIndex >= minPolicyIndex, "Server hasn't replicated enough policies")
|
||||
require.True(r, status.ReplicatedTokenIndex >= minTokenIndex, "Server hasn't replicated enough tokens")
|
||||
require.True(r, status.ReplicatedRoleIndex >= minRoleIndex, "Server hasn't replicated enough roles")
|
||||
})
|
||||
|
||||
require.Equal(t, expectedReplicationType, replTyp, "Server not running new replicator yet")
|
||||
require.True(t, running, "Server not running new replicator yet")
|
||||
}
|
||||
|
||||
func seeEachOther(a, b []serf.Member, addra, addrb string) bool {
|
||||
|
@ -496,7 +492,7 @@ func registerTestCatalogEntries(t *testing.T, codec rpc.ClientCodec) {
|
|||
registerTestCatalogEntriesMap(t, codec, registrations)
|
||||
}
|
||||
|
||||
func registerTestCatalogEntries2(t *testing.T, codec rpc.ClientCodec) {
|
||||
func registerTestCatalogEntriesMeshGateway(t *testing.T, codec rpc.ClientCodec) {
|
||||
t.Helper()
|
||||
|
||||
registrations := map[string]*structs.RegisterRequest{
|
||||
|
@ -513,7 +509,7 @@ func registerTestCatalogEntries2(t *testing.T, codec rpc.ClientCodec) {
|
|||
Address: "198.18.1.4",
|
||||
},
|
||||
},
|
||||
"Service rproxy": &structs.RegisterRequest{
|
||||
"Service web-proxy": &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "proxy",
|
||||
ID: types.NodeID("2d31602c-3291-4f94-842d-446bc2f945ce"),
|
||||
|
|
|
@ -67,6 +67,7 @@ func TestIntentionApply_new(t *testing.T) {
|
|||
actual.CreateIndex, actual.ModifyIndex = 0, 0
|
||||
actual.CreatedAt = ixn.Intention.CreatedAt
|
||||
actual.UpdatedAt = ixn.Intention.UpdatedAt
|
||||
actual.Hash = ixn.Intention.Hash
|
||||
ixn.Intention.UpdatePrecedence()
|
||||
assert.Equal(ixn.Intention, actual)
|
||||
}
|
||||
|
@ -222,6 +223,7 @@ func TestIntentionApply_updateGood(t *testing.T) {
|
|||
actual.CreateIndex, actual.ModifyIndex = 0, 0
|
||||
actual.CreatedAt = ixn.Intention.CreatedAt
|
||||
actual.UpdatedAt = ixn.Intention.UpdatedAt
|
||||
actual.Hash = ixn.Intention.Hash
|
||||
ixn.Intention.UpdatePrecedence()
|
||||
assert.Equal(ixn.Intention, actual)
|
||||
}
|
||||
|
@ -381,6 +383,7 @@ service "foo" {
|
|||
actual.CreateIndex, actual.ModifyIndex = 0, 0
|
||||
actual.CreatedAt = ixn.Intention.CreatedAt
|
||||
actual.UpdatedAt = ixn.Intention.UpdatedAt
|
||||
actual.Hash = ixn.Intention.Hash
|
||||
ixn.Intention.UpdatePrecedence()
|
||||
assert.Equal(ixn.Intention, actual)
|
||||
}
|
||||
|
|
|
@ -496,7 +496,7 @@ func TestInternal_ServiceDump_Kind(t *testing.T) {
|
|||
|
||||
// prep the cluster with some data we can use in our filters
|
||||
registerTestCatalogEntries(t, codec)
|
||||
registerTestCatalogEntries2(t, codec)
|
||||
registerTestCatalogEntriesMeshGateway(t, codec)
|
||||
|
||||
doRequest := func(t *testing.T, kind structs.ServiceKind) structs.CheckServiceNodes {
|
||||
t.Helper()
|
||||
|
|
|
@ -229,11 +229,9 @@ func TestLeader_SecondaryCA_IntermediateRefresh(t *testing.T) {
|
|||
func TestLeader_SecondaryCA_TransitionFromPrimary(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
require := require.New(t)
|
||||
|
||||
// Initialize dc1 as the primary DC
|
||||
id1, err := uuid.GenerateUUID()
|
||||
require.NoError(err)
|
||||
require.NoError(t, err)
|
||||
dir1, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.PrimaryDatacenter = "dc1"
|
||||
c.CAConfig.ClusterID = id1
|
||||
|
@ -246,7 +244,7 @@ func TestLeader_SecondaryCA_TransitionFromPrimary(t *testing.T) {
|
|||
|
||||
// dc2 as a primary DC initially
|
||||
id2, err := uuid.GenerateUUID()
|
||||
require.NoError(err)
|
||||
require.NoError(t, err)
|
||||
dir2, s2 := testServerWithConfig(t, func(c *Config) {
|
||||
c.Datacenter = "dc2"
|
||||
c.PrimaryDatacenter = "dc2"
|
||||
|
@ -260,8 +258,8 @@ func TestLeader_SecondaryCA_TransitionFromPrimary(t *testing.T) {
|
|||
testrpc.WaitForLeader(t, s2.RPC, "dc2")
|
||||
args := structs.DCSpecificRequest{Datacenter: "dc2"}
|
||||
var dc2PrimaryRoots structs.IndexedCARoots
|
||||
require.NoError(s2.RPC("ConnectCA.Roots", &args, &dc2PrimaryRoots))
|
||||
require.Len(dc2PrimaryRoots.Roots, 1)
|
||||
require.NoError(t, s2.RPC("ConnectCA.Roots", &args, &dc2PrimaryRoots))
|
||||
require.Len(t, dc2PrimaryRoots.Roots, 1)
|
||||
|
||||
// Set the ExternalTrustDomain to a blank string to simulate an old version (pre-1.4.0)
|
||||
// it's fine to change the roots struct directly here because the RPC endpoint already
|
||||
|
@ -274,7 +272,7 @@ func TestLeader_SecondaryCA_TransitionFromPrimary(t *testing.T) {
|
|||
Roots: dc2PrimaryRoots.Roots,
|
||||
}
|
||||
resp, err := s2.raftApply(structs.ConnectCARequestType, rootSetArgs)
|
||||
require.NoError(err)
|
||||
require.NoError(t, err)
|
||||
if respErr, ok := resp.(error); ok {
|
||||
t.Fatal(respErr)
|
||||
}
|
||||
|
@ -296,21 +294,22 @@ func TestLeader_SecondaryCA_TransitionFromPrimary(t *testing.T) {
|
|||
testrpc.WaitForLeader(t, s3.RPC, "dc2")
|
||||
|
||||
// Verify the secondary has migrated its TrustDomain and added the new primary's root.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
args = structs.DCSpecificRequest{Datacenter: "dc1"}
|
||||
var dc1Roots structs.IndexedCARoots
|
||||
require.NoError(s1.RPC("ConnectCA.Roots", &args, &dc1Roots))
|
||||
require.Len(dc1Roots.Roots, 1)
|
||||
require.NoError(r, s1.RPC("ConnectCA.Roots", &args, &dc1Roots))
|
||||
require.Len(r, dc1Roots.Roots, 1)
|
||||
|
||||
args = structs.DCSpecificRequest{Datacenter: "dc2"}
|
||||
var dc2SecondaryRoots structs.IndexedCARoots
|
||||
require.NoError(s3.RPC("ConnectCA.Roots", &args, &dc2SecondaryRoots))
|
||||
require.NoError(r, s3.RPC("ConnectCA.Roots", &args, &dc2SecondaryRoots))
|
||||
|
||||
// dc2's TrustDomain should have changed to the primary's
|
||||
require.Equal(dc2SecondaryRoots.TrustDomain, dc1Roots.TrustDomain)
|
||||
require.NotEqual(dc2SecondaryRoots.TrustDomain, dc2PrimaryRoots.TrustDomain)
|
||||
require.Equal(r, dc2SecondaryRoots.TrustDomain, dc1Roots.TrustDomain)
|
||||
require.NotEqual(r, dc2SecondaryRoots.TrustDomain, dc2PrimaryRoots.TrustDomain)
|
||||
|
||||
// Both roots should be present and correct
|
||||
require.Len(dc2SecondaryRoots.Roots, 2)
|
||||
require.Len(r, dc2SecondaryRoots.Roots, 2)
|
||||
var oldSecondaryRoot *structs.CARoot
|
||||
var newSecondaryRoot *structs.CARoot
|
||||
if dc2SecondaryRoots.Roots[0].ID == dc2PrimaryRoots.Roots[0].ID {
|
||||
|
@ -322,12 +321,13 @@ func TestLeader_SecondaryCA_TransitionFromPrimary(t *testing.T) {
|
|||
}
|
||||
|
||||
// The old root should have its TrustDomain filled in as the old domain.
|
||||
require.Equal(oldSecondaryRoot.ExternalTrustDomain, strings.TrimSuffix(dc2PrimaryRoots.TrustDomain, ".consul"))
|
||||
require.Equal(r, oldSecondaryRoot.ExternalTrustDomain, strings.TrimSuffix(dc2PrimaryRoots.TrustDomain, ".consul"))
|
||||
|
||||
require.Equal(oldSecondaryRoot.ID, dc2PrimaryRoots.Roots[0].ID)
|
||||
require.Equal(oldSecondaryRoot.RootCert, dc2PrimaryRoots.Roots[0].RootCert)
|
||||
require.Equal(newSecondaryRoot.ID, dc1Roots.Roots[0].ID)
|
||||
require.Equal(newSecondaryRoot.RootCert, dc1Roots.Roots[0].RootCert)
|
||||
require.Equal(r, oldSecondaryRoot.ID, dc2PrimaryRoots.Roots[0].ID)
|
||||
require.Equal(r, oldSecondaryRoot.RootCert, dc2PrimaryRoots.Roots[0].RootCert)
|
||||
require.Equal(r, newSecondaryRoot.ID, dc1Roots.Roots[0].ID)
|
||||
require.Equal(r, newSecondaryRoot.RootCert, dc1Roots.Roots[0].RootCert)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLeader_SecondaryCA_UpgradeBeforePrimary(t *testing.T) {
|
||||
|
@ -665,6 +665,7 @@ func TestLeader_ReplicateIntentions_forwardToPrimary(t *testing.T) {
|
|||
actual.CreateIndex, actual.ModifyIndex = 0, 0
|
||||
actual.CreatedAt = ixn.Intention.CreatedAt
|
||||
actual.UpdatedAt = ixn.Intention.UpdatedAt
|
||||
actual.Hash = ixn.Intention.Hash
|
||||
ixn.Intention.UpdatePrecedence()
|
||||
assert.Equal(ixn.Intention, actual)
|
||||
|
||||
|
|
|
@ -277,7 +277,7 @@ type Server struct {
|
|||
shutdownCh chan struct{}
|
||||
shutdownLock sync.Mutex
|
||||
|
||||
// State for enterprise leader logic
|
||||
// State for multi-dc connect leader logic
|
||||
connectLock sync.RWMutex
|
||||
connectEnabled bool
|
||||
connectCh chan struct{}
|
||||
|
|
|
@ -194,7 +194,6 @@ func (s *Store) caSetConfigTxn(idx uint64, tx *memdb.Txn, config *structs.CAConf
|
|||
}
|
||||
config.ModifyIndex = idx
|
||||
|
||||
fmt.Printf("\n\nInserting CA Config: %#v", config)
|
||||
if err := tx.Insert(caConfigTableName, config); err != nil {
|
||||
return fmt.Errorf("failed updating CA config: %s", err)
|
||||
}
|
||||
|
@ -279,7 +278,6 @@ func (s *Store) CARootActive(ws memdb.WatchSet) (uint64, *structs.CARoot, error)
|
|||
//
|
||||
// The first boolean result returns whether the transaction succeeded or not.
|
||||
func (s *Store) CARootSetCAS(idx, cidx uint64, rs []*structs.CARoot) (bool, error) {
|
||||
fmt.Printf("\n\nSetting the CA Roots: idx: %d, cidx: %d - %#v\n", idx, cidx, rs)
|
||||
tx := s.db.Txn(true)
|
||||
defer tx.Abort()
|
||||
|
||||
|
@ -327,7 +325,6 @@ func (s *Store) CARootSetCAS(idx, cidx uint64, rs []*structs.CARoot) (bool, erro
|
|||
|
||||
// Insert all
|
||||
for _, r := range rs {
|
||||
fmt.Printf("Inserting CA Root: %#v\n", r)
|
||||
if err := tx.Insert(caRootTableName, r); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
@ -338,7 +335,6 @@ func (s *Store) CARootSetCAS(idx, cidx uint64, rs []*structs.CARoot) (bool, erro
|
|||
return false, fmt.Errorf("failed updating index: %s", err)
|
||||
}
|
||||
|
||||
fmt.Printf("\n\n")
|
||||
tx.Commit()
|
||||
return true, nil
|
||||
}
|
||||
|
|
|
@ -9,7 +9,9 @@ import (
|
|||
|
||||
// IndexServiceKind indexes a *struct.ServiceNode for querying by
|
||||
// the services kind. We need a custom indexer because of the default
|
||||
// kind being the empty string
|
||||
// kind being the empty string. The StringFieldIndex in memdb seems to
|
||||
// treat the empty string as missing and doesn't work correctly when we actually
|
||||
// want to index ""
|
||||
type IndexServiceKind struct{}
|
||||
|
||||
func (idx *IndexServiceKind) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
|
|
|
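The hunk cuts off before the indexer body. A plausible sketch of what such an indexer looks like (assumed shape, not copied from the commit): it always emits a key, so the empty "typical" kind is indexable, unlike memdb's StringFieldIndex described above.

package example

import (
	"fmt"

	"github.com/hashicorp/consul/agent/structs"
)

// indexServiceKindSketch illustrates the idea: emit a NUL-terminated key for
// every ServiceNode, including those whose kind is the empty string.
type indexServiceKindSketch struct{}

func (idx *indexServiceKindSketch) FromObject(obj interface{}) (bool, []byte, error) {
	sn, ok := obj.(*structs.ServiceNode)
	if !ok {
		return false, nil, fmt.Errorf("object is not a ServiceNode: %T", obj)
	}
	// Always index, even when ServiceKind == "" (a typical service).
	return true, []byte(string(sn.ServiceKind) + "\x00"), nil
}

func (idx *indexServiceKindSketch) FromArgs(args ...interface{}) ([]byte, error) {
	if len(args) != 1 {
		return nil, fmt.Errorf("must provide exactly one argument")
	}
	kind, ok := args[0].(structs.ServiceKind)
	if !ok {
		return nil, fmt.Errorf("argument must be a structs.ServiceKind: %T", args[0])
	}
	return []byte(string(kind) + "\x00"), nil
}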
@ -9,6 +9,21 @@ import (
|
|||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
// fixHashField is used to convert the JSON string to a []byte before handing to mapstructure
|
||||
func fixHashField(raw interface{}) error {
|
||||
rawMap, ok := raw.(map[string]interface{})
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
if val, ok := rawMap["Hash"]; ok {
|
||||
if sval, ok := val.(string); ok {
|
||||
rawMap["Hash"] = []byte(sval)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
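Why this is needed, as a small hedged illustration (same package as the handler, hypothetical function): the request body is decoded into a generic map before mapstructure runs, so a Hash that arrived as a JSON string has to be coerced to []byte by hand.

package agent

// exampleFixHash is a hypothetical illustration of the coercion above.
func exampleFixHash() map[string]interface{} {
	raw := map[string]interface{}{
		"SourceName":      "web",
		"DestinationName": "db",
		"Hash":            "abc123", // decoded from JSON as a plain string
	}
	_ = fixHashField(raw) // afterwards raw["Hash"] is []byte("abc123")
	return raw
}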
|
||||
|
||||
// /v1/connection/intentions
|
||||
func (s *HTTPServer) IntentionEndpoint(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
switch req.Method {
|
||||
|
@ -50,7 +65,7 @@ func (s *HTTPServer) IntentionCreate(resp http.ResponseWriter, req *http.Request
|
|||
}
|
||||
s.parseDC(req, &args.Datacenter)
|
||||
s.parseToken(req, &args.Token)
|
||||
if err := decodeBody(req, &args.Intention, nil); err != nil {
|
||||
if err := decodeBody(req, &args.Intention, fixHashField); err != nil {
|
||||
return nil, fmt.Errorf("Failed to decode request body: %s", err)
|
||||
}
|
||||
|
||||
|
@ -243,10 +258,8 @@ func (s *HTTPServer) IntentionSpecificUpdate(id string, resp http.ResponseWriter
|
|||
}
|
||||
s.parseDC(req, &args.Datacenter)
|
||||
s.parseToken(req, &args.Token)
|
||||
if err := decodeBody(req, &args.Intention, nil); err != nil {
|
||||
resp.WriteHeader(http.StatusBadRequest)
|
||||
fmt.Fprintf(resp, "Request decode failed: %v", err)
|
||||
return nil, nil
|
||||
if err := decodeBody(req, &args.Intention, fixHashField); err != nil {
|
||||
return nil, BadRequestError{Reason: fmt.Sprintf("Request decode failed: %v", err)}
|
||||
}
|
||||
|
||||
// Use the ID from the URL
|
||||
|
|
|
@ -131,7 +131,7 @@ func (m *Manager) syncState() {
|
|||
// Traverse the local state and ensure all proxy services are registered
|
||||
services := m.State.Services()
|
||||
for svcID, svc := range services {
|
||||
if svc.Kind != structs.ServiceKindConnectProxy {
|
||||
if svc.Kind != structs.ServiceKindConnectProxy && svc.Kind != structs.ServiceKindMeshGateway {
|
||||
continue
|
||||
}
|
||||
// TODO(banks): need to work out when to default some stuff. For example
|
||||
|
|
|
@ -107,15 +107,18 @@ func TestManager_BasicLifecycle(t *testing.T) {
|
|||
// coalesce timeout
|
||||
expectSnap := &ConfigSnapshot{
|
||||
Kind: structs.ServiceKindConnectProxy,
|
||||
Service: webProxy.Service,
|
||||
ProxyID: webProxy.ID,
|
||||
Address: webProxy.Address,
|
||||
Port: webProxy.Port,
|
||||
Proxy: webProxy.Proxy,
|
||||
TaggedAddresses: make(map[string]structs.ServiceAddress),
|
||||
Roots: roots,
|
||||
Leaf: leaf,
|
||||
UpstreamEndpoints: map[string]structs.CheckServiceNodes{
|
||||
"db": TestUpstreamNodes(t),
|
||||
},
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
start := time.Now()
|
||||
assertWatchChanRecvs(t, wCh, expectSnap)
|
||||
|
|
|
@ -1,22 +1,43 @@
|
|||
package proxycfg
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/mitchellh/copystructure"
|
||||
)
|
||||
|
||||
type configSnapshotConnectProxy struct {
|
||||
Leaf *structs.IssuedCert
|
||||
UpstreamEndpoints map[string]structs.CheckServiceNodes
|
||||
}
|
||||
|
||||
type configSnapshotMeshGateway struct {
|
||||
WatchedServices map[string]context.CancelFunc
|
||||
WatchedDatacenters map[string]context.CancelFunc
|
||||
ServiceGroups map[string]structs.CheckServiceNodes
|
||||
GatewayGroups map[string]structs.CheckServiceNodes
|
||||
}
|
||||
|
||||
// ConfigSnapshot captures all the resulting config needed for a proxy instance.
|
||||
// It is meant to be point-in-time coherent and is used to deliver the current
|
||||
// config state to observers who need it to be pushed in (e.g. XDS server).
|
||||
type ConfigSnapshot struct {
|
||||
Kind structs.ServiceKind
|
||||
Service string
|
||||
ProxyID string
|
||||
Address string
|
||||
Port int
|
||||
TaggedAddresses map[string]structs.ServiceAddress
|
||||
Proxy structs.ConnectProxyConfig
|
||||
Datacenter string
|
||||
Roots *structs.IndexedCARoots
|
||||
Leaf *structs.IssuedCert
|
||||
UpstreamEndpoints map[string]structs.CheckServiceNodes
|
||||
|
||||
// connect-proxy specific
|
||||
ConnectProxy configSnapshotConnectProxy
|
||||
|
||||
// mesh-gateway specific
|
||||
MeshGateway configSnapshotMeshGateway
|
||||
|
||||
// Skip intentions for now as we don't push those down yet, just pre-warm them.
|
||||
}
|
||||
|
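For context, a hypothetical consumer sketch (not in this commit): observers such as the xDS layer are expected to branch on Kind and read the kind-specific sub-structs rather than the old flat fields.

package example

import (
	"github.com/hashicorp/consul/agent/proxycfg"
	"github.com/hashicorp/consul/agent/structs"
)

// upstreamCount shows how a snapshot consumer would pick the right sub-struct.
func upstreamCount(snap *proxycfg.ConfigSnapshot) int {
	switch snap.Kind {
	case structs.ServiceKindConnectProxy:
		return len(snap.ConnectProxy.UpstreamEndpoints)
	case structs.ServiceKindMeshGateway:
		return len(snap.MeshGateway.ServiceGroups) + len(snap.MeshGateway.GatewayGroups)
	default:
		return 0
	}
}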
@ -25,7 +46,10 @@ type ConfigSnapshot struct {
|
|||
func (s *ConfigSnapshot) Valid() bool {
|
||||
switch s.Kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
return s.Roots != nil && s.Leaf != nil
|
||||
return s.Roots != nil && s.ConnectProxy.Leaf != nil
|
||||
case structs.ServiceKindMeshGateway:
|
||||
// TODO (mesh-gateway) - what happens if all the connect services go away
|
||||
return s.Roots != nil && len(s.MeshGateway.ServiceGroups) > 0
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
@ -38,5 +62,15 @@ func (s *ConfigSnapshot) Clone() (*ConfigSnapshot, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return snapCopy.(*ConfigSnapshot), nil
|
||||
|
||||
snap := snapCopy.(*ConfigSnapshot)
|
||||
|
||||
switch s.Kind {
|
||||
case structs.ServiceKindMeshGateway:
|
||||
// nil these out as anything receiving one of these clones does not need them and should never "cancel" our watches
|
||||
snap.MeshGateway.WatchedDatacenters = nil
|
||||
snap.MeshGateway.WatchedServices = nil
|
||||
}
|
||||
|
||||
return snap, nil
|
||||
}
|
||||
|
|
|
@ -20,6 +20,8 @@ const (
|
|||
rootsWatchID = "roots"
|
||||
leafWatchID = "leaf"
|
||||
intentionsWatchID = "intentions"
|
||||
serviceListWatchID = "service-list"
|
||||
datacentersWatchID = "datacenters"
|
||||
serviceIDPrefix = string(structs.UpstreamDestTypeService) + ":"
|
||||
preparedQueryIDPrefix = string(structs.UpstreamDestTypePreparedQuery) + ":"
|
||||
defaultPreparedQueryPollInterval = 30 * time.Second
|
||||
|
@ -39,9 +41,11 @@ type state struct {
|
|||
cancel func()
|
||||
|
||||
kind structs.ServiceKind
|
||||
service string
|
||||
proxyID string
|
||||
address string
|
||||
port int
|
||||
taggedAddresses map[string]structs.ServiceAddress
|
||||
proxyCfg structs.ConnectProxyConfig
|
||||
token string
|
||||
|
||||
|
@ -58,8 +62,8 @@ type state struct {
|
|||
// The returned state needs its required dependencies to be set before Watch
|
||||
// can be called.
|
||||
func newState(ns *structs.NodeService, token string) (*state, error) {
|
||||
if ns.Kind != structs.ServiceKindConnectProxy {
|
||||
return nil, errors.New("not a connect-proxy")
|
||||
if ns.Kind != structs.ServiceKindConnectProxy && ns.Kind != structs.ServiceKindMeshGateway {
|
||||
return nil, errors.New("not a connect-proxy or mesh-gateway")
|
||||
}
|
||||
|
||||
// Copy the config map
|
||||
|
@ -72,11 +76,18 @@ func newState(ns *structs.NodeService, token string) (*state, error) {
|
|||
return nil, errors.New("failed to copy proxy config")
|
||||
}
|
||||
|
||||
taggedAddresses := make(map[string]structs.ServiceAddress)
|
||||
for k, v := range ns.TaggedAddresses {
|
||||
taggedAddresses[k] = v
|
||||
}
|
||||
|
||||
return &state{
|
||||
kind: ns.Kind,
|
||||
service: ns.Service,
|
||||
proxyID: ns.ID,
|
||||
address: ns.Address,
|
||||
port: ns.Port,
|
||||
taggedAddresses: taggedAddresses,
|
||||
proxyCfg: proxyCfg,
|
||||
token: token,
|
||||
// 10 is fairly arbitrary here but allow for the 3 mandatory and a
|
||||
|
@ -123,11 +134,46 @@ func (s *state) initWatches() error {
|
|||
switch s.kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
return s.initWatchesConnectProxy()
|
||||
case structs.ServiceKindMeshGateway:
|
||||
return s.initWatchesMeshGateway()
|
||||
default:
|
||||
return fmt.Errorf("Unsupported service kind")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) watchConnectProxyService(correlationId string, service string, dc string, filter string, meshGatewayMode structs.MeshGatewayMode) error {
|
||||
switch meshGatewayMode {
|
||||
case structs.MeshGatewayModeRemote:
|
||||
return s.cache.Notify(s.ctx, cachetype.InternalServiceDumpName, &structs.ServiceDumpRequest{
|
||||
Datacenter: dc,
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
ServiceKind: structs.ServiceKindMeshGateway,
|
||||
UseServiceKind: true,
|
||||
}, correlationId, s.ch)
|
||||
case structs.MeshGatewayModeLocal:
|
||||
return s.cache.Notify(s.ctx, cachetype.InternalServiceDumpName, &structs.ServiceDumpRequest{
|
||||
Datacenter: s.source.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
ServiceKind: structs.ServiceKindMeshGateway,
|
||||
UseServiceKind: true,
|
||||
}, correlationId, s.ch)
|
||||
default:
|
||||
// This includes both the None and Default modes on purpose
|
||||
return s.cache.Notify(s.ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{
|
||||
Datacenter: dc,
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: s.token,
|
||||
Filter: filter,
|
||||
},
|
||||
ServiceName: service,
|
||||
Connect: true,
|
||||
// Note that Identifier doesn't type-prefix for service any more as it's
|
||||
// the default and makes metrics and other things much cleaner. It's
|
||||
// simpler for us if we have the type to make things unambiguous.
|
||||
}, correlationId, s.ch)
|
||||
}
|
||||
}
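In other words, only a cross-datacenter upstream with an explicitly configured gateway mode is routed through the mesh-gateway dump watch; everything else stays on the plain Connect health watch. A hypothetical distillation (assuming, as elsewhere, that an upstream's Datacenter falls back to the local one when unset):

package example

import "github.com/hashicorp/consul/agent/structs"

// usesGatewayWatch reports whether the upstream would be served by a
// mesh-gateway service-dump watch rather than a direct health watch.
func usesGatewayWatch(localDC string, u structs.Upstream) bool {
	dc := localDC
	if u.Datacenter != "" {
		dc = u.Datacenter
	}
	if dc == localDC {
		// Gateways are not (yet) used for upstreams within the datacenter.
		return false
	}
	switch u.MeshGateway.Mode {
	case structs.MeshGatewayModeLocal, structs.MeshGatewayModeRemote:
		return true
	default:
		return false
	}
}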
|
||||
|
||||
// initWatchesConnectProxy sets up the watches needed based on current proxy registration
|
||||
// state.
|
||||
func (s *state) initWatchesConnectProxy() error {
|
||||
|
@ -186,17 +232,14 @@ func (s *state) initWatchesConnectProxy() error {
|
|||
case structs.UpstreamDestTypeService:
|
||||
fallthrough
|
||||
case "": // Treat unset as the default Service type
|
||||
err = s.cache.Notify(s.ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{
|
||||
Datacenter: dc,
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
ServiceName: u.DestinationName,
|
||||
Connect: true,
|
||||
// Note that Identifier doesn't type-prefix for service any more as it's
|
||||
// the default and makes metrics and other things much cleaner. It's
|
||||
// simpler for us if we have the type to make things unambiguous.
|
||||
}, "upstream:"+serviceIDPrefix+u.Identifier(), s.ch)
|
||||
meshGateway := structs.MeshGatewayModeNone
|
||||
|
||||
if err != nil {
|
||||
// TODO (mesh-gateway)- maybe allow using a gateway within a datacenter at some point
|
||||
if dc != s.source.Datacenter {
|
||||
meshGateway = u.MeshGateway.Mode
|
||||
}
|
||||
|
||||
if err := s.watchConnectProxyService("upstream:"+serviceIDPrefix+u.Identifier(), u.DestinationName, dc, "", meshGateway); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -207,6 +250,43 @@ func (s *state) initWatchesConnectProxy() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// initWatchesMeshGateway sets up the watches needed based on the current mesh gateway registration
|
||||
func (s *state) initWatchesMeshGateway() error {
|
||||
// Watch for root changes
|
||||
err := s.cache.Notify(s.ctx, cachetype.ConnectCARootName, &structs.DCSpecificRequest{
|
||||
Datacenter: s.source.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
}, rootsWatchID, s.ch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Watch for all services
|
||||
err = s.cache.Notify(s.ctx, cachetype.CatalogListServicesName, &structs.DCSpecificRequest{
|
||||
Datacenter: s.source.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
}, serviceListWatchID, s.ch)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Eventually we will have to watch connect-enabled instances for each service as well as the
// destination services themselves, but those notifications will be set up later. However, we
// cannot set up those watches until we know what the services are, from the service-list
// watch above.
|
||||
|
||||
err = s.cache.Notify(s.ctx, cachetype.CatalogDatacentersName, &structs.DatacentersRequest{
|
||||
QueryOptions: structs.QueryOptions{Token: s.token, MaxAge: 30 * time.Second},
|
||||
}, datacentersWatchID, s.ch)
|
||||
|
||||
// Once we start getting notified about the datacenters we will setup watches on the
|
||||
// gateways within those other datacenters. We cannot do that here because we don't
|
||||
// know what they are yet.
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *state) run() {
|
||||
// Close the channel we return from Watch when we stop so consumers can stop
|
||||
// watching and clean up their goroutines. It's important we do this here and
|
||||
|
@ -216,15 +296,24 @@ func (s *state) run() {
|
|||
|
||||
snap := ConfigSnapshot{
|
||||
Kind: s.kind,
|
||||
Service: s.service,
|
||||
ProxyID: s.proxyID,
|
||||
Address: s.address,
|
||||
Port: s.port,
|
||||
TaggedAddresses: s.taggedAddresses,
|
||||
Proxy: s.proxyCfg,
|
||||
Datacenter: s.source.Datacenter,
|
||||
}
|
||||
|
||||
switch s.kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
snap.UpstreamEndpoints = make(map[string]structs.CheckServiceNodes)
|
||||
snap.ConnectProxy.UpstreamEndpoints = make(map[string]structs.CheckServiceNodes)
|
||||
case structs.ServiceKindMeshGateway:
|
||||
snap.MeshGateway.WatchedServices = make(map[string]context.CancelFunc)
|
||||
snap.MeshGateway.WatchedDatacenters = make(map[string]context.CancelFunc)
|
||||
// TODO (mesh-gateway) - maybe reuse UpstreamEndpoints?
|
||||
snap.MeshGateway.ServiceGroups = make(map[string]structs.CheckServiceNodes)
|
||||
snap.MeshGateway.GatewayGroups = make(map[string]structs.CheckServiceNodes)
|
||||
}
|
||||
|
||||
// This turns out to be really fiddly/painful by just using time.Timer.C
|
||||
|
@ -303,6 +392,8 @@ func (s *state) handleUpdate(u cache.UpdateEvent, snap *ConfigSnapshot) error {
|
|||
switch s.kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
return s.handleUpdateConnectProxy(u, snap)
|
||||
case structs.ServiceKindMeshGateway:
|
||||
return s.handleUpdateMeshGateway(u, snap)
|
||||
default:
|
||||
return fmt.Errorf("Unsupported service kind")
|
||||
}
|
||||
|
@ -321,7 +412,7 @@ func (s *state) handleUpdateConnectProxy(u cache.UpdateEvent, snap *ConfigSnapsh
|
|||
if !ok {
|
||||
return fmt.Errorf("invalid type for leaf response: %T", u.Result)
|
||||
}
|
||||
snap.Leaf = leaf
|
||||
snap.ConnectProxy.Leaf = leaf
|
||||
case intentionsWatchID:
|
||||
// Not in snapshot currently, no op
|
||||
default:
|
||||
|
@ -333,7 +424,7 @@ func (s *state) handleUpdateConnectProxy(u cache.UpdateEvent, snap *ConfigSnapsh
|
|||
return fmt.Errorf("invalid type for service response: %T", u.Result)
|
||||
}
|
||||
svc := strings.TrimPrefix(u.CorrelationID, "upstream:"+serviceIDPrefix)
|
||||
snap.UpstreamEndpoints[svc] = resp.Nodes
|
||||
snap.ConnectProxy.UpstreamEndpoints[svc] = resp.Nodes
|
||||
|
||||
case strings.HasPrefix(u.CorrelationID, "upstream:"+preparedQueryIDPrefix):
|
||||
resp, ok := u.Result.(*structs.PreparedQueryExecuteResponse)
|
||||
|
@ -341,7 +432,7 @@ func (s *state) handleUpdateConnectProxy(u cache.UpdateEvent, snap *ConfigSnapsh
|
|||
return fmt.Errorf("invalid type for prepared query response: %T", u.Result)
|
||||
}
|
||||
pq := strings.TrimPrefix(u.CorrelationID, "upstream:")
|
||||
snap.UpstreamEndpoints[pq] = resp.Nodes
|
||||
snap.ConnectProxy.UpstreamEndpoints[pq] = resp.Nodes
|
||||
|
||||
default:
|
||||
return errors.New("unknown correlation ID")
|
||||
|
@ -350,6 +441,132 @@ func (s *state) handleUpdateConnectProxy(u cache.UpdateEvent, snap *ConfigSnapsh
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *state) handleUpdateMeshGateway(u cache.UpdateEvent, snap *ConfigSnapshot) error {
|
||||
switch u.CorrelationID {
|
||||
case rootsWatchID:
|
||||
roots, ok := u.Result.(*structs.IndexedCARoots)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid type for roots response: %T", u.Result)
|
||||
}
|
||||
snap.Roots = roots
|
||||
case serviceListWatchID:
|
||||
services, ok := u.Result.(*structs.IndexedServices)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid type for services response: %T", u.Result)
|
||||
}
|
||||
|
||||
for svcName := range services.Services {
|
||||
if _, ok := snap.MeshGateway.WatchedServices[svcName]; !ok {
|
||||
ctx, cancel := context.WithCancel(s.ctx)
|
||||
err := s.cache.Notify(ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{
|
||||
Datacenter: s.source.Datacenter,
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
ServiceName: svcName,
|
||||
Connect: true,
|
||||
}, fmt.Sprintf("connect-service:%s", svcName), s.ch)
|
||||
|
||||
if err != nil {
|
||||
s.logger.Printf("[ERR] mesh-gateway: failed to register watch for connect-service:%s", svcName)
|
||||
cancel()
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO (mesh-gateway) also should watch the resolver config for the service here
|
||||
snap.MeshGateway.WatchedServices[svcName] = cancel
|
||||
}
|
||||
}
|
||||
|
||||
for svcName, cancelFn := range snap.MeshGateway.WatchedServices {
|
||||
if _, ok := services.Services[svcName]; !ok {
|
||||
delete(snap.MeshGateway.WatchedServices, svcName)
|
||||
cancelFn()
|
||||
}
|
||||
}
|
||||
case datacentersWatchID:
|
||||
datacentersRaw, ok := u.Result.(*[]string)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid type for datacenters response: %T", u.Result)
|
||||
}
|
||||
if datacentersRaw == nil {
|
||||
return fmt.Errorf("invalid response with a nil datacenter list")
|
||||
}
|
||||
|
||||
datacenters := *datacentersRaw
|
||||
|
||||
for _, dc := range datacenters {
|
||||
if dc == s.source.Datacenter {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := snap.MeshGateway.WatchedDatacenters[dc]; !ok {
|
||||
ctx, cancel := context.WithCancel(s.ctx)
|
||||
err := s.cache.Notify(ctx, cachetype.InternalServiceDumpName, &structs.ServiceDumpRequest{
|
||||
Datacenter: dc,
|
||||
QueryOptions: structs.QueryOptions{Token: s.token},
|
||||
ServiceKind: structs.ServiceKindMeshGateway,
|
||||
UseServiceKind: true,
|
||||
}, fmt.Sprintf("mesh-gateway:%s", dc), s.ch)
|
||||
|
||||
if err != nil {
|
||||
s.logger.Printf("[ERR] mesh-gateway: failed to register watch for mesh-gateway:%s", dc)
|
||||
cancel()
|
||||
return err
|
||||
}
|
||||
|
||||
snap.MeshGateway.WatchedDatacenters[dc] = cancel
|
||||
}
|
||||
}
|
||||
|
||||
for dc, cancelFn := range snap.MeshGateway.WatchedDatacenters {
|
||||
found := false
|
||||
for _, dcCurrent := range datacenters {
|
||||
if dcCurrent == dc {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
delete(snap.MeshGateway.WatchedDatacenters, dc)
|
||||
cancelFn()
|
||||
}
|
||||
}
|
||||
default:
|
||||
switch {
|
||||
case strings.HasPrefix(u.CorrelationID, "connect-service:"):
|
||||
resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid type for service response: %T", u.Result)
|
||||
}
|
||||
|
||||
svc := strings.TrimPrefix(u.CorrelationID, "connect-service:")
|
||||
|
||||
if len(resp.Nodes) > 0 {
|
||||
snap.MeshGateway.ServiceGroups[svc] = resp.Nodes
|
||||
} else if _, ok := snap.MeshGateway.ServiceGroups[svc]; ok {
|
||||
delete(snap.MeshGateway.ServiceGroups, svc)
|
||||
}
|
||||
case strings.HasPrefix(u.CorrelationID, "mesh-gateway:"):
|
||||
resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid type for service response: %T", u.Result)
|
||||
}
|
||||
|
||||
dc := strings.TrimPrefix(u.CorrelationID, "mesh-gateway:")
|
||||
|
||||
if len(resp.Nodes) > 0 {
|
||||
snap.MeshGateway.GatewayGroups[dc] = resp.Nodes
|
||||
} else if _, ok := snap.MeshGateway.GatewayGroups[dc]; ok {
|
||||
delete(snap.MeshGateway.GatewayGroups, dc)
|
||||
}
|
||||
default:
|
||||
// do nothing for now
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
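The service-list and datacenter branches above follow the same bookkeeping pattern: diff the desired key set against the map of cancel funcs, start watches for new keys, and cancel watches for keys that disappeared. A standalone sketch of that pattern (hypothetical helper, not part of the commit):

package example

import "context"

// reconcileWatches starts a watch for every desired key that is not yet
// watched and cancels watches whose key is no longer desired. The watched map
// is mutated in place, mirroring the snapshot bookkeeping above.
func reconcileWatches(
	ctx context.Context,
	watched map[string]context.CancelFunc,
	desired map[string]struct{},
	start func(ctx context.Context, key string) error,
) error {
	for key := range desired {
		if _, ok := watched[key]; ok {
			continue
		}
		childCtx, cancel := context.WithCancel(ctx)
		if err := start(childCtx, key); err != nil {
			cancel()
			return err
		}
		watched[key] = cancel
	}
	for key, cancel := range watched {
		if _, ok := desired[key]; !ok {
			delete(watched, key)
			cancel()
		}
	}
	return nil
}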
|
||||
|
||||
// CurrentSnapshot synchronously returns the current ConfigSnapshot if there is
|
||||
// one ready. If we don't have one yet because not all necessary parts have been
|
||||
// returned (i.e. both roots and leaf cert), nil is returned.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package proxycfg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
@ -145,11 +146,64 @@ func TestUpstreamNodes(t testing.T) structs.CheckServiceNodes {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGatewayNodesDC2(t testing.T) structs.CheckServiceNodes {
|
||||
return structs.CheckServiceNodes{
|
||||
structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
ID: "mesh-gateway-1",
|
||||
Node: "mesh-gateway",
|
||||
Address: "10.0.1.1",
|
||||
Datacenter: "dc2",
|
||||
},
|
||||
Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
|
||||
"10.0.1.1", 8443,
|
||||
structs.ServiceAddress{Address: "10.0.1.1", Port: 8443},
|
||||
structs.ServiceAddress{Address: "198.18.1.1", Port: 443}),
|
||||
},
|
||||
structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
ID: "mesh-gateway-2",
|
||||
Node: "mesh-gateway",
|
||||
Address: "10.0.1.2",
|
||||
Datacenter: "dc2",
|
||||
},
|
||||
Service: structs.TestNodeServiceMeshGatewayWithAddrs(t,
|
||||
"10.0.1.2", 8443,
|
||||
structs.ServiceAddress{Address: "10.0.1.2", Port: 8443},
|
||||
structs.ServiceAddress{Address: "198.18.1.2", Port: 443}),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestGatewayServicesDC1(t testing.T) structs.CheckServiceNodes {
|
||||
return structs.CheckServiceNodes{
|
||||
structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
ID: "foo-node-1",
|
||||
Node: "foo-node-1",
|
||||
Address: "10.1.1.1",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
Service: structs.TestNodeServiceProxy(t),
|
||||
},
|
||||
structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
ID: "foo-node-2",
|
||||
Node: "foo-node-2",
|
||||
Address: "10.1.1.2",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
Service: structs.TestNodeServiceProxy(t),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TestConfigSnapshot returns a fully populated snapshot
|
||||
func TestConfigSnapshot(t testing.T) *ConfigSnapshot {
|
||||
roots, leaf := TestCerts(t)
|
||||
return &ConfigSnapshot{
|
||||
Kind: structs.ServiceKindConnectProxy,
|
||||
Service: "web-sidecar-proxy",
|
||||
ProxyID: "web-sidecar-proxy",
|
||||
Address: "0.0.0.0",
|
||||
Port: 9999,
|
||||
|
@ -164,10 +218,54 @@ func TestConfigSnapshot(t testing.T) *ConfigSnapshot {
|
|||
Upstreams: structs.TestUpstreams(t),
|
||||
},
|
||||
Roots: roots,
|
||||
ConnectProxy: configSnapshotConnectProxy{
|
||||
Leaf: leaf,
|
||||
UpstreamEndpoints: map[string]structs.CheckServiceNodes{
|
||||
"db": TestUpstreamNodes(t),
|
||||
},
|
||||
},
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigSnapshotMeshGateway(t testing.T) *ConfigSnapshot {
|
||||
roots, _ := TestCerts(t)
|
||||
return &ConfigSnapshot{
|
||||
Kind: structs.ServiceKindMeshGateway,
|
||||
Service: "mesh-gateway",
|
||||
ProxyID: "mesh-gateway",
|
||||
Address: "1.2.3.4",
|
||||
Port: 8443,
|
||||
Proxy: structs.ConnectProxyConfig{
|
||||
Config: map[string]interface{}{},
|
||||
},
|
||||
TaggedAddresses: map[string]structs.ServiceAddress{
|
||||
"lan": structs.ServiceAddress{
|
||||
Address: "1.2.3.4",
|
||||
Port: 8443,
|
||||
},
|
||||
"wan": structs.ServiceAddress{
|
||||
Address: "198.18.0.1",
|
||||
Port: 443,
|
||||
},
|
||||
},
|
||||
Roots: roots,
|
||||
Datacenter: "dc1",
|
||||
MeshGateway: configSnapshotMeshGateway{
|
||||
WatchedServices: map[string]context.CancelFunc{
|
||||
"foo": nil,
|
||||
"bar": nil,
|
||||
},
|
||||
WatchedDatacenters: map[string]context.CancelFunc{
|
||||
"dc2": nil,
|
||||
},
|
||||
ServiceGroups: map[string]structs.CheckServiceNodes{
|
||||
"foo": TestGatewayServicesDC1(t),
|
||||
},
|
||||
GatewayGroups: map[string]structs.CheckServiceNodes{
|
||||
"dc2": TestGatewayNodesDC2(t),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -344,7 +344,7 @@ func (s *serviceConfigWatch) updateRegistration(registration *serviceRegistratio
|
|||
// mergeServiceConfig returns the final effective config for the watched service,
|
||||
// including the latest known global defaults from the servers.
|
||||
func (s *serviceConfigWatch) mergeServiceConfig() (*structs.NodeService, error) {
|
||||
if s.defaults == nil || !s.registration.service.IsSidecarProxy() {
|
||||
if s.defaults == nil || (!s.registration.service.IsSidecarProxy() && !s.registration.service.IsMeshGateway()) {
|
||||
return s.registration.service, nil
|
||||
}
|
||||
|
||||
|
@ -362,6 +362,11 @@ func (s *serviceConfigWatch) mergeServiceConfig() (*structs.NodeService, error)
|
|||
if err := mergo.Merge(&ns.Proxy.Config, s.defaults.ProxyConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ns.Proxy.MeshGateway.Mode == structs.MeshGatewayModeDefault {
|
||||
ns.Proxy.MeshGateway.Mode = s.defaults.MeshGateway.Mode
|
||||
}
|
||||
|
||||
// Merge upstream defaults if there were any returned
|
||||
for i := range ns.Proxy.Upstreams {
|
||||
// Get a pointer not a value copy of the upstream struct
|
||||
|
@ -369,6 +374,12 @@ func (s *serviceConfigWatch) mergeServiceConfig() (*structs.NodeService, error)
|
|||
if us.DestinationType != "" && us.DestinationType != structs.UpstreamDestTypeService {
|
||||
continue
|
||||
}
|
||||
|
||||
// default the upstreams gateway mode if it didn't specify one
|
||||
if us.MeshGateway.Mode == structs.MeshGatewayModeDefault {
|
||||
us.MeshGateway.Mode = ns.Proxy.MeshGateway.Mode
|
||||
}
|
||||
|
||||
usCfg, ok := s.defaults.UpstreamConfigs[us.DestinationName]
|
||||
if !ok {
|
||||
// No config defaults to merge
|
||||
|
|
|
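The effect of the defaulting above, as a small hedged before/after sketch (hypothetical values, not from the commit): an upstream that leaves its gateway mode unset inherits the proxy-level mode during the registration merge.

package example

import "github.com/hashicorp/consul/agent/structs"

// exampleMergedUpstreamMode returns "remote": the upstream left its mode as
// the default, so it picks up the proxy-level MeshGateway mode.
func exampleMergedUpstreamMode() structs.MeshGatewayMode {
	ns := structs.NodeService{
		Kind: structs.ServiceKindConnectProxy,
		Proxy: structs.ConnectProxyConfig{
			MeshGateway: structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeRemote},
			Upstreams: structs.Upstreams{
				{DestinationName: "db", Datacenter: "dc2"}, // mode left unset
			},
		},
	}
	us := &ns.Proxy.Upstreams[0]
	if us.MeshGateway.Mode == structs.MeshGatewayModeDefault {
		us.MeshGateway.Mode = ns.Proxy.MeshGateway.Mode
	}
	return us.MeshGateway.Mode
}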
@ -49,6 +49,8 @@ type ServiceConfigEntry struct {
|
|||
Kind string
|
||||
Name string
|
||||
Protocol string
|
||||
MeshGateway MeshGatewayConfig `json:",omitempty"`
|
||||
|
||||
// TODO(banks): enable this once we have upstreams supported too. Enabling
|
||||
// sidecars actually makes no sense and adds complications when you don't
|
||||
// allow upstreams to be specified centrally too.
|
||||
|
@ -114,6 +116,7 @@ type ProxyConfigEntry struct {
|
|||
Kind string
|
||||
Name string
|
||||
Config map[string]interface{}
|
||||
MeshGateway MeshGatewayConfig `json:",omitempty"`
|
||||
|
||||
RaftIndex
|
||||
}
|
||||
|
@ -231,6 +234,8 @@ func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) {
|
|||
"connect": "Connect",
|
||||
"sidecar_proxy": "SidecarProxy",
|
||||
"protocol": "Protocol",
|
||||
"mesh_gateway": "MeshGateway",
|
||||
"mode": "Mode",
|
||||
"Config": "",
|
||||
})
|
||||
|
||||
|
@ -431,6 +436,7 @@ func (r *ServiceConfigRequest) CacheInfo() cache.RequestInfo {
|
|||
type ServiceConfigResponse struct {
|
||||
ProxyConfig map[string]interface{}
|
||||
UpstreamConfigs map[string]map[string]interface{}
|
||||
MeshGateway MeshGatewayConfig `json:",omitempty"`
|
||||
QueryMeta
|
||||
}
|
||||
|
||||
|
|
|
@ -6,6 +6,35 @@ import (
|
|||
"github.com/hashicorp/consul/api"
|
||||
)
|
||||
|
||||
type MeshGatewayMode string
|
||||
|
||||
const (
|
||||
// MeshGatewayModeDefault represents no specific mode and should
|
||||
// be used to indicate that a different layer of the configuration
|
||||
// chain should take precedence
|
||||
MeshGatewayModeDefault MeshGatewayMode = ""
|
||||
|
||||
// MeshGatewayModeNone represents that the Upstream Connect connections
|
||||
// should be direct and not flow through a mesh gateway.
|
||||
MeshGatewayModeNone MeshGatewayMode = "none"
|
||||
|
||||
// MeshGatewayModeLocal represents that the Upstream Connect connections
// should be made to a mesh gateway in the local datacenter.
|
||||
MeshGatewayModeLocal MeshGatewayMode = "local"
|
||||
|
||||
// MeshGatewayModeRemote represents that the Upstream Connect connections
|
||||
// should be made to a mesh gateway in a remote datacenter.
|
||||
MeshGatewayModeRemote MeshGatewayMode = "remote"
|
||||
)
|
||||
|
||||
// MeshGatewayConfig controls how Mesh Gateways are configured and used
|
||||
// This is a struct to allow for future additions without having more free-hanging
|
||||
// configuration items all over the place
|
||||
type MeshGatewayConfig struct {
|
||||
// The Mesh Gateway routing mode
|
||||
Mode MeshGatewayMode `json:",omitempty"`
|
||||
}
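Putting the new knob in context, a hypothetical registration fragment (names and ports made up): the proxy-level MeshGateway mode acts as a default, and each upstream may override it.

package example

import "github.com/hashicorp/consul/agent/structs"

// exampleProxyConfig builds a proxy config that defaults to the local gateway
// mode but opts a single upstream out of gateway routing entirely.
func exampleProxyConfig() structs.ConnectProxyConfig {
	return structs.ConnectProxyConfig{
		DestinationServiceName: "web",
		MeshGateway:            structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeLocal},
		Upstreams: structs.Upstreams{
			{
				DestinationType: "service",
				DestinationName: "db",
				Datacenter:      "dc2",
				LocalBindPort:   9191,
				// Per-upstream override: connect directly instead.
				MeshGateway: structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeNone},
			},
		},
	}
}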
|
||||
|
||||
// ConnectProxyConfig describes the configuration needed for any proxy managed
|
||||
// or unmanaged. It describes a single logical service's listener and optionally
|
||||
// upstreams and sidecar-related config for a single instance. To describe a
|
||||
|
@ -43,6 +72,9 @@ type ConnectProxyConfig struct {
|
|||
// Upstreams describes any upstream dependencies the proxy instance should
|
||||
// setup.
|
||||
Upstreams Upstreams `json:",omitempty"`
|
||||
|
||||
// MeshGateway defines the mesh gateway configuration for this proxy
|
||||
MeshGateway MeshGatewayConfig `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ToAPI returns the api struct with the same fields. We have duplicates to
|
||||
|
@ -122,6 +154,9 @@ type Upstream struct {
|
|||
// It can be used to pass arbitrary configuration for this specific upstream
|
||||
// to the proxy.
|
||||
Config map[string]interface{} `bexpr:"-"`
|
||||
|
||||
// MeshGateway is the configuration for mesh gateway usage of this upstream
|
||||
MeshGateway MeshGatewayConfig `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Validate sanity checks the struct is valid
|
||||
|
|
|
@ -93,6 +93,7 @@ func TestUpstream_MarshalJSON(t *testing.T) {
|
|||
"DestinationName": "foo",
|
||||
"Datacenter": "dc1",
|
||||
"LocalBindPort": 1234,
|
||||
"MeshGateway": {},
|
||||
"Config": null
|
||||
}`,
|
||||
wantErr: false,
|
||||
|
@ -110,6 +111,7 @@ func TestUpstream_MarshalJSON(t *testing.T) {
|
|||
"DestinationName": "foo",
|
||||
"Datacenter": "dc1",
|
||||
"LocalBindPort": 1234,
|
||||
"MeshGateway": {},
|
||||
"Config": null
|
||||
}`,
|
||||
wantErr: false,
|
||||
|
|
|
@ -560,6 +560,16 @@ type Node struct {
|
|||
|
||||
RaftIndex `bexpr:"-"`
|
||||
}
|
||||
|
||||
func (n *Node) BestAddress(wan bool) string {
|
||||
if wan {
|
||||
if addr, ok := n.TaggedAddresses["wan"]; ok {
|
||||
return addr
|
||||
}
|
||||
}
|
||||
return n.Address
|
||||
}
|
||||
|
||||
type Nodes []*Node
|
||||
|
||||
// IsSame return whether nodes are similar without taking into account
|
||||
|
@ -757,6 +767,11 @@ const (
|
|||
// service proxies another service within Consul and speaks the connect
|
||||
// protocol.
|
||||
ServiceKindConnectProxy ServiceKind = "connect-proxy"
|
||||
|
||||
// ServiceKindMeshGateway is a Mesh Gateway for the Connect feature. This
|
||||
// service will proxy connections based off the SNI header set by other
|
||||
// connect proxies
|
||||
ServiceKindMeshGateway ServiceKind = "mesh-gateway"
|
||||
)
|
||||
|
||||
func ServiceKindFromString(kind string) (ServiceKind, error) {
|
||||
|
@ -852,12 +867,28 @@ type NodeService struct {
|
|||
RaftIndex `bexpr:"-"`
|
||||
}
|
||||
|
||||
func (ns *NodeService) BestAddress(wan bool) (string, int) {
|
||||
addr := ns.Address
|
||||
port := ns.Port
|
||||
|
||||
if wan {
|
||||
if wan, ok := ns.TaggedAddresses["wan"]; ok {
|
||||
addr = wan.Address
|
||||
if wan.Port != 0 {
|
||||
port = wan.Port
|
||||
}
|
||||
}
|
||||
}
|
||||
return addr, port
|
||||
}
|
||||
|
||||
// ServiceConnect are the shared Connect settings between all service
|
||||
// definitions from the agent to the state store.
|
||||
type ServiceConnect struct {
|
||||
// Native is true when this service can natively understand Connect.
|
||||
Native bool `json:",omitempty"`
|
||||
|
||||
// DEPRECATED(managed-proxies) - Remove with the rest of managed proxies
|
||||
// Proxy configures a connect proxy instance for the service. This is
|
||||
// only used for agent service definitions and is invalid for non-agent
|
||||
// (catalog API) definitions.
|
||||
|
@ -878,6 +909,11 @@ func (s *NodeService) IsSidecarProxy() bool {
|
|||
return s.Kind == ServiceKindConnectProxy && s.Proxy.DestinationServiceID != ""
|
||||
}
|
||||
|
||||
func (s *NodeService) IsMeshGateway() bool {
|
||||
// TODO (mesh-gateway) any other things to check?
|
||||
return s.Kind == ServiceKindMeshGateway
|
||||
}
|
||||
|
||||
// Validate validates the node service configuration.
|
||||
//
|
||||
// NOTE(mitchellh): This currently only validates fields for a ConnectProxy.
|
||||
|
@ -913,6 +949,43 @@ func (s *NodeService) Validate() error {
|
|||
}
|
||||
}
|
||||
|
||||
// MeshGateway validation
|
||||
if s.Kind == ServiceKindMeshGateway {
|
||||
// Gateways must have a port
|
||||
if s.Port == 0 {
|
||||
result = multierror.Append(result, fmt.Errorf("Port must be non-zero for a Mesh Gateway"))
|
||||
}
|
||||
|
||||
// Gateways cannot have sidecars
|
||||
if s.Connect.SidecarService != nil {
|
||||
result = multierror.Append(result, fmt.Errorf("Mesh Gateways cannot have a sidecar service defined"))
|
||||
}
|
||||
|
||||
if s.Connect.Proxy != nil {
|
||||
result = multierror.Append(result, fmt.Errorf("The Connect.Proxy configuration is invalid for Mesh Gateways"))
|
||||
}
|
||||
|
||||
if s.Proxy.DestinationServiceName != "" {
|
||||
result = multierror.Append(result, fmt.Errorf("The Proxy.DestinationServiceName configuration is invalid for Mesh Gateways"))
|
||||
}
|
||||
|
||||
if s.Proxy.DestinationServiceID != "" {
|
||||
result = multierror.Append(result, fmt.Errorf("The Proxy.DestinationServiceID configuration is invalid for Mesh Gateways"))
|
||||
}
|
||||
|
||||
if s.Proxy.LocalServiceAddress != "" {
|
||||
result = multierror.Append(result, fmt.Errorf("The Proxy.LocalServiceAddress configuration is invalid for Mesh Gateways"))
|
||||
}
|
||||
|
||||
if s.Proxy.LocalServicePort != 0 {
|
||||
result = multierror.Append(result, fmt.Errorf("The Proxy.LocalServicePort configuration is invalid for Mesh Gateways"))
|
||||
}
|
||||
|
||||
if len(s.Proxy.Upstreams) != 0 {
|
||||
result = multierror.Append(result, fmt.Errorf("The Proxy.Upstreams configuration is invalid for Mesh Gateways"))
|
||||
}
|
||||
}
|
||||
|
||||

    // Nested sidecar validation
    if s.Connect.SidecarService != nil {
        if s.Connect.SidecarService.ID != "" {

@@ -1166,6 +1239,28 @@ type CheckServiceNode struct {
    Service *NodeService
    Checks  HealthChecks
}

func (csn *CheckServiceNode) BestAddress(wan bool) (string, int) {
    // TODO (mesh-gateway) needs a test
    // best address
    //   wan
    //     wan svc addr
    //     svc addr
    //     wan node addr
    //     node addr
    //   lan
    //     svc addr
    //     node addr

    addr, port := csn.Service.BestAddress(wan)

    if addr == "" {
        addr = csn.Node.BestAddress(wan)
    }

    return addr, port
}

type CheckServiceNodes []CheckServiceNode

// Shuffle does an in-place random shuffle using the Fisher-Yates algorithm.
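To make the new Validate rules above concrete, here is a small hypothetical sketch (not part of the commit) that trips the gateway port check; the service values are invented:

package main

import (
    "fmt"

    "github.com/hashicorp/consul/agent/structs"
)

func main() {
    // Hypothetical mesh gateway registration that forgets to set a port.
    gw := &structs.NodeService{
        Kind:    structs.ServiceKindMeshGateway,
        Service: "mesh-gateway",
        Address: "10.1.2.3",
        // Port left at zero on purpose.
    }

    if err := gw.Validate(); err != nil {
        // The returned multierror includes "Port must be non-zero for a Mesh Gateway".
        fmt.Println(err)
    }
}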

@@ -27,6 +27,14 @@ type fieldConfigTest struct {
    expected bexpr.FieldConfigurations
}

var expectedFieldConfigMeshGatewayConfig bexpr.FieldConfigurations = bexpr.FieldConfigurations{
    "Mode": &bexpr.FieldConfiguration{
        StructFieldName:     "Mode",
        CoerceFn:            bexpr.CoerceString,
        SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
    },
}

var expectedFieldConfigUpstreams bexpr.FieldConfigurations = bexpr.FieldConfigurations{
    "DestinationType": &bexpr.FieldConfiguration{
        StructFieldName: "DestinationType",

@@ -58,6 +66,10 @@ var expectedFieldConfigUpstreams bexpr.FieldConfigurations = bexpr.FieldConfigur
        CoerceFn:            bexpr.CoerceInt,
        SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual},
    },
    "MeshGateway": &bexpr.FieldConfiguration{
        StructFieldName: "MeshGateway",
        SubFields:       expectedFieldConfigMeshGatewayConfig,
    },
}

var expectedFieldConfigConnectProxyConfig bexpr.FieldConfigurations = bexpr.FieldConfigurations{

@@ -86,6 +98,10 @@ var expectedFieldConfigConnectProxyConfig bexpr.FieldConfigurations = bexpr.Fiel
        SupportedOperations: []bexpr.MatchOperator{bexpr.MatchIsEmpty, bexpr.MatchIsNotEmpty},
        SubFields:           expectedFieldConfigUpstreams,
    },
    "MeshGateway": &bexpr.FieldConfiguration{
        StructFieldName: "MeshGateway",
        SubFields:       expectedFieldConfigMeshGatewayConfig,
    },
}

var expectedFieldConfigServiceConnect bexpr.FieldConfigurations = bexpr.FieldConfigurations{

@@ -367,6 +367,65 @@ func TestStructs_ServiceNode_Conversions(t *testing.T) {
    }
}

func TestStructs_NodeService_ValidateMeshGateway(t *testing.T) {
    type testCase struct {
        Modify func(*NodeService)
        Err    string
    }
    cases := map[string]testCase{
        "valid": testCase{
            func(x *NodeService) {},
            "",
        },
        "zero-port": testCase{
            func(x *NodeService) { x.Port = 0 },
            "Port must be non-zero",
        },
        "sidecar-service": testCase{
            func(x *NodeService) { x.Connect.SidecarService = &ServiceDefinition{} },
            "cannot have a sidecar service",
        },
        "connect-managed-proxy": testCase{
            func(x *NodeService) { x.Connect.Proxy = &ServiceDefinitionConnectProxy{} },
            "Connect.Proxy configuration is invalid",
        },
        "proxy-destination-name": testCase{
            func(x *NodeService) { x.Proxy.DestinationServiceName = "foo" },
            "Proxy.DestinationServiceName configuration is invalid",
        },
        "proxy-destination-id": testCase{
            func(x *NodeService) { x.Proxy.DestinationServiceID = "foo" },
            "Proxy.DestinationServiceID configuration is invalid",
        },
        "proxy-local-address": testCase{
            func(x *NodeService) { x.Proxy.LocalServiceAddress = "127.0.0.1" },
            "Proxy.LocalServiceAddress configuration is invalid",
        },
        "proxy-local-port": testCase{
            func(x *NodeService) { x.Proxy.LocalServicePort = 36 },
            "Proxy.LocalServicePort configuration is invalid",
        },
        "proxy-upstreams": testCase{
            func(x *NodeService) { x.Proxy.Upstreams = []Upstream{Upstream{}} },
            "Proxy.Upstreams configuration is invalid",
        },
    }

    for name, tc := range cases {
        t.Run(name, func(t *testing.T) {
            ns := TestNodeServiceMeshGateway(t)
            tc.Modify(ns)

            err := ns.Validate()
            if tc.Err == "" {
                require.NoError(t, err)
            } else {
                require.Contains(t, strings.ToLower(err.Error()), strings.ToLower(tc.Err))
            }
        })
    }
}

func TestStructs_NodeService_ValidateConnectProxy(t *testing.T) {
    cases := []struct {
        Name string
@ -1155,3 +1214,324 @@ func TestServiceNode_JSON_OmitServiceTaggedAdddresses(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNode_BestAddress(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type testCase struct {
|
||||
input Node
|
||||
lanAddr string
|
||||
wanAddr string
|
||||
}
|
||||
|
||||
nodeAddr := "10.1.2.3"
|
||||
nodeWANAddr := "198.18.19.20"
|
||||
|
||||
cases := map[string]testCase{
|
||||
"address": testCase{
|
||||
input: Node{
|
||||
Address: nodeAddr,
|
||||
},
|
||||
|
||||
lanAddr: nodeAddr,
|
||||
wanAddr: nodeAddr,
|
||||
},
|
||||
"wan-address": testCase{
|
||||
input: Node{
|
||||
Address: nodeAddr,
|
||||
TaggedAddresses: map[string]string{
|
||||
"wan": nodeWANAddr,
|
||||
},
|
||||
},
|
||||
|
||||
lanAddr: nodeAddr,
|
||||
wanAddr: nodeWANAddr,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range cases {
|
||||
name := name
|
||||
tc := tc
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
require.Equal(t, tc.lanAddr, tc.input.BestAddress(false))
|
||||
require.Equal(t, tc.wanAddr, tc.input.BestAddress(true))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeService_BestAddress(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type testCase struct {
|
||||
input NodeService
|
||||
lanAddr string
|
||||
lanPort int
|
||||
wanAddr string
|
||||
wanPort int
|
||||
}
|
||||
|
||||
serviceAddr := "10.2.3.4"
|
||||
servicePort := 1234
|
||||
serviceWANAddr := "198.19.20.21"
|
||||
serviceWANPort := 987
|
||||
|
||||
cases := map[string]testCase{
|
||||
"no-address": testCase{
|
||||
input: NodeService{
|
||||
Port: servicePort,
|
||||
},
|
||||
|
||||
lanAddr: "",
|
||||
lanPort: servicePort,
|
||||
wanAddr: "",
|
||||
wanPort: servicePort,
|
||||
},
|
||||
"service-address": testCase{
|
||||
input: NodeService{
|
||||
Address: serviceAddr,
|
||||
Port: servicePort,
|
||||
},
|
||||
|
||||
lanAddr: serviceAddr,
|
||||
lanPort: servicePort,
|
||||
wanAddr: serviceAddr,
|
||||
wanPort: servicePort,
|
||||
},
|
||||
"service-wan-address": testCase{
|
||||
input: NodeService{
|
||||
Address: serviceAddr,
|
||||
Port: servicePort,
|
||||
TaggedAddresses: map[string]ServiceAddress{
|
||||
"wan": ServiceAddress{
|
||||
Address: serviceWANAddr,
|
||||
Port: serviceWANPort,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
lanAddr: serviceAddr,
|
||||
lanPort: servicePort,
|
||||
wanAddr: serviceWANAddr,
|
||||
wanPort: serviceWANPort,
|
||||
},
|
||||
"service-wan-address-default-port": testCase{
|
||||
input: NodeService{
|
||||
Address: serviceAddr,
|
||||
Port: servicePort,
|
||||
TaggedAddresses: map[string]ServiceAddress{
|
||||
"wan": ServiceAddress{
|
||||
Address: serviceWANAddr,
|
||||
Port: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
lanAddr: serviceAddr,
|
||||
lanPort: servicePort,
|
||||
wanAddr: serviceWANAddr,
|
||||
wanPort: servicePort,
|
||||
},
|
||||
"service-wan-address-node-lan": testCase{
|
||||
input: NodeService{
|
||||
Port: servicePort,
|
||||
TaggedAddresses: map[string]ServiceAddress{
|
||||
"wan": ServiceAddress{
|
||||
Address: serviceWANAddr,
|
||||
Port: serviceWANPort,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
lanAddr: "",
|
||||
lanPort: servicePort,
|
||||
wanAddr: serviceWANAddr,
|
||||
wanPort: serviceWANPort,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range cases {
|
||||
name := name
|
||||
tc := tc
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
addr, port := tc.input.BestAddress(false)
|
||||
require.Equal(t, tc.lanAddr, addr)
|
||||
require.Equal(t, tc.lanPort, port)
|
||||
|
||||
addr, port = tc.input.BestAddress(true)
|
||||
require.Equal(t, tc.wanAddr, addr)
|
||||
require.Equal(t, tc.wanPort, port)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckServiceNode_BestAddress(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type testCase struct {
|
||||
input CheckServiceNode
|
||||
lanAddr string
|
||||
lanPort int
|
||||
wanAddr string
|
||||
wanPort int
|
||||
}
|
||||
|
||||
nodeAddr := "10.1.2.3"
|
||||
nodeWANAddr := "198.18.19.20"
|
||||
serviceAddr := "10.2.3.4"
|
||||
servicePort := 1234
|
||||
serviceWANAddr := "198.19.20.21"
|
||||
serviceWANPort := 987
|
||||
|
||||
cases := map[string]testCase{
|
||||
"node-address": testCase{
|
||||
input: CheckServiceNode{
|
||||
Node: &Node{
|
||||
Address: nodeAddr,
|
||||
},
|
||||
Service: &NodeService{
|
||||
Port: servicePort,
|
||||
},
|
||||
},
|
||||
|
||||
lanAddr: nodeAddr,
|
||||
lanPort: servicePort,
|
||||
wanAddr: nodeAddr,
|
||||
wanPort: servicePort,
|
||||
},
|
||||
"node-wan-address": testCase{
|
||||
input: CheckServiceNode{
|
||||
Node: &Node{
|
||||
Address: nodeAddr,
|
||||
TaggedAddresses: map[string]string{
|
||||
"wan": nodeWANAddr,
|
||||
},
|
||||
},
|
||||
Service: &NodeService{
|
||||
Port: servicePort,
|
||||
},
|
||||
},
|
||||
|
||||
lanAddr: nodeAddr,
|
||||
lanPort: servicePort,
|
||||
wanAddr: nodeWANAddr,
|
||||
wanPort: servicePort,
|
||||
},
|
||||
"service-address": testCase{
|
||||
input: CheckServiceNode{
|
||||
Node: &Node{
|
||||
Address: nodeAddr,
|
||||
// this will be ignored
|
||||
TaggedAddresses: map[string]string{
|
||||
"wan": nodeWANAddr,
|
||||
},
|
||||
},
|
||||
Service: &NodeService{
|
||||
Address: serviceAddr,
|
||||
Port: servicePort,
|
||||
},
|
||||
},
|
||||
|
||||
lanAddr: serviceAddr,
|
||||
lanPort: servicePort,
|
||||
wanAddr: serviceAddr,
|
||||
wanPort: servicePort,
|
||||
},
|
||||
"service-wan-address": testCase{
|
||||
input: CheckServiceNode{
|
||||
Node: &Node{
|
||||
Address: nodeAddr,
|
||||
// this will be ignored
|
||||
TaggedAddresses: map[string]string{
|
||||
"wan": nodeWANAddr,
|
||||
},
|
||||
},
|
||||
Service: &NodeService{
|
||||
Address: serviceAddr,
|
||||
Port: servicePort,
|
||||
TaggedAddresses: map[string]ServiceAddress{
|
||||
"wan": ServiceAddress{
|
||||
Address: serviceWANAddr,
|
||||
Port: serviceWANPort,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
lanAddr: serviceAddr,
|
||||
lanPort: servicePort,
|
||||
wanAddr: serviceWANAddr,
|
||||
wanPort: serviceWANPort,
|
||||
},
|
||||
"service-wan-address-default-port": testCase{
|
||||
input: CheckServiceNode{
|
||||
Node: &Node{
|
||||
Address: nodeAddr,
|
||||
// this will be ignored
|
||||
TaggedAddresses: map[string]string{
|
||||
"wan": nodeWANAddr,
|
||||
},
|
||||
},
|
||||
Service: &NodeService{
|
||||
Address: serviceAddr,
|
||||
Port: servicePort,
|
||||
TaggedAddresses: map[string]ServiceAddress{
|
||||
"wan": ServiceAddress{
|
||||
Address: serviceWANAddr,
|
||||
Port: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
lanAddr: serviceAddr,
|
||||
lanPort: servicePort,
|
||||
wanAddr: serviceWANAddr,
|
||||
wanPort: servicePort,
|
||||
},
|
||||
"service-wan-address-node-lan": testCase{
|
||||
input: CheckServiceNode{
|
||||
Node: &Node{
|
||||
Address: nodeAddr,
|
||||
// this will be ignored
|
||||
TaggedAddresses: map[string]string{
|
||||
"wan": nodeWANAddr,
|
||||
},
|
||||
},
|
||||
Service: &NodeService{
|
||||
Port: servicePort,
|
||||
TaggedAddresses: map[string]ServiceAddress{
|
||||
"wan": ServiceAddress{
|
||||
Address: serviceWANAddr,
|
||||
Port: serviceWANPort,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
lanAddr: nodeAddr,
|
||||
lanPort: servicePort,
|
||||
wanAddr: serviceWANAddr,
|
||||
wanPort: serviceWANPort,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range cases {
|
||||
name := name
|
||||
tc := tc
|
||||
t.Run(name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
addr, port := tc.input.BestAddress(false)
|
||||
require.Equal(t, tc.lanAddr, addr)
|
||||
require.Equal(t, tc.lanPort, port)
|
||||
|
||||
addr, port = tc.input.BestAddress(true)
|
||||
require.Equal(t, tc.wanAddr, addr)
|
||||
require.Equal(t, tc.wanPort, port)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|

@@ -49,6 +49,33 @@ func TestNodeServiceProxy(t testing.T) *NodeService {
    }
}

// TestNodeServiceMeshGateway returns a *NodeService representing a valid Mesh Gateway
func TestNodeServiceMeshGateway(t testing.T) *NodeService {
    return TestNodeServiceMeshGatewayWithAddrs(t,
        "10.1.2.3",
        8443,
        ServiceAddress{Address: "10.1.2.3", Port: 8443},
        ServiceAddress{Address: "198.18.4.5", Port: 443})
}

func TestNodeServiceMeshGatewayWithAddrs(t testing.T, address string, port int, lanAddr, wanAddr ServiceAddress) *NodeService {
    return &NodeService{
        Kind:    ServiceKindMeshGateway,
        Service: "mesh-gateway",
        Address: address,
        Port:    port,
        Proxy: ConnectProxyConfig{
            Config: map[string]interface{}{
                "foo": "bar",
            },
        },
        TaggedAddresses: map[string]ServiceAddress{
            "lan": lanAddr,
            "wan": wanAddr,
        },
    }
}

// TestNodeServiceSidecar returns a *NodeService representing a service
// registration with a nested Sidecar registration.
func TestNodeServiceSidecar(t testing.T) *NodeService {

@@ -28,6 +28,8 @@ func (s *Server) clustersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token st
    switch cfgSnap.Kind {
    case structs.ServiceKindConnectProxy:
        return s.clustersFromSnapshotConnectProxy(cfgSnap, token)
    case structs.ServiceKindMeshGateway:
        return s.clustersFromSnapshotMeshGateway(cfgSnap, token)
    default:
        return nil, fmt.Errorf("Invalid service kind: %v", cfgSnap.Kind)
    }

@@ -36,9 +38,6 @@ func (s *Server) clustersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token st
// clustersFromSnapshot returns the xDS API representation of the "clusters"
// (upstreams) in the snapshot.
func (s *Server) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
    if cfgSnap == nil {
        return nil, errors.New("nil config given")
    }
    // Include the "app" cluster for the public listener
    clusters := make([]proto.Message, len(cfgSnap.Proxy.Upstreams)+1)

@@ -58,6 +57,42 @@ func (s *Server) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnapsh
    return clusters, nil
}

// clustersFromSnapshotMeshGateway returns the xDS API representation of the "clusters"
// for a mesh gateway. This will include 1 cluster per remote datacenter as well as
// 1 cluster for each service subset.
func (s *Server) clustersFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
    // TODO (mesh-gateway) will need to generate 1 cluster for each service subset as well.

    // 1 cluster per remote dc + 1 cluster per local service
    clusters := make([]proto.Message, len(cfgSnap.MeshGateway.GatewayGroups)+len(cfgSnap.MeshGateway.ServiceGroups))

    var err error
    idx := 0
    // generate the remote dc clusters
    for dc, _ := range cfgSnap.MeshGateway.GatewayGroups {
        clusterName := DatacenterSNI(dc, cfgSnap)

        clusters[idx], err = s.makeMeshGatewayCluster(clusterName, cfgSnap)
        if err != nil {
            return nil, err
        }
        idx += 1
    }

    // generate the per-service clusters
    for svc, _ := range cfgSnap.MeshGateway.ServiceGroups {
        clusterName := ServiceSNI(svc, "default", cfgSnap.Datacenter, cfgSnap)

        clusters[idx], err = s.makeMeshGatewayCluster(clusterName, cfgSnap)
        if err != nil {
            return nil, err
        }
        idx += 1
    }

    return clusters, nil
}

func (s *Server) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Cluster, error) {
    var c *envoy.Cluster
    var err error

@@ -108,6 +143,16 @@ func (s *Server) makeUpstreamCluster(upstream structs.Upstream, cfgSnap *proxycf
    var c *envoy.Cluster
    var err error

    ns := "default"
    if upstream.DestinationNamespace != "" {
        ns = upstream.DestinationNamespace
    }
    dc := cfgSnap.Datacenter
    if upstream.Datacenter != "" {
        dc = upstream.Datacenter
    }
    sni := ServiceSNI(upstream.DestinationName, ns, dc, cfgSnap)

    cfg, err := ParseUpstreamConfig(upstream.Config)
    if err != nil {
        // Don't hard fail on a config typo, just warn. The parse func returns

@@ -146,6 +191,7 @@ func (s *Server) makeUpstreamCluster(upstream structs.Upstream, cfgSnap *proxycf
    // Enable TLS upstream with the configured client certificate.
    c.TlsContext = &envoyauth.UpstreamTlsContext{
        CommonTlsContext: makeCommonTLSContext(cfgSnap),
        Sni:              sni,
    }

    return c, nil

@@ -196,3 +242,27 @@ func makeClusterFromUserConfig(configJSON string) (*envoy.Cluster, error) {
    err := jsonpb.UnmarshalString(configJSON, &c)
    return &c, err
}

func (s *Server) makeMeshGatewayCluster(clusterName string, cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Cluster, error) {
    cfg, err := ParseMeshGatewayConfig(cfgSnap.Proxy.Config)
    if err != nil {
        // Don't hard fail on a config typo, just warn. The parse func returns
        // default config if there is an error so it's safe to continue.
        s.Logger.Printf("[WARN] envoy: failed to parse mesh gateway config: %s", err)
    }

    return &envoy.Cluster{
        Name:                 clusterName,
        ConnectTimeout:       time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond,
        ClusterDiscoveryType: &envoy.Cluster_Type{Type: envoy.Cluster_EDS},
        EdsClusterConfig: &envoy.Cluster_EdsClusterConfig{
            EdsConfig: &envoycore.ConfigSource{
                ConfigSourceSpecifier: &envoycore.ConfigSource_Ads{
                    Ads: &envoycore.AggregatedConfigSource{},
                },
            },
        },
        // Having an empty config enables outlier detection with default config.
        OutlierDetection: &envoycluster.OutlierDetection{},
    }, nil
}
@ -9,6 +9,7 @@ import (
|
|||
"text/template"
|
||||
|
||||
"github.com/hashicorp/consul/agent/proxycfg"
|
||||
testinf "github.com/mitchellh/go-testing-interface"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -16,18 +17,21 @@ func TestClustersFromSnapshot(t *testing.T) {
|
|||
|
||||
tests := []struct {
|
||||
name string
|
||||
create func(t testinf.T) *proxycfg.ConfigSnapshot
|
||||
// Setup is called before the test starts. It is passed the snapshot from
|
||||
// TestConfigSnapshot and is allowed to modify it in any way to setup the
|
||||
// create func and is allowed to modify it in any way to setup the
|
||||
// test input.
|
||||
setup func(snap *proxycfg.ConfigSnapshot)
|
||||
overrideGoldenName string
|
||||
}{
|
||||
{
|
||||
name: "defaults",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
setup: nil, // Default snapshot
|
||||
},
|
||||
{
|
||||
name: "custom-local-app",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Config["envoy_local_cluster_json"] =
|
||||
customAppClusterJSON(t, customClusterJSONOptions{
|
||||
|
@ -38,6 +42,7 @@ func TestClustersFromSnapshot(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "custom-local-app-typed",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
overrideGoldenName: "custom-local-app",
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Config["envoy_local_cluster_json"] =
|
||||
|
@ -49,6 +54,7 @@ func TestClustersFromSnapshot(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "custom-upstream",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Upstreams[0].Config["envoy_cluster_json"] =
|
||||
customAppClusterJSON(t, customClusterJSONOptions{
|
||||
|
@ -59,6 +65,7 @@ func TestClustersFromSnapshot(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "custom-upstream-typed",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
overrideGoldenName: "custom-upstream",
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Upstreams[0].Config["envoy_cluster_json"] =
|
||||
|
@ -70,6 +77,7 @@ func TestClustersFromSnapshot(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "custom-upstream-ignores-tls",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
overrideGoldenName: "custom-upstream", // should be the same
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Upstreams[0].Config["envoy_cluster_json"] =
|
||||
|
@ -83,11 +91,17 @@ func TestClustersFromSnapshot(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "custom-timeouts",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Config["local_connect_timeout_ms"] = 1234
|
||||
snap.Proxy.Upstreams[0].Config["connect_timeout_ms"] = 2345
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mesh-gateway",
|
||||
create: proxycfg.TestConfigSnapshotMeshGateway,
|
||||
setup: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
@ -95,14 +109,18 @@ func TestClustersFromSnapshot(t *testing.T) {
|
|||
require := require.New(t)
|
||||
|
||||
// Sanity check default with no overrides first
|
||||
snap := proxycfg.TestConfigSnapshot(t)
|
||||
snap := tt.create(t)
|
||||
|
||||
// We need to replace the TLS certs with deterministic ones to make golden
|
||||
// files workable. Note we don't update these otherwise they'd change
|
||||
// golden files for every test case and so not be any use!
|
||||
snap.Leaf.CertPEM = golden(t, "test-leaf-cert", "")
|
||||
snap.Leaf.PrivateKeyPEM = golden(t, "test-leaf-key", "")
|
||||
if snap.ConnectProxy.Leaf != nil {
|
||||
snap.ConnectProxy.Leaf.CertPEM = golden(t, "test-leaf-cert", "")
|
||||
snap.ConnectProxy.Leaf.PrivateKeyPEM = golden(t, "test-leaf-key", "")
|
||||
}
|
||||
if snap.Roots != nil {
|
||||
snap.Roots.Roots[0].RootCert = golden(t, "test-root-cert", "")
|
||||
}
|
||||
|
||||
if tt.setup != nil {
|
||||
tt.setup(snap)
|
||||
|
@ -172,7 +190,7 @@ func expectClustersJSONResources(t *testing.T, snap *proxycfg.ConfigSnapshot, to
|
|||
|
||||
},
|
||||
"connectTimeout": "1s",
|
||||
"tlsContext": ` + expectedUpstreamTLSContextJSON(t, snap) + `
|
||||
"tlsContext": ` + expectedUpstreamTLSContextJSON(t, snap, "db.default.dc1.internal.11111111-2222-3333-4444-555555555555") + `
|
||||
}`,
|
||||
"prepared_query:geo-cache": `
|
||||
{
|
||||
|
@ -190,7 +208,7 @@ func expectClustersJSONResources(t *testing.T, snap *proxycfg.ConfigSnapshot, to
|
|||
|
||||
},
|
||||
"connectTimeout": "5s",
|
||||
"tlsContext": ` + expectedUpstreamTLSContextJSON(t, snap) + `
|
||||
"tlsContext": ` + expectedUpstreamTLSContextJSON(t, snap, "geo-cache.default.dc1.internal.11111111-2222-3333-4444-555555555555") + `
|
||||
}`,
|
||||
}
|
||||
}
|
||||
|
|
|

@@ -3,6 +3,7 @@ package xds
import (
    "strings"

    "github.com/hashicorp/consul/agent/structs"
    "github.com/mitchellh/mapstructure"
)

@@ -52,6 +53,43 @@ func ParseProxyConfig(m map[string]interface{}) (ProxyConfig, error) {
    return cfg, err
}

type MeshGatewayConfig struct {
    // BindTaggedAddresses when set will cause all of the services tagged
    // addresses to have listeners bound to them in addition to the main service
    // address listener. This is only suitable when the tagged addresses are IP
    // addresses of network interfaces Envoy can see. i.e. When using DNS names
    // for those addresses or where an external entity maps that IP to the Envoy
    // (like AWS EC2 mapping a public IP to the private interface) then this
    // cannot be used. See the BindAddresses config instead
    //
    // TODO - wow this is a verbose setting name. Maybe shorten this
    BindTaggedAddresses bool `mapstructure:"envoy_mesh_gateway_bind_tagged_addresses"`

    // BindAddresses additional bind addresses to configure listeners for
    BindAddresses map[string]structs.ServiceAddress `mapstructure:"envoy_mesh_gateway_bind_addresses"`

    // NoDefaultBind indicates that we should not bind to the default address of the
    // gateway service
    NoDefaultBind bool `mapstructure:"envoy_mesh_gateway_no_default_bind"`

    // ConnectTimeoutMs is the number of milliseconds to timeout making a new
    // connection to this upstream. Defaults to 5000 (5 seconds) if not set.
    ConnectTimeoutMs int `mapstructure:"connect_timeout_ms"`
}

// ParseMeshGatewayConfig returns the MeshGatewayConfig parsed from an opaque map. If an
// error occurs during parsing, it is returned along with the default config. This
// allows the caller to choose whether and how to report the error
func ParseMeshGatewayConfig(m map[string]interface{}) (MeshGatewayConfig, error) {
    var cfg MeshGatewayConfig
    err := mapstructure.WeakDecode(m, &cfg)

    if cfg.ConnectTimeoutMs < 1 {
        cfg.ConnectTimeoutMs = 5000
    }
    return cfg, err
}
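A minimal usage sketch for ParseMeshGatewayConfig (not part of the commit); the map keys come from the mapstructure tags above and the values are invented:

package main

import (
    "fmt"

    "github.com/hashicorp/consul/agent/xds"
)

func main() {
    // Opaque Proxy.Config as it would arrive from a gateway registration.
    raw := map[string]interface{}{
        "envoy_mesh_gateway_no_default_bind":       true,
        "envoy_mesh_gateway_bind_tagged_addresses": true,
    }

    cfg, err := xds.ParseMeshGatewayConfig(raw)
    // ConnectTimeoutMs falls back to the 5000ms default when unset.
    fmt.Println(cfg.NoDefaultBind, cfg.BindTaggedAddresses, cfg.ConnectTimeoutMs, err)
}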
|
||||
// UpstreamConfig describes the keys we understand from
|
||||
// Connect.Proxy.Upstream[*].Config.
|
||||
type UpstreamConfig struct {
|
||||
|
|
|

@@ -15,14 +15,16 @@ import (
)

// endpointsFromSnapshot returns the xDS API representation of the "endpoints"
func endpointsFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
func (s *Server) endpointsFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
    if cfgSnap == nil {
        return nil, errors.New("nil config given")
    }

    switch cfgSnap.Kind {
    case structs.ServiceKindConnectProxy:
        return endpointsFromSnapshotConnectProxy(cfgSnap, token)
        return s.endpointsFromSnapshotConnectProxy(cfgSnap, token)
    case structs.ServiceKindMeshGateway:
        return s.endpointsFromSnapshotMeshGateway(cfgSnap, token)
    default:
        return nil, fmt.Errorf("Invalid service kind: %v", cfgSnap.Kind)
    }

@@ -30,18 +32,35 @@ func endpointsFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]pr

// endpointsFromSnapshotConnectProxy returns the xDS API representation of the "endpoints"
// (upstream instances) in the snapshot.
func endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
    if cfgSnap == nil {
        return nil, errors.New("nil config given")
    }
    resources := make([]proto.Message, 0, len(cfgSnap.UpstreamEndpoints))
    for id, endpoints := range cfgSnap.UpstreamEndpoints {
        la := makeLoadAssignment(id, endpoints)
func (s *Server) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
    resources := make([]proto.Message, 0, len(cfgSnap.ConnectProxy.UpstreamEndpoints))
    for id, endpoints := range cfgSnap.ConnectProxy.UpstreamEndpoints {
        la := makeLoadAssignment(id, endpoints, cfgSnap.Datacenter)
        resources = append(resources, la)
    }
    return resources, nil
}

func (s *Server) endpointsFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
    resources := make([]proto.Message, 0, len(cfgSnap.MeshGateway.GatewayGroups)+len(cfgSnap.MeshGateway.ServiceGroups))

    // generate the endpoints for the gateways in the remote datacenters
    for dc, endpoints := range cfgSnap.MeshGateway.GatewayGroups {
        clusterName := DatacenterSNI(dc, cfgSnap)
        la := makeLoadAssignment(clusterName, endpoints, cfgSnap.Datacenter)
        resources = append(resources, la)
    }

    // generate the endpoints for the local service groups
    for svc, endpoints := range cfgSnap.MeshGateway.ServiceGroups {
        clusterName := ServiceSNI(svc, "default", cfgSnap.Datacenter, cfgSnap)
        la := makeLoadAssignment(clusterName, endpoints, cfgSnap.Datacenter)
        resources = append(resources, la)
    }

    return resources, nil
}

func makeEndpoint(clusterName, host string, port int) envoyendpoint.LbEndpoint {
    return envoyendpoint.LbEndpoint{
        HostIdentifier: &envoyendpoint.LbEndpoint_Endpoint{Endpoint: &envoyendpoint.Endpoint{

@@ -50,13 +69,11 @@ func makeEndpoint(clusterName, host string, port int) envoyendpoint.LbEndpoint {
    }}
}

func makeLoadAssignment(clusterName string, endpoints structs.CheckServiceNodes) *envoy.ClusterLoadAssignment {
func makeLoadAssignment(clusterName string, endpoints structs.CheckServiceNodes, localDatacenter string) *envoy.ClusterLoadAssignment {
    es := make([]envoyendpoint.LbEndpoint, 0, len(endpoints))
    for _, ep := range endpoints {
        addr := ep.Service.Address
        if addr == "" {
            addr = ep.Node.Address
        }
        // TODO (mesh-gateway) - should we respect the translate_wan_addrs configuration here or just always use the wan for cross-dc?
        addr, port := ep.BestAddress(localDatacenter != ep.Node.Datacenter)
        healthStatus := envoycore.HealthStatus_HEALTHY
        weight := 1
        if ep.Service.Weights != nil {

@@ -86,7 +103,7 @@ func makeLoadAssignment(clusterName string, endpoints structs.CheckServiceNodes)
        es = append(es, envoyendpoint.LbEndpoint{
            HostIdentifier: &envoyendpoint.LbEndpoint_Endpoint{
                Endpoint: &envoyendpoint.Endpoint{
                    Address: makeAddressPtr(addr, ep.Service.Port),
                    Address: makeAddressPtr(addr, port),
                }},
            HealthStatus:        healthStatus,
            LoadBalancingWeight: makeUint32Value(weight),
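A small sketch (not part of the commit, with made-up addresses) of the address selection makeLoadAssignment now delegates to CheckServiceNode.BestAddress: endpoints in another datacenter resolve to their WAN tagged address, local ones keep the LAN address:

package main

import (
    "fmt"

    "github.com/hashicorp/consul/agent/structs"
)

func main() {
    // Hypothetical gateway instance registered in dc2 with a WAN tagged address.
    csn := structs.CheckServiceNode{
        Node: &structs.Node{Datacenter: "dc2", Address: "10.0.0.5"},
        Service: &structs.NodeService{
            Port: 8443,
            TaggedAddresses: map[string]structs.ServiceAddress{
                "wan": {Address: "198.18.1.1", Port: 443},
            },
        },
    }

    localDatacenter := "dc1"
    // Mirrors the call in makeLoadAssignment: use the WAN address only for cross-dc endpoints.
    addr, port := csn.BestAddress(localDatacenter != csn.Node.Datacenter)
    fmt.Println(addr, port) // 198.18.1.1 443
}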
@ -1,6 +1,9 @@
|
|||
package xds
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/mitchellh/copystructure"
|
||||
|
@ -10,7 +13,9 @@ import (
|
|||
envoy "github.com/envoyproxy/go-control-plane/envoy/api/v2"
|
||||
"github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
|
||||
envoyendpoint "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
|
||||
"github.com/hashicorp/consul/agent/proxycfg"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
testinf "github.com/mitchellh/go-testing-interface"
|
||||
)
|
||||
|
||||
func Test_makeLoadAssignment(t *testing.T) {
|
||||
|
@ -192,8 +197,73 @@ func Test_makeLoadAssignment(t *testing.T) {
|
|||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := makeLoadAssignment(tt.clusterName, tt.endpoints)
|
||||
got := makeLoadAssignment(tt.clusterName, tt.endpoints, "dc1")
|
||||
require.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_endpointsFromSnapshot(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
create func(t testinf.T) *proxycfg.ConfigSnapshot
|
||||
// Setup is called before the test starts. It is passed the snapshot from
|
||||
// create func and is allowed to modify it in any way to setup the
|
||||
// test input.
|
||||
setup func(snap *proxycfg.ConfigSnapshot)
|
||||
overrideGoldenName string
|
||||
}{
|
||||
{
|
||||
name: "defaults",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
setup: nil, // Default snapshot
|
||||
},
|
||||
{
|
||||
name: "mesh-gateway",
|
||||
create: proxycfg.TestConfigSnapshotMeshGateway,
|
||||
setup: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
// Sanity check default with no overrides first
|
||||
snap := tt.create(t)
|
||||
|
||||
// We need to replace the TLS certs with deterministic ones to make golden
|
||||
// files workable. Note we don't update these otherwise they'd change
|
||||
// golden files for every test case and so not be any use!
|
||||
if snap.ConnectProxy.Leaf != nil {
|
||||
snap.ConnectProxy.Leaf.CertPEM = golden(t, "test-leaf-cert", "")
|
||||
snap.ConnectProxy.Leaf.PrivateKeyPEM = golden(t, "test-leaf-key", "")
|
||||
}
|
||||
if snap.Roots != nil {
|
||||
snap.Roots.Roots[0].RootCert = golden(t, "test-root-cert", "")
|
||||
}
|
||||
|
||||
if tt.setup != nil {
|
||||
tt.setup(snap)
|
||||
}
|
||||
|
||||
// Need server just for logger dependency
|
||||
s := Server{Logger: log.New(os.Stderr, "", log.LstdFlags)}
|
||||
|
||||
endpoints, err := s.endpointsFromSnapshot(snap, "my-token")
|
||||
require.NoError(err)
|
||||
r, err := createResponse(EndpointType, "00000001", "00000001", endpoints)
|
||||
require.NoError(err)
|
||||
|
||||
gotJSON := responseToJSON(t, r)
|
||||
|
||||
gName := tt.name
|
||||
if tt.overrideGoldenName != "" {
|
||||
gName = tt.overrideGoldenName
|
||||
}
|
||||
|
||||
require.JSONEq(golden(t, path.Join("endpoints", gName), gotJSON), gotJSON)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|

@@ -33,18 +33,15 @@ func (s *Server) listenersFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot, token s
    switch cfgSnap.Kind {
    case structs.ServiceKindConnectProxy:
        return s.listenersFromSnapshotConnectProxy(cfgSnap, token)
    case structs.ServiceKindMeshGateway:
        return s.listenersFromSnapshotMeshGateway(cfgSnap, token)
    default:
        return nil, fmt.Errorf("Invalid service kind: %v", cfgSnap.Kind)
    }
}

// listenersFromSnapshotConnectProxy returns the xDS API representation of the "listeners"
// in the snapshot.
// listenersFromSnapshotConnectProxy returns the "listeners" for a connect proxy service
func (s *Server) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
    if cfgSnap == nil {
        return nil, errors.New("nil config given")
    }

    // One listener for each upstream plus the public one
    resources := make([]proto.Message, len(cfgSnap.Proxy.Upstreams)+1)

@@ -63,6 +60,53 @@ func (s *Server) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnaps
    return resources, nil
}

// listenersFromSnapshotMeshGateway returns the "listener" for a mesh-gateway service
func (s *Server) listenersFromSnapshotMeshGateway(cfgSnap *proxycfg.ConfigSnapshot, token string) ([]proto.Message, error) {
    cfg, err := ParseMeshGatewayConfig(cfgSnap.Proxy.Config)
    if err != nil {
        // Don't hard fail on a config typo, just warn. The parse func returns
        // default config if there is an error so it's safe to continue.
        s.Logger.Printf("[WARN] envoy: failed to parse Connect.Proxy.Config: %s", err)
    }

    // TODO - prevent invalid configurations of binding to the same port/addr
    // twice including with the any addresses

    var resources []proto.Message
    if !cfg.NoDefaultBind {
        addr := cfgSnap.Address
        if addr == "" {
            addr = "0.0.0.0"
        }

        l, err := s.makeGatewayListener("default", addr, cfgSnap.Port, cfgSnap)
        if err != nil {
            return nil, err
        }
        resources = append(resources, l)
    }

    if cfg.BindTaggedAddresses {
        for name, addrCfg := range cfgSnap.TaggedAddresses {
            l, err := s.makeGatewayListener(name, addrCfg.Address, addrCfg.Port, cfgSnap)
            if err != nil {
                return nil, err
            }
            resources = append(resources, l)
        }
    }

    for name, addrCfg := range cfg.BindAddresses {
        l, err := s.makeGatewayListener(name, addrCfg.Address, addrCfg.Port, cfgSnap)
        if err != nil {
            return nil, err
        }
        resources = append(resources, l)
    }

    return resources, err
}
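For reference, a hypothetical Proxy.Config (the addresses are invented) that exercises the binding branches above: the default bind is suppressed and one listener is created per entry in envoy_mesh_gateway_bind_addresses, matching the MeshGatewayConfig keys:

package main

import (
    "fmt"

    "github.com/hashicorp/consul/agent/structs"
    "github.com/hashicorp/consul/agent/xds"
)

func main() {
    raw := map[string]interface{}{
        "envoy_mesh_gateway_no_default_bind": true,
        "envoy_mesh_gateway_bind_addresses": map[string]structs.ServiceAddress{
            "lan": {Address: "10.0.0.10", Port: 8443},
            "wan": {Address: "198.51.100.10", Port: 8443},
        },
    }

    cfg, _ := xds.ParseMeshGatewayConfig(raw)
    // listenersFromSnapshotMeshGateway would then skip the default bind and
    // build a listener for each of the two configured addresses.
    fmt.Println(cfg.NoDefaultBind, len(cfg.BindAddresses))
}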

// makeListener returns a listener with name and bind details set. Filters must
// be added before it's useful.
//

@@ -231,6 +275,61 @@ func (s *Server) makeUpstreamListener(u *structs.Upstream) (proto.Message, error
    return l, nil
}

func (s *Server) makeGatewayListener(name, addr string, port int, cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Listener, error) {
    tlsInspector, err := makeTLSInspectorListenerFilter()
    if err != nil {
        return nil, err
    }

    sniCluster, err := makeSNIClusterFilter()
    if err != nil {
        return nil, err
    }

    // The cluster name here doesn't matter as the sni_cluster
    // filter will fill it in for us.
    tcpProxy, err := makeTCPProxyFilter(name, "", "mesh_gateway_local_")
    if err != nil {
        return nil, err
    }

    sniClusterChain := envoylistener.FilterChain{
        Filters: []envoylistener.Filter{
            sniCluster,
            tcpProxy,
        },
    }

    l := makeListener(name, addr, port)
    l.ListenerFilters = []envoylistener.ListenerFilter{tlsInspector}

    // TODO (mesh-gateway) - Do we need to create clusters for all the old trust domains as well?
    // We need 1 Filter Chain per datacenter
    for dc := range cfgSnap.MeshGateway.WatchedDatacenters {
        clusterName := DatacenterSNI(dc, cfgSnap)
        filterName := fmt.Sprintf("%s_%s", name, dc)
        dcTCPProxy, err := makeTCPProxyFilter(filterName, clusterName, "mesh_gateway_remote_")
        if err != nil {
            return nil, err
        }

        l.FilterChains = append(l.FilterChains, envoylistener.FilterChain{
            FilterChainMatch: &envoylistener.FilterChainMatch{
                ServerNames: []string{fmt.Sprintf("*.%s", clusterName)},
            },
            Filters: []envoylistener.Filter{
                dcTCPProxy,
            },
        })
    }

    // This needs to get tacked on at the end as it has no
    // matching and will act as a catch all
    l.FilterChains = append(l.FilterChains, sniClusterChain)

    return l, nil
}

func makeListenerFilter(protocol, filterName, cluster, statPrefix string, ingress bool) (envoylistener.Filter, error) {
    switch protocol {
    case "grpc":

@@ -246,6 +345,21 @@ func makeListenerFilter(protocol, filterName, cluster, statPrefix string, ingres
    }
}

func makeTLSInspectorListenerFilter() (envoylistener.ListenerFilter, error) {
    return envoylistener.ListenerFilter{Name: util.TlsInspector}, nil
}

func makeSNIFilterChainMatch(sniMatch string) (*envoylistener.FilterChainMatch, error) {
    return &envoylistener.FilterChainMatch{
        ServerNames: []string{sniMatch},
    }, nil
}

func makeSNIClusterFilter() (envoylistener.Filter, error) {
    // This filter has no config which is why we are not calling make
    return envoylistener.Filter{Name: "envoy.filters.network.sni_cluster"}, nil
}

func makeTCPProxyFilter(filterName, cluster, statPrefix string) (envoylistener.Filter, error) {
    cfg := &envoytcp.TcpProxy{
        StatPrefix: makeStatPrefix("tcp", statPrefix, filterName),

@@ -390,12 +504,12 @@ func makeCommonTLSContext(cfgSnap *proxycfg.ConfigSnapshot) *envoyauth.CommonTls
        &envoyauth.TlsCertificate{
            CertificateChain: &envoycore.DataSource{
                Specifier: &envoycore.DataSource_InlineString{
                    InlineString: cfgSnap.Leaf.CertPEM,
                    InlineString: cfgSnap.ConnectProxy.Leaf.CertPEM,
                },
            },
            PrivateKey: &envoycore.DataSource{
                Specifier: &envoycore.DataSource_InlineString{
                    InlineString: cfgSnap.Leaf.PrivateKeyPEM,
                    InlineString: cfgSnap.ConnectProxy.Leaf.PrivateKeyPEM,
                },
            },
        },
@ -6,10 +6,14 @@ import (
|
|||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"testing"
|
||||
"text/template"
|
||||
|
||||
envoy "github.com/envoyproxy/go-control-plane/envoy/api/v2"
|
||||
"github.com/hashicorp/consul/agent/proxycfg"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
testinf "github.com/mitchellh/go-testing-interface"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -17,6 +21,7 @@ func TestListenersFromSnapshot(t *testing.T) {
|
|||
|
||||
tests := []struct {
|
||||
name string
|
||||
create func(t testinf.T) *proxycfg.ConfigSnapshot
|
||||
// Setup is called before the test starts. It is passed the snapshot from
|
||||
// TestConfigSnapshot and is allowed to modify it in any way to setup the
|
||||
// test input.
|
||||
|
@ -25,22 +30,26 @@ func TestListenersFromSnapshot(t *testing.T) {
|
|||
}{
|
||||
{
|
||||
name: "defaults",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
setup: nil, // Default snapshot
|
||||
},
|
||||
{
|
||||
name: "http-public-listener",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Config["protocol"] = "http"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "http-upstream",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Upstreams[0].Config["protocol"] = "http"
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "custom-public-listener",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Config["envoy_public_listener_json"] =
|
||||
customListenerJSON(t, customListenerJSONOptions{
|
||||
|
@ -51,6 +60,7 @@ func TestListenersFromSnapshot(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "custom-public-listener-typed",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
overrideGoldenName: "custom-public-listener", // should be the same
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Config["envoy_public_listener_json"] =
|
||||
|
@ -62,6 +72,7 @@ func TestListenersFromSnapshot(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "custom-public-listener-ignores-tls",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
overrideGoldenName: "custom-public-listener", // should be the same
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Config["envoy_public_listener_json"] =
|
||||
|
@ -75,6 +86,7 @@ func TestListenersFromSnapshot(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "custom-upstream",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Upstreams[0].Config["envoy_listener_json"] =
|
||||
customListenerJSON(t, customListenerJSONOptions{
|
||||
|
@ -85,6 +97,7 @@ func TestListenersFromSnapshot(t *testing.T) {
|
|||
},
|
||||
{
|
||||
name: "custom-upstream-typed",
|
||||
create: proxycfg.TestConfigSnapshot,
|
||||
overrideGoldenName: "custom-upstream", // should be the same
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Upstreams[0].Config["envoy_listener_json"] =
|
||||
|
@ -94,6 +107,42 @@ func TestListenersFromSnapshot(t *testing.T) {
|
|||
})
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mesh-gateway",
|
||||
create: proxycfg.TestConfigSnapshotMeshGateway,
|
||||
},
|
||||
{
|
||||
name: "mesh-gateway-tagged-addresses",
|
||||
create: proxycfg.TestConfigSnapshotMeshGateway,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Config = map[string]interface{}{
|
||||
"envoy_mesh_gateway_no_default_bind": true,
|
||||
"envoy_mesh_gateway_bind_tagged_addresses": true,
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mesh-gateway-custom-addresses",
|
||||
create: proxycfg.TestConfigSnapshotMeshGateway,
|
||||
setup: func(snap *proxycfg.ConfigSnapshot) {
|
||||
snap.Proxy.Config = map[string]interface{}{
|
||||
"envoy_mesh_gateway_bind_addresses": map[string]structs.ServiceAddress{
|
||||
"foo": structs.ServiceAddress{
|
||||
Address: "198.17.2.3",
|
||||
Port: 8080,
|
||||
},
|
||||
"bar": structs.ServiceAddress{
|
||||
Address: "2001:db8::ff",
|
||||
Port: 9999,
|
||||
},
|
||||
"baz": structs.ServiceAddress{
|
||||
Address: "127.0.0.1",
|
||||
Port: 8765,
|
||||
},
|
||||
},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
@ -101,14 +150,18 @@ func TestListenersFromSnapshot(t *testing.T) {
|
|||
require := require.New(t)
|
||||
|
||||
// Sanity check default with no overrides first
|
||||
snap := proxycfg.TestConfigSnapshot(t)
|
||||
snap := tt.create(t)
|
||||
|
||||
// We need to replace the TLS certs with deterministic ones to make golden
|
||||
// files workable. Note we don't update these otherwise they'd change
|
||||
// golden files for every test case and so not be any use!
|
||||
snap.Leaf.CertPEM = golden(t, "test-leaf-cert", "")
|
||||
snap.Leaf.PrivateKeyPEM = golden(t, "test-leaf-key", "")
|
||||
if snap.ConnectProxy.Leaf != nil {
|
||||
snap.ConnectProxy.Leaf.CertPEM = golden(t, "test-leaf-cert", "")
|
||||
snap.ConnectProxy.Leaf.PrivateKeyPEM = golden(t, "test-leaf-key", "")
|
||||
}
|
||||
if snap.Roots != nil {
|
||||
snap.Roots.Roots[0].RootCert = golden(t, "test-root-cert", "")
|
||||
}
|
||||
|
||||
if tt.setup != nil {
|
||||
tt.setup(snap)
|
||||
|
@ -118,6 +171,10 @@ func TestListenersFromSnapshot(t *testing.T) {
|
|||
s := Server{Logger: log.New(os.Stderr, "", log.LstdFlags)}
|
||||
|
||||
listeners, err := s.listenersFromSnapshot(snap, "my-token")
|
||||
sort.Slice(listeners, func(i, j int) bool {
|
||||
return listeners[i].(*envoy.Listener).Name < listeners[j].(*envoy.Listener).Name
|
||||
})
|
||||
|
||||
require.NoError(err)
|
||||
r, err := createResponse(ListenerType, "00000001", "00000001", listeners)
|
||||
require.NoError(err)
|
||||
|
|
|

@@ -194,7 +194,7 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
    handlers := map[string]*xDSType{
        EndpointType: &xDSType{
            typeURL:   EndpointType,
            resources: endpointsFromSnapshot,
            resources: s.endpointsFromSnapshot,
            stream:    stream,
        },
        ClusterType: &xDSType{

@@ -240,6 +240,11 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
        if rule != nil && !rule.ServiceWrite(cfgSnap.Proxy.DestinationServiceName, nil) {
            return status.Errorf(codes.PermissionDenied, "permission denied")
        }
    case structs.ServiceKindMeshGateway:
        // TODO (mesh-gateway) - figure out what ACLs to check for the Gateways
        if rule != nil && !rule.ServiceWrite(cfgSnap.Service, nil) {
            return status.Errorf(codes.PermissionDenied, "permission denied")
        }
    default:
        return status.Errorf(codes.Internal, "Invalid service kind")
    }
@ -178,7 +178,7 @@ func TestServer_StreamAggregatedResources_BasicProtocol(t *testing.T) {
|
|||
// actually resend all blocked types on the new "version" anyway since it
|
||||
// doesn't know _what_ changed. We could do something trivial but let's
|
||||
// simulate a leaf cert expiring and being rotated.
|
||||
snap.Leaf = proxycfg.TestLeafForCA(t, snap.Roots.Roots[0])
|
||||
snap.ConnectProxy.Leaf = proxycfg.TestLeafForCA(t, snap.Roots.Roots[0])
|
||||
mgr.DeliverConfig(t, "web-sidecar-proxy", snap)
|
||||
|
||||
// All 3 response that have something to return should return with new version
|
||||
|
@ -222,7 +222,7 @@ func TestServer_StreamAggregatedResources_BasicProtocol(t *testing.T) {
|
|||
assertChanBlocked(t, envoy.stream.sendCh)
|
||||
|
||||
// Change config again and make sure it's delivered to everyone!
|
||||
snap.Leaf = proxycfg.TestLeafForCA(t, snap.Roots.Roots[0])
|
||||
snap.ConnectProxy.Leaf = proxycfg.TestLeafForCA(t, snap.Roots.Roots[0])
|
||||
mgr.DeliverConfig(t, "web-sidecar-proxy", snap)
|
||||
|
||||
assertResponseSent(t, envoy.stream.sendCh, expectClustersJSON(t, snap, "", 3, 7))
|
||||
|
@ -274,15 +274,15 @@ func expectEndpointsJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, token stri
|
|||
}`
|
||||
}
|
||||
|
||||
func expectedUpstreamTLSContextJSON(t *testing.T, snap *proxycfg.ConfigSnapshot) string {
|
||||
return expectedTLSContextJSON(t, snap, false)
|
||||
func expectedUpstreamTLSContextJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, sni string) string {
|
||||
return expectedTLSContextJSON(t, snap, false, sni)
|
||||
}
|
||||
|
||||
func expectedPublicTLSContextJSON(t *testing.T, snap *proxycfg.ConfigSnapshot) string {
|
||||
return expectedTLSContextJSON(t, snap, true)
|
||||
return expectedTLSContextJSON(t, snap, true, "")
|
||||
}
|
||||
|
||||
func expectedTLSContextJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, requireClientCert bool) string {
|
||||
func expectedTLSContextJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, requireClientCert bool, sni string) string {
|
||||
// Assume just one root for now, can get fancier later if needed.
|
||||
caPEM := snap.Roots.Roots[0].RootCert
|
||||
reqClient := ""
|
||||
|
@ -290,16 +290,23 @@ func expectedTLSContextJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, require
|
|||
reqClient = `,
|
||||
"requireClientCertificate": true`
|
||||
}
|
||||
|
||||
upstreamSNI := ""
|
||||
if sni != "" {
|
||||
upstreamSNI = `,
|
||||
"sni": "` + sni + `"`
|
||||
}
|
||||
|
||||
return `{
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "` + strings.Replace(snap.Leaf.CertPEM, "\n", "\\n", -1) + `"
|
||||
"inlineString": "` + strings.Replace(snap.ConnectProxy.Leaf.CertPEM, "\n", "\\n", -1) + `"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "` + strings.Replace(snap.Leaf.PrivateKeyPEM, "\n", "\\n", -1) + `"
|
||||
"inlineString": "` + strings.Replace(snap.ConnectProxy.Leaf.PrivateKeyPEM, "\n", "\\n", -1) + `"
|
||||
}
|
||||
}
|
||||
],
|
||||
|
@ -310,6 +317,7 @@ func expectedTLSContextJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, require
|
|||
}
|
||||
}
|
||||
` + reqClient + `
|
||||
` + upstreamSNI + `
|
||||
}`
|
||||
}
|
||||
|
||||
|
|
|

@@ -0,0 +1,16 @@
package xds

import (
    "fmt"

    "github.com/hashicorp/consul/agent/proxycfg"
)

func DatacenterSNI(dc string, cfgSnap *proxycfg.ConfigSnapshot) string {
    return fmt.Sprintf("%s.internal.%s", dc, cfgSnap.Roots.TrustDomain)
}

func ServiceSNI(service string, namespace string, datacenter string, cfgSnap *proxycfg.ConfigSnapshot) string {
    // TODO (mesh-gateway) - support service subsets here too
    return fmt.Sprintf("%s.%s.%s.internal.%s", service, namespace, datacenter, cfgSnap.Roots.TrustDomain)
}
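An illustrative sketch (not part of the commit) of the names these helpers produce; the trust domain value is the one used in this commit's golden files, and the snapshot is constructed with only the field the helpers read:

package main

import (
    "fmt"

    "github.com/hashicorp/consul/agent/proxycfg"
    "github.com/hashicorp/consul/agent/structs"
    "github.com/hashicorp/consul/agent/xds"
)

func main() {
    // Minimal snapshot carrying only the trust domain.
    snap := &proxycfg.ConfigSnapshot{
        Roots: &structs.IndexedCARoots{TrustDomain: "11111111-2222-3333-4444-555555555555"},
    }

    fmt.Println(xds.DatacenterSNI("dc2", snap))
    // dc2.internal.11111111-2222-3333-4444-555555555555
    fmt.Println(xds.ServiceSNI("db", "default", "dc1", snap))
    // db.default.dc1.internal.11111111-2222-3333-4444-555555555555
}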
@ -46,7 +46,8 @@
|
|||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555"
|
||||
},
|
||||
"outlierDetection": {
|
||||
|
||||
|
@ -84,7 +85,8 @@
|
|||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"sni": "geo-cache.default.dc1.internal.11111111-2222-3333-4444-555555555555"
|
||||
},
|
||||
"outlierDetection": {
|
||||
|
||||
|
|
|
@ -58,7 +58,8 @@
|
|||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555"
|
||||
},
|
||||
"outlierDetection": {
|
||||
|
||||
|
@ -96,7 +97,8 @@
|
|||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"sni": "geo-cache.default.dc1.internal.11111111-2222-3333-4444-555555555555"
|
||||
},
|
||||
"outlierDetection": {
|
||||
|
||||
|
|
|
@ -58,7 +58,8 @@
|
|||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555"
|
||||
}
|
||||
},
|
||||
{
|
||||
|
@ -93,7 +94,8 @@
|
|||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"sni": "geo-cache.default.dc1.internal.11111111-2222-3333-4444-555555555555"
|
||||
},
|
||||
"outlierDetection": {
|
||||
|
||||
|
|
|
@ -58,7 +58,8 @@
|
|||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"sni": "db.default.dc1.internal.11111111-2222-3333-4444-555555555555"
|
||||
},
|
||||
"outlierDetection": {
|
||||
|
||||
|
@ -96,7 +97,8 @@
|
|||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"sni": "geo-cache.default.dc1.internal.11111111-2222-3333-4444-555555555555"
|
||||
},
|
||||
"outlierDetection": {
|
||||
|
||||
|
|
|
@ -0,0 +1,39 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.api.v2.Cluster",
      "name": "dc2.internal.11111111-2222-3333-4444-555555555555",
      "type": "EDS",
      "edsClusterConfig": {
        "edsConfig": {
          "ads": {

          }
        }
      },
      "connectTimeout": "5s",
      "outlierDetection": {

      }
    },
    {
      "@type": "type.googleapis.com/envoy.api.v2.Cluster",
      "name": "foo.default.dc1.internal.11111111-2222-3333-4444-555555555555",
      "type": "EDS",
      "edsClusterConfig": {
        "edsConfig": {
          "ads": {

          }
        }
      },
      "connectTimeout": "5s",
      "outlierDetection": {

      }
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.api.v2.Cluster",
  "nonce": "00000001"
}
@ -0,0 +1,41 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
      "clusterName": "db",
      "endpoints": [
        {
          "lbEndpoints": [
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.1",
                    "portValue": 0
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            },
            {
              "endpoint": {
                "address": {
                  "socketAddress": {
                    "address": "10.10.1.2",
                    "portValue": 0
                  }
                }
              },
              "healthStatus": "HEALTHY",
              "loadBalancingWeight": 1
            }
          ]
        }
      ]
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
  "nonce": "00000001"
}
@ -0,0 +1,75 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
|
||||
"clusterName": "dc2.internal.11111111-2222-3333-4444-555555555555",
|
||||
"endpoints": [
|
||||
{
|
||||
"lbEndpoints": [
|
||||
{
|
||||
"endpoint": {
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "198.18.1.1",
|
||||
"portValue": 443
|
||||
}
|
||||
}
|
||||
},
|
||||
"healthStatus": "HEALTHY",
|
||||
"loadBalancingWeight": 1
|
||||
},
|
||||
{
|
||||
"endpoint": {
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "198.18.1.2",
|
||||
"portValue": 443
|
||||
}
|
||||
}
|
||||
},
|
||||
"healthStatus": "HEALTHY",
|
||||
"loadBalancingWeight": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
|
||||
"clusterName": "foo.default.dc1.internal.11111111-2222-3333-4444-555555555555",
|
||||
"endpoints": [
|
||||
{
|
||||
"lbEndpoints": [
|
||||
{
|
||||
"endpoint": {
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.0.0.2",
|
||||
"portValue": 2222
|
||||
}
|
||||
}
|
||||
},
|
||||
"healthStatus": "HEALTHY",
|
||||
"loadBalancingWeight": 1
|
||||
},
|
||||
{
|
||||
"endpoint": {
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.0.0.2",
|
||||
"portValue": 2222
|
||||
}
|
||||
}
|
||||
},
|
||||
"healthStatus": "HEALTHY",
|
||||
"loadBalancingWeight": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -1,6 +1,52 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "custom-upstream",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "11.11.11.11",
|
||||
"portValue": 11111
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "random-cluster",
|
||||
"stat_prefix": "foo-stats"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "prepared_query:geo-cache:127.10.10.10:8181",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.10.10.10",
|
||||
"portValue": 8181
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "prepared_query:geo-cache",
|
||||
"stat_prefix": "upstream_prepared_query_geo-cache_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "public_listener:0.0.0.0:9999",
|
||||
|
@ -63,52 +109,6 @@
|
|||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "custom-upstream",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "11.11.11.11",
|
||||
"portValue": 11111
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "random-cluster",
|
||||
"stat_prefix": "foo-stats"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "prepared_query:geo-cache:127.10.10.10:8181",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.10.10.10",
|
||||
"portValue": 8181
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "prepared_query:geo-cache",
|
||||
"stat_prefix": "upstream_prepared_query_geo-cache_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
|
|
|
@ -1,6 +1,52 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "db:127.0.0.1:9191",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.0.0.1",
|
||||
"portValue": 9191
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "db",
|
||||
"stat_prefix": "upstream_db_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "prepared_query:geo-cache:127.10.10.10:8181",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.10.10.10",
|
||||
"portValue": 8181
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "prepared_query:geo-cache",
|
||||
"stat_prefix": "upstream_prepared_query_geo-cache_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "public_listener:0.0.0.0:9999",
|
||||
|
@ -63,52 +109,6 @@
|
|||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "db:127.0.0.1:9191",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.0.0.1",
|
||||
"portValue": 9191
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "db",
|
||||
"stat_prefix": "upstream_db_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "prepared_query:geo-cache:127.10.10.10:8181",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.10.10.10",
|
||||
"portValue": 8181
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "prepared_query:geo-cache",
|
||||
"stat_prefix": "upstream_prepared_query_geo-cache_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
|
|
|
@ -1,6 +1,52 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "db:127.0.0.1:9191",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.0.0.1",
|
||||
"portValue": 9191
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "db",
|
||||
"stat_prefix": "upstream_db_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "prepared_query:geo-cache:127.10.10.10:8181",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.10.10.10",
|
||||
"portValue": 8181
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "prepared_query:geo-cache",
|
||||
"stat_prefix": "upstream_prepared_query_geo-cache_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "public_listener:0.0.0.0:9999",
|
||||
|
@ -92,52 +138,6 @@
|
|||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "db:127.0.0.1:9191",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.0.0.1",
|
||||
"portValue": 9191
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "db",
|
||||
"stat_prefix": "upstream_db_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "prepared_query:geo-cache:127.10.10.10:8181",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.10.10.10",
|
||||
"portValue": 8181
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "prepared_query:geo-cache",
|
||||
"stat_prefix": "upstream_prepared_query_geo-cache_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
|
|
|
@ -1,69 +1,6 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "public_listener:0.0.0.0:9999",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "0.0.0.0",
|
||||
"portValue": 9999
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "local_app",
|
||||
"stat_prefix": "public_listener_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "db:127.0.0.1:9191",
|
||||
|
@ -139,6 +76,69 @@
|
|||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "public_listener:0.0.0.0:9999",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "0.0.0.0",
|
||||
"portValue": 9999
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"tlsContext": {
|
||||
"commonTlsContext": {
|
||||
"tlsParams": {
|
||||
|
||||
},
|
||||
"tlsCertificates": [
|
||||
{
|
||||
"certificateChain": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICjDCCAjKgAwIBAgIIC5llxGV1gB8wCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowDjEMMAoG\nA1UEAxMDd2ViMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEADPv1RHVNRfa2VKR\nAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Favq5E0ivpNtv1QnFhxtPd7d5k4e+T7\nSkW1TaOCAXIwggFuMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcD\nAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBoBgNVHQ4EYQRfN2Q6MDc6ODc6M2E6\nNDA6MTk6NDc6YzM6NWE6YzA6YmE6NjI6ZGY6YWY6NGI6ZDQ6MDU6MjU6NzY6M2Q6\nNWE6OGQ6MTY6OGQ6Njc6NWU6MmU6YTA6MzQ6N2Q6ZGM6ZmYwagYDVR0jBGMwYYBf\nZDE6MTE6MTE6YWM6MmE6YmE6OTc6YjI6M2Y6YWM6N2I6YmQ6ZGE6YmU6YjE6OGE6\nZmM6OWE6YmE6YjU6YmM6ODM6ZTc6NWU6NDE6NmY6ZjI6NzM6OTU6NTg6MGM6ZGIw\nWQYDVR0RBFIwUIZOc3BpZmZlOi8vMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1\nNTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2RjMS9zdmMvd2ViMAoGCCqG\nSM49BAMCA0gAMEUCIGC3TTvvjj76KMrguVyFf4tjOqaSCRie3nmHMRNNRav7AiEA\npY0heYeK9A6iOLrzqxSerkXXQyj5e9bE4VgUnxgPU6g=\n-----END CERTIFICATE-----\n"
|
||||
},
|
||||
"privateKey": {
|
||||
"inlineString": "-----BEGIN EC PRIVATE KEY-----\nMHcCAQEEIMoTkpRggp3fqZzFKh82yS4LjtJI+XY+qX/7DefHFrtdoAoGCCqGSM49\nAwEHoUQDQgAEADPv1RHVNRfa2VKRAB16b6rZnEt7tuhaxCFpQXPj7M2omb0B9Fav\nq5E0ivpNtv1QnFhxtPd7d5k4e+T7SkW1TQ==\n-----END EC PRIVATE KEY-----\n"
|
||||
}
|
||||
}
|
||||
],
|
||||
"validationContext": {
|
||||
"trustedCa": {
|
||||
"inlineString": "-----BEGIN CERTIFICATE-----\nMIICXDCCAgKgAwIBAgIICpZq70Z9LyUwCgYIKoZIzj0EAwIwFDESMBAGA1UEAxMJ\nVGVzdCBDQSAyMB4XDTE5MDMyMjEzNTgyNloXDTI5MDMyMjEzNTgyNlowFDESMBAG\nA1UEAxMJVGVzdCBDQSAyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEIhywH1gx\nAsMwuF3ukAI5YL2jFxH6Usnma1HFSfVyxbXX1/uoZEYrj8yCAtdU2yoHETyd+Zx2\nThhRLP79pYegCaOCATwwggE4MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTAD\nAQH/MGgGA1UdDgRhBF9kMToxMToxMTphYzoyYTpiYTo5NzpiMjozZjphYzo3Yjpi\nZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1ZTo0MTo2ZjpmMjo3\nMzo5NTo1ODowYzpkYjBqBgNVHSMEYzBhgF9kMToxMToxMTphYzoyYTpiYTo5Nzpi\nMjozZjphYzo3YjpiZDpkYTpiZTpiMTo4YTpmYzo5YTpiYTpiNTpiYzo4MzplNzo1\nZTo0MTo2ZjpmMjo3Mzo5NTo1ODowYzpkYjA/BgNVHREEODA2hjRzcGlmZmU6Ly8x\nMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqG\nSM49BAMCA0gAMEUCICOY0i246rQHJt8o8Oya0D5PLL1FnmsQmQqIGCi31RwnAiEA\noR5f6Ku+cig2Il8T8LJujOp2/2A72QcHZA57B13y+8o=\n-----END CERTIFICATE-----\n"
|
||||
}
|
||||
}
|
||||
},
|
||||
"requireClientCertificate": true
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.ext_authz",
|
||||
"config": {
|
||||
"grpc_service": {
|
||||
"envoy_grpc": {
|
||||
"cluster_name": "local_agent"
|
||||
},
|
||||
"initial_metadata": [
|
||||
{
|
||||
"key": "x-consul-token",
|
||||
"value": "my-token"
|
||||
}
|
||||
]
|
||||
},
|
||||
"stat_prefix": "connect_authz"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "local_app",
|
||||
"stat_prefix": "public_listener_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
|
|
|
@ -0,0 +1,195 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "bar:2001:db8::ff:9999",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "2001:db8::ff",
|
||||
"portValue": 9999
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"*.dc2.internal.11111111-2222-3333-4444-555555555555"
|
||||
]
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "dc2.internal.11111111-2222-3333-4444-555555555555",
|
||||
"stat_prefix": "mesh_gateway_remote_bar_dc2_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "mesh_gateway_local_bar_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"listenerFilters": [
|
||||
{
|
||||
"name": "envoy.listener.tls_inspector"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "baz:127.0.0.1:8765",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "127.0.0.1",
|
||||
"portValue": 8765
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"*.dc2.internal.11111111-2222-3333-4444-555555555555"
|
||||
]
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "dc2.internal.11111111-2222-3333-4444-555555555555",
|
||||
"stat_prefix": "mesh_gateway_remote_baz_dc2_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "mesh_gateway_local_baz_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"listenerFilters": [
|
||||
{
|
||||
"name": "envoy.listener.tls_inspector"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "default:1.2.3.4:8443",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "1.2.3.4",
|
||||
"portValue": 8443
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"*.dc2.internal.11111111-2222-3333-4444-555555555555"
|
||||
]
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "dc2.internal.11111111-2222-3333-4444-555555555555",
|
||||
"stat_prefix": "mesh_gateway_remote_default_dc2_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "mesh_gateway_local_default_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"listenerFilters": [
|
||||
{
|
||||
"name": "envoy.listener.tls_inspector"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "foo:198.17.2.3:8080",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "198.17.2.3",
|
||||
"portValue": 8080
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"*.dc2.internal.11111111-2222-3333-4444-555555555555"
|
||||
]
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "dc2.internal.11111111-2222-3333-4444-555555555555",
|
||||
"stat_prefix": "mesh_gateway_remote_foo_dc2_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "mesh_gateway_local_foo_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"listenerFilters": [
|
||||
{
|
||||
"name": "envoy.listener.tls_inspector"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -0,0 +1,101 @@
|
|||
{
|
||||
"versionInfo": "00000001",
|
||||
"resources": [
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "lan:1.2.3.4:8443",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "1.2.3.4",
|
||||
"portValue": 8443
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"*.dc2.internal.11111111-2222-3333-4444-555555555555"
|
||||
]
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "dc2.internal.11111111-2222-3333-4444-555555555555",
|
||||
"stat_prefix": "mesh_gateway_remote_lan_dc2_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "mesh_gateway_local_lan_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"listenerFilters": [
|
||||
{
|
||||
"name": "envoy.listener.tls_inspector"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"@type": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"name": "wan:198.18.0.1:443",
|
||||
"address": {
|
||||
"socketAddress": {
|
||||
"address": "198.18.0.1",
|
||||
"portValue": 443
|
||||
}
|
||||
},
|
||||
"filterChains": [
|
||||
{
|
||||
"filterChainMatch": {
|
||||
"serverNames": [
|
||||
"*.dc2.internal.11111111-2222-3333-4444-555555555555"
|
||||
]
|
||||
},
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "dc2.internal.11111111-2222-3333-4444-555555555555",
|
||||
"stat_prefix": "mesh_gateway_remote_wan_dc2_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.sni_cluster"
|
||||
},
|
||||
{
|
||||
"name": "envoy.tcp_proxy",
|
||||
"config": {
|
||||
"cluster": "",
|
||||
"stat_prefix": "mesh_gateway_local_wan_tcp"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"listenerFilters": [
|
||||
{
|
||||
"name": "envoy.listener.tls_inspector"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
|
||||
"nonce": "00000001"
|
||||
}
|
|
@ -0,0 +1,54 @@
{
  "versionInfo": "00000001",
  "resources": [
    {
      "@type": "type.googleapis.com/envoy.api.v2.Listener",
      "name": "default:1.2.3.4:8443",
      "address": {
        "socketAddress": {
          "address": "1.2.3.4",
          "portValue": 8443
        }
      },
      "filterChains": [
        {
          "filterChainMatch": {
            "serverNames": [
              "*.dc2.internal.11111111-2222-3333-4444-555555555555"
            ]
          },
          "filters": [
            {
              "name": "envoy.tcp_proxy",
              "config": {
                "cluster": "dc2.internal.11111111-2222-3333-4444-555555555555",
                "stat_prefix": "mesh_gateway_remote_default_dc2_tcp"
              }
            }
          ]
        },
        {
          "filters": [
            {
              "name": "envoy.filters.network.sni_cluster"
            },
            {
              "name": "envoy.tcp_proxy",
              "config": {
                "cluster": "",
                "stat_prefix": "mesh_gateway_local_default_tcp"
              }
            }
          ]
        }
      ],
      "listenerFilters": [
        {
          "name": "envoy.listener.tls_inspector"
        }
      ]
    }
  ],
  "typeUrl": "type.googleapis.com/envoy.api.v2.Listener",
  "nonce": "00000001"
}
@ -23,6 +23,11 @@ const (
	// service proxies another service within Consul and speaks the connect
	// protocol.
	ServiceKindConnectProxy ServiceKind = "connect-proxy"

	// ServiceKindMeshGateway is a Mesh Gateway for the Connect feature. This
	// service will proxy connections based off the SNI header set by other
	// connect proxies
	ServiceKindMeshGateway ServiceKind = "mesh-gateway"
)

// ProxyExecMode is the execution mode for a managed Connect proxy.

@ -120,12 +125,12 @@ type AgentServiceConnectProxy struct {
// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
// ServiceDefinition or response.
type AgentServiceConnectProxyConfig struct {
	DestinationServiceName string
	DestinationServiceName string `json:",omitempty"`
	DestinationServiceID string `json:",omitempty"`
	LocalServiceAddress string `json:",omitempty"`
	LocalServicePort int `json:",omitempty"`
	Config map[string]interface{} `json:",omitempty" bexpr:"-"`
	Upstreams []Upstream
	Upstreams []Upstream `json:",omitempty"`
}

// AgentMember represents a cluster member known to the agent
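With ServiceKindMeshGateway in the api package, a client can now tell gateway registrations apart from connect proxies and read their Proxy block back from the local agent. A minimal sketch, assuming a gateway is already registered under the ID "mesh-gateway" (the ID is illustrative only, not part of this diff):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// "mesh-gateway" is an assumed service ID for this illustration.
	svc, _, err := client.Agent().Service("mesh-gateway", nil)
	if err != nil {
		log.Fatal(err)
	}

	if svc.Kind == api.ServiceKindMeshGateway {
		// Proxy is populated for mesh gateways as well, not only
		// for connect-proxy services.
		fmt.Println(svc.Proxy.Config)
	}
}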
@ -1574,7 +1574,7 @@ func TestAPI_AgentConnectProxyConfig(t *testing.T) {
		ProxyServiceID: "foo-proxy",
		TargetServiceID: "foo",
		TargetServiceName: "foo",
		ContentHash: "acdf5eb6f5794a14",
		ContentHash: "b58a7e24130d3058",
		ExecMode: "daemon",
		Command: []string{"consul", "connect", "proxy"},
		Config: map[string]interface{}{
@ -1722,3 +1722,34 @@ func TestAgentService_JSON_OmitTaggedAdddresses(t *testing.T) {
		})
	}
}

func TestAgentService_Register_MeshGateway(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t)
	defer s.Stop()

	agent := c.Agent()

	reg := AgentServiceRegistration{
		Kind: ServiceKindMeshGateway,
		Name: "mesh-gateway",
		Address: "10.1.2.3",
		Port: 8443,
		Proxy: &AgentServiceConnectProxyConfig{
			Config: map[string]interface{}{
				"foo": "bar",
			},
		},
	}

	err := agent.ServiceRegister(&reg)
	require.NoError(t, err)

	svc, _, err := agent.Service("mesh-gateway", nil)
	require.NoError(t, err)
	require.NotNil(t, svc)
	require.Equal(t, ServiceKindMeshGateway, svc.Kind)
	require.NotNil(t, svc.Proxy)
	require.Contains(t, svc.Proxy.Config, "foo")
	require.Equal(t, "bar", svc.Proxy.Config["foo"])
}
@ -28,10 +28,39 @@ type ConfigEntry interface {
	GetModifyIndex() uint64
}

type MeshGatewayMode string

const (
	// MeshGatewayModeDefault represents no specific mode and should
	// be used to indicate that a different layer of the configuration
	// chain should take precedence
	MeshGatewayModeDefault MeshGatewayMode = ""

	// MeshGatewayModeNone represents that the Upstream Connect connections
	// should be direct and not flow through a mesh gateway.
	MeshGatewayModeNone MeshGatewayMode = "none"

	// MeshGatewayModeLocal represents that the Upstream Connect connections
	// should be made to a mesh gateway in the local datacenter.
	MeshGatewayModeLocal MeshGatewayMode = "local"

	// MeshGatewayModeRemote represents that the Upstream Connect connections
	// should be made to a mesh gateway in a remote datacenter.
	MeshGatewayModeRemote MeshGatewayMode = "remote"
)

// MeshGatewayConfig controls how Mesh Gateways are used for upstream Connect
// services
type MeshGatewayConfig struct {
	// Mode is the mode that should be used for the upstream connection.
	Mode MeshGatewayMode
}

type ServiceConfigEntry struct {
	Kind string
	Name string
	Protocol string
	MeshGateway MeshGatewayConfig
	CreateIndex uint64
	ModifyIndex uint64
}

@ -56,6 +85,7 @@ type ProxyConfigEntry struct {
	Kind string
	Name string
	Config map[string]interface{}
	MeshGateway MeshGatewayConfig
	CreateIndex uint64
	ModifyIndex uint64
}
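For context, the new MeshGateway field rides along on config entries written through the api client. A hedged sketch only, assuming the ConfigEntries endpoint and the proxy-defaults/global entry, neither of which is part of this diff:

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Sketch: default every upstream in the mesh to a local mesh gateway.
	entry := &api.ProxyConfigEntry{
		Kind: api.ProxyDefaults,
		Name: api.ProxyConfigGlobal,
		MeshGateway: api.MeshGatewayConfig{
			Mode: api.MeshGatewayModeLocal,
		},
	}

	if _, _, err := client.ConfigEntries().Set(entry, nil); err != nil {
		log.Fatal(err)
	}
}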
@ -54,6 +54,13 @@ type Intention struct {
	// or modified.
	CreatedAt, UpdatedAt time.Time

	// Hash of the contents of the intention
	//
	// This is needed mainly for replication purposes. When replicating from
	// one DC to another keeping the content Hash will allow us to detect
	// content changes more efficiently than checking every single field
	Hash []byte

	CreateIndex uint64
	ModifyIndex uint64
}
@ -32,6 +32,7 @@ func TestAPI_ConnectIntentionCreateListGetUpdateDelete(t *testing.T) {
	ixn.UpdatedAt = actual.UpdatedAt
	ixn.CreateIndex = actual.CreateIndex
	ixn.ModifyIndex = actual.ModifyIndex
	ixn.Hash = actual.Hash
	require.Equal(ixn, actual)

	// Get it

@ -49,6 +50,7 @@ func TestAPI_ConnectIntentionCreateListGetUpdateDelete(t *testing.T) {
	require.NoError(err)
	ixn.UpdatedAt = actual.UpdatedAt
	ixn.ModifyIndex = actual.ModifyIndex
	ixn.Hash = actual.Hash
	require.Equal(ixn, actual)

	// Delete it
@ -17,6 +17,8 @@ import (
	"github.com/hashicorp/consul/api"
	proxyCmd "github.com/hashicorp/consul/command/connect/proxy"
	"github.com/hashicorp/consul/command/flags"
	"github.com/hashicorp/consul/ipaddr"
	"github.com/hashicorp/go-sockaddr/template"

	"github.com/mitchellh/cli"
)
@ -44,6 +46,7 @@ type cmd struct {
	client *api.Client

	// flags
	meshGateway bool
	proxyID string
	sidecarFor string
	adminAccessLogPath string

@ -52,6 +55,14 @@ type cmd struct {
	bootstrap bool
	disableCentralConfig bool
	grpcAddr string

	// mesh gateway registration information
	register bool
	address string
	wanAddress string
	deregAfterCritical string

	meshGatewaySvcName string
}

func (c *cmd) init() {
@ -60,6 +71,9 @@ func (c *cmd) init() {
	c.flags.StringVar(&c.proxyID, "proxy-id", "",
		"The proxy's ID on the local agent.")

	c.flags.BoolVar(&c.meshGateway, "mesh-gateway", false,
		"Generate the bootstrap.json but don't exec envoy")

	c.flags.StringVar(&c.sidecarFor, "sidecar-for", "",
		"The ID of a service instance on the local agent that this proxy should "+
			"become a sidecar for. It requires that the proxy service is registered "+
@ -94,11 +108,58 @@ func (c *cmd) init() {
		"Set the agent's gRPC address and port (in http(s)://host:port format). "+
			"Alternatively, you can specify CONSUL_GRPC_ADDR in ENV.")

	c.flags.BoolVar(&c.register, "register", false,
		"Register a new Mesh Gateway service before configuring and starting Envoy")

	c.flags.StringVar(&c.address, "address", "",
		"LAN address to advertise in the Mesh Gateway service registration")

	c.flags.StringVar(&c.wanAddress, "wan-address", "",
		"WAN address to advertise in the Mesh Gateway service registration")

	c.flags.StringVar(&c.meshGatewaySvcName, "service", "mesh-gateway",
		"Service name to use for the registration")

	c.flags.StringVar(&c.deregAfterCritical, "deregister-after-critical", "6h",
		"The amount of time the gateway services health check can be failing before being deregistered")

	c.http = &flags.HTTPFlags{}
	flags.Merge(c.flags, c.http.ClientFlags())
	c.help = flags.Usage(help, c.flags)
}

const (
	DefaultMeshGatewayPort int = 443
)

func parseAddress(addrStr string) (string, int, error) {
	if addrStr == "" {
		// defaulting the port to 443
		return "", DefaultMeshGatewayPort, nil
	}

	x, err := template.Parse(addrStr)
	if err != nil {
		return "", DefaultMeshGatewayPort, fmt.Errorf("Error parsing address %q: %v", addrStr, err)
	}

	addr, portStr, err := net.SplitHostPort(x)
	if err != nil {
		return "", DefaultMeshGatewayPort, fmt.Errorf("Error parsing address %q: %v", x, err)
	}

	port := DefaultMeshGatewayPort

	if portStr != "" {
		port, err = strconv.Atoi(portStr)
		if err != nil {
			return "", DefaultMeshGatewayPort, fmt.Errorf("Error parsing port %q: %v", portStr, err)
		}
	}

	return addr, port, nil
}

func (c *cmd) Run(args []string) int {
	if err := c.flags.Parse(args); err != nil {
		return 1
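A rough sketch of how parseAddress resolves the inputs the -address and -wan-address flags accept; it assumes it lives in the same package as parseAddress and uses testify's require, so it is illustrative rather than part of the change:

func TestParseAddressSketch(t *testing.T) {
	// Empty input falls back to the default port with no address.
	addr, port, err := parseAddress("")
	require.NoError(t, err)
	require.Equal(t, "", addr)
	require.Equal(t, DefaultMeshGatewayPort, port)

	// A plain host:port string is rendered unchanged by the
	// go-sockaddr template and then split as-is.
	addr, port, err = parseAddress("198.18.0.1:8443")
	require.NoError(t, err)
	require.Equal(t, "198.18.0.1", addr)
	require.Equal(t, 8443, port)

	// A go-sockaddr template such as `{{ GetPrivateIP }}:443` is
	// rendered against the host's interfaces before the host and
	// port are split, so the advertised address can be computed
	// at start time.
}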
@ -136,6 +197,74 @@ func (c *cmd) Run(args []string) int {
	}
	c.client = client

	if c.register {
		if !c.meshGateway {
			c.UI.Error("Auto-Registration can only be used for mesh gateways")
			return 1
		}

		lanAddr, lanPort, err := parseAddress(c.address)
		if err != nil {
			c.UI.Error(fmt.Sprintf("Failed to parse the -address parameter: %v", err))
			return 1
		}

		taggedAddrs := make(map[string]api.ServiceAddress)

		if lanAddr != "" {
			taggedAddrs["lan"] = api.ServiceAddress{Address: lanAddr, Port: lanPort}
		}

		if c.wanAddress != "" {
			wanAddr, wanPort, err := parseAddress(c.wanAddress)
			if err != nil {
				c.UI.Error(fmt.Sprintf("Failed to parse the -wan-address parameter: %v", err))
				return 1
			}
			taggedAddrs["wan"] = api.ServiceAddress{Address: wanAddr, Port: wanPort}
		}

		tcpCheckAddr := lanAddr
		if tcpCheckAddr == "" {
			// fallback to localhost as the gateway has to reside in the same network namespace
			// as the agent
			tcpCheckAddr = "127.0.0.1"
		}

		var proxyConf *api.AgentServiceConnectProxyConfig

		if lanAddr != "" {
			proxyConf = &api.AgentServiceConnectProxyConfig{
				Config: map[string]interface{}{
					"envoy_mesh_gateway_no_default_bind": true,
					"envoy_mesh_gateway_bind_tagged_addresses": true,
				},
			}
		}

		svc := api.AgentServiceRegistration{
			Kind: api.ServiceKindMeshGateway,
			Name: c.meshGatewaySvcName,
			Address: lanAddr,
			Port: lanPort,
			TaggedAddresses: taggedAddrs,
			Proxy: proxyConf,
			Check: &api.AgentServiceCheck{
				Name: "Mesh Gateway Listening",
				TCP: ipaddr.FormatAddressPort(tcpCheckAddr, lanPort),
				Interval: "10s",
				DeregisterCriticalServiceAfter: c.deregAfterCritical,
			},
		}

		if err := client.Agent().ServiceRegister(&svc); err != nil {
			c.UI.Error(fmt.Sprintf("Error registering service %q: %s", svc.Name, err))
			return 1
		}

		c.UI.Output(fmt.Sprintf("Registered service: %s", svc.Name))
	}

	// See if we need to lookup proxyID
	if c.proxyID == "" && c.sidecarFor != "" {
		proxyID, err := c.lookupProxyIDForSidecar()
@ -144,9 +273,18 @@ func (c *cmd) Run(args []string) int {
			return 1
		}
		c.proxyID = proxyID
	} else if c.proxyID == "" && c.meshGateway {
		gatewaySvc, err := c.lookupGatewayProxy()
		if err != nil {
			c.UI.Error(err.Error())
			return 1
		}
		c.proxyID = gatewaySvc.ID
		c.meshGatewaySvcName = gatewaySvc.Service
	}

	if c.proxyID == "" {
		c.UI.Error("No proxy ID specified. One of -proxy-id or -sidecar-for is " +
		c.UI.Error("No proxy ID specified. One of -proxy-id or -sidecar-for/-mesh-gateway is " +
			"required")
		return 1
	}
@ -264,6 +402,8 @@ func (c *cmd) templateArgs() (*BootstrapTplArgs, error) {
	cluster := c.proxyID
	if c.sidecarFor != "" {
		cluster = c.sidecarFor
	} else if c.meshGateway && c.meshGatewaySvcName != "" {
		cluster = c.meshGatewaySvcName
	}

	adminAccessLogPath := c.adminAccessLogPath
@ -302,7 +442,7 @@ func (c *cmd) generateConfig() ([]byte, error) {
	}

	if svc.Proxy == nil {
		return nil, errors.New("service is not a Connect proxy")
		return nil, errors.New("service is not a Connect proxy or mesh gateway")
	}

	// Parse the bootstrap config
@ -310,9 +450,11 @@ func (c *cmd) generateConfig() ([]byte, error) {
		return nil, fmt.Errorf("failed parsing Proxy.Config: %s", err)
	}

	if svc.Proxy.DestinationServiceName != "" {
		// Override cluster now we know the actual service name
		args.ProxyCluster = svc.Proxy.DestinationServiceName
	}
	}

	return bsCfg.GenerateJSON(args)
}
@ -321,6 +463,10 @@ func (c *cmd) lookupProxyIDForSidecar() (string, error) {
	return proxyCmd.LookupProxyIDForSidecar(c.client, c.sidecarFor)
}

func (c *cmd) lookupGatewayProxy() (*api.AgentService, error) {
	return proxyCmd.LookupGatewayProxy(c.client)
}

func (c *cmd) Synopsis() string {
	return synopsis
}
@ -247,6 +247,33 @@ func LookupProxyIDForSidecar(client *api.Client, sidecarFor string) (string, err
	return proxyIDs[0], nil
}

// LookupGatewayProxy finds the mesh-gateway service registered with the local
// agent, if any, and returns it. It succeeds if and only if exactly one
// mesh-gateway is registered with the agent.
func LookupGatewayProxy(client *api.Client) (*api.AgentService, error) {
	svcs, err := client.Agent().ServicesWithFilter("Kind == `mesh-gateway`")
	if err != nil {
		return nil, fmt.Errorf("Failed looking up mesh-gateway instances: %v", err)
	}

	var proxyIDs []string
	for _, svc := range svcs {
		proxyIDs = append(proxyIDs, svc.ID)
	}

	switch len(svcs) {
	case 0:
		return nil, fmt.Errorf("No mesh-gateway services registered with this agent")
	case 1:
		for _, svc := range svcs {
			return svc, nil
		}
		return nil, fmt.Errorf("This should be unreachable")
	default:
		return nil, fmt.Errorf("Cannot lookup the mesh-gateway's proxy ID because multiple are registered with the agent")
	}
}

func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) {
	// Use the configured proxy ID
	if c.proxyID != "" {
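The lookup above leans on the agent's filtered service listing; a minimal standalone sketch of the same query against a local agent:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// List only the mesh-gateway registrations on the local agent.
	svcs, err := client.Agent().ServicesWithFilter("Kind == `mesh-gateway`")
	if err != nil {
		log.Fatal(err)
	}

	for id, svc := range svcs {
		fmt.Printf("%s listening on %s:%d\n", id, svc.Address, svc.Port)
	}
}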
@ -23,6 +23,7 @@ type cmd struct {
	help string

	// flags
	flagKind string
	flagId string
	flagName string
	flagAddress string

@ -52,6 +53,7 @@ func (c *cmd) init() {
	c.flags.Var((*flags.FlagMapValue)(&c.flagTaggedAddresses), "tagged-address",
		"Tagged address to set on the service, formatted as key=value. This flag "+
			"may be specified multiple times to set multiple addresses.")
	c.flags.StringVar(&c.flagKind, "kind", "", "The service's 'kind'")

	c.http = &flags.HTTPFlags{}
	flags.Merge(c.flags, c.http.ClientFlags())

@ -78,6 +80,7 @@ func (c *cmd) Run(args []string) int {
	}

	svcs := []*api.AgentServiceRegistration{&api.AgentServiceRegistration{
		Kind: api.ServiceKind(c.flagKind),
		ID: c.flagId,
		Name: c.flagName,
		Address: c.flagAddress,