Envoy Mesh Gateway integration tests (#6187)
* Allow setting the mesh gateway mode for an upstream in config files
* Add envoy integration test for mesh gateways

  This necessitated many supporting changes in most of the other test cases.

* Add remote mode mesh gateways integration test
parent e5e3f483c5
commit 3053342198
@@ -1321,6 +1321,7 @@ func (b *Builder) serviceProxyVal(v *ServiceProxy, deprecatedDest *string) *stru
     LocalServicePort: b.intVal(v.LocalServicePort),
     Config:           v.Config,
     Upstreams:        b.upstreamsVal(v.Upstreams),
+    MeshGateway:      b.meshGatewayConfVal(v.MeshGateway),
   }
 }
@@ -1335,6 +1336,7 @@ func (b *Builder) upstreamsVal(v []Upstream) structs.Upstreams {
       LocalBindAddress: b.stringVal(u.LocalBindAddress),
       LocalBindPort:    b.intVal(u.LocalBindPort),
       Config:           u.Config,
+      MeshGateway:      b.meshGatewayConfVal(u.MeshGateway),
     }
     if ups[i].DestinationType == "" {
       ups[i].DestinationType = structs.UpstreamDestTypeService
@@ -1343,6 +1345,23 @@ func (b *Builder) upstreamsVal(v []Upstream) structs.Upstreams {
   return ups
 }
 
+func (b *Builder) meshGatewayConfVal(mgConf *MeshGatewayConfig) structs.MeshGatewayConfig {
+    cfg := structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeDefault}
+    if mgConf == nil || mgConf.Mode == nil {
+        // return defaults
+        return cfg
+    }
+
+    mode, err := structs.ValidateMeshGatewayMode(*mgConf.Mode)
+    if err != nil {
+        b.err = multierror.Append(b.err, err)
+        return cfg
+    }
+
+    cfg.Mode = mode
+    return cfg
+}
+
 func (b *Builder) serviceConnectVal(v *ServiceConnect) *structs.ServiceConnect {
   if v == nil {
     return nil
@@ -478,6 +478,9 @@ type ServiceProxy struct {
   // Upstreams describes any upstream dependencies the proxy instance should
   // setup.
   Upstreams []Upstream `json:"upstreams,omitempty" hcl:"upstreams" mapstructure:"upstreams"`
+
+  // Mesh Gateway Configuration
+  MeshGateway *MeshGatewayConfig `json:"mesh_gateway,omitempty" hcl:"mesh_gateway" mapstructure:"mesh_gateway"`
 }
 
 // Upstream represents a single upstream dependency for a service or proxy. It
@@ -513,6 +516,14 @@ type Upstream struct {
   // It can be used to pass arbitrary configuration for this specific upstream
   // to the proxy.
   Config map[string]interface{} `json:"config,omitempty" hcl:"config" mapstructure:"config"`
+
+  // Mesh Gateway Configuration
+  MeshGateway *MeshGatewayConfig `json:"mesh_gateway,omitempty" hcl:"mesh_gateway" mapstructure:"mesh_gateway"`
 }
 
+type MeshGatewayConfig struct {
+  // Mesh Gateway Mode
+  Mode *string `json:"mode,omitempty" hcl:"mode" mapstructure:"mode"`
+}
+
 // AutoEncrypt is the agent-global auto_encrypt configuration.
@@ -37,6 +37,21 @@ type MeshGatewayConfig struct {
   Mode MeshGatewayMode `json:",omitempty"`
 }
 
+func ValidateMeshGatewayMode(mode string) (MeshGatewayMode, error) {
+    switch MeshGatewayMode(mode) {
+    case MeshGatewayModeNone:
+        return MeshGatewayModeNone, nil
+    case MeshGatewayModeDefault:
+        return MeshGatewayModeDefault, nil
+    case MeshGatewayModeLocal:
+        return MeshGatewayModeLocal, nil
+    case MeshGatewayModeRemote:
+        return MeshGatewayModeRemote, nil
+    default:
+        return MeshGatewayModeDefault, fmt.Errorf("Invalid Mesh Gateway Mode: %q", mode)
+    }
+}
+
 func (c *MeshGatewayConfig) ToAPI() api.MeshGatewayConfig {
   return api.MeshGatewayConfig{Mode: api.MeshGatewayMode(c.Mode)}
 }
@@ -1,11 +1,9 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
 # Setup deny intention
-docker_consul intention create -deny s1 s2
+docker_consul primary intention create -deny s1 s2
 
-gen_envoy_bootstrap s1 19000
-gen_envoy_bootstrap s2 19001
-
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
+gen_envoy_bootstrap s1 19000 primary
+gen_envoy_bootstrap s2 19001 primary
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
 # Remove deny intention
-docker_consul intention delete s1 s2
+docker_consul primary intention delete s1 s2
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should NOT be able to connect to s2" {
@@ -1,8 +1,6 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
-gen_envoy_bootstrap s1 19000
-gen_envoy_bootstrap s2 19001
-
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
+gen_envoy_bootstrap s1 19000 primary
+gen_envoy_bootstrap s2 19001 primary
@@ -27,7 +27,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2" {
@@ -1,13 +1,11 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
 # wait for bootstrap to apply config entries
 wait_for_config_entry proxy-defaults global
 wait_for_config_entry service-defaults s1
 wait_for_config_entry service-defaults s2
 
-gen_envoy_bootstrap s1 19000
-gen_envoy_bootstrap s2 19001
-
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
+gen_envoy_bootstrap s1 19000 primary
+gen_envoy_bootstrap s2 19001 primary
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2 with http/1.1" {
@@ -10,11 +10,4 @@ wait_for_config_entry service-resolver s2
 gen_envoy_bootstrap s1 19000
 gen_envoy_bootstrap s2-v1 19001
 gen_envoy_bootstrap s2-v2 19002
 gen_envoy_bootstrap s2 19003
-
-export REQUIRED_SERVICES="
-s1 s1-sidecar-proxy
-s2 s2-sidecar-proxy
-s2-v1 s2-v1-sidecar-proxy
-s2-v2 s2-v2-sidecar-proxy
-"
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+export REQUIRED_SERVICES="
+$DEFAULT_REQUIRED_SERVICES
+s2-v1 s2-v1-sidecar-proxy
+s2-v2 s2-v2-sidecar-proxy
+"
@@ -39,7 +39,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for v2.s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 v2.s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 v2.s2.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2-v2 via upstream s2" {
@@ -16,10 +16,4 @@ wait_for_config_entry service-resolver s2
 
 gen_envoy_bootstrap s1 19000
 gen_envoy_bootstrap s2 19001
 gen_envoy_bootstrap s2-v1 19002
-
-export REQUIRED_SERVICES="
-s1 s1-sidecar-proxy
-s2 s2-sidecar-proxy
-s2-v1 s2-v1-sidecar-proxy
-"
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+export REQUIRED_SERVICES="
+$DEFAULT_REQUIRED_SERVICES
+s2-v1 s2-v1-sidecar-proxy
+"
@@ -12,12 +12,4 @@ gen_envoy_bootstrap s1 19000
 gen_envoy_bootstrap s2 19001
 gen_envoy_bootstrap s3-v1 19002
 gen_envoy_bootstrap s3-v2 19003
 gen_envoy_bootstrap s3 19004
-
-export REQUIRED_SERVICES="
-s1 s1-sidecar-proxy
-s2 s2-sidecar-proxy
-s3 s3-sidecar-proxy
-s3-v1 s3-v1-sidecar-proxy
-s3-v2 s3-v2-sidecar-proxy
-"
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+export REQUIRED_SERVICES="
+$DEFAULT_REQUIRED_SERVICES
+s3 s3-sidecar-proxy
+s3-v1 s3-v1-sidecar-proxy
+s3-v2 s3-v2-sidecar-proxy
+"
@@ -12,12 +12,4 @@ gen_envoy_bootstrap s1 19000
 gen_envoy_bootstrap s2 19001
 gen_envoy_bootstrap s3-v1 19002
 gen_envoy_bootstrap s3-v2 19003
 gen_envoy_bootstrap s3 19004
-
-export REQUIRED_SERVICES="
-s1 s1-sidecar-proxy
-s2 s2-sidecar-proxy
-s3 s3-sidecar-proxy
-s3-v1 s3-v1-sidecar-proxy
-s3-v2 s3-v2-sidecar-proxy
-"
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+export REQUIRED_SERVICES="
+$DEFAULT_REQUIRED_SERVICES
+s3 s3-sidecar-proxy
+s3-v1 s3-v1-sidecar-proxy
+s3-v2 s3-v2-sidecar-proxy
+"
@@ -53,7 +53,7 @@ load helpers
 # Note: when failover is configured the cluster is named for the original
 # service not any destination related to failover.
 @test "s1 upstream should have healthy endpoints for s2 and s3 together" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 2
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 2
 }
 
 @test "s1 upstream should be able to connect to s2 via upstream s2 to start" {
@@ -65,8 +65,8 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s3-v1 and unhealthy endpoints for s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 UNHEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary UNHEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s3-v1 now" {
@@ -1,17 +1,11 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
 # wait for bootstrap to apply config entries
 wait_for_config_entry proxy-defaults global
 wait_for_config_entry service-resolver s2
 
-gen_envoy_bootstrap s1 19000
-gen_envoy_bootstrap s2 19001
-gen_envoy_bootstrap s3 19002
-
-export REQUIRED_SERVICES="
-s1 s1-sidecar-proxy
-s2 s2-sidecar-proxy
-s3 s3-sidecar-proxy
-"
+gen_envoy_bootstrap s1 19000 primary
+gen_envoy_bootstrap s2 19001 primary
+gen_envoy_bootstrap s3 19002 primary
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+export REQUIRED_SERVICES="$DEFAULT_REQUIRED_SERVICES s3 s3-sidecar-proxy"
@@ -31,7 +31,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s3" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s3 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s3.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to its upstream simply" {
@@ -1,16 +1,8 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
 # Force rebuild of the exec container since this doesn't happen if only the
 # version argument changed which means we end up testing the wrong version of
 # Envoy.
-docker-compose build s1-sidecar-proxy-consul-exec
-
-# Bring up s1 and it's proxy as well because the check that it has a cert causes
-# a proxy connection to be opened and having the backend not be available seems
-# to cause Envoy to fail non-deterministically in CI (rarely on local machine).
-# It might be related to this know issue
-# https://github.com/envoyproxy/envoy/issues/2800 where TcpProxy will error if
-# the backend is down sometimes part way through the handshake.
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy-consul-exec"
+docker-compose build s1-sidecar-proxy-consul-exec consul-primary
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# Bring up s1 and it's proxy as well because the check that it has a cert causes
+# a proxy connection to be opened and having the backend not be available seems
+# to cause Envoy to fail non-deterministically in CI (rarely on local machine).
+# It might be related to this know issue
+# https://github.com/envoyproxy/envoy/issues/2800 where TcpProxy will error if
+# the backend is down sometimes part way through the handshake.
+export REQUIRED_SERVICES="s1 s1-sidecar-proxy-consul-exec"
@@ -1,8 +1,6 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
-gen_envoy_bootstrap s1 19000
-gen_envoy_bootstrap s2 19001
-
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy fake-statsd"
+gen_envoy_bootstrap s1 19000 primary
+gen_envoy_bootstrap s2 19001 primary
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+export REQUIRED_SERVICES="$DEFAULT_REQUIRED_SERVICES fake-statsd"
@@ -15,7 +15,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2" {
@@ -28,7 +28,7 @@ load helpers
 }
 
 @test "s1 proxy should be sending metrics to statsd" {
-  run retry_default cat /workdir/statsd/statsd.log
+  run retry_default cat /workdir/primary/statsd/statsd.log
 
   echo "METRICS:"
   echo "$output"
@@ -39,7 +39,7 @@ load helpers
 }
 
 @test "s1 proxy should be sending dogstatsd tagged metrics" {
-  run retry_default must_match_in_statsd_logs '[#,]local_cluster:s1(,|$)'
+  run retry_default must_match_in_statsd_logs '[#,]local_cluster:s1(,|$)' primary
 
   echo "OUTPUT: $output"
 
@@ -47,7 +47,7 @@ load helpers
 }
 
 @test "s1 proxy should be adding cluster name as a tag" {
-  run retry_default must_match_in_statsd_logs '[#,]envoy.cluster_name:s2(,|$)'
+  run retry_default must_match_in_statsd_logs '[#,]envoy.cluster_name:s2(,|$)' primary
 
   echo "OUTPUT: $output"
 
@@ -55,7 +55,7 @@ load helpers
 }
 
 @test "s1 proxy should be sending additional configured tags" {
-  run retry_default must_match_in_statsd_logs '[#,]foo:bar(,|$)'
+  run retry_default must_match_in_statsd_logs '[#,]foo:bar(,|$)' primary
 
   echo "OUTPUT: $output"
 
@@ -0,0 +1,2 @@
+bind_addr = "0.0.0.0"
+advertise_addr = "{{ GetInterfaceIP \"eth0\" }}"
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+snapshot_envoy_admin localhost:19000 s1 primary || true
+snapshot_envoy_admin localhost:19001 s2 secondary || true
+snapshot_envoy_admin localhost:19002 mesh-gateway primary || true
+snapshot_envoy_admin localhost:19003 mesh-gateway secondary || true
@@ -0,0 +1,5 @@
+services {
+  name = "mesh-gateway"
+  kind = "mesh-gateway"
+  port = 4431
+}
@@ -0,0 +1,20 @@
+services {
+  name = "s1"
+  port = 8080
+  connect {
+    sidecar_service {
+      proxy {
+        upstreams = [
+          {
+            destination_name = "s2"
+            datacenter = "secondary"
+            local_bind_port = 5000
+            mesh_gateway {
+              mode = "local"
+            }
+          }
+        ]
+      }
+    }
+  }
+}
@@ -0,0 +1 @@
+# We don't want an s2 service in the primary dc
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -eEuo pipefail
+
+gen_envoy_bootstrap s1 19000 primary
+gen_envoy_bootstrap mesh-gateway 19002 primary true
+retry_default docker_consul primary curl -s "http://localhost:8500/v1/catalog/service/consul?dc=secondary" >/dev/null
@@ -0,0 +1,41 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "s1 proxy is running correct version" {
+  assert_envoy_version 19000
+}
+
+@test "s1 proxy admin is up on :19000" {
+  retry_default curl -f -s localhost:19000/stats -o /dev/null
+}
+
+@test "gateway-primary proxy admin is up on :19002" {
+  retry_default curl -f -s localhost:19002/stats -o /dev/null
+}
+
+@test "s1 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21000 s1
+}
+
+@test "s1 upstream should have healthy endpoints for s2" {
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.secondary HEALTHY 1
+}
+
+@test "gateway-primary should have healthy endpoints for secondary" {
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19002 secondary HEALTHY 1
+}
+
+@test "s1 upstream should be able to connect to s2" {
+  run retry_default curl -s -f -d hello localhost:5000
+  [ "$status" -eq 0 ]
+  [ "$output" = "hello" ]
+}
+
+@test "s1 upstream made 1 connection" {
+  assert_envoy_metric 127.0.0.1:19000 "cluster.s2.default.secondary.*cx_total" 1
+}
+
+@test "gateway-primary is used for the upstream connection" {
+  assert_envoy_metric 127.0.0.1:19002 "cluster.secondary.*cx_total" 1
+}
@@ -0,0 +1,5 @@
+services {
+  name = "mesh-gateway"
+  kind = "mesh-gateway"
+  port = 4432
+}
@@ -0,0 +1 @@
+retry_join_wan = ["consul-primary"]
@@ -0,0 +1 @@
+# we don't want an s1 service in the secondary dc
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -eEuo pipefail
+
+gen_envoy_bootstrap s2 19001 secondary
+gen_envoy_bootstrap mesh-gateway 19003 secondary true
+retry_default docker_consul secondary curl -s "http://localhost:8500/v1/catalog/service/consul?dc=primary" >/dev/null
@@ -0,0 +1,27 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "s2 proxy is running correct version" {
+  assert_envoy_version 19001
+}
+
+@test "s2 proxy admin is up on :19001" {
+  retry_default curl -f -s localhost:19001/stats -o /dev/null
+}
+
+@test "gateway-secondary proxy admin is up on :19003" {
+  retry_default curl -f -s localhost:19003/stats -o /dev/null
+}
+
+@test "s2 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21000 s2 secondary
+}
+
+@test "s2 proxy should be healthy" {
+  assert_service_has_healthy_instances s2 1
+}
+
+@test "gateway-secondary is used for the upstream connection" {
+  assert_envoy_metric 127.0.0.1:19003 "cluster.s2.default.secondary.*cx_total" 1
+}
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+export REQUIRED_SERVICES="s1 s1-sidecar-proxy gateway-primary s2-secondary s2-sidecar-proxy-secondary gateway-secondary"
+export REQUIRE_SECONDARY=1
@@ -0,0 +1,2 @@
+bind_addr = "0.0.0.0"
+advertise_addr = "{{ GetInterfaceIP \"eth0\" }}"
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+snapshot_envoy_admin localhost:19000 s1 primary || true
+snapshot_envoy_admin localhost:19001 s2 secondary || true
+snapshot_envoy_admin localhost:19002 mesh-gateway primary || true
+snapshot_envoy_admin localhost:19003 mesh-gateway secondary || true
@@ -0,0 +1,20 @@
+services {
+  name = "s1"
+  port = 8080
+  connect {
+    sidecar_service {
+      proxy {
+        upstreams = [
+          {
+            destination_name = "s2"
+            datacenter = "secondary"
+            local_bind_port = 5000
+            mesh_gateway {
+              mode = "remote"
+            }
+          }
+        ]
+      }
+    }
+  }
+}
@@ -0,0 +1 @@
+# We don't want an s2 service in the primary dc
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+set -eEuo pipefail
+
+gen_envoy_bootstrap s1 19000 primary
@@ -0,0 +1,29 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "s1 proxy is running correct version" {
+  assert_envoy_version 19000
+}
+
+@test "s1 proxy admin is up on :19000" {
+  retry_default curl -f -s localhost:19000/stats -o /dev/null
+}
+
+@test "s1 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21000 s1
+}
+
+@test "s1 upstream should have healthy endpoints for s2" {
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.secondary HEALTHY 1
+}
+
+@test "s1 upstream should be able to connect to s2" {
+  run retry_default curl -s -f -d hello localhost:5000
+  [ "$status" -eq 0 ]
+  [ "$output" = "hello" ]
+}
+
+@test "s1 upstream made 1 connection" {
+  assert_envoy_metric 127.0.0.1:19000 "cluster.s2.default.secondary.*cx_total" 1
+}
@@ -0,0 +1,5 @@
+services {
+  name = "mesh-gateway"
+  kind = "mesh-gateway"
+  port = 4432
+}
@@ -0,0 +1 @@
+retry_join_wan = ["consul-primary"]
@@ -0,0 +1 @@
+# we don't want an s1 service in the secondary dc
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -eEuo pipefail
+
+gen_envoy_bootstrap s2 19001 secondary
+gen_envoy_bootstrap mesh-gateway 19003 secondary true
+retry_default docker_consul secondary curl -s "http://localhost:8500/v1/catalog/service/consul?dc=primary" >/dev/null
@@ -0,0 +1,27 @@
+#!/usr/bin/env bats
+
+load helpers
+
+@test "s2 proxy is running correct version" {
+  assert_envoy_version 19001
+}
+
+@test "s2 proxy admin is up on :19001" {
+  retry_default curl -f -s localhost:19001/stats -o /dev/null
+}
+
+@test "gateway-secondary proxy admin is up on :19003" {
+  retry_default curl -f -s localhost:19003/stats -o /dev/null
+}
+
+@test "s2 proxy listener should be up and have right cert" {
+  assert_proxy_presents_cert_uri localhost:21000 s2 secondary
+}
+
+@test "s2 proxy should be healthy" {
+  assert_service_has_healthy_instances s2 1
+}
+
+@test "gateway-secondary is used for the upstream connection" {
+  assert_envoy_metric 127.0.0.1:19003 "cluster.s2.default.secondary.*cx_total" 1
+}
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+export REQUIRED_SERVICES="s1 s1-sidecar-proxy gateway-primary s2-secondary s2-sidecar-proxy-secondary gateway-secondary"
+export REQUIRE_SECONDARY=1
@@ -1,8 +1,6 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
-gen_envoy_bootstrap s1 19000
-gen_envoy_bootstrap s2 19001
-
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy fake-statsd"
+gen_envoy_bootstrap s1 19000 primary
+gen_envoy_bootstrap s2 19001 primary
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+export REQUIRED_SERVICES="$DEFAULT_REQUIRED_SERVICES fake-statsd"
@@ -15,7 +15,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2 via grpc" {
@@ -27,8 +27,7 @@ load helpers
 }
 
 @test "s1 proxy should be sending gRPC metrics to statsd" {
-  run retry_default must_match_in_statsd_logs 'envoy.cluster.default.dc1.internal.*.consul.grpc.PingServer.total'
-
+  run retry_default must_match_in_statsd_logs 'envoy.cluster.default.primary.internal.*.consul.grpc.PingServer.total'
   echo "OUTPUT: $output"
 
   [ "$status" == 0 ]
@@ -1,11 +1,9 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
 # Setup deny intention
-docker_consul intention create -deny s1 s2
+docker_consul primary intention create -deny s1 s2
 
-gen_envoy_bootstrap s1 19000
-gen_envoy_bootstrap s2 19001
-
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
+gen_envoy_bootstrap s1 19000 primary
+gen_envoy_bootstrap s2 19001 primary
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
 # Remove deny intention
-docker_consul intention delete s1 s2
+docker_consul primary intention delete s1 s2
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should NOT be able to connect to s2" {
@@ -1,8 +1,6 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
-gen_envoy_bootstrap s1 19000
-gen_envoy_bootstrap s2 19001
-
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
+gen_envoy_bootstrap s1 19000 primary
+gen_envoy_bootstrap s2 19001 primary
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2 with http/1.1" {
@@ -1,8 +1,6 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
-gen_envoy_bootstrap s1 19000
-gen_envoy_bootstrap s2 19001
-
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
+gen_envoy_bootstrap s1 19000 primary
+gen_envoy_bootstrap s2 19001 primary
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2 via http2" {
@@ -1,8 +1,6 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
-gen_envoy_bootstrap s1 19000
-gen_envoy_bootstrap s2 19001
-
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
+gen_envoy_bootstrap s1 19000 primary
+gen_envoy_bootstrap s2 19001 primary
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2 with http/1.1" {
@@ -1,8 +1,6 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
 gen_envoy_bootstrap s1 19000
 gen_envoy_bootstrap s2 19001
-
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy fake-statsd"
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+export REQUIRED_SERVICES="$DEFAULT_REQUIRED_SERVICES fake-statsd"
@@ -15,7 +15,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2" {
@@ -25,7 +25,7 @@ load helpers
 }
 
 @test "s1 proxy should be sending metrics to statsd" {
-  run retry_default must_match_in_statsd_logs '^envoy\.'
+  run retry_default must_match_in_statsd_logs '^envoy\.' primary
 
   echo "OUTPUT: $output"
 
@@ -1,8 +1,6 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
-gen_envoy_bootstrap s1 19000
-gen_envoy_bootstrap s2 19001
-
-export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy jaeger"
+gen_envoy_bootstrap s1 19000 primary
+gen_envoy_bootstrap s2 19001 primary
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+export REQUIRED_SERVICES="$DEFAULT_REQUIRED_SERVICES jaeger"
@@ -23,7 +23,7 @@ load helpers
 }
 
 @test "s1 upstream should have healthy endpoints for s2" {
-  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2 HEALTHY 1
+  assert_upstream_has_endpoints_in_status 127.0.0.1:19000 s2.default.primary HEALTHY 1
 }
 
 @test "s1 upstream should be able to connect to s2" {
@@ -0,0 +1 @@
+primary_datacenter = "primary"
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+export DEFAULT_REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
+export REQUIRED_SERVICES="${DEFAULT_REQUIRED_SERVICES}"
+export REQUIRE_SECONDARY=0
@@ -22,13 +22,15 @@ services:
       - sleep
       - "86400"
 
-  consul:
+  consul-primary:
     image: "consul-dev"
     command:
       - "agent"
      - "-dev"
+      - "-datacenter"
+      - "primary"
       - "-config-dir"
-      - "/workdir/consul"
+      - "/workdir/primary/consul"
       - "-client"
       - "0.0.0.0"
     volumes:
@@ -44,7 +46,7 @@ services:
 
   s1:
     depends_on:
-      - consul
+      - consul-primary
     image: "fortio/fortio"
     environment:
       - "FORTIO_NAME=s1"
@@ -56,11 +58,11 @@ services:
       - ":8079"
       - "-redirect-port"
      - "disabled"
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s2:
     depends_on:
-      - consul
+      - consul-primary
     image: "fortio/fortio"
     environment:
       - "FORTIO_NAME=s2"
@@ -72,11 +74,11 @@ services:
       - ":8179"
       - "-redirect-port"
      - "disabled"
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s2-v1:
     depends_on:
-      - consul
+      - consul-primary
     image: "fortio/fortio"
     environment:
       - "FORTIO_NAME=s2-v1"
@@ -88,11 +90,11 @@ services:
       - ":8178"
       - "-redirect-port"
      - "disabled"
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s2-v2:
     depends_on:
-      - consul
+      - consul-primary
     image: "fortio/fortio"
     environment:
       - "FORTIO_NAME=s2-v2"
@@ -104,11 +106,11 @@ services:
       - ":8177"
       - "-redirect-port"
      - "disabled"
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s3:
     depends_on:
-      - consul
+      - consul-primary
     image: "fortio/fortio"
     environment:
       - "FORTIO_NAME=s3"
@@ -120,11 +122,11 @@ services:
       - ":8279"
       - "-redirect-port"
      - "disabled"
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s3-v1:
     depends_on:
-      - consul
+      - consul-primary
     image: "fortio/fortio"
     environment:
       - "FORTIO_NAME=s3-v1"
@@ -136,11 +138,11 @@ services:
       - ":8278"
       - "-redirect-port"
      - "disabled"
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s3-v2:
    depends_on:
-      - consul
+      - consul-primary
     image: "fortio/fortio"
     environment:
       - "FORTIO_NAME=s3-v2"
@@ -152,16 +154,16 @@ services:
       - ":8277"
       - "-redirect-port"
      - "disabled"
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s1-sidecar-proxy:
     depends_on:
-      - consul
+      - consul-primary
     image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
     command:
       - "envoy"
       - "-c"
-      - "/workdir/envoy/s1-bootstrap.json"
+      - "/workdir/primary/envoy/s1-bootstrap.json"
       - "-l"
       - "debug"
       # Hot restart breaks since both envoys seem to interact with each other
@@ -173,16 +175,16 @@ services:
       - "1"
     volumes:
       - *workdir-volume
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s2-sidecar-proxy:
     depends_on:
-      - consul
+      - consul-primary
     image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
     command:
       - "envoy"
       - "-c"
-      - "/workdir/envoy/s2-bootstrap.json"
+      - "/workdir/primary/envoy/s2-bootstrap.json"
       - "-l"
       - "debug"
       # Hot restart breaks since both envoys seem to interact with each other
@@ -194,16 +196,16 @@ services:
       - "1"
     volumes:
       - *workdir-volume
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s2-v1-sidecar-proxy:
     depends_on:
-      - consul
+      - consul-primary
     image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
     command:
       - "envoy"
       - "-c"
-      - "/workdir/envoy/s2-v1-bootstrap.json"
+      - "/workdir/primary/envoy/s2-v1-bootstrap.json"
       - "-l"
       - "debug"
       # Hot restart breaks since both envoys seem to interact with each other
@@ -215,16 +217,16 @@ services:
       - "1"
     volumes:
       - *workdir-volume
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s2-v2-sidecar-proxy:
     depends_on:
-      - consul
+      - consul-primary
     image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
     command:
       - "envoy"
       - "-c"
-      - "/workdir/envoy/s2-v2-bootstrap.json"
+      - "/workdir/primary/envoy/s2-v2-bootstrap.json"
       - "-l"
       - "debug"
       # Hot restart breaks since both envoys seem to interact with each other
@@ -236,16 +238,16 @@ services:
       - "1"
     volumes:
       - *workdir-volume
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s3-sidecar-proxy:
     depends_on:
-      - consul
+      - consul-primary
     image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
     command:
       - "envoy"
       - "-c"
-      - "/workdir/envoy/s3-bootstrap.json"
+      - "/workdir/primary/envoy/s3-bootstrap.json"
       - "-l"
       - "debug"
       # Hot restart breaks since both envoys seem to interact with each other
@@ -257,16 +259,16 @@ services:
       - "1"
     volumes:
       - *workdir-volume
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s3-v1-sidecar-proxy:
     depends_on:
-      - consul
+      - consul-primary
     image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
     command:
       - "envoy"
       - "-c"
-      - "/workdir/envoy/s3-v1-bootstrap.json"
+      - "/workdir/primary/envoy/s3-v1-bootstrap.json"
       - "-l"
       - "debug"
       # Hot restart breaks since both envoys seem to interact with each other
@@ -278,16 +280,16 @@ services:
       - "1"
     volumes:
       - *workdir-volume
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   s3-v2-sidecar-proxy:
     depends_on:
-      - consul
+      - consul-primary
     image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
     command:
       - "envoy"
       - "-c"
-      - "/workdir/envoy/s3-v2-bootstrap.json"
+      - "/workdir/primary/envoy/s3-v2-bootstrap.json"
       - "-l"
       - "debug"
       # Hot restart breaks since both envoys seem to interact with each other
@@ -299,11 +301,11 @@ services:
       - "1"
     volumes:
       - *workdir-volume
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
-  verify:
+  verify-primary:
     depends_on:
-      - consul
+      - consul-primary
     build:
       context: .
       dockerfile: Dockerfile-bats
@@ -312,15 +314,14 @@ services:
       - ENVOY_VERSION
     command:
       - "--pretty"
-      - "/workdir/bats"
+      - "/workdir/primary/bats"
     volumes:
       - *workdir-volume
-    network_mode: service:consul
-    pid: host
+    network_mode: service:consul-primary
 
   s1-sidecar-proxy-consul-exec:
     depends_on:
-      - consul
+      - consul-primary
     build:
       context: .
       dockerfile: Dockerfile-consul-envoy
@@ -336,11 +337,11 @@ services:
       - "--"
       - "-l"
       - "debug"
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   fake-statsd:
     depends_on:
-      - consul
+      - consul-primary
     image: "alpine/socat"
     command:
       - -u
@@ -348,10 +349,10 @@ services:
       # This magic incantation is needed since Envoy doesn't add newlines and so
       # we need each packet to be passed to echo to add a new line before
       # appending.
-      - SYSTEM:'xargs -0 echo >> /workdir/statsd/statsd.log'
+      - SYSTEM:'xargs -0 echo >> /workdir/primary/statsd/statsd.log'
     volumes:
       - *workdir-volume
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   wipe-volumes:
     volumes:
@@ -379,12 +380,180 @@ services:
     volumes:
       - *workdir-volume
     image: openzipkin/zipkin
-    network_mode: service:consul
+    network_mode: service:consul-primary
 
   jaeger:
     volumes:
       - *workdir-volume
     image: jaegertracing/all-in-one:1.11
-    network_mode: service:consul
+    network_mode: service:consul-primary
     command:
       - --collector.zipkin.http-port=9411
 
+  consul-secondary:
+    image: "consul-dev"
+    command:
+      - "agent"
+      - "-dev"
+      - "-datacenter"
+      - "secondary"
+      - "-config-dir"
+      - "/workdir/secondary/consul"
+      - "-client"
+      - "0.0.0.0"
+    volumes:
+      - *workdir-volume
+    ports:
+      # Exposing to host makes debugging locally a bit easier
+      - "9500:8500"
+      - "9502:8502"
+
+  s1-secondary:
+    depends_on:
+      - consul-secondary
+    image: "fortio/fortio"
+    environment:
+      - "FORTIO_NAME=s1"
+    command:
+      - "server"
+      - "-http-port"
+      - ":8080"
+      - "-grpc-port"
+      - ":8079"
+      - "-redirect-port"
+      - "disabled"
+    network_mode: service:consul-secondary
+
+  s2-secondary:
+    depends_on:
+      - consul-secondary
+    image: "fortio/fortio"
+    environment:
+      - "FORTIO_NAME=s2"
+    command:
+      - "server"
+      - "-http-port"
+      - ":8181"
+      - "-grpc-port"
+      - ":8179"
+      - "-redirect-port"
+      - "disabled"
+    network_mode: service:consul-secondary
+
+  s1-sidecar-proxy-secondary:
+    depends_on:
+      - consul-secondary
+    image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
+    command:
+      - "envoy"
+      - "-c"
+      - "/workdir/secondary/envoy/s1-bootstrap.json"
+      - "-l"
+      - "debug"
+      # Hot restart breaks since both envoys seem to interact with each other
+      # despite separate containers that don't share IPC namespace. Not quite
+      # sure how this happens but may be due to unix socket being in some shared
+      # location?
+      - "--disable-hot-restart"
+      - "--drain-time-s"
+      - "1"
+    volumes:
+      - *workdir-volume
+    network_mode: service:consul-secondary
+
+  s2-sidecar-proxy-secondary:
+    depends_on:
+      - consul-secondary
+    image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
+    command:
+      - "envoy"
+      - "-c"
+      - "/workdir/secondary/envoy/s2-bootstrap.json"
+      - "-l"
+      - "debug"
+      # Hot restart breaks since both envoys seem to interact with each other
+      # despite separate containers that don't share IPC namespace. Not quite
+      # sure how this happens but may be due to unix socket being in some shared
+      # location?
+      - "--disable-hot-restart"
+      - "--drain-time-s"
+      - "1"
+    volumes:
+      - *workdir-volume
+    network_mode: service:consul-secondary
+
+  gateway-primary:
+    depends_on:
+      - consul-primary
+    image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
+    command:
+      - "envoy"
+      - "-c"
+      - "/workdir/primary/envoy/mesh-gateway-bootstrap.json"
+      - "-l"
+      - "debug"
+      # Hot restart breaks since both envoys seem to interact with each other
+      # despite separate containers that don't share IPC namespace. Not quite
+      # sure how this happens but may be due to unix socket being in some shared
+      # location?
+      - "--disable-hot-restart"
+      - "--drain-time-s"
+      - "1"
+    volumes:
+      - *workdir-volume
+    network_mode: service:consul-primary
+
+  gateway-secondary:
+    depends_on:
+      - consul-secondary
+    image: "envoyproxy/envoy:v${ENVOY_VERSION:-1.8.0}"
+    command:
+      - "envoy"
+      - "-c"
+      - "/workdir/secondary/envoy/mesh-gateway-bootstrap.json"
+      - "-l"
+      - "debug"
+      # Hot restart breaks since both envoys seem to interact with each other
+      # despite separate containers that don't share IPC namespace. Not quite
+      # sure how this happens but may be due to unix socket being in some shared
+      # location?
+      - "--disable-hot-restart"
+      - "--drain-time-s"
+      - "1"
+    volumes:
+      - *workdir-volume
+    network_mode: service:consul-secondary
+
+  verify-primary:
+    depends_on:
+      - consul-primary
+    build:
+      context: .
+      dockerfile: Dockerfile-bats
+    tty: true
+    environment:
+      - ENVOY_VERSION
+    command:
+      - "--pretty"
+      - "/workdir/primary/bats"
+    volumes:
+      - *workdir-volume
+    network_mode: service:consul-primary
+    pid: host
+
+  verify-secondary:
+    depends_on:
+      - consul-secondary
+    build:
+      context: .
+      dockerfile: Dockerfile-bats
+    tty: true
+    environment:
+      - ENVOY_VERSION
+    command:
+      - "--pretty"
+      - "/workdir/secondary/bats"
+    volumes:
+      - *workdir-volume
+    network_mode: service:consul-secondary
+    pid: host
@@ -9,23 +9,42 @@ function retry {
   shift
   local delay=$1
   shift
-  while true; do
-    "$@" && break || {
-      exit=$?
-      if [[ $n -lt $max ]]; then
-        ((n++))
-        echo "Command failed. Attempt $n/$max:"
-        sleep $delay;
-      else
-        echo "The command has failed after $n attempts." >&2
-        return $exit
-      fi
-    }
-  done
+
+  local errtrace=0
+  if grep -q "errtrace" <<< "$SHELLOPTS"
+  then
+    errtrace=1
+    set +E
+  fi
+
+  for ((i=1;i<=$max;i++))
+  do
+    if $@
+    then
+      if test $errtrace -eq 1
+      then
+        set -E
+      fi
+      return 0
+    else
+      echo "Command failed. Attempt $i/$max:"
+      sleep $delay
+    fi
+  done
+
+  if test $errtrace -eq 1
+  then
+    set -E
+  fi
+  return 1
 }
 
 function retry_default {
-  retry 5 1 $@
+  set +E
+  ret=0
+  retry 5 1 $@ || ret=1
+  set -E
+  return $ret
 }
 
 function retry_long {
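The rewritten retry deliberately switches errtrace (set -E) off while it loops, so the expected intermediate failures do not fire the ERR trap that run-tests.sh installs, then restores it before returning. A minimal usage sketch, assuming helpers.bash is sourced; flaky_check is a hypothetical command used purely for illustration:

    #!/bin/bash
    set -eEuo pipefail
    source helpers.bash   # provides retry / retry_default

    # Hypothetical check that fails twice before succeeding.
    attempts=0
    function flaky_check {
      attempts=$((attempts + 1))
      [ "$attempts" -ge 3 ]
    }

    # retry <max> <delay> <command...>: up to 5 attempts, 1s apart, without
    # tripping the ERR trap on the intermediate failures.
    retry 5 1 flaky_check
    echo "succeeded after $attempts attempts"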
@@ -60,16 +79,36 @@ function echoblue {
   tput sgr0
 }
 
+function is_set {
+  # Arguments:
+  #   $1 - string value to check its truthiness
+  #
+  # Return:
+  #   0 - is truthy (backwards I know but allows syntax like `if is_set <var>` to work)
+  #   1 - is not truthy
+
+  local val=$(tr '[:upper:]' '[:lower:]' <<< "$1")
+  case $val in
+    1 | t | true | y | yes)
+      return 0
+      ;;
+    *)
+      return 1
+      ;;
+  esac
+}
+
 function get_cert {
   local HOSTPORT=$1
-  openssl s_client -connect $HOSTPORT \
-    -showcerts 2>/dev/null \
-    | openssl x509 -noout -text
+  CERT=$(openssl s_client -connect $HOSTPORT -showcerts )
+  openssl x509 -noout -text <<< "$CERT"
 }
 
 function assert_proxy_presents_cert_uri {
   local HOSTPORT=$1
   local SERVICENAME=$2
+  local DC=${3:-primary}
 
   CERT=$(retry_default get_cert $HOSTPORT)
 
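is_set gates every secondary-datacenter step in run-tests.sh further down. A short sketch of the call pattern, assuming helpers.bash is sourced (the variable value is illustrative):

    export REQUIRE_SECONDARY=1

    if is_set "$REQUIRE_SECONDARY"
    then
      echo "will also bring up consul-secondary"
    fi

    # Anything other than 1/t/true/y/yes is falsy and returns 1.
    is_set "no" || echo "falsy"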
@@ -77,7 +116,7 @@ function assert_proxy_presents_cert_uri {
   echo "GOT CERT:"
   echo "$CERT"
 
-  echo "$CERT" | grep -Eo "URI:spiffe://([a-zA-Z0-9-]+).consul/ns/default/dc/dc1/svc/$SERVICENAME"
+  echo "$CERT" | grep -Eo "URI:spiffe://([a-zA-Z0-9-]+).consul/ns/default/dc/${DC}/svc/$SERVICENAME"
 }
 
 function assert_envoy_version {
@@ -119,9 +158,25 @@ function get_envoy_stats_flush_interval {
 function snapshot_envoy_admin {
   local HOSTPORT=$1
   local ENVOY_NAME=$2
+  local DC=${3:-primary}
 
-  docker_wget "http://${HOSTPORT}/config_dump" -q -O - > "./workdir/envoy/${ENVOY_NAME}-config_dump.json"
-  docker_wget "http://${HOSTPORT}/clusters?format=json" -q -O - > "./workdir/envoy/${ENVOY_NAME}-clusters.json"
+  docker_wget "$DC" "http://${HOSTPORT}/config_dump" -q -O - > "./workdir/${DC}/envoy/${ENVOY_NAME}-config_dump.json"
+  docker_wget "$DC" "http://${HOSTPORT}/clusters?format=json" -q -O - > "./workdir/${DC}/envoy/${ENVOY_NAME}-clusters.json"
+  docker_wget "$DC" "http://${HOSTPORT}/stats" -q -O - > "./workdir/${DC}/envoy/${ENVOY_NAME}-stats.txt"
+}
+
+function get_all_envoy_metrics {
+  local HOSTPORT=$1
+  curl -s -f $HOSTPORT/stats
+  return $?
+}
+
+function get_envoy_metrics {
+  local HOSTPORT=$1
+  local METRICS=$2
+
+  get_all_envoy_metrics $HOSTPORT | grep "$METRICS"
 }
 
 function get_upstream_endpoint_in_status_count {
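The per-case capture.sh files shown earlier drive this helper; with the new third argument the snapshots land under the matching datacenter's workdir. Calls taken from the gateway case's capture.sh:

    # Dump Envoy admin state (config_dump, clusters, stats) into workdir/<dc>/envoy/.
    snapshot_envoy_admin localhost:19002 mesh-gateway primary || true
    snapshot_envoy_admin localhost:19003 mesh-gateway secondary || true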
@@ -133,7 +188,7 @@ function get_upstream_endpoint_in_status_count {
   # echo "$output" >&3
   echo "$output" | jq --raw-output "
 .cluster_statuses[]
-| select(.name|startswith(\"${CLUSTER_NAME}.default.dc1.internal.\"))
+| select(.name|startswith(\"${CLUSTER_NAME}\"))
 | [.host_statuses[].health_status.eds_health_status]
 | [select(.[] == \"${HEALTH_STATUS}\")]
 | length"
@@ -159,6 +214,36 @@ function assert_upstream_has_endpoints_in_status {
   [ "$status" -eq 0 ]
 }
 
+function assert_envoy_metric {
+  set -eEuo pipefail
+  local HOSTPORT=$1
+  local METRIC=$2
+  local EXPECT_COUNT=$3
+
+  METRICS=$(get_envoy_metrics $HOSTPORT "$METRIC")
+
+  if [ -z "${METRICS}" ]
+  then
+    echo "Metric not found" 1>&2
+    return 1
+  fi
+
+  GOT_COUNT=$(awk -F: '{print $2}' <<< "$METRICS" | head -n 1 | tr -d ' ')
+
+  if [ -z "$GOT_COUNT" ]
+  then
+    echo "Couldn't parse metric count" 1>&2
+    return 1
+  fi
+
+  if [ $EXPECT_COUNT -ne $GOT_COUNT ]
+  then
+    echo "$METRIC - expected count: $EXPECT_COUNT, actual count: $GOT_COUNT" 1>&2
+    return 1
+  fi
+}
+
+
 function get_healthy_service_count {
   local SERVICE_NAME=$1
   run retry_default curl -s -f 127.0.0.1:8500/v1/health/connect/${SERVICE_NAME}?passing
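The gateway verify.bats files use this new assertion to prove traffic actually flowed through a gateway, not merely that endpoints were healthy. A usage sketch with the admin address and metric pattern taken from those tests:

    # Assert the primary mesh gateway proxied exactly one upstream connection;
    # 127.0.0.1:19002 is gateway-primary's Envoy admin port in this suite.
    assert_envoy_metric 127.0.0.1:19002 "cluster.secondary.*cx_total" 1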
@@ -184,22 +269,30 @@ function assert_service_has_healthy_instances {
 }
 
 function docker_consul {
-  docker run -i --rm --network container:envoy_consul_1 consul-dev $@
+  local DC=$1
+  shift 1
+  docker run -i --rm --network container:envoy_consul-${DC}_1 consul-dev "$@"
 }
 
 function docker_wget {
-  docker run -ti --rm --network container:envoy_consul_1 alpine:3.9 wget $@
+  local DC=$1
+  shift 1
+  docker run -ti --rm --network container:envoy_consul-${DC}_1 alpine:3.9 wget "$@"
 }
 
 function docker_curl {
-  docker run -ti --rm --network container:envoy_consul_1 --entrypoint curl consul-dev $@
+  local DC=$1
+  shift 1
+  docker run -ti --rm --network container:envoy_consul-${DC}_1 --entrypoint curl consul-dev "$@"
 }
 
 function get_envoy_pid {
   local BOOTSTRAP_NAME=$1
+  local DC=${2:-primary}
   run ps aux
   [ "$status" == 0 ]
-  PID="$(echo "$output" | grep "envoy -c /workdir/envoy/${BOOTSTRAP_NAME}-bootstrap.json" | awk '{print $1}')"
+  echo "$output" 1>&2
+  PID="$(echo "$output" | grep "envoy -c /workdir/$DC/envoy/${BOOTSTRAP_NAME}-bootstrap.json" | awk '{print $1}')"
   [ -n "$PID" ]
 
   echo "$PID"
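Every docker_* wrapper now takes the datacenter as its first argument and runs inside that datacenter's agent container network. Examples in the style used elsewhere in this commit (the service id in the second call is illustrative):

    # consul CLI against the primary agent container.
    docker_consul primary intention create -deny s1 s2

    # curl from inside the secondary datacenter's network namespace.
    docker_curl secondary -sLf "http://127.0.0.1:8500/v1/agent/service/s2" >/dev/null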
@@ -207,15 +300,19 @@ function get_envoy_pid {
 
 function kill_envoy {
   local BOOTSTRAP_NAME=$1
+  local DC=${2:-primary}
 
-  PID="$(get_envoy_pid $BOOTSTRAP_NAME)"
+  PID="$(get_envoy_pid $BOOTSTRAP_NAME "$DC")"
   echo "PID = $PID"
 
   kill -TERM $PID
 }
 
 function must_match_in_statsd_logs {
-  run cat /workdir/statsd/statsd.log
+  local DC=${2:-primary}
+
+  run cat /workdir/${DC}/statsd/statsd.log
   echo "$output"
   COUNT=$( echo "$output" | grep -Ec $1 )
 
   echo "COUNT of '$1' matches: $COUNT"
@@ -269,13 +366,21 @@ function must_fail_http_connection {
 function gen_envoy_bootstrap {
   SERVICE=$1
   ADMIN_PORT=$2
+  DC=${3:-primary}
+  IS_MGW=${4:-0}
+
+  PROXY_ID="$SERVICE"
+  if ! is_set "$IS_MGW"
+  then
+    PROXY_ID="$SERVICE-sidecar-proxy"
+  fi
 
-  if output=$(docker_consul connect envoy -bootstrap \
-    -proxy-id $SERVICE-sidecar-proxy \
+  if output=$(docker_consul "$DC" connect envoy -bootstrap \
+    -proxy-id $PROXY_ID \
     -admin-bind 0.0.0.0:$ADMIN_PORT 2>&1); then
 
     # All OK, write config to file
-    echo "$output" > workdir/envoy/$SERVICE-bootstrap.json
+    echo "$output" > workdir/${DC}/envoy/$SERVICE-bootstrap.json
  else
     status=$?
     # Command failed, instead of swallowing error (printed on stdout by docker
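With the two new optional arguments the same helper now bootstraps both sidecars and mesh gateways. Calls lifted from the gateway case's setup scripts:

    # Sidecar proxy: proxy-id becomes "s1-sidecar-proxy" and the bootstrap is
    # written to workdir/primary/envoy/s1-bootstrap.json.
    gen_envoy_bootstrap s1 19000 primary

    # Mesh gateway: a truthy 4th argument keeps proxy-id = "mesh-gateway".
    gen_envoy_bootstrap mesh-gateway 19002 primary true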
@@ -288,13 +393,13 @@ function gen_envoy_bootstrap {
 function read_config_entry {
   local KIND=$1
   local NAME=$2
-  docker_consul config read -kind $KIND -name $NAME
+  local DC=${3:-primary}
+
+  docker_consul "$DC" config read -kind $KIND -name $NAME
 }
 
 function wait_for_config_entry {
-  local KIND=$1
-  local NAME=$2
-  retry_default read_config_entry $KIND $NAME >/dev/null
+  retry_default read_config_entry "$@" >/dev/null
 }
 
 function delete_config_entry {
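Because wait_for_config_entry now just forwards "$@", callers can optionally append a datacenter, which flows through to read_config_entry's new third parameter. The two-argument form used by the setup scripts keeps working unchanged:

    wait_for_config_entry proxy-defaults global
    wait_for_config_entry service-defaults s1

    # Optional trailing dc, consumed by read_config_entry as ${3:-primary}.
    wait_for_config_entry proxy-defaults global secondary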
@@ -305,12 +410,14 @@ function delete_config_entry {
 
 function wait_for_agent_service_register {
   local SERVICE_ID=$1
-  retry_default docker_curl -sLf "http://127.0.0.1:8500/v1/agent/service/${SERVICE_ID}" >/dev/null
+  local DC=${2:-primary}
+  retry_default docker_curl "$DC" -sLf "http://127.0.0.1:8500/v1/agent/service/${SERVICE_ID}" >/dev/null
 }
 
 function set_ttl_check_state {
   local CHECK_ID=$1
   local CHECK_STATE=$2
+  local DC=${3:-primary}
 
   case "$CHECK_STATE" in
     pass)
@@ -324,7 +431,7 @@ function set_ttl_check_state {
       return 1
   esac
 
-  retry_default docker_curl -sL -XPUT "http://localhost:8500/v1/agent/check/warn/${CHECK_ID}"
+  retry_default docker_curl "${DC}" -sL -XPUT "http://localhost:8500/v1/agent/check/warn/${CHECK_ID}"
 }
 
 function get_upstream_fortio_name {
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-set -euo pipefail
+set -eEuo pipefail
 
 # DEBUG=1 enables set -x for this script so echos every command run
 DEBUG=${DEBUG:-}
@@ -36,31 +36,35 @@ source helpers.bash
 RESULT=1
 CLEANED_UP=0
 
-PREV_CMD=""
-THIS_CMD=""
-
 function cleanup {
   local STATUS="$?"
-  local CMD="$THIS_CMD"
 
   if [ "$CLEANED_UP" != 0 ] ; then
     return
   fi
   CLEANED_UP=1
 
-  if [ $STATUS -ne 0 ] ; then
-    # We failed due to set -e catching an error, output some useful info about
-    # that error.
-    echo "ERR: command exited with $STATUS"
-    echo "     command: $CMD"
+  if [ "$STATUS" -ne 0 ]
+  then
+    capture_logs
   fi
 
   docker-compose down -v --remove-orphans
 }
 trap cleanup EXIT
-# Magic to capture commands and statuses so we can show them when we exit due to
-# set -e This is useful for debugging setup.sh failures.
-trap 'PREV_CMD=$THIS_CMD; THIS_CMD=$BASH_COMMAND' DEBUG
+
+function command_error {
+  echo "ERR: command exited with status $1"
+  echo "     command:   $2"
+  echo "     line:      $3"
+  echo "     function:  $4"
+  echo "     called at: $5"
+  # printf '%s\n' "${FUNCNAME[@]}"
+  # printf '%s\n' "${BASH_SOURCE[@]}"
+  # printf '%s\n' "${BASH_LINENO[@]}"
+}
+
+trap 'command_error $? "${BASH_COMMAND}" "${LINENO}" "${FUNCNAME[0]}" "${BASH_SOURCE[0]}:${BASH_LINENO[0]}"' ERR
 
 # Cleanup from any previous unclean runs.
 docker-compose down -v --remove-orphans
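Pairing set -eE with an ERR trap is what lets the suite report the exact failing command: with errtrace enabled, the trap also fires for failures inside functions and sourced setup scripts. A standalone sketch of the mechanism (names and messages abbreviated from the script above):

    #!/bin/bash
    set -eEuo pipefail

    function command_error {
      echo "ERR: command exited with status $1"
      echo "     command: $2"
      echo "     line:    $3"
    }

    trap 'command_error $? "${BASH_COMMAND}" "${LINENO}"' ERR

    function will_fail {
      false  # with -E this still fires the ERR trap inside a function
    }

    will_fail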
@ -68,101 +72,258 @@ docker-compose down -v --remove-orphans
|
|||
# Start the volume container
|
||||
docker-compose up -d workdir
|
||||
|
||||
function init_workdir {
|
||||
local DC="$1"
|
||||
|
||||
if test -z "$DC"
|
||||
then
|
||||
DC=primary
|
||||
fi
|
||||
|
||||
# Note, we use explicit set of dirs so we don't delete .gitignore. Also,
|
||||
# don't wipe logs between runs as they are already split and we need them to
|
||||
# upload as artifacts later.
|
||||
rm -rf workdir/${DC}
|
||||
mkdir -p workdir/${DC}/{consul,envoy,bats,statsd}
|
||||
|
||||
# Reload consul config from defaults
|
||||
cp consul-base-cfg/* workdir/${DC}/consul/
|
||||
|
||||
# Add any overrides if there are any (no op if not)
|
||||
find ${CASE_DIR} -name '*.hcl' -maxdepth 1 -type f -exec cp -f {} workdir/${DC}/consul \;
|
||||
|
||||
# Copy all the test files
|
||||
find ${CASE_DIR} -name '*.bats' -maxdepth 1 -type f -exec cp -f {} workdir/${DC}/bats \;
|
||||
# Copy DC specific bats
|
||||
cp helpers.bash workdir/${DC}/bats
|
||||
|
||||
# Add any DC overrides
|
||||
if test -d "${CASE_DIR}/${DC}"
|
||||
then
|
||||
find ${CASE_DIR}/${DC} -type f -name '*.hcl' -exec cp -f {} workdir/${DC}/consul \;
|
||||
find ${CASE_DIR}/${DC} -type f -name '*.bats' -exec cp -f {} workdir/${DC}/bats \;
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
function start_consul {
|
||||
local DC=${1:-primary}
|
||||
|
||||
# Start consul now as setup script needs it up
|
||||
docker-compose rm -s -v -f consul-${DC} || true
|
||||
docker-compose up -d consul-${DC}
|
||||
}
|
||||
|
||||
function pre_service_setup {
|
||||
local DC=${1:-primary}
|
||||
|
||||
# Run test case setup (e.g. generating Envoy bootstrap, starting containers)
|
||||
if [ -f "${CASE_DIR}${DC}/setup.sh" ]
|
||||
then
|
||||
source ${CASE_DIR}${DC}/setup.sh
|
||||
else
|
||||
source ${CASE_DIR}setup.sh
|
||||
fi
|
||||
}
|
||||
|
||||
function start_services {
|
||||
# Push the state to the shared docker volume (note this is because CircleCI
|
||||
# can't use shared volumes)
|
||||
docker cp workdir/. envoy_workdir_1:/workdir
|
||||
|
||||
# Start containers required
|
||||
if [ ! -z "$REQUIRED_SERVICES" ] ; then
|
||||
docker-compose rm -s -v -f $REQUIRED_SERVICES || true
|
||||
docker-compose up --build -d $REQUIRED_SERVICES
|
||||
fi
|
||||
|
||||
return 0
|
||||
}

function verify {
  local DC=${1:-primary}

  # Execute tests
  res=0

  echo "- - - - - - - - - - - - - - - - - - - - - - - -"
  echoblue -n "CASE $CASE_STR"
  echo -n ": "

  # Nuke any previous case's verify container.
  docker-compose rm -s -v -f verify-${DC} || true

  if docker-compose up --abort-on-container-exit --exit-code-from verify-${DC} verify-${DC} ; then
    echogreen "✓ PASS"
  else
    echored "⨯ FAIL"
    res=1
  fi
  echo "================================================"

  return $res
}
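
# Note on the compose flags above: --exit-code-from makes docker-compose exit
# with the chosen container's status, so a failing bats suite inside
# verify-${DC} propagates into res and, via runTest below, into this script's
# exit code.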

function capture_logs {
  echo "Capturing Logs for $CASE_STR"
  mkdir -p "$LOG_DIR"
  services="$REQUIRED_SERVICES consul-primary"
  if is_set $REQUIRE_SECONDARY
  then
    services="$services consul-secondary"
  fi

  if [ -f "${CASE_DIR}capture.sh" ]
  then
    echo "Executing ${CASE_DIR}capture.sh"
    source ${CASE_DIR}capture.sh || true
  fi

  for cont in $services
  do
    echo "Capturing log for $cont"
    # Redirect stdout to the file first, then fold stderr into it, so the log
    # captures both streams (writing 2>&1 before the file redirect would not).
    docker-compose logs --no-color "$cont" > "${LOG_DIR}/${cont}.log" 2>&1
  done
}
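
# Illustrative only (case name and container are examples): with LOG_DIR as
# exported in the case loop below, each container's output lands in a
# per-case, per-Envoy-version file:
#
#   workdir/logs/${CASE_DIR}/${ENVOY_VERSION}/consul-primary.log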

function stop_services {
  # Teardown
  if [ -f "${CASE_DIR}teardown.sh" ] ; then
    source "${CASE_DIR}teardown.sh"
  fi
  docker-compose rm -s -v -f $REQUIRED_SERVICES || true
}

function initVars {
  source "defaults.sh"
  if [ -f "${CASE_DIR}vars.sh" ] ; then
    source "${CASE_DIR}vars.sh"
  fi
}
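
# Illustrative only (the values are hypothetical; the variable names come
# from their use elsewhere in this script): a case's vars.sh overrides
# whatever defaults.sh set, e.g.:
#
#   export REQUIRED_SERVICES="s1 s1-sidecar-proxy s2 s2-sidecar-proxy"
#   export REQUIRE_SECONDARY=1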

function runTest {
  initVars

  # Initialize the workdir
  init_workdir primary

  if is_set $REQUIRE_SECONDARY
  then
    init_workdir secondary
  fi

  # Wipe state
  docker-compose up wipe-volumes

  # Push the state to the shared docker volume (note this is because CircleCI
  # can't use shared volumes)
  docker cp workdir/. envoy_workdir_1:/workdir

  start_consul primary
  if [ $? -ne 0 ]
  then
    capture_logs
    return 1
  fi

  if is_set $REQUIRE_SECONDARY
  then
    start_consul secondary
    if [ $? -ne 0 ]
    then
      capture_logs
      return 1
    fi
  fi

  pre_service_setup primary
  if [ $? -ne 0 ]
  then
    capture_logs
    return 1
  fi

  if is_set $REQUIRE_SECONDARY
  then
    pre_service_setup secondary
    if [ $? -ne 0 ]
    then
      capture_logs
      return 1
    fi
  fi

  start_services
  if [ $? -ne 0 ]
  then
    capture_logs
    return 1
  fi

  # Run the verify container and report on the output
  verify primary
  TESTRESULT=$?

  if is_set $REQUIRE_SECONDARY && test "$TESTRESULT" -eq 0
  then
    verify secondary
    SECONDARYRESULT=$?

    if [ "$SECONDARYRESULT" -ne 0 ]
    then
      TESTRESULT=$SECONDARYRESULT
    fi
  fi

  if [ "$TESTRESULT" -ne 0 ]
  then
    capture_logs
  fi

  stop_services primary

  if is_set $REQUIRE_SECONDARY
  then
    stop_services secondary
  fi

  return $TESTRESULT
}

RESULT=0

for c in ./case-*/ ; do
  for ev in $ENVOY_VERSIONS ; do
    export CASE_DIR="${c}"
    export CASE_NAME=$( basename $c | cut -c6- )
    export CASE_ENVOY_VERSION="envoy $ev"
    export CASE_STR="$CASE_NAME, $CASE_ENVOY_VERSION"
    export ENVOY_VERSION="${ev}"
    export LOG_DIR="workdir/logs/${CASE_DIR}/${ENVOY_VERSION}"
    echo "================================================"
    echoblue "CASE $CASE_STR"
    echo "- - - - - - - - - - - - - - - - - - - - - - - -"

    if [ ! -z "$FILTER_TESTS" ] && echo "$CASE_STR" | grep -v "$FILTER_TESTS" > /dev/null ; then
      echo "   SKIPPED: doesn't match FILTER_TESTS=$FILTER_TESTS"
      continue 1
    fi
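
    # Illustrative invocation (the version value is hypothetical; assumes
    # this script is saved as run-tests.sh): run only matching cases against
    # one Envoy build and stop at the first failure:
    #
    #   FILTER_TESTS=mesh-gateway ENVOY_VERSIONS=1.10.0 STOP_ON_FAIL=1 ./run-tests.sh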

    if ! runTest
    then
      RESULT=1
    fi
    if [ $RESULT -ne 0 ] && [ ! -z "$STOP_ON_FAIL" ] ; then
      echo "   => STOPPING because STOP_ON_FAIL set"
      break 2
    fi
  done
done

@@ -171,7 +332,7 @@ done

cleanup

if [ $RESULT -eq 0 ] ; then
  echogreen "✓ PASS"
else
  echored "⨯ FAIL"
fi