mirror of https://github.com/status-im/consul.git

add peering_commontopo tests [NET-3700] (#17951)

Co-authored-by: R.B. Boyer <4903+rboyer@users.noreply.github.com>
Co-authored-by: R.B. Boyer <rb@hashicorp.com>
Co-authored-by: Freddy <freddygv@users.noreply.github.com>
Co-authored-by: NiniOak <anita.akaeze@hashicorp.com>

parent 548a5ca385
commit cd3fc9e1d0
@@ -422,7 +422,7 @@ jobs:
             -tags "${{ env.GOTAGS }}" \
             -timeout=30m \
             -json \
-            `go list ./... | grep -v upgrade` \
+            `go list -tags "${{ env.GOTAGS }}" ./... | grep -v upgrade | grep -v peering_commontopo` \
             --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \
             --target-version local \
             --latest-image docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }} \
@@ -591,6 +591,98 @@ jobs:
           DD_ENV: ci
         run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml

+  peering_commontopo-integration-test:
+    runs-on: ${{ fromJSON(needs.setup.outputs.compute-xl) }}
+    needs:
+      - setup
+      - dev-build
+    permissions:
+      id-token: write # NOTE: this permission is explicitly required for Vault auth.
+      contents: read
+    strategy:
+      fail-fast: false
+    env:
+      ENVOY_VERSION: "1.24.6"
+    steps:
+      - uses: actions/checkout@8e5e7e5ab8b370d6c329ec480221332ada57f0ab # v3.5.2
+      # NOTE: This step is specifically needed for ENT. It allows us to access the required private HashiCorp repos.
+      - name: Setup Git
+        if: ${{ endsWith(github.repository, '-enterprise') }}
+        run: git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}:@github.com".insteadOf "https://github.com"
+      - uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1
+        with:
+          go-version-file: 'go.mod'
+      - run: go env
+
+      # Get go binary from workspace
+      - name: fetch binary
+        uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2
+        with:
+          name: '${{ env.CONSUL_BINARY_UPLOAD_NAME }}'
+          path: .
+      - name: restore mode+x
+        run: chmod +x consul
+      - name: Build consul:local image
+        run: docker build -t ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local -f ./build-support/docker/Consul-Dev.dockerfile .
+      - name: Peering commonTopo Integration Tests
+        run: |
+          mkdir -p "${{ env.TEST_RESULTS_DIR }}"
+          cd ./test-integ/peering_commontopo
+          docker run --rm ${{ env.CONSUL_LATEST_IMAGE_NAME }}:local consul version
+          go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \
+            --raw-command \
+            --format=short-verbose \
+            --debug \
+            --packages="./..." \
+            -- \
+            go test \
+            -tags "${{ env.GOTAGS }}" \
+            -timeout=30m \
+            -json . \
+            --target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \
+            --target-version local \
+            --latest-image docker.mirror.hashicorp.services/${{ env.CONSUL_LATEST_IMAGE_NAME }} \
+            --latest-version latest
+          ls -lrt
+        env:
+          # this is needed because of incompatibility between RYUK container and GHA
+          GOTESTSUM_JUNITFILE: ${{ env.TEST_RESULTS_DIR }}/results.xml
+          GOTESTSUM_FORMAT: standard-verbose
+          COMPOSE_INTERACTIVE_NO_CLI: 1
+          # tput complains if this isn't set to something.
+          TERM: ansi
+      # NOTE: ENT specific step as we store secrets in Vault.
+      - name: Authenticate to Vault
+        if: ${{ endsWith(github.repository, '-enterprise') }}
+        id: vault-auth
+        run: vault-auth
+
+      # NOTE: ENT specific step as we store secrets in Vault.
+      - name: Fetch Secrets
+        if: ${{ endsWith(github.repository, '-enterprise') }}
+        id: secrets
+        uses: hashicorp/vault-action@v2.5.0
+        with:
+          url: ${{ steps.vault-auth.outputs.addr }}
+          caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }}
+          token: ${{ steps.vault-auth.outputs.token }}
+          secrets: |
+            kv/data/github/${{ github.repository }}/datadog apikey | DATADOG_API_KEY;
+
+      - name: prepare datadog-ci
+        if: ${{ !endsWith(github.repository, '-enterprise') }}
+        run: |
+          curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci"
+          chmod +x /usr/local/bin/datadog-ci
+
+      - name: upload coverage
+        # do not run on forks
+        if: github.event.pull_request.head.repo.full_name == github.repository
+        env:
+          DATADOG_API_KEY: "${{ endsWith(github.repository, '-enterprise') && env.DATADOG_API_KEY || secrets.DATADOG_API_KEY }}"
+          DD_ENV: ci
+        run: datadog-ci junit upload --service "$GITHUB_REPOSITORY" $TEST_RESULTS_DIR/results.xml
+
   test-integrations-success:
     needs:
       - setup
@@ -600,6 +692,7 @@ jobs:
       - generate-envoy-job-matrices
       - envoy-integration-test
       - compatibility-integration-test
+      - peering_commontopo-integration-test
       - upgrade-integration-test
     runs-on: ${{ fromJSON(needs.setup.outputs.compute-small) }}
     if: ${{ always() }}
@@ -10,6 +10,7 @@
 .vagrant/
 /pkg
 bin/
+workdir/
 changelog.tmp
 exit-code
 Thumbs.db
@@ -68,3 +69,4 @@ override.tf.json
 terraform.rc
 /go.work
 /go.work.sum
+.docker
@@ -32,11 +32,11 @@ type QueryFailoverTarget struct {

 	// Partition specifies a partition to try during failover
 	// Note: Partition are available only in Consul Enterprise
-	Partition string
+	Partition string `json:",omitempty"`

 	// Namespace specifies a namespace to try during failover
 	// Note: Namespaces are available only in Consul Enterprise
-	Namespace string
+	Namespace string `json:",omitempty"`
 }

 // QueryDNSOptions controls settings when query results are served over DNS.
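For context, a minimal sketch (editorial, not part of the commit) of what the added `json:",omitempty"` tags change, assuming standard `encoding/json` behavior: unset enterprise-only fields are now dropped from the marshaled query definition instead of being emitted as empty strings. The struct below is a hypothetical stand-in, not the real `api.QueryFailoverTarget`.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// failoverTarget is a hypothetical stand-in for api.QueryFailoverTarget,
// mirroring only the tags added in this change.
type failoverTarget struct {
	Partition  string `json:",omitempty"`
	Namespace  string `json:",omitempty"`
	Datacenter string // illustrative field without omitempty
}

func main() {
	b, _ := json.Marshal(failoverTarget{Datacenter: "dc2"})
	// With omitempty, the empty Partition/Namespace keys are omitted entirely.
	fmt.Println(string(b)) // {"Datacenter":"dc2"}
}
```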
@@ -0,0 +1,3 @@
# test-integ

Go integration tests for Consul. `/test/integration` also holds integration tests; they still need to be migrated.
@@ -0,0 +1,105 @@
module github.com/hashicorp/consul/test-integ

go 1.20

require (
	github.com/hashicorp/consul/api v1.22.0
	github.com/hashicorp/consul/sdk v0.14.0
	github.com/hashicorp/consul/test/integration/consul-container v0.0.0-20230628201853-bdf4fad7c5a5
	github.com/hashicorp/consul/testing/deployer v0.0.0-00010101000000-000000000000
	github.com/hashicorp/go-cleanhttp v0.5.2
	github.com/itchyny/gojq v0.12.13
	github.com/mitchellh/copystructure v1.2.0
	github.com/stretchr/testify v1.8.4
)

require (
	fortio.org/dflag v1.5.2 // indirect
	fortio.org/fortio v1.54.0 // indirect
	fortio.org/log v1.3.0 // indirect
	fortio.org/sets v1.0.2 // indirect
	fortio.org/version v1.0.2 // indirect
	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
	github.com/Microsoft/go-winio v0.6.1 // indirect
	github.com/agext/levenshtein v1.2.1 // indirect
	github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
	github.com/armon/go-metrics v0.4.1 // indirect
	github.com/avast/retry-go v3.0.0+incompatible // indirect
	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
	github.com/containerd/containerd v1.7.1 // indirect
	github.com/cpuguy83/dockercfg v0.3.1 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/docker/distribution v2.8.1+incompatible // indirect
	github.com/docker/docker v23.0.6+incompatible // indirect
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/fatih/color v1.14.1 // indirect
	github.com/go-jose/go-jose/v3 v3.0.0 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/google/btree v1.0.1 // indirect
	github.com/google/go-cmp v0.5.9 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-hclog v1.5.0 // indirect
	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
	github.com/hashicorp/go-msgpack v0.5.5 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
	github.com/hashicorp/go-rootcerts v1.0.2 // indirect
	github.com/hashicorp/go-sockaddr v1.0.2 // indirect
	github.com/hashicorp/go-uuid v1.0.3 // indirect
	github.com/hashicorp/go-version v1.2.1 // indirect
	github.com/hashicorp/golang-lru v0.5.4 // indirect
	github.com/hashicorp/hcl/v2 v2.16.2 // indirect
	github.com/hashicorp/memberlist v0.5.0 // indirect
	github.com/hashicorp/serf v0.10.1 // indirect
	github.com/imdario/mergo v0.3.15 // indirect
	github.com/itchyny/timefmt-go v0.1.5 // indirect
	github.com/klauspost/compress v1.16.5 // indirect
	github.com/magiconair/properties v1.8.7 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.19 // indirect
	github.com/miekg/dns v1.1.50 // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
	github.com/mitchellh/go-wordwrap v1.0.0 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/mitchellh/reflectwalk v1.0.2 // indirect
	github.com/moby/patternmatcher v0.5.0 // indirect
	github.com/moby/sys/sequential v0.5.0 // indirect
	github.com/moby/term v0.5.0 // indirect
	github.com/morikuni/aec v1.0.0 // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
	github.com/opencontainers/runc v1.1.7 // indirect
	github.com/otiai10/copy v1.10.0 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
	github.com/rboyer/safeio v0.2.2 // indirect
	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
	github.com/sirupsen/logrus v1.9.0 // indirect
	github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 // indirect
	github.com/testcontainers/testcontainers-go v0.20.1 // indirect
	github.com/zclconf/go-cty v1.12.1 // indirect
	golang.org/x/crypto v0.7.0 // indirect
	golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
	golang.org/x/mod v0.10.0 // indirect
	golang.org/x/net v0.10.0 // indirect
	golang.org/x/sys v0.8.0 // indirect
	golang.org/x/text v0.9.0 // indirect
	golang.org/x/tools v0.9.1 // indirect
	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
	google.golang.org/grpc v1.55.0 // indirect
	google.golang.org/protobuf v1.30.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	gotest.tools/v3 v3.4.0 // indirect
)

replace (
	github.com/hashicorp/consul => ../
	github.com/hashicorp/consul/api => ../api
	github.com/hashicorp/consul/envoyextensions => ../envoyextensions
	github.com/hashicorp/consul/proto-public => ../proto-public
	github.com/hashicorp/consul/sdk => ../sdk
	github.com/hashicorp/consul/test/integration/consul-container => ../test/integration/consul-container
	github.com/hashicorp/consul/testing/deployer => ../testing/deployer
)
@@ -0,0 +1,377 @@
(377 lines of go.sum checksum entries for the module requirements above; generated content omitted here)
@@ -0,0 +1,12 @@
# peering_commontopo

These peering tests all use a `commonTopo` (read: "common topology") to enable sharing a deployment of Consul. Sharing a deployment of Consul cuts down on setup time.

This is only possible if two constraints are followed:

- The `setup()` phase must ensure that any resources added to the topology cannot interfere with other tests, principally by prefixing their names.
- The `test()` phase must be "passive" and not mutate the topology in any way that would interfere with other tests.

Some of these tests *do* mutate in their `test()` phase; while they use `commonTopo` for the purpose of code sharing, they are not included in the "shared topo" tests in `all_sharedtopo_test.go`.

Tests that are "shared topo" can also be run independently, gated behind the `-no-reuse-common-topo` flag. The same flag also prevents the shared topo suite from running. So `go test .` (without the flag) runs all shared topo-capable tests in *shared topo mode*, as well as the shared topo-incapable tests; and `go test -no-reuse-common-topo` runs all shared topo-capable tests *individually*, as well as the shared topo-incapable tests. Mostly this is so that when working on a single test you don't also need to run the others, while `go test .` run the usual way still runs all tests in the fastest way.
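To make the constraints above concrete, here is a minimal sketch (editorial, not part of the commit) of a suite written against this pattern, modeled on the `ac1` test below; the exact `sharedTopoSuite` interface, in particular the `test` method signature, is assumed here rather than shown in this excerpt.

```go
package peering

import (
	"testing"

	"github.com/hashicorp/consul/testing/deployer/topology"
)

// exampleSuite is a hypothetical shared-topo suite: setup() only adds
// "example-"-prefixed resources, and test() is purely passive, so it can
// share one commonTopo deployment with the other suites.
type exampleSuite struct {
	DC   string
	Peer string

	// test points, filled in by setup() and read by test()
	sidServer topology.ServiceID
}

func (s *exampleSuite) testName() string {
	return "example " + s.DC + "->" + s.Peer
}

func (s *exampleSuite) setup(t *testing.T, ct *commonTopo) {
	// Prefix every resource name so it cannot collide with resources
	// registered by other suites sharing the same topology.
	s.sidServer = topology.ServiceID{
		Name:      "example-server",
		Partition: "default",
	}
	// ... register services and config entries via ct, e.g. ct.AddServiceNode(...)
}

func (s *exampleSuite) test(t *testing.T, ct *commonTopo) {
	// Passive assertions only: query the deployed topology, never mutate it.
}

func TestExampleSuite(t *testing.T) {
	runShareableSuites(t, []sharedTopoSuite{
		&exampleSuite{DC: "dc1", Peer: "dc2"},
	})
}
```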
@@ -0,0 +1,272 @@
package peering

import (
	"fmt"
	"testing"

	"github.com/hashicorp/consul/testing/deployer/topology"

	"github.com/hashicorp/consul/api"
)

type ac1BasicSuite struct {
	// inputs
	DC   string
	Peer string

	// test points
	sidServerHTTP  topology.ServiceID
	sidServerTCP   topology.ServiceID
	nodeServerHTTP topology.NodeID
	nodeServerTCP  topology.NodeID

	// 1.1
	sidClientTCP  topology.ServiceID
	nodeClientTCP topology.NodeID

	// 1.2
	sidClientHTTP  topology.ServiceID
	nodeClientHTTP topology.NodeID

	upstreamHTTP *topology.Upstream
	upstreamTCP  *topology.Upstream
}

var ac1BasicSuites []sharedTopoSuite = []sharedTopoSuite{
	&ac1BasicSuite{DC: "dc1", Peer: "dc2"},
	&ac1BasicSuite{DC: "dc2", Peer: "dc1"},
}

func TestAC1Basic(t *testing.T) {
	runShareableSuites(t, ac1BasicSuites)
}

func (s *ac1BasicSuite) testName() string {
	return fmt.Sprintf("ac1 basic %s->%s", s.DC, s.Peer)
}

// creates clients in s.DC and servers in s.Peer
func (s *ac1BasicSuite) setup(t *testing.T, ct *commonTopo) {
	clu := ct.ClusterByDatacenter(t, s.DC)
	peerClu := ct.ClusterByDatacenter(t, s.Peer)

	partition := "default"
	peer := LocalPeerName(peerClu, "default")
	cluPeerName := LocalPeerName(clu, "default")
	const prefix = "ac1-"

	tcpServerSID := topology.ServiceID{
		Name:      prefix + "server-tcp",
		Partition: partition,
	}
	httpServerSID := topology.ServiceID{
		Name:      prefix + "server-http",
		Partition: partition,
	}
	upstreamHTTP := &topology.Upstream{
		ID: topology.ServiceID{
			Name:      httpServerSID.Name,
			Partition: partition,
		},
		LocalPort: 5001,
		Peer:      peer,
	}
	upstreamTCP := &topology.Upstream{
		ID: topology.ServiceID{
			Name:      tcpServerSID.Name,
			Partition: partition,
		},
		LocalPort: 5000,
		Peer:      peer,
	}

	// Make clients which have server upstreams
	setupClientServiceAndConfigs := func(protocol string) (serviceExt, *topology.Node) {
		sid := topology.ServiceID{
			Name:      prefix + "client-" + protocol,
			Partition: partition,
		}
		svc := serviceExt{
			Service: NewFortioServiceWithDefaults(
				clu.Datacenter,
				sid,
				func(s *topology.Service) {
					s.Upstreams = []*topology.Upstream{
						upstreamTCP,
						upstreamHTTP,
					}
				},
			),
			Config: &api.ServiceConfigEntry{
				Kind:      api.ServiceDefaults,
				Name:      sid.Name,
				Partition: ConfigEntryPartition(sid.Partition),
				Protocol:  protocol,
				UpstreamConfig: &api.UpstreamConfiguration{
					Defaults: &api.UpstreamConfig{
						MeshGateway: api.MeshGatewayConfig{
							Mode: api.MeshGatewayModeLocal,
						},
					},
				},
			},
		}

		node := ct.AddServiceNode(clu, svc)

		return svc, node
	}
	tcpClient, tcpClientNode := setupClientServiceAndConfigs("tcp")
	httpClient, httpClientNode := setupClientServiceAndConfigs("http")

	httpServer := serviceExt{
		Service: NewFortioServiceWithDefaults(
			peerClu.Datacenter,
			httpServerSID,
			nil,
		),
		Config: &api.ServiceConfigEntry{
			Kind:      api.ServiceDefaults,
			Name:      httpServerSID.Name,
			Partition: ConfigEntryPartition(httpServerSID.Partition),
			Protocol:  "http",
		},
		Exports: []api.ServiceConsumer{{Peer: cluPeerName}},
		Intentions: &api.ServiceIntentionsConfigEntry{
			Kind:      api.ServiceIntentions,
			Name:      httpServerSID.Name,
			Partition: ConfigEntryPartition(httpServerSID.Partition),
			Sources: []*api.SourceIntention{
				{
					Name:   tcpClient.ID.Name,
					Peer:   cluPeerName,
					Action: api.IntentionActionAllow,
				},
				{
					Name:   httpClient.ID.Name,
					Peer:   cluPeerName,
					Action: api.IntentionActionAllow,
				},
			},
		},
	}
	tcpServer := serviceExt{
		Service: NewFortioServiceWithDefaults(
			peerClu.Datacenter,
			tcpServerSID,
			nil,
		),
		Config: &api.ServiceConfigEntry{
			Kind:      api.ServiceDefaults,
			Name:      tcpServerSID.Name,
			Partition: ConfigEntryPartition(tcpServerSID.Partition),
			Protocol:  "tcp",
		},
		Exports: []api.ServiceConsumer{{Peer: cluPeerName}},
		Intentions: &api.ServiceIntentionsConfigEntry{
			Kind:      api.ServiceIntentions,
			Name:      tcpServerSID.Name,
			Partition: ConfigEntryPartition(tcpServerSID.Partition),
			Sources: []*api.SourceIntention{
				{
					Name:   tcpClient.ID.Name,
					Peer:   cluPeerName,
					Action: api.IntentionActionAllow,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: httpClient.ID.Name,
|
||||||
|
Peer: cluPeerName,
|
||||||
|
Action: api.IntentionActionAllow,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
httpServerNode := ct.AddServiceNode(peerClu, httpServer)
|
||||||
|
tcpServerNode := ct.AddServiceNode(peerClu, tcpServer)
|
||||||
|
|
||||||
|
s.sidClientHTTP = httpClient.ID
|
||||||
|
s.nodeClientHTTP = httpClientNode.ID()
|
||||||
|
s.sidClientTCP = tcpClient.ID
|
||||||
|
s.nodeClientTCP = tcpClientNode.ID()
|
||||||
|
s.upstreamHTTP = upstreamHTTP
|
||||||
|
s.upstreamTCP = upstreamTCP
|
||||||
|
|
||||||
|
// these are references in Peer
|
||||||
|
s.sidServerHTTP = httpServerSID
|
||||||
|
s.nodeServerHTTP = httpServerNode.ID()
|
||||||
|
s.sidServerTCP = tcpServerSID
|
||||||
|
s.nodeServerTCP = tcpServerNode.ID()
|
||||||
|
}
|
||||||
|
|
||||||
|
// implements https://docs.google.com/document/d/1Fs3gNMhCqE4zVNMFcbzf02ZrB0kxxtJpI2h905oKhrs/edit#heading=h.wtzvyryyb56v
|
||||||
|
func (s *ac1BasicSuite) test(t *testing.T, ct *commonTopo) {
|
||||||
|
dc := ct.Sprawl.Topology().Clusters[s.DC]
|
||||||
|
peer := ct.Sprawl.Topology().Clusters[s.Peer]
|
||||||
|
ac := s
|
||||||
|
|
||||||
|
// refresh this from Topology
|
||||||
|
svcClientTCP := dc.ServiceByID(
|
||||||
|
ac.nodeClientTCP,
|
||||||
|
ac.sidClientTCP,
|
||||||
|
)
|
||||||
|
svcClientHTTP := dc.ServiceByID(
|
||||||
|
ac.nodeClientHTTP,
|
||||||
|
ac.sidClientHTTP,
|
||||||
|
)
|
||||||
|
// our ac has the node/sid for server in the peer DC
|
||||||
|
svcServerHTTP := peer.ServiceByID(
|
||||||
|
ac.nodeServerHTTP,
|
||||||
|
ac.sidServerHTTP,
|
||||||
|
)
|
||||||
|
svcServerTCP := peer.ServiceByID(
|
||||||
|
ac.nodeServerTCP,
|
||||||
|
ac.sidServerTCP,
|
||||||
|
)
|
||||||
|
|
||||||
|
// preconditions
|
||||||
|
// these could be done parallel with each other, but complexity
|
||||||
|
// probably not worth the speed boost
|
||||||
|
ct.Assert.HealthyWithPeer(t, dc.Name, svcServerHTTP.ID, LocalPeerName(peer, "default"))
|
||||||
|
ct.Assert.HealthyWithPeer(t, dc.Name, svcServerTCP.ID, LocalPeerName(peer, "default"))
|
||||||
|
ct.Assert.UpstreamEndpointHealthy(t, svcClientTCP, ac.upstreamTCP)
|
||||||
|
ct.Assert.UpstreamEndpointHealthy(t, svcClientTCP, ac.upstreamHTTP)
|
||||||
|
|
||||||
|
tcs := []struct {
|
||||||
|
acSub int
|
||||||
|
proto string
|
||||||
|
svc *topology.Service
|
||||||
|
}{
|
||||||
|
{1, "tcp", svcClientTCP},
|
||||||
|
{2, "http", svcClientHTTP},
|
||||||
|
}
|
||||||
|
for _, tc := range tcs {
|
||||||
|
tc := tc
|
||||||
|
t.Run(fmt.Sprintf("1.%d. %s in A can call HTTP upstream", tc.acSub, tc.proto), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
ct.Assert.FortioFetch2HeaderEcho(t, tc.svc, ac.upstreamHTTP)
|
||||||
|
})
|
||||||
|
t.Run(fmt.Sprintf("1.%d. %s in A can call TCP upstream", tc.acSub, tc.proto), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
ct.Assert.FortioFetch2HeaderEcho(t, tc.svc, ac.upstreamTCP)
|
||||||
|
})
|
||||||
|
t.Run(fmt.Sprintf("1.%d. via %s in A, FORTIO_NAME of HTTP upstream", tc.acSub, tc.proto), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
ct.Assert.FortioFetch2FortioName(t,
|
||||||
|
tc.svc,
|
||||||
|
ac.upstreamHTTP,
|
||||||
|
peer.Name,
|
||||||
|
svcServerHTTP.ID,
|
||||||
|
)
|
||||||
|
})
|
||||||
|
t.Run(fmt.Sprintf("1.%d. via %s in A, FORTIO_NAME of TCP upstream", tc.acSub, tc.proto), func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
ct.Assert.FortioFetch2FortioName(t,
|
||||||
|
tc.svc,
|
||||||
|
ac.upstreamTCP,
|
||||||
|
peer.Name,
|
||||||
|
svcServerTCP.ID,
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,203 @@
|
||||||
|
package peering
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ac2DiscoChainSuite struct {
|
||||||
|
DC string
|
||||||
|
Peer string
|
||||||
|
|
||||||
|
clientSID topology.ServiceID
|
||||||
|
}
|
||||||
|
|
||||||
|
var ac2DiscoChainSuites []sharedTopoSuite = []sharedTopoSuite{
|
||||||
|
&ac2DiscoChainSuite{DC: "dc1", Peer: "dc2"},
|
||||||
|
&ac2DiscoChainSuite{DC: "dc2", Peer: "dc1"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAC2DiscoChain(t *testing.T) {
|
||||||
|
runShareableSuites(t, ac2DiscoChainSuites)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac2DiscoChainSuite) testName() string {
|
||||||
|
return fmt.Sprintf("ac2 disco chain %s->%s", s.DC, s.Peer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac2DiscoChainSuite) setup(t *testing.T, ct *commonTopo) {
|
||||||
|
clu := ct.ClusterByDatacenter(t, s.DC)
|
||||||
|
peerClu := ct.ClusterByDatacenter(t, s.Peer)
|
||||||
|
partition := "default"
|
||||||
|
peer := LocalPeerName(peerClu, "default")
|
||||||
|
|
||||||
|
// Make an HTTP server with discovery chain config entries
|
||||||
|
server := NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
topology.ServiceID{
|
||||||
|
Name: "ac2-disco-chain-svc",
|
||||||
|
Partition: partition,
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
ct.ExportService(clu, partition,
|
||||||
|
api.ExportedService{
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Consumers: []api.ServiceConsumer{
|
||||||
|
{
|
||||||
|
Peer: peer,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
clu.InitialConfigEntries = append(clu.InitialConfigEntries,
|
||||||
|
&api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Protocol: "http",
|
||||||
|
},
|
||||||
|
&api.ServiceSplitterConfigEntry{
|
||||||
|
Kind: api.ServiceSplitter,
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Splits: []api.ServiceSplit{
|
||||||
|
{
|
||||||
|
Weight: 100.0,
|
||||||
|
ResponseHeaders: &api.HTTPHeaderModifiers{
|
||||||
|
Add: map[string]string{
|
||||||
|
"X-Split": "test",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
ct.AddServiceNode(clu, serviceExt{Service: server})
|
||||||
|
|
||||||
|
// Define server as upstream for client
|
||||||
|
upstream := &topology.Upstream{
|
||||||
|
ID: topology.ServiceID{
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: partition, // TODO: iterate over all possible partitions
|
||||||
|
},
|
||||||
|
// TODO: we need to expose this on 0.0.0.0 so we can check it
|
||||||
|
// through our forward proxy. not realistic IMO
|
||||||
|
LocalAddress: "0.0.0.0",
|
||||||
|
LocalPort: 5000,
|
||||||
|
Peer: peer,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make client which will dial server
|
||||||
|
clientSID := topology.ServiceID{
|
||||||
|
Name: "ac2-client",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
client := NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
clientSID,
|
||||||
|
func(s *topology.Service) {
|
||||||
|
s.Upstreams = []*topology.Upstream{
|
||||||
|
upstream,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
ct.ExportService(clu, partition,
|
||||||
|
api.ExportedService{
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Consumers: []api.ServiceConsumer{
|
||||||
|
{
|
||||||
|
Peer: peer,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
ct.AddServiceNode(clu, serviceExt{Service: client})
|
||||||
|
|
||||||
|
clu.InitialConfigEntries = append(clu.InitialConfigEntries,
|
||||||
|
&api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Protocol: "http",
|
||||||
|
UpstreamConfig: &api.UpstreamConfiguration{
|
||||||
|
Defaults: &api.UpstreamConfig{
|
||||||
|
MeshGateway: api.MeshGatewayConfig{
|
||||||
|
Mode: api.MeshGatewayModeLocal,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
// Add intention allowing client to call server
|
||||||
|
clu.InitialConfigEntries = append(clu.InitialConfigEntries,
|
||||||
|
&api.ServiceIntentionsConfigEntry{
|
||||||
|
Kind: api.ServiceIntentions,
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Sources: []*api.SourceIntention{
|
||||||
|
{
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Peer: peer,
|
||||||
|
Action: api.IntentionActionAllow,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
s.clientSID = clientSID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac2DiscoChainSuite) test(t *testing.T, ct *commonTopo) {
|
||||||
|
dc := ct.Sprawl.Topology().Clusters[s.DC]
|
||||||
|
|
||||||
|
svcs := dc.ServicesByID(s.clientSID)
|
||||||
|
require.Len(t, svcs, 1, "expected exactly one client in datacenter")
|
||||||
|
|
||||||
|
client := svcs[0]
|
||||||
|
require.Len(t, client.Upstreams, 1, "expected exactly one upstream for client")
|
||||||
|
u := client.Upstreams[0]
|
||||||
|
|
||||||
|
t.Run("peered upstream exists in catalog", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
ct.Assert.CatalogServiceExists(t, s.DC, u.ID.Name, &api.QueryOptions{
|
||||||
|
Peer: u.Peer,
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("peered upstream endpoint status is healthy", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
ct.Assert.UpstreamEndpointStatus(t, client, peerClusterPrefix(u), "HEALTHY", 1)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("response contains header injected by splitter", func(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
// TODO: not sure we should call u.LocalPort? it's not realistic from a security
|
||||||
|
// standpoint. prefer the fortio fetch2 stuff myself
|
||||||
|
ct.Assert.HTTPServiceEchoesResHeader(t, client, u.LocalPort, "",
|
||||||
|
map[string]string{
|
||||||
|
"X-Split": "test",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// For reference see consul/xds/clusters.go:
|
||||||
|
//
|
||||||
|
// func (s *ResourceGenerator) getTargetClusterName
|
||||||
|
//
|
||||||
|
// and connect/sni.go
|
||||||
|
func peerClusterPrefix(u *topology.Upstream) string {
|
||||||
|
if u.Peer == "" {
|
||||||
|
panic("upstream is not from a peer")
|
||||||
|
}
|
||||||
|
u.ID.Normalize()
|
||||||
|
return u.ID.Name + "." + u.ID.Namespace + "." + u.Peer + ".external"
|
||||||
|
}
|
|
@ -0,0 +1,264 @@
|
||||||
|
package peering
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||||
|
"github.com/hashicorp/go-cleanhttp"
|
||||||
|
"github.com/itchyny/gojq"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||||
|
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
var ac3SvcDefaultsSuites []sharedTopoSuite = []sharedTopoSuite{
|
||||||
|
&ac3SvcDefaultsSuite{DC: "dc1", Peer: "dc2"},
|
||||||
|
&ac3SvcDefaultsSuite{DC: "dc2", Peer: "dc1"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAC3SvcDefaults(t *testing.T) {
|
||||||
|
runShareableSuites(t, ac3SvcDefaultsSuites)
|
||||||
|
}
|
||||||
|
|
||||||
|
type ac3SvcDefaultsSuite struct {
|
||||||
|
// inputs
|
||||||
|
DC string
|
||||||
|
Peer string
|
||||||
|
|
||||||
|
// test points
|
||||||
|
sidServer topology.ServiceID
|
||||||
|
nodeServer topology.NodeID
|
||||||
|
sidClient topology.ServiceID
|
||||||
|
nodeClient topology.NodeID
|
||||||
|
|
||||||
|
upstream *topology.Upstream
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac3SvcDefaultsSuite) testName() string {
|
||||||
|
return fmt.Sprintf("ac3 service defaults upstreams %s->%s", s.DC, s.Peer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// creates clients in s.DC and servers in s.Peer
|
||||||
|
func (s *ac3SvcDefaultsSuite) setup(t *testing.T, ct *commonTopo) {
|
||||||
|
clu := ct.ClusterByDatacenter(t, s.DC)
|
||||||
|
peerClu := ct.ClusterByDatacenter(t, s.Peer)
|
||||||
|
|
||||||
|
partition := "default"
|
||||||
|
peer := LocalPeerName(peerClu, "default")
|
||||||
|
cluPeerName := LocalPeerName(clu, "default")
|
||||||
|
|
||||||
|
serverSID := topology.ServiceID{
|
||||||
|
Name: "ac3-server",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
upstream := &topology.Upstream{
|
||||||
|
ID: topology.ServiceID{
|
||||||
|
Name: serverSID.Name,
|
||||||
|
Partition: partition,
|
||||||
|
},
|
||||||
|
LocalPort: 5001,
|
||||||
|
Peer: peer,
|
||||||
|
}
|
||||||
|
|
||||||
|
sid := topology.ServiceID{
|
||||||
|
Name: "ac3-client",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
client := serviceExt{
|
||||||
|
Service: NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
sid,
|
||||||
|
func(s *topology.Service) {
|
||||||
|
s.Upstreams = []*topology.Upstream{
|
||||||
|
upstream,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
),
|
||||||
|
Config: &api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: sid.Name,
|
||||||
|
Partition: ConfigEntryPartition(sid.Partition),
|
||||||
|
Protocol: "http",
|
||||||
|
UpstreamConfig: &api.UpstreamConfiguration{
|
||||||
|
Overrides: []*api.UpstreamConfig{
|
||||||
|
{
|
||||||
|
Name: upstream.ID.Name,
|
||||||
|
Namespace: upstream.ID.Namespace,
|
||||||
|
Peer: peer,
|
||||||
|
PassiveHealthCheck: &api.PassiveHealthCheck{
|
||||||
|
MaxFailures: 1,
|
||||||
|
Interval: 10 * time.Minute,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Defaults: &api.UpstreamConfig{
|
||||||
|
MeshGateway: api.MeshGatewayConfig{
|
||||||
|
Mode: api.MeshGatewayModeLocal,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
clientNode := ct.AddServiceNode(clu, client)
|
||||||
|
|
||||||
|
server := serviceExt{
|
||||||
|
Service: NewFortioServiceWithDefaults(
|
||||||
|
peerClu.Datacenter,
|
||||||
|
serverSID,
|
||||||
|
nil,
|
||||||
|
),
|
||||||
|
Config: &api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: serverSID.Name,
|
||||||
|
Partition: ConfigEntryPartition(serverSID.Partition),
|
||||||
|
Protocol: "http",
|
||||||
|
},
|
||||||
|
Exports: []api.ServiceConsumer{{Peer: cluPeerName}},
|
||||||
|
Intentions: &api.ServiceIntentionsConfigEntry{
|
||||||
|
Kind: api.ServiceIntentions,
|
||||||
|
Name: serverSID.Name,
|
||||||
|
Partition: ConfigEntryPartition(serverSID.Partition),
|
||||||
|
Sources: []*api.SourceIntention{
|
||||||
|
{
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Peer: cluPeerName,
|
||||||
|
Action: api.IntentionActionAllow,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
serverNode := ct.AddServiceNode(peerClu, server)
|
||||||
|
|
||||||
|
s.sidClient = client.ID
|
||||||
|
s.nodeClient = clientNode.ID()
|
||||||
|
s.upstream = upstream
|
||||||
|
|
||||||
|
// these are references in Peer
|
||||||
|
s.sidServer = serverSID
|
||||||
|
s.nodeServer = serverNode.ID()
|
||||||
|
}
|
||||||
|
|
||||||
|
// make two requests to upstream via client's fetch2 with status=<nonceStatus>
|
||||||
|
// the first time, it should return nonceStatus
|
||||||
|
// the second time, we expect the upstream to have been removed from the envoy cluster,
|
||||||
|
// and thereby get some other 5xx
|
||||||
|
func (s *ac3SvcDefaultsSuite) test(t *testing.T, ct *commonTopo) {
|
||||||
|
dc := ct.Sprawl.Topology().Clusters[s.DC]
|
||||||
|
peer := ct.Sprawl.Topology().Clusters[s.Peer]
|
||||||
|
|
||||||
|
// refresh this from Topology
|
||||||
|
svcClient := dc.ServiceByID(
|
||||||
|
s.nodeClient,
|
||||||
|
s.sidClient,
|
||||||
|
)
|
||||||
|
// our ac has the node/sid for server in the peer DC
|
||||||
|
svcServer := peer.ServiceByID(
|
||||||
|
s.nodeServer,
|
||||||
|
s.sidServer,
|
||||||
|
)
|
||||||
|
|
||||||
|
// preconditions
|
||||||
|
// these could be done parallel with each other, but complexity
|
||||||
|
// probably not worth the speed boost
|
||||||
|
ct.Assert.HealthyWithPeer(t, dc.Name, svcServer.ID, LocalPeerName(peer, "default"))
|
||||||
|
ct.Assert.UpstreamEndpointHealthy(t, svcClient, s.upstream)
|
||||||
|
// TODO: we need to let the upstream start serving properly before we do this. if it
|
||||||
|
// isn't ready and returns a 5xx (which it will do if it's not up yet!), it will stick
|
||||||
|
// in a down state for PassiveHealthCheck.Interval
|
||||||
|
time.Sleep(30 * time.Second)
|
||||||
|
ct.Assert.FortioFetch2HeaderEcho(t, svcClient, s.upstream)
|
||||||
|
|
||||||
|
// TODO: use proxied HTTP client
|
||||||
|
client := cleanhttp.DefaultClient()
|
||||||
|
// TODO: what is default? namespace? partition?
|
||||||
|
clusterName := fmt.Sprintf("%s.default.%s.external", s.upstream.ID.Name, s.upstream.Peer)
|
||||||
|
nonceStatus := http.StatusInsufficientStorage
|
||||||
|
url507 := fmt.Sprintf("http://localhost:%d/fortio/fetch2?url=%s", svcClient.ExposedPort,
|
||||||
|
url.QueryEscape(fmt.Sprintf("http://localhost:%d/?status=%d", s.upstream.LocalPort, nonceStatus)),
|
||||||
|
)
|
||||||
|
|
||||||
|
// we only make this call once
|
||||||
|
req, err := http.NewRequest(http.MethodGet, url507, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
res, err := client.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer res.Body.Close()
|
||||||
|
require.Equal(t, nonceStatus, res.StatusCode)
|
||||||
|
|
||||||
|
// this is a modified version of assertEnvoyUpstreamHealthy
|
||||||
|
envoyAddr := fmt.Sprintf("localhost:%d", svcClient.ExposedEnvoyAdminPort)
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: 30 * time.Second, Wait: 500 * time.Millisecond}, t, func(r *retry.R) {
|
||||||
|
// BOOKMARK: avoid libassert, but we need to resurrect this method in asserter first
|
||||||
|
clusters, statusCode, err := libassert.GetEnvoyOutputWithClient(client, envoyAddr, "clusters", map[string]string{"format": "json"})
|
||||||
|
if err != nil {
|
||||||
|
r.Fatal("could not fetch envoy clusters")
|
||||||
|
}
|
||||||
|
require.Equal(r, 200, statusCode)
|
||||||
|
|
||||||
|
filter := fmt.Sprintf(
|
||||||
|
`.cluster_statuses[]
|
||||||
|
| select(.name|contains("%s"))
|
||||||
|
| [.host_statuses[].health_status.failed_outlier_check]
|
||||||
|
|.[0]`,
|
||||||
|
clusterName)
|
||||||
|
result, err := jqOne(clusters, filter)
|
||||||
|
require.NoErrorf(r, err, "could not found cluster name %q: %v \n%s", clusterName, err, clusters)
|
||||||
|
|
||||||
|
resultAsBool, ok := result.(bool)
|
||||||
|
require.True(r, ok)
|
||||||
|
require.True(r, resultAsBool)
|
||||||
|
})
|
||||||
|
|
||||||
|
url200 := fmt.Sprintf("http://localhost:%d/fortio/fetch2?url=%s", svcClient.ExposedPort,
|
||||||
|
url.QueryEscape(fmt.Sprintf("http://localhost:%d/", s.upstream.LocalPort)),
|
||||||
|
)
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: time.Minute * 1, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
|
||||||
|
req, err := http.NewRequest(http.MethodGet, url200, nil)
|
||||||
|
require.NoError(r, err)
|
||||||
|
res, err := client.Do(req)
|
||||||
|
require.NoError(r, err)
|
||||||
|
defer res.Body.Close()
|
||||||
|
require.True(r, res.StatusCode >= 500 && res.StatusCode < 600 && res.StatusCode != nonceStatus)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Executes the JQ filter against the given JSON string.
|
||||||
|
// Iff there is one result, return that.
|
||||||
|
func jqOne(config, filter string) (interface{}, error) {
|
||||||
|
query, err := gojq.Parse(filter)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var m interface{}
|
||||||
|
err = json.Unmarshal([]byte(config), &m)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
iter := query.Run(m)
|
||||||
|
result := []interface{}{}
|
||||||
|
for {
|
||||||
|
v, ok := iter.Next()
|
||||||
|
if !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err, ok := v.(error); ok {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
result = append(result, v)
|
||||||
|
}
|
||||||
|
if len(result) != 1 {
|
||||||
|
return nil, fmt.Errorf("required result of len 1, but is %d: %v", len(result), result)
|
||||||
|
}
|
||||||
|
return result[0], nil
|
||||||
|
}
|
|
@ -0,0 +1,213 @@
|
||||||
|
package peering
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||||
|
"github.com/hashicorp/go-cleanhttp"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ac4ProxyDefaultsSuite struct {
|
||||||
|
DC string
|
||||||
|
Peer string
|
||||||
|
|
||||||
|
nodeClient topology.NodeID
|
||||||
|
nodeServer topology.NodeID
|
||||||
|
|
||||||
|
serverSID topology.ServiceID
|
||||||
|
clientSID topology.ServiceID
|
||||||
|
upstream *topology.Upstream
|
||||||
|
}
|
||||||
|
|
||||||
|
var ac4ProxyDefaultsSuites []sharedTopoSuite = []sharedTopoSuite{
|
||||||
|
&ac4ProxyDefaultsSuite{DC: "dc1", Peer: "dc2"},
|
||||||
|
&ac4ProxyDefaultsSuite{DC: "dc2", Peer: "dc1"},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAC4ProxyDefaults(t *testing.T) {
|
||||||
|
runShareableSuites(t, ac4ProxyDefaultsSuites)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac4ProxyDefaultsSuite) testName() string {
|
||||||
|
return fmt.Sprintf("ac4 proxy defaults %s->%s", s.DC, s.Peer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// creates clients in s.DC and servers in s.Peer
|
||||||
|
func (s *ac4ProxyDefaultsSuite) setup(t *testing.T, ct *commonTopo) {
|
||||||
|
clu := ct.ClusterByDatacenter(t, s.DC)
|
||||||
|
peerClu := ct.ClusterByDatacenter(t, s.Peer)
|
||||||
|
|
||||||
|
partition := "default"
|
||||||
|
peer := LocalPeerName(peerClu, "default")
|
||||||
|
cluPeerName := LocalPeerName(clu, "default")
|
||||||
|
|
||||||
|
serverSID := topology.ServiceID{
|
||||||
|
Name: "ac4-server-http",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
// Define server as upstream for client
|
||||||
|
upstream := &topology.Upstream{
|
||||||
|
ID: serverSID,
|
||||||
|
LocalPort: 5000,
|
||||||
|
Peer: peer,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make client which will dial server
|
||||||
|
clientSID := topology.ServiceID{
|
||||||
|
Name: "ac4-http-client",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
client := serviceExt{
|
||||||
|
Service: NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
clientSID,
|
||||||
|
func(s *topology.Service) {
|
||||||
|
s.Upstreams = []*topology.Upstream{
|
||||||
|
upstream,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
),
|
||||||
|
Config: &api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: clientSID.Name,
|
||||||
|
Partition: ConfigEntryPartition(clientSID.Partition),
|
||||||
|
Protocol: "http",
|
||||||
|
UpstreamConfig: &api.UpstreamConfiguration{
|
||||||
|
Defaults: &api.UpstreamConfig{
|
||||||
|
MeshGateway: api.MeshGatewayConfig{
|
||||||
|
Mode: api.MeshGatewayModeLocal,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
clientNode := ct.AddServiceNode(clu, client)
|
||||||
|
|
||||||
|
server := serviceExt{
|
||||||
|
Service: NewFortioServiceWithDefaults(
|
||||||
|
peerClu.Datacenter,
|
||||||
|
serverSID,
|
||||||
|
nil,
|
||||||
|
),
|
||||||
|
Config: &api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: serverSID.Name,
|
||||||
|
Partition: ConfigEntryPartition(serverSID.Partition),
|
||||||
|
Protocol: "http",
|
||||||
|
},
|
||||||
|
Exports: []api.ServiceConsumer{{Peer: cluPeerName}},
|
||||||
|
Intentions: &api.ServiceIntentionsConfigEntry{
|
||||||
|
Kind: api.ServiceIntentions,
|
||||||
|
Name: serverSID.Name,
|
||||||
|
Partition: ConfigEntryPartition(serverSID.Partition),
|
||||||
|
Sources: []*api.SourceIntention{
|
||||||
|
{
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Peer: cluPeerName,
|
||||||
|
Action: api.IntentionActionAllow,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
peerClu.InitialConfigEntries = append(peerClu.InitialConfigEntries,
|
||||||
|
&api.ProxyConfigEntry{
|
||||||
|
Kind: api.ProxyDefaults,
|
||||||
|
Name: api.ProxyConfigGlobal,
|
||||||
|
Partition: ConfigEntryPartition(server.ID.Partition),
|
||||||
|
Config: map[string]interface{}{
|
||||||
|
"protocol": "http",
|
||||||
|
"local_request_timeout_ms": 500,
|
||||||
|
},
|
||||||
|
MeshGateway: api.MeshGatewayConfig{
|
||||||
|
Mode: api.MeshGatewayModeLocal,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
serverNode := ct.AddServiceNode(peerClu, server)
|
||||||
|
|
||||||
|
s.clientSID = clientSID
|
||||||
|
s.serverSID = serverSID
|
||||||
|
s.nodeServer = serverNode.ID()
|
||||||
|
s.nodeClient = clientNode.ID()
|
||||||
|
s.upstream = upstream
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac4ProxyDefaultsSuite) test(t *testing.T, ct *commonTopo) {
|
||||||
|
var client *topology.Service
|
||||||
|
|
||||||
|
dc := ct.Sprawl.Topology().Clusters[s.DC]
|
||||||
|
peer := ct.Sprawl.Topology().Clusters[s.Peer]
|
||||||
|
|
||||||
|
clientSVC := dc.ServiceByID(
|
||||||
|
s.nodeClient,
|
||||||
|
s.clientSID,
|
||||||
|
)
|
||||||
|
serverSVC := peer.ServiceByID(
|
||||||
|
s.nodeServer,
|
||||||
|
s.serverSID,
|
||||||
|
)
|
||||||
|
|
||||||
|
// preconditions check
|
||||||
|
ct.Assert.HealthyWithPeer(t, dc.Name, serverSVC.ID, LocalPeerName(peer, "default"))
|
||||||
|
ct.Assert.UpstreamEndpointHealthy(t, clientSVC, s.upstream)
|
||||||
|
ct.Assert.FortioFetch2HeaderEcho(t, clientSVC, s.upstream)
|
||||||
|
|
||||||
|
t.Run("Validate services exist in catalog", func(t *testing.T) {
|
||||||
|
dcSvcs := dc.ServicesByID(s.clientSID)
|
||||||
|
require.Len(t, dcSvcs, 1, "expected exactly one client")
|
||||||
|
client = dcSvcs[0]
|
||||||
|
require.Len(t, client.Upstreams, 1, "expected exactly one upstream for client")
|
||||||
|
|
||||||
|
server := dc.ServicesByID(s.serverSID)
|
||||||
|
require.Len(t, server, 1, "expected exactly one server")
|
||||||
|
require.Len(t, server[0].Upstreams, 0, "expected no upstream for server")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("peered upstream exists in catalog", func(t *testing.T) {
|
||||||
|
ct.Assert.CatalogServiceExists(t, s.DC, s.upstream.ID.Name, &api.QueryOptions{
|
||||||
|
Peer: s.upstream.Peer,
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("HTTP service fails due to connection timeout", func(t *testing.T) {
|
||||||
|
url504 := fmt.Sprintf("http://localhost:%d/fortio/fetch2?url=%s", client.ExposedPort,
|
||||||
|
url.QueryEscape(fmt.Sprintf("http://localhost:%d/?delay=1000ms", s.upstream.LocalPort)),
|
||||||
|
)
|
||||||
|
|
||||||
|
url200 := fmt.Sprintf("http://localhost:%d/fortio/fetch2?url=%s", client.ExposedPort,
|
||||||
|
url.QueryEscape(fmt.Sprintf("http://localhost:%d/", s.upstream.LocalPort)),
|
||||||
|
)
|
||||||
|
|
||||||
|
// validate request timeout error where service has 1000ms response delay and
|
||||||
|
// proxy default is set to local_request_timeout_ms: 500ms
|
||||||
|
// return 504
|
||||||
|
httpClient := cleanhttp.DefaultClient()
|
||||||
|
req, err := http.NewRequest(http.MethodGet, url504, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
res, err := httpClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
defer res.Body.Close()
|
||||||
|
require.Equal(t, http.StatusGatewayTimeout, res.StatusCode)
|
||||||
|
|
||||||
|
// validate successful GET request where service has no response delay and
|
||||||
|
// proxy default is set to local_request_timeout_ms: 500ms
|
||||||
|
// return 200
|
||||||
|
req, err = http.NewRequest(http.MethodGet, url200, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
res, err = httpClient.Do(req)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
defer res.Body.Close()
|
||||||
|
require.Equal(t, http.StatusOK, res.StatusCode)
|
||||||
|
})
|
||||||
|
}
|
|
@ -0,0 +1,129 @@
|
||||||
|
package peering
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||||
|
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
|
||||||
|
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ac5_1NoSvcMeshSuite struct {
|
||||||
|
DC string
|
||||||
|
Peer string
|
||||||
|
|
||||||
|
serverSID topology.ServiceID
|
||||||
|
clientSID topology.ServiceID
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ac5_1NoSvcMeshSuites []sharedTopoSuite = []sharedTopoSuite{
|
||||||
|
&ac5_1NoSvcMeshSuite{DC: "dc1", Peer: "dc2"},
|
||||||
|
&ac5_1NoSvcMeshSuite{DC: "dc2", Peer: "dc1"},
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAC5ServiceMeshDisabledSuite(t *testing.T) {
|
||||||
|
runShareableSuites(t, ac5_1NoSvcMeshSuites)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_1NoSvcMeshSuite) testName() string {
|
||||||
|
return fmt.Sprintf("ac5.1 no service mesh %s->%s", s.DC, s.Peer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// creates clients in s.DC and servers in s.Peer
|
||||||
|
func (s *ac5_1NoSvcMeshSuite) setup(t *testing.T, ct *commonTopo) {
|
||||||
|
clu := ct.ClusterByDatacenter(t, s.DC)
|
||||||
|
peerClu := ct.ClusterByDatacenter(t, s.Peer)
|
||||||
|
|
||||||
|
// TODO: handle all partitions
|
||||||
|
partition := "default"
|
||||||
|
peer := LocalPeerName(peerClu, partition)
|
||||||
|
|
||||||
|
serverSID := topology.ServiceID{
|
||||||
|
Name: "ac5-server-http",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make client which will dial server
|
||||||
|
clientSID := topology.ServiceID{
|
||||||
|
Name: "ac5-http-client",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
|
||||||
|
// disable service mesh for client in s.DC
|
||||||
|
client := serviceExt{
|
||||||
|
Service: NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
clientSID,
|
||||||
|
func(s *topology.Service) {
|
||||||
|
s.EnvoyAdminPort = 0
|
||||||
|
s.DisableServiceMesh = true
|
||||||
|
},
|
||||||
|
),
|
||||||
|
Config: &api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: clientSID.Name,
|
||||||
|
Partition: ConfigEntryPartition(clientSID.Partition),
|
||||||
|
Protocol: "http",
|
||||||
|
},
|
||||||
|
Exports: []api.ServiceConsumer{{Peer: peer}},
|
||||||
|
}
|
||||||
|
ct.AddServiceNode(clu, client)
|
||||||
|
|
||||||
|
server := serviceExt{
|
||||||
|
Service: NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
serverSID,
|
||||||
|
nil,
|
||||||
|
),
|
||||||
|
Exports: []api.ServiceConsumer{{Peer: peer}},
|
||||||
|
}
|
||||||
|
|
||||||
|
ct.AddServiceNode(clu, server)
|
||||||
|
|
||||||
|
s.clientSID = clientSID
|
||||||
|
s.serverSID = serverSID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_1NoSvcMeshSuite) test(t *testing.T, ct *commonTopo) {
|
||||||
|
dc := ct.Sprawl.Topology().Clusters[s.DC]
|
||||||
|
peer := ct.Sprawl.Topology().Clusters[s.Peer]
|
||||||
|
cl := ct.APIClientForCluster(t, dc)
|
||||||
|
peerName := LocalPeerName(peer, "default")
|
||||||
|
|
||||||
|
s.testServiceHealthInCatalog(t, ct, cl, peerName)
|
||||||
|
s.testProxyDisabledInDC2(t, cl, peerName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_1NoSvcMeshSuite) testServiceHealthInCatalog(t *testing.T, ct *commonTopo, cl *api.Client, peer string) {
|
||||||
|
t.Run("validate service health in catalog", func(t *testing.T) {
|
||||||
|
libassert.CatalogServiceExists(t, cl, s.clientSID.Name, &api.QueryOptions{
|
||||||
|
Peer: peer,
|
||||||
|
})
|
||||||
|
require.NotEqual(t, s.serverSID.Name, s.Peer)
|
||||||
|
assertServiceHealth(t, cl, s.serverSID.Name, 1)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_1NoSvcMeshSuite) testProxyDisabledInDC2(t *testing.T, cl *api.Client, peer string) {
|
||||||
|
t.Run("service mesh is disabled", func(t *testing.T) {
|
||||||
|
var (
|
||||||
|
services map[string][]string
|
||||||
|
err error
|
||||||
|
expected = fmt.Sprintf("%s-sidecar-proxy", s.clientSID.Name)
|
||||||
|
)
|
||||||
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
services, _, err = cl.Catalog().Services(&api.QueryOptions{
|
||||||
|
Peer: peer,
|
||||||
|
})
|
||||||
|
require.NoError(r, err, "error reading service data")
|
||||||
|
require.Greater(r, len(services), 0, "did not find service(s) in catalog")
|
||||||
|
})
|
||||||
|
require.NotContains(t, services, expected, fmt.Sprintf("error: should not create proxy for service: %s", services))
|
||||||
|
})
|
||||||
|
}
|
|
@ -0,0 +1,398 @@
|
||||||
|
package peering
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||||
|
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// 1. Setup: put health service instances in each of the 3 clusters and create the PQ in one of them
|
||||||
|
// 2. Execute the PQ: Validate that failover count == 0 and that the pq results come from the local cluster
|
||||||
|
// 3. Register a failing TTL health check with the agent managing the service instance in the local cluster
|
||||||
|
// 4. Execute the PQ: Validate that failover count == 1 and that the pq results come from the first failover target peer
|
||||||
|
// 5. Register a failing TTL health check with the agent managing the service instance in the first failover peer
|
||||||
|
// 6. Execute the PQ: Validate that failover count == 2 and that the pq results come from the second failover target
|
||||||
|
// 7. Delete failing health check from step 5
|
||||||
|
// 8. Repeat step 4
|
||||||
|
// 9. Delete failing health check from step 3
|
||||||
|
// 10. Repeat step 2
|
||||||
|
type ac5_2PQFailoverSuite struct {
|
||||||
|
clientSID topology.ServiceID
|
||||||
|
serverSID topology.ServiceID
|
||||||
|
nodeServer topology.NodeID
|
||||||
|
}
|
||||||
|
|
||||||
|
var ac5_2Context = make(map[nodeKey]ac5_2PQFailoverSuite)
|
||||||
|
|
||||||
|
func TestAC5PreparedQueryFailover(t *testing.T) {
|
||||||
|
ct := NewCommonTopo(t)
|
||||||
|
s := &ac5_2PQFailoverSuite{}
|
||||||
|
s.setup(t, ct)
|
||||||
|
ct.Launch(t)
|
||||||
|
s.test(t, ct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_2PQFailoverSuite) setup(t *testing.T, ct *commonTopo) {
|
||||||
|
s.setupDC(ct, ct.DC1, ct.DC2)
|
||||||
|
s.setupDC(ct, ct.DC2, ct.DC1)
|
||||||
|
s.setupDC3(ct, ct.DC3, ct.DC1, ct.DC2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_2PQFailoverSuite) setupDC(ct *commonTopo, clu, peerClu *topology.Cluster) {
|
||||||
|
// TODO: handle all partitions
|
||||||
|
partition := "default"
|
||||||
|
peer := LocalPeerName(peerClu, partition)
|
||||||
|
|
||||||
|
serverSID := topology.ServiceID{
|
||||||
|
Name: "ac5-server-http",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
|
||||||
|
clientSID := topology.ServiceID{
|
||||||
|
Name: "ac5-client-http",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
|
||||||
|
client := serviceExt{
|
||||||
|
Service: NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
clientSID,
|
||||||
|
func(s *topology.Service) {
|
||||||
|
s.EnvoyAdminPort = 0
|
||||||
|
s.DisableServiceMesh = true
|
||||||
|
},
|
||||||
|
),
|
||||||
|
Config: &api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: clientSID.Name,
|
||||||
|
Partition: ConfigEntryPartition(clientSID.Partition),
|
||||||
|
Protocol: "http",
|
||||||
|
},
|
||||||
|
Exports: []api.ServiceConsumer{{Peer: peer}},
|
||||||
|
}
|
||||||
|
|
||||||
|
ct.AddServiceNode(clu, client)
|
||||||
|
|
||||||
|
server := serviceExt{
|
||||||
|
Service: NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
serverSID,
|
||||||
|
nil,
|
||||||
|
),
|
||||||
|
Exports: []api.ServiceConsumer{{Peer: peer}},
|
||||||
|
}
|
||||||
|
serverNode := ct.AddServiceNode(clu, server)
|
||||||
|
|
||||||
|
ac5_2Context[nodeKey{clu.Datacenter, partition}] = ac5_2PQFailoverSuite{
|
||||||
|
clientSID: clientSID,
|
||||||
|
serverSID: serverSID,
|
||||||
|
nodeServer: serverNode.ID(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_2PQFailoverSuite) setupDC3(ct *commonTopo, clu, peer1, peer2 *topology.Cluster) {
|
||||||
|
var (
|
||||||
|
peers []string
|
||||||
|
partition = "default"
|
||||||
|
)
|
||||||
|
peers = append(peers, LocalPeerName(peer1, partition), LocalPeerName(peer2, partition))
|
||||||
|
|
||||||
|
serverSID := topology.ServiceID{
|
||||||
|
Name: "ac5-server-http",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
|
||||||
|
clientSID := topology.ServiceID{
|
||||||
|
Name: "ac5-client-http",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
|
||||||
|
// disable service mesh for client in DC3
|
||||||
|
client := serviceExt{
|
||||||
|
Service: NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
clientSID,
|
||||||
|
func(s *topology.Service) {
|
||||||
|
s.EnvoyAdminPort = 0
|
||||||
|
s.DisableServiceMesh = true
|
||||||
|
},
|
||||||
|
),
|
||||||
|
Config: &api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: clientSID.Name,
|
||||||
|
Partition: ConfigEntryPartition(clientSID.Partition),
|
||||||
|
Protocol: "http",
|
||||||
|
},
|
||||||
|
Exports: func() []api.ServiceConsumer {
|
||||||
|
var consumers []api.ServiceConsumer
|
||||||
|
for _, peer := range peers {
|
||||||
|
consumers = append(consumers, api.ServiceConsumer{
|
||||||
|
Peer: peer,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return consumers
|
||||||
|
}(),
|
||||||
|
}
|
||||||
|
|
||||||
|
ct.AddServiceNode(clu, client)
|
||||||
|
|
||||||
|
server := serviceExt{
|
||||||
|
Service: NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
serverSID,
|
||||||
|
nil,
|
||||||
|
),
|
||||||
|
Exports: func() []api.ServiceConsumer {
|
||||||
|
var consumers []api.ServiceConsumer
|
||||||
|
for _, peer := range peers {
|
||||||
|
consumers = append(consumers, api.ServiceConsumer{
|
||||||
|
Peer: peer,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return consumers
|
||||||
|
}(),
|
||||||
|
}
|
||||||
|
|
||||||
|
serverNode := ct.AddServiceNode(clu, server)
|
||||||
|
|
||||||
|
ac5_2Context[nodeKey{clu.Datacenter, partition}] = ac5_2PQFailoverSuite{
|
||||||
|
clientSID: clientSID,
|
||||||
|
serverSID: serverSID,
|
||||||
|
nodeServer: serverNode.ID(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_2PQFailoverSuite) createPreparedQuery(t *testing.T, ct *commonTopo, c *api.Client, serviceName, partition string) (*api.PreparedQueryDefinition, *api.PreparedQuery) {
|
||||||
|
var (
|
||||||
|
peers []string
|
||||||
|
err error
|
||||||
|
)
|
||||||
|
peers = append(peers, LocalPeerName(ct.DC2, partition), LocalPeerName(ct.DC3, partition))
|
||||||
|
|
||||||
|
def := &api.PreparedQueryDefinition{
|
||||||
|
Name: "ac5-prepared-query",
|
||||||
|
Service: api.ServiceQuery{
|
||||||
|
Service: serviceName,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
OnlyPassing: true,
|
||||||
|
Failover: api.QueryFailoverOptions{
|
||||||
|
Targets: func() []api.QueryFailoverTarget {
|
||||||
|
var queryFailoverTargets []api.QueryFailoverTarget
|
||||||
|
for _, peer := range peers {
|
||||||
|
queryFailoverTargets = append(queryFailoverTargets, api.QueryFailoverTarget{
|
||||||
|
Peer: peer,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return queryFailoverTargets
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
query := c.PreparedQuery()
|
||||||
|
def.ID, _, err = query.Create(def, nil)
|
||||||
|
require.NoError(t, err, "error creating prepared query in cluster")
|
||||||
|
|
||||||
|
return def, query
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_2PQFailoverSuite) test(t *testing.T, ct *commonTopo) {
|
||||||
|
partition := "default"
|
||||||
|
dc1 := ct.Sprawl.Topology().Clusters[ct.DC1.Name]
|
||||||
|
dc2 := ct.Sprawl.Topology().Clusters[ct.DC2.Name]
|
||||||
|
dc3 := ct.Sprawl.Topology().Clusters[ct.DC3.Name]
|
||||||
|
|
||||||
|
type testcase struct {
|
||||||
|
cluster *topology.Cluster
|
||||||
|
peer *topology.Cluster
|
||||||
|
targetCluster *topology.Cluster
|
||||||
|
}
|
||||||
|
tcs := []testcase{
|
||||||
|
{
|
||||||
|
cluster: dc1,
|
||||||
|
peer: dc2,
|
||||||
|
targetCluster: dc3,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range tcs {
|
||||||
|
client := ct.APIClientForCluster(t, tc.cluster)
|
||||||
|
|
||||||
|
t.Run(fmt.Sprintf("%#v", tc), func(t *testing.T) {
|
||||||
|
svc := ac5_2Context[nodeKey{tc.cluster.Name, partition}]
|
||||||
|
require.NotNil(t, svc.serverSID.Name, "expected service name to not be nil")
|
||||||
|
require.NotNil(t, svc.nodeServer, "expected node server to not be nil")
|
||||||
|
|
||||||
|
assertServiceHealth(t, client, svc.serverSID.Name, 1)
|
||||||
|
def, _ := s.createPreparedQuery(t, ct, client, svc.serverSID.Name, partition)
|
||||||
|
s.testPreparedQueryZeroFailover(t, client, def, tc.cluster)
|
||||||
|
s.testPreparedQuerySingleFailover(t, ct, client, def, tc.cluster, tc.peer, partition)
|
||||||
|
s.testPreparedQueryTwoFailovers(t, ct, client, def, tc.cluster, tc.peer, tc.targetCluster, partition)
|
||||||
|
|
||||||
|
// delete failing health check in peer cluster & validate single failover
|
||||||
|
s.testPQSingleFailover(t, ct, client, def, tc.cluster, tc.peer, partition)
|
||||||
|
// delete failing health check in cluster & validate zero failover
|
||||||
|
s.testPQZeroFailover(t, ct, client, def, tc.cluster, tc.peer, partition)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_2PQFailoverSuite) testPreparedQueryZeroFailover(t *testing.T, cl *api.Client, def *api.PreparedQueryDefinition, cluster *topology.Cluster) {
|
||||||
|
t.Run(fmt.Sprintf("prepared query should not failover %s", cluster.Name), func(t *testing.T) {
|
||||||
|
|
||||||
|
// Validate prepared query exists in cluster
|
||||||
|
queryDef, _, err := cl.PreparedQuery().Get(def.ID, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, queryDef, 1, "expected 1 prepared query")
|
||||||
|
require.Equal(t, 2, len(queryDef[0].Service.Failover.Targets), "expected 2 prepared query failover targets to dc2 and dc3")
|
||||||
|
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond}, t, func(r *retry.R) {
|
||||||
|
queryResult, _, err := cl.PreparedQuery().Execute(def.ID, nil)
|
||||||
|
require.NoError(r, err)
|
||||||
|
|
||||||
|
// expected outcome should show 0 failover
|
||||||
|
require.Equal(r, 0, queryResult.Failovers, "expected 0 prepared query failover")
|
||||||
|
require.Equal(r, cluster.Name, queryResult.Nodes[0].Node.Datacenter, "pq results should come from the local cluster")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_2PQFailoverSuite) testPreparedQuerySingleFailover(t *testing.T, ct *commonTopo, cl *api.Client, def *api.PreparedQueryDefinition, cluster, peerClu *topology.Cluster, partition string) {
|
||||||
|
t.Run(fmt.Sprintf("prepared query with single failover %s", cluster.Name), func(t *testing.T) {
|
||||||
|
cfg := ct.Sprawl.Config()
|
||||||
|
svc := ac5_2Context[nodeKey{cluster.Name, partition}]
|
||||||
|
|
||||||
|
nodeCfg := DisableNode(t, cfg, cluster.Name, svc.nodeServer)
|
||||||
|
require.NoError(t, ct.Sprawl.Relaunch(nodeCfg))
|
||||||
|
|
||||||
|
// assert server health status
|
||||||
|
assertServiceHealth(t, cl, svc.serverSID.Name, 0)
|
||||||
|
|
||||||
|
// Validate prepared query exists in cluster
|
||||||
|
queryDef, _, err := cl.PreparedQuery().Get(def.ID, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, queryDef, 1, "expected 1 prepared query")
|
||||||
|
|
||||||
|
pqFailoverTargets := queryDef[0].Service.Failover.Targets
|
||||||
|
require.Len(t, pqFailoverTargets, 2, "expected 2 prepared query failover targets to dc2 and dc3")
|
||||||
|
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond}, t, func(r *retry.R) {
|
||||||
|
queryResult, _, err := cl.PreparedQuery().Execute(def.ID, nil)
|
||||||
|
require.NoError(r, err)
|
||||||
|
|
||||||
|
require.Equal(r, 1, queryResult.Failovers, "expected 1 prepared query failover")
|
||||||
|
require.Equal(r, peerClu.Name, queryResult.Nodes[0].Node.Datacenter, fmt.Sprintf("the pq results should originate from peer clu %s", peerClu.Name))
|
||||||
|
require.Equal(r, pqFailoverTargets[0].Peer, queryResult.Nodes[0].Checks[0].PeerName,
|
||||||
|
fmt.Sprintf("pq results should come from the first failover target peer %s", pqFailoverTargets[0].Peer))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_2PQFailoverSuite) testPreparedQueryTwoFailovers(t *testing.T, ct *commonTopo, cl *api.Client, def *api.PreparedQueryDefinition, cluster, peerClu, targetCluster *topology.Cluster, partition string) {
|
||||||
|
t.Run(fmt.Sprintf("prepared query with two failovers %s", cluster.Name), func(t *testing.T) {
|
||||||
|
cfg := ct.Sprawl.Config()
|
||||||
|
|
||||||
|
svc := ac5_2Context[nodeKey{peerClu.Name, partition}]
|
||||||
|
|
||||||
|
cfg = DisableNode(t, cfg, peerClu.Name, svc.nodeServer)
|
||||||
|
require.NoError(t, ct.Sprawl.Relaunch(cfg))
|
||||||
|
|
||||||
|
// assert server health status
|
||||||
|
assertServiceHealth(t, cl, ac5_2Context[nodeKey{cluster.Name, partition}].serverSID.Name, 0) // cluster: failing
|
||||||
|
assertServiceHealth(t, cl, svc.serverSID.Name, 0) // peer cluster: failing
|
||||||
|
|
||||||
|
queryDef, _, err := cl.PreparedQuery().Get(def.ID, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, queryDef, 1, "expected 1 prepared query")
|
||||||
|
|
||||||
|
pqFailoverTargets := queryDef[0].Service.Failover.Targets
|
||||||
|
require.Len(t, pqFailoverTargets, 2, "expected 2 prepared query failover targets to dc2 and dc3")
|
||||||
|
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond}, t, func(r *retry.R) {
|
||||||
|
queryResult, _, err := cl.PreparedQuery().Execute(def.ID, nil)
|
||||||
|
require.NoError(r, err)
|
||||||
|
require.Equal(r, 2, queryResult.Failovers, "expected 2 prepared query failover")
|
||||||
|
|
||||||
|
require.Equal(r, targetCluster.Name, queryResult.Nodes[0].Node.Datacenter, fmt.Sprintf("the pq results should originate from cluster %s", targetCluster.Name))
|
||||||
|
require.Equal(r, pqFailoverTargets[1].Peer, queryResult.Nodes[0].Checks[0].PeerName,
|
||||||
|
fmt.Sprintf("pq results should come from the second failover target peer %s", pqFailoverTargets[1].Peer))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_2PQFailoverSuite) testPQSingleFailover(t *testing.T, ct *commonTopo, cl *api.Client, def *api.PreparedQueryDefinition, cluster, peerClu *topology.Cluster, partition string) {
|
||||||
|
t.Run(fmt.Sprintf("delete failing health check in %s and validate single failover %s", peerClu.Name, cluster.Name), func(t *testing.T) {
|
||||||
|
cfg := ct.Sprawl.Config()
|
||||||
|
|
||||||
|
svc := ac5_2Context[nodeKey{peerClu.Name, partition}]
|
||||||
|
|
||||||
|
cfg = EnableNode(t, cfg, peerClu.Name, svc.nodeServer)
|
||||||
|
require.NoError(t, ct.Sprawl.Relaunch(cfg))
|
||||||
|
|
||||||
|
queryDef, _, err := cl.PreparedQuery().Get(def.ID, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
pqFailoverTargets := queryDef[0].Service.Failover.Targets
|
||||||
|
require.Len(t, pqFailoverTargets, 2, "expected 2 prepared query failover targets to dc2 and dc3")
|
||||||
|
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond}, t, func(r *retry.R) {
|
||||||
|
queryResult, _, err := cl.PreparedQuery().Execute(def.ID, nil)
|
||||||
|
require.NoError(r, err)
|
||||||
|
require.Equal(r, 1, queryResult.Failovers, "expected 1 prepared query failover")
|
||||||
|
|
||||||
|
require.Equal(r, peerClu.Name, queryResult.Nodes[0].Node.Datacenter, fmt.Sprintf("the pq results should originate from cluster %s", peerClu.Name))
|
||||||
|
require.Equal(r, pqFailoverTargets[0].Peer, queryResult.Nodes[0].Checks[0].PeerName,
|
||||||
|
fmt.Sprintf("pq results should come from the second failover target peer %s", pqFailoverTargets[0].Peer))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac5_2PQFailoverSuite) testPQZeroFailover(t *testing.T, ct *commonTopo, cl *api.Client, def *api.PreparedQueryDefinition, cluster, peerClu *topology.Cluster, partition string) {
|
||||||
|
t.Run(fmt.Sprintf("delete failing health check in %s and validate zero failover %s", cluster.Name, cluster.Name), func(t *testing.T) {
|
||||||
|
cfg := ct.Sprawl.Config()
|
||||||
|
|
||||||
|
svc := ac5_2Context[nodeKey{cluster.Name, partition}]
|
||||||
|
|
||||||
|
cfg = EnableNode(t, cfg, cluster.Name, svc.nodeServer)
|
||||||
|
require.NoError(t, ct.Sprawl.Relaunch(cfg))
|
||||||
|
|
||||||
|
// assert server health status
|
||||||
|
assertServiceHealth(t, cl, ac5_2Context[nodeKey{cluster.Name, partition}].serverSID.Name, 1) // cluster: passing
|
||||||
|
assertServiceHealth(t, cl, svc.serverSID.Name, 1) // peer cluster: passing
|
||||||
|
|
||||||
|
queryDef, _, err := cl.PreparedQuery().Get(def.ID, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
pqFailoverTargets := queryDef[0].Service.Failover.Targets
|
||||||
|
require.Len(t, pqFailoverTargets, 2, "expected 2 prepared query failover targets to dc2 and dc3")
|
||||||
|
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: 10 * time.Second, Wait: 500 * time.Millisecond}, t, func(r *retry.R) {
|
||||||
|
queryResult, _, err := cl.PreparedQuery().Execute(def.ID, nil)
|
||||||
|
require.NoError(r, err)
|
||||||
|
// expected outcome should show 0 failover
|
||||||
|
require.Equal(r, 0, queryResult.Failovers, "expected 0 prepared query failover")
|
||||||
|
require.Equal(r, cluster.Name, queryResult.Nodes[0].Node.Datacenter, "pq results should come from the local cluster")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// assertServiceHealth checks that a service health status before running tests
|
||||||
|
func assertServiceHealth(t *testing.T, cl *api.Client, serverSVC string, count int) {
|
||||||
|
t.Helper()
|
||||||
|
t.Log("validate service health in catalog")
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: time.Second * 20, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
|
||||||
|
svcs, _, err := cl.Health().Service(
|
||||||
|
serverSVC,
|
||||||
|
"",
|
||||||
|
true,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
require.NoError(r, err)
|
||||||
|
require.Equal(r, count, len(svcs))
|
||||||
|
})
|
||||||
|
}
|
|
@ -0,0 +1,429 @@
|
||||||
|
package peering
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// note: unlike other *Suite structs that are per-peering direction,
|
||||||
|
// this one is special and does all directions itself, because the
|
||||||
|
// setup is not exactly symmetrical
|
||||||
|
type ac6FailoversSuite struct {
|
||||||
|
ac6 map[nodeKey]ac6FailoversContext
|
||||||
|
}
|
||||||
|
type ac6FailoversContext struct {
|
||||||
|
clientSID topology.ServiceID
|
||||||
|
serverSID topology.ServiceID
|
||||||
|
|
||||||
|
// used to remove the node and trigger failover
|
||||||
|
serverNode topology.NodeID
|
||||||
|
}
|
||||||
|
type nodeKey struct {
|
||||||
|
dc string
|
||||||
|
partition string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note: this test cannot share topo
|
||||||
|
func TestAC6Failovers(t *testing.T) {
|
||||||
|
ct := NewCommonTopo(t)
|
||||||
|
s := &ac6FailoversSuite{}
|
||||||
|
s.setup(t, ct)
|
||||||
|
ct.Launch(t)
|
||||||
|
s.test(t, ct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac6FailoversSuite) setup(t *testing.T, ct *commonTopo) {
|
||||||
|
// TODO: update setups to loop through a cluster's partitions+namespaces internally
|
||||||
|
s.setupAC6Failovers(ct, ct.DC1, ct.DC2)
|
||||||
|
s.setupAC6Failovers(ct, ct.DC2, ct.DC1)
|
||||||
|
s.setupAC6FailoversDC3(ct, ct.DC3, ct.DC1, ct.DC2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// dc1 is peered with dc2 and dc3.
|
||||||
|
// dc1 has an ac6-client in "default" and "part1" partitions (only default in OSS).
|
||||||
|
// ac6-client has a single upstream ac6-failover-svc in its respective partition^.
|
||||||
|
//
|
||||||
|
// ac6-failover-svc has the following failovers:
|
||||||
|
// - peer-dc2-default
|
||||||
|
// - peer-dc2-part1 (not in OSS)
|
||||||
|
// - peer-dc3-default
|
||||||
|
//
|
||||||
|
// This setup is mirrored from dc2->dc1 as well
|
||||||
|
// (both dcs have dc3 as the last failover target)
|
||||||
|
//
|
||||||
|
// ^NOTE: There are no cross-partition upstreams because MeshGatewayMode = local
|
||||||
|
// and failover information gets stripped out by the mesh gateways so we
|
||||||
|
// can't test failovers.
|
||||||
|
func (s *ac6FailoversSuite) setupAC6Failovers(ct *commonTopo, clu, peerClu *topology.Cluster) {
|
||||||
|
for _, part := range clu.Partitions {
|
||||||
|
partition := part.Name
|
||||||
|
|
||||||
|
// There is a peering per partition in the peered cluster
|
||||||
|
var peers []string
|
||||||
|
for _, peerPart := range peerClu.Partitions {
|
||||||
|
peers = append(peers, LocalPeerName(peerClu, peerPart.Name))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make an HTTP server with various failover targets
|
||||||
|
serverSID := topology.ServiceID{
|
||||||
|
Name: "ac6-failover-svc",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
server := NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
serverSID,
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
// Export to all known peers
|
||||||
|
ct.ExportService(clu, partition,
|
||||||
|
api.ExportedService{
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Consumers: func() []api.ServiceConsumer {
|
||||||
|
var consumers []api.ServiceConsumer
|
||||||
|
for _, peer := range peers {
|
||||||
|
consumers = append(consumers, api.ServiceConsumer{
|
||||||
|
Peer: peer,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return consumers
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
serverNode := ct.AddServiceNode(clu, serviceExt{Service: server})
|
||||||
|
|
||||||
|
clu.InitialConfigEntries = append(clu.InitialConfigEntries,
|
||||||
|
&api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Protocol: "http",
|
||||||
|
},
|
||||||
|
&api.ServiceResolverConfigEntry{
|
||||||
|
Kind: api.ServiceResolver,
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Failover: map[string]api.ServiceResolverFailover{
|
||||||
|
"*": {
|
||||||
|
Targets: func() []api.ServiceResolverFailoverTarget {
|
||||||
|
// Make a failover target for every partition in the peer cluster
|
||||||
|
var targets []api.ServiceResolverFailoverTarget
|
||||||
|
for _, peer := range peers {
|
||||||
|
targets = append(targets, api.ServiceResolverFailoverTarget{
|
||||||
|
Peer: peer,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
// Just hard code default partition for dc3, since the exhaustive
|
||||||
|
// testing will be done against dc2.
|
||||||
|
targets = append(targets, api.ServiceResolverFailoverTarget{
|
||||||
|
Peer: "peer-dc3-default",
|
||||||
|
})
|
||||||
|
return targets
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
// Make client which will dial server
|
||||||
|
clientSID := topology.ServiceID{
|
||||||
|
Name: "ac6-client",
|
||||||
|
Partition: partition,
|
||||||
|
}
|
||||||
|
client := NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
clientSID,
|
||||||
|
func(s *topology.Service) {
|
||||||
|
// Upstream per partition
|
||||||
|
s.Upstreams = []*topology.Upstream{
|
||||||
|
{
|
||||||
|
ID: topology.ServiceID{
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: part.Name,
|
||||||
|
},
|
||||||
|
LocalPort: 5000,
|
||||||
|
// exposed so we can hit it directly
|
||||||
|
// TODO: we shouldn't do this; it's not realistic
|
||||||
|
LocalAddress: "0.0.0.0",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
ct.ExportService(clu, partition,
|
||||||
|
api.ExportedService{
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Consumers: func() []api.ServiceConsumer {
|
||||||
|
var consumers []api.ServiceConsumer
|
||||||
|
// Export to each peer
|
||||||
|
for _, peer := range peers {
|
||||||
|
consumers = append(consumers, api.ServiceConsumer{
|
||||||
|
Peer: peer,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return consumers
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
ct.AddServiceNode(clu, serviceExt{Service: client})
|
||||||
|
|
||||||
|
clu.InitialConfigEntries = append(clu.InitialConfigEntries,
|
||||||
|
&api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Protocol: "http",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
// Add intention allowing local and peered clients to call server
|
||||||
|
clu.InitialConfigEntries = append(clu.InitialConfigEntries,
|
||||||
|
&api.ServiceIntentionsConfigEntry{
|
||||||
|
Kind: api.ServiceIntentions,
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
// SourceIntention for local client and peered clients
|
||||||
|
Sources: func() []*api.SourceIntention {
|
||||||
|
ixns := []*api.SourceIntention{
|
||||||
|
{
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(part.Name),
|
||||||
|
Action: api.IntentionActionAllow,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, peer := range peers {
|
||||||
|
ixns = append(ixns, &api.SourceIntention{
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Peer: peer,
|
||||||
|
Action: api.IntentionActionAllow,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return ixns
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
)
|
||||||
|
if s.ac6 == nil {
|
||||||
|
s.ac6 = map[nodeKey]ac6FailoversContext{}
|
||||||
|
}
|
||||||
|
s.ac6[nodeKey{clu.Datacenter, partition}] = struct {
|
||||||
|
clientSID topology.ServiceID
|
||||||
|
serverSID topology.ServiceID
|
||||||
|
serverNode topology.NodeID
|
||||||
|
}{
|
||||||
|
clientSID: clientSID,
|
||||||
|
serverSID: serverSID,
|
||||||
|
serverNode: serverNode.ID(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
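Concretely, for dc1's default partition in an enterprise build, the loop above produces a resolver equivalent to the following. This is a restatement of the generated entry for one partition, not extra configuration:

	&api.ServiceResolverConfigEntry{
		Kind: api.ServiceResolver,
		Name: "ac6-failover-svc",
		Failover: map[string]api.ServiceResolverFailover{
			"*": {
				Targets: []api.ServiceResolverFailoverTarget{
					{Peer: "peer-dc2-default"},
					{Peer: "peer-dc2-part1"}, // omitted in OSS
					{Peer: "peer-dc3-default"},
				},
			},
		},
	}

In OSS only the default partition exists, so the dc2-part1 target is absent and the precondition check later in this file counts three failover-target clusters instead of four.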
|
||||||
|
|
||||||
|
func (s *ac6FailoversSuite) setupAC6FailoversDC3(ct *commonTopo, clu, peer1, peer2 *topology.Cluster) {
|
||||||
|
var peers []string
|
||||||
|
for _, part := range peer1.Partitions {
|
||||||
|
peers = append(peers, LocalPeerName(peer1, part.Name))
|
||||||
|
}
|
||||||
|
for _, part := range peer2.Partitions {
|
||||||
|
peers = append(peers, LocalPeerName(peer2, part.Name))
|
||||||
|
}
|
||||||
|
|
||||||
|
partition := "default"
|
||||||
|
|
||||||
|
// Make an HTTP server
|
||||||
|
server := NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
topology.ServiceID{
|
||||||
|
Name: "ac6-failover-svc",
|
||||||
|
Partition: partition,
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
ct.AddServiceNode(clu, serviceExt{
|
||||||
|
Service: server,
|
||||||
|
Config: &api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Protocol: "http",
|
||||||
|
},
|
||||||
|
Intentions: &api.ServiceIntentionsConfigEntry{
|
||||||
|
Kind: api.ServiceIntentions,
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Sources: func() []*api.SourceIntention {
|
||||||
|
var ixns []*api.SourceIntention
|
||||||
|
for _, peer := range peers {
|
||||||
|
ixns = append(ixns, &api.SourceIntention{
|
||||||
|
Name: "ac6-client",
|
||||||
|
Peer: peer,
|
||||||
|
Action: api.IntentionActionAllow,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return ixns
|
||||||
|
}(),
|
||||||
|
},
|
||||||
|
Exports: func() []api.ServiceConsumer {
|
||||||
|
var consumers []api.ServiceConsumer
|
||||||
|
for _, peer := range peers {
|
||||||
|
consumers = append(consumers, api.ServiceConsumer{
|
||||||
|
Peer: peer,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return consumers
|
||||||
|
}(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac6FailoversSuite) test(t *testing.T, ct *commonTopo) {
|
||||||
|
dc1 := ct.Sprawl.Topology().Clusters["dc1"]
|
||||||
|
dc2 := ct.Sprawl.Topology().Clusters["dc2"]
|
||||||
|
|
||||||
|
type testcase struct {
|
||||||
|
name string
|
||||||
|
cluster *topology.Cluster
|
||||||
|
peer *topology.Cluster
|
||||||
|
partition string
|
||||||
|
}
|
||||||
|
tcs := []testcase{
|
||||||
|
{
|
||||||
|
name: "dc1 default partition failovers",
|
||||||
|
cluster: dc1,
|
||||||
|
peer: dc2, // dc3 is hardcoded
|
||||||
|
partition: "default",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "dc1 part1 partition failovers",
|
||||||
|
cluster: dc1,
|
||||||
|
peer: dc2, // dc3 is hardcoded
|
||||||
|
partition: "part1",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "dc2 default partition failovers",
|
||||||
|
cluster: dc2,
|
||||||
|
peer: dc1, // dc3 is hardcoded
|
||||||
|
partition: "default",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "dc2 part1 partition failovers",
|
||||||
|
cluster: dc2,
|
||||||
|
peer: dc1, // dc3 is hardcoded
|
||||||
|
partition: "part1",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tc := range tcs {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
// NOTE: *not parallel* because we mutate resources that are shared
|
||||||
|
// between test cases (disable/enable nodes)
|
||||||
|
if !utils.IsEnterprise() && tc.partition != "default" {
|
||||||
|
t.Skip("skipping: non-default partitions require Consul Enterprise")
|
||||||
|
}
|
||||||
|
partition := tc.partition
|
||||||
|
clu := tc.cluster
|
||||||
|
peerClu := tc.peer
|
||||||
|
|
||||||
|
svcs := clu.ServicesByID(s.ac6[nodeKey{clu.Datacenter, partition}].clientSID)
|
||||||
|
require.Len(t, svcs, 1, "expected exactly one client in datacenter")
|
||||||
|
|
||||||
|
serverSID := s.ac6[nodeKey{clu.Datacenter, partition}].serverSID
|
||||||
|
serverSID.Normalize()
|
||||||
|
|
||||||
|
client := svcs[0]
|
||||||
|
require.Len(t, client.Upstreams, 1, "expected one upstream for client")
|
||||||
|
|
||||||
|
u := client.Upstreams[0]
|
||||||
|
ct.Assert.CatalogServiceExists(t, clu.Name, u.ID.Name, utils.CompatQueryOpts(&api.QueryOptions{
|
||||||
|
Partition: u.ID.Partition,
|
||||||
|
}))
|
||||||
|
|
||||||
|
t.Cleanup(func() {
|
||||||
|
cfg := ct.Sprawl.Config()
|
||||||
|
for _, part := range clu.Partitions {
|
||||||
|
EnableNode(t, cfg, clu.Name, s.ac6[nodeKey{clu.Datacenter, part.Name}].serverNode)
|
||||||
|
}
|
||||||
|
for _, part := range peerClu.Partitions {
|
||||||
|
EnableNode(t, cfg, peerClu.Name, s.ac6[nodeKey{peerClu.Datacenter, part.Name}].serverNode)
|
||||||
|
}
|
||||||
|
require.NoError(t, ct.Sprawl.Relaunch(cfg))
|
||||||
|
})
|
||||||
|
|
||||||
|
fmt.Println("### preconditions")
|
||||||
|
// TODO: deduce this number, instead of hard-coding
|
||||||
|
nFailoverTargets := 4
|
||||||
|
// in OSS, we don't have failover targets for non-default partitions
|
||||||
|
if !utils.IsEnterprise() {
|
||||||
|
nFailoverTargets = 3
|
||||||
|
}
|
||||||
|
for i := 0; i < nFailoverTargets; i++ {
|
||||||
|
ct.Assert.UpstreamEndpointStatus(t, client, fmt.Sprintf("failover-target~%d~%s", i, clusterPrefix(u, clu.Datacenter)), "HEALTHY", 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
ct.Assert.FortioFetch2FortioName(t, client, u, clu.Name, serverSID)
|
||||||
|
|
||||||
|
if t.Failed() {
|
||||||
|
t.Fatalf("failed preconditions")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("### Failover to peer target")
|
||||||
|
cfg := ct.Sprawl.Config()
|
||||||
|
DisableNode(t, cfg, clu.Name, s.ac6[nodeKey{clu.Datacenter, partition}].serverNode)
|
||||||
|
require.NoError(t, ct.Sprawl.Relaunch(cfg))
|
||||||
|
// Clusters for imported services rely on outlier detection for
|
||||||
|
// failovers, NOT eds_health_status. This means that killing the
|
||||||
|
// node above does not actually make the envoy cluster UNHEALTHY
|
||||||
|
// so we do not assert for it.
|
||||||
|
expectUID := topology.ServiceID{
|
||||||
|
Name: u.ID.Name,
|
||||||
|
Partition: "default",
|
||||||
|
}
|
||||||
|
expectUID.Normalize()
|
||||||
|
ct.Assert.FortioFetch2FortioName(t, client, u, peerClu.Name, expectUID)
|
||||||
|
|
||||||
|
if utils.IsEnterprise() {
|
||||||
|
fmt.Println("### Failover to peer target in non-default partition")
|
||||||
|
cfg = ct.Sprawl.Config()
|
||||||
|
DisableNode(t, cfg, clu.Name, s.ac6[nodeKey{clu.Datacenter, partition}].serverNode)
|
||||||
|
DisableNode(t, cfg, peerClu.Name, s.ac6[nodeKey{peerClu.Datacenter, "default"}].serverNode)
|
||||||
|
require.NoError(t, ct.Sprawl.Relaunch(cfg))
|
||||||
|
// Retry until outlier_detection deems the cluster
|
||||||
|
// unhealthy and fails over to peer part1.
|
||||||
|
expectUID = topology.ServiceID{
|
||||||
|
Name: u.ID.Name,
|
||||||
|
Partition: "part1",
|
||||||
|
}
|
||||||
|
expectUID.Normalize()
|
||||||
|
ct.Assert.FortioFetch2FortioName(t, client, u, peerClu.Name, expectUID)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("### Failover to dc3 peer target")
|
||||||
|
cfg = ct.Sprawl.Config()
|
||||||
|
DisableNode(t, cfg, clu.Name, s.ac6[nodeKey{clu.Datacenter, partition}].serverNode)
|
||||||
|
// Disable all partitions for peer
|
||||||
|
for _, part := range peerClu.Partitions {
|
||||||
|
DisableNode(t, cfg, peerClu.Name, s.ac6[nodeKey{peerClu.Datacenter, part.Name}].serverNode)
|
||||||
|
}
|
||||||
|
require.NoError(t, ct.Sprawl.Relaunch(cfg))
|
||||||
|
// This will retry until outlier_detection deems the cluster
|
||||||
|
// unhealthy and fails over to dc3.
|
||||||
|
expectUID = topology.ServiceID{
|
||||||
|
Name: u.ID.Name,
|
||||||
|
Partition: "default",
|
||||||
|
}
|
||||||
|
expectUID.Normalize()
|
||||||
|
ct.Assert.FortioFetch2FortioName(t, client, u, "dc3", expectUID)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func clusterPrefix(u *topology.Upstream, dc string) string {
|
||||||
|
u.ID.Normalize()
|
||||||
|
switch u.ID.Partition {
|
||||||
|
case "default":
|
||||||
|
return fmt.Sprintf("%s.%s.%s.internal", u.ID.Name, u.ID.Namespace, dc)
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("%s.%s.%s.%s.internal-v1", u.ID.Name, u.ID.Namespace, u.ID.Partition, dc)
|
||||||
|
}
|
||||||
|
}
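For example, with the upstream used in this suite, the prefix resolves to Envoy cluster names like the following, which is how the precondition loop above builds its failover-target assertions (values illustrative, derived from the naming scheme in this function):

	// default partition:       ac6-failover-svc.default.dc1.internal
	// non-default partition:   ac6-failover-svc.default.part1.dc1.internal-v1
	// as asserted earlier:     failover-target~0~ac6-failover-svc.default.dc1.internal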
|
|
@@ -0,0 +1,188 @@
|
||||||
|
package peering
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestRotateGW ensures that peered services continue to be able to talk to their
|
||||||
|
// upstreams during a mesh gateway rotation
|
||||||
|
// NOTE: because suiteRotateGW needs to mutate the topo, we actually *DO NOT* share a topo
|
||||||
|
|
||||||
|
type suiteRotateGW struct {
|
||||||
|
DC string
|
||||||
|
Peer string
|
||||||
|
|
||||||
|
sidServer topology.ServiceID
|
||||||
|
nodeServer topology.NodeID
|
||||||
|
|
||||||
|
sidClient topology.ServiceID
|
||||||
|
nodeClient topology.NodeID
|
||||||
|
|
||||||
|
upstream *topology.Upstream
|
||||||
|
|
||||||
|
newMGWNodeName string
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRotateGW(t *testing.T) {
|
||||||
|
suites := []*suiteRotateGW{
|
||||||
|
{DC: "dc1", Peer: "dc2"},
|
||||||
|
{DC: "dc2", Peer: "dc1"},
|
||||||
|
}
|
||||||
|
ct := NewCommonTopo(t)
|
||||||
|
for _, s := range suites {
|
||||||
|
s.setup(t, ct)
|
||||||
|
}
|
||||||
|
ct.Launch(t)
|
||||||
|
for _, s := range suites {
|
||||||
|
s := s
|
||||||
|
t.Run(fmt.Sprintf("%s->%s", s.DC, s.Peer), func(t *testing.T) {
|
||||||
|
// no t.Parallel() due to Relaunch
|
||||||
|
s.test(t, ct)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *suiteRotateGW) setup(t *testing.T, ct *commonTopo) {
|
||||||
|
const prefix = "ac7-1-"
|
||||||
|
|
||||||
|
clu := ct.ClusterByDatacenter(t, s.DC)
|
||||||
|
peerClu := ct.ClusterByDatacenter(t, s.Peer)
|
||||||
|
partition := "default"
|
||||||
|
peer := LocalPeerName(peerClu, "default")
|
||||||
|
cluPeerName := LocalPeerName(clu, "default")
|
||||||
|
|
||||||
|
server := NewFortioServiceWithDefaults(
|
||||||
|
peerClu.Datacenter,
|
||||||
|
topology.ServiceID{
|
||||||
|
Name: prefix + "server-http",
|
||||||
|
Partition: partition,
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Make clients which have server upstreams
|
||||||
|
upstream := &topology.Upstream{
|
||||||
|
ID: topology.ServiceID{
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: partition,
|
||||||
|
},
|
||||||
|
// TODO: we shouldn't need this, need to investigate
|
||||||
|
LocalAddress: "0.0.0.0",
|
||||||
|
LocalPort: 5001,
|
||||||
|
Peer: peer,
|
||||||
|
}
|
||||||
|
// create client in us
|
||||||
|
client := NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
topology.ServiceID{
|
||||||
|
Name: prefix + "client",
|
||||||
|
Partition: partition,
|
||||||
|
},
|
||||||
|
func(s *topology.Service) {
|
||||||
|
s.Upstreams = []*topology.Upstream{
|
||||||
|
upstream,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
clientNode := ct.AddServiceNode(clu, serviceExt{Service: client,
|
||||||
|
Config: &api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(client.ID.Partition),
|
||||||
|
Protocol: "http",
|
||||||
|
UpstreamConfig: &api.UpstreamConfiguration{
|
||||||
|
Defaults: &api.UpstreamConfig{
|
||||||
|
MeshGateway: api.MeshGatewayConfig{
|
||||||
|
Mode: api.MeshGatewayModeLocal,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
// actually to be used by the other pairing
|
||||||
|
serverNode := ct.AddServiceNode(peerClu, serviceExt{
|
||||||
|
Service: server,
|
||||||
|
Config: &api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Protocol: "http",
|
||||||
|
},
|
||||||
|
Exports: []api.ServiceConsumer{{Peer: cluPeerName}},
|
||||||
|
Intentions: &api.ServiceIntentionsConfigEntry{
|
||||||
|
Kind: api.ServiceIntentions,
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Sources: []*api.SourceIntention{
|
||||||
|
{
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Peer: cluPeerName,
|
||||||
|
Action: api.IntentionActionAllow,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
s.sidClient = client.ID
|
||||||
|
s.nodeClient = clientNode.ID()
|
||||||
|
s.upstream = upstream
|
||||||
|
s.sidServer = server.ID
|
||||||
|
s.nodeServer = serverNode.ID()
|
||||||
|
|
||||||
|
// add a second mesh gateway "new"
|
||||||
|
s.newMGWNodeName = fmt.Sprintf("new-%s-default-mgw", clu.Name)
|
||||||
|
clu.Nodes = append(clu.Nodes, newTopologyMeshGatewaySet(
|
||||||
|
topology.NodeKindClient,
|
||||||
|
"default",
|
||||||
|
s.newMGWNodeName,
|
||||||
|
1,
|
||||||
|
[]string{clu.Datacenter, "wan"},
|
||||||
|
func(i int, node *topology.Node) {
|
||||||
|
node.Disabled = true
|
||||||
|
},
|
||||||
|
)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *suiteRotateGW) test(t *testing.T, ct *commonTopo) {
|
||||||
|
dc := ct.Sprawl.Topology().Clusters[s.DC]
|
||||||
|
peer := ct.Sprawl.Topology().Clusters[s.Peer]
|
||||||
|
|
||||||
|
svcHTTPServer := peer.ServiceByID(
|
||||||
|
s.nodeServer,
|
||||||
|
s.sidServer,
|
||||||
|
)
|
||||||
|
svcHTTPClient := dc.ServiceByID(
|
||||||
|
s.nodeClient,
|
||||||
|
s.sidClient,
|
||||||
|
)
|
||||||
|
ct.Assert.HealthyWithPeer(t, dc.Name, svcHTTPServer.ID, LocalPeerName(peer, "default"))
|
||||||
|
|
||||||
|
ct.Assert.FortioFetch2HeaderEcho(t, svcHTTPClient, s.upstream)
|
||||||
|
|
||||||
|
t.Log("relaunching with new gateways")
|
||||||
|
cfg := ct.Sprawl.Config()
|
||||||
|
for _, n := range dc.Nodes {
|
||||||
|
if strings.HasPrefix(n.Name, s.newMGWNodeName) {
|
||||||
|
n.Disabled = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
require.NoError(t, ct.Sprawl.Relaunch(cfg))
|
||||||
|
ct.Assert.FortioFetch2HeaderEcho(t, svcHTTPClient, s.upstream)
|
||||||
|
|
||||||
|
t.Log("relaunching without old gateways")
|
||||||
|
cfg = ct.Sprawl.Config()
|
||||||
|
for _, n := range dc.Nodes {
|
||||||
|
if strings.HasPrefix(n.Name, fmt.Sprintf("%s-default-mgw", dc.Name)) {
|
||||||
|
n.Disabled = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
require.NoError(t, ct.Sprawl.Relaunch(cfg))
|
||||||
|
ct.Assert.FortioFetch2HeaderEcho(t, svcHTTPClient, s.upstream)
|
||||||
|
}
|
|
@@ -0,0 +1,214 @@
|
||||||
|
package peering
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
||||||
|
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||||
|
"github.com/mitchellh/copystructure"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestAC7_2RotateLeader ensures that after a leader rotation, information continues to replicate to peers
|
||||||
|
// NOTE: because suiteRotateLeader needs to mutate the topo, we actually *DO NOT* share a topo
|
||||||
|
type ac7_2RotateLeaderSuite struct {
|
||||||
|
DC string
|
||||||
|
Peer string
|
||||||
|
|
||||||
|
sidServer topology.ServiceID
|
||||||
|
nodeServer topology.NodeID
|
||||||
|
|
||||||
|
sidClient topology.ServiceID
|
||||||
|
nodeClient topology.NodeID
|
||||||
|
|
||||||
|
upstream *topology.Upstream
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAC7_2RotateLeader(t *testing.T) {
|
||||||
|
suites := []*ac7_2RotateLeaderSuite{
|
||||||
|
{DC: "dc1", Peer: "dc2"},
|
||||||
|
{DC: "dc2", Peer: "dc1"},
|
||||||
|
}
|
||||||
|
ct := NewCommonTopo(t)
|
||||||
|
for _, s := range suites {
|
||||||
|
s.setup(t, ct)
|
||||||
|
}
|
||||||
|
ct.Launch(t)
|
||||||
|
for _, s := range suites {
|
||||||
|
s := s
|
||||||
|
t.Run(fmt.Sprintf("%s->%s", s.DC, s.Peer), func(t *testing.T) {
|
||||||
|
// no t.Parallel() due to Relaunch
|
||||||
|
s.test(t, ct)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// makes client in clu, server in peerClu
|
||||||
|
func (s *ac7_2RotateLeaderSuite) setup(t *testing.T, ct *commonTopo) {
|
||||||
|
const prefix = "ac7-2-"
|
||||||
|
|
||||||
|
clu := ct.ClusterByDatacenter(t, s.DC)
|
||||||
|
peerClu := ct.ClusterByDatacenter(t, s.Peer)
|
||||||
|
partition := "default"
|
||||||
|
peer := LocalPeerName(peerClu, "default")
|
||||||
|
cluPeerName := LocalPeerName(clu, "default")
|
||||||
|
|
||||||
|
server := NewFortioServiceWithDefaults(
|
||||||
|
peerClu.Datacenter,
|
||||||
|
topology.ServiceID{
|
||||||
|
Name: prefix + "server-http",
|
||||||
|
Partition: partition,
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Make clients which have server upstreams
|
||||||
|
upstream := &topology.Upstream{
|
||||||
|
ID: topology.ServiceID{
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: partition,
|
||||||
|
},
|
||||||
|
LocalPort: 5001,
|
||||||
|
Peer: peer,
|
||||||
|
}
|
||||||
|
// create client in us
|
||||||
|
client := NewFortioServiceWithDefaults(
|
||||||
|
clu.Datacenter,
|
||||||
|
topology.ServiceID{
|
||||||
|
Name: prefix + "client",
|
||||||
|
Partition: partition,
|
||||||
|
},
|
||||||
|
func(s *topology.Service) {
|
||||||
|
s.Upstreams = []*topology.Upstream{
|
||||||
|
upstream,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
)
|
||||||
|
clientNode := ct.AddServiceNode(clu, serviceExt{Service: client,
|
||||||
|
Config: &api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(client.ID.Partition),
|
||||||
|
Protocol: "http",
|
||||||
|
UpstreamConfig: &api.UpstreamConfiguration{
|
||||||
|
Defaults: &api.UpstreamConfig{
|
||||||
|
MeshGateway: api.MeshGatewayConfig{
|
||||||
|
Mode: api.MeshGatewayModeLocal,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
// actually to be used by the other pairing
|
||||||
|
serverNode := ct.AddServiceNode(peerClu, serviceExt{
|
||||||
|
Service: server,
|
||||||
|
Config: &api.ServiceConfigEntry{
|
||||||
|
Kind: api.ServiceDefaults,
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Protocol: "http",
|
||||||
|
},
|
||||||
|
Exports: []api.ServiceConsumer{{Peer: cluPeerName}},
|
||||||
|
Intentions: &api.ServiceIntentionsConfigEntry{
|
||||||
|
Kind: api.ServiceIntentions,
|
||||||
|
Name: server.ID.Name,
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Sources: []*api.SourceIntention{
|
||||||
|
{
|
||||||
|
Name: client.ID.Name,
|
||||||
|
Peer: cluPeerName,
|
||||||
|
Action: api.IntentionActionAllow,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
s.sidClient = client.ID
|
||||||
|
s.nodeClient = clientNode.ID()
|
||||||
|
s.upstream = upstream
|
||||||
|
s.sidServer = server.ID
|
||||||
|
s.nodeServer = serverNode.ID()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *ac7_2RotateLeaderSuite) test(t *testing.T, ct *commonTopo) {
|
||||||
|
dc := ct.Sprawl.Topology().Clusters[s.DC]
|
||||||
|
peer := ct.Sprawl.Topology().Clusters[s.Peer]
|
||||||
|
clDC := ct.APIClientForCluster(t, dc)
|
||||||
|
clPeer := ct.APIClientForCluster(t, peer)
|
||||||
|
|
||||||
|
svcServer := peer.ServiceByID(s.nodeServer, s.sidServer)
|
||||||
|
svcClient := dc.ServiceByID(s.nodeClient, s.sidClient)
|
||||||
|
ct.Assert.HealthyWithPeer(t, dc.Name, svcServer.ID, LocalPeerName(peer, "default"))
|
||||||
|
|
||||||
|
ct.Assert.FortioFetch2HeaderEcho(t, svcClient, s.upstream)
|
||||||
|
|
||||||
|
// force leader election
|
||||||
|
rotateLeader(t, clDC)
|
||||||
|
rotateLeader(t, clPeer)
|
||||||
|
|
||||||
|
// unexport httpServer
|
||||||
|
ce, _, err := clPeer.ConfigEntries().Get(api.ExportedServices, s.sidServer.Partition, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
// ceAsES = config entry as ExportedServicesConfigEntry
|
||||||
|
ceAsES := ce.(*api.ExportedServicesConfigEntry)
|
||||||
|
origCE, err := copystructure.Copy(ceAsES)
|
||||||
|
require.NoError(t, err)
|
||||||
|
found := 0
|
||||||
|
foundI := 0
|
||||||
|
for i, svc := range ceAsES.Services {
|
||||||
|
if svc.Name == s.sidServer.Name && svc.Namespace == utils.DefaultToEmpty(s.sidServer.Namespace) {
|
||||||
|
found += 1
|
||||||
|
foundI = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
require.Equal(t, found, 1)
|
||||||
|
// remove found entry
|
||||||
|
ceAsES.Services = append(ceAsES.Services[:foundI], ceAsES.Services[foundI+1:]...)
|
||||||
|
_, _, err = clPeer.ConfigEntries().Set(ceAsES, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
t.Cleanup(func() {
|
||||||
|
//restore for next pairing
|
||||||
|
_, _, err = clPeer.ConfigEntries().Set(origCE.(*api.ExportedServicesConfigEntry), nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
// expect health entry in for peer to disappear
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: time.Minute, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
|
||||||
|
svcs, _, err := clDC.Health().Service(s.sidServer.Name, "", true, utils.CompatQueryOpts(&api.QueryOptions{
|
||||||
|
Partition: s.sidServer.Partition,
|
||||||
|
Namespace: s.sidServer.Namespace,
|
||||||
|
Peer: LocalPeerName(peer, "default"),
|
||||||
|
}))
|
||||||
|
require.NoError(r, err)
|
||||||
|
assert.Equal(r, len(svcs), 0, "health entry for imported service gone")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func rotateLeader(t *testing.T, cl *api.Client) {
|
||||||
|
t.Helper()
|
||||||
|
oldLeader := findLeader(t, cl)
|
||||||
|
cl.Operator().RaftLeaderTransfer(nil)
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: 30 * time.Second, Wait: time.Second}, t, func(r *retry.R) {
|
||||||
|
newLeader := findLeader(r, cl)
|
||||||
|
require.NotEqual(r, oldLeader.ID, newLeader.ID)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func findLeader(t require.TestingT, cl *api.Client) *api.RaftServer {
|
||||||
|
raftConfig, err := cl.Operator().RaftGetConfiguration(nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
var leader *api.RaftServer
|
||||||
|
for _, svr := range raftConfig.Servers {
|
||||||
|
if svr.Leader {
|
||||||
|
leader = svr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
require.NotNil(t, leader)
|
||||||
|
return leader
|
||||||
|
}
|
|
@@ -0,0 +1,298 @@
|
||||||
|
package peering
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"regexp"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||||
|
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
|
||||||
|
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// asserter is a utility to help in reducing boilerplate in invoking test
|
||||||
|
// assertions against consul-topology Sprawl components.
|
||||||
|
//
|
||||||
|
// The methods should largely take in *topology.Service instances in lieu of
|
||||||
|
// ip/ports if there is only one port that makes sense for the assertion (such
|
||||||
|
// as use of the envoy admin port 19000).
|
||||||
|
//
|
||||||
|
// If it's up to the test (like picking an upstream) leave port as an argument
|
||||||
|
// but still take the service and use that to grab the local ip from the
|
||||||
|
// topology.Node.
|
||||||
|
type asserter struct {
|
||||||
|
sp sprawlLite
|
||||||
|
}
|
||||||
|
|
||||||
|
// *sprawl.Sprawl satisfies this. We don't need anything else.
|
||||||
|
type sprawlLite interface {
|
||||||
|
HTTPClientForCluster(clusterName string) (*http.Client, error)
|
||||||
|
APIClientForNode(clusterName string, nid topology.NodeID, token string) (*api.Client, error)
|
||||||
|
Topology() *topology.Topology
|
||||||
|
}
|
||||||
|
|
||||||
|
// newAsserter creates a new assertion helper for the provided sprawl.
|
||||||
|
func newAsserter(sp sprawlLite) *asserter {
|
||||||
|
return &asserter{
|
||||||
|
sp: sp,
|
||||||
|
}
|
||||||
|
}
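A minimal wiring sketch, assuming a topology config has already been built (commonTopo does exactly this and exposes the result as ct.Assert; svc here stands in for any *topology.Service from the launched topology):

	sp := sprawltest.Launch(t, cfg) // *sprawl.Sprawl satisfies sprawlLite
	a := newAsserter(sp)
	a.HTTPServiceEchoes(t, svc, svc.Port, "")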
|
||||||
|
|
||||||
|
func (a *asserter) mustGetHTTPClient(t *testing.T, cluster string) *http.Client {
|
||||||
|
client, err := a.httpClientFor(cluster)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return client
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *asserter) mustGetAPIClient(t *testing.T, cluster string) *api.Client {
|
||||||
|
cl, err := a.apiClientFor(cluster)
|
||||||
|
require.NoError(t, err)
|
||||||
|
return cl
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *asserter) apiClientFor(cluster string) (*api.Client, error) {
|
||||||
|
clu := a.sp.Topology().Clusters[cluster]
|
||||||
|
// TODO: this always goes to the first client, but we might want to balance this
|
||||||
|
cl, err := a.sp.APIClientForNode(cluster, clu.FirstClient().ID(), "")
|
||||||
|
return cl, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// httpClientFor returns a pre-configured http.Client that proxies requests
|
||||||
|
// through the embedded squid instance in each LAN.
|
||||||
|
//
|
||||||
|
// Use this in methods below to magically pick the right proxied http client
|
||||||
|
// given the home of each node being checked.
|
||||||
|
func (a *asserter) httpClientFor(cluster string) (*http.Client, error) {
|
||||||
|
client, err := a.sp.HTTPClientForCluster(cluster)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpstreamEndpointStatus validates that the service's Envoy proxy reports count endpoints with the given healthStatus for the provided clusterName.
|
||||||
|
//
|
||||||
|
// Exposes libassert.UpstreamEndpointStatus for use against a Sprawl.
|
||||||
|
//
|
||||||
|
// NOTE: this doesn't take a port b/c you always want to use the envoy admin port.
|
||||||
|
func (a *asserter) UpstreamEndpointStatus(
|
||||||
|
t *testing.T,
|
||||||
|
service *topology.Service,
|
||||||
|
clusterName string,
|
||||||
|
healthStatus string,
|
||||||
|
count int,
|
||||||
|
) {
|
||||||
|
t.Helper()
|
||||||
|
node := service.Node
|
||||||
|
ip := node.LocalAddress()
|
||||||
|
port := service.EnvoyAdminPort
|
||||||
|
addr := fmt.Sprintf("%s:%d", ip, port)
|
||||||
|
|
||||||
|
client := a.mustGetHTTPClient(t, node.Cluster)
|
||||||
|
libassert.AssertUpstreamEndpointStatusWithClient(t, client, addr, clusterName, healthStatus, count)
|
||||||
|
}
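A representative call, taken from the ac6 failover preconditions (the cluster name is whatever clusterPrefix produced for the upstream):

	ct.Assert.UpstreamEndpointStatus(t, client,
		"failover-target~0~ac6-failover-svc.default.dc1.internal", "HEALTHY", 1)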
|
||||||
|
|
||||||
|
// HTTPServiceEchoes verifies that a post to the given ip/port combination
|
||||||
|
// returns the data in the response body. Optional path can be provided to
|
||||||
|
// differentiate requests.
|
||||||
|
//
|
||||||
|
// Exposes libassert.HTTPServiceEchoes for use against a Sprawl.
|
||||||
|
//
|
||||||
|
// NOTE: this takes a port b/c you may want to reach this via your choice of upstream.
|
||||||
|
func (a *asserter) HTTPServiceEchoes(
|
||||||
|
t *testing.T,
|
||||||
|
service *topology.Service,
|
||||||
|
port int,
|
||||||
|
path string,
|
||||||
|
) {
|
||||||
|
t.Helper()
|
||||||
|
require.True(t, port > 0)
|
||||||
|
|
||||||
|
node := service.Node
|
||||||
|
ip := node.LocalAddress()
|
||||||
|
addr := fmt.Sprintf("%s:%d", ip, port)
|
||||||
|
|
||||||
|
client := a.mustGetHTTPClient(t, node.Cluster)
|
||||||
|
libassert.HTTPServiceEchoesWithClient(t, client, addr, path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTPServiceEchoesResHeader verifies that a post to the given ip/port combination
|
||||||
|
// returns the data in the response body with expected response headers.
|
||||||
|
// Optional path can be provided to differentiate requests.
|
||||||
|
//
|
||||||
|
// Exposes libassert.HTTPServiceEchoes for use against a Sprawl.
|
||||||
|
//
|
||||||
|
// NOTE: this takes a port b/c you may want to reach this via your choice of upstream.
|
||||||
|
func (a *asserter) HTTPServiceEchoesResHeader(
|
||||||
|
t *testing.T,
|
||||||
|
service *topology.Service,
|
||||||
|
port int,
|
||||||
|
path string,
|
||||||
|
expectedResHeader map[string]string,
|
||||||
|
) {
|
||||||
|
t.Helper()
|
||||||
|
require.True(t, port > 0)
|
||||||
|
|
||||||
|
node := service.Node
|
||||||
|
ip := node.LocalAddress()
|
||||||
|
addr := fmt.Sprintf("%s:%d", ip, port)
|
||||||
|
|
||||||
|
client := a.mustGetHTTPClient(t, node.Cluster)
|
||||||
|
libassert.HTTPServiceEchoesResHeaderWithClient(t, client, addr, path, expectedResHeader)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *asserter) HTTPStatus(
|
||||||
|
t *testing.T,
|
||||||
|
service *topology.Service,
|
||||||
|
port int,
|
||||||
|
status int,
|
||||||
|
) {
|
||||||
|
t.Helper()
|
||||||
|
require.True(t, port > 0)
|
||||||
|
|
||||||
|
node := service.Node
|
||||||
|
ip := node.LocalAddress()
|
||||||
|
addr := fmt.Sprintf("%s:%d", ip, port)
|
||||||
|
|
||||||
|
client := a.mustGetHTTPClient(t, node.Cluster)
|
||||||
|
|
||||||
|
url := "http://" + addr
|
||||||
|
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: 30 * time.Second, Wait: 500 * time.Millisecond}, t, func(r *retry.R) {
|
||||||
|
resp, err := client.Get(url)
|
||||||
|
if err != nil {
|
||||||
|
r.Fatalf("could not make request to %q: %v", url, err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if resp.StatusCode != status {
|
||||||
|
r.Fatalf("expected status %d, got %d", status, resp.StatusCode)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HealthyWithPeer asserts that the service sid in cluster, exported by the peer peerName, is passing health checks.
|
||||||
|
func (a *asserter) HealthyWithPeer(t *testing.T, cluster string, sid topology.ServiceID, peerName string) {
|
||||||
|
t.Helper()
|
||||||
|
cl := a.mustGetAPIClient(t, cluster)
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: time.Minute * 1, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
|
||||||
|
svcs, _, err := cl.Health().Service(
|
||||||
|
sid.Name,
|
||||||
|
"",
|
||||||
|
true,
|
||||||
|
utils.CompatQueryOpts(&api.QueryOptions{
|
||||||
|
Partition: sid.Partition,
|
||||||
|
Namespace: sid.Namespace,
|
||||||
|
Peer: peerName,
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
require.NoError(r, err)
|
||||||
|
assert.GreaterOrEqual(r, len(svcs), 1)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *asserter) UpstreamEndpointHealthy(t *testing.T, svc *topology.Service, upstream *topology.Upstream) {
|
||||||
|
t.Helper()
|
||||||
|
node := svc.Node
|
||||||
|
ip := node.LocalAddress()
|
||||||
|
port := svc.EnvoyAdminPort
|
||||||
|
addr := fmt.Sprintf("%s:%d", ip, port)
|
||||||
|
|
||||||
|
client := a.mustGetHTTPClient(t, node.Cluster)
|
||||||
|
libassert.AssertUpstreamEndpointStatusWithClient(t,
|
||||||
|
client,
|
||||||
|
addr,
|
||||||
|
// TODO: what is default? namespace? partition?
|
||||||
|
fmt.Sprintf("%s.default.%s.external", upstream.ID.Name, upstream.Peer),
|
||||||
|
"HEALTHY",
|
||||||
|
1,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// fortioFetch2Upstream does a fortio /fetch2 to the given fortio service, targeting the given upstream.
// Returns the body, and the response with response.Body already closed.
|
||||||
|
//
|
||||||
|
// We treat 400, 503, and 504s as retryable errors
|
||||||
|
func (a *asserter) fortioFetch2Upstream(t *testing.T, fortioSvc *topology.Service, upstream *topology.Upstream, path string) (body []byte, res *http.Response) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
// TODO: fortioSvc.ID.Normalize()? or should that be up to the caller?
|
||||||
|
|
||||||
|
node := fortioSvc.Node
|
||||||
|
client := a.mustGetHTTPClient(t, node.Cluster)
|
||||||
|
urlbase := fmt.Sprintf("%s:%d", node.LocalAddress(), fortioSvc.Port)
|
||||||
|
|
||||||
|
url := fmt.Sprintf("http://%s/fortio/fetch2?url=%s", urlbase,
|
||||||
|
url.QueryEscape(fmt.Sprintf("http://localhost:%d/%s", upstream.LocalPort, path)),
|
||||||
|
)
|
||||||
|
|
||||||
|
req, err := http.NewRequest(http.MethodPost, url, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
|
||||||
|
res, err = client.Do(req)
|
||||||
|
require.NoError(r, err)
|
||||||
|
defer res.Body.Close()
|
||||||
|
// not sure when these happen, suspect it's when the mesh gateway in the peer is not yet ready
|
||||||
|
require.NotEqual(r, http.StatusServiceUnavailable, res.StatusCode)
|
||||||
|
require.NotEqual(r, http.StatusGatewayTimeout, res.StatusCode)
|
||||||
|
// not sure when this happens, suspect it's when envoy hasn't configured the local upstream yet
|
||||||
|
require.NotEqual(r, http.StatusBadRequest, res.StatusCode)
|
||||||
|
body, err = io.ReadAll(res.Body)
|
||||||
|
require.NoError(r, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
return body, res
|
||||||
|
}
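Put together, the request issued above has this shape (addresses illustrative); the client-side fortio fetches the upstream's local listener on the caller's behalf:

	// POST http://<client node addr>:<fortio port>/fortio/fetch2?url=
	//      <query-escaped "http://localhost:<upstream.LocalPort>/<path>">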
|
||||||
|
|
||||||
|
// uses the /fortio/fetch2 endpoint to do a header echo check against an
|
||||||
|
// upstream fortio
|
||||||
|
func (a *asserter) FortioFetch2HeaderEcho(t *testing.T, fortioSvc *topology.Service, upstream *topology.Upstream) {
|
||||||
|
const kPassphrase = "x-passphrase"
|
||||||
|
const passphrase = "hello"
|
||||||
|
path := (fmt.Sprintf("/?header=%s:%s", kPassphrase, passphrase))
|
||||||
|
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
|
||||||
|
_, res := a.fortioFetch2Upstream(t, fortioSvc, upstream, path)
|
||||||
|
require.Equal(r, http.StatusOK, res.StatusCode)
|
||||||
|
v := res.Header.Get(kPassphrase)
|
||||||
|
require.Equal(r, passphrase, v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// FortioFetch2FortioName is similar to libassert.AssertFortioName:
// it uses the /fortio/fetch2 endpoint to hit the debug endpoint on the upstream,
// and asserts that FORTIO_NAME matches the expected cluster name and service ID.
|
||||||
|
func (a *asserter) FortioFetch2FortioName(t *testing.T, fortioSvc *topology.Service, upstream *topology.Upstream, clusterName string, sid topology.ServiceID) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
var fortioNameRE = regexp.MustCompile("\nFORTIO_NAME=(.+)\n")
|
||||||
|
path := "/debug?env=dump"
|
||||||
|
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
|
||||||
|
body, res := a.fortioFetch2Upstream(t, fortioSvc, upstream, path)
|
||||||
|
require.Equal(r, http.StatusOK, res.StatusCode)
|
||||||
|
|
||||||
|
// TODO: not sure we should retry these?
|
||||||
|
m := fortioNameRE.FindStringSubmatch(string(body))
|
||||||
|
require.GreaterOrEqual(r, len(m), 2)
|
||||||
|
// TODO: dedupe from NewFortioService
|
||||||
|
require.Equal(r, fmt.Sprintf("%s::%s", clusterName, sid.String()), m[1])
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// CatalogServiceExists is the same as libassert.CatalogServiceExists, except that it uses
|
||||||
|
// a proxied API client
|
||||||
|
func (a *asserter) CatalogServiceExists(t *testing.T, cluster string, svc string, opts *api.QueryOptions) {
|
||||||
|
t.Helper()
|
||||||
|
cl := a.mustGetAPIClient(t, cluster)
|
||||||
|
libassert.CatalogServiceExists(t, cl, svc, opts)
|
||||||
|
}
|
|
@@ -0,0 +1,610 @@
|
||||||
|
package peering
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
"text/tabwriter"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/testing/deployer/sprawl"
|
||||||
|
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
|
||||||
|
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||||
|
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// commonTopo helps create a shareable topology configured to represent
|
||||||
|
// the common denominator between tests.
|
||||||
|
//
|
||||||
|
// Use NewCommonTopo to create.
|
||||||
|
//
|
||||||
|
// Compatible suites should implement sharedTopoSuite.
|
||||||
|
//
|
||||||
|
// Style:
|
||||||
|
// - avoid referencing components using strings, prefer IDs like Service ID, etc.
|
||||||
|
// - avoid passing addresses and ports, etc. Instead, look up components in sprawl.Topology
|
||||||
|
// by ID to find a concrete type, then pass that to helper functions that know which port to use
|
||||||
|
// - minimize the surface area of information passed between setup and test code (via members)
|
||||||
|
// to those that are strictly necessary
|
||||||
|
type commonTopo struct {
|
||||||
|
//
|
||||||
|
Cfg *topology.Config
|
||||||
|
// shortcuts to corresponding entry in Cfg
|
||||||
|
DC1 *topology.Cluster
|
||||||
|
DC2 *topology.Cluster
|
||||||
|
DC3 *topology.Cluster
|
||||||
|
|
||||||
|
// set after Launch. Should be considered read-only
|
||||||
|
Sprawl *sprawl.Sprawl
|
||||||
|
Assert *asserter
|
||||||
|
|
||||||
|
// track per-DC services to prevent duplicates
|
||||||
|
services map[string]map[topology.ServiceID]struct{}
|
||||||
|
}
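The intended lifecycle for a suite that shares this topology (TestAC6Failovers and the ac7 suites above follow it) is roughly:

	ct := NewCommonTopo(t)
	s := &ac6FailoversSuite{} // any suite with setup/test methods against *commonTopo
	s.setup(t, ct)            // register nodes, services, and config entries on ct.Cfg
	ct.Launch(t)              // sprawltest.Launch plus post-launch export checks
	s.test(t, ct)             // assertions against ct.Sprawl and ct.Assert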
|
||||||
|
|
||||||
|
func NewCommonTopo(t *testing.T) *commonTopo {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
ct := commonTopo{}
|
||||||
|
|
||||||
|
// Make 3-server clusters in dc1 and dc2
|
||||||
|
// For simplicity, the Name and Datacenter of the clusters are the same.
|
||||||
|
// dc1 and dc2 should be symmetric.
|
||||||
|
dc1 := clusterWithJustServers("dc1", 3)
|
||||||
|
ct.DC1 = dc1
|
||||||
|
dc2 := clusterWithJustServers("dc2", 3)
|
||||||
|
ct.DC2 = dc2
|
||||||
|
// dc3 is a failover cluster for both dc1 and dc2
|
||||||
|
dc3 := clusterWithJustServers("dc3", 1)
|
||||||
|
// dc3 is only used for certain failover scenarios and does not need tenancies
|
||||||
|
dc3.Partitions = []*topology.Partition{{Name: "default"}}
|
||||||
|
ct.DC3 = dc3
|
||||||
|
|
||||||
|
injectTenancies(dc1)
|
||||||
|
injectTenancies(dc2)
|
||||||
|
// dc3 is only used for certain failover scenarios and does not need tenancies
|
||||||
|
dc3.Partitions = []*topology.Partition{{Name: "default"}}
|
||||||
|
|
||||||
|
ct.services = map[string]map[topology.ServiceID]struct{}{}
|
||||||
|
for _, dc := range []*topology.Cluster{dc1, dc2, dc3} {
|
||||||
|
ct.services[dc.Datacenter] = map[topology.ServiceID]struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
peerings := addPeerings(dc1, dc2)
|
||||||
|
peerings = append(peerings, addPeerings(dc1, dc3)...)
|
||||||
|
peerings = append(peerings, addPeerings(dc2, dc3)...)
|
||||||
|
|
||||||
|
addMeshGateways(dc1, topology.NodeKindClient)
|
||||||
|
addMeshGateways(dc2, topology.NodeKindClient)
|
||||||
|
addMeshGateways(dc3, topology.NodeKindClient)
|
||||||
|
// TODO: consul-topology doesn't support this yet
|
||||||
|
// addMeshGateways(dc2, topology.NodeKindDataplane)
|
||||||
|
|
||||||
|
setupGlobals(dc1)
|
||||||
|
setupGlobals(dc2)
|
||||||
|
setupGlobals(dc3)
|
||||||
|
|
||||||
|
// Build final configuration
|
||||||
|
ct.Cfg = &topology.Config{
|
||||||
|
Images: utils.TargetImages(),
|
||||||
|
Networks: []*topology.Network{
|
||||||
|
{Name: dc1.Datacenter}, // "dc1" LAN
|
||||||
|
{Name: dc2.Datacenter}, // "dc2" LAN
|
||||||
|
{Name: dc3.Datacenter}, // "dc3" LAN
|
||||||
|
{Name: "wan", Type: "wan"},
|
||||||
|
},
|
||||||
|
Clusters: []*topology.Cluster{
|
||||||
|
dc1,
|
||||||
|
dc2,
|
||||||
|
dc3,
|
||||||
|
},
|
||||||
|
Peerings: peerings,
|
||||||
|
}
|
||||||
|
return &ct
|
||||||
|
}
|
||||||
|
|
||||||
|
// Launch calls sprawltest.Launch followed by ct.postLaunchChecks.
|
||||||
|
func (ct *commonTopo) Launch(t *testing.T) {
|
||||||
|
if ct.Sprawl != nil {
|
||||||
|
t.Fatalf("Launch must only be called once")
|
||||||
|
}
|
||||||
|
ct.Sprawl = sprawltest.Launch(t, ct.Cfg)
|
||||||
|
|
||||||
|
ct.Assert = newAsserter(ct.Sprawl)
|
||||||
|
ct.postLaunchChecks(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// tests that use Relaunch might want to call this again afterwards
|
||||||
|
func (ct *commonTopo) postLaunchChecks(t *testing.T) {
|
||||||
|
t.Logf("TESTING RELATIONSHIPS: \n%s",
|
||||||
|
renderRelationships(computeRelationships(ct.Sprawl.Topology())),
|
||||||
|
)
|
||||||
|
|
||||||
|
// check that exports line up as expected
|
||||||
|
for _, clu := range ct.Sprawl.Config().Clusters {
|
||||||
|
// expected exports per peer
|
||||||
|
type key struct {
|
||||||
|
peer string
|
||||||
|
partition string
|
||||||
|
namespace string
|
||||||
|
}
|
||||||
|
eepp := map[key]int{}
|
||||||
|
for _, e := range clu.InitialConfigEntries {
|
||||||
|
if e.GetKind() == api.ExportedServices {
|
||||||
|
asExport := e.(*api.ExportedServicesConfigEntry)
|
||||||
|
// do we care about the partition?
|
||||||
|
for _, svc := range asExport.Services {
|
||||||
|
for _, con := range svc.Consumers {
|
||||||
|
// do we care about con.Partition?
|
||||||
|
// TODO: surely there is code to normalize this
|
||||||
|
partition := asExport.Partition
|
||||||
|
if partition == "" {
|
||||||
|
partition = "default"
|
||||||
|
}
|
||||||
|
namespace := svc.Namespace
|
||||||
|
if namespace == "" {
|
||||||
|
namespace = "default"
|
||||||
|
}
|
||||||
|
eepp[key{peer: con.Peer, partition: partition, namespace: namespace}] += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cl := ct.APIClientForCluster(t, clu)
|
||||||
|
// TODO: these could probably be done in parallel
|
||||||
|
for k, v := range eepp {
|
||||||
|
retry.RunWith(&retry.Timer{Timeout: 30 * time.Second, Wait: 500 * time.Millisecond}, t, func(r *retry.R) {
|
||||||
|
peering, _, err := cl.Peerings().Read(context.Background(), k.peer, utils.CompatQueryOpts(&api.QueryOptions{
|
||||||
|
Partition: k.partition,
|
||||||
|
Namespace: k.namespace,
|
||||||
|
}))
|
||||||
|
require.Nil(r, err, "reading peering data")
|
||||||
|
require.NotNilf(r, peering, "peering not found %q", k.peer)
|
||||||
|
assert.Len(r, peering.StreamStatus.ExportedServices, v, "peering exported services")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.Failed() {
|
||||||
|
t.Fatal("failing fast: post-Launch assertions failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LocalPeerName is how you'd address a remote dc+partition locally
// as your peer name.
|
||||||
|
func LocalPeerName(clu *topology.Cluster, partition string) string {
|
||||||
|
return fmt.Sprintf("peer-%s-%s", clu.Datacenter, partition)
|
||||||
|
}
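For example:

	LocalPeerName(dc2, "part1")   // "peer-dc2-part1"
	LocalPeerName(dc1, "default") // "peer-dc1-default"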
|
||||||
|
|
||||||
|
// TODO: move these to topology
|
||||||
|
// TODO: alternatively, delete it: we only use it in one place, to bundle up args
|
||||||
|
type serviceExt struct {
|
||||||
|
*topology.Service
|
||||||
|
|
||||||
|
// default NodeKindClient
|
||||||
|
NodeKind topology.NodeKind
|
||||||
|
|
||||||
|
Exports []api.ServiceConsumer
|
||||||
|
Config *api.ServiceConfigEntry
|
||||||
|
Intentions *api.ServiceIntentionsConfigEntry
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ct *commonTopo) AddServiceNode(clu *topology.Cluster, svc serviceExt) *topology.Node {
|
||||||
|
clusterName := clu.Name
|
||||||
|
if _, ok := ct.services[clusterName][svc.ID]; ok {
|
||||||
|
panic(fmt.Sprintf("duplicate service %q in cluster %q", svc.ID, clusterName))
|
||||||
|
}
|
||||||
|
ct.services[clusterName][svc.ID] = struct{}{}
|
||||||
|
|
||||||
|
// TODO: inline
|
||||||
|
serviceHostnameString := func(dc string, id topology.ServiceID) string {
|
||||||
|
n := id.Name
|
||||||
|
// prepend <namespace>- and <partition>- if they are not default/empty
|
||||||
|
// avoids hostname limit of 63 chars in most cases
|
||||||
|
// TODO: this obviously isn't scalable
|
||||||
|
if id.Namespace != "default" && id.Namespace != "" {
|
||||||
|
n = id.Namespace + "-" + n
|
||||||
|
}
|
||||||
|
if id.Partition != "default" && id.Partition != "" {
|
||||||
|
n = id.Partition + "-" + n
|
||||||
|
}
|
||||||
|
n = dc + "-" + n
|
||||||
|
// TODO: experimentally, when this is larger than 63, docker can't start
|
||||||
|
// the host. confirmed by internet rumor https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27763
|
||||||
|
if len(n) > 63 {
|
||||||
|
panic(fmt.Sprintf("docker hostname must not be longer than 63 chars: %q", n))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
node := &topology.Node{
|
||||||
|
Kind: topology.NodeKindClient,
|
||||||
|
Name: serviceHostnameString(clu.Datacenter, svc.ID),
|
||||||
|
Partition: svc.ID.Partition,
|
||||||
|
Addresses: []*topology.Address{
|
||||||
|
{Network: clu.Datacenter},
|
||||||
|
},
|
||||||
|
Services: []*topology.Service{
|
||||||
|
svc.Service,
|
||||||
|
},
|
||||||
|
Cluster: clusterName,
|
||||||
|
}
|
||||||
|
if svc.NodeKind != "" {
|
||||||
|
node.Kind = svc.NodeKind
|
||||||
|
}
|
||||||
|
clu.Nodes = append(clu.Nodes, node)
|
||||||
|
|
||||||
|
// Export if necessary
|
||||||
|
if len(svc.Exports) > 0 {
|
||||||
|
ct.ExportService(clu, svc.ID.Partition, api.ExportedService{
|
||||||
|
Name: svc.ID.Name,
|
||||||
|
Namespace: svc.ID.Namespace,
|
||||||
|
Consumers: svc.Exports,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add any config entries
|
||||||
|
if svc.Config != nil {
|
||||||
|
clu.InitialConfigEntries = append(clu.InitialConfigEntries, svc.Config)
|
||||||
|
}
|
||||||
|
if svc.Intentions != nil {
|
||||||
|
clu.InitialConfigEntries = append(clu.InitialConfigEntries, svc.Intentions)
|
||||||
|
}
|
||||||
|
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ct *commonTopo) APIClientForCluster(t *testing.T, clu *topology.Cluster) *api.Client {
|
||||||
|
cl, err := ct.Sprawl.APIClientForNode(clu.Name, clu.FirstClient().ID(), "")
|
||||||
|
require.NoError(t, err)
|
||||||
|
return cl
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExportService looks for an existing ExportedServicesConfigEntry for the given partition
|
||||||
|
// and inserts svcs. If none is found, it inserts a new ExportedServicesConfigEntry.
|
||||||
|
func (ct *commonTopo) ExportService(clu *topology.Cluster, partition string, svcs ...api.ExportedService) {
|
||||||
|
var found bool
|
||||||
|
for _, ce := range clu.InitialConfigEntries {
|
||||||
|
// We check Name because it must be "default" in OSS whereas Partition will be "".
|
||||||
|
if ce.GetKind() == api.ExportedServices && ce.GetName() == partition {
|
||||||
|
found = true
|
||||||
|
e := ce.(*api.ExportedServicesConfigEntry)
|
||||||
|
e.Services = append(e.Services, svcs...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
clu.InitialConfigEntries = append(clu.InitialConfigEntries,
|
||||||
|
&api.ExportedServicesConfigEntry{
|
||||||
|
Name: partition, // this NEEDs to be "default" in OSS
|
||||||
|
Partition: ConfigEntryPartition(partition),
|
||||||
|
Services: svcs,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
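A typical pre-launch call, as made by the ac6 setup above (service and peer names shown for illustration):

	ct.ExportService(clu, "default", api.ExportedService{
		Name: "ac6-failover-svc",
		Consumers: []api.ServiceConsumer{
			{Peer: "peer-dc2-default"},
		},
	})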
|
||||||
|
|
||||||
|
func (ct *commonTopo) ClusterByDatacenter(t *testing.T, name string) *topology.Cluster {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
for _, clu := range ct.Cfg.Clusters {
|
||||||
|
if clu.Datacenter == name {
|
||||||
|
return clu
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t.Fatalf("cluster %q not found", name)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Since OSS config entries do not contain the partition field,
|
||||||
|
// this func converts default partition to empty string.
|
||||||
|
func ConfigEntryPartition(p string) string {
|
||||||
|
if p == "default" {
|
||||||
|
return "" // make this OSS friendly
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
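So partitions are written the same way for OSS and enterprise, for example:

	ConfigEntryPartition("default") // "" (field omitted, OSS-friendly)
	ConfigEntryPartition("part1")   // "part1"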
|
||||||
|
|
||||||
|
// DisableNode disables the named node in cfg; it is a no-op if the node is already disabled.
|
||||||
|
func DisableNode(t *testing.T, cfg *topology.Config, clusterName string, nid topology.NodeID) *topology.Config {
|
||||||
|
nodes := cfg.Cluster(clusterName).Nodes
|
||||||
|
var found bool
|
||||||
|
for _, n := range nodes {
|
||||||
|
if n.ID() == nid {
|
||||||
|
found = true
|
||||||
|
if n.Disabled {
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
t.Logf("disabling node %s in cluster %s", nid.String(), clusterName)
|
||||||
|
n.Disabled = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
require.True(t, found, "expected to find nodeID %q in cluster %q", nid.String(), clusterName)
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnableNode enables the named node in cfg; it is a no-op if the node is already enabled.
|
||||||
|
func EnableNode(t *testing.T, cfg *topology.Config, clusterName string, nid topology.NodeID) *topology.Config {
|
||||||
|
nodes := cfg.Cluster(clusterName).Nodes
|
||||||
|
var found bool
|
||||||
|
for _, n := range nodes {
|
||||||
|
if n.ID() == nid {
|
||||||
|
found = true
|
||||||
|
if !n.Disabled {
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
|
t.Logf("enabling node %s in cluster %s", nid.String(), clusterName)
|
||||||
|
n.Disabled = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
require.True(t, found, "expected to find nodeID %q in cluster %q", nid.String(), clusterName)
|
||||||
|
return cfg
|
||||||
|
}
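The failover suites drive these helpers through Relaunch; the pattern used in ac6FailoversSuite.test above is roughly (serverNodeID stands for the node ID recorded during setup):

	cfg := ct.Sprawl.Config()
	DisableNode(t, cfg, clu.Name, serverNodeID) // take the local server away
	require.NoError(t, ct.Sprawl.Relaunch(cfg)) // traffic should now fail over
	// ... assert failover behaviour ...
	// and in t.Cleanup: EnableNode plus Relaunch to restore state for the next case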
|
||||||
|
|
||||||
|
func setupGlobals(clu *topology.Cluster) {
|
||||||
|
for _, part := range clu.Partitions {
|
||||||
|
clu.InitialConfigEntries = append(clu.InitialConfigEntries,
|
||||||
|
&api.ProxyConfigEntry{
|
||||||
|
Name: api.ProxyConfigGlobal,
|
||||||
|
Kind: api.ProxyDefaults,
|
||||||
|
Partition: ConfigEntryPartition(part.Name),
|
||||||
|
MeshGateway: api.MeshGatewayConfig{
|
||||||
|
// Although we define service-defaults for most upstreams in
|
||||||
|
// this test suite, failover tests require a global mode
|
||||||
|
// because the default for peered targets is MeshGatewayModeRemote.
|
||||||
|
Mode: api.MeshGatewayModeLocal,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addMeshGateways adds a mesh gateway for every partition in the cluster.
|
||||||
|
// Assumes that the LAN network name is equal to datacenter name.
|
||||||
|
func addMeshGateways(c *topology.Cluster, kind topology.NodeKind) {
|
||||||
|
for _, p := range c.Partitions {
|
||||||
|
c.Nodes = topology.MergeSlices(c.Nodes, newTopologyMeshGatewaySet(
|
||||||
|
kind,
|
||||||
|
p.Name,
|
||||||
|
fmt.Sprintf("%s-%s-mgw", c.Name, p.Name),
|
||||||
|
1,
|
||||||
|
[]string{c.Datacenter, "wan"},
|
||||||
|
nil,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func clusterWithJustServers(name string, numServers int) *topology.Cluster {
|
||||||
|
return &topology.Cluster{
|
||||||
|
Enterprise: utils.IsEnterprise(),
|
||||||
|
Name: name,
|
||||||
|
Datacenter: name,
|
||||||
|
Nodes: newTopologyServerSet(
|
||||||
|
name+"-server",
|
||||||
|
numServers,
|
||||||
|
[]string{name, "wan"},
|
||||||
|
nil,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func addPeerings(acc *topology.Cluster, dial *topology.Cluster) []*topology.Peering {
	peerings := []*topology.Peering{}
	for _, accPart := range acc.Partitions {
		for _, dialPart := range dial.Partitions {
			peerings = append(peerings, &topology.Peering{
				Accepting: topology.PeerCluster{
					Name:      acc.Datacenter,
					Partition: accPart.Name,
					PeerName:  LocalPeerName(dial, dialPart.Name),
				},
				Dialing: topology.PeerCluster{
					Name:      dial.Datacenter,
					Partition: dialPart.Name,
					PeerName:  LocalPeerName(acc, accPart.Name),
				},
			})
		}
	}
	return peerings
}

func injectTenancies(clu *topology.Cluster) {
	if !utils.IsEnterprise() {
		clu.Partitions = []*topology.Partition{
			{
				Name: "default",
				Namespaces: []string{
					"default",
				},
			},
		}
		return
	}

	for _, part := range []string{"default", "part1"} {
		clu.Partitions = append(clu.Partitions,
			&topology.Partition{
				Name: part,
				Namespaces: []string{
					"default",
					"ns1",
				},
			},
		)
	}
}

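// A hypothetical sketch of how the helpers above compose into one cluster;
// NewCommonTopo does a fuller version of this per datacenter. Using
// topology.NodeKindClient for the gateway nodes is an assumption made only
// for this example.
func exampleCluster() *topology.Cluster {
	dc1 := clusterWithJustServers("dc1", 3)
	injectTenancies(dc1) // partitions/namespaces must exist before the next two steps
	setupGlobals(dc1)    // proxy-defaults per partition
	addMeshGateways(dc1, topology.NodeKindClient)
	return dc1
}
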
func newTopologyServerSet(
	namePrefix string,
	num int,
	networks []string,
	mutateFn func(i int, node *topology.Node),
) []*topology.Node {
	var out []*topology.Node
	for i := 1; i <= num; i++ {
		name := namePrefix + strconv.Itoa(i)

		node := &topology.Node{
			Kind: topology.NodeKindServer,
			Name: name,
		}
		for _, net := range networks {
			node.Addresses = append(node.Addresses, &topology.Address{Network: net})
		}

		if mutateFn != nil {
			mutateFn(i, node)
		}

		out = append(out, node)
	}
	return out
}

func newTopologyMeshGatewaySet(
	nodeKind topology.NodeKind,
	partition string,
	namePrefix string,
	num int,
	networks []string,
	mutateFn func(i int, node *topology.Node),
) []*topology.Node {
	var out []*topology.Node
	for i := 1; i <= num; i++ {
		name := namePrefix + strconv.Itoa(i)

		node := &topology.Node{
			Kind:      nodeKind,
			Partition: partition,
			Name:      name,
			Services: []*topology.Service{{
				ID:             topology.ServiceID{Name: "mesh-gateway"},
				Port:           8443,
				EnvoyAdminPort: 19000,
				IsMeshGateway:  true,
			}},
		}
		for _, net := range networks {
			node.Addresses = append(node.Addresses, &topology.Address{Network: net})
		}

		if mutateFn != nil {
			mutateFn(i, node)
		}

		out = append(out, node)
	}
	return out
}

const HashicorpDockerProxy = "docker.mirror.hashicorp.services"

func NewFortioServiceWithDefaults(
	cluster string,
	sid topology.ServiceID,
	mut func(s *topology.Service),
) *topology.Service {
	const (
		httpPort  = 8080
		grpcPort  = 8079
		adminPort = 19000
	)
	sid.Normalize()

	svc := &topology.Service{
		ID:             sid,
		Image:          HashicorpDockerProxy + "/fortio/fortio",
		Port:           httpPort,
		EnvoyAdminPort: adminPort,
		CheckTCP:       "127.0.0.1:" + strconv.Itoa(httpPort),
		Env: []string{
			"FORTIO_NAME=" + cluster + "::" + sid.String(),
		},
		Command: []string{
			"server",
			"-http-port", strconv.Itoa(httpPort),
			"-grpc-port", strconv.Itoa(grpcPort),
			"-redirect-port", "-disabled",
		},
	}
	if mut != nil {
		mut(svc)
	}
	return svc
}

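// A hypothetical sketch: a fortio client service with one upstream on a fixed
// local port. The service names and port are made up; the Upstream fields
// used here (ID, LocalPort) are the same ones read by computeRelationships below.
func exampleClientService() *topology.Service {
	return NewFortioServiceWithDefaults(
		"dc1",
		topology.ServiceID{Name: "ac1-static-client"},
		func(s *topology.Service) {
			s.Upstreams = []*topology.Upstream{{
				ID:        topology.ServiceID{Name: "ac1-static-server"},
				LocalPort: 5000,
			}}
		},
	)
}
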
// computeRelationships analyzes a full topology and generates the
// downstream/upstream relationship for every service upstream it contains.
func computeRelationships(topo *topology.Topology) []Relationship {
	var out []Relationship
	for _, cluster := range topo.Clusters {
		for _, n := range cluster.Nodes {
			for _, s := range n.Services {
				for _, u := range s.Upstreams {
					out = append(out, Relationship{
						Caller:   s,
						Upstream: u,
					})
				}
			}
		}
	}
	return out
}

// renderRelationships takes the output of computeRelationships and displays
// it in tabular form.
func renderRelationships(ships []Relationship) string {
	var buf bytes.Buffer
	w := tabwriter.NewWriter(&buf, 0, 0, 3, ' ', tabwriter.Debug)
	fmt.Fprintf(w, "DOWN\tnode\tservice\tport\tUP\tservice\t\n")
	for _, r := range ships {
		fmt.Fprintf(w,
			"%s\t%s\t%s\t%d\t%s\t%s\t\n",
			r.downCluster(),
			r.Caller.Node.ID().String(),
			r.Caller.ID.String(),
			r.Upstream.LocalPort,
			r.upCluster(),
			r.Upstream.ID.String(),
		)
	}
	fmt.Fprintf(w, "\t\t\t\t\t\t\n")

	w.Flush()
	return buf.String()
}

type Relationship struct {
	Caller   *topology.Service
	Upstream *topology.Upstream
}

func (r Relationship) String() string {
	return fmt.Sprintf(
		"%s on %s in %s via :%d => %s in %s",
		r.Caller.ID.String(),
		r.Caller.Node.ID().String(),
		r.downCluster(),
		r.Upstream.LocalPort,
		r.Upstream.ID.String(),
		r.upCluster(),
	)
}

func (r Relationship) downCluster() string {
	return r.Caller.Node.Cluster
}

func (r Relationship) upCluster() string {
	return r.Upstream.Cluster
}

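// A small usage sketch: log every downstream/upstream pair of a built
// topology so that test failures are easier to read. Everything referenced
// here is defined above in this file.
func logRelationships(t *testing.T, topo *topology.Topology) {
	ships := computeRelationships(topo)
	t.Log("\n" + renderRelationships(ships))
	for _, r := range ships {
		t.Log(r.String())
	}
}
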
@ -0,0 +1,82 @@
package peering

import (
	"flag"
	"testing"
)

// Tests that use commonTopo should implement sharedTopoSuite.
//
// Tests that use commonTopo are either cooperative or non-cooperative.
// Non-cooperative uses of commonTopo are anything that may interfere with
// other tests, namely mutations, such as:
// - any calls to commonTopo.Relaunch; this is generally disruptive to other tests
// - stopping or disabling nodes
// - ...
//
// Cooperative tests should just call testFuncMayReuseCommonTopo() to ensure they
// are run in the correct `sharetopo` mode. They should also ensure they are included
// in the sharedTopoSuites slice in TestSuitesOnSharedTopo.
type sharedTopoSuite interface {
	testName() string
	setup(*testing.T, *commonTopo)
	test(*testing.T, *commonTopo)
}

var flagNoShareTopo = flag.Bool("no-share-topo", false, "do not share topology; run each test in its own isolated topology")

func runShareableSuites(t *testing.T, suites []sharedTopoSuite) {
	t.Helper()
	if !*flagNoShareTopo {
		names := []string{}
		for _, s := range suites {
			names = append(names, s.testName())
		}
		t.Skipf(`Will run as part of "TestSuitesOnSharedTopo": %v`, names)
	}
	ct := NewCommonTopo(t)
	for _, s := range suites {
		s.setup(t, ct)
	}
	ct.Launch(t)
	for _, s := range suites {
		s := s
		t.Run(s.testName(), func(t *testing.T) {
			t.Parallel()
			s.test(t, ct)
		})
	}
}

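// A hypothetical sketch of a cooperative suite and its per-file entry point;
// the ac99 names do not exist in this change. setup must register everything
// the suite needs before the shared topology launches, and test should only
// read state so it cannot interfere with other suites.
type ac99ExampleSuite struct{}

var ac99ExampleSuites = []sharedTopoSuite{&ac99ExampleSuite{}}

func (s *ac99ExampleSuite) testName() string { return "ac99Example" }

func (s *ac99ExampleSuite) setup(t *testing.T, ct *commonTopo) {
	// add services, exported-service entries, etc. to ct here
}

func (s *ac99ExampleSuite) test(t *testing.T, ct *commonTopo) {
	// read-only assertions against the launched topology
}

func TestAC99Example(t *testing.T) {
	runShareableSuites(t, ac99ExampleSuites)
}
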
// TestSuitesOnSharedTopo runs all tests that can share a topology. Such tests
// must implement sharedTopoSuite and be appended to the sharedTopoSuites slice
// built inside this function.
func TestSuitesOnSharedTopo(t *testing.T) {
	if *flagNoShareTopo {
		t.Skip(`shared topo suites disabled by -no-share-topo`)
	}
	ct := NewCommonTopo(t)

	sharedTopoSuites := []sharedTopoSuite{}
	sharedTopoSuites = append(sharedTopoSuites, ac1BasicSuites...)
	sharedTopoSuites = append(sharedTopoSuites, ac2DiscoChainSuites...)
	sharedTopoSuites = append(sharedTopoSuites, ac3SvcDefaultsSuites...)
	sharedTopoSuites = append(sharedTopoSuites, ac4ProxyDefaultsSuites...)
	sharedTopoSuites = append(sharedTopoSuites, ac5_1NoSvcMeshSuites...)

	for _, s := range sharedTopoSuites {
		s.setup(t, ct)
	}
	ct.Launch(t)
	for _, s := range sharedTopoSuites {
		s := s
		t.Run(s.testName(), func(t *testing.T) {
			t.Parallel()
			s.test(t, ct)
		})
	}
}

func TestCommonTopologySetup(t *testing.T) {
	ct := NewCommonTopo(t)
	ct.Launch(t)
}

@ -13,6 +13,7 @@ require (
 	github.com/hashicorp/consul/envoyextensions v0.3.0-rc1
 	github.com/hashicorp/consul/proto-public v0.4.0-rc1
 	github.com/hashicorp/consul/sdk v0.14.0-rc1
+	github.com/hashicorp/consul/testing/deployer v0.0.0-00010101000000-000000000000
 	github.com/hashicorp/go-cleanhttp v0.5.2
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/hashicorp/go-uuid v1.0.3
@ -84,7 +85,7 @@ require (
 	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
 	github.com/sirupsen/logrus v1.9.0 // indirect
 	github.com/stretchr/objx v0.5.0 // indirect
-	golang.org/x/crypto v0.1.0 // indirect
+	golang.org/x/crypto v0.7.0 // indirect
 	golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
 	golang.org/x/net v0.10.0 // indirect
 	golang.org/x/sync v0.2.0 // indirect
@ -102,4 +103,5 @@ replace (
 	github.com/hashicorp/consul/envoyextensions => ../../../envoyextensions
 	github.com/hashicorp/consul/proto-public => ../../../proto-public
 	github.com/hashicorp/consul/sdk => ../../../sdk
+	github.com/hashicorp/consul/testing/deployer => ../../../testing/deployer
 )

@ -311,8 +311,8 @@ golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
-golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
 golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=

@ -14,11 +14,12 @@ import (
 	"testing"
 	"time"

-	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/go-cleanhttp"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

+	"github.com/hashicorp/consul/sdk/testutil/retry"
+
 	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
 )

@ -113,7 +114,7 @@ func AssertUpstreamEndpointStatusWithClient(
 		| length`,
 		clusterName, healthStatus)
 	results, err := utils.JQFilter(clusters, filter)
-	require.NoErrorf(r, err, "could not found cluster name %s in \n%s", clusterName, clusters)
+	require.NoErrorf(r, err, "could not found cluster name %q: %v \n%s", clusterName, err, clusters)
 	require.Len(r, results, 1) // the final part of the pipeline is "length" which only ever returns 1 result

 	result, err := strconv.Atoi(results[0])

@ -10,6 +10,8 @@ import (

 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 // PeeringStatus verifies the peering connection is the specified state with a default retry.
@ -52,14 +54,8 @@ func PeeringExportsOpts(t *testing.T, client *api.Client, peerName string, expor

 	retry.RunWith(failer(), t, func(r *retry.R) {
 		peering, _, err := client.Peerings().Read(context.Background(), peerName, opts)
-		if err != nil {
-			r.Fatal("error reading peering data")
-		}
-		if peering == nil {
-			r.Fatal("peering not found")
-		}
-		if exports != len(peering.StreamStatus.ExportedServices) {
-			r.Fatal("peering exported services did not match: got ", len(peering.StreamStatus.ExportedServices), " want ", exports)
-		}
+		require.Nil(r, err, "reading peering data")
+		require.NotNilf(r, peering, "peering not found %q", peerName)
+		assert.Len(r, peering.StreamStatus.ExportedServices, exports, "peering exported services")
 	})
 }

@ -34,7 +34,7 @@ func CatalogServiceExists(t *testing.T, c *api.Client, svc string, opts *api.Que
 			r.Fatal("error reading service data")
 		}
 		if len(services) == 0 {
-			r.Fatal("did not find catalog entry for ", svc)
+			r.Fatalf("did not find catalog entry for %q with opts %#v", svc, opts)
 		}
 	})
 }

@ -22,11 +22,11 @@ func DefaultToEmpty(name string) string {
 	return name
 }

-// PartitionQueryOptions returns an *api.QueryOptions with the given partition
-// field set only if the partition is non-default. This helps when writing
-// tests for joint use in OSS and ENT.
-func PartitionQueryOptions(partition string) *api.QueryOptions {
-	return &api.QueryOptions{
-		Partition: DefaultToEmpty(partition),
-	}
+// CompatQueryOpts cleans a QueryOptions so that Partition and Namespace fields
+// are compatible with OSS or ENT
+// TODO: not sure why we can't do this server-side
+func CompatQueryOpts(opts *api.QueryOptions) *api.QueryOptions {
+	opts.Partition = DefaultToEmpty(opts.Partition)
+	opts.Namespace = DefaultToEmpty(opts.Namespace)
+	return opts
 }

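// A hypothetical sketch of calling the renamed helper from a test; the peer
// and service names are made up, and using api.QueryOptions.Peer for a peered
// catalog query is an assumption, not something this change introduces.
func exampleCompatQuery(client *api.Client) error {
	opts := CompatQueryOpts(&api.QueryOptions{
		Partition: "default", // becomes "" so the query also works in OSS
		Namespace: "default",
		Peer:      "dc2-default",
	})
	_, _, err := client.Catalog().Service("ac1-static-server", "", opts)
	return err
}
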
@ -7,6 +7,7 @@ import (
 	"flag"
 	"strings"

+	"github.com/hashicorp/consul/testing/deployer/topology"
 	"github.com/hashicorp/go-version"
 )

@ -55,6 +56,20 @@ func GetLatestImageName() string {
 	return LatestImageName
 }

+func TargetImages() topology.Images {
+	img := DockerImage(targetImageName, TargetVersion)
+
+	if IsEnterprise() {
+		return topology.Images{
+			ConsulEnterprise: img,
+		}
+	} else {
+		return topology.Images{
+			ConsulOSS: img,
+		}
+	}
+}
+
 func IsEnterprise() bool { return isInEnterpriseRepo }

 func DockerImage(image, version string) string {

@ -73,8 +73,8 @@ func TestTrafficManagement_ResolverDefaultSubset(t *testing.T) {
 	assertionFn := func() {
 		_, serverAdminPortV1 := serverConnectProxyV1.GetAdminAddr()
 		_, serverAdminPortV2 := serverConnectProxyV2.GetAdminAddr()
-		_, adminPort := staticClientProxy.GetAdminAddr()
-		_, port := staticClientProxy.GetAddr()
+		_, adminPort := staticClientProxy.GetAdminAddr() // httpPort
+		_, port := staticClientProxy.GetAddr()           // EnvoyAdminPort

 		libassert.AssertEnvoyRunning(t, serverAdminPortV1)
 		libassert.AssertEnvoyRunning(t, serverAdminPortV2)