diff --git a/.changelog/11500.txt b/.changelog/11500.txt new file mode 100644 index 0000000000..b19fe8a52d --- /dev/null +++ b/.changelog/11500.txt @@ -0,0 +1,4 @@ +```release-note:bugfix +rpc: Adds a deadline to client RPC calls, so that streams will no longer hang +indefinitely in unstable network conditions. [[GH-8504](https://github.com/hashicorp/consul/issues/8504)] +``` \ No newline at end of file diff --git a/.changelog/12805.txt b/.changelog/12805.txt new file mode 100644 index 0000000000..22f5a52466 --- /dev/null +++ b/.changelog/12805.txt @@ -0,0 +1,3 @@ +```release-note:improvement +connect: Add Envoy 1.22.0 to support matrix, remove 1.18.6 +``` diff --git a/.changelog/12807.txt b/.changelog/12807.txt new file mode 100644 index 0000000000..9d2005a036 --- /dev/null +++ b/.changelog/12807.txt @@ -0,0 +1,3 @@ +```release-note:improvement +acl: Clarify node/service identities must be lowercase +``` diff --git a/.changelog/12808.txt b/.changelog/12808.txt new file mode 100644 index 0000000000..d2f6257bc8 --- /dev/null +++ b/.changelog/12808.txt @@ -0,0 +1,3 @@ +```release-note:note +dependency: Upgrade to use Go 1.18.1 +``` diff --git a/.changelog/12819.txt b/.changelog/12819.txt new file mode 100644 index 0000000000..c98b59c3cd --- /dev/null +++ b/.changelog/12819.txt @@ -0,0 +1,3 @@ +```release-note:improvement +grpc: Add a new ServerDiscovery.WatchServers gRPC endpoint for being notified when the set of ready servers has changed. 
+``` \ No newline at end of file diff --git a/.changelog/12820.txt b/.changelog/12820.txt new file mode 100644 index 0000000000..af5533b77d --- /dev/null +++ b/.changelog/12820.txt @@ -0,0 +1,3 @@ +```release-note:bug +ca: fix a bug that caused a non blocking leaf cert query after a blocking leaf cert query to block +``` diff --git a/.changelog/12825.txt b/.changelog/12825.txt new file mode 100644 index 0000000000..95a0184fa2 --- /dev/null +++ b/.changelog/12825.txt @@ -0,0 +1,3 @@ +```release-note:feature +grpc: New gRPC endpoint to return envoy bootstrap parameters. +``` \ No newline at end of file diff --git a/.changelog/12844.txt b/.changelog/12844.txt new file mode 100644 index 0000000000..477193881a --- /dev/null +++ b/.changelog/12844.txt @@ -0,0 +1,3 @@ +```release-note:bug +raft: upgrade to v1.3.8 which fixes a bug where a non-cluster member was still able to participate in an election. +``` diff --git a/.changelog/12846.txt b/.changelog/12846.txt new file mode 100644 index 0000000000..b3917780fe --- /dev/null +++ b/.changelog/12846.txt @@ -0,0 +1,3 @@ +```release-note:note +ci: change action to pull v1 instead of main +``` diff --git a/.changelog/_12855.txt b/.changelog/_12855.txt new file mode 100644 index 0000000000..e8cfd87175 --- /dev/null +++ b/.changelog/_12855.txt @@ -0,0 +1,3 @@ +```release-note:bug +snapshot-agent: **(Enterprise only)** Fix a bug where providing the ACL token to the snapshot agent via a CLI or ENV variable without a license configured results in an error during license auto-retrieval. +``` diff --git a/.changelog/_1679.txt b/.changelog/_1679.txt new file mode 100644 index 0000000000..d7f5241273 --- /dev/null +++ b/.changelog/_1679.txt @@ -0,0 +1,3 @@ +```release-note:breaking-change +config-entry: Exporting a specific service name across all namespaces is invalid. 
+``` \ No newline at end of file diff --git a/.circleci/config.yml b/.circleci/config.yml index 005deefb14..698fe9585c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -12,18 +12,8 @@ parameters: description: "Boolean whether to run the load test workflow" references: - images: - # When updating the Go version, remember to also update the versions in the - # workflows section for go-test-lib jobs. - go: &GOLANG_IMAGE docker.mirror.hashicorp.services/cimg/go:1.17.5 - ember: &EMBER_IMAGE docker.mirror.hashicorp.services/circleci/node:14-browsers - paths: test-results: &TEST_RESULTS_DIR /tmp/test-results - - cache: - yarn: &YARN_CACHE_KEY consul-ui-v7-{{ checksum "ui/yarn.lock" }} - environment: &ENVIRONMENT TEST_RESULTS_DIR: *TEST_RESULTS_DIR EMAIL: noreply@hashicorp.com @@ -32,6 +22,14 @@ references: S3_ARTIFACT_BUCKET: consul-dev-artifacts-v2 BASH_ENV: .circleci/bash_env.sh VAULT_BINARY_VERSION: 1.9.4 + GO_VERSION: 1.18.1 + images: + # When updating the Go version, remember to also update the versions in the + # workflows section for go-test-lib jobs. 
+ go: &GOLANG_IMAGE docker.mirror.hashicorp.services/cimg/go:1.18.1 + ember: &EMBER_IMAGE docker.mirror.hashicorp.services/circleci/node:14-browsers + cache: + yarn: &YARN_CACHE_KEY consul-ui-v7-{{ checksum "ui/yarn.lock" }} steps: install-gotestsum: &install-gotestsum @@ -188,7 +186,7 @@ jobs: name: Install golangci-lint command: | download=https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh - wget -O- -q $download | sh -x -s -- -d -b /home/circleci/go/bin v1.40.1 + wget -O- -q $download | sh -x -s -- -d -b /home/circleci/go/bin v1.45.2 - run: go mod download - run: name: lint @@ -257,8 +255,8 @@ jobs: - run: command: | sudo rm -rf /usr/local/go - wget https://golang.org/dl/go1.17.5.linux-arm64.tar.gz - sudo tar -C /usr/local -xzvf go1.17.5.linux-arm64.tar.gz + wget https://dl.google.com/go/go${GO_VERSION}.linux-arm64.tar.gz + sudo tar -C /usr/local -xzvf go${GO_VERSION}.linux-arm64.tar.gz - run: *install-gotestsum - run: go mod download - run: @@ -596,50 +594,6 @@ jobs: NOMAD_VERSION: main steps: *NOMAD_INTEGRATION_TEST_STEPS - build-website-docker-image: - docker: - - image: docker.mirror.hashicorp.services/circleci/buildpack-deps - shell: /usr/bin/env bash -euo pipefail -c - steps: - - checkout - - setup_remote_docker - - run: - name: Build Docker Image if Necessary - command: | - # Ignore job if running an enterprise build - IMAGE_TAG=$(cat website/Dockerfile website/package-lock.json | sha256sum | awk '{print $1;}') - echo "Using $IMAGE_TAG" - if [ "$CIRCLE_REPOSITORY_URL" != "git@github.com:hashicorp/consul.git" ]; then - echo "Not Consul OSS Repo, not building website docker image" - elif curl https://hub.docker.com/v2/repositories/hashicorp/consul-website/tags/$IMAGE_TAG -fsL > /dev/null; then - echo "Dependencies have not changed, not building a new website docker image." - else - cd website/ - docker build -t hashicorp/consul-website:$IMAGE_TAG . 
- docker tag hashicorp/consul-website:$IMAGE_TAG hashicorp/consul-website:latest - docker login -u $WEBSITE_DOCKER_USER -p $WEBSITE_DOCKER_PASS - docker push hashicorp/consul-website - fi - - run: *notify-slack-failure - - algolia-index: - docker: - - image: docker.mirror.hashicorp.services/node:14 - steps: - - checkout - - run: - name: Push content to Algolia Index - command: | - if [ "$CIRCLE_REPOSITORY_URL" != "git@github.com:hashicorp/consul.git" ]; then - echo "Not Consul OSS Repo, not indexing Algolia" - exit 0 - fi - cd website/ - npm install -g npm@latest - npm install - node scripts/index_search_content.js - - run: *notify-slack-failure - # build frontend yarn cache frontend-cache: docker: @@ -846,14 +800,62 @@ jobs: working_directory: ui/packages/consul-ui command: make test-coverage-ci - run: *notify-slack-failure - - envoy-integration-test-1_18_6: &ENVOY_TESTS + + compatibility-integration-test: + machine: + image: ubuntu-2004:202101-01 + docker_layer_caching: true + parallelism: 1 + steps: + - checkout + # Get go binary from workspace + - attach_workspace: + at: . + # Build the consul-dev image from the already built binary + - run: + command: | + sudo rm -rf /usr/local/go + wget https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz + sudo tar -C /usr/local -xzvf go${GO_VERSION}.linux-amd64.tar.gz + environment: + <<: *ENVIRONMENT + - run: *install-gotestsum + - run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile . 
+ - run: + name: Compatibility Integration Tests + command: | + subtests=$(ls -d test/integration/consul-container/*/ | grep -v libs | xargs -n 1 basename | circleci tests split) + echo "Running $(echo $subtests | wc -w) subtests" + echo "$subtests" + subtests_pipe_sepr=$(echo "$subtests" | xargs | sed 's/ /|/g') + mkdir -p /tmp/test-results/ + docker run consul:local consul version + cd ./test/integration/consul-container + gotestsum -- -timeout=30m ./$subtests_pipe_sepr --target-version local --latest-version latest + ls -lrt + environment: + # this is needed because of incompatibility between RYUK container and circleci + GOTESTSUM_JUNITFILE: /tmp/test-results/results.xml + GOTESTSUM_FORMAT: standard-verbose + COMPOSE_INTERACTIVE_NO_CLI: 1 + # tput complains if this isn't set to something. + TERM: ansi + - store_artifacts: + path: ./test/integration/consul-container/upgrade/workdir/logs + destination: container-logs + - store_test_results: + path: *TEST_RESULTS_DIR + - store_artifacts: + path: *TEST_RESULTS_DIR + - run: *notify-slack-failure + + envoy-integration-test-1_19_3: &ENVOY_TESTS machine: image: ubuntu-2004:202101-01 parallelism: 4 resource_class: medium environment: - ENVOY_VERSION: "1.18.6" + ENVOY_VERSION: "1.19.3" steps: &ENVOY_INTEGRATION_TEST_STEPS - checkout # Get go binary from workspace @@ -886,11 +888,6 @@ jobs: path: *TEST_RESULTS_DIR - run: *notify-slack-failure - envoy-integration-test-1_19_3: - <<: *ENVOY_TESTS - environment: - ENVOY_VERSION: "1.19.3" - envoy-integration-test-1_20_2: <<: *ENVOY_TESTS environment: @@ -901,6 +898,11 @@ jobs: environment: ENVOY_VERSION: "1.21.1" + envoy-integration-test-1_22_0: + <<: *ENVOY_TESTS + environment: + ENVOY_VERSION: "1.22.0" + # run integration tests for the connect ca providers test-connect-ca-providers: docker: @@ -1060,26 +1062,26 @@ workflows: - dev-build: *filter-ignore-non-go-branches - go-test: requires: [ dev-build ] - - go-test-lib: - name: "go-test-api go1.16" - path: api - go-version: 
"1.16" - requires: [ dev-build ] - go-test-lib: name: "go-test-api go1.17" path: api go-version: "1.17" requires: [ dev-build ] - go-test-lib: - name: "go-test-sdk go1.16" - path: sdk - go-version: "1.16" - <<: *filter-ignore-non-go-branches + name: "go-test-api go1.18" + path: api + go-version: "1.18" + requires: [ dev-build ] - go-test-lib: name: "go-test-sdk go1.17" path: sdk go-version: "1.17" <<: *filter-ignore-non-go-branches + - go-test-lib: + name: "go-test-sdk go1.18" + path: sdk + go-version: "1.18" + <<: *filter-ignore-non-go-branches - go-test-race: *filter-ignore-non-go-branches - go-test-32bit: *filter-ignore-non-go-branches build-distros: @@ -1142,9 +1144,6 @@ workflows: - nomad-integration-0_8: requires: - dev-build - - envoy-integration-test-1_18_6: - requires: - - dev-build - envoy-integration-test-1_19_3: requires: - dev-build @@ -1154,21 +1153,13 @@ workflows: - envoy-integration-test-1_21_1: requires: - dev-build + - envoy-integration-test-1_22_0: + requires: + - dev-build + - compatibility-integration-test: + requires: + - dev-build - website: - unless: << pipeline.parameters.trigger-load-test >> - jobs: - - build-website-docker-image: - context: website-docker-image - filters: - branches: - only: - - main - - algolia-index: - filters: - branches: - only: - - stable-website frontend: unless: << pipeline.parameters.trigger-load-test >> jobs: diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..5c2b044e13 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,19 @@ +### Description +Describe why you're making this change, in plain English. 
+ +### Testing & Reproduction steps +* In the case of bugs, describe how to replicate +* If any manual tests were done, document the steps and the conditions to replicate +* Call out any important/ relevant unit tests, e2e tests or integration tests you have added or are adding + +### Links +Include any links here that might be helpful for people reviewing your PR (Tickets, GH issues, API docs, external benchmarks, tools docs, etc). If there are none, feel free to delete this section. + +Please be mindful not to leak any customer or confidential information. HashiCorp employees may want to use our internal URL shortener to obfuscate links. + +### PR Checklist + +* [ ] updated test coverage +* [ ] external facing docs updated +* [ ] not a security concern +* [ ] checklist [folder](./../docs/config) consulted diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a6069e1058..a01c499c6d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -49,7 +49,7 @@ jobs: uses: actions/checkout@v2 - name: Generate metadata file id: generate-metadata-file - uses: hashicorp/actions-generate-metadata@main + uses: hashicorp/actions-generate-metadata@v1 with: version: ${{ needs.get-product-version.outputs.product-version }} product: ${{ env.PKG_NAME }} @@ -65,15 +65,15 @@ jobs: strategy: matrix: include: - - {go: "1.17.5", goos: "linux", goarch: "386"} - - {go: "1.17.5", goos: "linux", goarch: "amd64"} - - {go: "1.17.5", goos: "linux", goarch: "arm"} - - {go: "1.17.5", goos: "linux", goarch: "arm64"} - - {go: "1.17.5", goos: "freebsd", goarch: "386"} - - {go: "1.17.5", goos: "freebsd", goarch: "amd64"} - - {go: "1.17.5", goos: "windows", goarch: "386"} - - {go: "1.17.5", goos: "windows", goarch: "amd64"} - - {go: "1.17.5", goos: "solaris", goarch: "amd64"} + - {go: "1.18.1", goos: "linux", goarch: "386"} + - {go: "1.18.1", goos: "linux", goarch: "amd64"} + - {go: "1.18.1", goos: "linux", goarch: "arm"} + - {go: "1.18.1", goos: "linux", 
goarch: "arm64"} + - {go: "1.18.1", goos: "freebsd", goarch: "386"} + - {go: "1.18.1", goos: "freebsd", goarch: "amd64"} + - {go: "1.18.1", goos: "windows", goarch: "386"} + - {go: "1.18.1", goos: "windows", goarch: "amd64"} + - {go: "1.18.1", goos: "solaris", goarch: "amd64"} fail-fast: true name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build @@ -173,7 +173,7 @@ jobs: matrix: goos: [ darwin ] goarch: [ "amd64", "arm64" ] - go: [ "1.17.5" ] + go: [ "1.18.1" ] fail-fast: true name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build diff --git a/.gitignore b/.gitignore index e67312cbe5..b9630db38b 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ bin/ changelog.tmp exit-code Thumbs.db +.idea # MacOS .DS_Store diff --git a/.release/ci.hcl b/.release/ci.hcl index 8452ef3b2f..f1237a5421 100644 --- a/.release/ci.hcl +++ b/.release/ci.hcl @@ -84,8 +84,21 @@ event "notarize-darwin-amd64" { } } -event "notarize-windows-386" { +event "notarize-darwin-arm64" { depends = ["notarize-darwin-amd64"] + action "notarize-darwin-arm64" { + organization = "hashicorp" + repository = "crt-workflows-common" + workflow = "notarize-darwin-arm64" + } + + notification { + on = "fail" + } +} + +event "notarize-windows-386" { + depends = ["notarize-darwin-arm64"] action "notarize-windows-386" { organization = "hashicorp" repository = "crt-workflows-common" diff --git a/CHANGELOG.md b/CHANGELOG.md index b4188590d6..cf503d1091 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,106 @@ +## 1.12.0 (April 20, 2022) + +BREAKING CHANGES: + +* sdk: several changes to the testutil configuration structs (removed `ACLMasterToken`, renamed `Master` to `InitialManagement`, and `AgentMaster` to `AgentRecovery`) [[GH-11827](https://github.com/hashicorp/consul/issues/11827)] +* telemetry: the disable_compat_1.9 option now defaults to true. 1.9 style `consul.http...` metrics can still be enabled by setting `disable_compat_1.9 = false`. 
However, we will remove these metrics in 1.13. [[GH-12675](https://github.com/hashicorp/consul/issues/12675)] + +FEATURES: + +* acl: Add token information to PermissionDeniedErrors [[GH-12567](https://github.com/hashicorp/consul/issues/12567)] +* acl: Added an AWS IAM auth method that allows authenticating to Consul using AWS IAM identities [[GH-12583](https://github.com/hashicorp/consul/issues/12583)] +* ca: Root certificates can now be consumed from a gRPC streaming endpoint: `WatchRoots` [[GH-12678](https://github.com/hashicorp/consul/issues/12678)] +* cli: The `token read` command now supports the `-expanded` flag to display detailed role and policy information for the token. [[GH-12670](https://github.com/hashicorp/consul/issues/12670)] +* config: automatically reload config when a file changes using the `auto-reload-config` CLI flag or `auto_reload_config` config option. [[GH-12329](https://github.com/hashicorp/consul/issues/12329)] +* server: Ensure that service-defaults `Meta` is returned with the response to the `ConfigEntry.ResolveServiceConfig` RPC. [[GH-12529](https://github.com/hashicorp/consul/issues/12529)] +* server: discovery chains now include a response field named "Default" to indicate if they were not constructed from any service-resolver, service-splitter, or service-router config entries [[GH-12511](https://github.com/hashicorp/consul/issues/12511)] +* server: ensure that service-defaults meta is incorporated into the discovery chain response [[GH-12511](https://github.com/hashicorp/consul/issues/12511)] +* tls: it is now possible to configure TLS differently for each of Consul's listeners (i.e. HTTPS, gRPC and the internal multiplexed RPC listener) using the `tls` stanza [[GH-12504](https://github.com/hashicorp/consul/issues/12504)] +* ui: Added support for AWS IAM Auth Methods [[GH-12786](https://github.com/hashicorp/consul/issues/12786)] +* ui: Support connect-native services in the Topology view. 
[[GH-12098](https://github.com/hashicorp/consul/issues/12098)] +* xds: Add the ability to invoke AWS Lambdas through terminating gateways. [[GH-12681](https://github.com/hashicorp/consul/issues/12681)] +* xds: adding control of the mesh-wide min/max TLS versions and cipher suites from the mesh config entry [[GH-12601](https://github.com/hashicorp/consul/issues/12601)] + +IMPROVEMENTS: + +* Refactor ACL denied error code and start improving error details [[GH-12308](https://github.com/hashicorp/consul/issues/12308)] +* acl: Provide fuller detail in the error message when an ACL denies access. [[GH-12470](https://github.com/hashicorp/consul/issues/12470)] +* agent: Allow client agents to perform keyring operations [[GH-12442](https://github.com/hashicorp/consul/issues/12442)] +* agent: add additional validation to TLS config [[GH-12522](https://github.com/hashicorp/consul/issues/12522)] +* agent: add support for specifying TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 and TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 cipher suites [[GH-12522](https://github.com/hashicorp/consul/issues/12522)] +* agent: bump default min version for connections to TLS 1.2 [[GH-12522](https://github.com/hashicorp/consul/issues/12522)] +* api: add QueryBackend to QueryMeta so an api user can determine if a query was served using which backend (streaming or blocking query). [[GH-12791](https://github.com/hashicorp/consul/issues/12791)] +* ci: include 'enhancement' entry type in IMPROVEMENTS section of changelog. [[GH-12376](https://github.com/hashicorp/consul/issues/12376)] +* ui: Exclude Service Instance Health from Health Check reporting on the Node listing page. The health icons on each individual row now only reflect Node health. 
[[GH-12248](https://github.com/hashicorp/consul/issues/12248)] +* ui: Improve usability of Topology warning/information panels [[GH-12305](https://github.com/hashicorp/consul/issues/12305)] +* ui: Slightly improve usability of main navigation [[GH-12334](https://github.com/hashicorp/consul/issues/12334)] +* ui: Use @hashicorp/flight icons for all our icons. [[GH-12209](https://github.com/hashicorp/consul/issues/12209)] +* Removed impediments to using a namespace prefixed IntermediatePKIPath +in a CA definition. [[GH-12655](https://github.com/hashicorp/consul/issues/12655)] +* acl: Improve handling of region-specific endpoints in the AWS IAM auth method. As part of this, the `STSRegion` field was removed from the auth method config. [[GH-12774](https://github.com/hashicorp/consul/issues/12774)] +* api: Improve error message if service or health check not found by stating that the entity must be referred to by ID, not name [[GH-10894](https://github.com/hashicorp/consul/issues/10894)] +* autopilot: Autopilot state is now tracked on Raft followers in addition to the leader. +Stale queries may be used to query for the non-leaders state. [[GH-12617](https://github.com/hashicorp/consul/issues/12617)] +* autopilot: The `autopilot.healthy` and `autopilot.failure_tolerance` metrics are now +regularly emitted by all servers. 
[[GH-12617](https://github.com/hashicorp/consul/issues/12617)] +* ci: Enable security scanning for CRT [[GH-11956](https://github.com/hashicorp/consul/issues/11956)] +* connect: Add Envoy 1.21.1 to support matrix, remove 1.17.4 [[GH-12777](https://github.com/hashicorp/consul/issues/12777)] +* connect: Add Envoy 1.22.0 to support matrix, remove 1.18.6 [[GH-12805](https://github.com/hashicorp/consul/issues/12805)] +* connect: reduce raft apply on CA configuration when no change is performed [[GH-12298](https://github.com/hashicorp/consul/issues/12298)] +* deps: update to latest go-discover to fix vulnerable transitive jwt-go dependency [[GH-12739](https://github.com/hashicorp/consul/issues/12739)] +* grpc, xds: improved reliability of grpc and xds servers by adding recovery-middleware to return and log error in case of panic. [[GH-10895](https://github.com/hashicorp/consul/issues/10895)] +* http: if a GET request has a non-empty body, log a warning that suggests a possible problem (parameters were meant for the query string, but accidentally placed in the body) [[GH-11821](https://github.com/hashicorp/consul/issues/11821)] +* metrics: The `consul.raft.boltdb.writeCapacity` metric was added and indicates a theoretical number of writes/second that can be performed to Consul. [[GH-12646](https://github.com/hashicorp/consul/issues/12646)] +* sdk: Add support for `Partition` and `RetryJoin` to the TestServerConfig struct. [[GH-12126](https://github.com/hashicorp/consul/issues/12126)] +* telemetry: Add new `leader` label to `consul.rpc.server.call` and optional `target_datacenter`, `locality`, +`allow_stale`, and `blocking` optional labels. 
[[GH-12727](https://github.com/hashicorp/consul/issues/12727)] +* ui: In the datacenter selector order Datacenters by Primary, Local then alphanumerically [[GH-12478](https://github.com/hashicorp/consul/issues/12478)] +* ui: Include details on ACL policy dispositions required for unauthorized views [[GH-12354](https://github.com/hashicorp/consul/issues/12354)] +* ui: Move icons away from depending on a CSS preprocessor [[GH-12461](https://github.com/hashicorp/consul/issues/12461)] +* version: Improved performance of the version.GetHumanVersion function by 50% on memory allocation. [[GH-11507](https://github.com/hashicorp/consul/issues/11507)] + +DEPRECATIONS: + +* acl: The `consul.acl.ResolveTokenToIdentity` metric is no longer reported. The values that were previously reported as part of this metric will now be part of the `consul.acl.ResolveToken` metric. [[GH-12166](https://github.com/hashicorp/consul/issues/12166)] +* agent: deprecate older syntax for specifying TLS min version values [[GH-12522](https://github.com/hashicorp/consul/issues/12522)] +* agent: remove support for specifying insecure TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 and TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suites [[GH-12522](https://github.com/hashicorp/consul/issues/12522)] +* config: setting `cert_file`, `key_file`, `ca_file`, `ca_path`, `tls_min_version`, `tls_cipher_suites`, `verify_incoming`, `verify_incoming_rpc`, `verify_incoming_https`, `verify_outgoing` and `verify_server_hostname` at the top-level is now deprecated, use the `tls` stanza instead [[GH-12504](https://github.com/hashicorp/consul/issues/12504)] + +BUG FIXES: + +* acl: Fix parsing of IAM user and role tags in IAM auth method [[GH-12797](https://github.com/hashicorp/consul/issues/12797)] +* dns: allow max of 63 character DNS labels instead of 64 per RFC 1123 [[GH-12535](https://github.com/hashicorp/consul/issues/12535)] +* logging: fix a bug with incorrect severity syslog messages (all messages were sent with NOTICE 
severity). [[GH-12079](https://github.com/hashicorp/consul/issues/12079)] +* ui: Added Tags tab to gateways(just like exists for non-gateway services) [[GH-12400](https://github.com/hashicorp/consul/issues/12400)] + +NOTES: + +* Forked net/rpc to add middleware support: https://github.com/hashicorp/consul-net-rpc/ . [[GH-12311](https://github.com/hashicorp/consul/issues/12311)] +* dependency: Upgrade to use Go 1.18.1 [[GH-12808](https://github.com/hashicorp/consul/issues/12808)] + +## 1.11.5 (April 13, 2022) + +SECURITY: + +* agent: Added a new check field, `disable_redirects`, that allows for disabling the following of redirects for HTTP checks. The intention is to default this to true in a future release so that redirects must explicitly be enabled. [[GH-12685](https://github.com/hashicorp/consul/issues/12685)] +* connect: Properly set SNI when configured for services behind a terminating gateway. [[GH-12672](https://github.com/hashicorp/consul/issues/12672)] + +IMPROVEMENTS: + +* agent: improve log messages when a service with a critical health check is deregistered due to exceeding the deregister_critical_service_after timeout [[GH-12725](https://github.com/hashicorp/consul/issues/12725)] +* xds: ensure that all connect timeout configs can apply equally to tproxy direct dial connections [[GH-12711](https://github.com/hashicorp/consul/issues/12711)] + +BUG FIXES: + +* acl: **(Enterprise Only)** fixes a bug preventing ACL policies configured with datacenter restrictions from being created if the cluster had been upgraded to Consul 1.11+ from an earlier version. +* connect/ca: cancel old Vault renewal on CA configuration. Provide a 1 - 6 second backoff on repeated token renewal requests to prevent overwhelming Vault. [[GH-12607](https://github.com/hashicorp/consul/issues/12607)] +* namespace: **(Enterprise Only)** Unreserve `consul` namespace to allow K8s namespace mirroring when deploying in `consul` K8s namespace . 
+* raft: upgrade to v1.3.6 which fixes a bug where a read replica node could attempt bootstrapping raft and prevent other nodes from bootstrapping at all [[GH-12496](https://github.com/hashicorp/consul/issues/12496)] +* replication: Fixed a bug which could prevent ACL replication from continuing successfully after a leader election. [[GH-12565](https://github.com/hashicorp/consul/issues/12565)] +* server: fix spurious blocking query suppression for discovery chains [[GH-12512](https://github.com/hashicorp/consul/issues/12512)] +* ui: Fixes a visual bug where our loading icon can look cut off [[GH-12479](https://github.com/hashicorp/consul/issues/12479)] +* usagemetrics: **(Enterprise only)** Fix a bug where Consul usage metrics stopped being reported when upgrading servers from 1.10 to 1.11 or later. + ## 1.11.4 (February 28, 2022) FEATURES: diff --git a/GNUmakefile b/GNUmakefile index 665f1d0a1d..a0a640ab26 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -5,8 +5,8 @@ SHELL = bash GOTOOLS = \ github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@master \ github.com/hashicorp/go-bindata/go-bindata@master \ - github.com/vektra/mockery/cmd/mockery@master \ - github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1 \ + github.com/vektra/mockery/v2@latest \ + github.com/golangci/golangci-lint/cmd/golangci-lint@v1.45.2 \ github.com/hashicorp/lint-consul-retry@master PROTOC_VERSION=3.15.8 @@ -15,7 +15,7 @@ PROTOC_VERSION=3.15.8 # MOG_VERSION can be either a valid string for "go install @" # or the string @DEV to imply use whatever is currently installed locally. ### -MOG_VERSION='v0.2.0' +MOG_VERSION='v0.3.0' ### # PROTOC_GO_INJECT_TAG_VERSION can be either a valid string for "go install @" # or the string @DEV to imply use whatever is currently installed locally. 
@@ -158,7 +158,8 @@ dev-docker: linux @echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)" @docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null @echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)" - @DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile + # 'consul:local' tag is needed to run the integration tests + @DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' -t 'consul:local' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile # In CircleCI, the linux binary will be attached from a previous step at bin/. This make target # should only run in CI and not locally. diff --git a/INTERNALS.md b/INTERNALS.md deleted file mode 100644 index efa95dab39..0000000000 --- a/INTERNALS.md +++ /dev/null @@ -1 +0,0 @@ -Moved to [docs/README.md](./docs/README.md). 
diff --git a/acl/acl_oss.go b/acl/acl_oss.go index ca2974e4e1..6932808831 100644 --- a/acl/acl_oss.go +++ b/acl/acl_oss.go @@ -3,7 +3,9 @@ package acl -const DefaultPartitionName = "" +const ( + DefaultPartitionName = "" +) // Reviewer Note: This is a little bit strange; one might want it to be "" like partition name // However in consul/structs/intention.go we define IntentionDefaultNamespace as 'default' and so diff --git a/acl/enterprisemeta_oss.go b/acl/enterprisemeta_oss.go index 2296fdd43e..f0f15bc05f 100644 --- a/acl/enterprisemeta_oss.go +++ b/acl/enterprisemeta_oss.go @@ -106,3 +106,7 @@ func NewEnterpriseMetaWithPartition(_, _ string) EnterpriseMeta { // FillAuthzContext stub func (_ *EnterpriseMeta) FillAuthzContext(_ *AuthorizerContext) {} + +func NormalizeNamespace(_ string) string { + return "" +} diff --git a/agent/acl_endpoint_test.go b/agent/acl_endpoint_test.go index 2c6aad450e..01a3f0b5ec 100644 --- a/agent/acl_endpoint_test.go +++ b/agent/acl_endpoint_test.go @@ -1142,6 +1142,41 @@ func TestACL_HTTP(t *testing.T) { _, err := a.srv.ACLTokenCreate(resp, req) require.Error(t, err) }) + + t.Run("Create with uppercase node identity", func(t *testing.T) { + tokenInput := &structs.ACLToken{ + Description: "agent token for foo node", + NodeIdentities: []*structs.ACLNodeIdentity{ + { + NodeName: "FOO", + Datacenter: "bar", + }, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/acl/token?token=root", jsonBody(tokenInput)) + resp := httptest.NewRecorder() + _, err := a.srv.ACLTokenCreate(resp, req) + require.Error(t, err) + testutil.RequireErrorContains(t, err, "Only lowercase alphanumeric") + }) + + t.Run("Create with uppercase service identity", func(t *testing.T) { + tokenInput := &structs.ACLToken{ + Description: "token for service identity foo", + ServiceIdentities: []*structs.ACLServiceIdentity{ + { + ServiceName: "FOO", + }, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/acl/token?token=root", jsonBody(tokenInput)) + resp := 
httptest.NewRecorder() + _, err := a.srv.ACLTokenCreate(resp, req) + require.Error(t, err) + testutil.RequireErrorContains(t, err, "Only lowercase alphanumeric") + }) }) } diff --git a/agent/agent.go b/agent/agent.go index 72f861dc47..3978f27378 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -20,6 +20,7 @@ import ( "github.com/armon/go-metrics" "github.com/armon/go-metrics/prometheus" + "github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/go-connlimit" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" @@ -357,6 +358,8 @@ type Agent struct { // into Agent, which will allow us to remove this field. rpcClientHealth *health.Client + rpcClientPeering pbpeering.PeeringServiceClient + // routineManager is responsible for managing longer running go routines // run by the Agent routineManager *routine.Manager @@ -434,6 +437,8 @@ func New(bd BaseDeps) (*Agent, error) { QueryOptionDefaults: config.ApplyDefaultQueryOptions(a.config), } + a.rpcClientPeering = pbpeering.NewPeeringServiceClient(conn) + a.serviceManager = NewServiceManager(&a) // We used to do this in the Start method. 
However it doesn't need to go @@ -3901,6 +3906,8 @@ func (a *Agent) reloadConfigInternal(newCfg *config.RuntimeConfig) error { ConfigEntryBootstrap: newCfg.ConfigEntryBootstrap, RaftSnapshotThreshold: newCfg.RaftSnapshotThreshold, RaftSnapshotInterval: newCfg.RaftSnapshotInterval, + HeartbeatTimeout: newCfg.ConsulRaftHeartbeatTimeout, + ElectionTimeout: newCfg.ConsulRaftElectionTimeout, RaftTrailingLogs: newCfg.RaftTrailingLogs, } if err := a.delegate.ReloadConfig(cc); err != nil { diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 1032434970..fc29333e92 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -6202,6 +6202,101 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { } } +func TestAgentConnectCALeafCert_nonBlockingQuery_after_blockingQuery_shouldNotBlock(t *testing.T) { + // see: https://github.com/hashicorp/consul/issues/12048 + + runStep := func(t *testing.T, name string, fn func(t *testing.T)) { + t.Helper() + if !t.Run(name, fn) { + t.FailNow() + } + } + + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + a := NewTestAgent(t, "") + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + testrpc.WaitForActiveCARoot(t, a.RPC, "dc1", nil) + + { + // Register a local service + args := &structs.ServiceDefinition{ + ID: "foo", + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + } + req := httptest.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + if !assert.Equal(t, 200, resp.Code) { + t.Log("Body: ", resp.Body.String()) + } + } + + var ( + serialNumber string + index string + issued structs.IssuedCert + ) + runStep(t, "do initial non-blocking query", func(t *testing.T) { + req := httptest.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + dec := 
json.NewDecoder(resp.Body) + require.NoError(t, dec.Decode(&issued)) + serialNumber = issued.SerialNumber + + require.Equal(t, "MISS", resp.Header().Get("X-Cache"), + "for the leaf cert cache type these are always MISS") + index = resp.Header().Get("X-Consul-Index") + }) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + // launch goroutine for blocking query + req := httptest.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+index, nil).Clone(ctx) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + }() + + // We just need to ensure that the above blocking query is in-flight before + // the next step, so do a little sleep. + time.Sleep(50 * time.Millisecond) + + // The initial non-blocking query populated the leaf cert cache entry + // implicitly. The agent cache doesn't prune entries very often at all, so + // in between both of these steps the data should still be there, causing + // this to be a HIT that completes in less than 10m (the default inner leaf + // cert blocking query timeout). + runStep(t, "do a non-blocking query that should not block", func(t *testing.T) { + req := httptest.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + + var issued2 structs.IssuedCert + dec := json.NewDecoder(resp.Body) + require.NoError(t, dec.Decode(&issued2)) + + require.Equal(t, "HIT", resp.Header().Get("X-Cache")) + + // If this is actually returning a cached result, the serial number + // should be unchanged. 
+ require.Equal(t, serialNumber, issued2.SerialNumber) + + require.Equal(t, issued, issued2) + }) +} + func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) { ca.SkipIfVaultNotPresent(t) diff --git a/agent/agent_test.go b/agent/agent_test.go index ba82f127f6..d7b118fcba 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -16,6 +16,7 @@ import ( "net/http/httptest" "net/url" "os" + "path" "path/filepath" "strconv" "strings" @@ -24,6 +25,8 @@ import ( "time" "github.com/golang/protobuf/jsonpb" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/google/tcpproxy" "github.com/hashicorp/go-hclog" "github.com/hashicorp/serf/coordinate" @@ -3931,9 +3934,11 @@ func TestAgent_ReloadConfigOutgoingRPCConfig(t *testing.T) { a := NewTestAgent(t, hcl) defer a.Shutdown() tlsConf := a.tlsConfigurator.OutgoingRPCConfig() + require.True(t, tlsConf.InsecureSkipVerify) - require.Len(t, tlsConf.ClientCAs.Subjects(), 1) - require.Len(t, tlsConf.RootCAs.Subjects(), 1) + expectedCaPoolByFile := getExpectedCaPoolByFile(t) + assertDeepEqual(t, expectedCaPoolByFile, tlsConf.RootCAs, cmpCertPool) + assertDeepEqual(t, expectedCaPoolByFile, tlsConf.ClientCAs, cmpCertPool) hcl = ` data_dir = "` + dataDir + `" @@ -3946,9 +3951,11 @@ func TestAgent_ReloadConfigOutgoingRPCConfig(t *testing.T) { c := TestConfig(testutil.Logger(t), config.FileSource{Name: t.Name(), Format: "hcl", Data: hcl}) require.NoError(t, a.reloadConfigInternal(c)) tlsConf = a.tlsConfigurator.OutgoingRPCConfig() + require.False(t, tlsConf.InsecureSkipVerify) - require.Len(t, tlsConf.RootCAs.Subjects(), 2) - require.Len(t, tlsConf.ClientCAs.Subjects(), 2) + expectedCaPoolByDir := getExpectedCaPoolByDir(t) + assertDeepEqual(t, expectedCaPoolByDir, tlsConf.RootCAs, cmpCertPool) + assertDeepEqual(t, expectedCaPoolByDir, tlsConf.ClientCAs, cmpCertPool) } func TestAgent_ReloadConfigAndKeepChecksStatus(t *testing.T) { @@ -4018,8 +4025,9 @@ func 
TestAgent_ReloadConfigIncomingRPCConfig(t *testing.T) { require.NoError(t, err) require.NotNil(t, tlsConf) require.True(t, tlsConf.InsecureSkipVerify) - require.Len(t, tlsConf.ClientCAs.Subjects(), 1) - require.Len(t, tlsConf.RootCAs.Subjects(), 1) + expectedCaPoolByFile := getExpectedCaPoolByFile(t) + assertDeepEqual(t, expectedCaPoolByFile, tlsConf.RootCAs, cmpCertPool) + assertDeepEqual(t, expectedCaPoolByFile, tlsConf.ClientCAs, cmpCertPool) hcl = ` data_dir = "` + dataDir + `" @@ -4034,8 +4042,9 @@ func TestAgent_ReloadConfigIncomingRPCConfig(t *testing.T) { tlsConf, err = tlsConf.GetConfigForClient(nil) require.NoError(t, err) require.False(t, tlsConf.InsecureSkipVerify) - require.Len(t, tlsConf.ClientCAs.Subjects(), 2) - require.Len(t, tlsConf.RootCAs.Subjects(), 2) + expectedCaPoolByDir := getExpectedCaPoolByDir(t) + assertDeepEqual(t, expectedCaPoolByDir, tlsConf.RootCAs, cmpCertPool) + assertDeepEqual(t, expectedCaPoolByDir, tlsConf.ClientCAs, cmpCertPool) } func TestAgent_ReloadConfigTLSConfigFailure(t *testing.T) { @@ -4066,8 +4075,10 @@ func TestAgent_ReloadConfigTLSConfigFailure(t *testing.T) { tlsConf, err := tlsConf.GetConfigForClient(nil) require.NoError(t, err) require.Equal(t, tls.NoClientCert, tlsConf.ClientAuth) - require.Len(t, tlsConf.ClientCAs.Subjects(), 1) - require.Len(t, tlsConf.RootCAs.Subjects(), 1) + + expectedCaPoolByFile := getExpectedCaPoolByFile(t) + assertDeepEqual(t, expectedCaPoolByFile, tlsConf.RootCAs, cmpCertPool) + assertDeepEqual(t, expectedCaPoolByFile, tlsConf.ClientCAs, cmpCertPool) } func TestAgent_consulConfig_AutoEncryptAllowTLS(t *testing.T) { @@ -5845,3 +5856,45 @@ func Test_coalesceTimerTwoPeriods(t *testing.T) { }) } + +func getExpectedCaPoolByFile(t *testing.T) *x509.CertPool { + pool := x509.NewCertPool() + data, err := ioutil.ReadFile("../test/ca/root.cer") + require.NoError(t, err) + if !pool.AppendCertsFromPEM(data) { + t.Fatal("could not add test ca ../test/ca/root.cer to pool") + } + return pool +} + +func 
getExpectedCaPoolByDir(t *testing.T) *x509.CertPool { + pool := x509.NewCertPool() + entries, err := os.ReadDir("../test/ca_path") + require.NoError(t, err) + + for _, entry := range entries { + filename := path.Join("../test/ca_path", entry.Name()) + + data, err := ioutil.ReadFile(filename) + require.NoError(t, err) + + if !pool.AppendCertsFromPEM(data) { + t.Fatalf("could not add test ca %s to pool", filename) + } + } + + return pool +} + +// lazyCerts has a func field which can't be compared. +var cmpCertPool = cmp.Options{ + cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"), + cmp.AllowUnexported(x509.CertPool{}), +} + +func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { + t.Helper() + if diff := cmp.Diff(x, y, opts...); diff != "" { + t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) + } +} diff --git a/agent/auto-config/config.go b/agent/auto-config/config.go index a20121fb9a..94f45d1fc6 100644 --- a/agent/auto-config/config.go +++ b/agent/auto-config/config.go @@ -27,7 +27,7 @@ type DirectRPC interface { // agent/cache.Cache struct that we care about type Cache interface { Notify(ctx context.Context, t string, r cache.Request, correlationID string, ch chan<- cache.UpdateEvent) error - Prepopulate(t string, result cache.FetchResult, dc string, token string, key string) error + Prepopulate(t string, result cache.FetchResult, dc string, peerName string, token string, key string) error } // ServerProvider is an interface that can be used to find one server in the local DC known to diff --git a/agent/auto-config/mock_test.go b/agent/auto-config/mock_test.go index 45fd42ef40..1ff53bc629 100644 --- a/agent/auto-config/mock_test.go +++ b/agent/auto-config/mock_test.go @@ -137,7 +137,7 @@ func (m *mockCache) Notify(ctx context.Context, t string, r cache.Request, corre return err } -func (m *mockCache) Prepopulate(t string, result cache.FetchResult, dc string, token string, key string) error { +func (m 
*mockCache) Prepopulate(t string, result cache.FetchResult, dc string, peerName string, token string, key string) error { var restore string cert, ok := result.Value.(*structs.IssuedCert) if ok { @@ -147,7 +147,7 @@ func (m *mockCache) Prepopulate(t string, result cache.FetchResult, dc string, t cert.PrivateKeyPEM = "redacted" } - ret := m.Called(t, result, dc, token, key) + ret := m.Called(t, result, dc, peerName, token, key) if ok && restore != "" { cert.PrivateKeyPEM = restore @@ -304,6 +304,7 @@ func (m *mockedConfig) expectInitialTLS(t *testing.T, agentName, datacenter, tok rootRes, datacenter, "", + "", rootsReq.CacheInfo().Key, ).Return(nil).Once() @@ -330,6 +331,7 @@ func (m *mockedConfig) expectInitialTLS(t *testing.T, agentName, datacenter, tok cachetype.ConnectCALeafName, leafRes, datacenter, + "", token, leafReq.Key(), ).Return(nil).Once() diff --git a/agent/auto-config/tls.go b/agent/auto-config/tls.go index 0683e94ba4..e8a59d19f9 100644 --- a/agent/auto-config/tls.go +++ b/agent/auto-config/tls.go @@ -96,7 +96,7 @@ func (ac *AutoConfig) populateCertificateCache(certs *structs.SignedResponse) er rootRes := cache.FetchResult{Value: &certs.ConnectCARoots, Index: certs.ConnectCARoots.QueryMeta.Index} rootsReq := ac.caRootsRequest() // getting the roots doesn't require a token so in order to potentially share the cache with another - if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCARootName, rootRes, ac.config.Datacenter, "", rootsReq.CacheInfo().Key); err != nil { + if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCARootName, rootRes, ac.config.Datacenter, structs.DefaultPeerKeyword, "", rootsReq.CacheInfo().Key); err != nil { return err } @@ -108,7 +108,7 @@ func (ac *AutoConfig) populateCertificateCache(certs *structs.SignedResponse) er Index: certs.IssuedCert.RaftIndex.ModifyIndex, State: cachetype.ConnectCALeafSuccess(connect.EncodeSigningKeyID(cert.AuthorityKeyId)), } - if err := 
ac.acConfig.Cache.Prepopulate(cachetype.ConnectCALeafName, certRes, leafReq.Datacenter, leafReq.Token, leafReq.Key()); err != nil { + if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCALeafName, certRes, leafReq.Datacenter, structs.DefaultPeerKeyword, leafReq.Token, leafReq.Key()); err != nil { return err } diff --git a/agent/cache-types/catalog_list_services_test.go b/agent/cache-types/catalog_list_services_test.go index a630daaf35..60aa4ed81f 100644 --- a/agent/cache-types/catalog_list_services_test.go +++ b/agent/cache-types/catalog_list_services_test.go @@ -5,10 +5,11 @@ import ( "testing" "time" - "github.com/hashicorp/consul/agent/cache" - "github.com/hashicorp/consul/agent/structs" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/structs" ) func TestCatalogListServices(t *testing.T) { @@ -104,7 +105,7 @@ func TestCatalogListServices_IntegrationWithCache_NotModifiedResponse(t *testing }, } - err := c.Prepopulate(CatalogListServicesName, last, "dc1", "token", req.CacheInfo().Key) + err := c.Prepopulate(CatalogListServicesName, last, "dc1", "", "token", req.CacheInfo().Key) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) diff --git a/agent/cache-types/mock_Agent.go b/agent/cache-types/mock_Agent.go new file mode 100644 index 0000000000..ec3ba4031b --- /dev/null +++ b/agent/cache-types/mock_Agent.go @@ -0,0 +1,92 @@ +// Code generated by mockery v2.11.0. DO NOT EDIT. 
+ +package cachetype + +import ( + local "github.com/hashicorp/consul/agent/local" + memdb "github.com/hashicorp/go-memdb" + + mock "github.com/stretchr/testify/mock" + + structs "github.com/hashicorp/consul/agent/structs" + + testing "testing" + + time "time" +) + +// MockAgent is an autogenerated mock type for the Agent type +type MockAgent struct { + mock.Mock +} + +// LocalBlockingQuery provides a mock function with given fields: alwaysBlock, hash, wait, fn +func (_m *MockAgent) LocalBlockingQuery(alwaysBlock bool, hash string, wait time.Duration, fn func(memdb.WatchSet) (string, interface{}, error)) (string, interface{}, error) { + ret := _m.Called(alwaysBlock, hash, wait, fn) + + var r0 string + if rf, ok := ret.Get(0).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) string); ok { + r0 = rf(alwaysBlock, hash, wait, fn) + } else { + r0 = ret.Get(0).(string) + } + + var r1 interface{} + if rf, ok := ret.Get(1).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) interface{}); ok { + r1 = rf(alwaysBlock, hash, wait, fn) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(interface{}) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) error); ok { + r2 = rf(alwaysBlock, hash, wait, fn) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// LocalState provides a mock function with given fields: +func (_m *MockAgent) LocalState() *local.State { + ret := _m.Called() + + var r0 *local.State + if rf, ok := ret.Get(0).(func() *local.State); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*local.State) + } + } + + return r0 +} + +// ServiceHTTPBasedChecks provides a mock function with given fields: id +func (_m *MockAgent) ServiceHTTPBasedChecks(id structs.ServiceID) []structs.CheckType { + ret := _m.Called(id) + + var r0 []structs.CheckType + if rf, ok := 
ret.Get(0).(func(structs.ServiceID) []structs.CheckType); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]structs.CheckType) + } + } + + return r0 +} + +// NewMockAgent creates a new instance of MockAgent. It also registers a cleanup function to assert the mocks expectations. +func NewMockAgent(t testing.TB) *MockAgent { + mock := &MockAgent{} + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agent/cache-types/mock_RPC.go b/agent/cache-types/mock_RPC.go index 6f642c66b8..67052f3549 100644 --- a/agent/cache-types/mock_RPC.go +++ b/agent/cache-types/mock_RPC.go @@ -1,7 +1,12 @@ -// Code generated by mockery v1.0.0 +// Code generated by mockery v2.11.0. DO NOT EDIT. + package cachetype -import mock "github.com/stretchr/testify/mock" +import ( + testing "testing" + + mock "github.com/stretchr/testify/mock" +) // MockRPC is an autogenerated mock type for the RPC type type MockRPC struct { @@ -21,3 +26,12 @@ func (_m *MockRPC) RPC(method string, args interface{}, reply interface{}) error return r0 } + +// NewMockRPC creates a new instance of MockRPC. It also registers a cleanup function to assert the mocks expectations. +func NewMockRPC(t testing.TB) *MockRPC { + mock := &MockRPC{} + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agent/cache-types/rpc.go b/agent/cache-types/rpc.go index 0aaf040f3d..ebf0925aff 100644 --- a/agent/cache-types/rpc.go +++ b/agent/cache-types/rpc.go @@ -1,6 +1,6 @@ package cachetype -//go:generate mockery -all -inpkg +//go:generate mockery --all --inpackage // RPC is an interface that an RPC client must implement. 
This is a helper // interface that is implemented by the agent delegate so that Type diff --git a/agent/cache/cache.go b/agent/cache/cache.go index d104cdd3b5..de6d001e32 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -33,7 +33,7 @@ import ( "github.com/hashicorp/consul/lib/ttlcache" ) -//go:generate mockery -all -inpkg +//go:generate mockery --all --inpackage // TODO(kit): remove the namespace from these once the metrics themselves change var Gauges = []prometheus.GaugeDefinition{ @@ -91,7 +91,7 @@ const ( // struct in agent/structs. This API makes cache usage a mostly drop-in // replacement for non-cached RPC calls. // -// The cache is partitioned by ACL and datacenter. This allows the cache +// The cache is partitioned by ACL and datacenter/peer. This allows the cache // to be safe for multi-DC queries and for queries where the data is modified // due to ACLs all without the cache having to have any clever logic, at // the slight expense of a less perfect cache. @@ -376,6 +376,13 @@ func (c *Cache) getEntryLocked( // Check if re-validate is requested. If so the first time round the // loop is not a hit but subsequent ones should be treated normally. if !tEntry.Opts.Refresh && info.MustRevalidate { + if entry.Fetching { + // There is an active blocking query for this data, which has not + // returned. We can logically deduce that the contents of the cache + // are actually current, and we can simply return this while + // leaving the blocking query alone. 
+ return true, true, entry + } return true, false, entry } @@ -399,7 +406,7 @@ func (c *Cache) getWithIndex(ctx context.Context, r getOptions) (interface{}, Re return result.Value, ResultMeta{}, err } - key := makeEntryKey(r.TypeEntry.Name, r.Info.Datacenter, r.Info.Token, r.Info.Key) + key := makeEntryKey(r.TypeEntry.Name, r.Info.Datacenter, r.Info.PeerName, r.Info.Token, r.Info.Key) // First time through first := true @@ -519,7 +526,11 @@ RETRY_GET: } } -func makeEntryKey(t, dc, token, key string) string { +func makeEntryKey(t, dc, peerName, token, key string) string { + // TODO(peering): figure out if this is the desired format + if peerName != "" { + return fmt.Sprintf("%s/%s/%s/%s", t, "peer:"+peerName, token, key) + } return fmt.Sprintf("%s/%s/%s/%s", t, dc, token, key) } @@ -877,8 +888,8 @@ func (c *Cache) Close() error { // on startup. It is used to set the ConnectRootCA and AgentLeafCert when // AutoEncrypt.TLS is turned on. The cache itself cannot fetch that the first // time because it requires a special RPCType. Subsequent runs are fine though. 
-func (c *Cache) Prepopulate(t string, res FetchResult, dc, token, k string) error { - key := makeEntryKey(t, dc, token, k) +func (c *Cache) Prepopulate(t string, res FetchResult, dc, peerName, token, k string) error { + key := makeEntryKey(t, dc, peerName, token, k) newEntry := cacheEntry{ Valid: true, Value: res.Value, diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index 5c2b3d2035..a93969c2c6 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -1545,7 +1545,7 @@ func TestCacheReload(t *testing.T) { c.entriesLock.Lock() tEntry, ok := c.types["t1"] require.True(t, ok) - keyName := makeEntryKey("t1", "", "", "hello1") + keyName := makeEntryKey("t1", "", "", "", "hello1") ok, entryValid, entry := c.getEntryLocked(tEntry, keyName, RequestInfo{}) require.True(t, ok) require.True(t, entryValid) @@ -1687,7 +1687,7 @@ func TestCache_Prepopulate(t *testing.T) { c := New(Options{}) c.RegisterType("t", typ) - c.Prepopulate("t", FetchResult{Value: 17, Index: 1}, "dc1", "token", "v1") + c.Prepopulate("t", FetchResult{Value: 17, Index: 1}, "dc1", "", "token", "v1") ctx := context.Background() req := fakeRequest{ @@ -1740,7 +1740,7 @@ func TestCache_RefreshLifeCycle(t *testing.T) { c := New(Options{}) c.RegisterType("t", typ) - key := makeEntryKey("t", "dc1", "token", "v1") + key := makeEntryKey("t", "dc1", "", "token", "v1") ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/agent/cache/mock_Request.go b/agent/cache/mock_Request.go index c5af589241..7cd7343f8f 100644 --- a/agent/cache/mock_Request.go +++ b/agent/cache/mock_Request.go @@ -1,8 +1,12 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. +// Code generated by mockery v2.11.0. DO NOT EDIT. 
package cache -import mock "github.com/stretchr/testify/mock" +import ( + testing "testing" + + mock "github.com/stretchr/testify/mock" +) // MockRequest is an autogenerated mock type for the Request type type MockRequest struct { @@ -22,3 +26,12 @@ func (_m *MockRequest) CacheInfo() RequestInfo { return r0 } + +// NewMockRequest creates a new instance of MockRequest. It also registers a cleanup function to assert the mocks expectations. +func NewMockRequest(t testing.TB) *MockRequest { + mock := &MockRequest{} + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agent/cache/mock_Type.go b/agent/cache/mock_Type.go index 7c39fca329..628e7a0184 100644 --- a/agent/cache/mock_Type.go +++ b/agent/cache/mock_Type.go @@ -1,8 +1,12 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. +// Code generated by mockery v2.11.0. DO NOT EDIT. package cache -import mock "github.com/stretchr/testify/mock" +import ( + testing "testing" + + mock "github.com/stretchr/testify/mock" +) // MockType is an autogenerated mock type for the Type type type MockType struct { @@ -43,3 +47,12 @@ func (_m *MockType) RegisterOptions() RegisterOptions { return r0 } + +// NewMockType creates a new instance of MockType. It also registers a cleanup function to assert the mocks expectations. +func NewMockType(t testing.TB) *MockType { + mock := &MockType{} + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agent/cache/request.go b/agent/cache/request.go index b2d3ab854a..fbce6f8917 100644 --- a/agent/cache/request.go +++ b/agent/cache/request.go @@ -16,6 +16,9 @@ type Request interface { // RequestInfo represents cache information for a request. The caching // framework uses this to control the behavior of caching and to determine // cacheability. +// +// TODO(peering): finish ensuring everything that sets a Datacenter sets or doesn't set PeerName. 
+// TODO(peering): also make sure the peer name is present in the cache key likely in lieu of the datacenter somehow. type RequestInfo struct { // Key is a unique cache key for this request. This key should // be globally unique to identify this request, since any conflicting @@ -28,14 +31,17 @@ type RequestInfo struct { // // Datacenter is the datacenter that the request is targeting. // - // Both of these values are used to partition the cache. The cache framework + // PeerName is the peer that the request is targeting. + // + // All of these values are used to partition the cache. The cache framework // today partitions data on these values to simplify behavior: by // partitioning ACL tokens, the cache doesn't need to be smart about - // filtering results. By filtering datacenter results, the cache can - // service the multi-DC nature of Consul. This comes at the expense of + // filtering results. By filtering datacenter/peer results, the cache can + // service the multi-DC/multi-peer nature of Consul. This comes at the expense of // working set size, but in general the effect is minimal. Token string Datacenter string + PeerName string // MinIndex is the minimum index being queried. This is used to // determine if we already have data satisfying the query or if we need diff --git a/agent/connect/ca/mock_Provider.go b/agent/connect/ca/mock_Provider.go index ec79ea5c70..bdc5d9c8e6 100644 --- a/agent/connect/ca/mock_Provider.go +++ b/agent/connect/ca/mock_Provider.go @@ -1,11 +1,13 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. +// Code generated by mockery v2.11.0. DO NOT EDIT. package ca import ( - x509 "crypto/x509" + testing "testing" mock "github.com/stretchr/testify/mock" + + x509 "crypto/x509" ) // MockProvider is an autogenerated mock type for the Provider type @@ -245,3 +247,12 @@ func (_m *MockProvider) SupportsCrossSigning() (bool, error) { return r0, r1 } + +// NewMockProvider creates a new instance of MockProvider. 
It also registers a cleanup function to assert the mocks expectations. +func NewMockProvider(t testing.TB) *MockProvider { + mock := &MockProvider{} + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agent/connect/ca/provider.go b/agent/connect/ca/provider.go index 439c848ebb..0f6e6e14b1 100644 --- a/agent/connect/ca/provider.go +++ b/agent/connect/ca/provider.go @@ -5,7 +5,7 @@ import ( "errors" ) -//go:generate mockery -name Provider -inpkg +//go:generate mockery --name Provider --inpackage // ErrRateLimited is a sentinel error value Providers may return from any method // to indicate that the operation can't complete due to a temporary rate limit. diff --git a/agent/connect_ca_endpoint_test.go b/agent/connect_ca_endpoint_test.go index 27e9161559..2a299bc761 100644 --- a/agent/connect_ca_endpoint_test.go +++ b/agent/connect_ca_endpoint_test.go @@ -3,6 +3,7 @@ package agent import ( "bytes" "crypto/x509" + "encoding/pem" "io/ioutil" "net/http" "net/http/httptest" @@ -288,8 +289,13 @@ func TestConnectCARoots_PEMEncoding(t *testing.T) { data, err := ioutil.ReadAll(resp.Body) require.NoError(t, err) - pool := x509.NewCertPool() - require.True(t, pool.AppendCertsFromPEM(data)) + // expecting the root cert from dc1 and an intermediate in dc2 - require.Len(t, pool.Subjects(), 2) + block, rest := pem.Decode(data) + _, err = x509.ParseCertificate(block.Bytes) + require.NoError(t, err) + + block, _ = pem.Decode(rest) + _, err = x509.ParseCertificate(block.Bytes) + require.NoError(t, err) } diff --git a/agent/consul/acl.go b/agent/consul/acl.go index 2c42642238..2badf78750 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -1174,7 +1174,21 @@ func (r *ACLResolver) ACLsEnabled() bool { return true } -func (r *ACLResolver) ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (ACLResolveResult, error) { +// TODO(peering): fix all calls to use the new signature and rename it back 
+func (r *ACLResolver) ResolveTokenAndDefaultMeta( + token string, + entMeta *acl.EnterpriseMeta, + authzContext *acl.AuthorizerContext, +) (ACLResolveResult, error) { + return r.ResolveTokenAndDefaultMetaWithPeerName(token, entMeta, structs.DefaultPeerKeyword, authzContext) +} + +func (r *ACLResolver) ResolveTokenAndDefaultMetaWithPeerName( + token string, + entMeta *acl.EnterpriseMeta, + peerName string, + authzContext *acl.AuthorizerContext, +) (ACLResolveResult, error) { result, err := r.ResolveToken(token) if err != nil { return ACLResolveResult{}, err @@ -1186,9 +1200,19 @@ func (r *ACLResolver) ResolveTokenAndDefaultMeta(token string, entMeta *acl.Ente // Default the EnterpriseMeta based on the Tokens meta or actual defaults // in the case of unknown identity - if result.ACLIdentity != nil { + switch { + case peerName == "" && result.ACLIdentity != nil: entMeta.Merge(result.ACLIdentity.EnterpriseMetadata()) - } else { + case result.ACLIdentity != nil: + // We _do not_ normalize the enterprise meta from the token when a peer + // name was specified because namespaces across clusters are not + // equivalent. A local namespace is _never_ correct for a remote query. + entMeta.Merge( + structs.DefaultEnterpriseMetaInPartition( + result.ACLIdentity.EnterpriseMetadata().PartitionOrDefault(), + ), + ) + default: entMeta.Merge(structs.DefaultEnterpriseMetaInDefaultPartition()) } diff --git a/agent/consul/acl_endpoint.go b/agent/consul/acl_endpoint.go index 658f72a426..3b9b00cf8f 100644 --- a/agent/consul/acl_endpoint.go +++ b/agent/consul/acl_endpoint.go @@ -770,7 +770,7 @@ func (a *ACL) tokenSetInternal(args *structs.ACLTokenSetRequest, reply *structs. return fmt.Errorf("Service identity %q cannot specify a list of datacenters on a local token", svcid.ServiceName) } if !isValidServiceIdentityName(svcid.ServiceName) { - return fmt.Errorf("Service identity %q has an invalid name. 
Only alphanumeric characters, '-' and '_' are allowed", svcid.ServiceName) + return fmt.Errorf("Service identity %q has an invalid name. Only lowercase alphanumeric characters, '-' and '_' are allowed", svcid.ServiceName) } } token.ServiceIdentities = dedupeServiceIdentities(token.ServiceIdentities) @@ -783,7 +783,7 @@ func (a *ACL) tokenSetInternal(args *structs.ACLTokenSetRequest, reply *structs. return fmt.Errorf("Node identity is missing the datacenter field on this token") } if !isValidNodeIdentityName(nodeid.NodeName) { - return fmt.Errorf("Node identity has an invalid name. Only alphanumeric characters, '-' and '_' are allowed") + return fmt.Errorf("Node identity has an invalid name. Only lowercase alphanumeric characters, '-' and '_' are allowed") } } token.NodeIdentities = dedupeNodeIdentities(token.NodeIdentities) @@ -1682,7 +1682,7 @@ func (a *ACL) RoleSet(args *structs.ACLRoleSetRequest, reply *structs.ACLRole) e return fmt.Errorf("Service identity is missing the service name field on this role") } if !isValidServiceIdentityName(svcid.ServiceName) { - return fmt.Errorf("Service identity %q has an invalid name. Only alphanumeric characters, '-' and '_' are allowed", svcid.ServiceName) + return fmt.Errorf("Service identity %q has an invalid name. Only lowercase alphanumeric characters, '-' and '_' are allowed", svcid.ServiceName) } } role.ServiceIdentities = dedupeServiceIdentities(role.ServiceIdentities) @@ -1695,7 +1695,7 @@ func (a *ACL) RoleSet(args *structs.ACLRoleSetRequest, reply *structs.ACLRole) e return fmt.Errorf("Node identity is missing the datacenter field on this role") } if !isValidNodeIdentityName(nodeid.NodeName) { - return fmt.Errorf("Node identity has an invalid name. Only alphanumeric characters, '-' and '_' are allowed") + return fmt.Errorf("Node identity has an invalid name. 
Only lowercase alphanumeric characters, '-' and '_' are allowed") } } role.NodeIdentities = dedupeNodeIdentities(role.NodeIdentities) diff --git a/agent/consul/auto_config_endpoint_test.go b/agent/consul/auto_config_endpoint_test.go index 676b126fdb..43df5fdabc 100644 --- a/agent/consul/auto_config_endpoint_test.go +++ b/agent/consul/auto_config_endpoint_test.go @@ -11,12 +11,11 @@ import ( "testing" "time" + msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" "github.com/hashicorp/memberlist" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc" - "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest" diff --git a/agent/consul/autopilot.go b/agent/consul/autopilot.go index 27471b533d..4d934053a3 100644 --- a/agent/consul/autopilot.go +++ b/agent/consul/autopilot.go @@ -10,6 +10,7 @@ import ( autopilot "github.com/hashicorp/raft-autopilot" "github.com/hashicorp/serf/serf" + "github.com/hashicorp/consul/agent/consul/autopilotevents" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/logging" @@ -29,7 +30,8 @@ var AutopilotGauges = []prometheus.GaugeDefinition{ // AutopilotDelegate is a Consul delegate for autopilot operations. 
type AutopilotDelegate struct { - server *Server + server *Server + readyServersPublisher *autopilotevents.ReadyServersEventPublisher } func (d *AutopilotDelegate) AutopilotConfig() *autopilot.Config { @@ -51,6 +53,8 @@ func (d *AutopilotDelegate) NotifyState(state *autopilot.State) { } else { metrics.SetGauge([]string{"autopilot", "healthy"}, 0) } + + d.readyServersPublisher.PublishReadyServersEvents(state) } func (d *AutopilotDelegate) RemoveFailedServer(srv *autopilot.Server) { @@ -63,7 +67,13 @@ func (d *AutopilotDelegate) RemoveFailedServer(srv *autopilot.Server) { } func (s *Server) initAutopilot(config *Config) { - apDelegate := &AutopilotDelegate{s} + apDelegate := &AutopilotDelegate{ + server: s, + readyServersPublisher: autopilotevents.NewReadyServersEventPublisher(autopilotevents.Config{ + Publisher: s.publisher, + GetStore: func() autopilotevents.StateStore { return s.fsm.State() }, + }), + } s.autopilot = autopilot.New( s.raft, @@ -74,6 +84,9 @@ func (s *Server) initAutopilot(config *Config) { autopilot.WithPromoter(s.autopilotPromoter()), autopilot.WithReconciliationDisabled(), ) + + // registers a snapshot handler for the event publisher to send as the first event for a new stream + s.publisher.RegisterHandler(autopilotevents.EventTopicReadyServers, apDelegate.readyServersPublisher.HandleSnapshot) } func (s *Server) autopilotServers() map[raft.ServerID]*autopilot.Server { @@ -129,7 +142,7 @@ func (s *Server) autopilotServerFromMetadata(srv *metadata.Server) (*autopilot.S // populate the node meta if there is any. 
When a node first joins or if // there are ACL issues then this could be empty if the server has not // yet been able to register itself in the catalog - _, node, err := s.fsm.State().GetNodeID(types.NodeID(srv.ID), structs.NodeEnterpriseMetaInDefaultPartition()) + _, node, err := s.fsm.State().GetNodeID(types.NodeID(srv.ID), structs.NodeEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword) if err != nil { return nil, fmt.Errorf("error retrieving node from state store: %w", err) } diff --git a/agent/consul/autopilot_test.go b/agent/consul/autopilot_test.go index faf1facc44..2ebd5806b4 100644 --- a/agent/consul/autopilot_test.go +++ b/agent/consul/autopilot_test.go @@ -2,6 +2,7 @@ package consul import ( "context" + "fmt" "os" "testing" "time" @@ -10,6 +11,8 @@ import ( "github.com/hashicorp/serf/serf" "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent/consul/autopilotevents" + "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" @@ -522,3 +525,99 @@ func TestAutopilot_MinQuorum(t *testing.T) { } }) } + +func TestAutopilot_EventPublishing(t *testing.T) { + // This is really an integration level test. The general flow this test will follow is: + // + // 1. Start a 3 server cluster + // 2. Subscribe to the ready server events + // 3. Observe the first event which will be pretty immediately ready as it is the + // snapshot event. + // 4. Wait for multiple iterations of the autopilot state updater and ensure no + // other events are seen. The state update interval is 50ms for tests unless + // overridden. + // 5. Add a fouth server. + // 6. Wait for an event to be emitted containing 4 ready servers. + + // 1. create the test cluster + cluster := newTestCluster(t, &testClusterConfig{ + Servers: 3, + ServerConf: testServerACLConfig, + // We want to wait until each server has registered itself in the Catalog. 
Otherwise
+		// the first snapshot event we see might have no servers in it while things are being
+		// initialized. Doing this wait ensures that things are in the right state to start
+		// the subscription.
+	})
+
+	// 2. subscribe to ready server events
+	req := stream.SubscribeRequest{
+		Topic:   autopilotevents.EventTopicReadyServers,
+		Subject: stream.SubjectNone,
+		Token:   TestDefaultInitialManagementToken,
+	}
+	sub, err := cluster.Servers[0].publisher.Subscribe(&req)
+	require.NoError(t, err)
+	t.Cleanup(sub.Unsubscribe)
+
+	// 3. Observe that an event was generated which should be the snapshot event.
+	// As we have just bootstrapped the cluster with 3 servers we expect to
+	// see those 3 here.
+	validatePayload(t, 3, mustGetEventWithTimeout(t, sub, 50*time.Millisecond))
+
+	// TODO - it's kind of annoying that the EventPublisher doesn't have a mode where
+	// it knows each event is a full state of the world. The ramifications are that
+	// we have to expect/ignore the framing events for EndOfSnapshot.
+	event := mustGetEventWithTimeout(t, sub, 10*time.Millisecond)
+	require.True(t, event.IsFramingEvent())
+
+	// 4. Wait for 3 iterations of the ServerHealthInterval to ensure no events
+	// are being published when the autopilot state is not changing.
+	eventNotEmitted(t, sub, 150*time.Millisecond)
+
+	// 5. Add a fourth server
+	_, srv := testServerWithConfig(t, testServerACLConfig, func(c *Config) {
+		c.Bootstrap = false
+		c.BootstrapExpect = 0
+	})
+	joinLAN(t, srv, cluster.Servers[0])
+
+	// 6. Now wait for the event for the fourth server being added. This may take a little
+	// while as the joinLAN operation above doesn't wait for the server to actually get
+	// added to Raft.
+	validatePayload(t, 4, mustGetEventWithTimeout(t, sub, time.Second))
+}
+
+// mustGetEventWithTimeout is a helper function for validating that a Subscription.Next call will return
+// an event within the given time. It also validates that no error is returned.
+func mustGetEventWithTimeout(t *testing.T, subscription *stream.Subscription, timeout time.Duration) stream.Event { + t.Helper() + event, err := getEventWithTimeout(t, subscription, timeout) + require.NoError(t, err) + return event +} + +// getEventWithTimeout is a helper function for retrieving a Event from a Subscription within the specified timeout. +func getEventWithTimeout(t *testing.T, subscription *stream.Subscription, timeout time.Duration) (stream.Event, error) { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + event, err := subscription.Next(ctx) + return event, err +} + +// eventNotEmitted is a helper to validate that no Event is emitted for the given Subscription +func eventNotEmitted(t *testing.T, subscription *stream.Subscription, timeout time.Duration) { + t.Helper() + var event stream.Event + var err error + event, err = getEventWithTimeout(t, subscription, timeout) + require.Equal(t, context.DeadlineExceeded, err, fmt.Sprintf("event:%v", event)) +} + +func validatePayload(t *testing.T, expectedNumServers int, event stream.Event) { + t.Helper() + require.Equal(t, autopilotevents.EventTopicReadyServers, event.Topic) + readyServers, ok := event.Payload.(autopilotevents.EventPayloadReadyServers) + require.True(t, ok) + require.Len(t, readyServers, expectedNumServers) +} diff --git a/agent/consul/autopilotevents/mock_Publisher_test.go b/agent/consul/autopilotevents/mock_Publisher_test.go new file mode 100644 index 0000000000..98f6e4065c --- /dev/null +++ b/agent/consul/autopilotevents/mock_Publisher_test.go @@ -0,0 +1,29 @@ +// Code generated by mockery v2.11.0. DO NOT EDIT. 
+ +package autopilotevents + +import ( + testing "testing" + + stream "github.com/hashicorp/consul/agent/consul/stream" + mock "github.com/stretchr/testify/mock" +) + +// MockPublisher is an autogenerated mock type for the Publisher type +type MockPublisher struct { + mock.Mock +} + +// Publish provides a mock function with given fields: _a0 +func (_m *MockPublisher) Publish(_a0 []stream.Event) { + _m.Called(_a0) +} + +// NewMockPublisher creates a new instance of MockPublisher. It also registers a cleanup function to assert the mocks expectations. +func NewMockPublisher(t testing.TB) *MockPublisher { + mock := &MockPublisher{} + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agent/consul/autopilotevents/mock_StateStore_test.go b/agent/consul/autopilotevents/mock_StateStore_test.go new file mode 100644 index 0000000000..200e68be71 --- /dev/null +++ b/agent/consul/autopilotevents/mock_StateStore_test.go @@ -0,0 +1,58 @@ +// Code generated by mockery v2.11.0. DO NOT EDIT. 
+ +package autopilotevents + +import ( + acl "github.com/hashicorp/consul/acl" + mock "github.com/stretchr/testify/mock" + + structs "github.com/hashicorp/consul/agent/structs" + + testing "testing" + + types "github.com/hashicorp/consul/types" +) + +// MockStateStore is an autogenerated mock type for the StateStore type +type MockStateStore struct { + mock.Mock +} + +// GetNodeID provides a mock function with given fields: _a0, _a1, _a2 +func (_m *MockStateStore) GetNodeID(_a0 types.NodeID, _a1 *acl.EnterpriseMeta, _a2 string) (uint64, *structs.Node, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(types.NodeID, *acl.EnterpriseMeta, string) uint64); ok { + r0 = rf(_a0, _a1, _a2) + } else { + r0 = ret.Get(0).(uint64) + } + + var r1 *structs.Node + if rf, ok := ret.Get(1).(func(types.NodeID, *acl.EnterpriseMeta, string) *structs.Node); ok { + r1 = rf(_a0, _a1, _a2) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*structs.Node) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(types.NodeID, *acl.EnterpriseMeta, string) error); ok { + r2 = rf(_a0, _a1, _a2) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// NewMockStateStore creates a new instance of MockStateStore. It also registers a cleanup function to assert the mocks expectations. +func NewMockStateStore(t testing.TB) *MockStateStore { + mock := &MockStateStore{} + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agent/consul/autopilotevents/mock_timeProvider_test.go b/agent/consul/autopilotevents/mock_timeProvider_test.go new file mode 100644 index 0000000000..147424101d --- /dev/null +++ b/agent/consul/autopilotevents/mock_timeProvider_test.go @@ -0,0 +1,39 @@ +// Code generated by mockery v2.11.0. DO NOT EDIT. 
+ +package autopilotevents + +import ( + testing "testing" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// mockTimeProvider is an autogenerated mock type for the timeProvider type +type mockTimeProvider struct { + mock.Mock +} + +// Now provides a mock function with given fields: +func (_m *mockTimeProvider) Now() time.Time { + ret := _m.Called() + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// newMockTimeProvider creates a new instance of mockTimeProvider. It also registers a cleanup function to assert the mocks expectations. +func newMockTimeProvider(t testing.TB) *mockTimeProvider { + mock := &mockTimeProvider{} + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agent/consul/autopilotevents/ready_servers_events.go b/agent/consul/autopilotevents/ready_servers_events.go new file mode 100644 index 0000000000..7943ccacc8 --- /dev/null +++ b/agent/consul/autopilotevents/ready_servers_events.go @@ -0,0 +1,311 @@ +package autopilotevents + +import ( + "fmt" + "net" + "sort" + "sync" + "time" + + autopilot "github.com/hashicorp/raft-autopilot" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbsubscribe" + "github.com/hashicorp/consul/types" +) + +const ( + EventTopicReadyServers stream.StringTopic = "ready-servers" +) + +// ReadyServerInfo includes information about a server that is ready +// to handle incoming requests. 
+type ReadyServerInfo struct { + ID string + Address string + TaggedAddresses map[string]string + Version string +} + +func (info *ReadyServerInfo) Equal(other *ReadyServerInfo) bool { + if info.ID != other.ID { + return false + } + + if info.Version != other.Version { + return false + } + + if info.Address != other.Address { + return false + } + + if len(info.TaggedAddresses) != len(other.TaggedAddresses) { + return false + } + + for tag, infoAddr := range info.TaggedAddresses { + if otherAddr, ok := other.TaggedAddresses[tag]; !ok || infoAddr != otherAddr { + return false + } + } + + return true +} + +// EventPayloadReadyServers +type EventPayloadReadyServers []ReadyServerInfo + +func (e EventPayloadReadyServers) Subject() stream.Subject { return stream.SubjectNone } + +func (e EventPayloadReadyServers) HasReadPermission(authz acl.Authorizer) bool { + // Any service in the mesh will need access to where the servers live. Therefore + // we check if the authorizer grants permissions on any service and if so then + // we allow seeing where the servers are. + var authzContext acl.AuthorizerContext + structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier). + FillAuthzContext(&authzContext) + + return authz.ServiceWriteAny(&authzContext) == acl.Allow +} + +func (e EventPayloadReadyServers) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + // TODO(peering) is this right? + // TODO(agentless) is this right? 
+	panic("EventPayloadReadyServers does not implement ToSubscriptionEvent")
+}
+
+func ExtractEventPayload(event stream.Event) (EventPayloadReadyServers, error) {
+	if event.Topic != EventTopicReadyServers {
+		return nil, fmt.Errorf("unexpected topic (%q) for a %q event", event.Topic, EventTopicReadyServers)
+	}
+
+	if payload, ok := event.Payload.(EventPayloadReadyServers); ok {
+		return payload, nil
+	}
+
+	return nil, fmt.Errorf("unexpected payload type %T for %q event", event.Payload, EventTopicReadyServers)
+}
+
+type Config struct {
+	GetStore     func() StateStore
+	Publisher    Publisher
+	timeProvider timeProvider
+}
+
+// ReadyServersEventPublisher is capable of tracking changes to ready servers
+// between consecutive calls to PublishReadyServersEvents. It will then publish
+// "ready-servers" events as necessary.
+type ReadyServersEventPublisher struct {
+	Config
+	previous EventPayloadReadyServers
+
+	snapshotLock sync.RWMutex
+	snapshot     []stream.Event
+}
+
+func NewReadyServersEventPublisher(config Config) *ReadyServersEventPublisher {
+	return &ReadyServersEventPublisher{
+		Config: config,
+		snapshot: []stream.Event{
+			{
+				Topic:   EventTopicReadyServers,
+				Index:   0,
+				Payload: EventPayloadReadyServers{},
+			},
+		},
+	}
+}
+
+//go:generate mockery --name StateStore --inpackage --testonly
+type StateStore interface {
+	GetNodeID(types.NodeID, *acl.EnterpriseMeta, string) (uint64, *structs.Node, error)
+}
+
+//go:generate mockery --name Publisher --inpackage --testonly
+type Publisher interface {
+	Publish([]stream.Event)
+}
+
+//go:generate mockery --name timeProvider --inpackage --testonly
+type timeProvider interface {
+	Now() time.Time
+}
+
+// PublishReadyServersEvents will publish a "ready-servers" event if the list of
+// ready servers has changed since the last time events were published.
+func (r *ReadyServersEventPublisher) PublishReadyServersEvents(state *autopilot.State) {
+	if events, ok := r.readyServersEvents(state); ok {
+		// update the latest snapshot so that any new event subscription will
+		// use the latest state.
+		r.snapshotLock.Lock()
+		r.snapshot = events
+		r.snapshotLock.Unlock()
+
+		// if the event publisher were to not be able to keep up with processing events
+		// then it's possible this blocks. It could cause autopilot to not update its
+		// state as often as it should. However if this blocks for over 10s then
+		// not updating the autopilot state as quickly is likely the least of our
+		// concerns. If we need to make this async then we probably need to single
+		// flight these to ensure proper event ordering.
+		r.Publisher.Publish(events)
+	}
+}
+
+func (r *ReadyServersEventPublisher) readyServersEvents(state *autopilot.State) ([]stream.Event, bool) {
+	// First, we need to pull all the ready servers out from the autopilot state.
+	servers := r.autopilotStateToReadyServers(state)
+
+	// Next, we sort the servers list to make comparison easier later on. We do
+	// this outside of the next length check conditional block to ensure that all
+	// values of previousReadyServers we store will be sorted and the future
+	// comparisons will remain valid.
+	sort.Slice(servers, func(i, j int) bool {
+		// no two servers can have the same id so this is sufficient
+		return servers[i].ID < servers[j].ID
+	})
+
+	// If the number of ready servers hasn't changed then we need to inspect individual
+	// servers to see if there are differences. If the number of servers has changed
+	// we know that an event should be generated and sent.
+	if len(r.previous) == len(servers) {
+		diff := false
+		// We are relying on the fact that both of the slices will be sorted and that
+		// we don't care what the actual differences are but instead just that they
+		// have differences.
+ for i := 0; i < len(servers); i++ { + if !r.previous[i].Equal(&servers[i]) { + diff = true + break + } + } + + // The list of ready servers is identical to the previous ones. Therefore + // we will not send any event. + if !diff { + return nil, false + } + } + + r.previous = servers + + return []stream.Event{r.newReadyServersEvent(servers)}, true +} + +// autopilotStateToReadyServers will iterate through all servers in the autopilot +// state and compile a list of servers which are "ready". Readiness means that +// they would be an acceptable target for stale queries. +func (r *ReadyServersEventPublisher) autopilotStateToReadyServers(state *autopilot.State) EventPayloadReadyServers { + var servers EventPayloadReadyServers + for _, srv := range state.Servers { + // All healthy servers are caught up enough to be included in a ready servers. + // Servers with voting rights that are still healthy according to Serf are + // also included as they have likely just fallen behind the leader a little + // after initially replicating state. They are still acceptable targets + // for most stale queries and clients can bound the staleness if necessary. + // Including them is a means to prevent flapping the list of servers we + // advertise as ready and flooding the network with notifications to all + // dataplanes of server updates. + // + // TODO (agentless) for a non-voting server that is still alive but fell + // behind, should we cause it to be removed. For voters we know they were caught + // up at some point but for non-voters we cannot know the same thing. + if srv.Health.Healthy || (srv.HasVotingRights() && srv.Server.NodeStatus == autopilot.NodeAlive) { + // autopilot information contains addresses in the : form. We only care about the + // the host so we parse it out here and discard the port. 
+ host, err := extractHost(string(srv.Server.Address)) + if err != nil || host == "" { + + continue + } + + servers = append(servers, ReadyServerInfo{ + ID: string(srv.Server.ID), + Address: host, + Version: srv.Server.Version, + TaggedAddresses: r.getTaggedAddresses(srv), + }) + } + } + + return servers +} + +// getTaggedAddresses will get the tagged addresses for the given server or return nil +// if it encounters an error or unregistered server. +func (r *ReadyServersEventPublisher) getTaggedAddresses(srv *autopilot.ServerState) map[string]string { + // we have no callback to lookup the tagged addresses so we can return early + if r.GetStore == nil { + return nil + } + + // Assuming we have been provided a callback to get a state store implementation, then + // we will attempt to lookup the node for the autopilot server. We use this to get the + // tagged addresses so that consumers of these events will be able to distinguish LAN + // vs WAN addresses as well as IP protocol differentiation. At first I thought we may + // need to hook into catalog events so that if the tagged addresses change then + // we can synthesize new events. That would be pretty complex so this code does not + // deal with that. The reasoning why that is probably okay is that autopilot will + // send us the state at least once every 30s. That means that we will grab the nodes + // from the catalog at that often and publish the events. So while its not quite + // as responsive as actually watching for the Catalog changes, its MUCH simpler to + // code and reason about and having those addresses be updated within 30s is good enough. 
+	_, node, err := r.GetStore().GetNodeID(types.NodeID(srv.Server.ID), structs.NodeEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
+	if err != nil || node == nil {
+		// no catalog information means we should return a nil address map
+		return nil
+	}
+
+	if len(node.TaggedAddresses) == 0 {
+		return nil
+	}
+
+	addrs := make(map[string]string)
+	for tag, address := range node.TaggedAddresses {
+		// just like for the Node's main Address, we only care about the IPs and not the
+		// port so we parse the host out and discard the port.
+		host, err := extractHost(address)
+		if err != nil || host == "" {
+			continue
+		}
+		addrs[tag] = host
+	}
+
+	return addrs
+}
+
+// newReadyServersEvent will create a stream.Event with the provided ready server info.
+func (r *ReadyServersEventPublisher) newReadyServersEvent(servers EventPayloadReadyServers) stream.Event {
+	now := time.Now()
+	if r.timeProvider != nil {
+		now = r.timeProvider.Now()
+	}
+	return stream.Event{
+		Topic:   EventTopicReadyServers,
+		Index:   uint64(now.UnixMicro()),
+		Payload: servers,
+	}
+}
+
+// HandleSnapshot is the EventPublisher callback to generate a snapshot for the "ready-servers" event streams.
+func (r *ReadyServersEventPublisher) HandleSnapshot(_ stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
+	r.snapshotLock.RLock()
+	defer r.snapshotLock.RUnlock()
+	buf.Append(r.snapshot)
+	return r.snapshot[0].Index, nil
+}
+
+// extractHost is a small convenience function to catch errors regarding
+// missing ports from the net.SplitHostPort function.
+func extractHost(addr string) (string, error) { + host, _, err := net.SplitHostPort(addr) + if err == nil { + return host, nil + } + if ae, ok := err.(*net.AddrError); ok && ae.Err == "missing port in address" { + return addr, nil + } + return "", err +} diff --git a/agent/consul/autopilotevents/ready_servers_events_test.go b/agent/consul/autopilotevents/ready_servers_events_test.go new file mode 100644 index 0000000000..223292404a --- /dev/null +++ b/agent/consul/autopilotevents/ready_servers_events_test.go @@ -0,0 +1,647 @@ +package autopilotevents + +import ( + "testing" + time "time" + + "github.com/hashicorp/raft" + autopilot "github.com/hashicorp/raft-autopilot" + mock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/stream" + structs "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbsubscribe" + types "github.com/hashicorp/consul/types" +) + +var testTime = time.Date(2022, 4, 14, 10, 56, 00, 0, time.UTC) + +var exampleState = &autopilot.State{ + Servers: map[raft.ServerID]*autopilot.ServerState{ + "792ae13c-d765-470b-852c-e073fdb6e849": { + Health: autopilot.ServerHealth{ + Healthy: true, + }, + State: autopilot.RaftLeader, + Server: autopilot.Server{ + ID: "792ae13c-d765-470b-852c-e073fdb6e849", + Address: "198.18.0.2:8300", + Version: "v1.12.0", + NodeStatus: autopilot.NodeAlive, + }, + }, + "65e79ff4-bbce-467b-a9d6-725c709fa985": { + Health: autopilot.ServerHealth{ + Healthy: true, + }, + State: autopilot.RaftVoter, + Server: autopilot.Server{ + ID: "65e79ff4-bbce-467b-a9d6-725c709fa985", + Address: "198.18.0.3:8300", + Version: "v1.12.0", + NodeStatus: autopilot.NodeAlive, + }, + }, + // this server is up according to Serf but is unhealthy + // due to having an index that is behind + "db11f0ac-0cbe-4215-80cc-b4e843f4df1e": { + Health: autopilot.ServerHealth{ + Healthy: false, + }, + State: autopilot.RaftVoter, + 
Server: autopilot.Server{ + ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e", + Address: "198.18.0.4:8300", + Version: "v1.12.0", + NodeStatus: autopilot.NodeAlive, + }, + }, + // this server is up according to Serf but is unhealthy + // due to having an index that is behind. It is a non-voter + // and thus will be filtered out + "4c48a154-8176-4e14-ba5d-20bf1f784a7e": { + Health: autopilot.ServerHealth{ + Healthy: false, + }, + State: autopilot.RaftNonVoter, + Server: autopilot.Server{ + ID: "4c48a154-8176-4e14-ba5d-20bf1f784a7e", + Address: "198.18.0.5:8300", + Version: "v1.12.0", + NodeStatus: autopilot.NodeAlive, + }, + }, + // this is a voter that has died + "7a22eec8-de85-43a6-a76e-00b427ef6627": { + Health: autopilot.ServerHealth{ + Healthy: false, + }, + State: autopilot.RaftVoter, + Server: autopilot.Server{ + ID: "7a22eec8-de85-43a6-a76e-00b427ef6627", + Address: "198.18.0.6", + Version: "v1.12.0", + NodeStatus: autopilot.NodeFailed, + }, + }, + }, +} + +func TestEventPayloadReadyServers_HasReadPermission(t *testing.T) { + t.Run("no service:write", func(t *testing.T) { + hasRead := EventPayloadReadyServers{}.HasReadPermission(acl.DenyAll()) + require.False(t, hasRead) + }) + + t.Run("has service:write", func(t *testing.T) { + policy, err := acl.NewPolicyFromSource(` + service "foo" { + policy = "write" + } + `, acl.SyntaxCurrent, nil, nil) + require.NoError(t, err) + + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil) + require.NoError(t, err) + + hasRead := EventPayloadReadyServers{}.HasReadPermission(authz) + require.True(t, hasRead) + }) +} + +func TestAutopilotStateToReadyServers(t *testing.T) { + expected := EventPayloadReadyServers{ + { + ID: "792ae13c-d765-470b-852c-e073fdb6e849", + Address: "198.18.0.2", + Version: "v1.12.0", + }, + { + ID: "65e79ff4-bbce-467b-a9d6-725c709fa985", + Address: "198.18.0.3", + Version: "v1.12.0", + }, + { + ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e", + Address: "198.18.0.4", + 
Version: "v1.12.0", + }, + } + + r := ReadyServersEventPublisher{} + + actual := r.autopilotStateToReadyServers(exampleState) + require.ElementsMatch(t, expected, actual) +} + +func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) { + expected := EventPayloadReadyServers{ + { + ID: "792ae13c-d765-470b-852c-e073fdb6e849", + Address: "198.18.0.2", + TaggedAddresses: map[string]string{"wan": "5.4.3.2"}, + Version: "v1.12.0", + }, + { + ID: "65e79ff4-bbce-467b-a9d6-725c709fa985", + Address: "198.18.0.3", + TaggedAddresses: map[string]string{"wan": "1.2.3.4"}, + Version: "v1.12.0", + }, + { + ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e", + Address: "198.18.0.4", + TaggedAddresses: map[string]string{"wan": "9.8.7.6"}, + Version: "v1.12.0", + }, + } + + store := &MockStateStore{} + t.Cleanup(func() { store.AssertExpectations(t) }) + store.On("GetNodeID", + types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"), + structs.NodeEnterpriseMetaInDefaultPartition(), + structs.DefaultPeerKeyword, + ).Once().Return( + uint64(0), + &structs.Node{TaggedAddresses: map[string]string{"wan": "5.4.3.2"}}, + nil, + ) + + store.On("GetNodeID", + types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"), + structs.NodeEnterpriseMetaInDefaultPartition(), + structs.DefaultPeerKeyword, + ).Once().Return( + uint64(0), + &structs.Node{TaggedAddresses: map[string]string{"wan": "1.2.3.4"}}, + nil, + ) + + store.On("GetNodeID", + types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"), + structs.NodeEnterpriseMetaInDefaultPartition(), + structs.DefaultPeerKeyword, + ).Once().Return( + uint64(0), + &structs.Node{TaggedAddresses: map[string]string{"wan": "9.8.7.6"}}, + nil, + ) + + r := NewReadyServersEventPublisher(Config{ + GetStore: func() StateStore { return store }, + }) + + actual := r.autopilotStateToReadyServers(exampleState) + require.ElementsMatch(t, expected, actual) +} + +func TestAutopilotReadyServersEvents(t *testing.T) { + // we have already tested the ReadyServerInfo extraction 
within the + // TestAutopilotStateToReadyServers test. Therefore this test is going + // to focus only on the change detection. + // + // * - added server + // * - removed server + // * - server with address changed + // * - upgraded server with version change + + expectedServers := EventPayloadReadyServers{ + { + ID: "65e79ff4-bbce-467b-a9d6-725c709fa985", + Address: "198.18.0.3", + Version: "v1.12.0", + }, + { + ID: "792ae13c-d765-470b-852c-e073fdb6e849", + Address: "198.18.0.2", + Version: "v1.12.0", + }, + { + ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e", + Address: "198.18.0.4", + Version: "v1.12.0", + }, + } + + type testCase struct { + // The elements of this slice must already be sorted + previous EventPayloadReadyServers + changeDetected bool + } + + cases := map[string]testCase{ + "no-change": { + previous: EventPayloadReadyServers{ + { + ID: "65e79ff4-bbce-467b-a9d6-725c709fa985", + Address: "198.18.0.3", + Version: "v1.12.0", + }, + { + ID: "792ae13c-d765-470b-852c-e073fdb6e849", + Address: "198.18.0.2", + Version: "v1.12.0", + }, + { + ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e", + Address: "198.18.0.4", + Version: "v1.12.0", + }, + }, + changeDetected: false, + }, + "server-added": { + previous: EventPayloadReadyServers{ + { + ID: "65e79ff4-bbce-467b-a9d6-725c709fa985", + Address: "198.18.0.3", + Version: "v1.12.0", + }, + { + ID: "792ae13c-d765-470b-852c-e073fdb6e849", + Address: "198.18.0.2", + Version: "v1.12.0", + }, + // server with id db11f0ac-0cbe-4215-80cc-b4e843f4df1e will be added. 
+ }, + changeDetected: true, + }, + "server-removed": { + previous: EventPayloadReadyServers{ + { + ID: "65e79ff4-bbce-467b-a9d6-725c709fa985", + Address: "198.18.0.3", + Version: "v1.12.0", + }, + { + ID: "792ae13c-d765-470b-852c-e073fdb6e849", + Address: "198.18.0.2", + Version: "v1.12.0", + }, + // this server isn't present in the state and will be removed + { + ID: "7e3235de-8a75-4c8d-9ec3-847ca87d07e8", + Address: "198.18.0.5", + Version: "v1.12.0", + }, + { + ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e", + Address: "198.18.0.4", + Version: "v1.12.0", + }, + }, + changeDetected: true, + }, + "address-change": { + previous: EventPayloadReadyServers{ + { + ID: "65e79ff4-bbce-467b-a9d6-725c709fa985", + // this value is different from the state and should + // cause an event to be generated + Address: "198.18.0.9", + Version: "v1.12.0", + }, + { + ID: "792ae13c-d765-470b-852c-e073fdb6e849", + Address: "198.18.0.2", + Version: "v1.12.0", + }, + { + ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e", + Address: "198.18.0.4", + Version: "v1.12.0", + }, + }, + changeDetected: true, + }, + "upgraded-version": { + previous: EventPayloadReadyServers{ + { + ID: "65e79ff4-bbce-467b-a9d6-725c709fa985", + Address: "198.18.0.3", + // This is v1.12.0 in the state and therefore an + // event should be generated + Version: "v1.11.4", + }, + { + ID: "792ae13c-d765-470b-852c-e073fdb6e849", + Address: "198.18.0.2", + Version: "v1.12.0", + }, + { + ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e", + Address: "198.18.0.4", + Version: "v1.12.0", + }, + }, + changeDetected: true, + }, + } + + for name, tcase := range cases { + t.Run(name, func(t *testing.T) { + r := ReadyServersEventPublisher{ + previous: tcase.previous, + } + events, changeDetected := r.readyServersEvents(exampleState) + require.Equal(t, tcase.changeDetected, changeDetected, "servers: %+v", events) + if tcase.changeDetected { + require.Len(t, events, 1) + require.Equal(t, EventTopicReadyServers, events[0].Topic) + payload, ok := 
events[0].Payload.(EventPayloadReadyServers) + require.True(t, ok) + require.ElementsMatch(t, expectedServers, payload) + } else { + require.Empty(t, events) + } + }) + } +} + +func TestAutopilotPublishReadyServersEvents(t *testing.T) { + t.Run("publish", func(t *testing.T) { + pub := &MockPublisher{} + pub.On("Publish", []stream.Event{ + { + Topic: EventTopicReadyServers, + Index: uint64(testTime.UnixMicro()), + Payload: EventPayloadReadyServers{ + { + ID: "65e79ff4-bbce-467b-a9d6-725c709fa985", + Address: "198.18.0.3", + Version: "v1.12.0", + }, + { + ID: "792ae13c-d765-470b-852c-e073fdb6e849", + Address: "198.18.0.2", + Version: "v1.12.0", + }, + { + ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e", + Address: "198.18.0.4", + Version: "v1.12.0", + }, + }, + }, + }) + + mtime := &mockTimeProvider{} + mtime.On("Now").Return(testTime).Once() + + t.Cleanup(func() { + mtime.AssertExpectations(t) + pub.AssertExpectations(t) + }) + + r := NewReadyServersEventPublisher(Config{ + Publisher: pub, + timeProvider: mtime, + }) + + r.PublishReadyServersEvents(exampleState) + }) + + t.Run("suppress", func(t *testing.T) { + pub := &MockPublisher{} + mtime := &mockTimeProvider{} + + t.Cleanup(func() { + mtime.AssertExpectations(t) + pub.AssertExpectations(t) + }) + + r := NewReadyServersEventPublisher(Config{ + Publisher: pub, + timeProvider: mtime, + }) + + r.previous = EventPayloadReadyServers{ + { + ID: "65e79ff4-bbce-467b-a9d6-725c709fa985", + Address: "198.18.0.3", + Version: "v1.12.0", + }, + { + ID: "792ae13c-d765-470b-852c-e073fdb6e849", + Address: "198.18.0.2", + Version: "v1.12.0", + }, + { + ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e", + Address: "198.18.0.4", + Version: "v1.12.0", + }, + } + + r.PublishReadyServersEvents(exampleState) + }) +} + +type MockAppender struct { + mock.Mock +} + +func (m *MockAppender) Append(events []stream.Event) { + m.Called(events) +} + +func TestReadyServerEventsSnapshotHandler(t *testing.T) { + buf := MockAppender{} + buf.On("Append", 
[]stream.Event{ + { + Topic: EventTopicReadyServers, + Index: 0, + Payload: EventPayloadReadyServers{}, + }, + }) + buf.On("Append", []stream.Event{ + { + Topic: EventTopicReadyServers, + Index: 1649933760000000, + Payload: EventPayloadReadyServers{ + { + ID: "65e79ff4-bbce-467b-a9d6-725c709fa985", + Address: "198.18.0.3", + TaggedAddresses: map[string]string{"wan": "1.2.3.4"}, + Version: "v1.12.0", + }, + { + ID: "792ae13c-d765-470b-852c-e073fdb6e849", + Address: "198.18.0.2", + TaggedAddresses: map[string]string{"wan": "5.4.3.2"}, + Version: "v1.12.0", + }, + { + ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e", + Address: "198.18.0.4", + TaggedAddresses: map[string]string{"wan": "9.8.7.6"}, + Version: "v1.12.0", + }, + }, + }, + }).Once() + + mtime := mockTimeProvider{} + mtime.On("Now").Return(testTime).Once() + + store := &MockStateStore{} + t.Cleanup(func() { store.AssertExpectations(t) }) + store.On("GetNodeID", + types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"), + structs.NodeEnterpriseMetaInDefaultPartition(), + structs.DefaultPeerKeyword, + ).Once().Return( + uint64(0), + &structs.Node{TaggedAddresses: map[string]string{"wan": "5.4.3.2"}}, + nil, + ) + + store.On("GetNodeID", + types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"), + structs.NodeEnterpriseMetaInDefaultPartition(), + structs.DefaultPeerKeyword, + ).Once().Return( + uint64(0), + &structs.Node{TaggedAddresses: map[string]string{"wan": "1.2.3.4"}}, + nil, + ) + + store.On("GetNodeID", + types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"), + structs.NodeEnterpriseMetaInDefaultPartition(), + structs.DefaultPeerKeyword, + ).Once().Return( + uint64(0), + &structs.Node{TaggedAddresses: map[string]string{"wan": "9.8.7.6"}}, + nil, + ) + + t.Cleanup(func() { + buf.AssertExpectations(t) + store.AssertExpectations(t) + mtime.AssertExpectations(t) + }) + + r := NewReadyServersEventPublisher(Config{ + GetStore: func() StateStore { return store }, + timeProvider: &mtime, + }) + + req := 
stream.SubscribeRequest{ + Topic: EventTopicReadyServers, + Subject: stream.SubjectNone, + } + + // get the first snapshot that should have the zero value event + _, err := r.HandleSnapshot(req, &buf) + require.NoError(t, err) + + // setup the value to be returned by the snapshot handler + r.snapshot, _ = r.readyServersEvents(exampleState) + + // now get the second snapshot which has actual servers + _, err = r.HandleSnapshot(req, &buf) + require.NoError(t, err) +} + +type fakePayload struct{} + +func (e fakePayload) Subject() stream.Subject { return stream.SubjectNone } + +func (e fakePayload) HasReadPermission(authz acl.Authorizer) bool { + return false +} + +func (e fakePayload) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + panic("fakePayload does not implement ToSubscriptionEvent") +} + +func TestExtractEventPayload(t *testing.T) { + t.Run("wrong-topic", func(t *testing.T) { + payload, err := ExtractEventPayload(stream.NewCloseSubscriptionEvent([]string{"foo"})) + require.Nil(t, payload) + require.Error(t, err) + require.Contains(t, err.Error(), "unexpected topic") + }) + + t.Run("unexpected-payload", func(t *testing.T) { + payload, err := ExtractEventPayload(stream.Event{ + Topic: EventTopicReadyServers, + Payload: fakePayload{}, + }) + require.Nil(t, payload) + require.Error(t, err) + require.Contains(t, err.Error(), "unexpected payload type") + }) + + t.Run("success", func(t *testing.T) { + expected := EventPayloadReadyServers{ + { + ID: "a7c340ae-ce17-47da-895c-af2509767b3d", + Address: "198.18.0.1", + Version: "1.2.3", + }, + } + actual, err := ExtractEventPayload(stream.Event{ + Topic: EventTopicReadyServers, + Payload: expected, + }) + + require.NoError(t, err) + require.Equal(t, expected, actual) + }) +} + +func TestReadyServerInfo_Equal(t *testing.T) { + base := func() *ReadyServerInfo { + return &ReadyServerInfo{ + ID: "0356e5ae-ed6b-4024-b953-e1b6a8f0f81b", + Version: "1.12.0", + Address: "198.18.0.1", + TaggedAddresses: map[string]string{ + 
"wan": "1.2.3.4", + }, + } + } + type testCase struct { + modify func(i *ReadyServerInfo) + equal bool + } + + cases := map[string]testCase{ + "unmodified": { + equal: true, + }, + "id-mod": { + modify: func(i *ReadyServerInfo) { i.ID = "30f8f451-e54b-4c7e-a533-b55dddb51be6" }, + }, + "version-mod": { + modify: func(i *ReadyServerInfo) { i.Version = "1.12.1" }, + }, + "address-mod": { + modify: func(i *ReadyServerInfo) { i.Address = "198.18.0.2" }, + }, + "tagged-addresses-added": { + modify: func(i *ReadyServerInfo) { i.TaggedAddresses["wan_ipv4"] = "1.2.3.4" }, + }, + "tagged-addresses-mod": { + modify: func(i *ReadyServerInfo) { i.TaggedAddresses["wan"] = "4.3.2.1" }, + }, + } + + for name, tcase := range cases { + t.Run(name, func(t *testing.T) { + original := base() + modified := base() + if tcase.modify != nil { + tcase.modify(modified) + } + + require.Equal(t, tcase.equal, original.Equal(modified)) + + }) + } +} diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index 77ac97e77e..742ddd1b38 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -133,7 +133,7 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error } // Check the complete register request against the given ACL policy. 
- _, ns, err := state.NodeServices(nil, args.Node, entMeta) + _, ns, err := state.NodeServices(nil, args.Node, entMeta, args.PeerName) if err != nil { return fmt.Errorf("Node lookup failed: %v", err) } @@ -367,7 +367,7 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e var ns *structs.NodeService if args.ServiceID != "" { - _, ns, err = state.NodeService(args.Node, args.ServiceID, &args.EnterpriseMeta) + _, ns, err = state.NodeService(args.Node, args.ServiceID, &args.EnterpriseMeta, args.PeerName) if err != nil { return fmt.Errorf("Service lookup failed: %v", err) } @@ -375,7 +375,7 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e var nc *structs.HealthCheck if args.CheckID != "" { - _, nc, err = state.NodeCheck(args.Node, args.CheckID, &args.EnterpriseMeta) + _, nc, err = state.NodeCheck(args.Node, args.CheckID, &args.EnterpriseMeta, args.PeerName) if err != nil { return fmt.Errorf("Check lookup failed: %v", err) } @@ -486,9 +486,9 @@ func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.Inde func(ws memdb.WatchSet, state *state.Store) error { var err error if len(args.NodeMetaFilters) > 0 { - reply.Index, reply.Nodes, err = state.NodesByMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta) + reply.Index, reply.Nodes, err = state.NodesByMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName) } else { - reply.Index, reply.Nodes, err = state.Nodes(ws, &args.EnterpriseMeta) + reply.Index, reply.Nodes, err = state.Nodes(ws, &args.EnterpriseMeta, args.PeerName) } if err != nil { return err @@ -546,9 +546,9 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I func(ws memdb.WatchSet, state *state.Store) error { var err error if len(args.NodeMetaFilters) > 0 { - reply.Index, reply.Services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta) + reply.Index, reply.Services, err = state.ServicesByNodeMeta(ws, 
args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName) } else { - reply.Index, reply.Services, err = state.Services(ws, &args.EnterpriseMeta) + reply.Index, reply.Services, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName) } if err != nil { return err @@ -584,7 +584,7 @@ func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.In &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, services, err := state.ServiceList(ws, &args.EnterpriseMeta) + index, services, err := state.ServiceList(ws, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } @@ -611,13 +611,13 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru switch { case args.Connect: f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.ServiceNodes, error) { - return s.ConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta) + return s.ConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName) } default: f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.ServiceNodes, error) { if args.ServiceAddress != "" { - return s.ServiceAddressNodes(ws, args.ServiceAddress, &args.EnterpriseMeta) + return s.ServiceAddressNodes(ws, args.ServiceAddress, &args.EnterpriseMeta, args.PeerName) } if args.TagFilter { @@ -630,10 +630,10 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru tags = []string{args.ServiceTag} } - return s.ServiceTagNodes(ws, args.ServiceName, tags, &args.EnterpriseMeta) + return s.ServiceTagNodes(ws, args.ServiceName, tags, &args.EnterpriseMeta, args.PeerName) } - return s.ServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta) + return s.ServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName) } } @@ -768,7 +768,7 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - 
index, services, err := state.NodeServices(ws, args.Node, &args.EnterpriseMeta) + index, services, err := state.NodeServices(ws, args.Node, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } @@ -824,7 +824,7 @@ func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *stru &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, services, err := state.NodeServiceList(ws, args.Node, &args.EnterpriseMeta) + index, services, err := state.NodeServiceList(ws, args.Node, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index d6d303c2bb..78ac4c36bd 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -1650,6 +1650,7 @@ func TestCatalog_ListServices_Stale(t *testing.T) { c.PrimaryDatacenter = "dc1" // Enable ACLs! c.ACLsEnabled = true c.Bootstrap = false // Disable bootstrap + c.RPCHoldTimeout = 10 * time.Millisecond }) defer os.RemoveAll(dir2) defer s2.Shutdown() diff --git a/agent/consul/client.go b/agent/consul/client.go index 6a15acb94e..7ce00af333 100644 --- a/agent/consul/client.go +++ b/agent/consul/client.go @@ -291,20 +291,26 @@ TRY: } // Move off to another server, and see if we can retry. 
- c.logger.Error("RPC failed to server", - "method", method, - "server", server.Addr, - "error", rpcErr, - ) - metrics.IncrCounterWithLabels([]string{"client", "rpc", "failed"}, 1, []metrics.Label{{Name: "server", Value: server.Name}}) manager.NotifyFailedServer(server) // Use the zero value for RPCInfo if the request doesn't implement RPCInfo info, _ := args.(structs.RPCInfo) if retry := canRetry(info, rpcErr, firstCheck, c.config); !retry { + c.logger.Error("RPC failed to server", + "method", method, + "server", server.Addr, + "error", rpcErr, + ) + metrics.IncrCounterWithLabels([]string{"client", "rpc", "failed"}, 1, []metrics.Label{{Name: "server", Value: server.Name}}) return rpcErr } + c.logger.Warn("Retrying RPC to server", + "method", method, + "server", server.Addr, + "error", rpcErr, + ) + // We can wait a bit and retry! jitter := lib.RandomStagger(c.config.RPCHoldTimeout / structs.JitterFraction) select { diff --git a/agent/consul/client_test.go b/agent/consul/client_test.go index d593f5aa9c..d8f0fbd4df 100644 --- a/agent/consul/client_test.go +++ b/agent/consul/client_test.go @@ -48,6 +48,7 @@ func testClientConfig(t *testing.T) (string, *Config) { config.SerfLANConfig.MemberlistConfig.ProbeTimeout = 200 * time.Millisecond config.SerfLANConfig.MemberlistConfig.ProbeInterval = time.Second config.SerfLANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond + config.RPCHoldTimeout = 10 * time.Second return dir, config } @@ -72,7 +73,7 @@ func testClientWithConfigWithErr(t *testing.T, cb func(c *Config)) (string, *Cli } // Apply config to copied fields because many tests only set the old - //values. + // values. 
config.ACLResolverSettings.ACLsEnabled = config.ACLsEnabled config.ACLResolverSettings.NodeName = config.NodeName config.ACLResolverSettings.Datacenter = config.Datacenter @@ -509,7 +510,7 @@ func newDefaultDeps(t *testing.T, c *Config) Deps { logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ Name: c.NodeName, - Level: testutil.TestLogLevel, + Level: hclog.Trace, Output: testutil.NewLogBuffer(t), }) @@ -521,13 +522,16 @@ func newDefaultDeps(t *testing.T, c *Config) Deps { resolver.Register(builder) connPool := &pool.ConnPool{ - Server: false, - SrcAddr: c.RPCSrcAddr, - Logger: logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}), - MaxTime: 2 * time.Minute, - MaxStreams: 4, - TLSConfigurator: tls, - Datacenter: c.Datacenter, + Server: false, + SrcAddr: c.RPCSrcAddr, + Logger: logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}), + MaxTime: 2 * time.Minute, + MaxStreams: 4, + TLSConfigurator: tls, + Datacenter: c.Datacenter, + Timeout: c.RPCHoldTimeout, + DefaultQueryTime: c.DefaultQueryTime, + MaxQueryTime: c.MaxQueryTime, } return Deps{ @@ -853,3 +857,67 @@ func TestClient_ShortReconnectTimeout(t *testing.T) { 50*time.Millisecond, "The client node was not reaped within the alotted time") } + +type waiter struct { + duration time.Duration +} + +func (w *waiter) Wait(struct{}, *struct{}) error { + time.Sleep(w.duration) + return nil +} + +func TestClient_RPC_Timeout(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + t.Parallel() + + _, s1 := testServerWithConfig(t) + + _, c1 := testClientWithConfig(t, func(c *Config) { + c.Datacenter = "dc1" + c.NodeName = uniqueNodeName(t.Name()) + c.RPCHoldTimeout = 10 * time.Millisecond + c.DefaultQueryTime = 100 * time.Millisecond + c.MaxQueryTime = 200 * time.Millisecond + }) + joinLAN(t, c1, s1) + + retry.Run(t, func(r *retry.R) { + var out struct{} + if err := c1.RPC("Status.Ping", struct{}{}, &out); err != nil { + r.Fatalf("err: %v", err) + } + }) + 
+ // waiter will sleep for 50ms + require.NoError(t, s1.RegisterEndpoint("Wait", &waiter{duration: 50 * time.Millisecond})) + + // Requests with QueryOptions have a default timeout of RPCHoldTimeout (10ms) + // so we expect the RPC call to timeout. + var out struct{} + err := c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{}, &out) + require.Error(t, err) + require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached") + + // Blocking requests have a longer timeout (100ms) so this should pass + out = struct{}{} + err = c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{ + QueryOptions: structs.QueryOptions{ + MinQueryIndex: 1, + }, + }, &out) + require.NoError(t, err) + + // We pass in a custom MaxQueryTime (20ms) through QueryOptions which should fail + out = struct{}{} + err = c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{ + QueryOptions: structs.QueryOptions{ + MinQueryIndex: 1, + MaxQueryTime: 20 * time.Millisecond, + }, + }, &out) + require.Error(t, err) + require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached") +} diff --git a/agent/consul/config.go b/agent/consul/config.go index 4b017da6bc..40d627bede 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -604,6 +604,8 @@ type ReloadableConfig struct { RaftSnapshotThreshold int RaftSnapshotInterval time.Duration RaftTrailingLogs int + HeartbeatTimeout time.Duration + ElectionTimeout time.Duration } type RaftBoltDBConfig struct { diff --git a/agent/consul/fsm/commands_oss.go b/agent/consul/fsm/commands_oss.go index b07d12c14f..861bffdfd6 100644 --- a/agent/consul/fsm/commands_oss.go +++ b/agent/consul/fsm/commands_oss.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/proto/pbpeering" ) var CommandsSummaries = []prometheus.SummaryDefinition{ @@ -93,6 +94,10 @@ var CommandsSummaries = []prometheus.SummaryDefinition{ Name: 
[]string{"fsm", "system_metadata"}, Help: "Measures the time it takes to apply a system metadata operation to the FSM.", }, + { + Name: []string{"fsm", "peering"}, + Help: "Measures the time it takes to apply a peering operation to the FSM.", + }, // TODO(kit): We generate the config-entry fsm summaries by reading off of the request. It is // possible to statically declare these when we know all of the names, but I didn't get to it // in this patch. Config-entries are known though and we should add these in the future. @@ -131,6 +136,11 @@ func init() { registerCommand(structs.ACLAuthMethodDeleteRequestType, (*FSM).applyACLAuthMethodDeleteOperation) registerCommand(structs.FederationStateRequestType, (*FSM).applyFederationStateOperation) registerCommand(structs.SystemMetadataRequestType, (*FSM).applySystemMetadataOperation) + registerCommand(structs.PeeringWriteType, (*FSM).applyPeeringWrite) + registerCommand(structs.PeeringDeleteType, (*FSM).applyPeeringDelete) + registerCommand(structs.PeeringTerminateByIDType, (*FSM).applyPeeringTerminate) + registerCommand(structs.PeeringTrustBundleWriteType, (*FSM).applyPeeringTrustBundleWrite) + registerCommand(structs.PeeringTrustBundleDeleteType, (*FSM).applyPeeringTrustBundleDelete) } func (c *FSM) applyRegister(buf []byte, index uint64) interface{} { @@ -159,17 +169,17 @@ func (c *FSM) applyDeregister(buf []byte, index uint64) interface{} { // here is also baked into vetDeregisterWithACL() in acl.go, so if you // make changes here, be sure to also adjust the code over there. 
if req.ServiceID != "" { - if err := c.state.DeleteService(index, req.Node, req.ServiceID, &req.EnterpriseMeta); err != nil { + if err := c.state.DeleteService(index, req.Node, req.ServiceID, &req.EnterpriseMeta, req.PeerName); err != nil { c.logger.Warn("DeleteNodeService failed", "error", err) return err } } else if req.CheckID != "" { - if err := c.state.DeleteCheck(index, req.Node, req.CheckID, &req.EnterpriseMeta); err != nil { + if err := c.state.DeleteCheck(index, req.Node, req.CheckID, &req.EnterpriseMeta, req.PeerName); err != nil { c.logger.Warn("DeleteNodeCheck failed", "error", err) return err } } else { - if err := c.state.DeleteNode(index, req.Node, &req.EnterpriseMeta); err != nil { + if err := c.state.DeleteNode(index, req.Node, &req.EnterpriseMeta, req.PeerName); err != nil { c.logger.Warn("DeleteNode failed", "error", err) return err } @@ -679,3 +689,73 @@ func (c *FSM) applySystemMetadataOperation(buf []byte, index uint64) interface{} return fmt.Errorf("invalid system metadata operation type: %v", req.Op) } } + +func (c *FSM) applyPeeringWrite(buf []byte, index uint64) interface{} { + var req pbpeering.PeeringWriteRequest + if err := structs.DecodeProto(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode peering write request: %v", err)) + } + + defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering"}, time.Now(), + []metrics.Label{{Name: "op", Value: "write"}}) + + return c.state.PeeringWrite(index, req.Peering) +} + +// TODO(peering): replace with deferred deletion since this operation +// should involve cleanup of data associated with the peering. 
+func (c *FSM) applyPeeringDelete(buf []byte, index uint64) interface{} { + var req pbpeering.PeeringDeleteRequest + if err := structs.DecodeProto(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode peering delete request: %v", err)) + } + + defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering"}, time.Now(), + []metrics.Label{{Name: "op", Value: "delete"}}) + + q := state.Query{ + Value: req.Name, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition), + } + return c.state.PeeringDelete(index, q) +} + +func (c *FSM) applyPeeringTerminate(buf []byte, index uint64) interface{} { + var req pbpeering.PeeringTerminateByIDRequest + if err := structs.DecodeProto(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode peering delete request: %v", err)) + } + + defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering"}, time.Now(), + []metrics.Label{{Name: "op", Value: "terminate"}}) + + return c.state.PeeringTerminateByID(index, req.ID) +} + +func (c *FSM) applyPeeringTrustBundleWrite(buf []byte, index uint64) interface{} { + var req pbpeering.PeeringTrustBundleWriteRequest + if err := structs.DecodeProto(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode peering trust bundle write request: %v", err)) + } + + defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering_trust_bundle"}, time.Now(), + []metrics.Label{{Name: "op", Value: "write"}}) + + return c.state.PeeringTrustBundleWrite(index, req.PeeringTrustBundle) +} + +func (c *FSM) applyPeeringTrustBundleDelete(buf []byte, index uint64) interface{} { + var req pbpeering.PeeringTrustBundleDeleteRequest + if err := structs.DecodeProto(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode peering trust bundle delete request: %v", err)) + } + + defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering_trust_bundle"}, time.Now(), + []metrics.Label{{Name: "op", Value: "delete"}}) + + q := state.Query{ + Value: req.Name, + EnterpriseMeta: 
*structs.NodeEnterpriseMetaInPartition(req.Partition), + } + return c.state.PeeringTrustBundleDelete(index, q) +} diff --git a/agent/consul/fsm/commands_oss_test.go b/agent/consul/fsm/commands_oss_test.go index 27a21c871d..061c4a9cc6 100644 --- a/agent/consul/fsm/commands_oss_test.go +++ b/agent/consul/fsm/commands_oss_test.go @@ -69,7 +69,7 @@ func TestFSM_RegisterNode(t *testing.T) { } // Verify we are registered - _, node, err := fsm.state.GetNode("foo", nil) + _, node, err := fsm.state.GetNode("foo", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -81,7 +81,7 @@ func TestFSM_RegisterNode(t *testing.T) { } // Verify service registered - _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -128,7 +128,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) { } // Verify we are registered - _, node, err := fsm.state.GetNode("foo", nil) + _, node, err := fsm.state.GetNode("foo", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -137,7 +137,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) { } // Verify service registered - _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -146,7 +146,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) { } // Verify check - _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -200,7 +200,7 @@ func TestFSM_DeregisterService(t *testing.T) { } // Verify we are registered - _, node, err := 
fsm.state.GetNode("foo", nil) + _, node, err := fsm.state.GetNode("foo", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -209,7 +209,7 @@ func TestFSM_DeregisterService(t *testing.T) { } // Verify service not registered - _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -263,7 +263,7 @@ func TestFSM_DeregisterCheck(t *testing.T) { } // Verify we are registered - _, node, err := fsm.state.GetNode("foo", nil) + _, node, err := fsm.state.GetNode("foo", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -272,7 +272,7 @@ func TestFSM_DeregisterCheck(t *testing.T) { } // Verify check not registered - _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -332,7 +332,7 @@ func TestFSM_DeregisterNode(t *testing.T) { } // Verify we are not registered - _, node, err := fsm.state.GetNode("foo", nil) + _, node, err := fsm.state.GetNode("foo", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -341,7 +341,7 @@ func TestFSM_DeregisterNode(t *testing.T) { } // Verify service not registered - _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if err != nil { t.Fatalf("err: %s", err) } @@ -350,7 +350,7 @@ func TestFSM_DeregisterNode(t *testing.T) { } // Verify checks not registered - _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition()) + _, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "") if 
err != nil { t.Fatalf("err: %s", err) } @@ -1468,7 +1468,7 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) { // Verify we are not registered for i := 0; i < 10; i++ { - _, node, err := fsm.state.GetNode(fmt.Sprintf("foo%d", i), nil) + _, node, err := fsm.state.GetNode(fmt.Sprintf("foo%d", i), nil, "") require.NoError(t, err) assert.Nil(t, node) } @@ -1491,7 +1491,7 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) { // Verify we are still not registered for i := 0; i < 10; i++ { - _, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil) + _, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil, "") require.NoError(t, err) assert.Nil(t, node) } @@ -1515,19 +1515,19 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) { // Verify we are registered for i := 0; i < 10; i++ { - _, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil) + _, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil, "") require.NoError(t, err) assert.NotNil(t, node) // Verify service registered - _, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i), structs.DefaultEnterpriseMetaInDefaultPartition()) + _, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i), structs.DefaultEnterpriseMetaInDefaultPartition(), "") require.NoError(t, err) require.NotNil(t, services) _, ok := services.Services["db"] assert.True(t, ok) // Verify check - _, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i), nil) + _, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i), nil, "") require.NoError(t, err) require.NotNil(t, checks) assert.Equal(t, string(checks[0].CheckID), "db") diff --git a/agent/consul/fsm/snapshot_oss.go b/agent/consul/fsm/snapshot_oss.go index 48dea223e1..3ee4c85580 100644 --- a/agent/consul/fsm/snapshot_oss.go +++ b/agent/consul/fsm/snapshot_oss.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" + 
"github.com/hashicorp/consul/proto/pbpeering" ) func init() { @@ -35,6 +36,8 @@ func init() { registerRestorer(structs.SystemMetadataRequestType, restoreSystemMetadata) registerRestorer(structs.ServiceVirtualIPRequestType, restoreServiceVirtualIP) registerRestorer(structs.FreeVirtualIPRequestType, restoreFreeVirtualIP) + registerRestorer(structs.PeeringWriteType, restorePeering) + registerRestorer(structs.PeeringTrustBundleWriteType, restorePeeringTrustBundle) } func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) error { @@ -86,6 +89,12 @@ func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) err if err := s.persistIndex(sink, encoder); err != nil { return err } + if err := s.persistPeerings(sink, encoder); err != nil { + return err + } + if err := s.persistPeeringTrustBundles(sink, encoder); err != nil { + return err + } return nil } @@ -112,6 +121,7 @@ func (s *snapshot) persistNodes(sink raft.SnapshotSink, NodeMeta: n.Meta, RaftIndex: n.RaftIndex, EnterpriseMeta: *nodeEntMeta, + PeerName: n.PeerName, } // Register the node itself @@ -123,7 +133,7 @@ func (s *snapshot) persistNodes(sink raft.SnapshotSink, } // Register each service this node has - services, err := s.state.Services(n.Node, nodeEntMeta) + services, err := s.state.Services(n.Node, nodeEntMeta, n.PeerName) if err != nil { return err } @@ -139,7 +149,7 @@ func (s *snapshot) persistNodes(sink raft.SnapshotSink, // Register each check this node has req.Service = nil - checks, err := s.state.Checks(n.Node, nodeEntMeta) + checks, err := s.state.Checks(n.Node, nodeEntMeta, n.PeerName) if err != nil { return err } @@ -161,7 +171,6 @@ func (s *snapshot) persistNodes(sink raft.SnapshotSink, if err != nil { return err } - // TODO(partitions) for coord := coords.Next(); coord != nil; coord = coords.Next() { if _, err := sink.Write([]byte{byte(structs.CoordinateBatchUpdateType)}); err != nil { return err @@ -547,6 +556,42 @@ func (s *snapshot) 
persistVirtualIPs(sink raft.SnapshotSink, encoder *codec.Enco return nil } +func (s *snapshot) persistPeerings(sink raft.SnapshotSink, encoder *codec.Encoder) error { + peerings, err := s.state.Peerings() + if err != nil { + return err + } + + for entry := peerings.Next(); entry != nil; entry = peerings.Next() { + if _, err := sink.Write([]byte{byte(structs.PeeringWriteType)}); err != nil { + return err + } + if err := encoder.Encode(entry.(*pbpeering.Peering)); err != nil { + return err + } + } + + return nil +} + +func (s *snapshot) persistPeeringTrustBundles(sink raft.SnapshotSink, encoder *codec.Encoder) error { + ptbs, err := s.state.PeeringTrustBundles() + if err != nil { + return err + } + + for entry := ptbs.Next(); entry != nil; entry = ptbs.Next() { + if _, err := sink.Write([]byte{byte(structs.PeeringTrustBundleWriteType)}); err != nil { + return err + } + if err := encoder.Encode(entry.(*pbpeering.PeeringTrustBundle)); err != nil { + return err + } + } + + return nil +} + func restoreRegistration(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error { var req structs.RegisterRequest if err := decoder.Decode(&req); err != nil { @@ -849,3 +894,25 @@ func restoreFreeVirtualIP(header *SnapshotHeader, restore *state.Restore, decode } return nil } + +func restorePeering(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error { + var req pbpeering.Peering + if err := decoder.Decode(&req); err != nil { + return err + } + if err := restore.Peering(&req); err != nil { + return err + } + return nil +} + +func restorePeeringTrustBundle(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error { + var req pbpeering.PeeringTrustBundle + if err := decoder.Decode(&req); err != nil { + return err + } + if err := restore.PeeringTrustBundle(&req); err != nil { + return err + } + return nil +} diff --git a/agent/consul/fsm/snapshot_oss_test.go b/agent/consul/fsm/snapshot_oss_test.go index 
c75bbc1973..558abf4beb 100644 --- a/agent/consul/fsm/snapshot_oss_test.go +++ b/agent/consul/fsm/snapshot_oss_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib/stringslice" + "github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/sdk/testutil" ) @@ -473,6 +474,18 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.Equal(t, expect[i], sn.Service.Name) } + // Peerings + require.NoError(t, fsm.state.PeeringWrite(31, &pbpeering.Peering{ + Name: "baz", + })) + + // Peering Trust Bundles + require.NoError(t, fsm.state.PeeringTrustBundleWrite(32, &pbpeering.PeeringTrustBundle{ + TrustDomain: "qux.com", + PeerName: "qux", + RootPEMs: []string{"qux certificate bundle"}, + })) + // Snapshot snap, err := fsm.Snapshot() require.NoError(t, err) @@ -528,7 +541,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.NoError(t, fsm2.Restore(sink)) // Verify the contents - _, nodes, err := fsm2.state.Nodes(nil, nil) + _, nodes, err := fsm2.state.Nodes(nil, nil, "") require.NoError(t, err) require.Len(t, nodes, 2, "incorect number of nodes: %v", nodes) @@ -556,7 +569,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.Equal(t, uint64(1), nodes[1].CreateIndex) require.Equal(t, uint64(23), nodes[1].ModifyIndex) - _, fooSrv, err := fsm2.state.NodeServices(nil, "foo", nil) + _, fooSrv, err := fsm2.state.NodeServices(nil, "foo", nil, "") require.NoError(t, err) require.Len(t, fooSrv.Services, 4) require.Contains(t, fooSrv.Services["db"].Tags, "primary") @@ -569,7 +582,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.Equal(t, uint64(3), fooSrv.Services["web"].CreateIndex) require.Equal(t, uint64(3), fooSrv.Services["web"].ModifyIndex) - _, checks, err := fsm2.state.NodeChecks(nil, "foo", nil) + _, checks, err := fsm2.state.NodeChecks(nil, "foo", nil, "") require.NoError(t, err) require.Len(t, checks, 1) require.Equal(t, "foo", 
checks[0].Node) @@ -768,6 +781,27 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { require.Equal(t, expect[i], sn.Service.Name) } + // Verify peering is restored + idx, prngRestored, err := fsm2.state.PeeringRead(nil, state.Query{ + Value: "baz", + }) + require.NoError(t, err) + require.Equal(t, uint64(31), idx) + require.NotNil(t, prngRestored) + require.Equal(t, "baz", prngRestored.Name) + + // Verify peering trust bundle is restored + idx, ptbRestored, err := fsm2.state.PeeringTrustBundleRead(nil, state.Query{ + Value: "qux", + }) + require.NoError(t, err) + require.Equal(t, uint64(32), idx) + require.NotNil(t, ptbRestored) + require.Equal(t, "qux.com", ptbRestored.TrustDomain) + require.Equal(t, "qux", ptbRestored.PeerName) + require.Len(t, ptbRestored.RootPEMs, 1) + require.Equal(t, "qux certificate bundle", ptbRestored.RootPEMs[0]) + // Snapshot snap, err = fsm2.Snapshot() require.NoError(t, err) @@ -821,7 +855,7 @@ func TestFSM_BadRestore_OSS(t *testing.T) { require.Error(t, fsm.Restore(sink)) // Verify the contents didn't get corrupted. 
- _, nodes, err := fsm.state.Nodes(nil, nil) + _, nodes, err := fsm.state.Nodes(nil, nil, "") require.NoError(t, err) require.Len(t, nodes, 1) require.Equal(t, "foo", nodes[0].Node) diff --git a/agent/consul/grpc_integration_test.go b/agent/consul/grpc_integration_test.go index c243ebfee6..d588e324d7 100644 --- a/agent/consul/grpc_integration_test.go +++ b/agent/consul/grpc_integration_test.go @@ -2,16 +2,15 @@ package consul import ( "context" - "net" - "os" "testing" + "time" "github.com/stretchr/testify/require" - "google.golang.org/grpc" "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/grpc/public" "github.com/hashicorp/consul/proto-public/pbconnectca" - "github.com/hashicorp/consul/testrpc" + "github.com/hashicorp/consul/proto-public/pbserverdiscovery" ) func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) { @@ -19,8 +18,6 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) { t.Skip("too slow for testing.Short") } - t.Parallel() - // The gRPC endpoint itself well-tested with mocks. This test checks we're // correctly wiring everything up in the server by: // @@ -28,42 +25,24 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) { // * Making a request to a follower's public gRPC port. // * Ensuring that the request is correctly forwarded to the leader. // * Ensuring we get a valid certificate back (so it went through the CAManager). 
- dir1, server1 := testServerWithConfig(t, func(c *Config) { + server1, conn1 := testGRPCIntegrationServer(t, func(c *Config) { c.Bootstrap = false c.BootstrapExpect = 2 }) - defer os.RemoveAll(dir1) - defer server1.Shutdown() - dir2, server2 := testServerWithConfig(t, func(c *Config) { + server2, conn2 := testGRPCIntegrationServer(t, func(c *Config) { c.Bootstrap = false }) - defer os.RemoveAll(dir2) - defer server2.Shutdown() joinLAN(t, server2, server1) - testrpc.WaitForLeader(t, server1.RPC, "dc1") + waitForLeaderEstablishment(t, server1, server2) - var follower *Server - if server1.IsLeader() { - follower = server2 - } else { - follower = server1 + conn := conn2 + if server2.IsLeader() { + conn = conn1 } - // publicGRPCServer is bound to a listener by the wrapping agent code, so we - // need to do it ourselves here. - lis, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - go func() { - require.NoError(t, follower.publicGRPCServer.Serve(lis)) - }() - t.Cleanup(follower.publicGRPCServer.Stop) - - conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) - require.NoError(t, err) - client := pbconnectca.NewConnectCAServiceClient(conn) csr, _ := connect.TestCSR(t, &connect.SpiffeIDService{ @@ -73,8 +52,13 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) { Service: "foo", }) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + ctx = public.ContextWithToken(ctx, TestDefaultInitialManagementToken) + // This would fail if it wasn't forwarded to the leader. 
- rsp, err := client.Sign(context.Background(), &pbconnectca.SignRequest{ + rsp, err := client.Sign(ctx, &pbconnectca.SignRequest{ Csr: csr, }) require.NoError(t, err) @@ -82,3 +66,52 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) { _, err = connect.ParseCert(rsp.CertPem) require.NoError(t, err) } + +func TestGRPCIntegration_ServerDiscovery_WatchServers(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + // The gRPC endpoint itself well-tested with mocks. This test checks we're + // correctly wiring everything up in the server by: + // + // * Starting a server + // * Initiating the gRPC stream + // * Validating the snapshot + // * Adding another server + // * Validating another message is sent. + + server1, conn := testGRPCIntegrationServer(t, func(c *Config) { + c.Bootstrap = true + c.BootstrapExpect = 1 + }) + waitForLeaderEstablishment(t, server1) + + client := pbserverdiscovery.NewServerDiscoveryServiceClient(conn) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + ctx = public.ContextWithToken(ctx, TestDefaultInitialManagementToken) + + serverStream, err := client.WatchServers(ctx, &pbserverdiscovery.WatchServersRequest{Wan: false}) + require.NoError(t, err) + + rsp, err := serverStream.Recv() + require.NoError(t, err) + require.NotNil(t, rsp) + require.Len(t, rsp.Servers, 1) + + _, server2, _ := testACLServerWithConfig(t, func(c *Config) { + c.Bootstrap = false + }, false) + + // join the new server to the leader + joinLAN(t, server2, server1) + + // now receive the event containing 2 servers + rsp, err = serverStream.Recv() + require.NoError(t, err) + require.NotNil(t, rsp) + require.Len(t, rsp.Servers, 2) +} diff --git a/agent/consul/health_endpoint.go b/agent/consul/health_endpoint.go index f9268c21c4..60dc968c55 100644 --- a/agent/consul/health_endpoint.go +++ b/agent/consul/health_endpoint.go @@ -47,9 +47,9 @@ func (h *Health) ChecksInState(args 
*structs.ChecksInStateRequest, var checks structs.HealthChecks var err error if len(args.NodeMetaFilters) > 0 { - index, checks, err = state.ChecksInStateByNodeMeta(ws, args.State, args.NodeMetaFilters, &args.EnterpriseMeta) + index, checks, err = state.ChecksInStateByNodeMeta(ws, args.State, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName) } else { - index, checks, err = state.ChecksInState(ws, args.State, &args.EnterpriseMeta) + index, checks, err = state.ChecksInState(ws, args.State, &args.EnterpriseMeta, args.PeerName) } if err != nil { return err @@ -98,7 +98,7 @@ func (h *Health) NodeChecks(args *structs.NodeSpecificRequest, &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, checks, err := state.NodeChecks(ws, args.Node, &args.EnterpriseMeta) + index, checks, err := state.NodeChecks(ws, args.Node, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } @@ -157,9 +157,9 @@ func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest, var checks structs.HealthChecks var err error if len(args.NodeMetaFilters) > 0 { - index, checks, err = state.ServiceChecksByNodeMeta(ws, args.ServiceName, args.NodeMetaFilters, &args.EnterpriseMeta) + index, checks, err = state.ServiceChecksByNodeMeta(ws, args.ServiceName, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName) } else { - index, checks, err = state.ServiceChecks(ws, args.ServiceName, &args.EnterpriseMeta) + index, checks, err = state.ServiceChecks(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName) } if err != nil { return err @@ -304,7 +304,7 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc // can be used by the ServiceNodes endpoint. 
func (h *Health) serviceNodesConnect(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) { - return s.CheckConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta) + return s.CheckConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName) } func (h *Health) serviceNodesIngress(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) { @@ -317,11 +317,11 @@ func (h *Health) serviceNodesTagFilter(ws memdb.WatchSet, s *state.Store, args * // Agents < v1.3.0 populate the ServiceTag field. In this case, // use ServiceTag instead of the ServiceTags field. if args.ServiceTag != "" { - return s.CheckServiceTagNodes(ws, args.ServiceName, []string{args.ServiceTag}, &args.EnterpriseMeta) + return s.CheckServiceTagNodes(ws, args.ServiceName, []string{args.ServiceTag}, &args.EnterpriseMeta, args.PeerName) } - return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTags, &args.EnterpriseMeta) + return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTags, &args.EnterpriseMeta, args.PeerName) } func (h *Health) serviceNodesDefault(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) { - return s.CheckServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta) + return s.CheckServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName) } diff --git a/agent/consul/health_endpoint_test.go b/agent/consul/health_endpoint_test.go index 4193f7fee0..c3ebab97c3 100644 --- a/agent/consul/health_endpoint_test.go +++ b/agent/consul/health_endpoint_test.go @@ -13,7 +13,6 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" - "github.com/hashicorp/consul/lib/stringslice" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/types" 
@@ -558,124 +557,109 @@ func TestHealth_ServiceNodes(t *testing.T) { } t.Parallel() - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() + _, s1 := testServer(t) codec := rpcClient(t, s1) - defer codec.Close() - testrpc.WaitForLeader(t, s1.RPC, "dc1") + waitForLeaderEstablishment(t, s1) - arg := structs.RegisterRequest{ - Datacenter: "dc1", - Node: "foo", - Address: "127.0.0.1", - Service: &structs.NodeService{ - ID: "db", - Service: "db", - Tags: []string{"primary"}, - }, - Check: &structs.HealthCheck{ - Name: "db connect", - Status: api.HealthPassing, - ServiceID: "db", - }, - } - var out struct{} - if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil { - t.Fatalf("err: %v", err) - } + testingPeerNames := []string{"", "my-peer"} - arg = structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.2", - Service: &structs.NodeService{ - ID: "db", - Service: "db", - Tags: []string{"replica"}, - }, - Check: &structs.HealthCheck{ - Name: "db connect", - Status: api.HealthWarning, - ServiceID: "db", - }, - } - if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil { - t.Fatalf("err: %v", err) - } - - var out2 structs.IndexedCheckServiceNodes - req := structs.ServiceSpecificRequest{ - Datacenter: "dc1", - ServiceName: "db", - ServiceTags: []string{"primary"}, - TagFilter: false, - } - if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil { - t.Fatalf("err: %v", err) - } - - nodes := out2.Nodes - if len(nodes) != 2 { - t.Fatalf("Bad: %v", nodes) - } - if nodes[0].Node.Node != "bar" { - t.Fatalf("Bad: %v", nodes[0]) - } - if nodes[1].Node.Node != "foo" { - t.Fatalf("Bad: %v", nodes[1]) - } - if !stringslice.Contains(nodes[0].Service.Tags, "replica") { - t.Fatalf("Bad: %v", nodes[0]) - } - if !stringslice.Contains(nodes[1].Service.Tags, "primary") { - t.Fatalf("Bad: %v", nodes[1]) - } - if nodes[0].Checks[0].Status != 
api.HealthWarning { - t.Fatalf("Bad: %v", nodes[0]) - } - if nodes[1].Checks[0].Status != api.HealthPassing { - t.Fatalf("Bad: %v", nodes[1]) - } - - // Same should still work for <1.3 RPCs with singular tags - // DEPRECATED (singular-service-tag) - remove this when backwards RPC compat - // with 1.2.x is not required. - { - var out2 structs.IndexedCheckServiceNodes - req := structs.ServiceSpecificRequest{ - Datacenter: "dc1", - ServiceName: "db", - ServiceTag: "primary", - TagFilter: false, - } - if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil { - t.Fatalf("err: %v", err) + // TODO(peering): will have to seed this data differently in the future + for _, peerName := range testingPeerNames { + arg := structs.RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + PeerName: peerName, + Service: &structs.NodeService{ + ID: "db", + Service: "db", + Tags: []string{"primary"}, + PeerName: peerName, + }, + Check: &structs.HealthCheck{ + Name: "db connect", + Status: api.HealthPassing, + ServiceID: "db", + PeerName: peerName, + }, } + var out struct{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) + arg = structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.2", + PeerName: peerName, + Service: &structs.NodeService{ + ID: "db", + Service: "db", + Tags: []string{"replica"}, + PeerName: peerName, + }, + Check: &structs.HealthCheck{ + Name: "db connect", + Status: api.HealthWarning, + ServiceID: "db", + PeerName: peerName, + }, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out)) + } + + verify := func(t *testing.T, out2 structs.IndexedCheckServiceNodes, peerName string) { nodes := out2.Nodes - if len(nodes) != 2 { - t.Fatalf("Bad: %v", nodes) - } - if nodes[0].Node.Node != "bar" { - t.Fatalf("Bad: %v", nodes[0]) - } - if nodes[1].Node.Node != "foo" { - t.Fatalf("Bad: %v", nodes[1]) - } - if 
!stringslice.Contains(nodes[0].Service.Tags, "replica") { - t.Fatalf("Bad: %v", nodes[0]) - } - if !stringslice.Contains(nodes[1].Service.Tags, "primary") { - t.Fatalf("Bad: %v", nodes[1]) - } - if nodes[0].Checks[0].Status != api.HealthWarning { - t.Fatalf("Bad: %v", nodes[0]) - } - if nodes[1].Checks[0].Status != api.HealthPassing { - t.Fatalf("Bad: %v", nodes[1]) + require.Len(t, nodes, 2) + require.Equal(t, peerName, nodes[0].Node.PeerName) + require.Equal(t, peerName, nodes[1].Node.PeerName) + require.Equal(t, "bar", nodes[0].Node.Node) + require.Equal(t, "foo", nodes[1].Node.Node) + require.Equal(t, peerName, nodes[0].Service.PeerName) + require.Equal(t, peerName, nodes[1].Service.PeerName) + require.Contains(t, nodes[0].Service.Tags, "replica") + require.Contains(t, nodes[1].Service.Tags, "primary") + require.Equal(t, peerName, nodes[0].Checks[0].PeerName) + require.Equal(t, peerName, nodes[1].Checks[0].PeerName) + require.Equal(t, api.HealthWarning, nodes[0].Checks[0].Status) + require.Equal(t, api.HealthPassing, nodes[1].Checks[0].Status) + } + + for _, peerName := range testingPeerNames { + testName := "peer named " + peerName + if peerName == "" { + testName = "local peer" } + t.Run(testName, func(t *testing.T) { + t.Run("with service tags", func(t *testing.T) { + var out2 structs.IndexedCheckServiceNodes + req := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "db", + ServiceTags: []string{"primary"}, + TagFilter: false, + PeerName: peerName, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2)) + verify(t, out2, peerName) + }) + + // Same should still work for <1.3 RPCs with singular tags + // DEPRECATED (singular-service-tag) - remove this when backwards RPC compat + // with 1.2.x is not required. 
+ t.Run("with legacy service tag", func(t *testing.T) { + var out2 structs.IndexedCheckServiceNodes + req := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: "db", + ServiceTag: "primary", + TagFilter: false, + PeerName: peerName, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2)) + verify(t, out2, peerName) + }) + }) } } diff --git a/agent/consul/internal_endpoint.go b/agent/consul/internal_endpoint.go index d78f20046b..718002889c 100644 --- a/agent/consul/internal_endpoint.go +++ b/agent/consul/internal_endpoint.go @@ -38,7 +38,7 @@ func (m *Internal) NodeInfo(args *structs.NodeSpecificRequest, &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, dump, err := state.NodeInfo(ws, args.Node, &args.EnterpriseMeta) + index, dump, err := state.NodeInfo(ws, args.Node, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } @@ -69,7 +69,7 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest, &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta) + index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } @@ -112,7 +112,7 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs. 
&reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { // Get, store, and filter nodes - maxIdx, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta) + maxIdx, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, args.PeerName) if err != nil { return err } @@ -314,7 +314,7 @@ func (m *Internal) GatewayServiceDump(args *structs.ServiceSpecificRequest, repl // Loop over the gateway <-> serviceName mappings and fetch all service instances for each var result structs.ServiceDump for _, gs := range gatewayServices { - idx, instances, err := state.CheckServiceNodes(ws, gs.Service.Name, &gs.Service.EnterpriseMeta) + idx, instances, err := state.CheckServiceNodes(ws, gs.Service.Name, &gs.Service.EnterpriseMeta, args.PeerName) if err != nil { return err } diff --git a/agent/consul/issue_test.go b/agent/consul/issue_test.go index 516e42ff97..7839be0b95 100644 --- a/agent/consul/issue_test.go +++ b/agent/consul/issue_test.go @@ -62,7 +62,7 @@ func TestHealthCheckRace(t *testing.T) { } // Verify the index - idx, out1, err := state.CheckServiceNodes(nil, "db", nil) + idx, out1, err := state.CheckServiceNodes(nil, "db", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -85,7 +85,7 @@ func TestHealthCheckRace(t *testing.T) { } // Verify the index changed - idx, out2, err := state.CheckServiceNodes(nil, "db", nil) + idx, out2, err := state.CheckServiceNodes(nil, "db", nil, "") if err != nil { t.Fatalf("err: %s", err) } diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 456fbec1ea..aedcb032f2 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -305,6 +305,8 @@ func (s *Server) establishLeadership(ctx context.Context) error { s.startFederationStateAntiEntropy(ctx) + s.startPeeringStreamSync(ctx) + if err := s.startConnectLeader(ctx); err != nil { return err } @@ -342,6 +344,8 @@ func (s *Server) revokeLeadership() { s.stopACLReplication() + 
s.stopPeeringStreamSync() + s.stopConnectLeader() s.stopACLTokenReaping() @@ -887,7 +891,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *acl.Ent } state := s.fsm.State() - _, checks, err := state.ChecksInState(nil, api.HealthAny, nodeEntMeta) + _, checks, err := state.ChecksInState(nil, api.HealthAny, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -903,7 +907,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *acl.Ent } // Get the node services, look for ConsulServiceID - _, services, err := state.NodeServices(nil, check.Node, nodeEntMeta) + _, services, err := state.NodeServices(nil, check.Node, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -914,7 +918,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *acl.Ent CHECKS: for _, service := range services.Services { if service.ID == structs.ConsulServiceID { - _, node, err := state.GetNode(check.Node, nodeEntMeta) + _, node, err := state.GetNode(check.Node, nodeEntMeta, check.PeerName) if err != nil { s.logger.Error("Unable to look up node with name", "name", check.Node, "error", err) continue CHECKS @@ -1051,7 +1055,7 @@ func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.Enterpri // Check if the node exists state := s.fsm.State() - _, node, err := state.GetNode(member.Name, nodeEntMeta) + _, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -1059,7 +1063,7 @@ func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.Enterpri // Check if the associated service is available if service != nil { match := false - _, services, err := state.NodeServices(nil, member.Name, nodeEntMeta) + _, services, err := state.NodeServices(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -1077,7 +1081,7 @@ func (s *Server) handleAliveMember(member serf.Member, 
nodeEntMeta *acl.Enterpri } // Check if the serfCheck is in the passing state - _, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta) + _, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -1127,7 +1131,7 @@ func (s *Server) handleFailedMember(member serf.Member, nodeEntMeta *acl.Enterpr // Check if the node exists state := s.fsm.State() - _, node, err := state.GetNode(member.Name, nodeEntMeta) + _, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -1142,7 +1146,7 @@ func (s *Server) handleFailedMember(member serf.Member, nodeEntMeta *acl.Enterpr if node.Address == member.Addr.String() { // Check if the serfCheck is in the critical state - _, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta) + _, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } @@ -1220,7 +1224,7 @@ func (s *Server) handleDeregisterMember(reason string, member serf.Member, nodeE // Check if the node does not exist state := s.fsm.State() - _, node, err := state.GetNode(member.Name, nodeEntMeta) + _, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword) if err != nil { return err } diff --git a/agent/consul/leader_connect_ca.go b/agent/consul/leader_connect_ca.go index 91da428aca..2239bc6fd4 100644 --- a/agent/consul/leader_connect_ca.go +++ b/agent/consul/leader_connect_ca.go @@ -266,7 +266,7 @@ func newCARoot(pemValue, provider, clusterID string) (*structs.CARoot, error) { } return &structs.CARoot{ ID: connect.CalculateCertFingerprint(primaryCert.Raw), - Name: fmt.Sprintf("%s CA Primary Cert", strings.Title(provider)), + Name: fmt.Sprintf("%s CA Primary Cert", providerPrettyName(provider)), SerialNumber: primaryCert.SerialNumber.Uint64(), SigningKeyID: connect.EncodeSigningKeyID(primaryCert.SubjectKeyId), ExternalTrustDomain: clusterID, @@ 
-1581,3 +1581,18 @@ func (c *CAManager) isIntermediateUsedToSignLeaf() bool { provider, _ := c.getCAProvider() return primaryUsesIntermediate(provider) } + +func providerPrettyName(provider string) string { + switch provider { + case "consul": + return "Consul" + case "vault": + return "Vault" + case "aws-pca": + return "Aws-Pca" + case "provider-name": + return "Provider-Name" + default: + return provider + } +} diff --git a/agent/consul/leader_federation_state_ae.go b/agent/consul/leader_federation_state_ae.go index ef6f6378f6..6cc0d4ba22 100644 --- a/agent/consul/leader_federation_state_ae.go +++ b/agent/consul/leader_federation_state_ae.go @@ -157,7 +157,7 @@ func (s *Server) fetchFederationStateAntiEntropyDetails( // Fetch our current list of all mesh gateways. entMeta := structs.WildcardEnterpriseMetaInDefaultPartition() - idx2, raw, err := state.ServiceDump(ws, structs.ServiceKindMeshGateway, true, entMeta) + idx2, raw, err := state.ServiceDump(ws, structs.ServiceKindMeshGateway, true, entMeta, structs.DefaultPeerKeyword) if err != nil { return err } diff --git a/agent/consul/leader_peering.go b/agent/consul/leader_peering.go new file mode 100644 index 0000000000..d1dfc8c432 --- /dev/null +++ b/agent/consul/leader_peering.go @@ -0,0 +1,244 @@ +package consul + +import ( + "container/ring" + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-uuid" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/hashicorp/consul/agent/pool" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" +) + +func (s *Server) startPeeringStreamSync(ctx context.Context) { + s.leaderRoutineManager.Start(ctx, peeringStreamsRoutineName, s.runPeeringSync) +} + +func (s *Server) runPeeringSync(ctx context.Context) error { + logger := s.logger.Named("peering-syncer") + cancelFns := 
make(map[string]context.CancelFunc) + + retryLoopBackoff(ctx, func() error { + if err := s.syncPeeringsAndBlock(ctx, logger, cancelFns); err != nil { + return err + } + return nil + + }, func(err error) { + s.logger.Error("error syncing peering streams from state store", "error", err) + }) + + return nil +} + +func (s *Server) stopPeeringStreamSync() { + // will be a no-op when not started + s.leaderRoutineManager.Stop(peeringStreamsRoutineName) +} + +// syncPeeringsAndBlock is a long-running goroutine that is responsible for watching +// changes to peerings in the state store and managing streams to those peers. +func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger, cancelFns map[string]context.CancelFunc) error { + state := s.fsm.State() + + // Pull the state store contents and set up to block for changes. + ws := memdb.NewWatchSet() + ws.Add(state.AbandonCh()) + ws.Add(ctx.Done()) + + _, peers, err := state.PeeringList(ws, *structs.NodeEnterpriseMetaInPartition(structs.WildcardSpecifier)) + if err != nil { + return err + } + + // TODO(peering) Adjust this debug info. + // Generate a UUID to trace different passes through this function. + seq, err := uuid.GenerateUUID() + if err != nil { + s.logger.Debug("failed to generate sequence uuid while syncing peerings") + } + + logger.Trace("syncing new list of peers", "num_peers", len(peers), "sequence_id", seq) + + // Stored tracks the unique set of peers that should be dialed. + // It is used to reconcile the list of active streams. + stored := make(map[string]struct{}) + + var merr *multierror.Error + + // Create connections and streams to peers in the state store that do not have an active stream. 
+ for _, peer := range peers { + logger.Trace("evaluating stored peer", "peer", peer.Name, "should_dial", peer.ShouldDial(), "sequence_id", seq) + + if !peer.ShouldDial() { + continue + } + + // TODO(peering) Account for deleted peers that are still in the state store + stored[peer.ID] = struct{}{} + + status, found := s.peeringService.StreamStatus(peer.ID) + + // TODO(peering): If there is new peering data and a connected stream, should we tear down the stream? + // If the data in the updated token is bad, the user wouldn't know until the old servers/certs become invalid. + // Alternatively we could do a basic Ping from the initiate peering endpoint to avoid dealing with that here. + if found && status.Connected { + // Nothing to do when we already have an active stream to the peer. + continue + } + logger.Trace("ensuring stream to peer", "peer_id", peer.ID, "sequence_id", seq) + + if cancel, ok := cancelFns[peer.ID]; ok { + // If the peer is known but we're not connected, clean up the retry-er and start over. + // There may be new data in the state store that would enable us to get out of an error state. + logger.Trace("cancelling context to re-establish stream", "peer_id", peer.ID, "sequence_id", seq) + cancel() + } + + if err := s.establishStream(ctx, logger, peer, cancelFns); err != nil { + // TODO(peering): These errors should be reported in the peer status, otherwise they're only in the logs. + // Lockable status isn't available here though. Could report it via the peering.Service? + logger.Error("error establishing peering stream", "peer_id", peer.ID, "error", err) + merr = multierror.Append(merr, err) + + // Continue on errors to avoid one bad peering from blocking the establishment and cleanup of others. + continue + } + } + + logger.Trace("checking connected streams", "streams", s.peeringService.ConnectedStreams(), "sequence_id", seq) + + // Clean up active streams of peerings that were deleted from the state store. 
+ // TODO(peering): This is going to trigger shutting down peerings we generated a token for. Is that OK? + for stream, doneCh := range s.peeringService.ConnectedStreams() { + if _, ok := stored[stream]; ok { + // Active stream is in the state store, nothing to do. + continue + } + + select { + case <-doneCh: + // channel is closed, do nothing to avoid a panic + default: + logger.Trace("tearing down stream for deleted peer", "peer_id", stream, "sequence_id", seq) + close(doneCh) + } + } + + logger.Trace("blocking for changes", "sequence_id", seq) + + // Block for any changes to the state store. + ws.WatchCtx(ctx) + + logger.Trace("unblocked", "sequence_id", seq) + return merr.ErrorOrNil() +} + +func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer *pbpeering.Peering, cancelFns map[string]context.CancelFunc) error { + tlsOption := grpc.WithInsecure() + if len(peer.PeerCAPems) > 0 { + var haveCerts bool + pool := x509.NewCertPool() + for _, pem := range peer.PeerCAPems { + if !pool.AppendCertsFromPEM([]byte(pem)) { + return fmt.Errorf("failed to parse PEM %s", pem) + } + if len(pem) > 0 { + haveCerts = true + } + } + if !haveCerts { + return fmt.Errorf("failed to build cert pool from peer CA pems") + } + cfg := tls.Config{ + ServerName: peer.PeerServerName, + RootCAs: pool, + } + tlsOption = grpc.WithTransportCredentials(credentials.NewTLS(&cfg)) + } + + // Create a ring buffer to cycle through peer addresses in the retry loop below. + buffer := ring.New(len(peer.PeerServerAddresses)) + for _, addr := range peer.PeerServerAddresses { + buffer.Value = addr + buffer = buffer.Next() + } + + logger.Trace("establishing stream to peer", "peer_id", peer.ID) + + retryCtx, cancel := context.WithCancel(ctx) + cancelFns[peer.ID] = cancel + + // Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes. 
+ go retryLoopBackoff(retryCtx, func() error { + // Try a new address on each iteration by advancing the ring buffer on errors. + defer func() { + buffer = buffer.Next() + }() + addr, ok := buffer.Value.(string) + if !ok { + return fmt.Errorf("peer server address type %T is not a string", buffer.Value) + } + + logger.Trace("dialing peer", "peer_id", peer.ID, "addr", addr) + conn, err := grpc.DialContext(retryCtx, addr, + grpc.WithContextDialer(newPeerDialer(addr)), + grpc.WithBlock(), + tlsOption, + ) + if err != nil { + return fmt.Errorf("failed to dial: %w", err) + } + defer conn.Close() + + client := pbpeering.NewPeeringServiceClient(conn) + stream, err := client.StreamResources(retryCtx) + if err != nil { + return err + } + + err = s.peeringService.HandleStream(peer.ID, peer.PeerID, stream) + if err == nil { + // This will cancel the retry-er context, letting us break out of this loop when we want to shut down the stream. + cancel() + } + return err + + }, func(err error) { + // TODO(peering): These errors should be reported in the peer status, otherwise they're only in the logs. + // Lockable status isn't available here though. Could report it via the peering.Service? + logger.Error("error managing peering stream", "peer_id", peer.ID, "error", err) + }) + + return nil +} + +func newPeerDialer(peerAddr string) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, addr string) (net.Conn, error) { + d := net.Dialer{} + conn, err := d.DialContext(ctx, "tcp", peerAddr) + if err != nil { + return nil, err + } + + // TODO(peering): This is going to need to be revisited. This type uses the TLS settings configured on the agent, but + // for peering we never want mutual TLS because the client peer doesn't share its CA cert. 
+ _, err = conn.Write([]byte{byte(pool.RPCGRPC)}) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil + } +} diff --git a/agent/consul/leader_peering_test.go b/agent/consul/leader_peering_test.go new file mode 100644 index 0000000000..dd79529b38 --- /dev/null +++ b/agent/consul/leader_peering_test.go @@ -0,0 +1,197 @@ +package consul + +import ( + "context" + "encoding/base64" + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/consul/testrpc" +) + +func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + // TODO(peering): Configure with TLS + _, s1 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s1.dc1" + c.Datacenter = "dc1" + c.TLSConfig.Domain = "consul" + }) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create a peering by generating a token + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(), + grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), + grpc.WithInsecure(), + grpc.WithBlock()) + require.NoError(t, err) + defer conn.Close() + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + + req := pbpeering.GenerateTokenRequest{ + PeerName: "my-peer-s2", + } + resp, err := peeringClient.GenerateToken(ctx, &req) + require.NoError(t, err) + + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + + var token structs.PeeringToken + require.NoError(t, json.Unmarshal(tokenJSON, &token)) + + // S1 should not have a stream tracked for dc2 because s1 generated a token for baz, and therefore needs to wait 
to be dialed. + time.Sleep(1 * time.Second) + _, found := s1.peeringService.StreamStatus(token.PeerID) + require.False(t, found) + + // Bring up s2 and store s1's token so that it attempts to dial. + _, s2 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s2.dc2" + c.Datacenter = "dc2" + c.PrimaryDatacenter = "dc2" + }) + testrpc.WaitForLeader(t, s2.RPC, "dc2") + + // Simulate a peering initiation event by writing a peering with data from a peering token. + // Eventually the leader in dc2 should dial and connect to the leader in dc1. + p := &pbpeering.Peering{ + Name: "my-peer-s1", + PeerID: token.PeerID, + PeerCAPems: token.CA, + PeerServerName: token.ServerName, + PeerServerAddresses: token.ServerAddresses, + } + require.True(t, p.ShouldDial()) + + // We maintain a pointer to the peering on the write so that we can get the ID without needing to re-query the state store. + require.NoError(t, s2.fsm.State().PeeringWrite(1000, p)) + + retry.Run(t, func(r *retry.R) { + status, found := s2.peeringService.StreamStatus(p.ID) + require.True(r, found) + require.True(r, status.Connected) + }) + + // Delete the peering to trigger the termination sequence + require.NoError(t, s2.fsm.State().PeeringDelete(2000, state.Query{ + Value: "my-peer-s1", + })) + s2.logger.Trace("deleted peering for my-peer-s1") + + retry.Run(t, func(r *retry.R) { + _, found := s2.peeringService.StreamStatus(p.ID) + require.False(r, found) + }) + + // s1 should have also marked the peering as terminated. 
+ retry.Run(t, func(r *retry.R) { + _, peering, err := s1.fsm.State().PeeringRead(nil, state.Query{ + Value: "my-peer-s2", + }) + require.NoError(r, err) + require.Equal(r, pbpeering.PeeringState_TERMINATED, peering.State) + }) +} + +func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + // TODO(peering): Configure with TLS + _, s1 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s1.dc1" + c.Datacenter = "dc1" + c.TLSConfig.Domain = "consul" + }) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create a peering by generating a token + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(), + grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), + grpc.WithInsecure(), + grpc.WithBlock()) + require.NoError(t, err) + defer conn.Close() + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + + req := pbpeering.GenerateTokenRequest{ + PeerName: "my-peer-s2", + } + resp, err := peeringClient.GenerateToken(ctx, &req) + require.NoError(t, err) + + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + + var token structs.PeeringToken + require.NoError(t, json.Unmarshal(tokenJSON, &token)) + + // Bring up s2 and store s1's token so that it attempts to dial. + _, s2 := testServerWithConfig(t, func(c *Config) { + c.NodeName = "s2.dc2" + c.Datacenter = "dc2" + c.PrimaryDatacenter = "dc2" + }) + testrpc.WaitForLeader(t, s2.RPC, "dc2") + + // Simulate a peering initiation event by writing a peering with data from a peering token. + // Eventually the leader in dc2 should dial and connect to the leader in dc1. 
+ p := &pbpeering.Peering{ + Name: "my-peer-s1", + PeerID: token.PeerID, + PeerCAPems: token.CA, + PeerServerName: token.ServerName, + PeerServerAddresses: token.ServerAddresses, + } + require.True(t, p.ShouldDial()) + + // We maintain a pointer to the peering on the write so that we can get the ID without needing to re-query the state store. + require.NoError(t, s2.fsm.State().PeeringWrite(1000, p)) + + retry.Run(t, func(r *retry.R) { + status, found := s2.peeringService.StreamStatus(p.ID) + require.True(r, found) + require.True(r, status.Connected) + }) + + // Delete the peering from the server peer to trigger the termination sequence + require.NoError(t, s1.fsm.State().PeeringDelete(2000, state.Query{ + Value: "my-peer-s2", + })) + s2.logger.Trace("deleted peering for my-peer-s1") + + retry.Run(t, func(r *retry.R) { + _, found := s1.peeringService.StreamStatus(p.PeerID) + require.False(r, found) + }) + + // s2 should have received the termination message and updated the peering state + retry.Run(t, func(r *retry.R) { + _, peering, err := s2.fsm.State().PeeringRead(nil, state.Query{ + Value: "my-peer-s1", + }) + require.NoError(r, err) + require.Equal(r, pbpeering.PeeringState_TERMINATED, peering.State) + }) +} diff --git a/agent/consul/leader_test.go b/agent/consul/leader_test.go index cb767acf04..c043fa0f5b 100644 --- a/agent/consul/leader_test.go +++ b/agent/consul/leader_test.go @@ -51,7 +51,7 @@ func TestLeader_RegisterMember(t *testing.T) { // Client should be registered state := s1.fsm.State() retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -61,7 +61,7 @@ func TestLeader_RegisterMember(t *testing.T) { }) // Should have a check - _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil) + _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ 
-80,7 +80,7 @@ func TestLeader_RegisterMember(t *testing.T) { // Server should be registered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(s1.config.NodeName, nil) + _, node, err := state.GetNode(s1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -90,7 +90,7 @@ func TestLeader_RegisterMember(t *testing.T) { }) // Service should be registered - _, services, err := state.NodeServices(nil, s1.config.NodeName, nil) + _, services, err := state.NodeServices(nil, s1.config.NodeName, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -129,7 +129,7 @@ func TestLeader_FailedMember(t *testing.T) { // Should be registered state := s1.fsm.State() retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -139,7 +139,7 @@ func TestLeader_FailedMember(t *testing.T) { }) // Should have a check - _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil) + _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -154,7 +154,7 @@ func TestLeader_FailedMember(t *testing.T) { } retry.Run(t, func(r *retry.R) { - _, checks, err = state.NodeChecks(nil, c1.config.NodeName, nil) + _, checks, err = state.NodeChecks(nil, c1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -193,7 +193,7 @@ func TestLeader_LeftMember(t *testing.T) { // Should be registered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") require.NoError(r, err) require.NotNil(r, node, "client not registered") }) @@ -204,7 +204,7 @@ func TestLeader_LeftMember(t *testing.T) { // Should be deregistered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") 
require.NoError(r, err) require.Nil(r, node, "client still registered") }) @@ -236,7 +236,7 @@ func TestLeader_ReapMember(t *testing.T) { // Should be registered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") require.NoError(r, err) require.NotNil(r, node, "client not registered") }) @@ -257,7 +257,7 @@ func TestLeader_ReapMember(t *testing.T) { // anti-entropy will put it back. reaped := false for start := time.Now(); time.Since(start) < 5*time.Second; { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") require.NoError(t, err) if node == nil { reaped = true @@ -296,7 +296,7 @@ func TestLeader_ReapOrLeftMember_IgnoreSelf(t *testing.T) { // Should be registered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(nodeName, nil) + _, node, err := state.GetNode(nodeName, nil, "") require.NoError(r, err) require.NotNil(r, node, "server not registered") }) @@ -318,7 +318,7 @@ func TestLeader_ReapOrLeftMember_IgnoreSelf(t *testing.T) { // anti-entropy will put it back if it did get deleted. 
reaped := false for start := time.Now(); time.Since(start) < 5*time.Second; { - _, node, err := state.GetNode(nodeName, nil) + _, node, err := state.GetNode(nodeName, nil, "") require.NoError(t, err) if node == nil { reaped = true @@ -402,7 +402,7 @@ func TestLeader_CheckServersMeta(t *testing.T) { } // s3 should be registered retry.Run(t, func(r *retry.R) { - _, service, err := state.NodeService(s3.config.NodeName, "consul", &consulService.EnterpriseMeta) + _, service, err := state.NodeService(s3.config.NodeName, "consul", &consulService.EnterpriseMeta, "") if err != nil { r.Fatalf("err: %v", err) } @@ -438,7 +438,7 @@ func TestLeader_CheckServersMeta(t *testing.T) { if err != nil { r.Fatalf("Unexpected error :%v", err) } - _, service, err := state.NodeService(s3.config.NodeName, "consul", &consulService.EnterpriseMeta) + _, service, err := state.NodeService(s3.config.NodeName, "consul", &consulService.EnterpriseMeta, "") if err != nil { r.Fatalf("err: %v", err) } @@ -506,7 +506,7 @@ func TestLeader_ReapServer(t *testing.T) { // s3 should be registered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(s3.config.NodeName, nil) + _, node, err := state.GetNode(s3.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -527,7 +527,7 @@ func TestLeader_ReapServer(t *testing.T) { } // s3 should be deregistered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(s3.config.NodeName, nil) + _, node, err := state.GetNode(s3.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -582,7 +582,7 @@ func TestLeader_Reconcile_ReapMember(t *testing.T) { // Node should be gone state := s1.fsm.State() - _, node, err := state.GetNode("no-longer-around", nil) + _, node, err := state.GetNode("no-longer-around", nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -615,7 +615,7 @@ func TestLeader_Reconcile(t *testing.T) { // Should not be registered state := s1.fsm.State() - _, node, err := state.GetNode(c1.config.NodeName, 
nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -625,7 +625,7 @@ func TestLeader_Reconcile(t *testing.T) { // Should be registered retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -657,7 +657,7 @@ func TestLeader_Reconcile_Races(t *testing.T) { state := s1.fsm.State() var nodeAddr string retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -693,7 +693,7 @@ func TestLeader_Reconcile_Races(t *testing.T) { if err := s1.reconcile(); err != nil { t.Fatalf("err: %v", err) } - _, node, err := state.GetNode(c1.config.NodeName, nil) + _, node, err := state.GetNode(c1.config.NodeName, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -707,7 +707,7 @@ func TestLeader_Reconcile_Races(t *testing.T) { // Fail the member and wait for the health to go critical. c1.Shutdown() retry.Run(t, func(r *retry.R) { - _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil) + _, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -720,7 +720,7 @@ func TestLeader_Reconcile_Races(t *testing.T) { }) // Make sure the metadata didn't get clobbered. 
- _, node, err = state.GetNode(c1.config.NodeName, nil) + _, node, err = state.GetNode(c1.config.NodeName, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -835,7 +835,7 @@ func TestLeader_LeftLeader(t *testing.T) { // Verify the old leader is deregistered state := remain.fsm.State() retry.Run(t, func(r *retry.R) { - _, node, err := state.GetNode(leader.config.NodeName, nil) + _, node, err := state.GetNode(leader.config.NodeName, nil, "") if err != nil { r.Fatalf("err: %v", err) } @@ -2336,7 +2336,7 @@ func TestLeader_EnableVirtualIPs(t *testing.T) { }) require.NoError(t, err) - _, node, err := state.NodeService("bar", "tgate1", nil) + _, node, err := state.NodeService("bar", "tgate1", nil, "") require.NoError(t, err) sn := structs.ServiceName{Name: "api"} key := structs.ServiceGatewayVirtualIPTag(sn) diff --git a/agent/consul/peering_backend.go b/agent/consul/peering_backend.go new file mode 100644 index 0000000000..7e8c698c8e --- /dev/null +++ b/agent/consul/peering_backend.go @@ -0,0 +1,126 @@ +package consul + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + + "google.golang.org/grpc" + + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/rpc/peering" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" +) + +type peeringBackend struct { + srv *Server + connPool GRPCClientConner + apply *peeringApply +} + +var _ peering.Backend = (*peeringBackend)(nil) + +// NewPeeringBackend returns a peering.Backend implementation that is bound to the given server. +func NewPeeringBackend(srv *Server, connPool GRPCClientConner) peering.Backend { + return &peeringBackend{ + srv: srv, + connPool: connPool, + apply: &peeringApply{srv: srv}, + } +} + +func (b *peeringBackend) Forward(info structs.RPCInfo, f func(*grpc.ClientConn) error) (handled bool, err error) { + // Only forward the request if the dc in the request matches the server's datacenter. 
+	if info.RequestDatacenter() != "" && info.RequestDatacenter() != b.srv.config.Datacenter {
+		return false, fmt.Errorf("requests to generate peering tokens cannot be forwarded to remote datacenters")
+	}
+	return b.srv.ForwardGRPC(b.connPool, info, f)
+}
+
+// GetAgentCACertificates gets the server's raw CA data from its TLS Configurator.
+func (b *peeringBackend) GetAgentCACertificates() ([]string, error) {
+	// TODO(peering): handle empty CA pems
+	return b.srv.tlsConfigurator.ManualCAPems(), nil
+}
+
+// GetServerAddresses looks up server node addresses from the state store.
+func (b *peeringBackend) GetServerAddresses() ([]string, error) {
+	state := b.srv.fsm.State()
+	_, nodes, err := state.ServiceNodes(nil, "consul", structs.DefaultEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
+	if err != nil {
+		return nil, err
+	}
+	var addrs []string
+	for _, node := range nodes {
+		addrs = append(addrs, node.Address+":"+strconv.Itoa(node.ServicePort))
+	}
+	return addrs, nil
+}
+
+// GetServerName returns the SNI to be returned in the peering token data which
+// will be used by peers when establishing peering connections over TLS.
+func (b *peeringBackend) GetServerName() string {
+	return b.srv.tlsConfigurator.ServerSNI(b.srv.config.Datacenter, "")
+}
+
+// EncodeToken encodes a peering token as a base64-encoded representation of JSON (for now).
+func (b *peeringBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error) {
+	jsonToken, err := json.Marshal(tok)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal token: %w", err)
+	}
+	return []byte(base64.StdEncoding.EncodeToString(jsonToken)), nil
+}
+
+// DecodeToken decodes a peering token from a base64-encoded JSON byte array (for now). 
+func (b *peeringBackend) DecodeToken(tokRaw []byte) (*structs.PeeringToken, error) { + tokJSONRaw, err := base64.StdEncoding.DecodeString(string(tokRaw)) + if err != nil { + return nil, fmt.Errorf("failed to decode token: %w", err) + } + var tok structs.PeeringToken + if err := json.Unmarshal(tokJSONRaw, &tok); err != nil { + return nil, err + } + return &tok, nil +} + +func (s peeringBackend) Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) { + return s.srv.publisher.Subscribe(req) +} + +func (b *peeringBackend) Store() peering.Store { + return b.srv.fsm.State() +} + +func (b *peeringBackend) Apply() peering.Apply { + return b.apply +} + +func (b *peeringBackend) EnterpriseCheckPartitions(partition string) error { + return b.enterpriseCheckPartitions(partition) +} + +type peeringApply struct { + srv *Server +} + +func (a *peeringApply) PeeringWrite(req *pbpeering.PeeringWriteRequest) error { + _, err := a.srv.raftApplyProtobuf(structs.PeeringWriteType, req) + return err +} + +func (a *peeringApply) PeeringDelete(req *pbpeering.PeeringDeleteRequest) error { + _, err := a.srv.raftApplyProtobuf(structs.PeeringDeleteType, req) + return err +} + +// TODO(peering): This needs RPC metrics interceptor since it's not triggered by an RPC. 
+func (a *peeringApply) PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error { + _, err := a.srv.raftApplyProtobuf(structs.PeeringTerminateByIDType, req) + return err +} + +var _ peering.Apply = (*peeringApply)(nil) diff --git a/agent/consul/peering_backend_oss.go b/agent/consul/peering_backend_oss.go new file mode 100644 index 0000000000..5f5a117dba --- /dev/null +++ b/agent/consul/peering_backend_oss.go @@ -0,0 +1,15 @@ +//go:build !consulent +// +build !consulent + +package consul + +import ( + "fmt" +) + +func (b *peeringBackend) enterpriseCheckPartitions(partition string) error { + if partition != "" { + return fmt.Errorf("Partitions are a Consul Enterprise feature") + } + return nil +} diff --git a/agent/consul/peering_backend_oss_test.go b/agent/consul/peering_backend_oss_test.go new file mode 100644 index 0000000000..75decc0a8c --- /dev/null +++ b/agent/consul/peering_backend_oss_test.go @@ -0,0 +1,51 @@ +//go:build !consulent +// +build !consulent + +package consul + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + gogrpc "google.golang.org/grpc" + + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/testrpc" +) + +func TestPeeringBackend_RejectsPartition(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + _, s1 := testServerWithConfig(t, func(c *Config) { + c.Datacenter = "dc1" + c.Bootstrap = true + }) + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // make a grpc client to dial s1 directly + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + t.Cleanup(cancel) + + conn, err := gogrpc.DialContext(ctx, s1.config.RPCAddr.String(), + gogrpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())), + gogrpc.WithInsecure(), + gogrpc.WithBlock()) + require.NoError(t, err) + t.Cleanup(func() { conn.Close() }) + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + + req := 
pbpeering.GenerateTokenRequest{ + Datacenter: "dc1", + Partition: "test", + } + _, err = peeringClient.GenerateToken(ctx, &req) + require.Error(t, err) + require.Contains(t, err.Error(), "Partitions are a Consul Enterprise feature") +} diff --git a/agent/consul/peering_backend_test.go b/agent/consul/peering_backend_test.go new file mode 100644 index 0000000000..eb89cd531e --- /dev/null +++ b/agent/consul/peering_backend_test.go @@ -0,0 +1,115 @@ +package consul + +import ( + "context" + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" + gogrpc "google.golang.org/grpc" + + "github.com/hashicorp/consul/agent/pool" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/testrpc" +) + +func TestPeeringBackend_DoesNotForwardToDifferentDC(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + _, s1 := testServerDC(t, "dc1") + _, s2 := testServerDC(t, "dc2") + + joinWAN(t, s2, s1) + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s2.RPC, "dc2") + + // make a grpc client to dial s2 directly + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + t.Cleanup(cancel) + + conn, err := gogrpc.DialContext(ctx, s2.config.RPCAddr.String(), + gogrpc.WithContextDialer(newServerDialer(s2.config.RPCAddr.String())), + gogrpc.WithInsecure(), + gogrpc.WithBlock()) + require.NoError(t, err) + t.Cleanup(func() { conn.Close() }) + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + + // GenerateToken request should fail against dc1, because we are dialing dc2. The GenerateToken request should never be forwarded across datacenters. 
+ req := pbpeering.GenerateTokenRequest{ + PeerName: "peer1-usw1", + Datacenter: "dc1", + } + _, err = peeringClient.GenerateToken(ctx, &req) + require.Error(t, err) + require.Contains(t, err.Error(), "requests to generate peering tokens cannot be forwarded to remote datacenters") +} + +func TestPeeringBackend_ForwardToLeader(t *testing.T) { + t.Parallel() + + _, conf1 := testServerConfig(t) + server1, err := newServer(t, conf1) + require.NoError(t, err) + + _, conf2 := testServerConfig(t) + conf2.Bootstrap = false + server2, err := newServer(t, conf2) + require.NoError(t, err) + + // Join a 2nd server (not the leader) + testrpc.WaitForLeader(t, server1.RPC, "dc1") + joinLAN(t, server2, server1) + testrpc.WaitForLeader(t, server2.RPC, "dc1") + + // Make a write call to server2 and make sure it gets forwarded to server1 + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + // Dial server2 directly + conn, err := gogrpc.DialContext(ctx, server2.config.RPCAddr.String(), + gogrpc.WithContextDialer(newServerDialer(server2.config.RPCAddr.String())), + gogrpc.WithInsecure(), + gogrpc.WithBlock()) + require.NoError(t, err) + t.Cleanup(func() { conn.Close() }) + + peeringClient := pbpeering.NewPeeringServiceClient(conn) + + runStep(t, "forward a write", func(t *testing.T) { + // Do the grpc Write call to server2 + req := pbpeering.GenerateTokenRequest{ + Datacenter: "dc1", + PeerName: "foo", + } + _, err := peeringClient.GenerateToken(ctx, &req) + require.NoError(t, err) + + // TODO(peering) check that state store is updated on leader, indicating a forwarded request after state store + // is implemented. 
+ }) +} + +func newServerDialer(serverAddr string) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, addr string) (net.Conn, error) { + d := net.Dialer{} + conn, err := d.DialContext(ctx, "tcp", serverAddr) + if err != nil { + return nil, err + } + + _, err = conn.Write([]byte{byte(pool.RPCGRPC)}) + if err != nil { + conn.Close() + return nil, err + } + + return conn, nil + } +} diff --git a/agent/consul/prepared_query/walk_test.go b/agent/consul/prepared_query/walk_test.go index 2c6920afdb..e45aa3a1e4 100644 --- a/agent/consul/prepared_query/walk_test.go +++ b/agent/consul/prepared_query/walk_test.go @@ -3,12 +3,12 @@ package prepared_query import ( "fmt" "reflect" + "sort" "testing" - "sort" + "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent/structs" - "github.com/stretchr/testify/require" ) func TestWalk_ServiceQuery(t *testing.T) { @@ -42,6 +42,7 @@ func TestWalk_ServiceQuery(t *testing.T) { ".Tags[0]:tag1", ".Tags[1]:tag2", ".Tags[2]:tag3", + ".PeerName:", } expected = append(expected, entMetaWalkFields...) 
sort.Strings(expected) diff --git a/agent/consul/prepared_query_endpoint.go b/agent/consul/prepared_query_endpoint.go index 15f818171d..31890449c0 100644 --- a/agent/consul/prepared_query_endpoint.go +++ b/agent/consul/prepared_query_endpoint.go @@ -404,7 +404,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest, qs.Node = args.Agent.Node } else if qs.Node == "_ip" { if args.Source.Ip != "" { - _, nodes, err := state.Nodes(nil, structs.NodeEnterpriseMetaInDefaultPartition()) + _, nodes, err := state.Nodes(nil, structs.NodeEnterpriseMetaInDefaultPartition(), structs.TODOPeerKeyword) if err != nil { return err } @@ -534,7 +534,7 @@ func (p *PreparedQuery) execute(query *structs.PreparedQuery, f = state.CheckConnectServiceNodes } - _, nodes, err := f(nil, query.Service.Service, &query.Service.EnterpriseMeta) + _, nodes, err := f(nil, query.Service.Service, &query.Service.EnterpriseMeta, query.Service.PeerName) if err != nil { return err } diff --git a/agent/consul/rpc_test.go b/agent/consul/rpc_test.go index 5e1323a1e2..4103be7467 100644 --- a/agent/consul/rpc_test.go +++ b/agent/consul/rpc_test.go @@ -1374,6 +1374,10 @@ func (r isReadRequest) HasTimedOut(since time.Time, rpcHoldTimeout, maxQueryTime return false, nil } +func (r isReadRequest) Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) time.Duration { + return time.Duration(-1) +} + func TestRPC_AuthorizeRaftRPC(t *testing.T) { caPEM, caPK, err := tlsutil.GenerateCA(tlsutil.CAOpts{Days: 5, Domain: "consul"}) require.NoError(t, err) diff --git a/agent/consul/server.go b/agent/consul/server.go index d9b4aed6b9..2f64fa6729 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -16,24 +16,20 @@ import ( "sync/atomic" "time" - "github.com/hashicorp/consul/agent/rpc/middleware" - - "github.com/hashicorp/go-version" - "go.etcd.io/bbolt" - "github.com/armon/go-metrics" + "github.com/hashicorp/consul-net-rpc/net/rpc" connlimit "github.com/hashicorp/go-connlimit" 
"github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-version" "github.com/hashicorp/raft" autopilot "github.com/hashicorp/raft-autopilot" raftboltdb "github.com/hashicorp/raft-boltdb/v2" "github.com/hashicorp/serf/serf" + "go.etcd.io/bbolt" "golang.org/x/time/rate" "google.golang.org/grpc" - "github.com/hashicorp/consul-net-rpc/net/rpc" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/authmethod" "github.com/hashicorp/consul/agent/consul/authmethod/ssoauth" @@ -46,14 +42,18 @@ import ( "github.com/hashicorp/consul/agent/grpc/private/services/subscribe" "github.com/hashicorp/consul/agent/grpc/public/services/connectca" "github.com/hashicorp/consul/agent/grpc/public/services/dataplane" + "github.com/hashicorp/consul/agent/grpc/public/services/serverdiscovery" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/pool" "github.com/hashicorp/consul/agent/router" + "github.com/hashicorp/consul/agent/rpc/middleware" + "github.com/hashicorp/consul/agent/rpc/peering" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib/routine" "github.com/hashicorp/consul/logging" + "github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/proto/pbsubscribe" "github.com/hashicorp/consul/tlsutil" "github.com/hashicorp/consul/types" @@ -123,6 +123,7 @@ const ( intermediateCertRenewWatchRoutineName = "intermediate cert renew watch" backgroundCAInitializationRoutineName = "CA initialization" virtualIPCheckRoutineName = "virtual IP version check" + peeringStreamsRoutineName = "streaming peering resources" ) var ( @@ -355,6 +356,9 @@ type Server struct { // this into the Deps struct and created it much earlier on. publisher *stream.EventPublisher + // peering is a service used to handle peering streams. 
+ peeringService *peering.Service + // embedded struct to hold all the enterprise specific data EnterpriseServer } @@ -677,8 +681,16 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve s.publicConnectCAServer.Register(s.publicGRPCServer) dataplane.NewServer(dataplane.Config{ + GetStore: func() dataplane.StateStore { return s.FSM().State() }, Logger: logger.Named("grpc-api.dataplane"), ACLResolver: plainACLResolver{s.ACLResolver}, + Datacenter: s.config.Datacenter, + }).Register(s.publicGRPCServer) + + serverdiscovery.NewServer(serverdiscovery.Config{ + Publisher: s.publisher, + ACLResolver: plainACLResolver{s.ACLResolver}, + Logger: logger.Named("grpc-api.server-discovery"), }).Register(s.publicGRPCServer) // Initialize private gRPC server. @@ -721,12 +733,19 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve } func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler { + p := peering.NewService( + deps.Logger.Named("grpc-api.peering"), + NewPeeringBackend(s, deps.GRPCConnPool), + ) + s.peeringService = p + register := func(srv *grpc.Server) { if config.RPCConfig.EnableStreaming { pbsubscribe.RegisterStateChangeSubscriptionServer(srv, subscribe.NewServer( &subscribeBackend{srv: s, connPool: deps.GRPCConnPool}, deps.Logger.Named("grpc-api.subscription"))) } + pbpeering.RegisterPeeringServiceServer(srv, s.peeringService) s.registerEnterpriseGRPCServices(deps, srv) // Note: this public gRPC service is also exposed on the private server to @@ -774,7 +793,7 @@ func (s *Server) setupRaft() error { }() var serverAddressProvider raft.ServerAddressProvider = nil - if s.config.RaftConfig.ProtocolVersion >= 3 { //ServerAddressProvider needs server ids to work correctly, which is only supported in protocol version 3 or higher + if s.config.RaftConfig.ProtocolVersion >= 3 { // ServerAddressProvider needs server ids to work correctly, which is only supported in protocol version 3 or higher 
serverAddressProvider = s.serverLookup } @@ -1554,6 +1573,8 @@ func computeRaftReloadableConfig(config ReloadableConfig) raft.ReloadableConfig TrailingLogs: defaultConf.RaftConfig.TrailingLogs, SnapshotInterval: defaultConf.RaftConfig.SnapshotInterval, SnapshotThreshold: defaultConf.RaftConfig.SnapshotThreshold, + ElectionTimeout: defaultConf.RaftConfig.ElectionTimeout, + HeartbeatTimeout: defaultConf.RaftConfig.HeartbeatTimeout, } if config.RaftSnapshotThreshold != 0 { raftCfg.SnapshotThreshold = uint64(config.RaftSnapshotThreshold) @@ -1564,6 +1585,12 @@ func computeRaftReloadableConfig(config ReloadableConfig) raft.ReloadableConfig if config.RaftTrailingLogs != 0 { raftCfg.TrailingLogs = uint64(config.RaftTrailingLogs) } + if config.HeartbeatTimeout >= 5*time.Millisecond { + raftCfg.HeartbeatTimeout = config.HeartbeatTimeout + } + if config.ElectionTimeout >= 5*time.Millisecond { + raftCfg.ElectionTimeout = config.ElectionTimeout + } return raftCfg } @@ -1601,7 +1628,7 @@ func (s *Server) trackLeaderChanges() { continue } - s.grpcLeaderForwarder.UpdateLeaderAddr(s.config.Datacenter, string(leaderObs.Leader)) + s.grpcLeaderForwarder.UpdateLeaderAddr(s.config.Datacenter, string(leaderObs.LeaderAddr)) case <-s.shutdownCh: s.raft.DeregisterObserver(observer) return diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index 0d6a4925b6..c375ff7945 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -165,7 +165,7 @@ func testServerConfig(t *testing.T) (string, *Config) { // TODO (slackpad) - We should be able to run all tests w/o this, but it // looks like several depend on it. 
- config.RPCHoldTimeout = 5 * time.Second + config.RPCHoldTimeout = 10 * time.Second config.ConnectEnabled = true config.CAConfig = &structs.CAConfiguration{ @@ -237,6 +237,8 @@ func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *S r.Fatalf("err: %v", err) } }) + t.Cleanup(func() { srv.Shutdown() }) + return dir, srv } @@ -257,6 +259,26 @@ func testACLServerWithConfig(t *testing.T, cb func(*Config), initReplicationToke return dir, srv, codec } +func testGRPCIntegrationServer(t *testing.T, cb func(*Config)) (*Server, *grpc.ClientConn) { + _, srv, _ := testACLServerWithConfig(t, cb, false) + + // Normally the gRPC server listener is created at the agent level and passed down into + // the Server creation. For our tests, we need to ensure + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + go func() { + _ = srv.publicGRPCServer.Serve(ln) + }() + t.Cleanup(srv.publicGRPCServer.Stop) + + conn, err := grpc.Dial(ln.Addr().String(), grpc.WithInsecure()) + require.NoError(t, err) + + t.Cleanup(func() { _ = conn.Close() }) + + return srv, conn +} + func newServer(t *testing.T, c *Config) (*Server, error) { return newServerWithDeps(t, c, newDefaultDeps(t, c)) } @@ -1836,6 +1858,8 @@ func TestServer_computeRaftReloadableConfig(t *testing.T) { SnapshotThreshold: defaults.SnapshotThreshold, SnapshotInterval: defaults.SnapshotInterval, TrailingLogs: defaults.TrailingLogs, + ElectionTimeout: defaults.ElectionTimeout, + HeartbeatTimeout: defaults.HeartbeatTimeout, }, }, { @@ -1847,6 +1871,8 @@ func TestServer_computeRaftReloadableConfig(t *testing.T) { SnapshotThreshold: 123456, SnapshotInterval: defaults.SnapshotInterval, TrailingLogs: defaults.TrailingLogs, + ElectionTimeout: defaults.ElectionTimeout, + HeartbeatTimeout: defaults.HeartbeatTimeout, }, }, { @@ -1858,6 +1884,8 @@ func TestServer_computeRaftReloadableConfig(t *testing.T) { SnapshotThreshold: defaults.SnapshotThreshold, SnapshotInterval: 13 * time.Minute, TrailingLogs: 
defaults.TrailingLogs, + ElectionTimeout: defaults.ElectionTimeout, + HeartbeatTimeout: defaults.HeartbeatTimeout, }, }, { @@ -1869,6 +1897,8 @@ func TestServer_computeRaftReloadableConfig(t *testing.T) { SnapshotThreshold: defaults.SnapshotThreshold, SnapshotInterval: defaults.SnapshotInterval, TrailingLogs: 78910, + ElectionTimeout: defaults.ElectionTimeout, + HeartbeatTimeout: defaults.HeartbeatTimeout, }, }, { @@ -1877,11 +1907,15 @@ func TestServer_computeRaftReloadableConfig(t *testing.T) { RaftSnapshotThreshold: 123456, RaftSnapshotInterval: 13 * time.Minute, RaftTrailingLogs: 78910, + ElectionTimeout: 300 * time.Millisecond, + HeartbeatTimeout: 400 * time.Millisecond, }, want: raft.ReloadableConfig{ SnapshotThreshold: 123456, SnapshotInterval: 13 * time.Minute, TrailingLogs: 78910, + ElectionTimeout: 300 * time.Millisecond, + HeartbeatTimeout: 400 * time.Millisecond, }, }, } diff --git a/agent/consul/state/acl_schema.go b/agent/consul/state/acl_schema.go index f2b77dcbf7..5b9529bbd0 100644 --- a/agent/consul/state/acl_schema.go +++ b/agent/consul/state/acl_schema.go @@ -239,6 +239,26 @@ func prefixIndexFromUUIDQuery(arg interface{}) ([]byte, error) { return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg) } +func prefixIndexFromUUIDWithPeerQuery(arg interface{}) ([]byte, error) { + switch v := arg.(type) { + case Query: + var b indexBuilder + peername := v.PeerOrEmpty() + if peername == "" { + b.String(structs.LocalPeerKeyword) + } else { + b.String(strings.ToLower(peername)) + } + uuidBytes, err := variableLengthUUIDStringToBytes(v.Value) + if err != nil { + return nil, err + } + return append(b.Bytes(), uuidBytes...), nil + } + + return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg) +} + func multiIndexPolicyFromACLRole(raw interface{}) ([][]byte, error) { role, ok := raw.(*structs.ACLRole) if !ok { diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index db256cfe10..32b3633553 100644 --- 
a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -34,6 +34,8 @@ var ( startingVirtualIP = net.IP{240, 0, 0, 0} virtualIPMaxOffset = net.IP{15, 255, 255, 254} + + ErrNodeNotFound = errors.New("node not found") ) func resizeNodeLookupKey(s string) string { @@ -57,7 +59,7 @@ func (s *Snapshot) Nodes() (memdb.ResultIterator, error) { // Services is used to pull the full list of services for a given node for use // during snapshots. -func (s *Snapshot) Services(node string, entMeta *acl.EnterpriseMeta) (memdb.ResultIterator, error) { +func (s *Snapshot) Services(node string, entMeta *acl.EnterpriseMeta, peerName string) (memdb.ResultIterator, error) { // TODO: accept non-pointer value if entMeta == nil { entMeta = structs.NodeEnterpriseMetaInDefaultPartition() @@ -65,12 +67,13 @@ func (s *Snapshot) Services(node string, entMeta *acl.EnterpriseMeta) (memdb.Res return s.tx.Get(tableServices, indexNode, Query{ Value: node, EnterpriseMeta: *entMeta, + PeerName: peerName, }) } // Checks is used to pull the full list of checks for a given node for use // during snapshots. 
-func (s *Snapshot) Checks(node string, entMeta *acl.EnterpriseMeta) (memdb.ResultIterator, error) { +func (s *Snapshot) Checks(node string, entMeta *acl.EnterpriseMeta, peerName string) (memdb.ResultIterator, error) { // TODO: accept non-pointer value if entMeta == nil { entMeta = structs.NodeEnterpriseMetaInDefaultPartition() @@ -78,6 +81,7 @@ func (s *Snapshot) Checks(node string, entMeta *acl.EnterpriseMeta) (memdb.Resul return s.tx.Get(tableChecks, indexNode, Query{ Value: node, EnterpriseMeta: *entMeta, + PeerName: peerName, }) } @@ -134,8 +138,12 @@ func (s *Store) ensureCheckIfNodeMatches( preserveIndexes bool, node string, nodePartition string, + nodePeerName string, check *structs.HealthCheck, ) error { + if !strings.EqualFold(check.PeerName, nodePeerName) { + return fmt.Errorf("check peer name %q does not match node peer name %q", check.PeerName, nodePeerName) + } if !strings.EqualFold(check.Node, node) || !acl.EqualPartitions(nodePartition, check.PartitionOrDefault()) { return fmt.Errorf("check node %q does not match node %q", printNodeName(check.Node, check.PartitionOrDefault()), @@ -159,6 +167,9 @@ func printNodeName(nodeName, partition string) string { // registration is performed within a single transaction to avoid race // conditions on state updates. 
func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes bool, req *structs.RegisterRequest, restore bool) error { + if err := validateRegisterRequestPeerNamesTxn(tx, req, restore); err != nil { + return err + } if _, err := validateRegisterRequestTxn(tx, req, restore); err != nil { return err } @@ -172,6 +183,7 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b Partition: req.PartitionOrDefault(), TaggedAddresses: req.TaggedAddresses, Meta: req.NodeMeta, + PeerName: req.PeerName, } if preserveIndexes { node.CreateIndex = req.CreateIndex @@ -187,6 +199,7 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b existing, err := tx.First(tableNodes, indexID, Query{ Value: node.Node, EnterpriseMeta: *node.GetEnterpriseMeta(), + PeerName: node.PeerName, }) if err != nil { return fmt.Errorf("node lookup failed: %s", err) @@ -206,6 +219,7 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b EnterpriseMeta: req.Service.EnterpriseMeta, Node: req.Node, Service: req.Service.ID, + PeerName: req.PeerName, }) if err != nil { return fmt.Errorf("failed service lookup: %s", err) @@ -220,12 +234,14 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b // Add the checks, if any. 
if req.Check != nil { - if err := s.ensureCheckIfNodeMatches(tx, idx, preserveIndexes, req.Node, req.PartitionOrDefault(), req.Check); err != nil { + err := s.ensureCheckIfNodeMatches(tx, idx, preserveIndexes, req.Node, req.PartitionOrDefault(), req.PeerName, req.Check) + if err != nil { return err } } for _, check := range req.Checks { - if err := s.ensureCheckIfNodeMatches(tx, idx, preserveIndexes, req.Node, req.PartitionOrDefault(), check); err != nil { + err := s.ensureCheckIfNodeMatches(tx, idx, preserveIndexes, req.Node, req.PartitionOrDefault(), req.PeerName, check) + if err != nil { return err } } @@ -233,6 +249,61 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b return nil } +func validateRegisterRequestPeerNamesTxn(_ ReadTxn, args *structs.RegisterRequest, _ bool) error { + var ( + peerNames = make(map[string]struct{}) + ) + if args.Service != nil { + if args.Service.PeerName == "" { + args.Service.PeerName = args.PeerName + } + + peerName := args.Service.PeerName + // TODO(peering): validate the peering exists (skip check on restore) + + peerNames[peerName] = struct{}{} + } + + validateCheck := func(chk *structs.HealthCheck) error { + if chk.PeerName == "" { + chk.PeerName = args.PeerName + } + + peerName := chk.PeerName + // TODO(peering): validate the peering exists (skip check on restore) + + peerNames[peerName] = struct{}{} + + return nil + } + + if args.Check != nil { + if err := validateCheck(args.Check); err != nil { + return err + } + } + for _, chk := range args.Checks { + if err := validateCheck(chk); err != nil { + return err + } + } + + { + // TODO(peering): validate the node's peering exists (skip check on restore) + peerName := args.PeerName + peerNames[peerName] = struct{}{} + } + + if len(peerNames) > 1 { + return fmt.Errorf("Cannot register services and checks for multiple peer names in one registration request") + } else if len(peerNames) == 0 { + return fmt.Errorf("No peer names are present on the 
registration request; this makes no sense") + } + + return nil + +} + // EnsureNode is used to upsert node registration or modification. func (s *Store) EnsureNode(idx uint64, node *structs.Node) error { tx := s.db.WriteTxn(idx) @@ -250,8 +321,11 @@ func (s *Store) EnsureNode(idx uint64, node *structs.Node) error { // If allowClashWithoutID then, getting a conflict on another node without ID will be allowed func ensureNoNodeWithSimilarNameTxn(tx ReadTxn, node *structs.Node, allowClashWithoutID bool) error { // Retrieve all of the nodes - - enodes, err := tx.Get(tableNodes, indexID+"_prefix", node.GetEnterpriseMeta()) + q := Query{ + PeerName: node.PeerName, + EnterpriseMeta: *node.GetEnterpriseMeta(), + } + enodes, err := tx.Get(tableNodes, indexID+"_prefix", q) if err != nil { return fmt.Errorf("Cannot lookup all nodes: %s", err) } @@ -264,6 +338,7 @@ func ensureNoNodeWithSimilarNameTxn(tx ReadTxn, node *structs.Node, allowClashWi EnterpriseMeta: *node.GetEnterpriseMeta(), Node: enode.Node, CheckID: string(structs.SerfCheckID), + PeerName: enode.PeerName, }) if err != nil { return fmt.Errorf("Cannot get status of node %s: %s", enode.Node, err) @@ -291,7 +366,7 @@ func ensureNoNodeWithSimilarNameTxn(tx ReadTxn, node *structs.Node, allowClashWi // Returns a bool indicating if a write happened and any error. func (s *Store) ensureNodeCASTxn(tx WriteTxn, idx uint64, node *structs.Node) (bool, error) { // Retrieve the existing entry. - existing, err := getNodeTxn(tx, node.Node, node.GetEnterpriseMeta()) + existing, err := getNodeTxn(tx, node.Node, node.GetEnterpriseMeta(), node.PeerName) if err != nil { return false, err } @@ -324,7 +399,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod // name is the same. 
var n *structs.Node if node.ID != "" { - existing, err := getNodeIDTxn(tx, node.ID, node.GetEnterpriseMeta()) + existing, err := getNodeIDTxn(tx, node.ID, node.GetEnterpriseMeta(), node.PeerName) if err != nil { return fmt.Errorf("node lookup failed: %s", err) } @@ -337,7 +412,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod return fmt.Errorf("Error while renaming Node ID: %q (%s): %s", node.ID, node.Address, dupNameError) } // We are actually renaming a node, remove its reference first - err := s.deleteNodeTxn(tx, idx, n.Node, n.GetEnterpriseMeta()) + err := s.deleteNodeTxn(tx, idx, n.Node, n.GetEnterpriseMeta(), n.PeerName) if err != nil { return fmt.Errorf("Error while renaming Node ID: %q (%s) from %s to %s", node.ID, node.Address, n.Node, node.Node) @@ -360,6 +435,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod existing, err := tx.First(tableNodes, indexID, Query{ Value: node.Node, EnterpriseMeta: *node.GetEnterpriseMeta(), + PeerName: node.PeerName, }) if err != nil { return fmt.Errorf("node name lookup failed: %s", err) @@ -396,7 +472,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod } // GetNode is used to retrieve a node registration by node name ID. -func (s *Store) GetNode(nodeNameOrID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.Node, error) { +func (s *Store) GetNode(nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.Node, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -406,20 +482,21 @@ func (s *Store) GetNode(nodeNameOrID string, entMeta *acl.EnterpriseMeta) (uint6 } // Get the table index. 
- idx := catalogNodesMaxIndex(tx, entMeta) + idx := catalogNodesMaxIndex(tx, entMeta, peerName) // Retrieve the node from the state store - node, err := getNodeTxn(tx, nodeNameOrID, entMeta) + node, err := getNodeTxn(tx, nodeNameOrID, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("node lookup failed: %s", err) } return idx, node, nil } -func getNodeTxn(tx ReadTxn, nodeNameOrID string, entMeta *acl.EnterpriseMeta) (*structs.Node, error) { +func getNodeTxn(tx ReadTxn, nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string) (*structs.Node, error) { node, err := tx.First(tableNodes, indexID, Query{ Value: nodeNameOrID, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return nil, fmt.Errorf("node lookup failed: %s", err) @@ -430,10 +507,11 @@ func getNodeTxn(tx ReadTxn, nodeNameOrID string, entMeta *acl.EnterpriseMeta) (* return nil, nil } -func getNodeIDTxn(tx ReadTxn, id types.NodeID, entMeta *acl.EnterpriseMeta) (*structs.Node, error) { +func getNodeIDTxn(tx ReadTxn, id types.NodeID, entMeta *acl.EnterpriseMeta, peerName string) (*structs.Node, error) { node, err := tx.First(tableNodes, indexUUID+"_prefix", Query{ Value: string(id), EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return nil, fmt.Errorf("node lookup by ID failed: %s", err) @@ -445,7 +523,7 @@ func getNodeIDTxn(tx ReadTxn, id types.NodeID, entMeta *acl.EnterpriseMeta) (*st } // GetNodeID is used to retrieve a node registration by node ID. -func (s *Store) GetNodeID(id types.NodeID, entMeta *acl.EnterpriseMeta) (uint64, *structs.Node, error) { +func (s *Store) GetNodeID(id types.NodeID, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.Node, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -454,16 +532,15 @@ func (s *Store) GetNodeID(id types.NodeID, entMeta *acl.EnterpriseMeta) (uint64, entMeta = structs.NodeEnterpriseMetaInDefaultPartition() } - // Get the table index. 
- idx := catalogNodesMaxIndex(tx, entMeta) + idx := catalogNodesMaxIndex(tx, entMeta, peerName) // Retrieve the node from the state store - node, err := getNodeIDTxn(tx, id, entMeta) + node, err := getNodeIDTxn(tx, id, entMeta, peerName) return idx, node, err } // Nodes is used to return all of the known nodes. -func (s *Store) Nodes(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.Nodes, error) { +func (s *Store) Nodes(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Nodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -472,11 +549,14 @@ func (s *Store) Nodes(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, s entMeta = structs.NodeEnterpriseMetaInDefaultPartition() } - // Get the table index. - idx := catalogNodesMaxIndex(tx, entMeta) + idx := catalogNodesMaxIndex(tx, entMeta, peerName) // Retrieve all of the nodes - nodes, err := tx.Get(tableNodes, indexID+"_prefix", entMeta) + q := Query{ + PeerName: peerName, + EnterpriseMeta: *entMeta, + } + nodes, err := tx.Get(tableNodes, indexID+"_prefix", q) if err != nil { return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) } @@ -491,7 +571,7 @@ func (s *Store) Nodes(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, s } // NodesByMeta is used to return all nodes with the given metadata key/value pairs. -func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta) (uint64, structs.Nodes, error) { +func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Nodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -500,8 +580,7 @@ func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMet entMeta = structs.NodeEnterpriseMetaInDefaultPartition() } - // Get the table index. 
- idx := catalogNodesMaxIndex(tx, entMeta) + idx := catalogNodesMaxIndex(tx, entMeta, peerName) if len(filters) == 0 { return idx, nil, nil // NodesByMeta is never called with an empty map, but just in case make it return no results. @@ -519,6 +598,7 @@ func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMet Key: firstKey, Value: firstValue, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) @@ -537,7 +617,7 @@ func (s *Store) NodesByMeta(ws memdb.WatchSet, filters map[string]string, entMet } // DeleteNode is used to delete a given node by its ID. -func (s *Store) DeleteNode(idx uint64, nodeName string, entMeta *acl.EnterpriseMeta) error { +func (s *Store) DeleteNode(idx uint64, nodeName string, entMeta *acl.EnterpriseMeta, peerName string) error { tx := s.db.WriteTxn(idx) defer tx.Abort() @@ -547,7 +627,7 @@ func (s *Store) DeleteNode(idx uint64, nodeName string, entMeta *acl.EnterpriseM } // Call the node deletion. - if err := s.deleteNodeTxn(tx, idx, nodeName, entMeta); err != nil { + if err := s.deleteNodeTxn(tx, idx, nodeName, entMeta, peerName); err != nil { return err } @@ -557,9 +637,9 @@ func (s *Store) DeleteNode(idx uint64, nodeName string, entMeta *acl.EnterpriseM // deleteNodeCASTxn is used to try doing a node delete operation with a given // raft index. If the CAS index specified is not equal to the last observed index for // the given check, then the call is a noop, otherwise a normal check delete is invoked. -func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string, entMeta *acl.EnterpriseMeta) (bool, error) { +func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string, entMeta *acl.EnterpriseMeta, peerName string) (bool, error) { // Look up the node. 
- node, err := getNodeTxn(tx, nodeName, entMeta) + node, err := getNodeTxn(tx, nodeName, entMeta, peerName) if err != nil { return false, err } @@ -575,7 +655,7 @@ func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string, } // Call the actual deletion if the above passed. - if err := s.deleteNodeTxn(tx, idx, nodeName, entMeta); err != nil { + if err := s.deleteNodeTxn(tx, idx, nodeName, entMeta, peerName); err != nil { return false, err } @@ -584,21 +664,22 @@ func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string, // deleteNodeTxn is the inner method used for removing a node from // the store within a given transaction. -func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta *acl.EnterpriseMeta) error { +func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta *acl.EnterpriseMeta, peerName string) error { // TODO: accept non-pointer value if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } // Look up the node. 
- node, err := tx.First(tableNodes, indexID, Query{ + nodeRaw, err := tx.First(tableNodes, indexID, Query{ Value: nodeName, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return fmt.Errorf("node lookup failed: %s", err) } - if node == nil { + if nodeRaw == nil { return nil } @@ -606,6 +687,7 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta services, err := tx.Get(tableServices, indexNode, Query{ Value: nodeName, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return fmt.Errorf("failed service lookup: %s", err) @@ -615,17 +697,17 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta svc := service.(*structs.ServiceNode) deleteServices = append(deleteServices, svc) - if err := catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceIndexes(tx, idx, svc.ServiceName, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } - if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceKindIndexes(tx, idx, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } } // Do the delete in a separate loop so we don't trash the iterator. 
for _, svc := range deleteServices { - if err := s.deleteServiceTxn(tx, idx, nodeName, svc.ServiceID, &svc.EnterpriseMeta); err != nil { + if err := s.deleteServiceTxn(tx, idx, nodeName, svc.ServiceID, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } } @@ -635,6 +717,7 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta checks, err := tx.Get(tableChecks, indexNode, Query{ Value: nodeName, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return fmt.Errorf("failed check lookup: %s", err) @@ -646,46 +729,52 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string, entMeta // Do the delete in a separate loop so we don't trash the iterator. for _, chk := range deleteChecks { - if err := s.deleteCheckTxn(tx, idx, nodeName, chk.CheckID, &chk.EnterpriseMeta); err != nil { + if err := s.deleteCheckTxn(tx, idx, nodeName, chk.CheckID, &chk.EnterpriseMeta, chk.PeerName); err != nil { return err } } - // Delete any coordinates associated with this node. - coords, err := tx.Get(tableCoordinates, indexNode, Query{ - Value: nodeName, - EnterpriseMeta: *entMeta, - }) - if err != nil { - return fmt.Errorf("failed coordinate lookup: %s", err) - } - var coordsToDelete []*structs.Coordinate - for coord := coords.Next(); coord != nil; coord = coords.Next() { - coordsToDelete = append(coordsToDelete, coord.(*structs.Coordinate)) - } - for _, coord := range coordsToDelete { - if err := deleteCoordinateTxn(tx, idx, coord); err != nil { - return fmt.Errorf("failed deleting coordinate: %s", err) + if peerName == "" { + // Delete any coordinates associated with this node. 
+ coords, err := tx.Get(tableCoordinates, indexNode, Query{ + Value: nodeName, + EnterpriseMeta: *entMeta, + PeerName: structs.DefaultPeerKeyword, + }) + if err != nil { + return fmt.Errorf("failed coordinate lookup: %s", err) + } + var coordsToDelete []*structs.Coordinate + for coord := coords.Next(); coord != nil; coord = coords.Next() { + coordsToDelete = append(coordsToDelete, coord.(*structs.Coordinate)) + } + for _, coord := range coordsToDelete { + if err := deleteCoordinateTxn(tx, idx, coord); err != nil { + return fmt.Errorf("failed deleting coordinate: %s", err) + } } } // Delete the node and update the index. - if err := tx.Delete(tableNodes, node); err != nil { + if err := tx.Delete(tableNodes, nodeRaw); err != nil { return fmt.Errorf("failed deleting node: %s", err) } - if err := catalogUpdateNodesIndexes(tx, idx, entMeta); err != nil { + node := nodeRaw.(*structs.Node) + if err := catalogUpdateNodesIndexes(tx, idx, entMeta, node.PeerName); err != nil { return fmt.Errorf("failed updating index: %s", err) } - // Invalidate any sessions for this node. - toDelete, err := allNodeSessionsTxn(tx, nodeName, entMeta.PartitionOrDefault()) - if err != nil { - return err - } + if peerName == "" { + // Invalidate any sessions for this node. + toDelete, err := allNodeSessionsTxn(tx, nodeName, entMeta.PartitionOrDefault()) + if err != nil { + return err + } - for _, session := range toDelete { - if err := s.deleteSessionTxn(tx, idx, session.ID, &session.EnterpriseMeta); err != nil { - return fmt.Errorf("failed to delete session '%s': %v", session.ID, err) + for _, session := range toDelete { + if err := s.deleteSessionTxn(tx, idx, session.ID, &session.EnterpriseMeta); err != nil { + return fmt.Errorf("failed to delete session '%s': %v", session.ID, err) + } } } @@ -711,7 +800,13 @@ var errCASCompareFailed = errors.New("compare-and-set: comparison failed") // Returns an error if the write didn't happen and nil if write was successful. 
func ensureServiceCASTxn(tx WriteTxn, idx uint64, node string, svc *structs.NodeService) error { // Retrieve the existing service. - existing, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: svc.EnterpriseMeta, Node: node, Service: svc.ID}) + existing, err := tx.First(tableServices, indexID, + NodeServiceQuery{ + EnterpriseMeta: svc.EnterpriseMeta, + Node: node, + Service: svc.ID, + PeerName: svc.PeerName, + }) if err != nil { return fmt.Errorf("failed service lookup: %s", err) } @@ -740,6 +835,7 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool EnterpriseMeta: svc.EnterpriseMeta, Node: node, Service: svc.ID, + PeerName: svc.PeerName, }) if err != nil { return fmt.Errorf("failed service lookup: %s", err) @@ -749,9 +845,11 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool return fmt.Errorf("Invalid Service Meta for node %s and serviceID %s: %v", node, svc.ID, err) } - // Check if this service is covered by a gateway's wildcard specifier - if err = checkGatewayWildcardsAndUpdate(tx, idx, svc); err != nil { - return fmt.Errorf("failed updating gateway mapping: %s", err) + if svc.PeerName == "" { + // Check if this service is covered by a gateway's wildcard specifier + if err = checkGatewayWildcardsAndUpdate(tx, idx, svc); err != nil { + return fmt.Errorf("failed updating gateway mapping: %s", err) + } } if err := upsertKindServiceName(tx, idx, svc.Kind, svc.CompoundServiceName()); err != nil { return fmt.Errorf("failed to persist service name: %v", err) @@ -787,28 +885,30 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool } } - // If there's a terminating gateway config entry for this service, populate the tagged addresses - // with virtual IP mappings. 
- termGatewayVIPsSupported, err := terminatingGatewayVirtualIPsSupported(tx, nil) - if err != nil { - return err - } - if termGatewayVIPsSupported && svc.Kind == structs.ServiceKindTerminatingGateway { - _, conf, err := configEntryTxn(tx, nil, structs.TerminatingGateway, svc.Service, &svc.EnterpriseMeta) + if svc.PeerName == "" { + // If there's a terminating gateway config entry for this service, populate the tagged addresses + // with virtual IP mappings. + termGatewayVIPsSupported, err := terminatingGatewayVirtualIPsSupported(tx, nil) if err != nil { - return fmt.Errorf("failed to retrieve terminating gateway config: %s", err) + return err } - if conf != nil { - termGatewayConf := conf.(*structs.TerminatingGatewayConfigEntry) - addrs, err := getTermGatewayVirtualIPs(tx, termGatewayConf.Services, &svc.EnterpriseMeta) + if termGatewayVIPsSupported && svc.Kind == structs.ServiceKindTerminatingGateway { + _, conf, err := configEntryTxn(tx, nil, structs.TerminatingGateway, svc.Service, &svc.EnterpriseMeta) if err != nil { - return err + return fmt.Errorf("failed to retrieve terminating gateway config: %s", err) } - if svc.TaggedAddresses == nil { - svc.TaggedAddresses = make(map[string]structs.ServiceAddress) - } - for key, addr := range addrs { - svc.TaggedAddresses[key] = addr + if conf != nil { + termGatewayConf := conf.(*structs.TerminatingGatewayConfigEntry) + addrs, err := getTermGatewayVirtualIPs(tx, termGatewayConf.Services, &svc.EnterpriseMeta) + if err != nil { + return err + } + if svc.TaggedAddresses == nil { + svc.TaggedAddresses = make(map[string]structs.ServiceAddress) + } + for key, addr := range addrs { + svc.TaggedAddresses[key] = addr + } } } } @@ -821,6 +921,7 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool n, err := tx.First(tableNodes, indexID, Query{ Value: node, EnterpriseMeta: svc.EnterpriseMeta, + PeerName: svc.PeerName, }) if err != nil { return fmt.Errorf("failed node lookup: %s", err) @@ -854,6 +955,7 
@@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool // assignServiceVirtualIP assigns a virtual IP to the target service and updates // the global virtual IP counter if necessary. func assignServiceVirtualIP(tx WriteTxn, sn structs.ServiceName) (string, error) { + // TODO(peering): support VIPs serviceVIP, err := tx.First(tableServiceVirtualIPs, indexID, sn) if err != nil { return "", fmt.Errorf("failed service virtual IP lookup: %s", err) @@ -978,15 +1080,15 @@ func terminatingGatewayVirtualIPsSupported(tx ReadTxn, ws memdb.WatchSet) (bool, } // Services returns all services along with a list of associated tags. -func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.Services, error) { +func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := catalogServicesMaxIndex(tx, entMeta) + idx := catalogServicesMaxIndex(tx, entMeta, peerName) // List all the services. 
- services, err := catalogServiceListNoWildcard(tx, entMeta) + services, err := catalogServiceListNoWildcard(tx, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed querying services: %s", err) } @@ -1018,17 +1120,24 @@ func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64 return idx, results, nil } -func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceList, error) { +func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceList, error) { tx := s.db.Txn(false) defer tx.Abort() - return serviceListTxn(tx, ws, entMeta) + return serviceListTxn(tx, ws, entMeta, peerName) } -func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceList, error) { - idx := catalogServicesMaxIndex(tx, entMeta) +func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceList, error) { + if entMeta == nil { + entMeta = structs.NodeEnterpriseMetaInDefaultPartition() + } - services, err := tx.Get(tableServices, indexID+"_prefix", entMeta) + idx := catalogServicesMaxIndex(tx, entMeta, peerName) + + services, err := tx.Get(tableServices, indexID+"_prefix", Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed querying services: %s", err) } @@ -1049,7 +1158,7 @@ func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) } // ServicesByNodeMeta returns all services, filtered by the given node metadata. 
-func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta) (uint64, structs.Services, error) { +func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1059,8 +1168,9 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, } // Get the table index. - idx := catalogServicesMaxIndex(tx, entMeta) - if nodeIdx := catalogNodesMaxIndex(tx, entMeta); nodeIdx > idx { + idx := catalogServicesMaxIndex(tx, entMeta, peerName) + + if nodeIdx := catalogNodesMaxIndex(tx, entMeta, peerName); nodeIdx > idx { idx = nodeIdx } @@ -1080,6 +1190,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, Key: firstKey, Value: firstValue, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) @@ -1088,7 +1199,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, // We don't want to track an unlimited number of services, so we pull a // top-level watch to use as a fallback. 
- allServices, err := catalogServiceListNoWildcard(tx, entMeta) + allServices, err := catalogServiceListNoWildcard(tx, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed services lookup: %s", err) } @@ -1103,7 +1214,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, } // List all the services on the node - services, err := catalogServiceListByNode(tx, n.Node, entMeta, false) + services, err := catalogServiceListByNode(tx, n.Node, entMeta, n.PeerName, false) if err != nil { return 0, nil, fmt.Errorf("failed querying services: %s", err) } @@ -1144,8 +1255,8 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, // * return when the last instance of a service is removed // * block until an instance for this service is available, or another // service is unregistered. -func maxIndexForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *acl.EnterpriseMeta) uint64 { - idx, _ := maxIndexAndWatchChForService(tx, serviceName, serviceExists, checks, entMeta) +func maxIndexForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *acl.EnterpriseMeta, peerName string) uint64 { + idx, _ := maxIndexAndWatchChForService(tx, serviceName, serviceExists, checks, entMeta, peerName) return idx } @@ -1163,20 +1274,20 @@ func maxIndexForService(tx ReadTxn, serviceName string, serviceExists, checks bo // returned for the chan. This allows for blocking watchers to _only_ watch this // one chan in the common case, falling back to watching all touched MemDB // indexes in more complicated cases. 
-func maxIndexAndWatchChForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *acl.EnterpriseMeta) (uint64, <-chan struct{}) { +func maxIndexAndWatchChForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, <-chan struct{}) { if !serviceExists { - res, err := catalogServiceLastExtinctionIndex(tx, entMeta) + res, err := catalogServiceLastExtinctionIndex(tx, entMeta, peerName) if missingIdx, ok := res.(*IndexEntry); ok && err == nil { // Note safe to only watch the extinction index as it's not updated when new instances come along so return nil watchCh return missingIdx.Value, nil } } - ch, res, err := catalogServiceMaxIndex(tx, serviceName, entMeta) + ch, res, err := catalogServiceMaxIndex(tx, serviceName, entMeta, peerName) if idx, ok := res.(*IndexEntry); ok && err == nil { return idx.Value, ch } - return catalogMaxIndex(tx, entMeta, checks), nil + return catalogMaxIndex(tx, entMeta, peerName, checks), nil } // Wrapper for maxIndexAndWatchChForService that operates on a list of ServiceNodes @@ -1190,7 +1301,7 @@ func maxIndexAndWatchChsForServiceNodes(tx ReadTxn, for i := 0; i < len(nodes); i++ { sn := structs.NewServiceName(nodes[i].ServiceName, &nodes[i].EnterpriseMeta) if ok := seen[sn]; !ok { - idx, svcCh := maxIndexAndWatchChForService(tx, sn.Name, true, watchChecks, &sn.EnterpriseMeta) + idx, svcCh := maxIndexAndWatchChForService(tx, sn.Name, true, watchChecks, &sn.EnterpriseMeta, nodes[i].PeerName) if idx > maxIdx { maxIdx = idx } @@ -1207,7 +1318,7 @@ func maxIndexAndWatchChsForServiceNodes(tx ReadTxn, // ConnectServiceNodes returns the nodes associated with a Connect // compatible destination for the given service name. This will include // both proxies and native integrations. 
-func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceNodes, error) { tx := s.db.ReadTxn() defer tx.Abort() @@ -1215,12 +1326,16 @@ func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMe if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: serviceName, EnterpriseMeta: *entMeta} + q := Query{ + Value: serviceName, + PeerName: peerName, + EnterpriseMeta: *entMeta, + } return serviceNodesTxn(tx, ws, indexConnect, q) } // ServiceNodes returns the nodes associated with a given service name. -func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceNodes, error) { tx := s.db.ReadTxn() defer tx.Abort() @@ -1228,7 +1343,11 @@ func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: serviceName, EnterpriseMeta: *entMeta} + q := Query{ + Value: serviceName, + PeerName: peerName, + EnterpriseMeta: *entMeta, + } return serviceNodesTxn(tx, ws, indexService, q) } @@ -1251,9 +1370,9 @@ func serviceNodesTxn(tx ReadTxn, ws memdb.WatchSet, index string, q Query) (uint // We append rather than replace since it allows users to migrate a service // to the mesh with a mix of sidecars and gateways until all its instances have a sidecar. 
var idx uint64 - if connect { + if connect && q.PeerName == "" { // Look up gateway nodes associated with the service - gwIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindTerminatingGateway, &q.EnterpriseMeta) + gwIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindTerminatingGateway, &q.EnterpriseMeta, structs.DefaultPeerKeyword) if err != nil { return 0, nil, fmt.Errorf("failed gateway nodes lookup: %v", err) } @@ -1276,7 +1395,7 @@ func serviceNodesTxn(tx ReadTxn, ws memdb.WatchSet, index string, q Query) (uint } // Fill in the node details. - results, err = parseServiceNodes(tx, ws, results, &q.EnterpriseMeta) + results, err = parseServiceNodes(tx, ws, results, &q.EnterpriseMeta, q.PeerName) if err != nil { return 0, nil, fmt.Errorf("failed parsing service nodes: %s", err) } @@ -1284,7 +1403,7 @@ func serviceNodesTxn(tx ReadTxn, ws memdb.WatchSet, index string, q Query) (uint // Get the table index. // TODO (gateways) (freddy) Why do we always consider the main service index here? // This doesn't seem to make sense for Connect when there's more than 1 result - svcIdx := maxIndexForService(tx, serviceName, len(results) > 0, false, &q.EnterpriseMeta) + svcIdx := maxIndexForService(tx, serviceName, len(results) > 0, false, &q.EnterpriseMeta, q.PeerName) if idx < svcIdx { idx = svcIdx } @@ -1294,7 +1413,7 @@ func serviceNodesTxn(tx ReadTxn, ws memdb.WatchSet, index string, q Query) (uint // ServiceTagNodes returns the nodes associated with a given service, filtering // out services that don't contain the given tags. 
-func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1303,8 +1422,11 @@ func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: service, EnterpriseMeta: *entMeta} - services, err := tx.Get(tableServices, indexService, q) + services, err := tx.Get(tableServices, indexService, Query{ + Value: service, + EnterpriseMeta: *entMeta, + PeerName: peerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -1322,12 +1444,12 @@ func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string } // Fill in the node details. - results, err = parseServiceNodes(tx, ws, results, entMeta) + results, err = parseServiceNodes(tx, ws, results, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed parsing service nodes: %s", err) } // Get the table index. 
- idx := maxIndexForService(tx, service, serviceExists, false, entMeta) + idx := maxIndexForService(tx, service, serviceExists, false, entMeta, peerName) return idx, results, nil } @@ -1364,12 +1486,16 @@ func serviceTagsFilter(sn *structs.ServiceNode, tags []string) bool { // ServiceAddressNodes returns the nodes associated with a given service, filtering // out services that don't match the given serviceAddress -func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() // List all the services. - services, err := tx.Get(tableServices, indexID+"_prefix", entMeta) + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + services, err := tx.Get(tableServices, indexID+"_prefix", q) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -1392,7 +1518,7 @@ func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta * } // Fill in the node details. - results, err = parseServiceNodes(tx, ws, results, entMeta) + results, err = parseServiceNodes(tx, ws, results, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed parsing service nodes: %s", err) } @@ -1401,10 +1527,14 @@ func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta * // parseServiceNodes iterates over a services query and fills in the node details, // returning a ServiceNodes slice. 
-func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNodes, entMeta *acl.EnterpriseMeta) (structs.ServiceNodes, error) { +func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNodes, entMeta *acl.EnterpriseMeta, peerName string) (structs.ServiceNodes, error) { // We don't want to track an unlimited number of nodes, so we pull a // top-level watch to use as a fallback. - allNodes, err := tx.Get(tableNodes, indexID+"_prefix", entMeta) + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + allNodes, err := tx.Get(tableNodes, indexID+"_prefix", q) if err != nil { return nil, fmt.Errorf("failed nodes lookup: %s", err) } @@ -1422,6 +1552,7 @@ func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNo watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{ Value: sn.Node, EnterpriseMeta: sn.EnterpriseMeta, + PeerName: sn.PeerName, }) if err != nil { return nil, fmt.Errorf("failed node lookup: %s", err) @@ -1446,15 +1577,15 @@ func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNo // NodeService is used to retrieve a specific service associated with the given // node. -func (s *Store) NodeService(nodeName string, serviceID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.NodeService, error) { +func (s *Store) NodeService(nodeName string, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeService, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. 
- idx := catalogServicesMaxIndex(tx, entMeta) + idx := catalogServicesMaxIndex(tx, entMeta, peerName) // Query the service - service, err := getNodeServiceTxn(tx, nodeName, serviceID, entMeta) + service, err := getNodeServiceTxn(tx, nodeName, serviceID, entMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed querying service for node %q: %s", nodeName, err) } @@ -1462,7 +1593,18 @@ func (s *Store) NodeService(nodeName string, serviceID string, entMeta *acl.Ente return idx, service, nil } -func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) (*structs.NodeService, error) { +func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (*structs.NodeService, error) { + sn, err := getServiceNodeTxn(tx, nodeName, serviceID, entMeta, peerName) + if err != nil { + return nil, err + } + if sn != nil { + return sn.ToNodeService(), nil + } + return nil, nil +} + +func getServiceNodeTxn(tx ReadTxn, nodeName, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (*structs.ServiceNode, error) { // TODO: pass non-pointer type for ent meta if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -1473,19 +1615,58 @@ func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *acl.Ente EnterpriseMeta: *entMeta, Node: nodeName, Service: serviceID, + PeerName: peerName, }) if err != nil { return nil, fmt.Errorf("failed querying service for node %q: %s", nodeName, err) } if service != nil { - return service.(*structs.ServiceNode).ToNodeService(), nil + return service.(*structs.ServiceNode), nil } return nil, nil } -func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, allowWildcard bool) (bool, uint64, *structs.Node, memdb.ResultIterator, error) { +// ServiceNode is used to retrieve a specific service by service ID and node ID or name. 
+func (s *Store) ServiceNode(nodeID, nodeName, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.ServiceNode, error) { + var ( + node *structs.Node + err error + ) + if nodeID != "" { + _, node, err = s.GetNodeID(types.NodeID(nodeID), entMeta, peerName) + if err != nil { + return 0, nil, fmt.Errorf("Failure looking up node by ID %s: %w", nodeID, err) + } + } else if nodeName != "" { + _, node, err = s.GetNode(nodeName, entMeta, peerName) + if err != nil { + return 0, nil, fmt.Errorf("Failure looking up node by name %s: %w", nodeName, err) + } + } else { + return 0, nil, fmt.Errorf("Node ID or name required to lookup the service") + } + if node == nil { + return 0, nil, ErrNodeNotFound + } + + tx := s.db.Txn(false) + defer tx.Abort() + + // Get the table index. + idx := catalogServicesMaxIndex(tx, entMeta, peerName) + + // Query the service + service, err := getServiceNodeTxn(tx, node.Node, serviceID, entMeta, peerName) + if err != nil { + return 0, nil, fmt.Errorf("failed querying service for node %q: %w", node.Node, err) + } + + return idx, service, nil +} + +func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string, allowWildcard bool) (bool, uint64, *structs.Node, memdb.ResultIterator, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -1495,10 +1676,14 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac } // Get the table index. 
- idx := catalogMaxIndex(tx, entMeta, false) + idx := catalogMaxIndex(tx, entMeta, peerName, false) // Query the node by node name - watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{Value: nodeNameOrID, EnterpriseMeta: *entMeta}) + watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{ + Value: nodeNameOrID, + EnterpriseMeta: *entMeta, + PeerName: peerName, + }) if err != nil { return true, 0, nil, nil, fmt.Errorf("node lookup failed: %s", err) } @@ -1515,6 +1700,7 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac iter, err := tx.Get(tableNodes, indexUUID+"_prefix", Query{ Value: resizeNodeLookupKey(nodeNameOrID), EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { ws.Add(watchCh) @@ -1545,7 +1731,7 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac nodeName := node.Node // Read all of the services - services, err := catalogServiceListByNode(tx, nodeName, entMeta, allowWildcard) + services, err := catalogServiceListByNode(tx, nodeName, entMeta, peerName, allowWildcard) if err != nil { return true, 0, nil, nil, fmt.Errorf("failed querying services for node %q: %s", nodeName, err) } @@ -1555,8 +1741,8 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac } // NodeServices is used to query service registrations by node name or UUID. 
-func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.NodeServices, error) { - done, idx, node, services, err := s.nodeServices(ws, nodeNameOrID, entMeta, false) +func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeServices, error) { + done, idx, node, services, err := s.nodeServices(ws, nodeNameOrID, entMeta, peerName, false) if done || err != nil { return idx, nil, err } @@ -1579,8 +1765,8 @@ func (s *Store) NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *ac } // NodeServices is used to query service registrations by node name or UUID. -func (s *Store) NodeServiceList(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta) (uint64, *structs.NodeServiceList, error) { - done, idx, node, services, err := s.nodeServices(ws, nodeNameOrID, entMeta, true) +func (s *Store) NodeServiceList(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeServiceList, error) { + done, idx, node, services, err := s.nodeServices(ws, nodeNameOrID, entMeta, peerName, true) if done || err != nil { return idx, nil, err } @@ -1606,12 +1792,12 @@ func (s *Store) NodeServiceList(ws memdb.WatchSet, nodeNameOrID string, entMeta } // DeleteService is used to delete a given service associated with a node. 
-func (s *Store) DeleteService(idx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) error { +func (s *Store) DeleteService(idx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) error { tx := s.db.WriteTxn(idx) defer tx.Abort() // Call the service deletion - if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID, entMeta); err != nil { + if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID, entMeta, peerName); err != nil { return err } @@ -1621,9 +1807,9 @@ func (s *Store) DeleteService(idx uint64, nodeName, serviceID string, entMeta *a // deleteServiceCASTxn is used to try doing a service delete operation with a given // raft index. If the CAS index specified is not equal to the last observed index for // the given service, then the call is a noop, otherwise a normal delete is invoked. -func (s *Store) deleteServiceCASTxn(tx WriteTxn, idx, cidx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) (bool, error) { +func (s *Store) deleteServiceCASTxn(tx WriteTxn, idx, cidx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) (bool, error) { // Look up the service. - service, err := getNodeServiceTxn(tx, nodeName, serviceID, entMeta) + service, err := getNodeServiceTxn(tx, nodeName, serviceID, entMeta, peerName) if err != nil { return false, fmt.Errorf("service lookup failed: %s", err) } @@ -1639,7 +1825,7 @@ func (s *Store) deleteServiceCASTxn(tx WriteTxn, idx, cidx uint64, nodeName, ser } // Call the actual deletion if the above passed. - if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID, entMeta); err != nil { + if err := s.deleteServiceTxn(tx, idx, nodeName, serviceID, entMeta, peerName); err != nil { return false, err } @@ -1648,13 +1834,19 @@ func (s *Store) deleteServiceCASTxn(tx WriteTxn, idx, cidx uint64, nodeName, ser // deleteServiceTxn is the inner method called to remove a service // registration within an existing transaction. 
-func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta) error { +func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID string, entMeta *acl.EnterpriseMeta, peerName string) error { // TODO: pass non-pointer type for ent meta if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - service, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: *entMeta, Node: nodeName, Service: serviceID}) + service, err := tx.First(tableServices, indexID, + NodeServiceQuery{ + EnterpriseMeta: *entMeta, + Node: nodeName, + Service: serviceID, + PeerName: peerName, + }) if err != nil { return fmt.Errorf("failed service lookup: %s", err) } @@ -1672,6 +1864,7 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st Node: nodeName, Service: serviceID, EnterpriseMeta: *entMeta, + PeerName: peerName, } checks, err := tx.Get(tableChecks, indexNodeService, nsq) if err != nil { @@ -1684,13 +1877,13 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st // Do the delete in a separate loop so we don't trash the iterator. for _, check := range deleteChecks { - if err := s.deleteCheckTxn(tx, idx, nodeName, check.CheckID, &check.EnterpriseMeta); err != nil { + if err := s.deleteCheckTxn(tx, idx, nodeName, check.CheckID, &check.EnterpriseMeta, check.PeerName); err != nil { return err } } // Update the index. 
- if err := catalogUpdateCheckIndexes(tx, idx, entMeta); err != nil { + if err := catalogUpdateCheckIndexes(tx, idx, entMeta, peerName); err != nil { return err } @@ -1698,30 +1891,35 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st if err := tx.Delete(tableServices, service); err != nil { return fmt.Errorf("failed deleting service: %s", err) } - if err := catalogUpdateServicesIndexes(tx, idx, entMeta); err != nil { - return fmt.Errorf("failed updating index: %s", err) - } svc := service.(*structs.ServiceNode) - name := svc.CompoundServiceName() - - if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServicesIndexes(tx, idx, entMeta, svc.PeerName); err != nil { return err } + if err := catalogUpdateServiceKindIndexes(tx, idx, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { + return err + } + + name := svc.CompoundServiceName() + if err := cleanupMeshTopology(tx, idx, svc); err != nil { return fmt.Errorf("failed to clean up mesh-topology associations for %q: %v", name.String(), err) } - q := Query{Value: svc.ServiceName, EnterpriseMeta: *entMeta} + q := Query{ + Value: svc.ServiceName, + EnterpriseMeta: *entMeta, + PeerName: svc.PeerName, + } if remainingService, err := tx.First(tableServices, indexService, q); err == nil { if remainingService != nil { // We have at least one remaining service, update the index - if err := catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, entMeta); err != nil { + if err := catalogUpdateServiceIndexes(tx, idx, svc.ServiceName, entMeta, svc.PeerName); err != nil { return err } } else { // There are no more service instances, cleanup the service. index - _, serviceIndex, err := catalogServiceMaxIndex(tx, svc.ServiceName, entMeta) + _, serviceIndex, err := catalogServiceMaxIndex(tx, svc.ServiceName, entMeta, svc.PeerName) if err == nil && serviceIndex != nil { // we found service. 
index, garbage collect it if errW := tx.Delete(tableIndex, serviceIndex); errW != nil { @@ -1729,11 +1927,13 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st } } - if err := catalogUpdateServiceExtinctionIndex(tx, idx, entMeta); err != nil { + if err := catalogUpdateServiceExtinctionIndex(tx, idx, entMeta, svc.PeerName); err != nil { return err } - if err := cleanupGatewayWildcards(tx, idx, svc); err != nil { - return fmt.Errorf("failed to clean up gateway-service associations for %q: %v", name.String(), err) + if svc.PeerName == "" { + if err := cleanupGatewayWildcards(tx, idx, svc); err != nil { + return fmt.Errorf("failed to clean up gateway-service associations for %q: %v", name.String(), err) + } } if err := freeServiceVirtualIP(tx, svc.ServiceName, nil, entMeta); err != nil { return fmt.Errorf("failed to clean up virtual IP for %q: %v", name.String(), err) @@ -1818,20 +2018,24 @@ func (s *Store) EnsureCheck(idx uint64, hc *structs.HealthCheck) error { } // updateAllServiceIndexesOfNode updates the Raft index of all the services associated with this node -func updateAllServiceIndexesOfNode(tx WriteTxn, idx uint64, nodeID string, entMeta *acl.EnterpriseMeta) error { +func updateAllServiceIndexesOfNode(tx WriteTxn, idx uint64, nodeID string, entMeta *acl.EnterpriseMeta, peerName string) error { + if peerName == "" { + peerName = structs.LocalPeerKeyword + } services, err := tx.Get(tableServices, indexNode, Query{ Value: nodeID, EnterpriseMeta: *entMeta.WithWildcardNamespace(), + PeerName: peerName, }) if err != nil { return fmt.Errorf("failed updating services for node %s: %s", nodeID, err) } for service := services.Next(); service != nil; service = services.Next() { svc := service.(*structs.ServiceNode) - if err := catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceIndexes(tx, idx, svc.ServiceName, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err 
} - if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceKindIndexes(tx, idx, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } } @@ -1842,7 +2046,7 @@ func updateAllServiceIndexesOfNode(tx WriteTxn, idx uint64, nodeID string, entMe // Returns a bool indicating if a write happened and any error. func (s *Store) ensureCheckCASTxn(tx WriteTxn, idx uint64, hc *structs.HealthCheck) (bool, error) { // Retrieve the existing entry. - _, existing, err := getNodeCheckTxn(tx, hc.Node, hc.CheckID, &hc.EnterpriseMeta) + _, existing, err := getNodeCheckTxn(tx, hc.Node, hc.CheckID, &hc.EnterpriseMeta, hc.PeerName) if err != nil { return false, fmt.Errorf("failed health check lookup: %s", err) } @@ -1876,6 +2080,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc EnterpriseMeta: hc.EnterpriseMeta, Node: hc.Node, CheckID: string(hc.CheckID), + PeerName: hc.PeerName, }) if err != nil { return fmt.Errorf("failed health check lookup: %s", err) @@ -1899,6 +2104,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc node, err := tx.First(tableNodes, indexID, Query{ Value: hc.Node, EnterpriseMeta: hc.EnterpriseMeta, + PeerName: hc.PeerName, }) if err != nil { return fmt.Errorf("failed node lookup: %s", err) @@ -1915,6 +2121,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc EnterpriseMeta: hc.EnterpriseMeta, Node: hc.Node, Service: hc.ServiceID, + PeerName: hc.PeerName, }) if err != nil { return fmt.Errorf("failed service lookup: %s", err) @@ -1930,10 +2137,10 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc if existing != nil && existing.(*structs.HealthCheck).IsSame(hc) { modified = false } else { - if err = catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, &svc.EnterpriseMeta); err != nil { + if err = catalogUpdateServiceIndexes(tx, idx, 
svc.ServiceName, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } - if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceKindIndexes(tx, idx, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } } @@ -1943,7 +2150,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc } else { // Since the check has been modified, it impacts all services of node // Update the status for all the services associated with this node - err = updateAllServiceIndexesOfNode(tx, idx, hc.Node, &hc.EnterpriseMeta) + err = updateAllServiceIndexesOfNode(tx, idx, hc.Node, &hc.EnterpriseMeta, hc.PeerName) if err != nil { return err } @@ -1951,7 +2158,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc } // Delete any sessions for this check if the health is critical. - if hc.Status == api.HealthCritical { + if hc.Status == api.HealthCritical && hc.PeerName == "" { sessions, err := checkSessionsTxn(tx, hc) if err != nil { return err @@ -1977,18 +2184,18 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc // NodeCheck is used to retrieve a specific check associated with the given // node. -func (s *Store) NodeCheck(nodeName string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) (uint64, *structs.HealthCheck, error) { +func (s *Store) NodeCheck(nodeName string, checkID types.CheckID, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.HealthCheck, error) { tx := s.db.Txn(false) defer tx.Abort() - return getNodeCheckTxn(tx, nodeName, checkID, entMeta) + return getNodeCheckTxn(tx, nodeName, checkID, entMeta, peerName) } // nodeCheckTxn is used as the inner method to handle reading a health check // from the state store. 
-func getNodeCheckTxn(tx ReadTxn, nodeName string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) (uint64, *structs.HealthCheck, error) { +func getNodeCheckTxn(tx ReadTxn, nodeName string, checkID types.CheckID, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.HealthCheck, error) { // Get the table index. - idx := catalogChecksMaxIndex(tx, entMeta) + idx := catalogChecksMaxIndex(tx, entMeta, peerName) // TODO: accept non-pointer value if entMeta == nil { @@ -1996,7 +2203,13 @@ func getNodeCheckTxn(tx ReadTxn, nodeName string, checkID types.CheckID, entMeta } // Return the check. - check, err := tx.First(tableChecks, indexID, NodeCheckQuery{EnterpriseMeta: *entMeta, Node: nodeName, CheckID: string(checkID)}) + check, err := tx.First(tableChecks, indexID, + NodeCheckQuery{ + EnterpriseMeta: *entMeta, + Node: nodeName, + CheckID: string(checkID), + PeerName: peerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed check lookup: %s", err) } @@ -2009,7 +2222,7 @@ func getNodeCheckTxn(tx ReadTxn, nodeName string, checkID types.CheckID, entMeta // NodeChecks is used to retrieve checks associated with the // given node from the state store. -func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2018,10 +2231,14 @@ func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *acl.Ente } // Get the table index. - idx := catalogChecksMaxIndex(tx, entMeta) + idx := catalogChecksMaxIndex(tx, entMeta, peerName) // Return the checks. 
- iter, err := catalogListChecksByNode(tx, Query{Value: nodeName, EnterpriseMeta: *entMeta}) + iter, err := catalogListChecksByNode(tx, Query{ + Value: nodeName, + EnterpriseMeta: *entMeta, + PeerName: peerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed check lookup: %s", err) } @@ -2037,17 +2254,21 @@ func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *acl.Ente // ServiceChecks is used to get all checks associated with a // given service ID. The query is performed against a service // _name_ instead of a service ID. -func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := catalogChecksMaxIndex(tx, entMeta) + idx := catalogChecksMaxIndex(tx, entMeta, peerName) if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: serviceName, EnterpriseMeta: *entMeta} + q := Query{ + Value: serviceName, + EnterpriseMeta: *entMeta, + PeerName: peerName, + } iter, err := tx.Get(tableChecks, indexService, q) if err != nil { return 0, nil, fmt.Errorf("failed check lookup: %s", err) @@ -2064,35 +2285,37 @@ func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string, entMeta *ac // ServiceChecksByNodeMeta is used to get all checks associated with a // given service ID, filtered by the given node metadata values. The query // is performed against a service _name_ instead of a service ID. 
-func (s *Store) ServiceChecksByNodeMeta(ws memdb.WatchSet, serviceName string, - filters map[string]string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { - +func (s *Store) ServiceChecksByNodeMeta(ws memdb.WatchSet, serviceName string, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. - idx := maxIndexForService(tx, serviceName, true, true, entMeta) + idx := maxIndexForService(tx, serviceName, true, true, entMeta, peerName) if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: serviceName, EnterpriseMeta: *entMeta} + q := Query{ + Value: serviceName, + EnterpriseMeta: *entMeta, + PeerName: peerName, + } iter, err := tx.Get(tableChecks, indexService, q) if err != nil { return 0, nil, fmt.Errorf("failed check lookup: %s", err) } ws.Add(iter.WatchCh()) - return parseChecksByNodeMeta(tx, ws, idx, iter, filters, entMeta) + return parseChecksByNodeMeta(tx, ws, idx, iter, filters, entMeta, peerName) } // ChecksInState is used to query the state store for all checks // which are in the provided state. -func (s *Store) ChecksInState(ws memdb.WatchSet, state string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func (s *Store) ChecksInState(ws memdb.WatchSet, state string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() - idx, iter, err := checksInStateTxn(tx, ws, state, entMeta) + idx, iter, err := checksInStateTxn(tx, ws, state, entMeta, peerName) if err != nil { return 0, nil, err } @@ -2106,21 +2329,21 @@ func (s *Store) ChecksInState(ws memdb.WatchSet, state string, entMeta *acl.Ente // ChecksInStateByNodeMeta is used to query the state store for all checks // which are in the provided state, filtered by the given node metadata values. 
-func (s *Store) ChecksInStateByNodeMeta(ws memdb.WatchSet, state string, filters map[string]string, entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func (s *Store) ChecksInStateByNodeMeta(ws memdb.WatchSet, state string, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.HealthChecks, error) { tx := s.db.Txn(false) defer tx.Abort() - idx, iter, err := checksInStateTxn(tx, ws, state, entMeta) + idx, iter, err := checksInStateTxn(tx, ws, state, entMeta, peerName) if err != nil { return 0, nil, err } - return parseChecksByNodeMeta(tx, ws, idx, iter, filters, entMeta) + return parseChecksByNodeMeta(tx, ws, idx, iter, filters, entMeta, peerName) } -func checksInStateTxn(tx ReadTxn, ws memdb.WatchSet, state string, entMeta *acl.EnterpriseMeta) (uint64, memdb.ResultIterator, error) { +func checksInStateTxn(tx ReadTxn, ws memdb.WatchSet, state string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, memdb.ResultIterator, error) { // Get the table index. - idx := catalogChecksMaxIndex(tx, entMeta) + idx := catalogChecksMaxIndex(tx, entMeta, peerName) if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -2130,9 +2353,17 @@ func checksInStateTxn(tx ReadTxn, ws memdb.WatchSet, state string, entMeta *acl. var iter memdb.ResultIterator var err error if state == api.HealthAny { - iter, err = tx.Get(tableChecks, indexID+"_prefix", entMeta) + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + iter, err = tx.Get(tableChecks, indexID+"_prefix", q) } else { - q := Query{Value: state, EnterpriseMeta: *entMeta} + q := Query{ + Value: state, + EnterpriseMeta: *entMeta, + PeerName: peerName, + } iter, err = tx.Get(tableChecks, indexStatus, q) } if err != nil { @@ -2145,13 +2376,26 @@ func checksInStateTxn(tx ReadTxn, ws memdb.WatchSet, state string, entMeta *acl. 
// parseChecksByNodeMeta is a helper function used to deduplicate some // repetitive code for returning health checks filtered by node metadata fields. -func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet, - idx uint64, iter memdb.ResultIterator, filters map[string]string, - entMeta *acl.EnterpriseMeta) (uint64, structs.HealthChecks, error) { +func parseChecksByNodeMeta( + tx ReadTxn, + ws memdb.WatchSet, + idx uint64, + iter memdb.ResultIterator, + filters map[string]string, + entMeta *acl.EnterpriseMeta, + peerName string, +) (uint64, structs.HealthChecks, error) { + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } // We don't want to track an unlimited number of nodes, so we pull a // top-level watch to use as a fallback. - allNodes, err := tx.Get(tableNodes, indexID+"_prefix", entMeta) + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + allNodes, err := tx.Get(tableNodes, indexID+"_prefix", q) if err != nil { return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) } @@ -2164,6 +2408,7 @@ func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet, watchCh, node, err := tx.FirstWatch(tableNodes, indexID, Query{ Value: healthCheck.Node, EnterpriseMeta: healthCheck.EnterpriseMeta, + PeerName: healthCheck.PeerName, }) if err != nil { return 0, nil, fmt.Errorf("failed node lookup: %s", err) @@ -2183,12 +2428,12 @@ func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet, } // DeleteCheck is used to delete a health check registration. 
-func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) error { +func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta, peerName string) error { tx := s.db.WriteTxn(idx) defer tx.Abort() // Call the check deletion - if err := s.deleteCheckTxn(tx, idx, node, checkID, entMeta); err != nil { + if err := s.deleteCheckTxn(tx, idx, node, checkID, entMeta, peerName); err != nil { return err } @@ -2198,9 +2443,16 @@ func (s *Store) DeleteCheck(idx uint64, node string, checkID types.CheckID, entM // deleteCheckCASTxn is used to try doing a check delete operation with a given // raft index. If the CAS index specified is not equal to the last observed index for // the given check, then the call is a noop, otherwise a normal check delete is invoked. -func (s *Store) deleteCheckCASTxn(tx WriteTxn, idx, cidx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) (bool, error) { +func (s *Store) deleteCheckCASTxn( + tx WriteTxn, + idx, cidx uint64, + node string, + checkID types.CheckID, + entMeta *acl.EnterpriseMeta, + peerName string, +) (bool, error) { // Try to retrieve the existing health check. - _, hc, err := getNodeCheckTxn(tx, node, checkID, entMeta) + _, hc, err := getNodeCheckTxn(tx, node, checkID, entMeta, peerName) if err != nil { return false, fmt.Errorf("check lookup failed: %s", err) } @@ -2216,7 +2468,7 @@ func (s *Store) deleteCheckCASTxn(tx WriteTxn, idx, cidx uint64, node string, ch } // Call the actual deletion if the above passed. - if err := s.deleteCheckTxn(tx, idx, node, checkID, entMeta); err != nil { + if err := s.deleteCheckTxn(tx, idx, node, checkID, entMeta, peerName); err != nil { return false, err } @@ -2225,11 +2477,16 @@ func (s *Store) deleteCheckCASTxn(tx WriteTxn, idx, cidx uint64, node string, ch // NodeServiceQuery is a type used to query the checks table. 
type NodeServiceQuery struct { - Node string - Service string + Node string + Service string + PeerName string acl.EnterpriseMeta } +func (q NodeServiceQuery) PeerOrEmpty() string { + return q.PeerName +} + // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer // receiver for this method. Remove once that is fixed. func (q NodeServiceQuery) NamespaceOrDefault() string { @@ -2244,13 +2501,19 @@ func (q NodeServiceQuery) PartitionOrDefault() string { // deleteCheckTxn is the inner method used to call a health // check deletion within an existing transaction. -func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta) error { +func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID types.CheckID, entMeta *acl.EnterpriseMeta, peerName string) error { if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } // Try to retrieve the existing health check. - hc, err := tx.First(tableChecks, indexID, NodeCheckQuery{EnterpriseMeta: *entMeta, Node: node, CheckID: string(checkID)}) + hc, err := tx.First(tableChecks, indexID, + NodeCheckQuery{ + EnterpriseMeta: *entMeta, + Node: node, + CheckID: string(checkID), + PeerName: peerName, + }) if err != nil { return fmt.Errorf("check lookup failed: %s", err) } @@ -2261,24 +2524,29 @@ func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID typ if existing != nil { // When no service is linked to this service, update all services of node if existing.ServiceID != "" { - if err := catalogUpdateServiceIndexes(tx, existing.ServiceName, idx, &existing.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceIndexes(tx, idx, existing.ServiceName, &existing.EnterpriseMeta, existing.PeerName); err != nil { return err } - - svcRaw, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: existing.EnterpriseMeta, Node: existing.Node, Service: existing.ServiceID}) + svcRaw, err 
:= tx.First(tableServices, indexID, + NodeServiceQuery{ + EnterpriseMeta: existing.EnterpriseMeta, + Node: existing.Node, + Service: existing.ServiceID, + PeerName: existing.PeerName, + }) if err != nil { return fmt.Errorf("failed retrieving service from state store: %v", err) } svc := svcRaw.(*structs.ServiceNode) - if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, idx, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceKindIndexes(tx, idx, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } } else { - if err := updateAllServiceIndexesOfNode(tx, idx, existing.Node, &existing.EnterpriseMeta); err != nil { + if err := updateAllServiceIndexesOfNode(tx, idx, existing.Node, &existing.EnterpriseMeta, existing.PeerName); err != nil { return fmt.Errorf("Failed to update services linked to deleted healthcheck: %s", err) } - if err := catalogUpdateServicesIndexes(tx, idx, entMeta); err != nil { + if err := catalogUpdateServicesIndexes(tx, idx, entMeta, existing.PeerName); err != nil { return err } } @@ -2289,20 +2557,22 @@ func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID typ return fmt.Errorf("failed removing check: %s", err) } - if err := catalogUpdateCheckIndexes(tx, idx, entMeta); err != nil { + if err := catalogUpdateCheckIndexes(tx, idx, entMeta, peerName); err != nil { return err } - // Delete any sessions for this check. - sessions, err := checkSessionsTxn(tx, existing) - if err != nil { - return err - } + if peerName == "" { + // Delete any sessions for this check. + sessions, err := checkSessionsTxn(tx, existing) + if err != nil { + return err + } - // Do the delete in a separate loop so we don't trash the iterator. - for _, sess := range sessions { - if err := s.deleteSessionTxn(tx, idx, sess.Session, &sess.EnterpriseMeta); err != nil { - return fmt.Errorf("failed deleting session: %s", err) + // Do the delete in a separate loop so we don't trash the iterator. 
+ for _, sess := range sessions { + if err := s.deleteSessionTxn(tx, idx, sess.Session, &sess.EnterpriseMeta); err != nil { + return fmt.Errorf("failed deleting session: %s", err) + } } } @@ -2310,12 +2580,12 @@ func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID typ } // CombinedCheckServiceNodes is used to query all nodes and checks for both typical and Connect endpoints of a service -func (s *Store) CombinedCheckServiceNodes(ws memdb.WatchSet, service structs.ServiceName) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) CombinedCheckServiceNodes(ws memdb.WatchSet, service structs.ServiceName, peerName string) (uint64, structs.CheckServiceNodes, error) { var ( resp structs.CheckServiceNodes maxIdx uint64 ) - idx, csn, err := s.CheckServiceNodes(ws, service.Name, &service.EnterpriseMeta) + idx, csn, err := s.CheckServiceNodes(ws, service.Name, &service.EnterpriseMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed to get downstream nodes for %q: %v", service, err) } @@ -2323,8 +2593,7 @@ func (s *Store) CombinedCheckServiceNodes(ws memdb.WatchSet, service structs.Ser maxIdx = idx } resp = append(resp, csn...) - - idx, csn, err = s.CheckConnectServiceNodes(ws, service.Name, &service.EnterpriseMeta) + idx, csn, err = s.CheckConnectServiceNodes(ws, service.Name, &service.EnterpriseMeta, peerName) if err != nil { return 0, nil, fmt.Errorf("failed to get downstream connect nodes for %q: %v", service, err) } @@ -2337,14 +2606,14 @@ func (s *Store) CombinedCheckServiceNodes(ws memdb.WatchSet, service structs.Ser } // CheckServiceNodes is used to query all nodes and checks for a given service. 
-func (s *Store) CheckServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { - return s.checkServiceNodes(ws, serviceName, false, entMeta) +func (s *Store) CheckServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { + return s.checkServiceNodes(ws, serviceName, false, entMeta, peerName) } // CheckConnectServiceNodes is used to query all nodes and checks for Connect // compatible endpoints for a given service. -func (s *Store) CheckConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { - return s.checkServiceNodes(ws, serviceName, true, entMeta) +func (s *Store) CheckConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { + return s.checkServiceNodes(ws, serviceName, true, entMeta, peerName) } // CheckIngressServiceNodes is used to query all nodes and checks for ingress @@ -2353,7 +2622,7 @@ func (s *Store) CheckIngressServiceNodes(ws memdb.WatchSet, serviceName string, tx := s.db.Txn(false) defer tx.Abort() - maxIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindIngressGateway, entMeta) + maxIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindIngressGateway, entMeta, structs.DefaultPeerKeyword) if err != nil { return 0, nil, fmt.Errorf("failed gateway nodes lookup: %v", err) } @@ -2375,7 +2644,7 @@ func (s *Store) CheckIngressServiceNodes(ws memdb.WatchSet, serviceName string, var results structs.CheckServiceNodes for sn := range names { - idx, n, err := checkServiceNodesTxn(tx, ws, sn.Name, false, &sn.EnterpriseMeta) + idx, n, err := checkServiceNodesTxn(tx, ws, sn.Name, false, &sn.EnterpriseMeta, structs.DefaultPeerKeyword) if err != nil { return 0, nil, err } @@ -2385,14 +2654,14 @@ 
func (s *Store) CheckIngressServiceNodes(ws memdb.WatchSet, serviceName string, return maxIdx, results, nil } -func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect bool, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() - return checkServiceNodesTxn(tx, ws, serviceName, connect, entMeta) + return checkServiceNodesTxn(tx, ws, serviceName, connect, entMeta, peerName) } -func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, connect bool, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, connect bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { index := indexService if connect { index = indexConnect @@ -2406,6 +2675,7 @@ func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, con q := Query{ Value: serviceName, EnterpriseMeta: *entMeta, + PeerName: peerName, } iter, err := tx.Get(tableServices, index, q) if err != nil { @@ -2440,9 +2710,10 @@ func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, con // We append rather than replace since it allows users to migrate a service // to the mesh with a mix of sidecars and gateways until all its instances have a sidecar. 
var idx uint64 - if connect { + if connect && peerName == "" { // Look up gateway nodes associated with the service - gwIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindTerminatingGateway, entMeta) + // TODO(peering): we'll have to do something here + gwIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindTerminatingGateway, entMeta, structs.DefaultPeerKeyword) if err != nil { return 0, nil, fmt.Errorf("failed gateway nodes lookup: %v", err) } @@ -2474,7 +2745,7 @@ func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, con // We know service values should exist since the serviceNames map is only // populated if there is at least one result above. so serviceExists arg // below is always true. - svcIdx, svcCh := maxIndexAndWatchChForService(tx, n.Name, true, true, &n.EnterpriseMeta) + svcIdx, svcCh := maxIndexAndWatchChForService(tx, n.Name, true, true, &n.EnterpriseMeta, peerName) // Take the max index represented idx = lib.MaxUint64(idx, svcIdx) if svcCh != nil { @@ -2495,7 +2766,7 @@ func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, con // use target serviceName here but it actually doesn't matter. No chan will // be returned as we can't use the optimization in this case (and don't need // to as there is only one chan to watch anyway). 
- svcIdx, _ := maxIndexAndWatchChForService(tx, serviceName, false, true, entMeta) + svcIdx, _ := maxIndexAndWatchChForService(tx, serviceName, false, true, entMeta, peerName) idx = lib.MaxUint64(idx, svcIdx) } @@ -2521,12 +2792,12 @@ func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, con ws.Add(iter.WatchCh()) } - return parseCheckServiceNodes(tx, fallbackWS, idx, results, entMeta, err) + return parseCheckServiceNodes(tx, fallbackWS, idx, results, entMeta, peerName, err) } // CheckServiceTagNodes is used to query all nodes and checks for a given // service, filtering out services that don't contain the given tag. -func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags []string, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags []string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2535,7 +2806,7 @@ func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: serviceName, EnterpriseMeta: *entMeta} + q := Query{Value: serviceName, EnterpriseMeta: *entMeta, PeerName: peerName} iter, err := tx.Get(tableServices, indexService, q) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) @@ -2554,8 +2825,8 @@ func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags } // Get the table index. 
- idx := maxIndexForService(tx, serviceName, serviceExists, true, entMeta) - return parseCheckServiceNodes(tx, ws, idx, results, entMeta, err) + idx := maxIndexForService(tx, serviceName, serviceExists, true, entMeta, peerName) + return parseCheckServiceNodes(tx, ws, idx, results, entMeta, peerName, err) } // GatewayServices is used to query all services associated with a gateway @@ -2601,23 +2872,37 @@ func (s *Store) ServiceNamesOfKind(ws memdb.WatchSet, kind structs.ServiceKind) tx := s.db.Txn(false) defer tx.Abort() - return serviceNamesOfKindTxn(tx, ws, kind) + wildcardMeta := structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier) + return serviceNamesOfKindTxn(tx, ws, kind, *wildcardMeta) } -func serviceNamesOfKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind) (uint64, []*KindServiceName, error) { - var names []*KindServiceName - iter, err := tx.Get(tableKindServiceNames, indexKindOnly, kind) +func serviceNamesOfKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta acl.EnterpriseMeta) (uint64, []*KindServiceName, error) { + iter, err := tx.Get(tableKindServiceNames, indexKind, Query{Value: string(kind), EnterpriseMeta: entMeta}) if err != nil { return 0, nil, err } + + // TODO(peering): Maybe delete this watch and rely on the max idx tables below, to avoid waking up on unrelated changes ws.Add(iter.WatchCh()) - idx := kindServiceNamesMaxIndex(tx, ws, kind) + var names []*KindServiceName for name := iter.Next(); name != nil; name = iter.Next() { ksn := name.(*KindServiceName) names = append(names, ksn) } + var idx uint64 + switch { + case entMeta.PartitionOrDefault() == structs.WildcardSpecifier: + idx = kindServiceNamesMaxIndex(tx, ws, kind.Normalized()) + + case entMeta.NamespaceOrDefault() == structs.WildcardSpecifier: + idx = kindServiceNamesMaxIndex(tx, ws, partitionedIndexEntryName(kind.Normalized(), entMeta.PartitionOrDefault())) + + default: + idx = kindServiceNamesMaxIndex(tx, ws, 
partitionedAndNamespacedIndexEntryName(kind.Normalized(), &entMeta)) + + } return idx, names, nil } @@ -2632,7 +2917,9 @@ func parseCheckServiceNodes( tx ReadTxn, ws memdb.WatchSet, idx uint64, services structs.ServiceNodes, entMeta *acl.EnterpriseMeta, - err error) (uint64, structs.CheckServiceNodes, error) { + peerName string, + err error, +) (uint64, structs.CheckServiceNodes, error) { if err != nil { return 0, nil, err } @@ -2643,9 +2930,16 @@ func parseCheckServiceNodes( return idx, nil, nil } + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } + // We don't want to track an unlimited number of nodes, so we pull a // top-level watch to use as a fallback. - allNodes, err := tx.Get(tableNodes, indexID+"_prefix", entMeta) + allNodes, err := tx.Get(tableNodes, indexID+"_prefix", Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed nodes lookup: %s", err) } @@ -2654,7 +2948,10 @@ func parseCheckServiceNodes( // We need a similar fallback for checks. Since services need the // status of node + service-specific checks, we pull in a top-level // watch over all checks. 
- allChecks, err := tx.Get(tableChecks, indexID+"_prefix", entMeta) + allChecks, err := tx.Get(tableChecks, indexID+"_prefix", Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed checks lookup: %s", err) } @@ -2666,6 +2963,7 @@ func parseCheckServiceNodes( watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{ Value: sn.Node, EnterpriseMeta: sn.EnterpriseMeta, + PeerName: sn.PeerName, }) if err != nil { return 0, nil, fmt.Errorf("failed node lookup: %s", err) @@ -2684,6 +2982,7 @@ func parseCheckServiceNodes( Node: sn.Node, Service: "", // node checks have no service EnterpriseMeta: *sn.EnterpriseMeta.WithWildcardNamespace(), + PeerName: sn.PeerName, } iter, err := tx.Get(tableChecks, indexNodeService, q) if err != nil { @@ -2699,6 +2998,7 @@ func parseCheckServiceNodes( Node: sn.Node, Service: sn.ServiceID, EnterpriseMeta: sn.EnterpriseMeta, + PeerName: sn.PeerName, } iter, err = tx.Get(tableChecks, indexNodeService, q) if err != nil { @@ -2722,7 +3022,7 @@ func parseCheckServiceNodes( // NodeInfo is used to generate a dump of a single node. The dump includes // all services and checks which are registered against the node. -func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *acl.EnterpriseMeta) (uint64, structs.NodeDump, error) { +func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.NodeDump, error) { tx := s.db.Txn(false) defer tx.Abort() @@ -2731,55 +3031,72 @@ func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *acl.Enterprise } // Get the table index. 
- idx := catalogMaxIndex(tx, entMeta, true) + idx := catalogMaxIndex(tx, entMeta, peerName, true) // Query the node by the passed node nodes, err := tx.Get(tableNodes, indexID, Query{ Value: node, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return 0, nil, fmt.Errorf("failed node lookup: %s", err) } ws.Add(nodes.WatchCh()) - return parseNodes(tx, ws, idx, nodes, entMeta) + return parseNodes(tx, ws, idx, nodes, entMeta, peerName) } // NodeDump is used to generate a dump of all nodes. This call is expensive // as it has to query every node, service, and check. The response can also // be quite large since there is currently no filtering applied. -func (s *Store) NodeDump(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.NodeDump, error) { +func (s *Store) NodeDump(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.NodeDump, error) { tx := s.db.Txn(false) defer tx.Abort() + if entMeta == nil { + entMeta = structs.NodeEnterpriseMetaInDefaultPartition() + } + // Get the table index. 
- idx := catalogMaxIndex(tx, entMeta, true) + idx := catalogMaxIndex(tx, entMeta, peerName, true) // Fetch all of the registered nodes - nodes, err := tx.Get(tableNodes, indexID+"_prefix", entMeta) + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + nodes, err := tx.Get(tableNodes, indexID+"_prefix", q) if err != nil { return 0, nil, fmt.Errorf("failed node lookup: %s", err) } ws.Add(nodes.WatchCh()) - return parseNodes(tx, ws, idx, nodes, entMeta) + return parseNodes(tx, ws, idx, nodes, entMeta, peerName) } -func (s *Store) ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) ServiceDump(ws memdb.WatchSet, kind structs.ServiceKind, useKind bool, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() if useKind { - return serviceDumpKindTxn(tx, ws, kind, entMeta) + return serviceDumpKindTxn(tx, ws, kind, entMeta, peerName) } else { - return serviceDumpAllTxn(tx, ws, entMeta) + return serviceDumpAllTxn(tx, ws, entMeta, peerName) } } -func serviceDumpAllTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func serviceDumpAllTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { // Get the table index - idx := catalogMaxIndexWatch(tx, ws, entMeta, true) + idx := catalogMaxIndexWatch(tx, ws, entMeta, "", true) - services, err := tx.Get(tableServices, indexID+"_prefix", entMeta) + if entMeta == nil { + entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() + } + + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + services, err := tx.Get(tableServices, indexID+"_prefix", q) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -2790,19 +3107,23 @@ func serviceDumpAllTxn(tx ReadTxn, ws 
memdb.WatchSet, entMeta *acl.EnterpriseMet results = append(results, sn) } - return parseCheckServiceNodes(tx, nil, idx, results, entMeta, err) + return parseCheckServiceNodes(tx, nil, idx, results, entMeta, peerName, err) } -func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) { +func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.CheckServiceNodes, error) { // unlike when we are dumping all services here we only need to watch the kind specific index entry for changing (or nodes, checks) // updating any services, nodes or checks will bump the appropriate service kind index so there is no need to watch any of the individual // entries - idx := catalogServiceKindMaxIndex(tx, ws, kind, entMeta) + idx := catalogServiceKindMaxIndex(tx, ws, kind, entMeta, peerName) if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: string(kind), EnterpriseMeta: *entMeta} + q := Query{ + Value: string(kind), + EnterpriseMeta: *entMeta, + PeerName: peerName, + } services, err := tx.Get(tableServices, indexKind, q) if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) @@ -2814,14 +3135,15 @@ func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, results = append(results, sn) } - return parseCheckServiceNodes(tx, nil, idx, results, entMeta, err) + return parseCheckServiceNodes(tx, nil, idx, results, entMeta, peerName, err) } // parseNodes takes an iterator over a set of nodes and returns a struct // containing the nodes along with all of their associated services // and/or health checks. 
+// TODO(peering): support parsing by peerName func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, - iter memdb.ResultIterator, entMeta *acl.EnterpriseMeta) (uint64, structs.NodeDump, error) { + iter memdb.ResultIterator, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.NodeDump, error) { if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() @@ -2829,7 +3151,11 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, // We don't want to track an unlimited number of services, so we pull a // top-level watch to use as a fallback. - allServices, err := tx.Get(tableServices, indexID+"_prefix", entMeta) + q := Query{ + EnterpriseMeta: *entMeta, + PeerName: peerName, + } + allServices, err := tx.Get(tableServices, indexID+"_prefix", q) if err != nil { return 0, nil, fmt.Errorf("failed services lookup: %s", err) } @@ -2851,13 +3177,14 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, ID: node.ID, Node: node.Node, Partition: node.Partition, + PeerName: node.PeerName, Address: node.Address, TaggedAddresses: node.TaggedAddresses, Meta: node.Meta, } // Query the node services - services, err := catalogServiceListByNode(tx, node.Node, entMeta, true) + services, err := catalogServiceListByNode(tx, node.Node, entMeta, node.PeerName, true) if err != nil { return 0, nil, fmt.Errorf("failed services lookup: %s", err) } @@ -2868,7 +3195,11 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, } // Query the service level checks - checks, err := catalogListChecksByNode(tx, Query{Value: node.Node, EnterpriseMeta: *entMeta}) + checks, err := catalogListChecksByNode(tx, Query{ + Value: node.Node, + EnterpriseMeta: *entMeta, + PeerName: node.PeerName, + }) if err != nil { return 0, nil, fmt.Errorf("failed node lookup: %s", err) } @@ -2886,6 +3217,7 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64, // checkSessionsTxn returns the IDs of all sessions associated with a health check func checkSessionsTxn(tx 
ReadTxn, hc *structs.HealthCheck) ([]*sessionCheck, error) { + // TODO(peering): what are implications for imported health checks? mappings, err := tx.Get(tableSessionChecks, indexNodeCheck, MultiQuery{Value: []string{hc.Node, string(hc.CheckID)}, EnterpriseMeta: *structs.DefaultEnterpriseMetaInPartition(hc.PartitionOrDefault())}) if err != nil { @@ -3343,7 +3675,11 @@ func (s *Store) collectGatewayServices(tx ReadTxn, ws memdb.WatchSet, iter memdb // TODO(ingress): How to handle index rolling back when a config entry is // deleted that references a service? // We might need something like the service_last_extinction index? -func serviceGatewayNodes(tx ReadTxn, ws memdb.WatchSet, service string, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta) (uint64, structs.ServiceNodes, error) { +func serviceGatewayNodes(tx ReadTxn, ws memdb.WatchSet, service string, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceNodes, error) { + if peerName != "" { + return 0, nil, nil + } + // Look up gateway name associated with the service gws, err := tx.Get(tableGatewayServices, indexService, structs.NewServiceName(service, entMeta)) if err != nil { @@ -3385,7 +3721,7 @@ func serviceGatewayNodes(tx ReadTxn, ws memdb.WatchSet, service string, kind str } // This prevents the index from sliding back if case all instances of the gateway service are deregistered - svcIdx := maxIndexForService(tx, mapping.Gateway.Name, exists, false, &mapping.Gateway.EnterpriseMeta) + svcIdx := maxIndexForService(tx, mapping.Gateway.Name, exists, false, &mapping.Gateway.EnterpriseMeta, structs.DefaultPeerKeyword) maxIdx = lib.MaxUint64(maxIdx, svcIdx) // Ensure that blocking queries wake up if the gateway-service mapping exists, but the gateway does not exist yet @@ -3481,7 +3817,11 @@ func (s *Store) ServiceTopology( if entMeta == nil { entMeta = structs.DefaultEnterpriseMetaInDefaultPartition() } - q := Query{Value: service, EnterpriseMeta: *entMeta} + q 
:= Query{ + Value: service, + EnterpriseMeta: *entMeta, + PeerName: structs.TODOPeerKeyword, + } idx, proxies, err := serviceNodesTxn(tx, ws, indexConnect, q) if err != nil { @@ -3596,7 +3936,7 @@ func (s *Store) ServiceTopology( upstreamDecisions[un.String()] = decision } - idx, unfilteredUpstreams, err := s.combinedServiceNodesTxn(tx, ws, upstreamNames) + idx, unfilteredUpstreams, err := s.combinedServiceNodesTxn(tx, ws, upstreamNames, structs.DefaultPeerKeyword) if err != nil { return 0, nil, fmt.Errorf("failed to get upstreams for %q: %v", sn.String(), err) } @@ -3714,7 +4054,7 @@ func (s *Store) ServiceTopology( downstreamDecisions[dn.String()] = decision } - idx, unfilteredDownstreams, err := s.combinedServiceNodesTxn(tx, ws, downstreamNames) + idx, unfilteredDownstreams, err := s.combinedServiceNodesTxn(tx, ws, downstreamNames, structs.DefaultPeerKeyword) if err != nil { return 0, nil, fmt.Errorf("failed to get downstreams for %q: %v", sn.String(), err) @@ -3763,14 +4103,14 @@ func (s *Store) ServiceTopology( // combinedServiceNodesTxn returns typical and connect endpoints for a list of services. // This enabled aggregating checks statuses across both. -func (s *Store) combinedServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, names []structs.ServiceName) (uint64, structs.CheckServiceNodes, error) { +func (s *Store) combinedServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, names []structs.ServiceName, peerName string) (uint64, structs.CheckServiceNodes, error) { var ( maxIdx uint64 resp structs.CheckServiceNodes ) for _, u := range names { // Collect typical then connect instances - idx, csn, err := checkServiceNodesTxn(tx, ws, u.Name, false, &u.EnterpriseMeta) + idx, csn, err := checkServiceNodesTxn(tx, ws, u.Name, false, &u.EnterpriseMeta, peerName) if err != nil { return 0, nil, err } @@ -3779,7 +4119,7 @@ func (s *Store) combinedServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, names []s } resp = append(resp, csn...) 
- idx, csn, err = checkServiceNodesTxn(tx, ws, u.Name, true, &u.EnterpriseMeta) + idx, csn, err = checkServiceNodesTxn(tx, ws, u.Name, true, &u.EnterpriseMeta, peerName) if err != nil { return 0, nil, err } @@ -3880,6 +4220,7 @@ func linkedFromRegistrationTxn(tx ReadTxn, ws memdb.WatchSet, service structs.Se // updateMeshTopology creates associations between the input service and its upstreams in the topology table func updateMeshTopology(tx WriteTxn, idx uint64, node string, svc *structs.NodeService, existing interface{}) error { + // TODO(peering): make this peering aware oldUpstreams := make(map[structs.ServiceName]bool) if e, ok := existing.(*structs.ServiceNode); ok { for _, u := range e.ServiceProxy.Upstreams { @@ -3960,6 +4301,7 @@ func updateMeshTopology(tx WriteTxn, idx uint64, node string, svc *structs.NodeS // cleanupMeshTopology removes a service from the mesh topology table // This is only safe to call when there are no more known instances of this proxy func cleanupMeshTopology(tx WriteTxn, idx uint64, service *structs.ServiceNode) error { + // TODO(peering): make this peering aware? 
if service.ServiceKind != structs.ServiceKindConnectProxy { return nil } @@ -4067,6 +4409,7 @@ func truncateGatewayServiceTopologyMappings(tx WriteTxn, idx uint64, gateway str } func upsertKindServiceName(tx WriteTxn, idx uint64, kind structs.ServiceKind, name structs.ServiceName) error { + // TODO(peering): make this peering aware q := KindServiceNameQuery{Name: name.Name, Kind: kind, EnterpriseMeta: name.EnterpriseMeta} existing, err := tx.First(tableKindServiceNames, indexID, q) if err != nil { @@ -4089,10 +4432,7 @@ func upsertKindServiceName(tx WriteTxn, idx uint64, kind structs.ServiceKind, na if err := tx.Insert(tableKindServiceNames, &ksn); err != nil { return fmt.Errorf("failed inserting %s/%s into %s: %s", kind, name.String(), tableKindServiceNames, err) } - if err := indexUpdateMaxTxn(tx, idx, kindServiceNameIndexName(kind)); err != nil { - return fmt.Errorf("failed updating %s index: %v", tableKindServiceNames, err) - } - return nil + return updateKindServiceNamesIndex(tx, idx, kind, name.EnterpriseMeta) } func cleanupKindServiceName(tx WriteTxn, idx uint64, name structs.ServiceName, kind structs.ServiceKind) error { @@ -4100,15 +4440,12 @@ func cleanupKindServiceName(tx WriteTxn, idx uint64, name structs.ServiceName, k if _, err := tx.DeleteAll(tableKindServiceNames, indexID, q); err != nil { return fmt.Errorf("failed to delete %s from %s: %s", name, tableKindServiceNames, err) } - - if err := indexUpdateMaxTxn(tx, idx, kindServiceNameIndexName(kind)); err != nil { - return fmt.Errorf("failed updating %s index: %v", tableKindServiceNames, err) - } - return nil + return updateKindServiceNamesIndex(tx, idx, kind, name.EnterpriseMeta) } // CatalogDump returns all the contents of the node, service and check tables. // In Enterprise, this will return entries across all partitions and namespaces. +// TODO(peering): make this peering aware?
func (s *Store) CatalogDump() (*structs.CatalogContents, error) { tx := s.db.Txn(false) contents := &structs.CatalogContents{} diff --git a/agent/consul/state/catalog_events.go b/agent/consul/state/catalog_events.go index 13c5c4ba0c..ba80a2c740 100644 --- a/agent/consul/state/catalog_events.go +++ b/agent/consul/state/catalog_events.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" ) @@ -17,33 +18,13 @@ import ( type EventSubjectService struct { Key string EnterpriseMeta acl.EnterpriseMeta + PeerName string overrideKey string overrideNamespace string overridePartition string } -// String satisfies the stream.Subject interface. -func (s EventSubjectService) String() string { - partition := s.EnterpriseMeta.PartitionOrDefault() - if v := s.overridePartition; v != "" { - partition = strings.ToLower(v) - } - - namespace := s.EnterpriseMeta.NamespaceOrDefault() - if v := s.overrideNamespace; v != "" { - namespace = strings.ToLower(v) - } - - key := s.Key - if v := s.overrideKey; v != "" { - key = v - } - key = strings.ToLower(key) - - return partition + "/" + namespace + "/" + key -} - // EventPayloadCheckServiceNode is used as the Payload for a stream.Event to // indicates changes to a CheckServiceNode for service health. 
// @@ -62,6 +43,7 @@ type EventPayloadCheckServiceNode struct { } func (e EventPayloadCheckServiceNode) HasReadPermission(authz acl.Authorizer) bool { + // TODO(peering): figure out how authz works for peered data return e.Value.CanRead(authz) == acl.Allow } @@ -76,6 +58,31 @@ func (e EventPayloadCheckServiceNode) Subject() stream.Subject { } } +func (e EventPayloadCheckServiceNode) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + return &pbsubscribe.Event{ + Index: idx, + Payload: &pbsubscribe.Event_ServiceHealth{ + ServiceHealth: &pbsubscribe.ServiceHealthUpdate{ + Op: e.Op, + CheckServiceNode: pbservice.NewCheckServiceNodeFromStructs(e.Value), + }, + }, + } +} + +func PBToStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta acl.EnterpriseMeta) *stream.SubscribeRequest { + return &stream.SubscribeRequest{ + Topic: req.Topic, + Subject: EventSubjectService{ + Key: req.Key, + EnterpriseMeta: entMeta, + PeerName: req.PeerName, + }, + Token: req.Token, + Index: req.Index, + } +} + // serviceHealthSnapshot returns a stream.SnapshotFunc that provides a snapshot // of stream.Events that describe the current state of a service health query. 
func (s *Store) ServiceHealthSnapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (index uint64, err error) { @@ -89,7 +96,7 @@ func (s *Store) ServiceHealthSnapshot(req stream.SubscribeRequest, buf stream.Sn return 0, fmt.Errorf("expected SubscribeRequest.Subject to be a: state.EventSubjectService, was a: %T", req.Subject) } - idx, nodes, err := checkServiceNodesTxn(tx, nil, subject.Key, connect, &subject.EnterpriseMeta) + idx, nodes, err := checkServiceNodesTxn(tx, nil, subject.Key, connect, &subject.EnterpriseMeta, subject.PeerName) if err != nil { return 0, err } @@ -127,6 +134,7 @@ type nodeServiceTuple struct { Node string ServiceID string EntMeta acl.EnterpriseMeta + PeerName string } func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTuple { @@ -134,6 +142,7 @@ func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTupl Node: strings.ToLower(sn.Node), ServiceID: sn.ServiceID, EntMeta: sn.EnterpriseMeta, + PeerName: sn.PeerName, } } @@ -142,6 +151,7 @@ func newNodeServiceTupleFromServiceHealthCheck(hc *structs.HealthCheck) nodeServ Node: strings.ToLower(hc.Node), ServiceID: hc.ServiceID, EntMeta: hc.EnterpriseMeta, + PeerName: hc.PeerName, } } @@ -153,6 +163,7 @@ type serviceChange struct { type nodeTuple struct { Node string Partition string + PeerName string } var serviceChangeIndirect = serviceChange{changeType: changeIndirect} @@ -286,7 +297,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event } // Rebuild events for all services on this node es, err := newServiceHealthEventsForNode(tx, changes.Index, node.Node, - structs.WildcardEnterpriseMetaInPartition(node.Partition)) + structs.WildcardEnterpriseMetaInPartition(node.Partition), node.PeerName) if err != nil { return nil, err } @@ -342,6 +353,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event q := Query{ Value: gs.Gateway.Name, EnterpriseMeta: gatewayName.EnterpriseMeta, + 
PeerName: structs.TODOPeerKeyword, } _, nodes, err := serviceNodesTxn(tx, nil, indexService, q) if err != nil { @@ -504,6 +516,8 @@ func connectEventsByServiceKind(tx ReadTxn, origEvent stream.Event) ([]stream.Ev case structs.ServiceKindTerminatingGateway: var result []stream.Event + // TODO(peering): handle terminating gateways somehow + sn := structs.ServiceName{ Name: node.Service.Service, EnterpriseMeta: node.Service.EnterpriseMeta, @@ -551,16 +565,17 @@ func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNod // given node. This mirrors some of the the logic in the oddly-named // parseCheckServiceNodes but is more efficient since we know they are all on // the same node. -func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta *acl.EnterpriseMeta) ([]stream.Event, error) { +func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta *acl.EnterpriseMeta, peerName string) ([]stream.Event, error) { services, err := tx.Get(tableServices, indexNode, Query{ Value: node, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return nil, err } - n, checksFunc, err := getNodeAndChecks(tx, node, entMeta) + n, checksFunc, err := getNodeAndChecks(tx, node, entMeta, peerName) if err != nil { return nil, err } @@ -578,11 +593,12 @@ func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta // getNodeAndNodeChecks returns a the node structure and a function that returns // the full list of checks for a specific service on that node. 
-func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta) (*structs.Node, serviceChecksFunc, error) { +func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta, peerName string) (*structs.Node, serviceChecksFunc, error) { // Fetch the node nodeRaw, err := tx.First(tableNodes, indexID, Query{ Value: node, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return nil, nil, err @@ -595,6 +611,7 @@ func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta) (*st iter, err := tx.Get(tableChecks, indexNode, Query{ Value: node, EnterpriseMeta: *entMeta, + PeerName: peerName, }) if err != nil { return nil, nil, err @@ -629,7 +646,7 @@ func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta) (*st type serviceChecksFunc func(serviceID string) structs.HealthChecks func newServiceHealthEventForService(tx ReadTxn, idx uint64, tuple nodeServiceTuple) (stream.Event, error) { - n, checksFunc, err := getNodeAndChecks(tx, tuple.Node, &tuple.EntMeta) + n, checksFunc, err := getNodeAndChecks(tx, tuple.Node, &tuple.EntMeta, tuple.PeerName) if err != nil { return stream.Event{}, err } @@ -638,6 +655,7 @@ func newServiceHealthEventForService(tx ReadTxn, idx uint64, tuple nodeServiceTu EnterpriseMeta: tuple.EntMeta, Node: tuple.Node, Service: tuple.ServiceID, + PeerName: tuple.PeerName, }) if err != nil { return stream.Event{}, err @@ -690,6 +708,7 @@ func newServiceHealthEventDeregister(idx uint64, sn *structs.ServiceNode) stream Node: &structs.Node{ Node: sn.Node, Partition: entMeta.PartitionOrEmpty(), + PeerName: sn.PeerName, }, Service: sn.ToNodeService(), } diff --git a/agent/consul/state/catalog_events_oss.go b/agent/consul/state/catalog_events_oss.go index d088a6cfd1..e59636d318 100644 --- a/agent/consul/state/catalog_events_oss.go +++ b/agent/consul/state/catalog_events_oss.go @@ -13,6 +13,7 @@ func (nst nodeServiceTuple) nodeTuple() nodeTuple { return nodeTuple{ Node: 
strings.ToLower(nst.Node), Partition: "", + PeerName: nst.PeerName, } } @@ -20,6 +21,7 @@ func newNodeTupleFromNode(node *structs.Node) nodeTuple { return nodeTuple{ Node: strings.ToLower(node.Node), Partition: "", + PeerName: node.PeerName, } } @@ -27,5 +29,20 @@ func newNodeTupleFromHealthCheck(hc *structs.HealthCheck) nodeTuple { return nodeTuple{ Node: strings.ToLower(hc.Node), Partition: "", + PeerName: hc.PeerName, } } + +// String satisfies the stream.Subject interface. +func (s EventSubjectService) String() string { + key := s.Key + if v := s.overrideKey; v != "" { + key = v + } + key = strings.ToLower(key) + + if s.PeerName == "" { + return key + } + return s.PeerName + "/" + key +} diff --git a/agent/consul/state/catalog_events_oss_test.go b/agent/consul/state/catalog_events_oss_test.go new file mode 100644 index 0000000000..ace7cfe712 --- /dev/null +++ b/agent/consul/state/catalog_events_oss_test.go @@ -0,0 +1,45 @@ +//go:build !consulent +// +build !consulent + +package state + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/structs" +) + +func TestEventPayloadCheckServiceNode_Subject_OSS(t *testing.T) { + for desc, tc := range map[string]struct { + evt EventPayloadCheckServiceNode + sub string + }{ + "mixed casing": { + EventPayloadCheckServiceNode{ + Value: &structs.CheckServiceNode{ + Service: &structs.NodeService{ + Service: "FoO", + }, + }, + }, + "foo", + }, + "override key": { + EventPayloadCheckServiceNode{ + Value: &structs.CheckServiceNode{ + Service: &structs.NodeService{ + Service: "foo", + }, + }, + overrideKey: "bar", + }, + "bar", + }, + } { + t.Run(desc, func(t *testing.T) { + require.Equal(t, tc.sub, tc.evt.Subject().String()) + }) + } +} diff --git a/agent/consul/state/catalog_events_test.go b/agent/consul/state/catalog_events_test.go index 1f6f6d885a..0b455543e3 100644 --- a/agent/consul/state/catalog_events_test.go +++ b/agent/consul/state/catalog_events_test.go @@ -16,49 +16,6 
@@ import ( "github.com/hashicorp/consul/types" ) -func TestEventPayloadCheckServiceNode_Subject(t *testing.T) { - for desc, tc := range map[string]struct { - evt EventPayloadCheckServiceNode - sub string - }{ - "default partition and namespace": { - EventPayloadCheckServiceNode{ - Value: &structs.CheckServiceNode{ - Service: &structs.NodeService{ - Service: "foo", - }, - }, - }, - "default/default/foo", - }, - "mixed casing": { - EventPayloadCheckServiceNode{ - Value: &structs.CheckServiceNode{ - Service: &structs.NodeService{ - Service: "FoO", - }, - }, - }, - "default/default/foo", - }, - "override key": { - EventPayloadCheckServiceNode{ - Value: &structs.CheckServiceNode{ - Service: &structs.NodeService{ - Service: "foo", - }, - }, - overrideKey: "bar", - }, - "default/default/bar", - }, - } { - t.Run(desc, func(t *testing.T) { - require.Equal(t, tc.sub, tc.evt.Subject().String()) - }) - } -} - func TestServiceHealthSnapshot(t *testing.T) { store := NewStateStore(nil) @@ -307,7 +264,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { return nil }, Mutate: func(s *Store, tx *txn) error { - return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil) + return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil, "") }, WantEvents: []stream.Event{ // Should only publish deregistration for that service @@ -327,7 +284,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { return nil }, Mutate: func(s *Store, tx *txn) error { - return s.deleteNodeTxn(tx, tx.Index, "node1", nil) + return s.deleteNodeTxn(tx, tx.Index, "node1", nil, "") }, WantEvents: []stream.Event{ // Should publish deregistration events for all services @@ -380,7 +337,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { return s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "web", regConnectNative), false) }, Mutate: func(s *Store, tx *txn) error { - return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil) + return s.deleteServiceTxn(tx, tx.Index, 
"node1", "web", nil, "") }, WantEvents: []stream.Event{ // We should see both a regular service dereg event and a connect one @@ -444,7 +401,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { }, Mutate: func(s *Store, tx *txn) error { // Delete only the sidecar - return s.deleteServiceTxn(tx, tx.Index, "node1", "web_sidecar_proxy", nil) + return s.deleteServiceTxn(tx, tx.Index, "node1", "web_sidecar_proxy", nil, "") }, WantEvents: []stream.Event{ // We should see both a regular service dereg event and a connect one @@ -910,7 +867,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { }, Mutate: func(s *Store, tx *txn) error { // Delete only the node-level check - if err := s.deleteCheckTxn(tx, tx.Index, "node1", "serf-health", nil); err != nil { + if err := s.deleteCheckTxn(tx, tx.Index, "node1", "serf-health", nil, ""); err != nil { return err } return nil @@ -964,11 +921,11 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { }, Mutate: func(s *Store, tx *txn) error { // Delete the service-level check for the main service - if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web", nil); err != nil { + if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web", nil, ""); err != nil { return err } // Also delete for a proxy - if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web_sidecar_proxy", nil); err != nil { + if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web_sidecar_proxy", nil, ""); err != nil { return err } return nil @@ -1029,10 +986,10 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { // In one transaction the operator moves the web service and it's // sidecar from node2 back to node1 and deletes them from node2 - if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web", nil); err != nil { + if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web", nil, ""); err != nil { return err } - if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web_sidecar_proxy", nil); err != nil { + if err := 
s.deleteServiceTxn(tx, tx.Index, "node2", "web_sidecar_proxy", nil, ""); err != nil { return err } @@ -1544,7 +1501,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { testServiceRegistration(t, "tgate1", regTerminatingGateway), false) }, Mutate: func(s *Store, tx *txn) error { - return s.deleteServiceTxn(tx, tx.Index, "node1", "srv1", nil) + return s.deleteServiceTxn(tx, tx.Index, "node1", "srv1", nil, "") }, WantEvents: []stream.Event{ testServiceHealthDeregistrationEvent(t, "srv1"), @@ -1649,7 +1606,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) { testServiceRegistration(t, "tgate1", regTerminatingGateway), false) }, Mutate: func(s *Store, tx *txn) error { - return s.deleteServiceTxn(tx, tx.Index, "node1", "tgate1", structs.DefaultEnterpriseMetaInDefaultPartition()) + return s.deleteServiceTxn(tx, tx.Index, "node1", "tgate1", structs.DefaultEnterpriseMetaInDefaultPartition(), "") }, WantEvents: []stream.Event{ testServiceHealthDeregistrationEvent(t, diff --git a/agent/consul/state/catalog_oss.go b/agent/consul/state/catalog_oss.go index 8a30d45892..b0c0c53376 100644 --- a/agent/consul/state/catalog_oss.go +++ b/agent/consul/state/catalog_oss.go @@ -15,54 +15,83 @@ import ( func withEnterpriseSchema(_ *memdb.DBSchema) {} -func serviceIndexName(name string, _ *acl.EnterpriseMeta) string { - return fmt.Sprintf("service.%s", name) +func serviceIndexName(name string, _ *acl.EnterpriseMeta, peerName string) string { + return peeredIndexEntryName(fmt.Sprintf("service.%s", name), peerName) } -func serviceKindIndexName(kind structs.ServiceKind, _ *acl.EnterpriseMeta) string { - return "service_kind." + kind.Normalized() +func serviceKindIndexName(kind structs.ServiceKind, _ *acl.EnterpriseMeta, peerName string) string { + base := "service_kind." 
+ kind.Normalized() + return peeredIndexEntryName(base, peerName) } -func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, entMeta *acl.EnterpriseMeta) error { +func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error { // overall nodes index if err := indexUpdateMaxTxn(tx, idx, tableNodes); err != nil { return fmt.Errorf("failed updating index: %s", err) } + // peered index + if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(tableNodes, peerName)); err != nil { + return fmt.Errorf("failed updating partitioned+peered index for nodes table: %w", err) + } + return nil } -func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta) error { +// catalogUpdateServicesIndexes upserts the max index for the entire services table with varying levels +// of granularity (no-op if `idx` is lower than what exists for that index key): +// - all services +// - all services in a specified peer (including internal) +func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error { // overall services index if err := indexUpdateMaxTxn(tx, idx, tableServices); err != nil { - return fmt.Errorf("failed updating index: %s", err) + return fmt.Errorf("failed updating index for services table: %w", err) + } + + // peered services index + if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(tableServices, peerName)); err != nil { + return fmt.Errorf("failed updating peered index for services table: %w", err) } return nil } -func catalogUpdateServiceKindIndexes(tx WriteTxn, kind structs.ServiceKind, idx uint64, _ *acl.EnterpriseMeta) error { +// catalogUpdateServiceKindIndexes upserts the max index for the ServiceKind with varying levels +// of granularity (no-op if `idx` is lower than what exists for that index key): +// - all services of ServiceKind +// - all services of ServiceKind in a specified peer (including internal) +func catalogUpdateServiceKindIndexes(tx 
WriteTxn, idx uint64, kind structs.ServiceKind, _ *acl.EnterpriseMeta, peerName string) error { + base := "service_kind." + kind.Normalized() // service-kind index - if err := indexUpdateMaxTxn(tx, idx, serviceKindIndexName(kind, nil)); err != nil { - return fmt.Errorf("failed updating index: %s", err) + if err := indexUpdateMaxTxn(tx, idx, base); err != nil { + return fmt.Errorf("failed updating index for service kind: %w", err) } + // peered index + if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(base, peerName)); err != nil { + return fmt.Errorf("failed updating peered index for service kind: %w", err) + } return nil } -func catalogUpdateServiceIndexes(tx WriteTxn, serviceName string, idx uint64, _ *acl.EnterpriseMeta) error { +func catalogUpdateServiceIndexes(tx WriteTxn, idx uint64, serviceName string, _ *acl.EnterpriseMeta, peerName string) error { // per-service index - if err := indexUpdateMaxTxn(tx, idx, serviceIndexName(serviceName, nil)); err != nil { + if err := indexUpdateMaxTxn(tx, idx, serviceIndexName(serviceName, nil, peerName)); err != nil { return fmt.Errorf("failed updating index: %s", err) } return nil } -func catalogUpdateServiceExtinctionIndex(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta) error { - if err := tx.Insert(tableIndex, &IndexEntry{indexServiceExtinction, idx}); err != nil { - return fmt.Errorf("failed updating missing service extinction index: %s", err) +func catalogUpdateServiceExtinctionIndex(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error { + if err := indexUpdateMaxTxn(tx, idx, indexServiceExtinction); err != nil { + return fmt.Errorf("failed updating missing service extinction index: %w", err) } + // update the peer index + if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(indexServiceExtinction, peerName)); err != nil { + return fmt.Errorf("failed updating missing service extinction peered index: %w", err) + } + return nil } @@ -75,14 +104,14 @@ func catalogInsertNode(tx WriteTxn, 
node *structs.Node) error { return fmt.Errorf("failed inserting node: %s", err) } - if err := catalogUpdateNodesIndexes(tx, node.ModifyIndex, node.GetEnterpriseMeta()); err != nil { + if err := catalogUpdateNodesIndexes(tx, node.ModifyIndex, node.GetEnterpriseMeta(), node.PeerName); err != nil { return err } // Update the node's service indexes as the node information is included // in health queries and we would otherwise miss node updates in some cases // for those queries. - if err := updateAllServiceIndexesOfNode(tx, node.ModifyIndex, node.Node, node.GetEnterpriseMeta()); err != nil { + if err := updateAllServiceIndexesOfNode(tx, node.ModifyIndex, node.Node, node.GetEnterpriseMeta(), node.PeerName); err != nil { return fmt.Errorf("failed updating index: %s", err) } @@ -95,73 +124,95 @@ func catalogInsertService(tx WriteTxn, svc *structs.ServiceNode) error { return fmt.Errorf("failed inserting service: %s", err) } - if err := catalogUpdateServicesIndexes(tx, svc.ModifyIndex, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServicesIndexes(tx, svc.ModifyIndex, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } - if err := catalogUpdateServiceIndexes(tx, svc.ServiceName, svc.ModifyIndex, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceIndexes(tx, svc.ModifyIndex, svc.ServiceName, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } - if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, svc.ModifyIndex, &svc.EnterpriseMeta); err != nil { + if err := catalogUpdateServiceKindIndexes(tx, svc.ModifyIndex, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil { return err } return nil } -func catalogNodesMaxIndex(tx ReadTxn, entMeta *acl.EnterpriseMeta) uint64 { - return maxIndexTxn(tx, tableNodes) +func catalogNodesMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) uint64 { + return maxIndexTxn(tx, peeredIndexEntryName(tableNodes, peerName)) } -func catalogServicesMaxIndex(tx ReadTxn, _ 
*acl.EnterpriseMeta) uint64 { - return maxIndexTxn(tx, tableServices) +func catalogServicesMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) uint64 { + return maxIndexTxn(tx, peeredIndexEntryName(tableServices, peerName)) } -func catalogServiceMaxIndex(tx ReadTxn, serviceName string, _ *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) { - return tx.FirstWatch(tableIndex, "id", serviceIndexName(serviceName, nil)) +func catalogServiceMaxIndex(tx ReadTxn, serviceName string, _ *acl.EnterpriseMeta, peerName string) (<-chan struct{}, interface{}, error) { + return tx.FirstWatch(tableIndex, indexID, serviceIndexName(serviceName, nil, peerName)) } -func catalogServiceKindMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta) uint64 { - return maxIndexWatchTxn(tx, ws, serviceKindIndexName(kind, nil)) +func catalogServiceKindMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, _ *acl.EnterpriseMeta, peerName string) uint64 { + return maxIndexWatchTxn(tx, ws, serviceKindIndexName(kind, nil, peerName)) } -func catalogServiceListNoWildcard(tx ReadTxn, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) { - return tx.Get(tableServices, indexID) -} - -func catalogServiceListByNode(tx ReadTxn, node string, _ *acl.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) { - return tx.Get(tableServices, indexNode, Query{Value: node}) -} - -func catalogServiceLastExtinctionIndex(tx ReadTxn, _ *acl.EnterpriseMeta) (interface{}, error) { - return tx.First(tableIndex, "id", indexServiceExtinction) -} - -func catalogMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, checks bool) uint64 { - if checks { - return maxIndexTxn(tx, tableNodes, tableServices, tableChecks) +func catalogServiceListNoWildcard(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) (memdb.ResultIterator, error) { + q := Query{ + PeerName: peerName, } - return maxIndexTxn(tx, tableNodes, tableServices) + return tx.Get(tableServices, indexID+"_prefix", q) } 
-func catalogMaxIndexWatch(tx ReadTxn, ws memdb.WatchSet, _ *acl.EnterpriseMeta, checks bool) uint64 { +func catalogServiceListByNode(tx ReadTxn, node string, _ *acl.EnterpriseMeta, peerName string, _ bool) (memdb.ResultIterator, error) { + return tx.Get(tableServices, indexNode, Query{Value: node, PeerName: peerName}) +} + +func catalogServiceLastExtinctionIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) (interface{}, error) { + return tx.First(tableIndex, indexID, peeredIndexEntryName(indexServiceExtinction, peerName)) +} + +func catalogMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string, checks bool) uint64 { if checks { - return maxIndexWatchTxn(tx, ws, tableNodes, tableServices, tableChecks) + return maxIndexTxn(tx, + peeredIndexEntryName(tableChecks, peerName), + peeredIndexEntryName(tableServices, peerName), + peeredIndexEntryName(tableNodes, peerName), + ) } - return maxIndexWatchTxn(tx, ws, tableNodes, tableServices) + return maxIndexTxn(tx, + peeredIndexEntryName(tableServices, peerName), + peeredIndexEntryName(tableNodes, peerName), + ) } -func catalogUpdateCheckIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta) error { +func catalogMaxIndexWatch(tx ReadTxn, ws memdb.WatchSet, _ *acl.EnterpriseMeta, peerName string, checks bool) uint64 { + // TODO(peering_indexes): pipe peerName here + if checks { + return maxIndexWatchTxn(tx, ws, + peeredIndexEntryName(tableChecks, peerName), + peeredIndexEntryName(tableServices, peerName), + peeredIndexEntryName(tableNodes, peerName), + ) + } + return maxIndexWatchTxn(tx, ws, + peeredIndexEntryName(tableServices, peerName), + peeredIndexEntryName(tableNodes, peerName), + ) +} + +func catalogUpdateCheckIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error { // update the universal index entry - if err := tx.Insert(tableIndex, &IndexEntry{tableChecks, idx}); err != nil { + if err := indexUpdateMaxTxn(tx, idx, tableChecks); err != nil { + return fmt.Errorf("failed updating 
index: %s", err) + } + + if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(tableChecks, peerName)); err != nil { return fmt.Errorf("failed updating index: %s", err) } return nil } -func catalogChecksMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta) uint64 { - return maxIndexTxn(tx, tableChecks) +func catalogChecksMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) uint64 { + return maxIndexTxn(tx, peeredIndexEntryName(tableChecks, peerName)) } func catalogListChecksByNode(tx ReadTxn, q Query) (memdb.ResultIterator, error) { @@ -174,7 +225,7 @@ func catalogInsertCheck(tx WriteTxn, chk *structs.HealthCheck, idx uint64) error return fmt.Errorf("failed inserting check: %s", err) } - if err := catalogUpdateCheckIndexes(tx, idx, &chk.EnterpriseMeta); err != nil { + if err := catalogUpdateCheckIndexes(tx, idx, &chk.EnterpriseMeta, chk.PeerName); err != nil { return err } @@ -207,3 +258,10 @@ func indexFromKindServiceName(arg interface{}) ([]byte, error) { return nil, fmt.Errorf("type must be KindServiceNameQuery or *KindServiceName: %T", arg) } } + +func updateKindServiceNamesIndex(tx WriteTxn, idx uint64, kind structs.ServiceKind, entMeta acl.EnterpriseMeta) error { + if err := indexUpdateMaxTxn(tx, idx, kindServiceNameIndexName(kind.Normalized())); err != nil { + return fmt.Errorf("failed updating %s table index: %v", tableKindServiceNames, err) + } + return nil +} diff --git a/agent/consul/state/catalog_oss_test.go b/agent/consul/state/catalog_oss_test.go index 9edaff833b..7ed7429fc9 100644 --- a/agent/consul/state/catalog_oss_test.go +++ b/agent/consul/state/catalog_oss_test.go @@ -19,6 +19,14 @@ func testIndexerTableChecks() map[string]indexerTestCase { CheckID: "CheckID", Status: "PASSING", } + objWPeer := &structs.HealthCheck{ + Node: "NoDe", + ServiceID: "SeRvIcE", + ServiceName: "ServiceName", + CheckID: "CheckID", + Status: "PASSING", + PeerName: "Peer1", + } return map[string]indexerTestCase{ indexID: { read: indexValue{ @@ -26,11 +34,11 @@ func 
testIndexerTableChecks() map[string]indexerTestCase { Node: "NoDe", CheckID: "CheckId", }, - expected: []byte("node\x00checkid\x00"), + expected: []byte("internal\x00node\x00checkid\x00"), }, write: indexValue{ source: obj, - expected: []byte("node\x00checkid\x00"), + expected: []byte("internal\x00node\x00checkid\x00"), }, prefix: []indexValue{ { @@ -39,28 +47,75 @@ func testIndexerTableChecks() map[string]indexerTestCase { }, { source: Query{Value: "nOdE"}, - expected: []byte("node\x00"), + expected: []byte("internal\x00node\x00"), + }, + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: NodeCheckQuery{ + Node: "NoDe", + CheckID: "CheckId", + PeerName: "Peer1", + }, + expected: []byte("peer1\x00node\x00checkid\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00node\x00checkid\x00"), + }, + prefix: []indexValue{ + { + source: Query{Value: "nOdE", + PeerName: "Peer1"}, + expected: []byte("peer1\x00node\x00"), + }, + }, }, }, }, indexStatus: { read: indexValue{ source: Query{Value: "PASSING"}, - expected: []byte("passing\x00"), + expected: []byte("internal\x00passing\x00"), }, write: indexValue{ source: obj, - expected: []byte("passing\x00"), + expected: []byte("internal\x00passing\x00"), + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{Value: "PASSING", PeerName: "Peer1"}, + expected: []byte("peer1\x00passing\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00passing\x00"), + }, + }, }, }, indexService: { read: indexValue{ source: Query{Value: "ServiceName"}, - expected: []byte("servicename\x00"), + expected: []byte("internal\x00servicename\x00"), }, write: indexValue{ source: obj, - expected: []byte("servicename\x00"), + expected: []byte("internal\x00servicename\x00"), + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{Value: "ServiceName", PeerName: "Peer1"}, + expected: []byte("peer1\x00servicename\x00"), + }, + write: indexValue{ + source: 
objWPeer, + expected: []byte("peer1\x00servicename\x00"), + }, + }, }, }, indexNodeService: { @@ -69,11 +124,27 @@ func testIndexerTableChecks() map[string]indexerTestCase { Node: "NoDe", Service: "SeRvIcE", }, - expected: []byte("node\x00service\x00"), + expected: []byte("internal\x00node\x00service\x00"), }, write: indexValue{ source: obj, - expected: []byte("node\x00service\x00"), + expected: []byte("internal\x00node\x00service\x00"), + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: NodeServiceQuery{ + Node: "NoDe", + PeerName: "Peer1", + Service: "SeRvIcE", + }, + expected: []byte("peer1\x00node\x00service\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00node\x00service\x00"), + }, + }, }, }, indexNode: { @@ -81,11 +152,26 @@ func testIndexerTableChecks() map[string]indexerTestCase { source: Query{ Value: "NoDe", }, - expected: []byte("node\x00"), + expected: []byte("internal\x00node\x00"), }, write: indexValue{ source: obj, - expected: []byte("node\x00"), + expected: []byte("internal\x00node\x00"), + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{ + Value: "NoDe", + PeerName: "Peer1", + }, + expected: []byte("peer1\x00node\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00node\x00"), + }, + }, }, }, } @@ -186,11 +272,11 @@ func testIndexerTableNodes() map[string]indexerTestCase { indexID: { read: indexValue{ source: Query{Value: "NoDeId"}, - expected: []byte("nodeid\x00"), + expected: []byte("internal\x00nodeid\x00"), }, write: indexValue{ source: &structs.Node{Node: "NoDeId"}, - expected: []byte("nodeid\x00"), + expected: []byte("internal\x00nodeid\x00"), }, prefix: []indexValue{ { @@ -203,38 +289,90 @@ func testIndexerTableNodes() map[string]indexerTestCase { }, { source: Query{Value: "NoDeId"}, - expected: []byte("nodeid\x00"), + expected: []byte("internal\x00nodeid\x00"), + }, + { + source: Query{}, + expected: []byte("internal\x00"), + }, + }, + 
extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{Value: "NoDeId", PeerName: "Peer1"}, + expected: []byte("peer1\x00nodeid\x00"), + }, + write: indexValue{ + source: &structs.Node{Node: "NoDeId", PeerName: "Peer1"}, + expected: []byte("peer1\x00nodeid\x00"), + }, + prefix: []indexValue{ + { + source: Query{PeerName: "Peer1"}, + expected: []byte("peer1\x00"), + }, + { + source: Query{Value: "NoDeId", PeerName: "Peer1"}, + expected: []byte("peer1\x00nodeid\x00"), + }, + }, }, }, }, indexUUID: { read: indexValue{ source: Query{Value: uuid}, - expected: uuidBuf, + expected: append([]byte("internal\x00"), uuidBuf...), }, write: indexValue{ source: &structs.Node{ ID: types.NodeID(uuid), Node: "NoDeId", }, - expected: uuidBuf, + expected: append([]byte("internal\x00"), uuidBuf...), }, prefix: []indexValue{ - { - source: (*acl.EnterpriseMeta)(nil), - expected: nil, - }, - { - source: acl.EnterpriseMeta{}, - expected: nil, - }, { // partial length source: Query{Value: uuid[:6]}, - expected: uuidBuf[:3], + expected: append([]byte("internal\x00"), uuidBuf[:3]...), }, { // full length source: Query{Value: uuid}, - expected: uuidBuf, + expected: append([]byte("internal\x00"), uuidBuf...), + }, + { + source: Query{}, + expected: []byte("internal\x00"), + }, + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{Value: uuid, PeerName: "Peer1"}, + expected: append([]byte("peer1\x00"), uuidBuf...), + }, + write: indexValue{ + source: &structs.Node{ + ID: types.NodeID(uuid), + PeerName: "Peer1", + Node: "NoDeId", + }, + expected: append([]byte("peer1\x00"), uuidBuf...), + }, + prefix: []indexValue{ + { // partial length + source: Query{Value: uuid[:6], PeerName: "Peer1"}, + expected: append([]byte("peer1\x00"), uuidBuf[:3]...), + }, + { // full length + source: Query{Value: uuid, PeerName: "Peer1"}, + expected: append([]byte("peer1\x00"), uuidBuf...), + }, + { + source: Query{PeerName: "Peer1"}, + expected: []byte("peer1\x00"), + }, + }, }, }, }, @@ 
-244,7 +382,7 @@ func testIndexerTableNodes() map[string]indexerTestCase { Key: "KeY", Value: "VaLuE", }, - expected: []byte("KeY\x00VaLuE\x00"), + expected: []byte("internal\x00KeY\x00VaLuE\x00"), }, writeMulti: indexValueMulti{ source: &structs.Node{ @@ -255,8 +393,34 @@ func testIndexerTableNodes() map[string]indexerTestCase { }, }, expected: [][]byte{ - []byte("MaP-kEy-1\x00mAp-VaL-1\x00"), - []byte("mAp-KeY-2\x00MaP-vAl-2\x00"), + []byte("internal\x00MaP-kEy-1\x00mAp-VaL-1\x00"), + []byte("internal\x00mAp-KeY-2\x00MaP-vAl-2\x00"), + }, + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: KeyValueQuery{ + Key: "KeY", + Value: "VaLuE", + PeerName: "Peer1", + }, + expected: []byte("peer1\x00KeY\x00VaLuE\x00"), + }, + writeMulti: indexValueMulti{ + source: &structs.Node{ + Node: "NoDeId", + Meta: map[string]string{ + "MaP-kEy-1": "mAp-VaL-1", + "mAp-KeY-2": "MaP-vAl-2", + }, + PeerName: "Peer1", + }, + expected: [][]byte{ + []byte("peer1\x00MaP-kEy-1\x00mAp-VaL-1\x00"), + []byte("peer1\x00mAp-KeY-2\x00MaP-vAl-2\x00"), + }, + }, }, }, }, @@ -271,6 +435,12 @@ func testIndexerTableServices() map[string]indexerTestCase { ServiceID: "SeRviCe", ServiceName: "ServiceName", } + objWPeer := &structs.ServiceNode{ + Node: "NoDeId", + ServiceID: "SeRviCe", + ServiceName: "ServiceName", + PeerName: "Peer1", + } return map[string]indexerTestCase{ indexID: { @@ -279,11 +449,11 @@ func testIndexerTableServices() map[string]indexerTestCase { Node: "NoDeId", Service: "SeRvIcE", }, - expected: []byte("nodeid\x00service\x00"), + expected: []byte("internal\x00nodeid\x00service\x00"), }, write: indexValue{ source: obj, - expected: []byte("nodeid\x00service\x00"), + expected: []byte("internal\x00nodeid\x00service\x00"), }, prefix: []indexValue{ { @@ -294,9 +464,39 @@ func testIndexerTableServices() map[string]indexerTestCase { source: acl.EnterpriseMeta{}, expected: nil, }, + { + source: Query{}, + expected: []byte("internal\x00"), + }, { source: Query{Value: "NoDeId"}, - 
expected: []byte("nodeid\x00"), + expected: []byte("internal\x00nodeid\x00"), + }, + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: NodeServiceQuery{ + Node: "NoDeId", + PeerName: "Peer1", + Service: "SeRvIcE", + }, + expected: []byte("peer1\x00nodeid\x00service\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00nodeid\x00service\x00"), + }, + prefix: []indexValue{ + { + source: Query{Value: "NoDeId", PeerName: "Peer1"}, + expected: []byte("peer1\x00nodeid\x00"), + }, + { + source: Query{PeerName: "Peer1"}, + expected: []byte("peer1\x00"), + }, + }, }, }, }, @@ -305,34 +505,61 @@ func testIndexerTableServices() map[string]indexerTestCase { source: Query{ Value: "NoDeId", }, - expected: []byte("nodeid\x00"), + expected: []byte("internal\x00nodeid\x00"), }, write: indexValue{ source: obj, - expected: []byte("nodeid\x00"), + expected: []byte("internal\x00nodeid\x00"), + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{ + Value: "NoDeId", + PeerName: "Peer1", + }, + expected: []byte("peer1\x00nodeid\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00nodeid\x00"), + }, + }, }, }, indexService: { read: indexValue{ source: Query{Value: "ServiceName"}, - expected: []byte("servicename\x00"), + expected: []byte("internal\x00servicename\x00"), }, write: indexValue{ source: obj, - expected: []byte("servicename\x00"), + expected: []byte("internal\x00servicename\x00"), + }, + extra: []indexerTestCase{ + { + read: indexValue{ + source: Query{Value: "ServiceName", PeerName: "Peer1"}, + expected: []byte("peer1\x00servicename\x00"), + }, + write: indexValue{ + source: objWPeer, + expected: []byte("peer1\x00servicename\x00"), + }, + }, }, }, indexConnect: { read: indexValue{ source: Query{Value: "ConnectName"}, - expected: []byte("connectname\x00"), + expected: []byte("internal\x00connectname\x00"), }, write: indexValue{ source: &structs.ServiceNode{ ServiceName: "ConnectName", 
ServiceConnect: structs.ServiceConnect{Native: true}, }, - expected: []byte("connectname\x00"), + expected: []byte("internal\x00connectname\x00"), }, extra: []indexerTestCase{ { @@ -344,7 +571,20 @@ func testIndexerTableServices() map[string]indexerTestCase { DestinationServiceName: "ConnectName", }, }, - expected: []byte("connectname\x00"), + expected: []byte("internal\x00connectname\x00"), + }, + }, + { + write: indexValue{ + source: &structs.ServiceNode{ + ServiceName: "ServiceName", + ServiceKind: structs.ServiceKindConnectProxy, + ServiceProxy: structs.ConnectProxyConfig{ + DestinationServiceName: "ConnectName", + }, + PeerName: "Peer1", + }, + expected: []byte("peer1\x00connectname\x00"), }, }, { @@ -362,18 +602,32 @@ func testIndexerTableServices() map[string]indexerTestCase { expectedIndexMissing: true, }, }, + { + read: indexValue{ + source: Query{Value: "ConnectName", PeerName: "Peer1"}, + expected: []byte("peer1\x00connectname\x00"), + }, + write: indexValue{ + source: &structs.ServiceNode{ + ServiceName: "ConnectName", + ServiceConnect: structs.ServiceConnect{Native: true}, + PeerName: "Peer1", + }, + expected: []byte("peer1\x00connectname\x00"), + }, + }, }, }, indexKind: { read: indexValue{ source: Query{Value: "connect-proxy"}, - expected: []byte("connect-proxy\x00"), + expected: []byte("internal\x00connect-proxy\x00"), }, write: indexValue{ source: &structs.ServiceNode{ ServiceKind: structs.ServiceKindConnectProxy, }, - expected: []byte("connect-proxy\x00"), + expected: []byte("internal\x00connect-proxy\x00"), }, extra: []indexerTestCase{ { @@ -382,7 +636,30 @@ func testIndexerTableServices() map[string]indexerTestCase { ServiceName: "ServiceName", ServiceKind: structs.ServiceKindTypical, }, - expected: []byte("\x00"), + expected: []byte("internal\x00\x00"), + }, + }, + { + write: indexValue{ + source: &structs.ServiceNode{ + ServiceName: "ServiceName", + ServiceKind: structs.ServiceKindTypical, + PeerName: "Peer1", + }, + expected: 
[]byte("peer1\x00\x00"), + }, + }, + { + read: indexValue{ + source: Query{Value: "connect-proxy", PeerName: "Peer1"}, + expected: []byte("peer1\x00connect-proxy\x00"), + }, + write: indexValue{ + source: &structs.ServiceNode{ + ServiceKind: structs.ServiceKindConnectProxy, + PeerName: "Peer1", + }, + expected: []byte("peer1\x00connect-proxy\x00"), }, }, }, @@ -440,7 +717,7 @@ func testIndexerTableKindServiceNames() map[string]indexerTestCase { }, indexKind: { read: indexValue{ - source: structs.ServiceKindConnectProxy, + source: Query{Value: string(structs.ServiceKindConnectProxy)}, expected: []byte("connect-proxy\x00"), }, write: indexValue{ diff --git a/agent/consul/state/catalog_schema.go b/agent/consul/state/catalog_schema.go index b2d0907dc8..9a2fbecadb 100644 --- a/agent/consul/state/catalog_schema.go +++ b/agent/consul/state/catalog_schema.go @@ -48,9 +48,9 @@ func nodesTableSchema() *memdb.TableSchema { AllowMissing: false, Unique: true, Indexer: indexerSingleWithPrefix{ - readIndex: indexFromQuery, - writeIndex: indexFromNode, - prefixIndex: prefixIndexFromQueryNoNamespace, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexFromNode), + prefixIndex: prefixIndexFromQueryWithPeer, }, }, indexUUID: { @@ -58,9 +58,9 @@ func nodesTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: true, Indexer: indexerSingleWithPrefix{ - readIndex: indexFromUUIDQuery, - writeIndex: indexIDFromNode, - prefixIndex: prefixIndexFromUUIDQuery, + readIndex: indexWithPeerName(indexFromUUIDQuery), + writeIndex: indexWithPeerName(indexIDFromNode), + prefixIndex: prefixIndexFromUUIDWithPeerQuery, }, }, indexMeta: { @@ -68,8 +68,8 @@ func nodesTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: false, Indexer: indexerMulti{ - readIndex: indexFromKeyValueQuery, - writeIndexMulti: indexMetaFromNode, + readIndex: indexWithPeerName(indexFromKeyValueQuery), + writeIndexMulti: multiIndexWithPeerName(indexMetaFromNode), }, }, }, @@ -146,9 
+146,9 @@ func servicesTableSchema() *memdb.TableSchema { AllowMissing: false, Unique: true, Indexer: indexerSingleWithPrefix{ - readIndex: indexFromNodeServiceQuery, - writeIndex: indexFromServiceNode, - prefixIndex: prefixIndexFromQuery, + readIndex: indexWithPeerName(indexFromNodeServiceQuery), + writeIndex: indexWithPeerName(indexFromServiceNode), + prefixIndex: prefixIndexFromQueryWithPeer, }, }, indexNode: { @@ -156,8 +156,8 @@ func servicesTableSchema() *memdb.TableSchema { AllowMissing: false, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexFromNodeIdentity, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexFromNodeIdentity), }, }, indexService: { @@ -165,8 +165,8 @@ func servicesTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexServiceNameFromServiceNode, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexServiceNameFromServiceNode), }, }, indexConnect: { @@ -174,8 +174,8 @@ func servicesTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexConnectNameFromServiceNode, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexConnectNameFromServiceNode), }, }, indexKind: { @@ -183,8 +183,8 @@ func servicesTableSchema() *memdb.TableSchema { AllowMissing: false, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexKindFromServiceNode, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexKindFromServiceNode), }, }, }, @@ -295,6 +295,61 @@ func indexKindFromServiceNode(raw interface{}) ([]byte, error) { return b.Bytes(), nil } +// indexWithPeerName adds peer name to the index. 
+func indexWithPeerName( + fn func(interface{}) ([]byte, error), +) func(interface{}) ([]byte, error) { + return func(raw interface{}) ([]byte, error) { + v, err := fn(raw) + if err != nil { + return nil, err + } + + n, ok := raw.(peerIndexable) + if !ok { + return nil, fmt.Errorf("type must be peerIndexable: %T", raw) + } + + peername := n.PeerOrEmpty() + if peername == "" { + peername = structs.LocalPeerKeyword + } + b := newIndexBuilder(len(v) + len(peername) + 1) + b.String(strings.ToLower(peername)) + b.Raw(v) + return b.Bytes(), nil + } +} + +// multiIndexWithPeerName adds peer name to multiple indices, and returns multiple indices. +func multiIndexWithPeerName( + fn func(interface{}) ([][]byte, error), +) func(interface{}) ([][]byte, error) { + return func(raw interface{}) ([][]byte, error) { + results, err := fn(raw) + if err != nil { + return nil, err + } + + n, ok := raw.(peerIndexable) + if !ok { + return nil, fmt.Errorf("type must be peerIndexable: %T", raw) + } + + peername := n.PeerOrEmpty() + if peername == "" { + peername = structs.LocalPeerKeyword + } + for i, v := range results { + b := newIndexBuilder(len(v) + len(peername) + 1) + b.String(strings.ToLower(peername)) + b.Raw(v) + results[i] = b.Bytes() + } + return results, nil + } +} + // checksTableSchema returns a new table schema used for storing and indexing // health check information. Health checks have a number of different attributes // we want to filter by, so this table is a bit more complex. 
@@ -307,9 +362,9 @@ func checksTableSchema() *memdb.TableSchema { AllowMissing: false, Unique: true, Indexer: indexerSingleWithPrefix{ - readIndex: indexFromNodeCheckQuery, - writeIndex: indexFromHealthCheck, - prefixIndex: prefixIndexFromQuery, + readIndex: indexWithPeerName(indexFromNodeCheckQuery), + writeIndex: indexWithPeerName(indexFromHealthCheck), + prefixIndex: prefixIndexFromQueryWithPeer, }, }, indexStatus: { @@ -317,8 +372,8 @@ func checksTableSchema() *memdb.TableSchema { AllowMissing: false, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexStatusFromHealthCheck, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexStatusFromHealthCheck), }, }, indexService: { @@ -326,8 +381,8 @@ func checksTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexServiceNameFromHealthCheck, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexServiceNameFromHealthCheck), }, }, indexNode: { @@ -335,8 +390,8 @@ func checksTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromQuery, - writeIndex: indexFromNodeIdentity, + readIndex: indexWithPeerName(indexFromQuery), + writeIndex: indexWithPeerName(indexFromNodeIdentity), }, }, indexNodeService: { @@ -344,8 +399,8 @@ func checksTableSchema() *memdb.TableSchema { AllowMissing: true, Unique: false, Indexer: indexerSingle{ - readIndex: indexFromNodeServiceQuery, - writeIndex: indexNodeServiceFromHealthCheck, + readIndex: indexWithPeerName(indexFromNodeServiceQuery), + writeIndex: indexWithPeerName(indexNodeServiceFromHealthCheck), }, }, }, @@ -588,11 +643,20 @@ type upstreamDownstream struct { // NodeCheckQuery is used to query the ID index of the checks table. 
type NodeCheckQuery struct { - Node string - CheckID string + Node string + CheckID string + PeerName string acl.EnterpriseMeta } +type peerIndexable interface { + PeerOrEmpty() string +} + +func (q NodeCheckQuery) PeerOrEmpty() string { + return q.PeerName +} + // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer // receiver for this method. Remove once that is fixed. func (q NodeCheckQuery) NamespaceOrDefault() string { @@ -680,7 +744,16 @@ type KindServiceName struct { structs.RaftIndex } +func (n *KindServiceName) PartitionOrDefault() string { + return n.Service.PartitionOrDefault() +} + +func (n *KindServiceName) NamespaceOrDefault() string { + return n.Service.NamespaceOrDefault() +} + func kindServiceNameTableSchema() *memdb.TableSchema { + // TODO(peering): make this peer-aware return &memdb.TableSchema{ Name: tableKindServiceNames, Indexes: map[string]*memdb.IndexSchema{ @@ -693,8 +766,8 @@ func kindServiceNameTableSchema() *memdb.TableSchema { writeIndex: indexFromKindServiceName, }, }, - indexKindOnly: { - Name: indexKindOnly, + indexKind: { + Name: indexKind, AllowMissing: false, Unique: false, Indexer: indexerSingle{ @@ -732,20 +805,20 @@ func indexFromKindServiceNameKindOnly(raw interface{}) ([]byte, error) { b.String(strings.ToLower(string(x.Kind))) return b.Bytes(), nil - case structs.ServiceKind: + case Query: var b indexBuilder - b.String(strings.ToLower(string(x))) + b.String(strings.ToLower(x.Value)) return b.Bytes(), nil default: - return nil, fmt.Errorf("type must be *KindServiceName or structs.ServiceKind: %T", raw) + return nil, fmt.Errorf("type must be *KindServiceName or Query: %T", raw) } } -func kindServiceNamesMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind) uint64 { +func kindServiceNamesMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind string) uint64 { return maxIndexWatchTxn(tx, ws, kindServiceNameIndexName(kind)) } -func kindServiceNameIndexName(kind structs.ServiceKind) string { - return 
"kind_service_names." + kind.Normalized() +func kindServiceNameIndexName(kind string) string { + return "kind_service_names." + kind } diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index c9860a9dfb..efd8628386 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib/stringslice" + "github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/types" ) @@ -30,21 +31,21 @@ func makeRandomNodeID(t *testing.T) types.NodeID { func TestStateStore_GetNodeID(t *testing.T) { s := testStateStore(t) - _, out, err := s.GetNodeID(types.NodeID("wrongId"), nil) + _, out, err := s.GetNodeID(types.NodeID("wrongId"), nil, "") if err == nil || out != nil || !strings.Contains(err.Error(), "node lookup by ID failed: index error: UUID (without hyphens) must be") { t.Errorf("want an error, nil value, err:=%q ; out:=%q", err.Error(), out) } - _, out, err = s.GetNodeID(types.NodeID("0123456789abcdefghijklmnopqrstuvwxyz"), nil) + _, out, err = s.GetNodeID(types.NodeID("0123456789abcdefghijklmnopqrstuvwxyz"), nil, "") if err == nil || out != nil || !strings.Contains(err.Error(), "node lookup by ID failed: index error: invalid UUID") { t.Errorf("want an error, nil value, err:=%q ; out:=%q", err, out) } - _, out, err = s.GetNodeID(types.NodeID("00a916bc-a357-4a19-b886-59419fcee50Z"), nil) + _, out, err = s.GetNodeID(types.NodeID("00a916bc-a357-4a19-b886-59419fcee50Z"), nil, "") if err == nil || out != nil || !strings.Contains(err.Error(), "node lookup by ID failed: index error: invalid UUID") { t.Errorf("want an error, nil value, err:=%q ; out:=%q", err, out) } - _, out, err = s.GetNodeID(types.NodeID("00a916bc-a357-4a19-b886-59419fcee506"), nil) + _, out, err = s.GetNodeID(types.NodeID("00a916bc-a357-4a19-b886-59419fcee506"), nil, "") if err != nil || out != nil { t.Errorf("do 
not want any error nor returned value, err:=%q ; out:=%q", err, out) } @@ -57,14 +58,14 @@ func TestStateStore_GetNodeID(t *testing.T) { } require.NoError(t, s.EnsureRegistration(1, req)) - _, out, err = s.GetNodeID(nodeID, nil) + _, out, err = s.GetNodeID(nodeID, nil, "") require.NoError(t, err) if out == nil || out.ID != nodeID { t.Fatalf("out should not be nil and contain nodeId, but was:=%#v", out) } // Case insensitive lookup should work as well - _, out, err = s.GetNodeID(types.NodeID("00a916bC-a357-4a19-b886-59419fceeAAA"), nil) + _, out, err = s.GetNodeID(types.NodeID("00a916bC-a357-4a19-b886-59419fceeAAA"), nil, "") require.NoError(t, err) if out == nil || out.ID != nodeID { t.Fatalf("out should not be nil and contain nodeId, but was:=%#v", out) @@ -72,30 +73,59 @@ func TestStateStore_GetNodeID(t *testing.T) { } func TestStateStore_GetNode(t *testing.T) { - s := testStateStore(t) + assertExists := func(t *testing.T, s *Store, node, peerName string, expectIndex uint64) { + idx, out, err := s.GetNode(node, nil, peerName) + require.NoError(t, err) + require.NotNil(t, out) + require.Equal(t, expectIndex, idx) + require.Equal(t, strings.ToLower(node), out.Node) + require.Equal(t, strings.ToLower(peerName), out.PeerName) + } + assertNotExist := func(t *testing.T, s *Store, node, peerName string) { + idx, out, err := s.GetNode(node, nil, peerName) + require.NoError(t, err) + require.Nil(t, out) + require.Equal(t, uint64(0), idx) + } - // initially does not exist - idx, out, err := s.GetNode("node1", nil) - require.NoError(t, err) - require.Nil(t, out) - require.Equal(t, uint64(0), idx) + t.Run("default peer", func(t *testing.T) { + s := testStateStore(t) - // Create it - testRegisterNode(t, s, 1, "node1") + // initially does not exist + assertNotExist(t, s, "node1", "") - // now exists - idx, out, err = s.GetNode("node1", nil) - require.NoError(t, err) - require.NotNil(t, out) - require.Equal(t, uint64(1), idx) - require.Equal(t, "node1", out.Node) + // Create it 
+ testRegisterNode(t, s, 1, "node1") - // Case insensitive lookup should work as well - idx, out, err = s.GetNode("NoDe1", nil) - require.NoError(t, err) - require.NotNil(t, out) - require.Equal(t, uint64(1), idx) - require.Equal(t, "node1", out.Node) + // now exists + assertExists(t, s, "node1", "", 1) + + // Case insensitive lookup should work as well + assertExists(t, s, "NoDe1", "", 1) + }) + + t.Run("random peer", func(t *testing.T) { + s := testStateStore(t) + + // initially do not exist + assertNotExist(t, s, "node1", "") + assertNotExist(t, s, "node1", "my-peer") + + // Create one with no peer, and one with a peer to test a peer-name crossing issue. + testRegisterNode(t, s, 1, "node1") + testRegisterNodeOpts(t, s, 2, "node1", func(n *structs.Node) error { + n.PeerName = "my-peer" + return nil + }) + + // now exist + assertExists(t, s, "node1", "", 1) + assertExists(t, s, "node1", "my-peer", 2) + + // Case insensitive lookup should work as well + assertExists(t, s, "NoDe1", "", 1) + assertExists(t, s, "NoDe1", "my-peer", 2) + }) } func TestStateStore_ensureNoNodeWithSimilarNameTxn(t *testing.T) { @@ -169,396 +199,570 @@ func TestStateStore_ensureNoNodeWithSimilarNameTxn(t *testing.T) { func TestStateStore_EnsureRegistration(t *testing.T) { t.Parallel() - s := testStateStore(t) - // Start with just a node. - nodeID := makeRandomNodeID(t) - req := &structs.RegisterRequest{ - ID: nodeID, - Node: "node1", - Address: "1.2.3.4", - TaggedAddresses: map[string]string{"hello": "world"}, - NodeMeta: map[string]string{"somekey": "somevalue"}, - } - if err := s.EnsureRegistration(1, req); err != nil { - t.Fatalf("err: %s", err) - } + run := func(t *testing.T, peerName string) { + s := testStateStore(t) + // Start with just a node. + nodeID := makeRandomNodeID(t) - // Retrieve the node and verify its contents. 
- verifyNode := func() { - node := &structs.Node{ - ID: nodeID, - Node: "node1", - Address: "1.2.3.4", - Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), - TaggedAddresses: map[string]string{"hello": "world"}, - Meta: map[string]string{"somekey": "somevalue"}, - RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 1}, + makeReq := func(f func(*structs.RegisterRequest)) *structs.RegisterRequest { + req := &structs.RegisterRequest{ + ID: nodeID, + Node: "node1", + Address: "1.2.3.4", + TaggedAddresses: map[string]string{"hello": "world"}, + NodeMeta: map[string]string{"somekey": "somevalue"}, + PeerName: peerName, + } + if f != nil { + f(req) + } + return req } - _, out, err := s.GetNode("node1", nil) - if err != nil { - t.Fatalf("got err %s want nil", err) + verifyNode := func(t *testing.T) { + node := &structs.Node{ + ID: nodeID, + Node: "node1", + Address: "1.2.3.4", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + TaggedAddresses: map[string]string{"hello": "world"}, + Meta: map[string]string{"somekey": "somevalue"}, + RaftIndex: structs.RaftIndex{CreateIndex: 1, ModifyIndex: 1}, + PeerName: peerName, + } + + _, out, err := s.GetNode("node1", nil, peerName) + require.NoError(t, err) + require.Equal(t, node, out) + + _, out2, err := s.GetNodeID(nodeID, nil, peerName) + require.NoError(t, err) + require.NotNil(t, out2) + require.Equal(t, out, out2) } - require.Equal(t, node, out) + verifyService := func(t *testing.T) { + svcmap := map[string]*structs.NodeService{ + "redis1": { + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"primary"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + RaftIndex: structs.RaftIndex{CreateIndex: 2, ModifyIndex: 2}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + PeerName: peerName, + }, + } - _, out2, err := s.GetNodeID(nodeID, nil) - if err != nil { - t.Fatalf("got err %s want nil", err) + idx, out, 
err := s.NodeServices(nil, "node1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(2), idx) + require.Equal(t, svcmap, out.Services) + + idx, r, err := s.NodeService("node1", "redis1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(2), idx) + require.Equal(t, svcmap["redis1"], r) + + // lookup service by node name + idx, sn, err := s.ServiceNode("", "node1", "redis1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(2), idx) + require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn) + + // lookup service by node ID + idx, sn, err = s.ServiceNode(string(nodeID), "", "redis1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(2), idx) + require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn) + + // lookup service by invalid node + _, _, err = s.ServiceNode("", "invalid-node", "redis1", nil, peerName) + testutil.RequireErrorContains(t, err, "node not found") + + // lookup service without node name or ID + _, _, err = s.ServiceNode("", "", "redis1", nil, peerName) + testutil.RequireErrorContains(t, err, "Node ID or name required to lookup the service") } - if out2 == nil { - t.Fatalf("out2 should not be nil") + verifyCheck := func(t *testing.T) { + checks := structs.HealthChecks{ + &structs.HealthCheck{ + Node: "node1", + CheckID: "check1", + Name: "check", + Status: "critical", + RaftIndex: structs.RaftIndex{CreateIndex: 3, ModifyIndex: 3}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + PeerName: peerName, + }, + } + + idx, out, err := s.NodeChecks(nil, "node1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(3), idx) + require.Equal(t, checks, out) + + idx, c, err := s.NodeCheck("node1", "check1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(3), idx) + require.Equal(t, checks[0], c) } - require.Equal(t, out, out2) - } - verifyNode() + verifyChecks := func(t *testing.T) { + checks := structs.HealthChecks{ + 
&structs.HealthCheck{ + Node: "node1", + CheckID: "check1", + Name: "check", + Status: "critical", + RaftIndex: structs.RaftIndex{CreateIndex: 3, ModifyIndex: 3}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + PeerName: peerName, + }, + &structs.HealthCheck{ + Node: "node1", + CheckID: "check2", + Name: "check", + Status: "critical", + ServiceID: "redis1", + ServiceName: "redis", + ServiceTags: []string{"primary"}, + RaftIndex: structs.RaftIndex{CreateIndex: 4, ModifyIndex: 4}, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + PeerName: peerName, + }, + } - // Add in a invalid service definition with too long Key value for Meta - req.Service = &structs.NodeService{ - ID: "redis1", - Service: "redis", - Address: "1.1.1.1", - Port: 8080, - Meta: map[string]string{strings.Repeat("a", 129): "somevalue"}, - Tags: []string{"primary"}, - } - if err := s.EnsureRegistration(9, req); err == nil { - t.Fatalf("Service should not have been registered since Meta is invalid") - } - - // Add in a service definition. - req.Service = &structs.NodeService{ - ID: "redis1", - Service: "redis", - Address: "1.1.1.1", - Port: 8080, - Tags: []string{"primary"}, - Weights: &structs.Weights{Passing: 1, Warning: 1}, - } - if err := s.EnsureRegistration(2, req); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify that the service got registered. 
- verifyService := func() { - svcmap := map[string]*structs.NodeService{ - "redis1": { - ID: "redis1", - Service: "redis", - Address: "1.1.1.1", - Port: 8080, - Tags: []string{"primary"}, - Weights: &structs.Weights{Passing: 1, Warning: 1}, - RaftIndex: structs.RaftIndex{CreateIndex: 2, ModifyIndex: 2}, - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), - }, + idx, out, err := s.NodeChecks(nil, "node1", nil, peerName) + require.NoError(t, err) + require.Equal(t, uint64(4), idx) + require.Equal(t, checks, out) } - idx, out, err := s.NodeServices(nil, "node1", nil) - if gotidx, wantidx := idx, uint64(2); err != nil || gotidx != wantidx { - t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) - } - require.Equal(t, svcmap, out.Services) + runStep(t, "add a node", func(t *testing.T) { + req := makeReq(nil) + require.NoError(t, s.EnsureRegistration(1, req)) - idx, r, err := s.NodeService("node1", "redis1", nil) - if gotidx, wantidx := idx, uint64(2); err != nil || gotidx != wantidx { - t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) - } - require.Equal(t, svcmap["redis1"], r) - } - verifyNode() - verifyService() + // Retrieve the node and verify its contents. + verifyNode(t) + }) - // Add in a top-level check. 
- req.Check = &structs.HealthCheck{ - Node: "node1", - CheckID: "check1", - Name: "check", - } - if err := s.EnsureRegistration(3, req); err != nil { - t.Fatalf("err: %s", err) + runStep(t, "add a node with invalid meta", func(t *testing.T) { + // Add in a invalid service definition with too long Key value for Meta + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Meta: map[string]string{strings.Repeat("a", 129): "somevalue"}, + Tags: []string{"primary"}, + PeerName: peerName, + } + }) + testutil.RequireErrorContains(t, s.EnsureRegistration(9, req), `Key is too long (limit: 128 characters)`) + }) + + // Add in a service definition. + runStep(t, "add a service definition", func(t *testing.T) { + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"primary"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + PeerName: peerName, + } + }) + require.NoError(t, s.EnsureRegistration(2, req)) + + // Verify that the service got registered. + verifyNode(t) + verifyService(t) + }) + + // Add in a top-level check. + runStep(t, "add a top level check", func(t *testing.T) { + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"primary"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + PeerName: peerName, + } + req.Check = &structs.HealthCheck{ + Node: "node1", + CheckID: "check1", + Name: "check", + PeerName: peerName, + } + }) + require.NoError(t, s.EnsureRegistration(3, req)) + + // Verify that the check got registered. + verifyNode(t) + verifyService(t) + verifyCheck(t) + }) + + // Add a service check which should populate the ServiceName + // and ServiceTags fields in the response. 
+ runStep(t, "add a service check", func(t *testing.T) { + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"primary"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + PeerName: peerName, + } + req.Check = &structs.HealthCheck{ + Node: "node1", + CheckID: "check1", + Name: "check", + PeerName: peerName, + } + req.Checks = structs.HealthChecks{ + &structs.HealthCheck{ + Node: "node1", + CheckID: "check2", + Name: "check", + ServiceID: "redis1", + PeerName: peerName, + }, + } + }) + require.NoError(t, s.EnsureRegistration(4, req)) + + // Verify that the additional check got registered. + verifyNode(t) + verifyService(t) + verifyChecks(t) + }) + + // Try to register a check for some other node (top-level check). + runStep(t, "try to register a check for some other node via the top level check", func(t *testing.T) { + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"primary"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + PeerName: peerName, + } + req.Check = &structs.HealthCheck{ + Node: "nope", + CheckID: "check1", + Name: "check", + PeerName: peerName, + } + req.Checks = structs.HealthChecks{ + &structs.HealthCheck{ + Node: "node1", + CheckID: "check2", + Name: "check", + ServiceID: "redis1", + PeerName: peerName, + }, + } + }) + testutil.RequireErrorContains(t, s.EnsureRegistration(5, req), `does not match node`) + verifyNode(t) + verifyService(t) + verifyChecks(t) + }) + + runStep(t, "try to register a check for some other node via the checks array", func(t *testing.T) { + // Try to register a check for some other node (checks array). 
+ req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Tags: []string{"primary"}, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + PeerName: peerName, + } + req.Checks = structs.HealthChecks{ + &structs.HealthCheck{ + Node: "nope", + CheckID: "check2", + Name: "check", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + PeerName: peerName, + }, + } + }) + testutil.RequireErrorContains(t, s.EnsureRegistration(6, req), `does not match node`) + verifyNode(t) + verifyService(t) + verifyChecks(t) + }) } - // Verify that the check got registered. - verifyCheck := func() { - checks := structs.HealthChecks{ - &structs.HealthCheck{ - Node: "node1", - CheckID: "check1", - Name: "check", - Status: "critical", - RaftIndex: structs.RaftIndex{CreateIndex: 3, ModifyIndex: 3}, - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), - }, - } + t.Run("default peer", func(t *testing.T) { + run(t, structs.DefaultPeerKeyword) + }) - idx, out, err := s.NodeChecks(nil, "node1", nil) - if gotidx, wantidx := idx, uint64(3); err != nil || gotidx != wantidx { - t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) - } - require.Equal(t, checks, out) - - idx, c, err := s.NodeCheck("node1", "check1", nil) - if gotidx, wantidx := idx, uint64(3); err != nil || gotidx != wantidx { - t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) - } - require.Equal(t, checks[0], c) - } - verifyNode() - verifyService() - verifyCheck() - - // Add a service check which should populate the ServiceName - // and ServiceTags fields in the response. - req.Checks = structs.HealthChecks{ - &structs.HealthCheck{ - Node: "node1", - CheckID: "check2", - Name: "check", - ServiceID: "redis1", - }, - } - if err := s.EnsureRegistration(4, req); err != nil { - t.Fatalf("err: %s", err) - } - - // Verify that the additional check got registered. 
- verifyNode() - verifyService() - verifyChecks := func() { - checks := structs.HealthChecks{ - &structs.HealthCheck{ - Node: "node1", - CheckID: "check1", - Name: "check", - Status: "critical", - RaftIndex: structs.RaftIndex{CreateIndex: 3, ModifyIndex: 3}, - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), - }, - &structs.HealthCheck{ - Node: "node1", - CheckID: "check2", - Name: "check", - Status: "critical", - ServiceID: "redis1", - ServiceName: "redis", - ServiceTags: []string{"primary"}, - RaftIndex: structs.RaftIndex{CreateIndex: 4, ModifyIndex: 4}, - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), - }, - } - - idx, out, err := s.NodeChecks(nil, "node1", nil) - if gotidx, wantidx := idx, uint64(4); err != nil || gotidx != wantidx { - t.Fatalf("got err, idx: %s, %d want nil, %d", err, gotidx, wantidx) - } - require.Equal(t, checks, out) - } - verifyChecks() - - // Try to register a check for some other node (top-level check). - req.Check = &structs.HealthCheck{ - Node: "nope", - CheckID: "check1", - Name: "check", - } - err := s.EnsureRegistration(5, req) - if err == nil || !strings.Contains(err.Error(), "does not match node") { - t.Fatalf("err: %s", err) - } - verifyNode() - verifyService() - verifyChecks() - - // Try to register a check for some other node (checks array). 
- req.Check = nil - req.Checks = structs.HealthChecks{ - &structs.HealthCheck{ - Node: "nope", - CheckID: "check2", - Name: "check", - EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), - }, - } - err = s.EnsureRegistration(6, req) - if err == nil || !strings.Contains(err.Error(), "does not match node") { - t.Fatalf("err: %s", err) - } - verifyNode() - verifyService() - verifyChecks() + t.Run("random peer", func(t *testing.T) { + run(t, "my-peer") + }) } func TestStateStore_EnsureRegistration_Restore(t *testing.T) { - s := testStateStore(t) + const ( + nodeID = "099eac9d-8e3e-464b-b3f5-8d7dcfcf9f71" + nodeName = "node1" + ) - // Start with just a node. - req := &structs.RegisterRequest{ - ID: makeRandomNodeID(t), - Node: "node1", - Address: "1.2.3.4", - RaftIndex: structs.RaftIndex{ - CreateIndex: 1, - ModifyIndex: 1, - }, - } - nodeID := string(req.ID) - nodeName := req.Node - restore := s.Restore() - if err := restore.Registration(1, req); err != nil { - t.Fatalf("err: %s", err) - } - restore.Commit() - - // Retrieve the node and verify its contents. 
- verifyNode := func(nodeLookup string) { - _, out, err := s.GetNode(nodeLookup, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if out == nil { - _, out, err = s.GetNodeID(types.NodeID(nodeLookup), nil) - if err != nil { - t.Fatalf("err: %s", err) + run := func(t *testing.T, peerName string) { + verifyNode := func(t *testing.T, s *Store, nodeLookup string) { + idx, out, err := s.GetNode(nodeLookup, nil, peerName) + require.NoError(t, err) + byID := false + if out == nil { + _, out, err = s.GetNodeID(types.NodeID(nodeLookup), nil, peerName) + require.NoError(t, err) + byID = true } + + require.NotNil(t, out) + require.Equal(t, uint64(1), idx) + + require.Equal(t, "1.2.3.4", out.Address) + if byID { + require.Equal(t, nodeLookup, string(out.ID)) + } else { + require.Equal(t, nodeLookup, out.Node) + } + require.Equal(t, peerName, out.PeerName) + require.Equal(t, uint64(1), out.CreateIndex) + require.Equal(t, uint64(1), out.ModifyIndex) + } + verifyService := func(t *testing.T, s *Store, nodeLookup string) { + idx, out, err := s.NodeServices(nil, nodeLookup, nil, peerName) + require.NoError(t, err) + + require.Len(t, out.Services, 1) + require.Equal(t, uint64(2), idx) + svc := out.Services["redis1"] + + require.Equal(t, "redis1", svc.ID) + require.Equal(t, "redis", svc.Service) + require.Equal(t, peerName, svc.PeerName) + require.Equal(t, "1.1.1.1", svc.Address) + require.Equal(t, 8080, svc.Port) + require.Equal(t, uint64(2), svc.CreateIndex) + require.Equal(t, uint64(2), svc.ModifyIndex) + } + verifyCheck := func(t *testing.T, s *Store) { + idx, out, err := s.NodeChecks(nil, nodeName, nil, peerName) + require.NoError(t, err) + + require.Len(t, out, 1) + require.Equal(t, uint64(3), idx) + + c := out[0] + + require.Equal(t, strings.ToUpper(nodeName), c.Node) + require.Equal(t, "check1", string(c.CheckID)) + require.Equal(t, "check", c.Name) + require.Equal(t, peerName, c.PeerName) + require.Equal(t, uint64(3), c.CreateIndex) + require.Equal(t, uint64(3), 
c.ModifyIndex) + } + verifyChecks := func(t *testing.T, s *Store) { + idx, out, err := s.NodeChecks(nil, nodeName, nil, peerName) + require.NoError(t, err) + + require.Len(t, out, 2) + require.Equal(t, uint64(4), idx) + + c1 := out[0] + require.Equal(t, strings.ToUpper(nodeName), c1.Node) + require.Equal(t, "check1", string(c1.CheckID)) + require.Equal(t, "check", c1.Name) + require.Equal(t, peerName, c1.PeerName) + require.Equal(t, uint64(3), c1.CreateIndex) + require.Equal(t, uint64(3), c1.ModifyIndex) + + c2 := out[1] + require.Equal(t, nodeName, c2.Node) + require.Equal(t, "check2", string(c2.CheckID)) + require.Equal(t, "check", c2.Name) + require.Equal(t, peerName, c2.PeerName) + require.Equal(t, uint64(4), c2.CreateIndex) + require.Equal(t, uint64(4), c2.ModifyIndex) } - if out == nil || out.Address != "1.2.3.4" || - !(out.Node == nodeLookup || string(out.ID) == nodeLookup) || - out.CreateIndex != 1 || out.ModifyIndex != 1 { - t.Fatalf("bad node returned: %#v", out) - } - } - verifyNode(nodeID) - verifyNode(nodeName) - - // Add in a service definition. - req.Service = &structs.NodeService{ - ID: "redis1", - Service: "redis", - Address: "1.1.1.1", - Port: 8080, - Weights: &structs.Weights{Passing: 1, Warning: 1}, - RaftIndex: structs.RaftIndex{ - CreateIndex: 2, - ModifyIndex: 2, - }, - } - restore = s.Restore() - if err := restore.Registration(2, req); err != nil { - t.Fatalf("err: %s", err) - } - restore.Commit() - - // Verify that the service got registered. 
- verifyService := func(nodeLookup string) { - idx, out, err := s.NodeServices(nil, nodeLookup, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 2 { - t.Fatalf("bad index: %d", idx) - } - if len(out.Services) != 1 { - t.Fatalf("bad: %#v", out.Services) - } - s := out.Services["redis1"] - if s.ID != "redis1" || s.Service != "redis" || - s.Address != "1.1.1.1" || s.Port != 8080 || - s.CreateIndex != 2 || s.ModifyIndex != 2 { - t.Fatalf("bad service returned: %#v", s) - } - } - - // Add in a top-level check. - // - // Verify that node name references in checks are case-insensitive during - // restore. - req.Check = &structs.HealthCheck{ - Node: strings.ToUpper(nodeName), - CheckID: "check1", - Name: "check", - RaftIndex: structs.RaftIndex{ - CreateIndex: 3, - ModifyIndex: 3, - }, - } - restore = s.Restore() - if err := restore.Registration(3, req); err != nil { - t.Fatalf("err: %s", err) - } - restore.Commit() - - // Verify that the check got registered. - verifyCheck := func() { - idx, out, err := s.NodeChecks(nil, nodeName, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 3 { - t.Fatalf("bad index: %d", idx) - } - if len(out) != 1 { - t.Fatalf("bad: %#v", out) - } - c := out[0] - if c.Node != strings.ToUpper(nodeName) || c.CheckID != "check1" || c.Name != "check" || - c.CreateIndex != 3 || c.ModifyIndex != 3 { - t.Fatalf("bad check returned: %#v", c) - } - } - verifyNode(nodeID) - verifyNode(nodeName) - verifyService(nodeID) - verifyService(nodeName) - verifyCheck() - - // Add in another check via the slice. - req.Checks = structs.HealthChecks{ - &structs.HealthCheck{ - Node: nodeName, - CheckID: "check2", - Name: "check", - RaftIndex: structs.RaftIndex{ - CreateIndex: 4, - ModifyIndex: 4, - }, - }, - } - restore = s.Restore() - if err := restore.Registration(4, req); err != nil { - t.Fatalf("err: %s", err) - } - restore.Commit() - - // Verify that the additional check got registered. 
- verifyNode(nodeID) - verifyNode(nodeName) - verifyService(nodeID) - verifyService(nodeName) - func() { - idx, out, err := s.NodeChecks(nil, nodeName, nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 4 { - t.Fatalf("bad index: %d", idx) - } - if len(out) != 2 { - t.Fatalf("bad: %#v", out) - } - c1 := out[0] - if c1.Node != strings.ToUpper(nodeName) || c1.CheckID != "check1" || c1.Name != "check" || - c1.CreateIndex != 3 || c1.ModifyIndex != 3 { - t.Fatalf("bad check returned, should not be modified: %#v", c1) + makeReq := func(f func(*structs.RegisterRequest)) *structs.RegisterRequest { + req := &structs.RegisterRequest{ + ID: types.NodeID(nodeID), + Node: nodeName, + Address: "1.2.3.4", + RaftIndex: structs.RaftIndex{ + CreateIndex: 1, + ModifyIndex: 1, + }, + PeerName: peerName, + } + if f != nil { + f(req) + } + return req } - c2 := out[1] - if c2.Node != nodeName || c2.CheckID != "check2" || c2.Name != "check" || - c2.CreateIndex != 4 || c2.ModifyIndex != 4 { - t.Fatalf("bad check returned: %#v", c2) - } - }() + s := testStateStore(t) + + // Start with just a node. + runStep(t, "add a node", func(t *testing.T) { + req := makeReq(nil) + restore := s.Restore() + require.NoError(t, restore.Registration(1, req)) + require.NoError(t, restore.Commit()) + + // Retrieve the node and verify its contents. + verifyNode(t, s, nodeID) + verifyNode(t, s, nodeName) + }) + + // Add in a service definition. + runStep(t, "add a service definition", func(t *testing.T) { + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 2, + ModifyIndex: 2, + }, + PeerName: peerName, + } + }) + restore := s.Restore() + require.NoError(t, restore.Registration(2, req)) + require.NoError(t, restore.Commit()) + + // Verify that the service got registered. 
+ verifyNode(t, s, nodeID) + verifyNode(t, s, nodeName) + verifyService(t, s, nodeID) + verifyService(t, s, nodeName) + }) + + runStep(t, "add a top-level check", func(t *testing.T) { + // Add in a top-level check. + // + // Verify that node name references in checks are case-insensitive during + // restore. + req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 2, + ModifyIndex: 2, + }, + PeerName: peerName, + } + req.Check = &structs.HealthCheck{ + Node: strings.ToUpper(nodeName), + CheckID: "check1", + Name: "check", + RaftIndex: structs.RaftIndex{ + CreateIndex: 3, + ModifyIndex: 3, + }, + PeerName: peerName, + } + }) + restore := s.Restore() + require.NoError(t, restore.Registration(3, req)) + require.NoError(t, restore.Commit()) + + // Verify that the check got registered. + verifyNode(t, s, nodeID) + verifyNode(t, s, nodeName) + verifyService(t, s, nodeID) + verifyService(t, s, nodeName) + verifyCheck(t, s) + }) + + runStep(t, "add another check via the slice", func(t *testing.T) { + // Add in another check via the slice. 
+ req := makeReq(func(req *structs.RegisterRequest) { + req.Service = &structs.NodeService{ + ID: "redis1", + Service: "redis", + Address: "1.1.1.1", + Port: 8080, + Weights: &structs.Weights{Passing: 1, Warning: 1}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 2, + ModifyIndex: 2, + }, + PeerName: peerName, + } + req.Check = &structs.HealthCheck{ + Node: strings.ToUpper(nodeName), + CheckID: "check1", + Name: "check", + RaftIndex: structs.RaftIndex{ + CreateIndex: 3, + ModifyIndex: 3, + }, + PeerName: peerName, + } + req.Checks = structs.HealthChecks{ + &structs.HealthCheck{ + Node: nodeName, + CheckID: "check2", + Name: "check", + RaftIndex: structs.RaftIndex{ + CreateIndex: 4, + ModifyIndex: 4, + }, + PeerName: peerName, + }, + } + }) + restore := s.Restore() + require.NoError(t, restore.Registration(4, req)) + require.NoError(t, restore.Commit()) + + // Verify that the additional check got registered. + verifyNode(t, s, nodeID) + verifyNode(t, s, nodeName) + verifyService(t, s, nodeID) + verifyService(t, s, nodeName) + verifyChecks(t, s) + }) + } + + t.Run("default peer", func(t *testing.T) { + run(t, structs.DefaultPeerKeyword) + }) + + t.Run("random peer", func(t *testing.T) { + run(t, "my-peer") + }) } func deprecatedEnsureNodeWithoutIDCanRegister(t *testing.T, s *Store, nodeName string, txIdx uint64) { @@ -573,7 +777,7 @@ func deprecatedEnsureNodeWithoutIDCanRegister(t *testing.T, s *Store, nodeName s if err := s.EnsureNode(txIdx, in); err != nil { t.Fatalf("err: %s", err) } - idx, out, err := s.GetNode(nodeName, nil) + idx, out, err := s.GetNode(nodeName, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -603,7 +807,7 @@ func TestStateStore_EnsureNodeDeprecated(t *testing.T) { t.Fatalf("err: %v", err) } // Retrieve the node again - idx, out, err := s.GetNode(firstNodeName, nil) + idx, out, err := s.GetNode(firstNodeName, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -626,7 +830,7 @@ func TestStateStore_EnsureNodeDeprecated(t *testing.T) { 
t.Fatalf("err: %v", err) } // Retrieve the node again - idx, out, err = s.GetNode(firstNodeName, nil) + idx, out, err = s.GetNode(firstNodeName, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -699,11 +903,11 @@ func TestNodeRenamingNodes(t *testing.T) { t.Fatalf("err: %s", err) } - if _, node, err := s.GetNodeID(nodeID1, nil); err != nil || node == nil || node.ID != nodeID1 { + if _, node, err := s.GetNodeID(nodeID1, nil, ""); err != nil || node == nil || node.ID != nodeID1 { t.Fatalf("err: %s, node:= %q", err, node) } - if _, node, err := s.GetNodeID(nodeID2, nil); err != nil && node == nil || node.ID != nodeID2 { + if _, node, err := s.GetNodeID(nodeID2, nil, ""); err != nil && node == nil || node.ID != nodeID2 { t.Fatalf("err: %s", err) } @@ -748,13 +952,13 @@ func TestNodeRenamingNodes(t *testing.T) { } // Retrieve the node again - idx, out, err := s.GetNode("node2bis", nil) + idx, out, err := s.GetNode("node2bis", nil, "") if err != nil { t.Fatalf("err: %s", err) } // Retrieve the node again - idx2, out2, err := s.GetNodeID(nodeID2, nil) + idx2, out2, err := s.GetNodeID(nodeID2, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -772,7 +976,7 @@ func TestStateStore_EnsureNode(t *testing.T) { s := testStateStore(t) // Fetching a non-existent node returns nil - if _, node, err := s.GetNode("node1", nil); node != nil || err != nil { + if _, node, err := s.GetNode("node1", nil, ""); node != nil || err != nil { t.Fatalf("expected (nil, nil), got: (%#v, %#v)", node, err) } @@ -789,7 +993,7 @@ func TestStateStore_EnsureNode(t *testing.T) { } // Retrieve the node again - idx, out, err := s.GetNode("node1", nil) + idx, out, err := s.GetNode("node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -818,7 +1022,7 @@ func TestStateStore_EnsureNode(t *testing.T) { } // Retrieve the node - idx, out, err = s.GetNode("node1", nil) + idx, out, err = s.GetNode("node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -835,7 +1039,7 @@ func 
TestStateStore_EnsureNode(t *testing.T) { if err := s.EnsureNode(3, in2); err != nil { t.Fatalf("err: %s", err) } - _, out, err = s.GetNode("node1", nil) + _, out, err = s.GetNode("node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -852,7 +1056,7 @@ func TestStateStore_EnsureNode(t *testing.T) { if err := s.EnsureNode(3, in3); err != nil { t.Fatalf("err: %s", err) } - idx, out, err = s.GetNode("node1", nil) + idx, out, err = s.GetNode("node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -879,13 +1083,13 @@ func TestStateStore_EnsureNode(t *testing.T) { } // Retrieve the node - _, out, err = s.GetNode("node1", nil) + _, out, err = s.GetNode("node1", nil, "") require.NoError(t, err) if out != nil { t.Fatalf("Node should not exist anymore: %q", out) } - idx, out, err = s.GetNode("node1-renamed", nil) + idx, out, err = s.GetNode("node1-renamed", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -944,7 +1148,7 @@ func TestStateStore_EnsureNode(t *testing.T) { } // Retrieve the node - _, out, err = s.GetNode("Node1bis", nil) + _, out, err = s.GetNode("Node1bis", nil, "") require.NoError(t, err) if out == nil { t.Fatalf("Node should exist, but was null") @@ -960,7 +1164,7 @@ func TestStateStore_EnsureNode(t *testing.T) { t.Fatalf("err: %s", err) } - idx, out, err = s.GetNode("Node1bis", nil) + idx, out, err = s.GetNode("Node1bis", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1003,7 +1207,7 @@ func TestStateStore_EnsureNode(t *testing.T) { if err := s.EnsureNode(12, in); err != nil { t.Fatalf("err: %s", err) } - idx, out, err = s.GetNode("Node1-Renamed2", nil) + idx, out, err = s.GetNode("Node1-Renamed2", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1033,7 +1237,7 @@ func TestStateStore_EnsureNode(t *testing.T) { if err := s.EnsureNode(15, in); err != nil { t.Fatalf("[DEPRECATED] it should work, err:= %q", err) } - _, out, err = s.GetNode("Node1-Renamed2", nil) + _, out, err = s.GetNode("Node1-Renamed2", nil, "") if err != 
nil { t.Fatalf("[DEPRECATED] err: %s", err) } @@ -1050,7 +1254,7 @@ func TestStateStore_GetNodes(t *testing.T) { // Listing with no results returns nil. ws := memdb.NewWatchSet() - idx, res, err := s.Nodes(ws, nil) + idx, res, err := s.Nodes(ws, nil, "") if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -1065,7 +1269,7 @@ func TestStateStore_GetNodes(t *testing.T) { // Retrieve the nodes. ws = memdb.NewWatchSet() - idx, nodes, err := s.Nodes(ws, nil) + idx, nodes, err := s.Nodes(ws, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1095,7 +1299,7 @@ func TestStateStore_GetNodes(t *testing.T) { if watchFired(ws) { t.Fatalf("bad") } - if err := s.DeleteNode(3, "node1", nil); err != nil { + if err := s.DeleteNode(3, "node1", nil, ""); err != nil { t.Fatalf("err: %s", err) } if !watchFired(ws) { @@ -1115,7 +1319,7 @@ func BenchmarkGetNodes(b *testing.B) { ws := memdb.NewWatchSet() for i := 0; i < b.N; i++ { - s.Nodes(ws, nil) + s.Nodes(ws, nil, "") } } @@ -1124,7 +1328,7 @@ func TestStateStore_GetNodesByMeta(t *testing.T) { // Listing with no results returns nil ws := memdb.NewWatchSet() - idx, res, err := s.NodesByMeta(ws, map[string]string{"somekey": "somevalue"}, nil) + idx, res, err := s.NodesByMeta(ws, map[string]string{"somekey": "somevalue"}, nil, "") if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -1169,7 +1373,7 @@ func TestStateStore_GetNodesByMeta(t *testing.T) { } for _, tc := range cases { - _, result, err := s.NodesByMeta(nil, tc.filters, nil) + _, result, err := s.NodesByMeta(nil, tc.filters, nil, "") if err != nil { t.Fatalf("bad: %v", err) } @@ -1187,7 +1391,7 @@ func TestStateStore_GetNodesByMeta(t *testing.T) { // Set up a watch. 
ws = memdb.NewWatchSet() - _, _, err = s.NodesByMeta(ws, map[string]string{"role": "client"}, nil) + _, _, err = s.NodesByMeta(ws, map[string]string{"role": "client"}, nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -1229,13 +1433,13 @@ func TestStateStore_NodeServices(t *testing.T) { // Look up by name. t.Run("Look up by name", func(t *testing.T) { { - _, ns, err := s.NodeServices(nil, "node1", nil) + _, ns, err := s.NodeServices(nil, "node1", nil, "") require.NoError(t, err) require.NotNil(t, ns) require.Equal(t, "node1", ns.Node.Node) } { - _, ns, err := s.NodeServices(nil, "node2", nil) + _, ns, err := s.NodeServices(nil, "node2", nil, "") require.NoError(t, err) require.NotNil(t, ns) require.Equal(t, "node2", ns.Node.Node) @@ -1244,13 +1448,13 @@ func TestStateStore_NodeServices(t *testing.T) { t.Run("Look up by UUID", func(t *testing.T) { { - _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-aaaaaaaaaaaa", nil) + _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-aaaaaaaaaaaa", nil, "") require.NoError(t, err) require.NotNil(t, ns) require.Equal(t, "node1", ns.Node.Node) } { - _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bbbbbbbbbbbb", nil) + _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bbbbbbbbbbbb", nil, "") require.NoError(t, err) require.NotNil(t, ns) require.Equal(t, "node2", ns.Node.Node) @@ -1258,20 +1462,20 @@ func TestStateStore_NodeServices(t *testing.T) { }) t.Run("Ambiguous prefix", func(t *testing.T) { - _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510", nil) + _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510", nil, "") require.NoError(t, err) require.Nil(t, ns) }) t.Run("Bad node", func(t *testing.T) { // Bad node, and not a UUID (should not get a UUID error). 
- _, ns, err := s.NodeServices(nil, "nope", nil) + _, ns, err := s.NodeServices(nil, "nope", nil, "") require.NoError(t, err) require.Nil(t, ns) }) t.Run("Specific prefix", func(t *testing.T) { - _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bb", nil) + _, ns, err := s.NodeServices(nil, "40e4a748-2192-161a-0510-bb", nil, "") require.NoError(t, err) require.NotNil(t, ns) require.Equal(t, "node2", ns.Node.Node) @@ -1287,12 +1491,12 @@ func TestStateStore_DeleteNode(t *testing.T) { testRegisterCheck(t, s, 2, "node1", "", "check1", api.HealthPassing) // Delete the node - if err := s.DeleteNode(3, "node1", nil); err != nil { + if err := s.DeleteNode(3, "node1", nil, ""); err != nil { t.Fatalf("err: %s", err) } // The node was removed - if idx, n, err := s.GetNode("node1", nil); err != nil || n != nil || idx != 3 { + if idx, n, err := s.GetNode("node1", nil, ""); err != nil || n != nil || idx != 3 { t.Fatalf("bad: %#v %d (err: %#v)", n, idx, err) } @@ -1326,7 +1530,7 @@ func TestStateStore_DeleteNode(t *testing.T) { // Deleting a nonexistent node should be idempotent and not return // an error - if err := s.DeleteNode(4, "node1", nil); err != nil { + if err := s.DeleteNode(4, "node1", nil, ""); err != nil { t.Fatalf("err: %s", err) } if idx := s.maxIndex(tableNodes); idx != 3 { @@ -1384,7 +1588,7 @@ func TestStateStore_EnsureService(t *testing.T) { // Fetching services for a node with none returns nil. ws := memdb.NewWatchSet() - idx, res, err := s.NodeServices(ws, "node1", nil) + idx, res, err := s.NodeServices(ws, "node1", nil, "") if err != nil || res != nil || idx != 0 { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -1417,7 +1621,7 @@ func TestStateStore_EnsureService(t *testing.T) { // Service successfully registers into the state store. 
ws = memdb.NewWatchSet() - _, _, err = s.NodeServices(ws, "node1", nil) + _, _, err = s.NodeServices(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -1439,7 +1643,7 @@ func TestStateStore_EnsureService(t *testing.T) { // Register a different service on the bad node. ws = memdb.NewWatchSet() - _, _, err = s.NodeServices(ws, "node1", nil) + _, _, err = s.NodeServices(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -1454,7 +1658,7 @@ func TestStateStore_EnsureService(t *testing.T) { // Retrieve the services. ws = memdb.NewWatchSet() - idx, out, err := s.NodeServices(ws, "node1", nil) + idx, out, err := s.NodeServices(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1495,7 +1699,7 @@ func TestStateStore_EnsureService(t *testing.T) { } // Retrieve the service again and ensure it matches.. - idx, out, err = s.NodeServices(nil, "node1", nil) + idx, out, err = s.NodeServices(nil, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1540,7 +1744,7 @@ func TestStateStore_EnsureService_connectProxy(t *testing.T) { assert.Nil(t, s.EnsureService(10, "node1", ns1)) // Retrieve and verify - _, out, err := s.NodeServices(nil, "node1", nil) + _, out, err := s.NodeServices(nil, "node1", nil, "") assert.Nil(t, err) assert.NotNil(t, out) assert.Len(t, out.Services, 1) @@ -1579,7 +1783,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) { assert.Equal(t, "240.0.0.1", vip) // Retrieve and verify - _, out, err := s.NodeServices(nil, "node1", nil) + _, out, err := s.NodeServices(nil, "node1", nil, "") require.NoError(t, err) assert.NotNil(t, out) assert.Len(t, out.Services, 1) @@ -1610,7 +1814,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) { assert.Equal(t, "240.0.0.2", vip) // Retrieve and verify - _, out, err = s.NodeServices(nil, "node1", nil) + _, out, err = s.NodeServices(nil, "node1", nil, "") assert.Nil(t, err) assert.NotNil(t, out) assert.Len(t, out.Services, 2) @@ 
-1620,7 +1824,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) { assert.Equal(t, ns2.Port, taggedAddress.Port) // Delete the first service and make sure it no longer has a virtual IP assigned. - require.NoError(t, s.DeleteService(12, "node1", "foo", entMeta)) + require.NoError(t, s.DeleteService(12, "node1", "foo", entMeta, "")) vip, err = s.VirtualIPForService(structs.ServiceName{Name: "connect-proxy"}) require.NoError(t, err) assert.Equal(t, "", vip) @@ -1647,7 +1851,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) { assert.Equal(t, "240.0.0.2", vip) // Make sure the new instance has the same virtual IP. - _, out, err = s.NodeServices(nil, "node1", nil) + _, out, err = s.NodeServices(nil, "node1", nil, "") require.NoError(t, err) taggedAddress = out.Services["redis-proxy2"].TaggedAddresses[structs.TaggedAddressVirtualIP] assert.Equal(t, vip, taggedAddress.Address) @@ -1675,7 +1879,7 @@ func TestStateStore_EnsureService_VirtualIPAssign(t *testing.T) { assert.Equal(t, "240.0.0.1", vip) // Retrieve and verify - _, out, err = s.NodeServices(nil, "node1", nil) + _, out, err = s.NodeServices(nil, "node1", nil, "") require.NoError(t, err) taggedAddress = out.Services["web-proxy"].TaggedAddresses[structs.TaggedAddressVirtualIP] assert.Equal(t, vip, taggedAddress.Address) @@ -1711,7 +1915,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) { assert.Equal(t, "240.0.0.1", vip) // Retrieve and verify - _, out, err := s.NodeServices(nil, "node1", nil) + _, out, err := s.NodeServices(nil, "node1", nil, "") require.NoError(t, err) assert.NotNil(t, out) @@ -1741,7 +1945,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) { assert.Equal(t, "240.0.0.2", vip) // Retrieve and verify - _, out, err = s.NodeServices(nil, "node1", nil) + _, out, err = s.NodeServices(nil, "node1", nil, "") assert.Nil(t, err) assert.NotNil(t, out) @@ -1750,7 +1954,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t 
*testing.T) { assert.Equal(t, ns2.Port, taggedAddress.Port) // Delete the last service and make sure it no longer has a virtual IP assigned. - require.NoError(t, s.DeleteService(12, "node1", "redis", entMeta)) + require.NoError(t, s.DeleteService(12, "node1", "redis", entMeta, "")) vip, err = s.VirtualIPForService(structs.ServiceName{Name: "redis"}) require.NoError(t, err) assert.Equal(t, "", vip) @@ -1776,7 +1980,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) { assert.Equal(t, "240.0.0.2", vip) // Retrieve and verify - _, out, err = s.NodeServices(nil, "node1", nil) + _, out, err = s.NodeServices(nil, "node1", nil, "") assert.Nil(t, err) assert.NotNil(t, out) @@ -1806,7 +2010,7 @@ func TestStateStore_EnsureService_ReassignFreedVIPs(t *testing.T) { assert.Equal(t, "240.0.0.3", vip) // Retrieve and verify - _, out, err = s.NodeServices(nil, "node1", nil) + _, out, err = s.NodeServices(nil, "node1", nil, "") assert.Nil(t, err) assert.NotNil(t, out) @@ -1820,7 +2024,7 @@ func TestStateStore_Services(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, services, err := s.Services(ws, nil) + idx, services, err := s.Services(ws, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1861,7 +2065,7 @@ func TestStateStore_Services(t *testing.T) { // Pull all the services. ws = memdb.NewWatchSet() - idx, services, err = s.Services(ws, nil) + idx, services, err = s.Services(ws, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1884,7 +2088,7 @@ func TestStateStore_Services(t *testing.T) { } // Deleting a node with a service should fire the watch. 
- if err := s.DeleteNode(6, "node1", nil); err != nil { + if err := s.DeleteNode(6, "node1", nil, ""); err != nil { t.Fatalf("err: %s", err) } if !watchFired(ws) { @@ -1898,7 +2102,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { ws := memdb.NewWatchSet() t.Run("Listing with no results returns nil", func(t *testing.T) { - idx, res, err := s.ServicesByNodeMeta(ws, map[string]string{"somekey": "somevalue"}, nil) + idx, res, err := s.ServicesByNodeMeta(ws, map[string]string{"somekey": "somevalue"}, nil, "") if idx != 0 || len(res) != 0 || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -1940,7 +2144,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { ws = memdb.NewWatchSet() t.Run("Filter the services by the first node's meta value", func(t *testing.T) { - _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"role": "client"}, nil) + _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"role": "client"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1952,7 +2156,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { }) t.Run("Get all services using the common meta value", func(t *testing.T) { - _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil) + _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1964,7 +2168,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { }) t.Run("Get an empty list for an invalid meta value", func(t *testing.T) { - _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"invalid": "nope"}, nil) + _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"invalid": "nope"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -1973,7 +2177,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { }) t.Run("Get the first node's service instance using multiple meta filters", func(t *testing.T) { - _, res, err := s.ServicesByNodeMeta(ws, 
map[string]string{"role": "client", "common": "1"}, nil) + _, res, err := s.ServicesByNodeMeta(ws, map[string]string{"role": "client", "common": "1"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2007,7 +2211,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) { // Now get a fresh watch, which will be forced to watch the whole // service table. ws := memdb.NewWatchSet() - _, _, err := s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil) + _, _, err := s.ServicesByNodeMeta(ws, map[string]string{"common": "1"}, nil, "") require.NoError(t, err) testRegisterService(t, s, idx, "nope", "more-nope") @@ -2032,7 +2236,7 @@ func TestStateStore_ServiceNodes(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.ServiceNodes(ws, "db", nil) + idx, nodes, err := s.ServiceNodes(ws, "db", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2071,7 +2275,7 @@ func TestStateStore_ServiceNodes(t *testing.T) { // Read everything back. ws = memdb.NewWatchSet() - idx, nodes, err = s.ServiceNodes(ws, "db", nil) + idx, nodes, err = s.ServiceNodes(ws, "db", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2134,7 +2338,7 @@ func TestStateStore_ServiceNodes(t *testing.T) { } // But removing a node with the "db" service should fire the watch. - if err := s.DeleteNode(18, "bar", nil); err != nil { + if err := s.DeleteNode(18, "bar", nil, ""); err != nil { t.Fatalf("err: %s", err) } if !watchFired(ws) { @@ -2158,7 +2362,7 @@ func TestStateStore_ServiceNodes(t *testing.T) { // Now get a fresh watch, which will be forced to watch the whole nodes // table. ws = memdb.NewWatchSet() - _, _, err = s.ServiceNodes(ws, "db", nil) + _, _, err = s.ServiceNodes(ws, "db", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2175,7 +2379,7 @@ func TestStateStore_ServiceTagNodes(t *testing.T) { // Listing with no results returns an empty list. 
ws := memdb.NewWatchSet() - idx, nodes, err := s.ServiceTagNodes(ws, "db", []string{"primary"}, nil) + idx, nodes, err := s.ServiceTagNodes(ws, "db", []string{"primary"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2208,7 +2412,7 @@ func TestStateStore_ServiceTagNodes(t *testing.T) { // Read everything back. ws = memdb.NewWatchSet() - idx, nodes, err = s.ServiceTagNodes(ws, "db", []string{"primary"}, nil) + idx, nodes, err = s.ServiceTagNodes(ws, "db", []string{"primary"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2238,7 +2442,7 @@ func TestStateStore_ServiceTagNodes(t *testing.T) { } // But removing a node with the "db:primary" service should fire the watch. - if err := s.DeleteNode(21, "foo", nil); err != nil { + if err := s.DeleteNode(21, "foo", nil, ""); err != nil { t.Fatalf("err: %s", err) } if !watchFired(ws) { @@ -2269,7 +2473,7 @@ func TestStateStore_ServiceTagNodes_MultipleTags(t *testing.T) { t.Fatalf("err: %v", err) } - idx, nodes, err := s.ServiceTagNodes(nil, "db", []string{"primary"}, nil) + idx, nodes, err := s.ServiceTagNodes(nil, "db", []string{"primary"}, nil, "") require.NoError(t, err) require.Equal(t, int(idx), 19) require.Len(t, nodes, 1) @@ -2278,13 +2482,13 @@ func TestStateStore_ServiceTagNodes_MultipleTags(t *testing.T) { require.Contains(t, nodes[0].ServiceTags, "primary") require.Equal(t, nodes[0].ServicePort, 8000) - idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2"}, nil) + idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2"}, nil, "") require.NoError(t, err) require.Equal(t, int(idx), 19) require.Len(t, nodes, 3) // Test filtering on multiple tags - idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2", "replica"}, nil) + idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"v2", "replica"}, nil, "") require.NoError(t, err) require.Equal(t, int(idx), 19) require.Len(t, nodes, 2) @@ -2293,7 +2497,7 @@ func TestStateStore_ServiceTagNodes_MultipleTags(t *testing.T) { 
require.Contains(t, nodes[1].ServiceTags, "v2") require.Contains(t, nodes[1].ServiceTags, "replica") - idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"dev"}, nil) + idx, nodes, err = s.ServiceTagNodes(nil, "db", []string{"dev"}, nil, "") require.NoError(t, err) require.Equal(t, int(idx), 19) require.Len(t, nodes, 1) @@ -2313,9 +2517,9 @@ func TestStateStore_DeleteService(t *testing.T) { // Delete the service. ws := memdb.NewWatchSet() - _, _, err := s.NodeServices(ws, "node1", nil) + _, _, err := s.NodeServices(ws, "node1", nil, "") require.NoError(t, err) - if err := s.DeleteService(4, "node1", "service1", nil); err != nil { + if err := s.DeleteService(4, "node1", "service1", nil, ""); err != nil { t.Fatalf("err: %s", err) } if !watchFired(ws) { @@ -2324,7 +2528,7 @@ func TestStateStore_DeleteService(t *testing.T) { // Service doesn't exist. ws = memdb.NewWatchSet() - _, ns, err := s.NodeServices(ws, "node1", nil) + _, ns, err := s.NodeServices(ws, "node1", nil, "") if err != nil || ns == nil || len(ns.Services) != 0 { t.Fatalf("bad: %#v (err: %#v)", ns, err) } @@ -2348,7 +2552,7 @@ func TestStateStore_DeleteService(t *testing.T) { // Deleting a nonexistent service should be idempotent and not return an // error, nor fire a watch. - if err := s.DeleteService(5, "node1", "service1", nil); err != nil { + if err := s.DeleteService(5, "node1", "service1", nil, ""); err != nil { t.Fatalf("err: %s", err) } if idx := s.maxIndex(tableServices); idx != 4 { @@ -2364,7 +2568,7 @@ func TestStateStore_ConnectServiceNodes(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.ConnectServiceNodes(ws, "db", nil) + idx, nodes, err := s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(0)) assert.Len(t, nodes, 0) @@ -2382,7 +2586,7 @@ func TestStateStore_ConnectServiceNodes(t *testing.T) { // Read everything back. 
ws = memdb.NewWatchSet() - idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(17)) assert.Len(t, nodes, 3) @@ -2398,7 +2602,7 @@ func TestStateStore_ConnectServiceNodes(t *testing.T) { assert.False(t, watchFired(ws)) // But removing a node with the "db" service should fire the watch. - assert.Nil(t, s.DeleteNode(18, "bar", nil)) + assert.Nil(t, s.DeleteNode(18, "bar", nil, "")) assert.True(t, watchFired(ws)) } @@ -2407,7 +2611,7 @@ func TestStateStore_ConnectServiceNodes_Gateways(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.ConnectServiceNodes(ws, "db", nil) + idx, nodes, err := s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(0)) assert.Len(t, nodes, 0) @@ -2428,7 +2632,7 @@ func TestStateStore_ConnectServiceNodes_Gateways(t *testing.T) { // Reset WatchSet to ensure watch fires when associating db with gateway ws = memdb.NewWatchSet() - _, _, err = s.ConnectServiceNodes(ws, "db", nil) + _, _, err = s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) // Associate gateway with db @@ -2446,7 +2650,7 @@ func TestStateStore_ConnectServiceNodes_Gateways(t *testing.T) { // Read everything back. 
ws = memdb.NewWatchSet() - idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(17)) assert.Len(t, nodes, 2) @@ -2472,15 +2676,15 @@ func TestStateStore_ConnectServiceNodes_Gateways(t *testing.T) { // Reset WatchSet to ensure watch fires when deregistering gateway ws = memdb.NewWatchSet() - _, _, err = s.ConnectServiceNodes(ws, "db", nil) + _, _, err = s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) // Watch should fire when a gateway instance is deregistered - assert.Nil(t, s.DeleteService(19, "bar", "gateway", nil)) + assert.Nil(t, s.DeleteService(19, "bar", "gateway", nil, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() - idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(19)) assert.Len(t, nodes, 2) @@ -2493,10 +2697,10 @@ func TestStateStore_ConnectServiceNodes_Gateways(t *testing.T) { assert.Equal(t, 443, nodes[1].ServicePort) // Index should not slide back after deleting all instances of the gateway - assert.Nil(t, s.DeleteService(20, "foo", "gateway-2", nil)) + assert.Nil(t, s.DeleteService(20, "foo", "gateway-2", nil, "")) assert.True(t, watchFired(ws)) - idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.ConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(20)) assert.Len(t, nodes, 1) @@ -2556,7 +2760,7 @@ func TestStateStore_Service_Snapshot(t *testing.T) { if idx := snap.LastIndex(); idx != 4 { t.Fatalf("bad index: %d", idx) } - services, err := snap.Services("node1", nil) + services, err := snap.Services("node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2613,7 +2817,7 @@ func TestStateStore_EnsureCheck(t *testing.T) { } // Retrieve the check and make sure it matches - idx, checks, err := s.NodeChecks(nil, "node1", nil) + idx, checks, err 
:= s.NodeChecks(nil, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2630,7 +2834,7 @@ func TestStateStore_EnsureCheck(t *testing.T) { testCheckOutput := func(t *testing.T, expectedNodeIndex, expectedIndexForCheck uint64, outputTxt string) { t.Helper() // Check that we successfully updated - idx, checks, err = s.NodeChecks(nil, "node1", nil) + idx, checks, err = s.NodeChecks(nil, "node1", nil, "") require.NoError(t, err) require.Equal(t, expectedNodeIndex, idx, "bad raft index") @@ -2696,7 +2900,7 @@ func TestStateStore_EnsureCheck_defaultStatus(t *testing.T) { } // Get the check again - _, result, err := s.NodeChecks(nil, "node1", nil) + _, result, err := s.NodeChecks(nil, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2712,7 +2916,7 @@ func TestStateStore_NodeChecks(t *testing.T) { // Do an initial query for a node that doesn't exist. ws := memdb.NewWatchSet() - idx, checks, err := s.NodeChecks(ws, "node1", nil) + idx, checks, err := s.NodeChecks(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2737,7 +2941,7 @@ func TestStateStore_NodeChecks(t *testing.T) { // Try querying for all checks associated with node1 ws = memdb.NewWatchSet() - idx, checks, err = s.NodeChecks(ws, "node1", nil) + idx, checks, err = s.NodeChecks(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2757,7 +2961,7 @@ func TestStateStore_NodeChecks(t *testing.T) { // Try querying for all checks associated with node2 ws = memdb.NewWatchSet() - idx, checks, err = s.NodeChecks(ws, "node2", nil) + idx, checks, err = s.NodeChecks(ws, "node2", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2780,7 +2984,7 @@ func TestStateStore_ServiceChecks(t *testing.T) { // Do an initial query for a service that doesn't exist. 
ws := memdb.NewWatchSet() - idx, checks, err := s.ServiceChecks(ws, "service1", nil) + idx, checks, err := s.ServiceChecks(ws, "service1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2805,7 +3009,7 @@ func TestStateStore_ServiceChecks(t *testing.T) { // Try querying for all checks associated with service1. ws = memdb.NewWatchSet() - idx, checks, err = s.ServiceChecks(ws, "service1", nil) + idx, checks, err = s.ServiceChecks(ws, "service1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2839,7 +3043,7 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) { // Querying with no results returns nil. ws := memdb.NewWatchSet() - idx, checks, err := s.ServiceChecksByNodeMeta(ws, "service1", nil, nil) + idx, checks, err := s.ServiceChecksByNodeMeta(ws, "service1", nil, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2892,7 +3096,7 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) { idx = 7 for _, tc := range cases { ws = memdb.NewWatchSet() - _, checks, err := s.ServiceChecksByNodeMeta(ws, "service1", tc.filters, nil) + _, checks, err := s.ServiceChecksByNodeMeta(ws, "service1", tc.filters, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2928,8 +3132,7 @@ func TestStateStore_ServiceChecksByNodeMeta(t *testing.T) { // Now get a fresh watch, which will be forced to watch the whole // node table. 
ws = memdb.NewWatchSet() - _, _, err = s.ServiceChecksByNodeMeta(ws, "service1", - map[string]string{"common": "1"}, nil) + _, _, err = s.ServiceChecksByNodeMeta(ws, "service1", map[string]string{"common": "1"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2950,7 +3153,7 @@ func TestStateStore_ChecksInState(t *testing.T) { // Querying with no results returns nil ws := memdb.NewWatchSet() - idx, res, err := s.ChecksInState(ws, api.HealthPassing, nil) + idx, res, err := s.ChecksInState(ws, api.HealthPassing, nil, "") if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -2966,7 +3169,7 @@ func TestStateStore_ChecksInState(t *testing.T) { // Query the state store for passing checks. ws = memdb.NewWatchSet() - _, checks, err := s.ChecksInState(ws, api.HealthPassing, nil) + _, checks, err := s.ChecksInState(ws, api.HealthPassing, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -2990,7 +3193,7 @@ func TestStateStore_ChecksInState(t *testing.T) { // HealthAny just returns everything. ws = memdb.NewWatchSet() - _, checks, err = s.ChecksInState(ws, api.HealthAny, nil) + _, checks, err = s.ChecksInState(ws, api.HealthAny, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3017,7 +3220,7 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) { // Querying with no results returns nil. 
ws := memdb.NewWatchSet() - idx, res, err := s.ChecksInStateByNodeMeta(ws, api.HealthPassing, nil, nil) + idx, res, err := s.ChecksInStateByNodeMeta(ws, api.HealthPassing, nil, nil, "") if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -3085,7 +3288,7 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) { for i, tc := range cases { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { ws = memdb.NewWatchSet() - _, checks, err := s.ChecksInStateByNodeMeta(ws, tc.state, tc.filters, nil) + _, checks, err := s.ChecksInStateByNodeMeta(ws, tc.state, tc.filters, nil, "") require.NoError(t, err) var foundIDs []string @@ -3117,8 +3320,7 @@ func TestStateStore_ChecksInStateByNodeMeta(t *testing.T) { // Now get a fresh watch, which will be forced to watch the whole // node table. ws = memdb.NewWatchSet() - _, _, err = s.ChecksInStateByNodeMeta(ws, api.HealthPassing, - map[string]string{"common": "1"}, nil) + _, _, err = s.ChecksInStateByNodeMeta(ws, api.HealthPassing, map[string]string{"common": "1"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3140,7 +3342,7 @@ func TestStateStore_DeleteCheck(t *testing.T) { // Make sure the check is there. ws := memdb.NewWatchSet() - _, checks, err := s.NodeChecks(ws, "node1", nil) + _, checks, err := s.NodeChecks(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3151,10 +3353,10 @@ func TestStateStore_DeleteCheck(t *testing.T) { ensureServiceVersion(t, s, ws, "service1", 2, 1) // Delete the check. 
- if err := s.DeleteCheck(3, "node1", "check1", nil); err != nil { + if err := s.DeleteCheck(3, "node1", "check1", nil, ""); err != nil { t.Fatalf("err: %s", err) } - if idx, check, err := s.NodeCheck("node1", "check1", nil); idx != 3 || err != nil || check != nil { + if idx, check, err := s.NodeCheck("node1", "check1", nil, ""); idx != 3 || err != nil || check != nil { t.Fatalf("Node check should have been deleted idx=%d, node=%v, err=%s", idx, check, err) } if idx := s.maxIndex(tableChecks); idx != 3 { @@ -3168,7 +3370,7 @@ func TestStateStore_DeleteCheck(t *testing.T) { // Check is gone ws = memdb.NewWatchSet() - _, checks, err = s.NodeChecks(ws, "node1", nil) + _, checks, err = s.NodeChecks(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3183,7 +3385,7 @@ func TestStateStore_DeleteCheck(t *testing.T) { // Deleting a nonexistent check should be idempotent and not return an // error. - if err := s.DeleteCheck(4, "node1", "check1", nil); err != nil { + if err := s.DeleteCheck(4, "node1", "check1", nil, ""); err != nil { t.Fatalf("err: %s", err) } if idx := s.maxIndex(tableChecks); idx != 3 { @@ -3195,7 +3397,7 @@ func TestStateStore_DeleteCheck(t *testing.T) { } func ensureServiceVersion(t *testing.T, s *Store, ws memdb.WatchSet, serviceID string, expectedIdx uint64, expectedSize int) { - idx, services, err := s.ServiceNodes(ws, serviceID, nil) + idx, services, err := s.ServiceNodes(ws, serviceID, nil, "") t.Helper() if err != nil { t.Fatalf("err: %s", err) @@ -3213,7 +3415,7 @@ func ensureIndexForService(t *testing.T, s *Store, serviceName string, expectedI t.Helper() tx := s.db.Txn(false) defer tx.Abort() - transaction, err := tx.First(tableIndex, "id", serviceIndexName(serviceName, nil)) + transaction, err := tx.First(tableIndex, "id", serviceIndexName(serviceName, nil, "")) if err == nil { if idx, ok := transaction.(*IndexEntry); ok { if expectedIndex != idx.Value { @@ -3235,7 +3437,7 @@ func TestStateStore_IndexIndependence(t *testing.T) 
{ // Querying with no matches gives an empty response ws := memdb.NewWatchSet() - idx, res, err := s.CheckServiceNodes(ws, "service1", nil) + idx, res, err := s.CheckServiceNodes(ws, "service1", nil, "") if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -3292,13 +3494,13 @@ func TestStateStore_IndexIndependence(t *testing.T) { testRegisterCheck(t, s, 14, "node2", "service_shared", "check_service_shared", api.HealthPassing) ensureServiceVersion(t, s, ws, "service_shared", 14, 2) - s.DeleteCheck(15, "node2", types.CheckID("check_service_shared"), nil) + s.DeleteCheck(15, "node2", types.CheckID("check_service_shared"), nil, "") ensureServiceVersion(t, s, ws, "service_shared", 15, 2) ensureIndexForService(t, s, "service_shared", 15) - s.DeleteService(16, "node2", "service_shared", nil) + s.DeleteService(16, "node2", "service_shared", nil, "") ensureServiceVersion(t, s, ws, "service_shared", 16, 1) ensureIndexForService(t, s, "service_shared", 16) - s.DeleteService(17, "node1", "service_shared", nil) + s.DeleteService(17, "node1", "service_shared", nil, "") ensureServiceVersion(t, s, ws, "service_shared", 17, 0) testRegisterService(t, s, 18, "node1", "service_new") @@ -3357,7 +3559,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // The connect index and gateway-services iterators are watched wantBeforeWatchSetSize: 2, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(5, "node1", "test", nil)) + require.NoError(t, s.DeleteService(5, "node1", "test", nil, "")) }, // Note that the old implementation would unblock in this case since it // always watched the target service's index even though some updates @@ -3418,7 +3620,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // connect index iterator, and gateway-services iterator. 
wantBeforeWatchSetSize: 3, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(6, "node2", "test", nil)) + require.NoError(t, s.DeleteService(6, "node2", "test", nil, "")) }, shouldFire: true, wantAfterIndex: 6, @@ -3438,7 +3640,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // connect index iterator, and gateway-services iterator. wantBeforeWatchSetSize: 3, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(6, "node1", "test", nil)) + require.NoError(t, s.DeleteService(6, "node1", "test", nil, "")) }, shouldFire: true, wantAfterIndex: 6, @@ -3495,7 +3697,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // connect index iterator, and gateway-services iterator. wantBeforeWatchSetSize: 3, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(6, "node2", "test-sidecar-proxy", nil)) + require.NoError(t, s.DeleteService(6, "node2", "test-sidecar-proxy", nil, "")) }, shouldFire: true, wantAfterIndex: 6, @@ -3515,7 +3717,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // connect index iterator, and gateway-services iterator. wantBeforeWatchSetSize: 3, updateFn: func(s *Store) { - require.NoError(t, s.DeleteService(6, "node1", "test-sidecar-proxy", nil)) + require.NoError(t, s.DeleteService(6, "node1", "test-sidecar-proxy", nil, "")) }, shouldFire: true, wantAfterIndex: 6, @@ -3679,7 +3881,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // Run the query ws := memdb.NewWatchSet() - _, res, err := s.CheckConnectServiceNodes(ws, tt.svc, nil) + _, res, err := s.CheckConnectServiceNodes(ws, tt.svc, nil, "") require.NoError(t, err) require.Len(t, res, tt.wantBeforeResLen) require.Len(t, ws, tt.wantBeforeWatchSetSize) @@ -3698,7 +3900,7 @@ func TestStateStore_ConnectQueryBlocking(t *testing.T) { // Re-query the same result. 
Should return the desired index and len ws = memdb.NewWatchSet() - idx, res, err := s.CheckConnectServiceNodes(ws, tt.svc, nil) + idx, res, err := s.CheckConnectServiceNodes(ws, tt.svc, nil, "") require.NoError(t, err) require.Len(t, res, tt.wantAfterResLen) require.Equal(t, tt.wantAfterIndex, idx) @@ -3712,7 +3914,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { // Querying with no matches gives an empty response ws := memdb.NewWatchSet() - idx, res, err := s.CheckServiceNodes(ws, "service1", nil) + idx, res, err := s.CheckServiceNodes(ws, "service1", nil, "") if idx != 0 || res != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) } @@ -3745,7 +3947,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { // with a specific service. ws = memdb.NewWatchSet() ensureServiceVersion(t, s, ws, "service1", 6, 1) - idx, results, err := s.CheckServiceNodes(ws, "service1", nil) + idx, results, err := s.CheckServiceNodes(ws, "service1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3771,7 +3973,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { t.Fatalf("bad") } ws = memdb.NewWatchSet() - idx, _, err = s.CheckServiceNodes(ws, "service1", nil) + idx, _, err = s.CheckServiceNodes(ws, "service1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3787,7 +3989,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { t.Fatalf("bad") } ws = memdb.NewWatchSet() - idx, _, err = s.CheckServiceNodes(ws, "service1", nil) + idx, _, err = s.CheckServiceNodes(ws, "service1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3801,7 +4003,7 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { t.Fatalf("bad") } ws = memdb.NewWatchSet() - idx, _, err = s.CheckServiceNodes(ws, "service1", nil) + idx, _, err = s.CheckServiceNodes(ws, "service1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -3828,7 +4030,7 @@ func TestStateStore_CheckConnectServiceNodes(t *testing.T) { // Listing with no results 
returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err := s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(0)) assert.Len(t, nodes, 0) @@ -3853,7 +4055,7 @@ func TestStateStore_CheckConnectServiceNodes(t *testing.T) { // Read everything back. ws = memdb.NewWatchSet() - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(20)) assert.Len(t, nodes, 2) @@ -3873,7 +4075,7 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) { // Listing with no results returns an empty list. ws := memdb.NewWatchSet() - idx, nodes, err := s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err := s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(0)) assert.Len(t, nodes, 0) @@ -3907,7 +4109,7 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) { assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(18)) assert.Len(t, nodes, 0) @@ -3926,7 +4128,7 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) { // Read everything back. 
ws = memdb.NewWatchSet() - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(21)) assert.Len(t, nodes, 2) @@ -3951,17 +4153,17 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) { assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(22)) assert.Len(t, nodes, 3) // Watch should fire when a gateway instance is deregistered - assert.Nil(t, s.DeleteService(23, "bar", "gateway", nil)) + assert.Nil(t, s.DeleteService(23, "bar", "gateway", nil, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(23)) assert.Len(t, nodes, 2) @@ -3974,10 +4176,10 @@ func TestStateStore_CheckConnectServiceNodes_Gateways(t *testing.T) { assert.Equal(t, 443, nodes[1].Service.Port) // Index should not slide back after deleting all instances of the gateway - assert.Nil(t, s.DeleteService(24, "foo", "gateway-2", nil)) + assert.Nil(t, s.DeleteService(24, "foo", "gateway-2", nil, "")) assert.True(t, watchFired(ws)) - idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil) + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db", nil, "") assert.Nil(t, err) assert.Equal(t, idx, uint64(24)) assert.Len(t, nodes, 1) @@ -4021,7 +4223,7 @@ func BenchmarkCheckServiceNodes(b *testing.B) { ws := memdb.NewWatchSet() for i := 0; i < b.N; i++ { - s.CheckServiceNodes(ws, "db", nil) + s.CheckServiceNodes(ws, "db", nil, "") } } @@ -4055,7 +4257,7 @@ func TestStateStore_CheckServiceTagNodes(t *testing.T) { } ws := memdb.NewWatchSet() - idx, nodes, err := s.CheckServiceTagNodes(ws, "db", []string{"primary"}, nil) + 
idx, nodes, err := s.CheckServiceTagNodes(ws, "db", []string{"primary"}, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -4134,7 +4336,7 @@ func TestStateStore_Check_Snapshot(t *testing.T) { if idx := snap.LastIndex(); idx != 5 { t.Fatalf("bad index: %d", idx) } - iter, err := snap.Checks("node1", nil) + iter, err := snap.Checks("node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -4390,7 +4592,7 @@ func TestStateStore_ServiceDump(t *testing.T) { { name: "delete a node", modFn: func(t *testing.T) { - s.DeleteNode(12, "node2", nil) + s.DeleteNode(12, "node2", nil, "") }, allFired: true, // fires due to "index" kindFired: true, // fires due to "index" @@ -4426,11 +4628,11 @@ func TestStateStore_ServiceDump(t *testing.T) { op := op require.True(t, t.Run(op.name, func(t *testing.T) { wsAll := memdb.NewWatchSet() - _, _, err := s.ServiceDump(wsAll, "", false, nil) + _, _, err := s.ServiceDump(wsAll, "", false, nil, "") require.NoError(t, err) wsKind := memdb.NewWatchSet() - _, _, err = s.ServiceDump(wsKind, structs.ServiceKindConnectProxy, true, nil) + _, _, err = s.ServiceDump(wsKind, structs.ServiceKindConnectProxy, true, nil, "") require.NoError(t, err) op.modFn(t) @@ -4438,12 +4640,12 @@ func TestStateStore_ServiceDump(t *testing.T) { require.Equal(t, op.allFired, watchFired(wsAll), "all dump watch firing busted") require.Equal(t, op.kindFired, watchFired(wsKind), "kind dump watch firing busted") - _, dump, err := s.ServiceDump(nil, "", false, nil) + _, dump, err := s.ServiceDump(nil, "", false, nil, "") require.NoError(t, err) sortDump(dump) op.checkAll(t, dump) - _, dump, err = s.ServiceDump(nil, structs.ServiceKindConnectProxy, true, nil) + _, dump, err = s.ServiceDump(nil, structs.ServiceKindConnectProxy, true, nil, "") require.NoError(t, err) sortDump(dump) op.checkKind(t, dump) @@ -4456,12 +4658,12 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { // Generating a node dump that matches nothing returns empty wsInfo := 
memdb.NewWatchSet() - idx, dump, err := s.NodeInfo(wsInfo, "node1", nil) + idx, dump, err := s.NodeInfo(wsInfo, "node1", nil, "") if idx != 0 || dump != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, dump, err) } wsDump := memdb.NewWatchSet() - idx, dump, err = s.NodeDump(wsDump, nil) + idx, dump, err = s.NodeDump(wsDump, nil, "") if idx != 0 || dump != nil || err != nil { t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, dump, err) } @@ -4614,7 +4816,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { // Get a dump of just a single node ws := memdb.NewWatchSet() - idx, dump, err = s.NodeInfo(ws, "node1", nil) + idx, dump, err = s.NodeInfo(ws, "node1", nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -4625,7 +4827,7 @@ func TestStateStore_NodeInfo_NodeDump(t *testing.T) { require.Equal(t, expect[0], dump[0]) // Generate a dump of all the nodes - idx, dump, err = s.NodeDump(nil, nil) + idx, dump, err = s.NodeDump(nil, nil, "") if err != nil { t.Fatalf("err: %s", err) } @@ -4656,7 +4858,7 @@ func TestStateStore_ServiceIdxUpdateOnNodeUpdate(t *testing.T) { // Store the current service index ws := memdb.NewWatchSet() - lastIdx, _, err := s.ServiceNodes(ws, "srv", nil) + lastIdx, _, err := s.ServiceNodes(ws, "srv", nil, "") require.Nil(t, err) // Update the node with some meta @@ -4665,7 +4867,7 @@ func TestStateStore_ServiceIdxUpdateOnNodeUpdate(t *testing.T) { // Read the new service index ws = memdb.NewWatchSet() - newIdx, _, err := s.ServiceNodes(ws, "srv", nil) + newIdx, _, err := s.ServiceNodes(ws, "srv", nil, "") require.Nil(t, err) require.True(t, newIdx > lastIdx) @@ -4696,7 +4898,7 @@ func TestStateStore_ensureServiceCASTxn(t *testing.T) { // ensure no update happened roTxn := s.db.Txn(false) - _, nsRead, err := s.NodeService("node1", "foo", nil) + _, nsRead, err := s.NodeService("node1", "foo", nil, "") require.NoError(t, err) require.NotNil(t, nsRead) require.Equal(t, uint64(2), nsRead.ModifyIndex) @@ 
-4711,7 +4913,7 @@ func TestStateStore_ensureServiceCASTxn(t *testing.T) { // ensure no update happened roTxn = s.db.Txn(false) - _, nsRead, err = s.NodeService("node1", "foo", nil) + _, nsRead, err = s.NodeService("node1", "foo", nil, "") require.NoError(t, err) require.NotNil(t, nsRead) require.Equal(t, uint64(2), nsRead.ModifyIndex) @@ -4726,7 +4928,7 @@ func TestStateStore_ensureServiceCASTxn(t *testing.T) { // ensure the update happened roTxn = s.db.Txn(false) - _, nsRead, err = s.NodeService("node1", "foo", nil) + _, nsRead, err = s.NodeService("node1", "foo", nil, "") require.NoError(t, err) require.NotNil(t, nsRead) require.Equal(t, uint64(7), nsRead.ModifyIndex) @@ -4973,7 +5175,7 @@ func TestStateStore_GatewayServices_Terminating(t *testing.T) { assert.Equal(t, expect, out) // Delete a service covered by wildcard - assert.Nil(t, s.DeleteService(24, "bar", "redis", nil)) + assert.Nil(t, s.DeleteService(24, "bar", "redis", nil, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -5198,7 +5400,7 @@ func TestStateStore_GatewayServices_ServiceDeletion(t *testing.T) { assert.Equal(t, expect, out) // Delete a service specified directly. 
- assert.Nil(t, s.DeleteService(20, "foo", "db", nil)) + assert.Nil(t, s.DeleteService(20, "foo", "db", nil, "")) // Only the watch for other-gateway should fire, since its association to db came from a wildcard assert.False(t, watchFired(ws)) @@ -5482,7 +5684,7 @@ func TestStateStore_GatewayServices_Ingress(t *testing.T) { }) t.Run("deregistering a service", func(t *testing.T) { - require.Nil(t, s.DeleteService(18, "node1", "service1", nil)) + require.Nil(t, s.DeleteService(18, "node1", "service1", nil, "")) require.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -6226,7 +6428,7 @@ func TestStateStore_DumpGatewayServices(t *testing.T) { // Delete a service covered by wildcard t.Run("delete-wc-service", func(t *testing.T) { - assert.Nil(t, s.DeleteService(23, "bar", "redis", nil)) + assert.Nil(t, s.DeleteService(23, "bar", "redis", nil, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -6531,7 +6733,7 @@ func TestCatalog_catalogDownstreams_Watches(t *testing.T) { require.ElementsMatch(t, exp.names, names) // Now delete the web-proxy service and the result should be empty - require.NoError(t, s.DeleteService(3, "foo", "web-proxy", defaultMeta)) + require.NoError(t, s.DeleteService(3, "foo", "web-proxy", defaultMeta, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -6982,7 +7184,7 @@ func TestCatalog_upstreamsFromRegistration_Watches(t *testing.T) { require.ElementsMatch(t, exp.names, names) // Now delete the web-proxy service and the result should mirror the one of the remaining instance - require.NoError(t, s.DeleteService(4, "foo", "web-proxy", defaultMeta)) + require.NoError(t, s.DeleteService(4, "foo", "web-proxy", defaultMeta, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -7002,7 +7204,7 @@ func TestCatalog_upstreamsFromRegistration_Watches(t *testing.T) { require.ElementsMatch(t, exp.names, names) // Now delete the last web-proxy instance and the mappings should be cleared - require.NoError(t, 
s.DeleteService(5, "foo", "web-proxy-2", defaultMeta)) + require.NoError(t, s.DeleteService(5, "foo", "web-proxy-2", defaultMeta, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -7079,7 +7281,7 @@ func TestCatalog_topologyCleanupPanic(t *testing.T) { assert.True(t, watchFired(ws)) // Now delete the node Foo, and this would panic because of the deletion within an iterator - require.NoError(t, s.DeleteNode(3, "foo", nil)) + require.NoError(t, s.DeleteNode(3, "foo", nil, "")) assert.True(t, watchFired(ws)) } @@ -7240,7 +7442,7 @@ func TestCatalog_upstreamsFromRegistration_Ingress(t *testing.T) { require.ElementsMatch(t, exp.names, names) // Deleting a service covered by a wildcard should delete its mapping - require.NoError(t, s.DeleteService(6, "foo", svc.ID, &svc.EnterpriseMeta)) + require.NoError(t, s.DeleteService(6, "foo", svc.ID, &svc.EnterpriseMeta, "")) assert.True(t, watchFired(ws)) ws = memdb.NewWatchSet() @@ -7362,7 +7564,7 @@ func TestCatalog_cleanupGatewayWildcards_panic(t *testing.T) { require.NoError(t, s.EnsureService(5, "foo", &api2)) // Now delete the node "foo", and this would panic because of the deletion within an iterator - require.NoError(t, s.DeleteNode(6, "foo", nil)) + require.NoError(t, s.DeleteNode(6, "foo", nil, "")) } func TestCatalog_DownstreamsForService(t *testing.T) { @@ -7911,7 +8113,7 @@ func TestStateStore_EnsureService_ServiceNames(t *testing.T) { // Deregister an ingress gateway and the index should not slide back idx++ - require.NoError(t, s.DeleteService(idx, "node1", "new-ingress-gateway", entMeta)) + require.NoError(t, s.DeleteService(idx, "node1", "new-ingress-gateway", entMeta, "")) gotIdx, got, err = s.ServiceNamesOfKind(nil, structs.ServiceKindIngressGateway) require.NoError(t, err) @@ -7936,7 +8138,7 @@ func TestStateStore_EnsureService_ServiceNames(t *testing.T) { // Deregister the single typical service and the service name should also be dropped idx++ - require.NoError(t, s.DeleteService(idx, "node1", 
"web", entMeta)) + require.NoError(t, s.DeleteService(idx, "node1", "web", entMeta, "")) gotIdx, got, err = s.ServiceNamesOfKind(nil, structs.ServiceKindTypical) require.NoError(t, err) diff --git a/agent/consul/state/connect_ca_events.go b/agent/consul/state/connect_ca_events.go index 6a0bdb9744..36fe8ce351 100644 --- a/agent/consul/state/connect_ca_events.go +++ b/agent/consul/state/connect_ca_events.go @@ -4,6 +4,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbsubscribe" ) // EventTopicCARoots is the streaming topic to which events will be published @@ -12,13 +13,7 @@ import ( // // Note: topics are ordinarily defined in subscribe.proto, but this one isn't // currently available via the Subscribe endpoint. -const EventTopicCARoots stringer = "CARoots" - -// stringer is a convenience type to turn a regular string into a fmt.Stringer -// so that it can be used as a stream.Topic or stream.Subject. -type stringer string - -func (s stringer) String() string { return string(s) } +const EventTopicCARoots stream.StringTopic = "CARoots" type EventPayloadCARoots struct { CARoots structs.CARoots @@ -35,6 +30,10 @@ func (e EventPayloadCARoots) HasReadPermission(authz acl.Authorizer) bool { return authz.ServiceWriteAny(&authzContext) == acl.Allow } +func (e EventPayloadCARoots) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + panic("EventPayloadCARoots does not implement ToSubscriptionEvent") +} + // caRootsChangeEvents returns an event on EventTopicCARoots whenever the list // of active CA Roots changes. 
func caRootsChangeEvents(tx ReadTxn, changes Changes) ([]stream.Event, error) { diff --git a/agent/consul/state/coordinate_test.go b/agent/consul/state/coordinate_test.go index 3a28d199b9..6b576a8b80 100644 --- a/agent/consul/state/coordinate_test.go +++ b/agent/consul/state/coordinate_test.go @@ -181,7 +181,7 @@ func TestStateStore_Coordinate_Cleanup(t *testing.T) { require.Equal(t, expected, coords) // Now delete the node. - require.NoError(t, s.DeleteNode(3, "node1", nil)) + require.NoError(t, s.DeleteNode(3, "node1", nil, "")) // Make sure the coordinate is gone. _, coords, err = s.Coordinate(nil, "node1", nil) diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index 2417f5741a..821288f3bc 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -997,8 +997,9 @@ func (s *Store) intentionTopologyTxn(tx ReadTxn, ws memdb.WatchSet, // TODO(tproxy): One remaining improvement is that this includes non-Connect services (typical services without a proxy) // Ideally those should be excluded as well, since they can't be upstreams/downstreams without a proxy. - // Maybe narrow serviceNamesOfKindTxn to services represented by proxies? (ingress, sidecar-proxy, terminating) - index, services, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindTypical) + // Maybe narrow serviceNamesOfKindTxn to services represented by proxies? (ingress, sidecar- + wildcardMeta := structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier) + index, services, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindTypical, *wildcardMeta) if err != nil { return index, nil, fmt.Errorf("failed to list ingress service names: %v", err) } @@ -1008,7 +1009,7 @@ func (s *Store) intentionTopologyTxn(tx ReadTxn, ws memdb.WatchSet, if downstreams { // Ingress gateways can only ever be downstreams, since mesh services don't dial them. 
- index, ingress, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindIngressGateway) + index, ingress, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindIngressGateway, *wildcardMeta) if err != nil { return index, nil, fmt.Errorf("failed to list ingress service names: %v", err) } diff --git a/agent/consul/state/peering.go b/agent/consul/state/peering.go new file mode 100644 index 0000000000..b2a8b2c388 --- /dev/null +++ b/agent/consul/state/peering.go @@ -0,0 +1,486 @@ +package state + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-uuid" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" +) + +const ( + tablePeering = "peering" + tablePeeringTrustBundles = "peering-trust-bundles" +) + +func peeringTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: tablePeering, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + AllowMissing: false, + Unique: true, + Indexer: indexerSingle{ + readIndex: readIndex(indexFromUUIDString), + writeIndex: writeIndex(indexIDFromPeering), + }, + }, + indexName: { + Name: indexName, + AllowMissing: false, + Unique: true, + Indexer: indexerSingleWithPrefix{ + readIndex: indexPeeringFromQuery, + writeIndex: indexFromPeering, + prefixIndex: prefixIndexFromQueryNoNamespace, + }, + }, + }, + } +} + +func peeringTrustBundlesTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: tablePeeringTrustBundles, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + AllowMissing: false, + Unique: true, + Indexer: indexerSingle{ + readIndex: indexPeeringFromQuery, // same as peering table since we'll use the query.Value + writeIndex: indexFromPeeringTrustBundle, + }, + }, + }, + } +} + +func indexIDFromPeering(raw interface{}) ([]byte, error) { + p, ok := raw.(*pbpeering.Peering) + if !ok { + return nil, 
fmt.Errorf("unexpected type %T for pbpeering.Peering index", raw) + } + + if p.ID == "" { + return nil, errMissingValueForIndex + } + + uuid, err := uuidStringToBytes(p.ID) + if err != nil { + return nil, err + } + var b indexBuilder + b.Raw(uuid) + return b.Bytes(), nil +} + +func (s *Store) PeeringReadByID(ws memdb.WatchSet, id string) (uint64, *pbpeering.Peering, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + peering, err := peeringReadByIDTxn(ws, tx, id) + if err != nil { + return 0, nil, fmt.Errorf("failed to read peering by id: %w", err) + } + if peering == nil { + // Return the tables index so caller can watch it for changes if the peering doesn't exist + return maxIndexWatchTxn(tx, ws, tablePeering), nil, nil + } + + return peering.ModifyIndex, peering, nil +} + +func (s *Store) PeeringRead(ws memdb.WatchSet, q Query) (uint64, *pbpeering.Peering, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + watchCh, peeringRaw, err := tx.FirstWatch(tablePeering, indexName, q) + if err != nil { + return 0, nil, fmt.Errorf("failed peering lookup: %w", err) + } + + peering, ok := peeringRaw.(*pbpeering.Peering) + if peering != nil && !ok { + return 0, nil, fmt.Errorf("invalid type %T", peering) + } + ws.Add(watchCh) + + if peering == nil { + // Return the tables index so caller can watch it for changes if the peering doesn't exist + return maxIndexWatchTxn(tx, ws, partitionedIndexEntryName(tablePeering, q.PartitionOrDefault())), nil, nil + } + return peering.ModifyIndex, peering, nil +} + +func peeringReadByIDTxn(ws memdb.WatchSet, tx ReadTxn, id string) (*pbpeering.Peering, error) { + watchCh, peeringRaw, err := tx.FirstWatch(tablePeering, indexID, id) + if err != nil { + return nil, fmt.Errorf("failed peering lookup: %w", err) + } + ws.Add(watchCh) + + peering, ok := peeringRaw.(*pbpeering.Peering) + if peering != nil && !ok { + return nil, fmt.Errorf("invalid type %T", peering) + } + return peering, nil +} + +func (s *Store) PeeringList(ws memdb.WatchSet, 
entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.Peering, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + var ( + iter memdb.ResultIterator + err error + idx uint64 + ) + if entMeta.PartitionOrDefault() == structs.WildcardSpecifier { + iter, err = tx.Get(tablePeering, indexID) + idx = maxIndexWatchTxn(tx, ws, tablePeering) + } else { + iter, err = tx.Get(tablePeering, indexName+"_prefix", entMeta) + idx = maxIndexWatchTxn(tx, ws, partitionedIndexEntryName(tablePeering, entMeta.PartitionOrDefault())) + } + if err != nil { + return 0, nil, fmt.Errorf("failed peering lookup: %v", err) + } + + var result []*pbpeering.Peering + for entry := iter.Next(); entry != nil; entry = iter.Next() { + result = append(result, entry.(*pbpeering.Peering)) + } + + return idx, result, nil +} + +func generatePeeringUUID(tx ReadTxn) (string, error) { + for { + uuid, err := uuid.GenerateUUID() + if err != nil { + return "", fmt.Errorf("failed to generate UUID: %w", err) + } + existing, err := peeringReadByIDTxn(nil, tx, uuid) + if err != nil { + return "", fmt.Errorf("failed to read peering: %w", err) + } + if existing == nil { + return uuid, nil + } + } +} + +func (s *Store) PeeringWrite(idx uint64, p *pbpeering.Peering) error { + tx := s.db.WriteTxn(idx) + defer tx.Abort() + + q := Query{ + Value: p.Name, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(p.Partition), + } + existingRaw, err := tx.First(tablePeering, indexName, q) + if err != nil { + return fmt.Errorf("failed peering lookup: %w", err) + } + + existing, ok := existingRaw.(*pbpeering.Peering) + if existingRaw != nil && !ok { + return fmt.Errorf("invalid type %T", existingRaw) + } + + if existing != nil { + p.CreateIndex = existing.CreateIndex + p.ID = existing.ID + + } else { + // TODO(peering): consider keeping PeeringState enum elsewhere? 
+ p.State = pbpeering.PeeringState_INITIAL + p.CreateIndex = idx + + p.ID, err = generatePeeringUUID(tx) + if err != nil { + return fmt.Errorf("failed to generate peering id: %w", err) + } + } + p.ModifyIndex = idx + + if err := tx.Insert(tablePeering, p); err != nil { + return fmt.Errorf("failed inserting peering: %w", err) + } + + if err := updatePeeringTableIndexes(tx, idx, p.PartitionOrDefault()); err != nil { + return err + } + return tx.Commit() +} + +// TODO(peering): replace with deferred deletion since this operation +// should involve cleanup of data associated with the peering. +func (s *Store) PeeringDelete(idx uint64, q Query) error { + tx := s.db.WriteTxn(idx) + defer tx.Abort() + + existing, err := tx.First(tablePeering, indexName, q) + if err != nil { + return fmt.Errorf("failed peering lookup: %v", err) + } + + if existing == nil { + return nil + } + + if err := tx.Delete(tablePeering, existing); err != nil { + return fmt.Errorf("failed deleting peering: %v", err) + } + + if err := updatePeeringTableIndexes(tx, idx, q.PartitionOrDefault()); err != nil { + return err + } + return tx.Commit() +} + +func (s *Store) PeeringTerminateByID(idx uint64, id string) error { + tx := s.db.WriteTxn(idx) + defer tx.Abort() + + existing, err := peeringReadByIDTxn(nil, tx, id) + if err != nil { + return fmt.Errorf("failed to read peering %q: %w", id, err) + } + if existing == nil { + return nil + } + + c := proto.Clone(existing) + clone, ok := c.(*pbpeering.Peering) + if !ok { + return fmt.Errorf("invalid type %T, expected *pbpeering.Peering", existing) + } + + clone.State = pbpeering.PeeringState_TERMINATED + clone.ModifyIndex = idx + + if err := tx.Insert(tablePeering, clone); err != nil { + return fmt.Errorf("failed inserting peering: %w", err) + } + + if err := updatePeeringTableIndexes(tx, idx, clone.PartitionOrDefault()); err != nil { + return err + } + return tx.Commit() +} + +// ExportedServicesForPeer returns the list of typical and proxy services exported 
to a peer. +// TODO(peering): What to do about terminating gateways? Sometimes terminating gateways are the appropriate destination +// to dial for an upstream mesh service. However, that information is handled by observing the terminating gateway's +// config entry, which we wouldn't want to replicate. How would client peers know to route through terminating gateways +// when they're not dialing through a remote mesh gateway? +func (s *Store) ExportedServicesForPeer(ws memdb.WatchSet, peerID string) (uint64, []structs.ServiceName, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + peering, err := peeringReadByIDTxn(ws, tx, peerID) + if err != nil { + return 0, nil, fmt.Errorf("failed to read peering: %w", err) + } + if peering == nil { + return 0, nil, nil + } + + maxIdx := peering.ModifyIndex + + entMeta := structs.NodeEnterpriseMetaInPartition(peering.Partition) + idx, raw, err := configEntryTxn(tx, ws, structs.ExportedServices, entMeta.PartitionOrDefault(), entMeta) + if err != nil { + return 0, nil, fmt.Errorf("failed to fetch exported-services config entry: %w", err) + } + if idx > maxIdx { + maxIdx = idx + } + if raw == nil { + return maxIdx, nil, nil + } + conf, ok := raw.(*structs.ExportedServicesConfigEntry) + if !ok { + return 0, nil, fmt.Errorf("expected type *structs.ExportedServicesConfigEntry, got %T", raw) + } + + set := make(map[structs.ServiceName]struct{}) + + for _, svc := range conf.Services { + svcMeta := acl.NewEnterpriseMetaWithPartition(entMeta.PartitionOrDefault(), svc.Namespace) + + sawPeer := false + for _, consumer := range svc.Consumers { + name := structs.NewServiceName(svc.Name, &svcMeta) + + if _, ok := set[name]; ok { + // Service was covered by a wildcard that was already accounted for + continue + } + if consumer.PeerName != peering.Name { + continue + } + sawPeer = true + + if svc.Name != structs.WildcardSpecifier { + set[name] = struct{}{} + } + } + + // If the target peer is a consumer, and all services in the namespace 
are exported, query those service names. + if sawPeer && svc.Name == structs.WildcardSpecifier { + var typicalServices []*KindServiceName + idx, typicalServices, err = serviceNamesOfKindTxn(tx, ws, structs.ServiceKindTypical, svcMeta) + if err != nil { + return 0, nil, fmt.Errorf("failed to get service names: %w", err) + } + if idx > maxIdx { + maxIdx = idx + } + for _, s := range typicalServices { + set[s.Service] = struct{}{} + } + + var proxyServices []*KindServiceName + idx, proxyServices, err = serviceNamesOfKindTxn(tx, ws, structs.ServiceKindConnectProxy, svcMeta) + if err != nil { + return 0, nil, fmt.Errorf("failed to get service names: %w", err) + } + if idx > maxIdx { + maxIdx = idx + } + for _, s := range proxyServices { + set[s.Service] = struct{}{} + } + } + } + + var resp []structs.ServiceName + for svc := range set { + resp = append(resp, svc) + } + return maxIdx, resp, nil +} + +func (s *Store) PeeringTrustBundleRead(ws memdb.WatchSet, q Query) (uint64, *pbpeering.PeeringTrustBundle, error) { + tx := s.db.ReadTxn() + defer tx.Abort() + + watchCh, ptbRaw, err := tx.FirstWatch(tablePeeringTrustBundles, indexID, q) + if err != nil { + return 0, nil, fmt.Errorf("failed peering trust bundle lookup: %w", err) + } + + ptb, ok := ptbRaw.(*pbpeering.PeeringTrustBundle) + if ptb != nil && !ok { + return 0, nil, fmt.Errorf("invalid type %T", ptb) + } + ws.Add(watchCh) + + if ptb == nil { + // Return the tables index so caller can watch it for changes if the trust bundle doesn't exist + return maxIndexWatchTxn(tx, ws, partitionedIndexEntryName(tablePeeringTrustBundles, q.PartitionOrDefault())), nil, nil + } + return ptb.ModifyIndex, ptb, nil +} + +// PeeringTrustBundleWrite writes ptb to the state store. If there is an existing trust bundle with the given peer name, +// it will be overwritten. 
+func (s *Store) PeeringTrustBundleWrite(idx uint64, ptb *pbpeering.PeeringTrustBundle) error { + tx := s.db.WriteTxn(idx) + defer tx.Abort() + + q := Query{ + Value: ptb.PeerName, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(ptb.Partition), + } + existingRaw, err := tx.First(tablePeeringTrustBundles, indexID, q) + if err != nil { + return fmt.Errorf("failed peering trust bundle lookup: %w", err) + } + + existing, ok := existingRaw.(*pbpeering.PeeringTrustBundle) + if existingRaw != nil && !ok { + return fmt.Errorf("invalid type %T", existingRaw) + } + + if existing != nil { + ptb.CreateIndex = existing.CreateIndex + + } else { + ptb.CreateIndex = idx + } + + ptb.ModifyIndex = idx + + if err := tx.Insert(tablePeeringTrustBundles, ptb); err != nil { + return fmt.Errorf("failed inserting peering trust bundle: %w", err) + } + + if err := updatePeeringTrustBundlesTableIndexes(tx, idx, ptb.PartitionOrDefault()); err != nil { + return err + } + return tx.Commit() +} + +func (s *Store) PeeringTrustBundleDelete(idx uint64, q Query) error { + tx := s.db.WriteTxn(idx) + defer tx.Abort() + + existing, err := tx.First(tablePeeringTrustBundles, indexID, q) + if err != nil { + return fmt.Errorf("failed peering trust bundle lookup: %v", err) + } + + if existing == nil { + return nil + } + + if err := tx.Delete(tablePeeringTrustBundles, existing); err != nil { + return fmt.Errorf("failed deleting peering trust bundle: %v", err) + } + + if err := updatePeeringTrustBundlesTableIndexes(tx, idx, q.PartitionOrDefault()); err != nil { + return err + } + return tx.Commit() +} + +func (s *Snapshot) Peerings() (memdb.ResultIterator, error) { + return s.tx.Get(tablePeering, indexName) +} + +func (s *Snapshot) PeeringTrustBundles() (memdb.ResultIterator, error) { + return s.tx.Get(tablePeeringTrustBundles, indexID) +} + +func (r *Restore) Peering(p *pbpeering.Peering) error { + if err := r.tx.Insert(tablePeering, p); err != nil { + return fmt.Errorf("failed restoring peering: 
%w", err) + } + + if err := updatePeeringTableIndexes(r.tx, p.ModifyIndex, p.PartitionOrDefault()); err != nil { + return err + } + + return nil +} + +func (r *Restore) PeeringTrustBundle(ptb *pbpeering.PeeringTrustBundle) error { + if err := r.tx.Insert(tablePeeringTrustBundles, ptb); err != nil { + return fmt.Errorf("failed restoring peering trust bundle: %w", err) + } + + if err := updatePeeringTrustBundlesTableIndexes(r.tx, ptb.ModifyIndex, ptb.PartitionOrDefault()); err != nil { + return err + } + + return nil +} diff --git a/agent/consul/state/peering_oss.go b/agent/consul/state/peering_oss.go new file mode 100644 index 0000000000..8229d78a66 --- /dev/null +++ b/agent/consul/state/peering_oss.go @@ -0,0 +1,66 @@ +//go:build !consulent +// +build !consulent + +package state + +import ( + "fmt" + "strings" + + "github.com/hashicorp/consul/proto/pbpeering" +) + +func indexPeeringFromQuery(raw interface{}) ([]byte, error) { + q, ok := raw.(Query) + if !ok { + return nil, fmt.Errorf("unexpected type %T for Query index", raw) + } + + var b indexBuilder + b.String(strings.ToLower(q.Value)) + return b.Bytes(), nil +} + +func indexFromPeering(raw interface{}) ([]byte, error) { + p, ok := raw.(*pbpeering.Peering) + if !ok { + return nil, fmt.Errorf("unexpected type %T for structs.Peering index", raw) + } + + if p.Name == "" { + return nil, errMissingValueForIndex + } + + var b indexBuilder + b.String(strings.ToLower(p.Name)) + return b.Bytes(), nil +} + +func indexFromPeeringTrustBundle(raw interface{}) ([]byte, error) { + ptb, ok := raw.(*pbpeering.PeeringTrustBundle) + if !ok { + return nil, fmt.Errorf("unexpected type %T for pbpeering.PeeringTrustBundle index", raw) + } + + if ptb.PeerName == "" { + return nil, errMissingValueForIndex + } + + var b indexBuilder + b.String(strings.ToLower(ptb.PeerName)) + return b.Bytes(), nil +} + +func updatePeeringTableIndexes(tx WriteTxn, idx uint64, _ string) error { + if err := tx.Insert(tableIndex, &IndexEntry{Key: 
tablePeering, Value: idx}); err != nil { + return fmt.Errorf("failed updating table index: %w", err) + } + return nil +} + +func updatePeeringTrustBundlesTableIndexes(tx WriteTxn, idx uint64, _ string) error { + if err := tx.Insert(tableIndex, &IndexEntry{Key: tablePeeringTrustBundles, Value: idx}); err != nil { + return fmt.Errorf("failed updating table index: %w", err) + } + return nil +} diff --git a/agent/consul/state/peering_test.go b/agent/consul/state/peering_test.go new file mode 100644 index 0000000000..1fbe7af0c4 --- /dev/null +++ b/agent/consul/state/peering_test.go @@ -0,0 +1,811 @@ +package state + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-uuid" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" +) + +func insertTestPeerings(t *testing.T, s *Store) { + t.Helper() + + tx := s.db.WriteTxn(0) + defer tx.Abort() + + err := tx.Insert(tablePeering, &pbpeering.Peering{ + Name: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", + State: pbpeering.PeeringState_INITIAL, + CreateIndex: 1, + ModifyIndex: 1, + }) + require.NoError(t, err) + + err = tx.Insert(tablePeering, &pbpeering.Peering{ + Name: "bar", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "5ebcff30-5509-4858-8142-a8e580f1863f", + State: pbpeering.PeeringState_FAILING, + CreateIndex: 2, + ModifyIndex: 2, + }) + require.NoError(t, err) + + err = tx.Insert(tableIndex, &IndexEntry{ + Key: tablePeering, + Value: 2, + }) + require.NoError(t, err) + require.NoError(t, tx.Commit()) +} + +func insertTestPeeringTrustBundles(t *testing.T, s *Store) { + t.Helper() + + tx := s.db.WriteTxn(0) + defer tx.Abort() + + err := tx.Insert(tablePeeringTrustBundles, &pbpeering.PeeringTrustBundle{ + TrustDomain: "foo.com", 
+ PeerName: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + RootPEMs: []string{"foo certificate bundle"}, + CreateIndex: 1, + ModifyIndex: 1, + }) + require.NoError(t, err) + + err = tx.Insert(tablePeeringTrustBundles, &pbpeering.PeeringTrustBundle{ + TrustDomain: "bar.com", + PeerName: "bar", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + RootPEMs: []string{"bar certificate bundle"}, + CreateIndex: 2, + ModifyIndex: 2, + }) + require.NoError(t, err) + + err = tx.Insert(tableIndex, &IndexEntry{ + Key: tablePeeringTrustBundles, + Value: 2, + }) + require.NoError(t, err) + require.NoError(t, tx.Commit()) +} + +func TestStateStore_PeeringReadByID(t *testing.T) { + s := NewStateStore(nil) + insertTestPeerings(t, s) + + type testcase struct { + name string + id string + expect *pbpeering.Peering + } + run := func(t *testing.T, tc testcase) { + _, peering, err := s.PeeringReadByID(nil, tc.id) + require.NoError(t, err) + require.Equal(t, tc.expect, peering) + } + tcs := []testcase{ + { + name: "get foo", + id: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", + expect: &pbpeering.Peering{ + Name: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", + State: pbpeering.PeeringState_INITIAL, + CreateIndex: 1, + ModifyIndex: 1, + }, + }, + { + name: "get bar", + id: "5ebcff30-5509-4858-8142-a8e580f1863f", + expect: &pbpeering.Peering{ + Name: "bar", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "5ebcff30-5509-4858-8142-a8e580f1863f", + State: pbpeering.PeeringState_FAILING, + CreateIndex: 2, + ModifyIndex: 2, + }, + }, + { + name: "get non-existent", + id: "05f54e2f-7813-4d4d-ba03-534554c88a18", + expect: nil, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestStateStore_PeeringRead(t *testing.T) { + s := NewStateStore(nil) + 
insertTestPeerings(t, s) + + type testcase struct { + name string + query Query + expect *pbpeering.Peering + } + run := func(t *testing.T, tc testcase) { + _, peering, err := s.PeeringRead(nil, tc.query) + require.NoError(t, err) + require.Equal(t, tc.expect, peering) + } + tcs := []testcase{ + { + name: "get foo", + query: Query{ + Value: "foo", + }, + expect: &pbpeering.Peering{ + Name: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", + State: pbpeering.PeeringState_INITIAL, + CreateIndex: 1, + ModifyIndex: 1, + }, + }, + { + name: "get non-existent baz", + query: Query{ + Value: "baz", + }, + expect: nil, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestStore_Peering_Watch(t *testing.T) { + s := NewStateStore(nil) + + var lastIdx uint64 + lastIdx++ + + // set up initial write + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "foo", + }) + require.NoError(t, err) + + newWatch := func(t *testing.T, q Query) memdb.WatchSet { + t.Helper() + // set up a watch + ws := memdb.NewWatchSet() + + _, _, err := s.PeeringRead(ws, q) + require.NoError(t, err) + + return ws + } + + t.Run("insert fires watch", func(t *testing.T) { + // watch on non-existent bar + ws := newWatch(t, Query{Value: "bar"}) + + lastIdx++ + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "bar", + }) + require.NoError(t, err) + require.True(t, watchFired(ws)) + + // should find bar peering + idx, p, err := s.PeeringRead(ws, Query{Value: "bar"}) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.NotNil(t, p) + }) + + t.Run("update fires watch", func(t *testing.T) { + // watch on existing foo + ws := newWatch(t, Query{Value: "foo"}) + + // unrelated write shouldn't fire watch + lastIdx++ + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "bar", + }) + require.NoError(t, err) + require.False(t, watchFired(ws)) + 
+ // foo write should fire watch + lastIdx++ + err = s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_FAILING, + }) + require.NoError(t, err) + require.True(t, watchFired(ws)) + + // check foo is updated + idx, p, err := s.PeeringRead(ws, Query{Value: "foo"}) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Equal(t, pbpeering.PeeringState_FAILING, p.State) + }) + + t.Run("delete fires watch", func(t *testing.T) { + // watch on existing foo + ws := newWatch(t, Query{Value: "foo"}) + + // delete on bar shouldn't fire watch + lastIdx++ + require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.Peering{Name: "bar"})) + lastIdx++ + require.NoError(t, s.PeeringDelete(lastIdx, Query{Value: "bar"})) + require.False(t, watchFired(ws)) + + // delete on foo should fire watch + lastIdx++ + err := s.PeeringDelete(lastIdx, Query{Value: "foo"}) + require.NoError(t, err) + require.True(t, watchFired(ws)) + + // check foo is gone + idx, p, err := s.PeeringRead(ws, Query{Value: "foo"}) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Nil(t, p) + }) +} + +func TestStore_PeeringList(t *testing.T) { + s := NewStateStore(nil) + insertTestPeerings(t, s) + + _, pps, err := s.PeeringList(nil, acl.EnterpriseMeta{}) + require.NoError(t, err) + expect := []*pbpeering.Peering{ + { + Name: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9", + State: pbpeering.PeeringState_INITIAL, + CreateIndex: 1, + ModifyIndex: 1, + }, + { + Name: "bar", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + ID: "5ebcff30-5509-4858-8142-a8e580f1863f", + State: pbpeering.PeeringState_FAILING, + CreateIndex: 2, + ModifyIndex: 2, + }, + } + require.ElementsMatch(t, expect, pps) +} + +func TestStore_PeeringList_Watch(t *testing.T) { + s := NewStateStore(nil) + + var lastIdx uint64 + lastIdx++ // start at 1 + + // track number of 
expected peerings in state store + var count int + + newWatch := func(t *testing.T, entMeta acl.EnterpriseMeta) memdb.WatchSet { + t.Helper() + // set up a watch + ws := memdb.NewWatchSet() + + _, _, err := s.PeeringList(ws, entMeta) + require.NoError(t, err) + + return ws + } + + t.Run("insert fires watch", func(t *testing.T) { + ws := newWatch(t, acl.EnterpriseMeta{}) + + lastIdx++ + // insert a peering + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "bar", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }) + require.NoError(t, err) + count++ + + require.True(t, watchFired(ws)) + + // should find bar peering + idx, pp, err := s.PeeringList(ws, acl.EnterpriseMeta{}) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Len(t, pp, count) + }) + + t.Run("update fires watch", func(t *testing.T) { + // set up initial write + lastIdx++ + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }) + require.NoError(t, err) + count++ + + ws := newWatch(t, acl.EnterpriseMeta{}) + + // update peering + lastIdx++ + err = s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_FAILING, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }) + require.NoError(t, err) + + require.True(t, watchFired(ws)) + + idx, pp, err := s.PeeringList(ws, acl.EnterpriseMeta{}) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Len(t, pp, count) + }) + + t.Run("delete fires watch", func(t *testing.T) { + // set up initial write + lastIdx++ + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "baz", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }) + require.NoError(t, err) + count++ + + ws := newWatch(t, acl.EnterpriseMeta{}) + + // delete peering + lastIdx++ + err = s.PeeringDelete(lastIdx, Query{Value: "baz"}) + 
require.NoError(t, err) + count-- + + require.True(t, watchFired(ws)) + + idx, pp, err := s.PeeringList(ws, acl.EnterpriseMeta{}) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Len(t, pp, count) + }) +} + +func TestStore_PeeringWrite(t *testing.T) { + s := NewStateStore(nil) + insertTestPeerings(t, s) + type testcase struct { + name string + input *pbpeering.Peering + } + run := func(t *testing.T, tc testcase) { + require.NoError(t, s.PeeringWrite(10, tc.input)) + + q := Query{ + Value: tc.input.Name, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(tc.input.Partition), + } + _, p, err := s.PeeringRead(nil, q) + require.NoError(t, err) + require.NotNil(t, p) + if tc.input.State == 0 { + require.Equal(t, pbpeering.PeeringState_INITIAL, p.State) + } + require.Equal(t, tc.input.Name, p.Name) + } + tcs := []testcase{ + { + name: "create baz", + input: &pbpeering.Peering{ + Name: "baz", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + { + name: "update foo", + input: &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_FAILING, + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestStore_PeeringWrite_GenerateUUID(t *testing.T) { + rand.Seed(1) + + s := NewStateStore(nil) + + entMeta := structs.NodeEnterpriseMetaInDefaultPartition() + partition := entMeta.PartitionOrDefault() + + for i := 1; i < 11; i++ { + require.NoError(t, s.PeeringWrite(uint64(i), &pbpeering.Peering{ + Name: fmt.Sprintf("peering-%d", i), + Partition: partition, + })) + } + + idx, peerings, err := s.PeeringList(nil, *entMeta) + require.NoError(t, err) + require.Equal(t, uint64(10), idx) + require.Len(t, peerings, 10) + + // Ensure that all assigned UUIDs are unique. 
+ uniq := make(map[string]struct{}) + for _, p := range peerings { + uniq[p.ID] = struct{}{} + } + require.Len(t, uniq, 10) + + // Ensure that the ID of an existing peering cannot be overwritten. + updated := &pbpeering.Peering{ + Name: peerings[0].Name, + Partition: peerings[0].Partition, + } + + // Attempt to overwrite ID. + updated.ID, err = uuid.GenerateUUID() + require.NoError(t, err) + require.NoError(t, s.PeeringWrite(11, updated)) + + q := Query{ + Value: updated.Name, + EnterpriseMeta: *entMeta, + } + idx, got, err := s.PeeringRead(nil, q) + require.NoError(t, err) + require.Equal(t, uint64(11), idx) + require.Equal(t, peerings[0].ID, got.ID) +} + +func TestStore_PeeringDelete(t *testing.T) { + s := NewStateStore(nil) + insertTestPeerings(t, s) + + q := Query{Value: "foo"} + + require.NoError(t, s.PeeringDelete(10, q)) + + _, p, err := s.PeeringRead(nil, q) + require.NoError(t, err) + require.Nil(t, p) +} + +func TestStore_PeeringTerminateByID(t *testing.T) { + s := NewStateStore(nil) + insertTestPeerings(t, s) + + // id corresponding to default/foo + id := "9e650110-ac74-4c5a-a6a8-9348b2bed4e9" + + require.NoError(t, s.PeeringTerminateByID(10, id)) + + _, p, err := s.PeeringReadByID(nil, id) + require.NoError(t, err) + require.Equal(t, pbpeering.PeeringState_TERMINATED, p.State) +} + +func TestStateStore_PeeringTrustBundleRead(t *testing.T) { + s := NewStateStore(nil) + insertTestPeeringTrustBundles(t, s) + + type testcase struct { + name string + query Query + expect *pbpeering.PeeringTrustBundle + } + run := func(t *testing.T, tc testcase) { + _, ptb, err := s.PeeringTrustBundleRead(nil, tc.query) + require.NoError(t, err) + require.Equal(t, tc.expect, ptb) + } + + entMeta := structs.NodeEnterpriseMetaInDefaultPartition() + + tcs := []testcase{ + { + name: "get foo", + query: Query{ + Value: "foo", + EnterpriseMeta: *entMeta, + }, + expect: &pbpeering.PeeringTrustBundle{ + TrustDomain: "foo.com", + PeerName: "foo", + Partition: 
entMeta.PartitionOrEmpty(), + RootPEMs: []string{"foo certificate bundle"}, + CreateIndex: 1, + ModifyIndex: 1, + }, + }, + { + name: "get non-existent baz", + query: Query{ + Value: "baz", + }, + expect: nil, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestStore_PeeringTrustBundleWrite(t *testing.T) { + s := NewStateStore(nil) + insertTestPeeringTrustBundles(t, s) + type testcase struct { + name string + input *pbpeering.PeeringTrustBundle + } + run := func(t *testing.T, tc testcase) { + require.NoError(t, s.PeeringTrustBundleWrite(10, tc.input)) + + q := Query{ + Value: tc.input.PeerName, + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(tc.input.Partition), + } + _, ptb, err := s.PeeringTrustBundleRead(nil, q) + require.NoError(t, err) + require.NotNil(t, ptb) + require.Equal(t, tc.input.TrustDomain, ptb.TrustDomain) + require.Equal(t, tc.input.PeerName, ptb.PeerName) + } + tcs := []testcase{ + { + name: "create baz", + input: &pbpeering.PeeringTrustBundle{ + TrustDomain: "baz.com", + PeerName: "baz", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + { + name: "update foo", + input: &pbpeering.PeeringTrustBundle{ + TrustDomain: "foo-updated.com", + PeerName: "foo", + Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(), + }, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestStore_PeeringTrustBundleDelete(t *testing.T) { + s := NewStateStore(nil) + insertTestPeeringTrustBundles(t, s) + + q := Query{Value: "foo"} + + require.NoError(t, s.PeeringTrustBundleDelete(10, q)) + + _, ptb, err := s.PeeringRead(nil, q) + require.NoError(t, err) + require.Nil(t, ptb) +} + +func TestStateStore_ExportedServicesForPeer(t *testing.T) { + s := NewStateStore(nil) + + var lastIdx uint64 + + lastIdx++ + err := s.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "my-peering", + }) + 
require.NoError(t, err) + + q := Query{Value: "my-peering"} + _, p, err := s.PeeringRead(nil, q) + require.NoError(t, err) + require.NotNil(t, p) + + id := p.ID + + ws := memdb.NewWatchSet() + + runStep(t, "no exported services", func(t *testing.T) { + idx, exported, err := s.ExportedServicesForPeer(ws, id) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Empty(t, exported) + }) + + runStep(t, "config entry with exact service names", func(t *testing.T) { + entry := &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "mysql", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + { + Name: "redis", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + { + Name: "mongo", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-other-peering", + }, + }, + }, + }, + } + lastIdx++ + err = s.EnsureConfigEntry(lastIdx, entry) + require.NoError(t, err) + + require.True(t, watchFired(ws)) + ws = memdb.NewWatchSet() + + expect := []structs.ServiceName{ + { + Name: "mysql", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + { + Name: "redis", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + } + idx, got, err := s.ExportedServicesForPeer(ws, id) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.ElementsMatch(t, expect, got) + }) + + runStep(t, "config entry with wildcard service name picks up existing service", func(t *testing.T) { + lastIdx++ + require.NoError(t, s.EnsureNode(lastIdx, &structs.Node{Node: "foo", Address: "127.0.0.1"})) + + lastIdx++ + require.NoError(t, s.EnsureService(lastIdx, "foo", &structs.NodeService{ID: "billing", Service: "billing", Port: 5000})) + + entry := &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "*", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + 
}, + }, + }, + }, + } + lastIdx++ + err = s.EnsureConfigEntry(lastIdx, entry) + require.NoError(t, err) + + require.True(t, watchFired(ws)) + ws = memdb.NewWatchSet() + + expect := []structs.ServiceName{ + { + Name: "billing", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + } + idx, got, err := s.ExportedServicesForPeer(ws, id) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Equal(t, expect, got) + }) + + runStep(t, "config entry with wildcard service names picks up new registrations", func(t *testing.T) { + lastIdx++ + require.NoError(t, s.EnsureService(lastIdx, "foo", &structs.NodeService{ID: "payments", Service: "payments", Port: 5000})) + + lastIdx++ + proxy := structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + ID: "payments-proxy", + Service: "payments-proxy", + Port: 5000, + } + require.NoError(t, s.EnsureService(lastIdx, "foo", &proxy)) + + require.True(t, watchFired(ws)) + ws = memdb.NewWatchSet() + + expect := []structs.ServiceName{ + { + Name: "billing", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + { + Name: "payments", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + { + Name: "payments-proxy", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + } + idx, got, err := s.ExportedServicesForPeer(ws, id) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.ElementsMatch(t, expect, got) + }) + + runStep(t, "config entry with wildcard service names picks up service deletions", func(t *testing.T) { + lastIdx++ + require.NoError(t, s.DeleteService(lastIdx, "foo", "billing", nil, "")) + + require.True(t, watchFired(ws)) + ws = memdb.NewWatchSet() + + expect := []structs.ServiceName{ + { + Name: "payments", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + { + Name: "payments-proxy", + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + } + idx, got, err 
:= s.ExportedServicesForPeer(ws, id) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.ElementsMatch(t, expect, got) + }) + + runStep(t, "deleting the config entry clears exported services", func(t *testing.T) { + require.NoError(t, s.DeleteConfigEntry(lastIdx, structs.ExportedServices, "default", structs.DefaultEnterpriseMetaInDefaultPartition())) + idx, exported, err := s.ExportedServicesForPeer(ws, id) + require.NoError(t, err) + require.Equal(t, lastIdx, idx) + require.Empty(t, exported) + }) +} diff --git a/agent/consul/state/query.go b/agent/consul/state/query.go index b88fbe4fcc..a4725b875a 100644 --- a/agent/consul/state/query.go +++ b/agent/consul/state/query.go @@ -12,10 +12,15 @@ import ( // Query is a type used to query any single value index that may include an // enterprise identifier. type Query struct { - Value string + Value string + PeerName string acl.EnterpriseMeta } +func (q Query) PeerOrEmpty() string { + return q.PeerName +} + func (q Query) IDValue() string { return q.Value } @@ -137,11 +142,16 @@ func (q BoolQuery) PartitionOrDefault() string { // KeyValueQuery is a type used to query for both a key and a value that may // include an enterprise identifier. type KeyValueQuery struct { - Key string - Value string + Key string + Value string + PeerName string acl.EnterpriseMeta } +func (q KeyValueQuery) PeerOrEmpty() string { + return q.PeerName +} + // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer // receiver for this method. Remove once that is fixed. 
func (q KeyValueQuery) NamespaceOrDefault() string { diff --git a/agent/consul/state/query_oss.go b/agent/consul/state/query_oss.go index 0f11dce5f5..553e7aebe6 100644 --- a/agent/consul/state/query_oss.go +++ b/agent/consul/state/query_oss.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" ) func prefixIndexFromQuery(arg interface{}) ([]byte, error) { @@ -28,6 +29,29 @@ func prefixIndexFromQuery(arg interface{}) ([]byte, error) { return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg) } +func prefixIndexFromQueryWithPeer(arg interface{}) ([]byte, error) { + var b indexBuilder + switch v := arg.(type) { + case *acl.EnterpriseMeta: + return nil, nil + case acl.EnterpriseMeta: + return nil, nil + case Query: + if v.PeerOrEmpty() == "" { + b.String(structs.LocalPeerKeyword) + } else { + b.String(strings.ToLower(v.PeerOrEmpty())) + } + if v.Value == "" { + return b.Bytes(), nil + } + b.String(strings.ToLower(v.Value)) + return b.Bytes(), nil + } + + return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg) +} + func prefixIndexFromQueryNoNamespace(arg interface{}) ([]byte, error) { return prefixIndexFromQuery(arg) } diff --git a/agent/consul/state/schema.go b/agent/consul/state/schema.go index 75a2ffa747..28a690e48c 100644 --- a/agent/consul/state/schema.go +++ b/agent/consul/state/schema.go @@ -22,12 +22,16 @@ func newDBSchema() *memdb.DBSchema { configTableSchema, coordinatesTableSchema, federationStateTableSchema, + freeVirtualIPTableSchema, gatewayServicesTableSchema, indexTableSchema, intentionsTableSchema, + kindServiceNameTableSchema, kvsTableSchema, meshTopologyTableSchema, nodesTableSchema, + peeringTableSchema, + peeringTrustBundlesTableSchema, policiesTableSchema, preparedQueriesTableSchema, rolesTableSchema, @@ -39,8 +43,6 @@ func newDBSchema() *memdb.DBSchema { tokensTableSchema, tombstonesTableSchema, usageTableSchema, - freeVirtualIPTableSchema, - 
kindServiceNameTableSchema, ) withEnterpriseSchema(db) return db diff --git a/agent/consul/state/schema_oss.go b/agent/consul/state/schema_oss.go index ea8e8a43e0..fbe3cd7e56 100644 --- a/agent/consul/state/schema_oss.go +++ b/agent/consul/state/schema_oss.go @@ -3,7 +3,12 @@ package state -import "github.com/hashicorp/consul/acl" +import ( + "fmt" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" +) func partitionedIndexEntryName(entry string, _ string) string { return entry @@ -12,3 +17,11 @@ func partitionedIndexEntryName(entry string, _ string) string { func partitionedAndNamespacedIndexEntryName(entry string, _ *acl.EnterpriseMeta) string { return entry } + +// peeredIndexEntryName returns the peered index key for an importable entity (e.g. checks, services, or nodes). +func peeredIndexEntryName(entry, peerName string) string { + if peerName == "" { + peerName = structs.LocalPeerKeyword + } + return fmt.Sprintf("peer.%s:%s", peerName, entry) +} diff --git a/agent/consul/state/session_test.go b/agent/consul/state/session_test.go index 2e841500a9..a4eae8a507 100644 --- a/agent/consul/state/session_test.go +++ b/agent/consul/state/session_test.go @@ -553,7 +553,7 @@ func TestStateStore_Session_Invalidate_DeleteNode(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := s.DeleteNode(15, "foo", nil); err != nil { + if err := s.DeleteNode(15, "foo", nil, ""); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { @@ -608,7 +608,7 @@ func TestStateStore_Session_Invalidate_DeleteService(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := s.DeleteService(15, "foo", "api", nil); err != nil { + if err := s.DeleteService(15, "foo", "api", nil, ""); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { @@ -709,7 +709,7 @@ func TestStateStore_Session_Invalidate_DeleteCheck(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := s.DeleteCheck(15, "foo", "bar", nil); err != nil { 
+ if err := s.DeleteCheck(15, "foo", "bar", nil, ""); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { @@ -777,7 +777,7 @@ func TestStateStore_Session_Invalidate_Key_Unlock_Behavior(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := s.DeleteNode(6, "foo", nil); err != nil { + if err := s.DeleteNode(6, "foo", nil, ""); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { @@ -859,7 +859,7 @@ func TestStateStore_Session_Invalidate_Key_Delete_Behavior(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if err := s.DeleteNode(6, "foo", nil); err != nil { + if err := s.DeleteNode(6, "foo", nil, ""); err != nil { t.Fatalf("err: %v", err) } if !watchFired(ws) { diff --git a/agent/consul/state/state_store.go b/agent/consul/state/state_store.go index e795b68578..d8aa98dd98 100644 --- a/agent/consul/state/state_store.go +++ b/agent/consul/state/state_store.go @@ -291,10 +291,9 @@ func maxIndexWatchTxn(tx ReadTxn, ws memdb.WatchSet, tables ...string) uint64 { return lindex } -// indexUpdateMaxTxn is used when restoring entries and sets the table's index to -// the given idx only if it's greater than the current index. -func indexUpdateMaxTxn(tx WriteTxn, idx uint64, table string) error { - ti, err := tx.First(tableIndex, indexID, table) +// indexUpdateMaxTxn sets the table's index to the given idx only if it's greater than the current index. 
+func indexUpdateMaxTxn(tx WriteTxn, idx uint64, key string) error { + ti, err := tx.First(tableIndex, indexID, key) if err != nil { return fmt.Errorf("failed to retrieve existing index: %s", err) } @@ -311,7 +310,7 @@ func indexUpdateMaxTxn(tx WriteTxn, idx uint64, table string) error { } } - if err := tx.Insert(tableIndex, &IndexEntry{table, idx}); err != nil { + if err := tx.Insert(tableIndex, &IndexEntry{key, idx}); err != nil { return fmt.Errorf("failed updating index %s", err) } return nil diff --git a/agent/consul/state/store_integration_test.go b/agent/consul/state/store_integration_test.go index 421205e142..47afc36d74 100644 --- a/agent/consul/state/store_integration_test.go +++ b/agent/consul/state/store_integration_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbsubscribe" ) func TestStore_IntegrationWithEventPublisher_ACLTokenUpdate(t *testing.T) { @@ -26,7 +27,7 @@ func TestStore_IntegrationWithEventPublisher_ACLTokenUpdate(t *testing.T) { // Register the subscription. subscription := &stream.SubscribeRequest{ Topic: topicService, - Subject: stringer("nope"), + Subject: stream.StringSubject("nope"), Token: token.SecretID, } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) @@ -73,7 +74,7 @@ func TestStore_IntegrationWithEventPublisher_ACLTokenUpdate(t *testing.T) { // Register another subscription. subscription2 := &stream.SubscribeRequest{ Topic: topicService, - Subject: stringer("nope"), + Subject: stream.StringSubject("nope"), Token: token.SecretID, } sub2, err := publisher.Subscribe(subscription2) @@ -114,7 +115,7 @@ func TestStore_IntegrationWithEventPublisher_ACLPolicyUpdate(t *testing.T) { // Register the subscription. 
subscription := &stream.SubscribeRequest{ Topic: topicService, - Subject: stringer("nope"), + Subject: stream.StringSubject("nope"), Token: token.SecretID, } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) @@ -165,7 +166,7 @@ func TestStore_IntegrationWithEventPublisher_ACLPolicyUpdate(t *testing.T) { // Register another subscription. subscription2 := &stream.SubscribeRequest{ Topic: topicService, - Subject: stringer("nope"), + Subject: stream.StringSubject("nope"), Token: token.SecretID, } sub, err = publisher.Subscribe(subscription2) @@ -194,7 +195,7 @@ func TestStore_IntegrationWithEventPublisher_ACLPolicyUpdate(t *testing.T) { // Register another subscription. subscription3 := &stream.SubscribeRequest{ Topic: topicService, - Subject: stringer("nope"), + Subject: stream.StringSubject("nope"), Token: token.SecretID, } sub, err = publisher.Subscribe(subscription3) @@ -236,7 +237,7 @@ func TestStore_IntegrationWithEventPublisher_ACLRoleUpdate(t *testing.T) { // Register the subscription. subscription := &stream.SubscribeRequest{ Topic: topicService, - Subject: stringer("nope"), + Subject: stream.StringSubject("nope"), Token: token.SecretID, } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) @@ -282,7 +283,7 @@ func TestStore_IntegrationWithEventPublisher_ACLRoleUpdate(t *testing.T) { // Register another subscription. 
subscription2 := &stream.SubscribeRequest{ Topic: topicService, - Subject: stringer("nope"), + Subject: stream.StringSubject("nope"), Token: token.SecretID, } sub, err = publisher.Subscribe(subscription2) @@ -399,7 +400,7 @@ var topicService topic = "test-topic-service" func (s *Store) topicServiceTestHandler(req stream.SubscribeRequest, snap stream.SnapshotAppender) (uint64, error) { key := req.Subject.String() - idx, nodes, err := s.ServiceNodes(nil, key, nil) + idx, nodes, err := s.ServiceNodes(nil, key, nil, structs.TODOPeerKeyword) if err != nil { return idx, err } @@ -431,7 +432,11 @@ func (p nodePayload) HasReadPermission(acl.Authorizer) bool { } func (p nodePayload) Subject() stream.Subject { - return stringer(p.key) + return stream.StringSubject(p.key) +} + +func (p nodePayload) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + panic("nodePayload does not implement ToSubscriptionEvent") } func createTokenAndWaitForACLEventPublish(t *testing.T, s *Store) *structs.ACLToken { @@ -459,7 +464,7 @@ func createTokenAndWaitForACLEventPublish(t *testing.T, s *Store) *structs.ACLTo // continuing... 
req := &stream.SubscribeRequest{ Topic: topicService, - Subject: stringer("nope"), + Subject: stream.StringSubject("nope"), Token: token.SecretID, } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) diff --git a/agent/consul/state/txn.go b/agent/consul/state/txn.go index 4f44b56cc7..5faccadfbe 100644 --- a/agent/consul/state/txn.go +++ b/agent/consul/state/txn.go @@ -153,9 +153,9 @@ func (s *Store) txnNode(tx WriteTxn, idx uint64, op *structs.TxnNodeOp) (structs getNode := func() (*structs.Node, error) { if op.Node.ID != "" { - return getNodeIDTxn(tx, op.Node.ID, op.Node.GetEnterpriseMeta()) + return getNodeIDTxn(tx, op.Node.ID, op.Node.GetEnterpriseMeta(), op.Node.PeerName) } else { - return getNodeTxn(tx, op.Node.Node, op.Node.GetEnterpriseMeta()) + return getNodeTxn(tx, op.Node.Node, op.Node.GetEnterpriseMeta(), op.Node.PeerName) } } @@ -182,11 +182,11 @@ func (s *Store) txnNode(tx WriteTxn, idx uint64, op *structs.TxnNodeOp) (structs entry, err = getNode() case api.NodeDelete: - err = s.deleteNodeTxn(tx, idx, op.Node.Node, op.Node.GetEnterpriseMeta()) + err = s.deleteNodeTxn(tx, idx, op.Node.Node, op.Node.GetEnterpriseMeta(), op.Node.PeerName) case api.NodeDeleteCAS: var ok bool - ok, err = s.deleteNodeCASTxn(tx, idx, op.Node.ModifyIndex, op.Node.Node, op.Node.GetEnterpriseMeta()) + ok, err = s.deleteNodeCASTxn(tx, idx, op.Node.ModifyIndex, op.Node.Node, op.Node.GetEnterpriseMeta(), op.Node.PeerName) if !ok && err == nil { err = fmt.Errorf("failed to delete node %q, index is stale", op.Node.Node) } @@ -219,7 +219,7 @@ func (s *Store) txnNode(tx WriteTxn, idx uint64, op *structs.TxnNodeOp) (structs func (s *Store) txnService(tx WriteTxn, idx uint64, op *structs.TxnServiceOp) (structs.TxnResults, error) { switch op.Verb { case api.ServiceGet: - entry, err := getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) + entry, err := getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta, op.Service.PeerName) 
switch { case err != nil: return nil, err @@ -233,7 +233,7 @@ func (s *Store) txnService(tx WriteTxn, idx uint64, op *structs.TxnServiceOp) (s if err := ensureServiceTxn(tx, idx, op.Node, false, &op.Service); err != nil { return nil, err } - entry, err := getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) + entry, err := getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta, op.Service.PeerName) return newTxnResultFromNodeServiceEntry(entry), err case api.ServiceCAS: @@ -246,15 +246,15 @@ func (s *Store) txnService(tx WriteTxn, idx uint64, op *structs.TxnServiceOp) (s return nil, err } - entry, err := getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) + entry, err := getNodeServiceTxn(tx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta, op.Service.PeerName) return newTxnResultFromNodeServiceEntry(entry), err case api.ServiceDelete: - err := s.deleteServiceTxn(tx, idx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) + err := s.deleteServiceTxn(tx, idx, op.Node, op.Service.ID, &op.Service.EnterpriseMeta, op.Service.PeerName) return nil, err case api.ServiceDeleteCAS: - ok, err := s.deleteServiceCASTxn(tx, idx, op.Service.ModifyIndex, op.Node, op.Service.ID, &op.Service.EnterpriseMeta) + ok, err := s.deleteServiceCASTxn(tx, idx, op.Service.ModifyIndex, op.Node, op.Service.ID, &op.Service.EnterpriseMeta, op.Service.PeerName) if !ok && err == nil { return nil, fmt.Errorf("failed to delete service %q on node %q, index is stale", op.Service.ID, op.Node) } @@ -284,7 +284,7 @@ func (s *Store) txnCheck(tx WriteTxn, idx uint64, op *structs.TxnCheckOp) (struc switch op.Verb { case api.CheckGet: - _, entry, err = getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) + _, entry, err = getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta, op.Check.PeerName) if entry == nil && err == nil { err = fmt.Errorf("check %q on node %q doesn't exist", op.Check.CheckID, 
op.Check.Node) } @@ -292,7 +292,7 @@ func (s *Store) txnCheck(tx WriteTxn, idx uint64, op *structs.TxnCheckOp) (struc case api.CheckSet: err = s.ensureCheckTxn(tx, idx, false, &op.Check) if err == nil { - _, entry, err = getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) + _, entry, err = getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta, op.Check.PeerName) } case api.CheckCAS: @@ -303,14 +303,14 @@ func (s *Store) txnCheck(tx WriteTxn, idx uint64, op *structs.TxnCheckOp) (struc err = fmt.Errorf("failed to set check %q on node %q, index is stale", entry.CheckID, entry.Node) break } - _, entry, err = getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) + _, entry, err = getNodeCheckTxn(tx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta, op.Check.PeerName) case api.CheckDelete: - err = s.deleteCheckTxn(tx, idx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) + err = s.deleteCheckTxn(tx, idx, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta, op.Check.PeerName) case api.CheckDeleteCAS: var ok bool - ok, err = s.deleteCheckCASTxn(tx, idx, op.Check.ModifyIndex, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta) + ok, err = s.deleteCheckCASTxn(tx, idx, op.Check.ModifyIndex, op.Check.Node, op.Check.CheckID, &op.Check.EnterpriseMeta, op.Check.PeerName) if !ok && err == nil { err = fmt.Errorf("failed to delete check %q on node %q, index is stale", op.Check.CheckID, op.Check.Node) } diff --git a/agent/consul/state/txn_test.go b/agent/consul/state/txn_test.go index 17adc2bc36..f98325df3b 100644 --- a/agent/consul/state/txn_test.go +++ b/agent/consul/state/txn_test.go @@ -196,7 +196,7 @@ func TestStateStore_Txn_Node(t *testing.T) { require.Equal(t, expected, results) // Pull the resulting state store contents. 
- idx, actual, err := s.Nodes(nil, nil) + idx, actual, err := s.Nodes(nil, nil, "") require.NoError(t, err) if idx != 8 { t.Fatalf("bad index: %d", idx) @@ -311,7 +311,7 @@ func TestStateStore_Txn_Service(t *testing.T) { require.Equal(t, expected, results) // Pull the resulting state store contents. - idx, actual, err := s.NodeServices(nil, "node1", nil) + idx, actual, err := s.NodeServices(nil, "node1", nil, "") require.NoError(t, err) if idx != 6 { t.Fatalf("bad index: %d", idx) @@ -464,7 +464,7 @@ func TestStateStore_Txn_Checks(t *testing.T) { require.Equal(t, expected, results) // Pull the resulting state store contents. - idx, actual, err := s.NodeChecks(nil, "node1", nil) + idx, actual, err := s.NodeChecks(nil, "node1", nil, "") require.NoError(t, err) if idx != 6 { t.Fatalf("bad index: %d", idx) diff --git a/agent/consul/state/usage_test.go b/agent/consul/state/usage_test.go index 3831d9c76d..7b0f11f8f3 100644 --- a/agent/consul/state/usage_test.go +++ b/agent/consul/state/usage_test.go @@ -38,7 +38,7 @@ func TestStateStore_Usage_NodeUsage_Delete(t *testing.T) { require.Equal(t, idx, uint64(1)) require.Equal(t, usage.Nodes, 2) - require.NoError(t, s.DeleteNode(2, "node2", nil)) + require.NoError(t, s.DeleteNode(2, "node2", nil, "")) idx, usage, err = s.NodeUsage() require.NoError(t, err) require.Equal(t, idx, uint64(2)) @@ -152,7 +152,7 @@ func TestStateStore_Usage_ServiceUsage_DeleteNode(t *testing.T) { require.Equal(t, 1, usage.ConnectServiceInstances[string(structs.ServiceKindConnectProxy)]) require.Equal(t, 1, usage.ConnectServiceInstances[connectNativeInstancesTable]) - require.NoError(t, s.DeleteNode(4, "node1", nil)) + require.NoError(t, s.DeleteNode(4, "node1", nil, "")) idx, usage, err = s.ServiceUsage() require.NoError(t, err) diff --git a/agent/consul/stream/event.go b/agent/consul/stream/event.go index b3936a49b7..708420de2f 100644 --- a/agent/consul/stream/event.go +++ b/agent/consul/stream/event.go @@ -8,6 +8,7 @@ import ( "fmt" 
"github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/proto/pbsubscribe" ) // Topic is an identifier that partitions events. A subscription will only receive @@ -22,11 +23,7 @@ type Subject fmt.Stringer // SubjectNone is used when all events on a given topic are "global" and not // further partitioned by subject. For example: the "CA Roots" topic which is // used to notify subscribers when the global set CA root certificates changes. -const SubjectNone stringer = "none" - -type stringer string - -func (s stringer) String() string { return string(s) } +const SubjectNone StringSubject = "none" // Event is a structure with identifiers and a payload. Events are Published to // EventPublisher and returned to Subscribers. @@ -50,6 +47,10 @@ type Payload interface { // it is usually the normalized resource name (including the partition and // namespace if applicable). Subject() Subject + + // ToSubscriptionEvent is used to convert streaming events to their + // serializable equivalent. + ToSubscriptionEvent(idx uint64) *pbsubscribe.Event } // PayloadEvents is a Payload that may be returned by Subscription.Next when @@ -113,6 +114,26 @@ func (PayloadEvents) Subject() Subject { panic("PayloadEvents does not implement Subject") } +func (p PayloadEvents) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + return &pbsubscribe.Event{ + Index: idx, + Payload: &pbsubscribe.Event_EventBatch{ + EventBatch: &pbsubscribe.EventBatch{ + Events: batchEventsFromEventSlice(p.Items), + }, + }, + } +} + +func batchEventsFromEventSlice(events []Event) []*pbsubscribe.Event { + result := make([]*pbsubscribe.Event, len(events)) + for i := range events { + event := events[i] + result[i] = event.Payload.ToSubscriptionEvent(event.Index) + } + return result +} + // IsEndOfSnapshot returns true if this is a framing event that indicates the // snapshot has completed. Subsequent events from Subscription.Next will be // streamed as they occur. 
@@ -146,18 +167,42 @@ func (framingEvent) Subject() Subject { panic("framing events do not implement Subject") } +func (framingEvent) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + panic("framingEvent does not implement ToSubscriptionEvent") +} + type endOfSnapshot struct { framingEvent } +func (s endOfSnapshot) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + return &pbsubscribe.Event{ + Index: idx, + Payload: &pbsubscribe.Event_EndOfSnapshot{EndOfSnapshot: true}, + } +} + type newSnapshotToFollow struct { framingEvent } +func (s newSnapshotToFollow) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + return &pbsubscribe.Event{ + Index: idx, + Payload: &pbsubscribe.Event_NewSnapshotToFollow{NewSnapshotToFollow: true}, + } +} + type closeSubscriptionPayload struct { tokensSecretIDs []string } +// closeSubscriptionPayload is only used internally and does not correspond to +// a subscription event that would be sent to clients. +func (s closeSubscriptionPayload) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + panic("closeSubscriptionPayload does not implement ToSubscriptionEvent") +} + func (closeSubscriptionPayload) HasReadPermission(acl.Authorizer) bool { return false } diff --git a/agent/consul/stream/event_publisher_test.go b/agent/consul/stream/event_publisher_test.go index fbd253830d..6d930691d2 100644 --- a/agent/consul/stream/event_publisher_test.go +++ b/agent/consul/stream/event_publisher_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/proto/pbsubscribe" ) type intTopic int @@ -22,7 +23,7 @@ var testTopic Topic = intTopic(999) func TestEventPublisher_SubscribeWithIndex0(t *testing.T) { req := &SubscribeRequest{ Topic: testTopic, - Subject: stringer("sub-key"), + Subject: StringSubject("sub-key"), } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -82,7 +83,11 @@ func (p simplePayload) 
HasReadPermission(acl.Authorizer) bool { return !p.noReadPerm } -func (p simplePayload) Subject() Subject { return stringer(p.key) } +func (p simplePayload) Subject() Subject { return StringSubject(p.key) } + +func (p simplePayload) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event { + panic("simplePayload does not implement ToSubscriptionEvent") +} func registerTestSnapshotHandlers(t *testing.T, publisher *EventPublisher) { t.Helper() @@ -188,7 +193,7 @@ func consumeSub(ctx context.Context, sub *Subscription) error { func TestEventPublisher_SubscribeWithIndex0_FromCache(t *testing.T) { req := &SubscribeRequest{ Topic: testTopic, - Subject: stringer("sub-key"), + Subject: StringSubject("sub-key"), } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -234,7 +239,7 @@ func TestEventPublisher_SubscribeWithIndex0_FromCache(t *testing.T) { func TestEventPublisher_SubscribeWithIndexNotZero_CanResume(t *testing.T) { req := &SubscribeRequest{ Topic: testTopic, - Subject: stringer("sub-key"), + Subject: StringSubject("sub-key"), } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -288,7 +293,7 @@ func TestEventPublisher_SubscribeWithIndexNotZero_CanResume(t *testing.T) { func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshot(t *testing.T) { req := &SubscribeRequest{ Topic: testTopic, - Subject: stringer("sub-key"), + Subject: StringSubject("sub-key"), } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -345,7 +350,7 @@ func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshot(t *testing.T) { func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshotFromCache(t *testing.T) { req := &SubscribeRequest{ Topic: testTopic, - Subject: stringer("sub-key"), + Subject: StringSubject("sub-key"), } ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() @@ -414,7 +419,7 @@ func 
TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshotFromCache(t *testin func TestEventPublisher_SubscribeWithIndexNotZero_NewSnapshot_WithCache(t *testing.T) { req := &SubscribeRequest{ Topic: testTopic, - Subject: stringer("sub-key"), + Subject: StringSubject("sub-key"), Index: 1, } @@ -499,7 +504,7 @@ func runStep(t *testing.T, name string, fn func(t *testing.T)) { func TestEventPublisher_Unsubscribe_ClosesSubscription(t *testing.T) { req := &SubscribeRequest{ Topic: testTopic, - Subject: stringer("sub-key"), + Subject: StringSubject("sub-key"), } ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -522,7 +527,7 @@ func TestEventPublisher_Unsubscribe_ClosesSubscription(t *testing.T) { func TestEventPublisher_Unsubscribe_FreesResourcesWhenThereAreNoSubscribers(t *testing.T) { req := &SubscribeRequest{ Topic: testTopic, - Subject: stringer("sub-key"), + Subject: StringSubject("sub-key"), } publisher := NewEventPublisher(time.Second) diff --git a/agent/consul/stream/string_types.go b/agent/consul/stream/string_types.go new file mode 100644 index 0000000000..568f972991 --- /dev/null +++ b/agent/consul/stream/string_types.go @@ -0,0 +1,11 @@ +package stream + +// StringSubject can be used as a Subject for Events sent to the EventPublisher +type StringSubject string + +func (s StringSubject) String() string { return string(s) } + +// StringTopic can be used as a Topic for Events sent to the EventPublisher +type StringTopic string + +func (s StringTopic) String() string { return string(s) } diff --git a/agent/consul/stream/subscription_test.go b/agent/consul/stream/subscription_test.go index b6e0f1a5fe..80aed3dbb3 100644 --- a/agent/consul/stream/subscription_test.go +++ b/agent/consul/stream/subscription_test.go @@ -29,7 +29,7 @@ func TestSubscription(t *testing.T) { req := SubscribeRequest{ Topic: testTopic, - Subject: stringer("test"), + Subject: StringSubject("test"), } sub := newSubscription(req, startHead, noopUnSub) @@ 
-103,7 +103,7 @@ func TestSubscription_Close(t *testing.T) { req := SubscribeRequest{ Topic: testTopic, - Subject: stringer("test"), + Subject: StringSubject("test"), } sub := newSubscription(req, startHead, noopUnSub) diff --git a/agent/consul/txn_endpoint_test.go b/agent/consul/txn_endpoint_test.go index 868ea3b813..7f8d09f32a 100644 --- a/agent/consul/txn_endpoint_test.go +++ b/agent/consul/txn_endpoint_test.go @@ -234,7 +234,7 @@ func TestTxn_Apply(t *testing.T) { t.Fatalf("bad: %v", d) } - _, n, err := state.GetNode("foo", nil) + _, n, err := state.GetNode("foo", nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -242,7 +242,7 @@ func TestTxn_Apply(t *testing.T) { t.Fatalf("bad: %v", err) } - _, s, err := state.NodeService("foo", "svc-foo", nil) + _, s, err := state.NodeService("foo", "svc-foo", nil, "") if err != nil { t.Fatalf("err: %v", err) } @@ -250,7 +250,7 @@ func TestTxn_Apply(t *testing.T) { t.Fatalf("bad: %v", err) } - _, c, err := state.NodeCheck("foo", types.CheckID("check-foo"), nil) + _, c, err := state.NodeCheck("foo", types.CheckID("check-foo"), nil, "") if err != nil { t.Fatalf("err: %v", err) } diff --git a/agent/grpc/private/services/subscribe/subscribe.go b/agent/grpc/private/services/subscribe/subscribe.go index c1b2f7e2d4..3abaa4b55b 100644 --- a/agent/grpc/private/services/subscribe/subscribe.go +++ b/agent/grpc/private/services/subscribe/subscribe.go @@ -2,7 +2,6 @@ package subscribe import ( "errors" - "fmt" "github.com/hashicorp/go-hclog" "google.golang.org/grpc" @@ -13,7 +12,6 @@ import ( "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/proto/pbservice" "github.com/hashicorp/consul/proto/pbsubscribe" ) @@ -61,7 +59,7 @@ func (h *Server) Subscribe(req *pbsubscribe.SubscribeRequest, serverStream pbsub return status.Error(codes.InvalidArgument, "Key is required") } - sub, err := 
h.Backend.Subscribe(toStreamSubscribeRequest(req, entMeta)) + sub, err := h.Backend.Subscribe(state.PBToStreamSubscribeRequest(req, entMeta)) if err != nil { return err } @@ -84,25 +82,15 @@ func (h *Server) Subscribe(req *pbsubscribe.SubscribeRequest, serverStream pbsub } elog.Trace(event) - e := newEventFromStreamEvent(event) + + // TODO: This conversion could be cached if needed + e := event.Payload.ToSubscriptionEvent(event.Index) if err := serverStream.Send(e); err != nil { return err } } } -func toStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta acl.EnterpriseMeta) *stream.SubscribeRequest { - return &stream.SubscribeRequest{ - Topic: req.Topic, - Subject: state.EventSubjectService{ - Key: req.Key, - EnterpriseMeta: entMeta, - }, - Token: req.Token, - Index: req.Index, - } -} - func forwardToDC( req *pbsubscribe.SubscribeRequest, serverStream pbsubscribe.StateChangeSubscription_SubscribeServer, @@ -129,48 +117,3 @@ func forwardToDC( } } } - -func newEventFromStreamEvent(event stream.Event) *pbsubscribe.Event { - e := &pbsubscribe.Event{Index: event.Index} - switch { - case event.IsEndOfSnapshot(): - e.Payload = &pbsubscribe.Event_EndOfSnapshot{EndOfSnapshot: true} - return e - case event.IsNewSnapshotToFollow(): - e.Payload = &pbsubscribe.Event_NewSnapshotToFollow{NewSnapshotToFollow: true} - return e - } - setPayload(e, event.Payload) - return e -} - -func setPayload(e *pbsubscribe.Event, payload stream.Payload) { - switch p := payload.(type) { - case *stream.PayloadEvents: - e.Payload = &pbsubscribe.Event_EventBatch{ - EventBatch: &pbsubscribe.EventBatch{ - Events: batchEventsFromEventSlice(p.Items), - }, - } - case state.EventPayloadCheckServiceNode: - e.Payload = &pbsubscribe.Event_ServiceHealth{ - ServiceHealth: &pbsubscribe.ServiceHealthUpdate{ - Op: p.Op, - // TODO: this could be cached - CheckServiceNode: pbservice.NewCheckServiceNodeFromStructs(p.Value), - }, - } - default: - panic(fmt.Sprintf("unexpected payload: %T: %#v", p, p)) - 
} -} - -func batchEventsFromEventSlice(events []stream.Event) []*pbsubscribe.Event { - result := make([]*pbsubscribe.Event, len(events)) - for i := range events { - event := events[i] - result[i] = &pbsubscribe.Event{Index: event.Index} - setPayload(result[i], event.Payload) - } - return result -} diff --git a/agent/grpc/private/services/subscribe/subscribe_test.go b/agent/grpc/private/services/subscribe/subscribe_test.go index c319590575..c9afbe4952 100644 --- a/agent/grpc/private/services/subscribe/subscribe_test.go +++ b/agent/grpc/private/services/subscribe/subscribe_test.go @@ -956,7 +956,7 @@ func TestNewEventFromSteamEvent(t *testing.T) { fn := func(t *testing.T, tc testCase) { expected := tc.expected - actual := newEventFromStreamEvent(tc.event) + actual := tc.event.Payload.ToSubscriptionEvent(tc.event.Index) prototest.AssertDeepEqual(t, expected, actual, cmpopts.EquateEmpty()) } diff --git a/agent/grpc/public/forward.go b/agent/grpc/public/forward.go new file mode 100644 index 0000000000..398d33d516 --- /dev/null +++ b/agent/grpc/public/forward.go @@ -0,0 +1,16 @@ +package public + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +func ForwardMetadataContext(ctx context.Context) context.Context { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return ctx + } + + return metadata.NewOutgoingContext(ctx, md) +} diff --git a/agent/grpc/public/services/connectca/mock_ACLResolver.go b/agent/grpc/public/services/connectca/mock_ACLResolver.go index ce21ffdebd..a1ff427964 100644 --- a/agent/grpc/public/services/connectca/mock_ACLResolver.go +++ b/agent/grpc/public/services/connectca/mock_ACLResolver.go @@ -1,10 +1,12 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. +// Code generated by mockery v2.11.0. DO NOT EDIT. 
package connectca import ( acl "github.com/hashicorp/consul/acl" mock "github.com/stretchr/testify/mock" + + testing "testing" ) // MockACLResolver is an autogenerated mock type for the ACLResolver type @@ -34,3 +36,12 @@ func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(token string, entMeta *acl return r0, r1 } + +// NewMockACLResolver creates a new instance of MockACLResolver. It also registers a cleanup function to assert the mocks expectations. +func NewMockACLResolver(t testing.TB) *MockACLResolver { + mock := &MockACLResolver{} + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agent/grpc/public/services/connectca/mock_CAManager.go b/agent/grpc/public/services/connectca/mock_CAManager.go index 1034c4b97d..8839344a2d 100644 --- a/agent/grpc/public/services/connectca/mock_CAManager.go +++ b/agent/grpc/public/services/connectca/mock_CAManager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. +// Code generated by mockery v2.11.0. DO NOT EDIT. package connectca @@ -8,6 +8,8 @@ import ( structs "github.com/hashicorp/consul/agent/structs" + testing "testing" + x509 "crypto/x509" ) @@ -38,3 +40,12 @@ func (_m *MockCAManager) AuthorizeAndSignCertificate(csr *x509.CertificateReques return r0, r1 } + +// NewMockCAManager creates a new instance of MockCAManager. It also registers a cleanup function to assert the mocks expectations. 
+func NewMockCAManager(t testing.TB) *MockCAManager { + mock := &MockCAManager{} + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agent/grpc/public/services/connectca/server.go b/agent/grpc/public/services/connectca/server.go index 1407e42d61..f9abd49ee4 100644 --- a/agent/grpc/public/services/connectca/server.go +++ b/agent/grpc/public/services/connectca/server.go @@ -39,12 +39,12 @@ type StateStore interface { AbandonCh() <-chan struct{} } -//go:generate mockery -name ACLResolver -inpkg +//go:generate mockery --name ACLResolver --inpackage type ACLResolver interface { ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (acl.Authorizer, error) } -//go:generate mockery -name CAManager -inpkg +//go:generate mockery --name CAManager --inpackage type CAManager interface { AuthorizeAndSignCertificate(csr *x509.CertificateRequest, authz acl.Authorizer) (*structs.IssuedCert, error) } diff --git a/agent/grpc/public/services/connectca/server_test.go b/agent/grpc/public/services/connectca/server_test.go index def654bf86..b382f88239 100644 --- a/agent/grpc/public/services/connectca/server_test.go +++ b/agent/grpc/public/services/connectca/server_test.go @@ -2,17 +2,15 @@ package connectca import ( "context" - "net" - "sync" "testing" - "time" "github.com/stretchr/testify/require" "google.golang.org/grpc" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/consul/stream" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/grpc/public/testutils" + structs "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto-public/pbconnectca" ) @@ -20,68 +18,26 @@ func noopForwardRPC(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error) return false, nil } -func testStateStore(t *testing.T, publisher state.EventPublisher) *state.Store { +func setupFSMAndPublisher(t *testing.T) (*testutils.FakeFSM, 
state.EventPublisher) { t.Helper() - gc, err := state.NewTombstoneGC(time.Second, time.Millisecond) - require.NoError(t, err) + config := testutils.FakeFSMConfig{ + Register: func(fsm *testutils.FakeFSM, publisher *stream.EventPublisher) { + // register handlers + publisher.RegisterHandler(state.EventTopicCARoots, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { + return fsm.GetStore().CARootsSnapshot(req, buf) + }) + }, + Refresh: []stream.Topic{state.EventTopicCARoots}, + } - return state.NewStateStoreWithEventPublisher(gc, publisher) -} - -type FakeFSM struct { - lock sync.Mutex - store *state.Store - publisher *stream.EventPublisher -} - -func newFakeFSM(t *testing.T, publisher *stream.EventPublisher) *FakeFSM { - t.Helper() - - store := testStateStore(t, publisher) - - fsm := FakeFSM{store: store, publisher: publisher} - - // register handlers - publisher.RegisterHandler(state.EventTopicCARoots, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { - return fsm.GetStore().CARootsSnapshot(req, buf) - }) - - return &fsm -} - -func (f *FakeFSM) GetStore() *state.Store { - f.lock.Lock() - defer f.lock.Unlock() - return f.store -} - -func (f *FakeFSM) ReplaceStore(store *state.Store) { - f.lock.Lock() - defer f.lock.Unlock() - oldStore := f.store - f.store = store - oldStore.Abandon() - f.publisher.RefreshTopic(state.EventTopicCARoots) -} - -func setupFSMAndPublisher(t *testing.T) (*FakeFSM, state.EventPublisher) { - t.Helper() - publisher := stream.NewEventPublisher(10 * time.Second) - - fsm := newFakeFSM(t, publisher) - - ctx, cancel := context.WithCancel(context.Background()) - t.Cleanup(cancel) - go publisher.Run(ctx) - - return fsm, publisher + return testutils.SetupFSMAndPublisher(t, config) } func testClient(t *testing.T, server *Server) pbconnectca.ConnectCAServiceClient { t.Helper() - addr := runTestServer(t, server) + addr := testutils.RunTestServer(t, server) conn, err := 
grpc.DialContext(context.Background(), addr.String(), grpc.WithInsecure()) require.NoError(t, err) @@ -91,18 +47,3 @@ func testClient(t *testing.T, server *Server) pbconnectca.ConnectCAServiceClient return pbconnectca.NewConnectCAServiceClient(conn) } - -func runTestServer(t *testing.T, server *Server) net.Addr { - t.Helper() - - lis, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - - grpcServer := grpc.NewServer() - server.Register(grpcServer) - - go grpcServer.Serve(lis) - t.Cleanup(grpcServer.Stop) - - return lis.Addr() -} diff --git a/agent/grpc/public/services/connectca/sign.go b/agent/grpc/public/services/connectca/sign.go index d6a21d6169..b3ace6d3d0 100644 --- a/agent/grpc/public/services/connectca/sign.go +++ b/agent/grpc/public/services/connectca/sign.go @@ -22,7 +22,7 @@ func (s *Server) Sign(ctx context.Context, req *pbconnectca.SignRequest) (*pbcon return nil, err } - logger := s.Logger.Named("sign").With("request_id", traceID()) + logger := s.Logger.Named("sign").With("request_id", public.TraceID()) logger.Trace("request received") token := public.TokenFromContext(ctx) @@ -48,6 +48,7 @@ func (s *Server) Sign(ctx context.Context, req *pbconnectca.SignRequest) (*pbcon var rsp *pbconnectca.SignResponse handled, err := s.ForwardRPC(&rpcInfo, func(conn *grpc.ClientConn) error { logger.Trace("forwarding RPC") + ctx := public.ForwardMetadataContext(ctx) var err error rsp, err = pbconnectca.NewConnectCAServiceClient(conn).Sign(ctx, req) return err diff --git a/agent/grpc/public/services/connectca/sign_test.go b/agent/grpc/public/services/connectca/sign_test.go index 600b1056c4..a4f891b8ca 100644 --- a/agent/grpc/public/services/connectca/sign_test.go +++ b/agent/grpc/public/services/connectca/sign_test.go @@ -15,6 +15,7 @@ import ( acl "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/grpc/public/testutils" "github.com/hashicorp/consul/agent/structs" 
"github.com/hashicorp/consul/proto-public/pbconnectca" ) @@ -231,7 +232,7 @@ func TestSign_RPCForwarding(t *testing.T) { ForwardRPC: noopForwardRPC, ConnectEnabled: true, }) - leaderConn, err := grpc.Dial(runTestServer(t, leader).String(), grpc.WithInsecure()) + leaderConn, err := grpc.Dial(testutils.RunTestServer(t, leader).String(), grpc.WithInsecure()) require.NoError(t, err) follower := NewServer(Config{ diff --git a/agent/grpc/public/services/connectca/watch_roots.go b/agent/grpc/public/services/connectca/watch_roots.go index 1d458b5586..cee37d7aa8 100644 --- a/agent/grpc/public/services/connectca/watch_roots.go +++ b/agent/grpc/public/services/connectca/watch_roots.go @@ -11,7 +11,6 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-uuid" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" @@ -30,7 +29,7 @@ func (s *Server) WatchRoots(_ *emptypb.Empty, serverStream pbconnectca.ConnectCA return err } - logger := s.Logger.Named("watch-roots").With("stream_id", traceID()) + logger := s.Logger.Named("watch-roots").With("request_id", public.TraceID()) logger.Trace("starting stream") defer logger.Trace("stream closed") @@ -181,16 +180,6 @@ func (s *Server) authorize(token string) error { return nil } -// We tag logs with a unique identifier to ease debugging. In the future this -// should probably be a real Open Telemetry trace ID. 
-func traceID() string { - id, err := uuid.GenerateUUID() - if err != nil { - return "" - } - return id -} - func getTrustDomain(store StateStore, logger hclog.Logger) (string, error) { _, cfg, err := store.CAConfig(nil) switch { diff --git a/agent/grpc/public/services/connectca/watch_roots_test.go b/agent/grpc/public/services/connectca/watch_roots_test.go index 1106aa35d8..acaa349f17 100644 --- a/agent/grpc/public/services/connectca/watch_roots_test.go +++ b/agent/grpc/public/services/connectca/watch_roots_test.go @@ -54,7 +54,7 @@ func TestWatchRoots_Success(t *testing.T) { // Mock the ACL Resolver to return an authorizer with `service:write`. aclResolver := &MockACLResolver{} aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). - Return(testutils.TestAuthorizer(t), nil) + Return(testutils.TestAuthorizerServiceWriteAny(t), nil) ctx := public.ContextWithToken(context.Background(), testACLToken) @@ -140,7 +140,7 @@ func TestWatchRoots_ACLTokenInvalidated(t *testing.T) { // first two times it is called (initial connect and first re-auth). aclResolver := &MockACLResolver{} aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). - Return(testutils.TestAuthorizer(t), nil).Twice() + Return(testutils.TestAuthorizerServiceWriteAny(t), nil).Twice() ctx := public.ContextWithToken(context.Background(), testACLToken) @@ -208,7 +208,7 @@ func TestWatchRoots_StateStoreAbandoned(t *testing.T) { // Mock the ACL Resolver to return an authorizer with `service:write`. aclResolver := &MockACLResolver{} aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). - Return(testutils.TestAuthorizer(t), nil) + Return(testutils.TestAuthorizerServiceWriteAny(t), nil) ctx := public.ContextWithToken(context.Background(), testACLToken) @@ -230,7 +230,7 @@ func TestWatchRoots_StateStoreAbandoned(t *testing.T) { mustGetRoots(t, rspCh) // Simulate a snapshot restore. 
- storeB := testStateStore(t, publisher) + storeB := testutils.TestStateStore(t, publisher) rootB := connect.TestCA(t, nil) _, err = storeB.CARootSetCAS(1, 0, structs.CARoots{rootB}) diff --git a/agent/grpc/public/services/dataplane/get_envoy_boostrap_params_test.go b/agent/grpc/public/services/dataplane/get_envoy_boostrap_params_test.go new file mode 100644 index 0000000000..072068861b --- /dev/null +++ b/agent/grpc/public/services/dataplane/get_envoy_boostrap_params_test.go @@ -0,0 +1,260 @@ +package dataplane + +import ( + "context" + "testing" + + acl "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/grpc/public" + "github.com/hashicorp/consul/agent/grpc/public/testutils" + structs "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto-public/pbdataplane" + "github.com/hashicorp/consul/types" + "github.com/hashicorp/go-hclog" + mock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/structpb" +) + +const ( + testToken = "acl-token-get-envoy-bootstrap-params" + proxyServiceID = "web-proxy" + nodeName = "foo" + nodeID = "2980b72b-bd9d-9d7b-d4f9-951bf7508d95" + proxyConfigKey = "envoy_dogstatsd_url" + proxyConfigValue = "udp://127.0.0.1:8125" + serverDC = "dc1" +) + +func testRegisterRequestProxy(t *testing.T) *structs.RegisterRequest { + return &structs.RegisterRequest{ + Datacenter: serverDC, + Node: nodeName, + ID: types.NodeID(nodeID), + Address: "127.0.0.1", + Service: &structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + Service: proxyServiceID, + ID: proxyServiceID, + Address: "127.0.0.2", + Port: 2222, + Proxy: structs.ConnectProxyConfig{ + DestinationServiceName: "web", + Config: map[string]interface{}{ + proxyConfigKey: proxyConfigValue, + }, + }, + EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(), + }, + } +} + +func testRegisterIngressGateway(t 
*testing.T) *structs.RegisterRequest { + registerReq := structs.TestRegisterIngressGateway(t) + registerReq.ID = types.NodeID("2980b72b-bd9d-9d7b-d4f9-951bf7508d95") + registerReq.Service.ID = registerReq.Service.Service + registerReq.Service.Proxy.Config = map[string]interface{}{ + proxyConfigKey: proxyConfigValue, + } + return registerReq +} + +func TestGetEnvoyBootstrapParams_Success(t *testing.T) { + type testCase struct { + name string + registerReq *structs.RegisterRequest + nodeID bool + } + + run := func(t *testing.T, tc testCase) { + store := testutils.TestStateStore(t, nil) + err := store.EnsureRegistration(1, tc.registerReq) + require.NoError(t, err) + + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything). + Return(testutils.TestAuthorizerServiceRead(t, tc.registerReq.Service.ID), nil) + ctx := public.ContextWithToken(context.Background(), testToken) + + server := NewServer(Config{ + GetStore: func() StateStore { return store }, + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + Datacenter: serverDC, + }) + client := testClient(t, server) + + req := &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ServiceId: tc.registerReq.Service.ID, + NodeSpec: &pbdataplane.GetEnvoyBootstrapParamsRequest_NodeName{NodeName: tc.registerReq.Node}} + if tc.nodeID { + req.NodeSpec = &pbdataplane.GetEnvoyBootstrapParamsRequest_NodeId{NodeId: string(tc.registerReq.ID)} + } + resp, err := client.GetEnvoyBootstrapParams(ctx, req) + require.NoError(t, err) + + require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service) + require.Equal(t, serverDC, resp.Datacenter) + require.Equal(t, tc.registerReq.EnterpriseMeta.PartitionOrDefault(), resp.Partition) + require.Equal(t, tc.registerReq.EnterpriseMeta.NamespaceOrDefault(), resp.Namespace) + require.Contains(t, resp.Config.Fields, proxyConfigKey) + require.Equal(t, structpb.NewStringValue(proxyConfigValue), 
resp.Config.Fields[proxyConfigKey]) + require.Equal(t, convertToResponseServiceKind(tc.registerReq.Service.Kind), resp.ServiceKind) + + } + + testCases := []testCase{ + { + name: "lookup service side car proxy by node name", + registerReq: testRegisterRequestProxy(t), + }, + { + name: "lookup service side car proxy by node ID", + registerReq: testRegisterRequestProxy(t), + nodeID: true, + }, + { + name: "lookup ingress gw service by node name", + registerReq: testRegisterIngressGateway(t), + }, + { + name: "lookup ingress gw service by node ID", + registerReq: testRegisterIngressGateway(t), + nodeID: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestGetEnvoyBootstrapParams_Error(t *testing.T) { + type testCase struct { + name string + req *pbdataplane.GetEnvoyBootstrapParamsRequest + expectedErrCode codes.Code + expecteErrMsg string + } + + run := func(t *testing.T, tc testCase) { + aclResolver := &MockACLResolver{} + + aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything). 
+ Return(testutils.TestAuthorizerServiceRead(t, proxyServiceID), nil) + ctx := public.ContextWithToken(context.Background(), testToken) + + store := testutils.TestStateStore(t, nil) + registerReq := testRegisterRequestProxy(t) + err := store.EnsureRegistration(1, registerReq) + require.NoError(t, err) + + server := NewServer(Config{ + GetStore: func() StateStore { return store }, + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + }) + client := testClient(t, server) + + resp, err := client.GetEnvoyBootstrapParams(ctx, tc.req) + require.Nil(t, resp) + require.Error(t, err) + errStatus, ok := status.FromError(err) + require.True(t, ok) + require.Equal(t, tc.expectedErrCode.String(), errStatus.Code().String()) + require.Equal(t, tc.expecteErrMsg, errStatus.Message()) + } + + testCases := []testCase{ + { + name: "lookup-service-by-unregistered-node-name", + req: &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ServiceId: proxyServiceID, + NodeSpec: &pbdataplane.GetEnvoyBootstrapParamsRequest_NodeName{NodeName: "blah"}}, + expectedErrCode: codes.NotFound, + expecteErrMsg: "node not found", + }, + { + name: "lookup-service-by-unregistered-node-id", + req: &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ServiceId: proxyServiceID, + NodeSpec: &pbdataplane.GetEnvoyBootstrapParamsRequest_NodeId{NodeId: "5980b72b-bd9d-9d7b-d4f9-951bf7508d98"}}, + expectedErrCode: codes.NotFound, + expecteErrMsg: "node not found", + }, + { + name: "lookup-service-by-unregistered-service", + req: &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ServiceId: "blah-service", + NodeSpec: &pbdataplane.GetEnvoyBootstrapParamsRequest_NodeName{NodeName: nodeName}}, + expectedErrCode: codes.NotFound, + expecteErrMsg: "Service not found", + }, + { + name: "lookup-service-without-node-details", + req: &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ServiceId: proxyServiceID}, + expectedErrCode: codes.InvalidArgument, + expecteErrMsg: "Node ID or name required to lookup the service", + }, + } + + for 
_, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } + +} + +func TestGetEnvoyBootstrapParams_Unauthenticated(t *testing.T) { + // Mock the ACL resolver to return ErrNotFound. + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything). + Return(nil, acl.ErrNotFound) + ctx := public.ContextWithToken(context.Background(), testToken) + store := testutils.TestStateStore(t, nil) + server := NewServer(Config{ + GetStore: func() StateStore { return store }, + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + }) + client := testClient(t, server) + resp, err := client.GetEnvoyBootstrapParams(ctx, &pbdataplane.GetEnvoyBootstrapParamsRequest{}) + require.Error(t, err) + require.Equal(t, codes.Unauthenticated.String(), status.Code(err).String()) + require.Nil(t, resp) +} + +func TestGetEnvoyBootstrapParams_PermissionDenied(t *testing.T) { + // Mock the ACL resolver to return a deny all authorizer + aclResolver := &MockACLResolver{} + aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything). 
+ Return(acl.DenyAll(), nil) + ctx := public.ContextWithToken(context.Background(), testToken) + store := testutils.TestStateStore(t, nil) + registerReq := structs.TestRegisterRequestProxy(t) + proxyServiceID := "web-sidecar-proxy" + registerReq.Service.ID = proxyServiceID + err := store.EnsureRegistration(1, registerReq) + require.NoError(t, err) + + server := NewServer(Config{ + GetStore: func() StateStore { return store }, + Logger: hclog.NewNullLogger(), + ACLResolver: aclResolver, + }) + client := testClient(t, server) + req := &pbdataplane.GetEnvoyBootstrapParamsRequest{ + ServiceId: proxyServiceID, + NodeSpec: &pbdataplane.GetEnvoyBootstrapParamsRequest_NodeName{NodeName: registerReq.Node}} + + resp, err := client.GetEnvoyBootstrapParams(ctx, req) + require.Error(t, err) + require.Equal(t, codes.PermissionDenied.String(), status.Code(err).String()) + require.Nil(t, resp) +} diff --git a/agent/grpc/public/services/dataplane/get_envoy_bootstrap_params.go b/agent/grpc/public/services/dataplane/get_envoy_bootstrap_params.go new file mode 100644 index 0000000000..c34289ff02 --- /dev/null +++ b/agent/grpc/public/services/dataplane/get_envoy_bootstrap_params.go @@ -0,0 +1,88 @@ +package dataplane + +import ( + "context" + "errors" + "strings" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/structpb" + + acl "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/grpc/public" + structs "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto-public/pbdataplane" +) + +func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.GetEnvoyBootstrapParamsRequest) (*pbdataplane.GetEnvoyBootstrapParamsResponse, error) { + logger := s.Logger.Named("get-envoy-bootstrap-params").With("service_id", req.GetServiceId(), "request_id", public.TraceID()) + + logger.Trace("Started processing request") + defer 
logger.Trace("Finished processing request") + + token := public.TokenFromContext(ctx) + var authzContext acl.AuthorizerContext + entMeta := acl.NewEnterpriseMetaWithPartition(req.GetPartition(), req.GetNamespace()) + authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(token, &entMeta, &authzContext) + if err != nil { + return nil, status.Error(codes.Unauthenticated, err.Error()) + } + + store := s.GetStore() + + _, svc, err := store.ServiceNode(req.GetNodeId(), req.GetNodeName(), req.GetServiceId(), &entMeta, structs.DefaultPeerKeyword) + if err != nil { + logger.Error("Error looking up service", "error", err) + if errors.Is(err, state.ErrNodeNotFound) { + return nil, status.Error(codes.NotFound, err.Error()) + } else if strings.Contains(err.Error(), "Node ID or name required") { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } else { + return nil, status.Error(codes.Internal, "Failure looking up service") + } + } + if svc == nil { + return nil, status.Error(codes.NotFound, "Service not found") + } + + if err := authz.ToAllowAuthorizer().ServiceReadAllowed(svc.ServiceName, &authzContext); err != nil { + return nil, status.Error(codes.PermissionDenied, err.Error()) + } + + // Build out the response + + resp := &pbdataplane.GetEnvoyBootstrapParamsResponse{ + Service: svc.ServiceProxy.DestinationServiceName, + Partition: svc.EnterpriseMeta.PartitionOrDefault(), + Namespace: svc.EnterpriseMeta.NamespaceOrDefault(), + Datacenter: s.Datacenter, + ServiceKind: convertToResponseServiceKind(svc.ServiceKind), + } + + bootstrapConfig, err := structpb.NewStruct(svc.ServiceProxy.Config) + if err != nil { + logger.Error("Error creating the envoy boostrap params config", "error", err) + return nil, status.Error(codes.Unknown, "Error creating the envoy boostrap params config") + } + resp.Config = bootstrapConfig + + return resp, nil +} + +func convertToResponseServiceKind(serviceKind structs.ServiceKind) (respKind pbdataplane.ServiceKind) { + switch 
serviceKind { + case structs.ServiceKindConnectProxy: + respKind = pbdataplane.ServiceKind_CONNECT_PROXY + case structs.ServiceKindMeshGateway: + respKind = pbdataplane.ServiceKind_MESH_GATEWAY + case structs.ServiceKindTerminatingGateway: + respKind = pbdataplane.ServiceKind_TERMINATING_GATEWAY + case structs.ServiceKindIngressGateway: + respKind = pbdataplane.ServiceKind_INGRESS_GATEWAY + case structs.ServiceKindTypical: + respKind = pbdataplane.ServiceKind_TYPICAL + } + return +} diff --git a/agent/grpc/public/services/dataplane/get_supported_features.go b/agent/grpc/public/services/dataplane/get_supported_features.go index f9a8171901..ffb3517e84 100644 --- a/agent/grpc/public/services/dataplane/get_supported_features.go +++ b/agent/grpc/public/services/dataplane/get_supported_features.go @@ -12,14 +12,17 @@ import ( "github.com/hashicorp/consul/proto-public/pbdataplane" ) -func (d *Server) SupportedDataplaneFeatures(ctx context.Context, req *pbdataplane.SupportedDataplaneFeaturesRequest) (*pbdataplane.SupportedDataplaneFeaturesResponse, error) { - d.Logger.Trace("Received request for supported dataplane features") +func (s *Server) GetSupportedDataplaneFeatures(ctx context.Context, req *pbdataplane.GetSupportedDataplaneFeaturesRequest) (*pbdataplane.GetSupportedDataplaneFeaturesResponse, error) { + logger := s.Logger.Named("get-supported-dataplane-features").With("request_id", public.TraceID()) + + logger.Trace("Started processing request") + defer logger.Trace("Finished processing request") // Require the given ACL token to have `service:write` on any service token := public.TokenFromContext(ctx) var authzContext acl.AuthorizerContext entMeta := structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier) - authz, err := d.ACLResolver.ResolveTokenAndDefaultMeta(token, entMeta, &authzContext) + authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(token, entMeta, &authzContext) if err != nil { return nil, status.Error(codes.Unauthenticated, 
err.Error()) } @@ -42,5 +45,5 @@ func (d *Server) SupportedDataplaneFeatures(ctx context.Context, req *pbdataplan }, } - return &pbdataplane.SupportedDataplaneFeaturesResponse{SupportedDataplaneFeatures: supportedFeatures}, nil + return &pbdataplane.GetSupportedDataplaneFeaturesResponse{SupportedDataplaneFeatures: supportedFeatures}, nil } diff --git a/agent/grpc/public/services/dataplane/get_supported_features_test.go b/agent/grpc/public/services/dataplane/get_supported_features_test.go index 36ac7400ca..b1f28af0e3 100644 --- a/agent/grpc/public/services/dataplane/get_supported_features_test.go +++ b/agent/grpc/public/services/dataplane/get_supported_features_test.go @@ -22,15 +22,14 @@ func TestSupportedDataplaneFeatures_Success(t *testing.T) { // Mock the ACL Resolver to return an authorizer with `service:write`. aclResolver := &MockACLResolver{} aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). - Return(testutils.TestAuthorizer(t), nil) + Return(testutils.TestAuthorizerServiceWriteAny(t), nil) ctx := public.ContextWithToken(context.Background(), testACLToken) server := NewServer(Config{ Logger: hclog.NewNullLogger(), ACLResolver: aclResolver, }) - client := testClient(t, server) - resp, err := client.SupportedDataplaneFeatures(ctx, &pbdataplane.SupportedDataplaneFeaturesRequest{}) + resp, err := client.GetSupportedDataplaneFeatures(ctx, &pbdataplane.GetSupportedDataplaneFeaturesRequest{}) require.NoError(t, err) require.Equal(t, 3, len(resp.SupportedDataplaneFeatures)) @@ -59,14 +58,14 @@ func TestSupportedDataplaneFeatures_Unauthenticated(t *testing.T) { ACLResolver: aclResolver, }) client := testClient(t, server) - resp, err := client.SupportedDataplaneFeatures(ctx, &pbdataplane.SupportedDataplaneFeaturesRequest{}) + resp, err := client.GetSupportedDataplaneFeatures(ctx, &pbdataplane.GetSupportedDataplaneFeaturesRequest{}) require.Error(t, err) require.Equal(t, codes.Unauthenticated.String(), status.Code(err).String()) 
require.Nil(t, resp) } func TestSupportedDataplaneFeatures_PermissionDenied(t *testing.T) { - // Mock the ACL resolver to return ErrNotFound. + // Mock the ACL resolver to return a deny all authorizer aclResolver := &MockACLResolver{} aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). Return(acl.DenyAll(), nil) @@ -76,7 +75,7 @@ func TestSupportedDataplaneFeatures_PermissionDenied(t *testing.T) { ACLResolver: aclResolver, }) client := testClient(t, server) - resp, err := client.SupportedDataplaneFeatures(ctx, &pbdataplane.SupportedDataplaneFeaturesRequest{}) + resp, err := client.GetSupportedDataplaneFeatures(ctx, &pbdataplane.GetSupportedDataplaneFeaturesRequest{}) require.Error(t, err) require.Equal(t, codes.PermissionDenied.String(), status.Code(err).String()) require.Nil(t, resp) diff --git a/agent/grpc/public/services/dataplane/mock_ACLResolver.go b/agent/grpc/public/services/dataplane/mock_ACLResolver.go index 39d4b54770..1a73abfc81 100644 --- a/agent/grpc/public/services/dataplane/mock_ACLResolver.go +++ b/agent/grpc/public/services/dataplane/mock_ACLResolver.go @@ -1,11 +1,12 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. +// Code generated by mockery v2.11.0. DO NOT EDIT. package dataplane import ( + acl "github.com/hashicorp/consul/acl" mock "github.com/stretchr/testify/mock" - acl "github.com/hashicorp/consul/acl" + testing "testing" ) // MockACLResolver is an autogenerated mock type for the ACLResolver type @@ -35,3 +36,12 @@ func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *acl.Enter return r0, r1 } + +// NewMockACLResolver creates a new instance of MockACLResolver. It also registers a cleanup function to assert the mocks expectations. 
+func NewMockACLResolver(t testing.TB) *MockACLResolver { + mock := &MockACLResolver{} + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agent/grpc/public/services/dataplane/server.go b/agent/grpc/public/services/dataplane/server.go index 6c05a0d08f..b45f6f38ac 100644 --- a/agent/grpc/public/services/dataplane/server.go +++ b/agent/grpc/public/services/dataplane/server.go @@ -1,10 +1,12 @@ package dataplane import ( - "github.com/hashicorp/go-hclog" "google.golang.org/grpc" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/proto-public/pbdataplane" ) @@ -13,11 +15,18 @@ type Server struct { } type Config struct { + GetStore func() StateStore Logger hclog.Logger ACLResolver ACLResolver + // Datacenter of the Consul server this gRPC server is hosted on + Datacenter string } -//go:generate mockery -name ACLResolver -inpkg +type StateStore interface { + ServiceNode(string, string, string, *acl.EnterpriseMeta, string) (uint64, *structs.ServiceNode, error) +} + +//go:generate mockery --name ACLResolver --inpackage type ACLResolver interface { ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) (acl.Authorizer, error) } diff --git a/agent/grpc/public/services/dataplane/server_test.go b/agent/grpc/public/services/dataplane/server_test.go index 5a9186c5ae..fa0a24b91f 100644 --- a/agent/grpc/public/services/dataplane/server_test.go +++ b/agent/grpc/public/services/dataplane/server_test.go @@ -2,9 +2,9 @@ package dataplane import ( "context" - "net" "testing" + "github.com/hashicorp/consul/agent/grpc/public/testutils" "github.com/hashicorp/consul/proto-public/pbdataplane" "github.com/stretchr/testify/require" "google.golang.org/grpc" @@ -13,7 +13,7 @@ import ( func testClient(t *testing.T, server *Server) pbdataplane.DataplaneServiceClient { t.Helper() - addr := RunTestServer(t, server) + addr := 
testutils.RunTestServer(t, server) conn, err := grpc.DialContext(context.Background(), addr.String(), grpc.WithInsecure()) require.NoError(t, err) @@ -23,18 +23,3 @@ func testClient(t *testing.T, server *Server) pbdataplane.DataplaneServiceClient return pbdataplane.NewDataplaneServiceClient(conn) } - -func RunTestServer(t *testing.T, server *Server) net.Addr { - t.Helper() - - lis, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - - grpcServer := grpc.NewServer() - server.Register(grpcServer) - - go grpcServer.Serve(lis) - t.Cleanup(grpcServer.Stop) - - return lis.Addr() -} diff --git a/agent/grpc/public/services/serverdiscovery/mock_ACLResolver.go b/agent/grpc/public/services/serverdiscovery/mock_ACLResolver.go new file mode 100644 index 0000000000..909e9c6172 --- /dev/null +++ b/agent/grpc/public/services/serverdiscovery/mock_ACLResolver.go @@ -0,0 +1,36 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package serverdiscovery + +import ( + acl "github.com/hashicorp/consul/acl" + mock "github.com/stretchr/testify/mock" +) + +// MockACLResolver is an autogenerated mock type for the ACLResolver type +type MockACLResolver struct { + mock.Mock +} + +// ResolveTokenAndDefaultMeta provides a mock function with given fields: _a0, _a1, _a2 +func (_m *MockACLResolver) ResolveTokenAndDefaultMeta(_a0 string, _a1 *acl.EnterpriseMeta, _a2 *acl.AuthorizerContext) (acl.Authorizer, error) { + ret := _m.Called(_a0, _a1, _a2) + + var r0 acl.Authorizer + if rf, ok := ret.Get(0).(func(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) acl.Authorizer); ok { + r0 = rf(_a0, _a1, _a2) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(acl.Authorizer) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) error); ok { + r1 = rf(_a0, _a1, _a2) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/agent/grpc/public/services/serverdiscovery/server.go 
b/agent/grpc/public/services/serverdiscovery/server.go new file mode 100644 index 0000000000..ec82b47fa3 --- /dev/null +++ b/agent/grpc/public/services/serverdiscovery/server.go @@ -0,0 +1,38 @@ +package serverdiscovery + +import ( + "google.golang.org/grpc" + + "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/proto-public/pbserverdiscovery" +) + +type Server struct { + Config +} + +type Config struct { + Publisher EventPublisher + Logger hclog.Logger + ACLResolver ACLResolver +} + +type EventPublisher interface { + Subscribe(*stream.SubscribeRequest) (*stream.Subscription, error) +} + +//go:generate mockery -name ACLResolver -inpkg +type ACLResolver interface { + ResolveTokenAndDefaultMeta(string, *acl.EnterpriseMeta, *acl.AuthorizerContext) (acl.Authorizer, error) +} + +func NewServer(cfg Config) *Server { + return &Server{cfg} +} + +func (s *Server) Register(grpcServer *grpc.Server) { + pbserverdiscovery.RegisterServerDiscoveryServiceServer(grpcServer, s) +} diff --git a/agent/grpc/public/services/serverdiscovery/server_test.go b/agent/grpc/public/services/serverdiscovery/server_test.go new file mode 100644 index 0000000000..2c26f2a1c4 --- /dev/null +++ b/agent/grpc/public/services/serverdiscovery/server_test.go @@ -0,0 +1,89 @@ +package serverdiscovery + +import ( + "context" + "testing" + "time" + + mock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + + "github.com/hashicorp/consul/agent/consul/autopilotevents" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/grpc/public/testutils" + "github.com/hashicorp/consul/proto-public/pbserverdiscovery" +) + +type mockSnapshotHandler struct { + mock.Mock +} + +func newMockSnapshotHandler(t *testing.T) *mockSnapshotHandler { + handler := &mockSnapshotHandler{} + 
t.Cleanup(func() { + handler.AssertExpectations(t) + }) + return handler +} + +func (m *mockSnapshotHandler) handle(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) { + ret := m.Called(req, buf) + return ret.Get(0).(uint64), ret.Error(1) +} + +func (m *mockSnapshotHandler) expect(token string, requestIndex uint64, eventIndex uint64, payload autopilotevents.EventPayloadReadyServers) { + m.On("handle", stream.SubscribeRequest{ + Topic: autopilotevents.EventTopicReadyServers, + Subject: stream.SubjectNone, + Token: token, + Index: requestIndex, + }, mock.Anything).Once().Run(func(args mock.Arguments) { + buf := args.Get(1).(stream.SnapshotAppender) + buf.Append([]stream.Event{ + { + Topic: autopilotevents.EventTopicReadyServers, + Index: eventIndex, + Payload: payload, + }, + }) + }).Return(eventIndex, nil) +} + +func newMockACLResolver(t *testing.T) *MockACLResolver { + t.Helper() + m := &MockACLResolver{} + t.Cleanup(func() { m.AssertExpectations(t) }) + return m +} + +func setupPublisher(t *testing.T) (*mockSnapshotHandler, state.EventPublisher) { + t.Helper() + + handler := newMockSnapshotHandler(t) + + publisher := stream.NewEventPublisher(10 * time.Second) + publisher.RegisterHandler(autopilotevents.EventTopicReadyServers, handler.handle) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + go publisher.Run(ctx) + + return handler, publisher + +} + +func testClient(t *testing.T, server *Server) pbserverdiscovery.ServerDiscoveryServiceClient { + t.Helper() + + addr := testutils.RunTestServer(t, server) + + conn, err := grpc.DialContext(context.Background(), addr.String(), grpc.WithInsecure()) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, conn.Close()) + }) + + return pbserverdiscovery.NewServerDiscoveryServiceClient(conn) +} diff --git a/agent/grpc/public/services/serverdiscovery/watch_servers.go b/agent/grpc/public/services/serverdiscovery/watch_servers.go new file mode 100644 index 
0000000000..6ceda83ffe --- /dev/null +++ b/agent/grpc/public/services/serverdiscovery/watch_servers.go @@ -0,0 +1,146 @@ +package serverdiscovery + +import ( + "context" + "errors" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/autopilotevents" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/grpc/public" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto-public/pbserverdiscovery" + "github.com/hashicorp/go-hclog" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// WatchServers provides a stream on which you can receive the list of servers +// that are ready to receive incoming requests including stale queries. The +// current set of ready servers is sent immediately at the start of the +// stream and new updates will be sent whenever the set of ready servers changes. +func (s *Server) WatchServers(req *pbserverdiscovery.WatchServersRequest, serverStream pbserverdiscovery.ServerDiscoveryService_WatchServersServer) error { + logger := s.Logger.Named("watch-servers").With("request_id", public.TraceID()) + + logger.Debug("starting stream") + defer logger.Trace("stream closed") + + token := public.TokenFromContext(serverStream.Context()) + + // Serve the ready servers from an EventPublisher subscription. If the subscription is + // closed due to an ACL change, we'll attempt to re-authorize and resume it to + // prevent unnecessarily terminating the stream.
+ var idx uint64 + for { + var err error + idx, err = s.serveReadyServers(token, idx, req, serverStream, logger) + if errors.Is(err, stream.ErrSubForceClosed) { + logger.Trace("subscription force-closed due to an ACL change or snapshot restore, will attempt to re-auth and resume") + } else { + return err + } + } +} + +func (s *Server) serveReadyServers(token string, index uint64, req *pbserverdiscovery.WatchServersRequest, serverStream pbserverdiscovery.ServerDiscoveryService_WatchServersServer, logger hclog.Logger) (uint64, error) { + if err := s.authorize(token); err != nil { + return 0, err + } + + // Start the subscription. + sub, err := s.Publisher.Subscribe(&stream.SubscribeRequest{ + Topic: autopilotevents.EventTopicReadyServers, + Subject: stream.SubjectNone, + Token: token, + Index: index, + }) + if err != nil { + logger.Error("failed to subscribe to server discovery events", "error", err) + return 0, status.Error(codes.Internal, "failed to subscribe to server discovery events") + } + defer sub.Unsubscribe() + + for { + event, err := sub.Next(serverStream.Context()) + switch { + case errors.Is(err, stream.ErrSubForceClosed): + return index, err + case errors.Is(err, context.Canceled): + return 0, nil + case err != nil: + logger.Error("failed to read next event", "error", err) + return index, status.Error(codes.Internal, err.Error()) + } + + // We do not send framing events (e.g. EndOfSnapshot, NewSnapshotToFollow) + // because we send a full list of ready servers on every event, rather than expecting + // clients to maintain a state-machine in the way they do for service health. + if event.IsFramingEvent() { + continue + } + + // Note: this check isn't strictly necessary because the event publishing + // machinery will ensure the index increases monotonically, but it can be + // tricky to faithfully reproduce this in tests (e.g. 
the EventPublisher + // garbage collects topic buffers and snapshots aggressively when streams + // disconnect) so this avoids a bunch of confusing setup code. + if event.Index <= index { + continue + } + + index = event.Index + + rsp, err := eventToResponse(req, event) + if err != nil { + logger.Error("failed to convert event to response", "error", err) + return index, status.Error(codes.Internal, err.Error()) + } + if err := serverStream.Send(rsp); err != nil { + logger.Error("failed to send response", "error", err) + return index, err + } + } +} + +func (s *Server) authorize(token string) error { + // Require the given ACL token to have `service:write` on any service (in any + // partition and namespace). + var authzContext acl.AuthorizerContext + entMeta := structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier) + authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(token, entMeta, &authzContext) + if err != nil { + return status.Error(codes.Unauthenticated, err.Error()) + } + if err := authz.ToAllowAuthorizer().ServiceWriteAnyAllowed(&authzContext); err != nil { + return status.Error(codes.PermissionDenied, err.Error()) + } + return nil +} + +func eventToResponse(req *pbserverdiscovery.WatchServersRequest, event stream.Event) (*pbserverdiscovery.WatchServersResponse, error) { + readyServers, err := autopilotevents.ExtractEventPayload(event) + if err != nil { + return nil, err + } + + var servers []*pbserverdiscovery.Server + + for _, srv := range readyServers { + addr := srv.Address + + wanAddr, ok := srv.TaggedAddresses[structs.TaggedAddressWAN] + if req.Wan && ok { + addr = wanAddr + } + + servers = append(servers, &pbserverdiscovery.Server{ + Id: srv.ID, + Version: srv.Version, + Address: addr, + }) + } + + return &pbserverdiscovery.WatchServersResponse{ + Servers: servers, + }, nil +} diff --git a/agent/grpc/public/services/serverdiscovery/watch_servers_test.go b/agent/grpc/public/services/serverdiscovery/watch_servers_test.go new file mode 
100644 index 0000000000..1409431d94 --- /dev/null +++ b/agent/grpc/public/services/serverdiscovery/watch_servers_test.go @@ -0,0 +1,302 @@ +package serverdiscovery + +import ( + "context" + "errors" + "io" + "testing" + "time" + + acl "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/autopilotevents" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/grpc/public" + "github.com/hashicorp/consul/agent/grpc/public/testutils" + "github.com/hashicorp/consul/proto-public/pbserverdiscovery" + "github.com/hashicorp/consul/proto/prototest" + "github.com/hashicorp/consul/sdk/testutil" + mock "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const testACLToken = "eb61f1ed-65a4-4da6-8d3d-0564bd16c965" + +func TestWatchServers_StreamLifeCycle(t *testing.T) { + // The flow for this test is roughly: + // + // 1. Open a WatchServers stream + // 2. Observe the snapshot message is sent back through + // the stream. + // 3. Publish an event that changes to 2 servers. + // 4. See the corresponding message sent back through the stream. + // 5. Send a NewCloseSubscriptionEvent for the token secret. + // 6. See that a new snapshot is taken and the corresponding message + // gets sent back. If there were multiple subscribers for the topic + // then this should not happen. However with the current EventPublisher + // implementation, whenever the last subscriber for a topic has its + // subscription closed then the publisher will delete the whole topic + // buffer. When that happens, resubscribing will see no snapshot + // cache, or latest event in the buffer and force creating a new snapshot. + // 7. Publish another event to move to 3 servers. + // 8. Ensure that the message gets sent through the stream. Also + // this will validate that no other 1 or 2 server event is + // seen after stream reinitialization. 
+ + srv1 := autopilotevents.ReadyServerInfo{ + ID: "9aeb73f6-e83e-43c1-bdc9-ca5e43efe3e4", + Address: "198.18.0.1", + Version: "1.12.0", + } + srv2 := autopilotevents.ReadyServerInfo{ + ID: "eec8721f-c42b-48da-a5a5-07565158015e", + Address: "198.18.0.2", + Version: "1.12.3", + } + srv3 := autopilotevents.ReadyServerInfo{ + ID: "256796f2-3a38-4f80-8cef-375c3cb3aa1f", + Address: "198.18.0.3", + Version: "1.12.3", + } + + oneServerEventPayload := autopilotevents.EventPayloadReadyServers{srv1} + twoServerEventPayload := autopilotevents.EventPayloadReadyServers{srv1, srv2} + threeServerEventPayload := autopilotevents.EventPayloadReadyServers{srv1, srv2, srv3} + + oneServerResponse := &pbserverdiscovery.WatchServersResponse{ + Servers: []*pbserverdiscovery.Server{ + { + Id: srv1.ID, + Address: srv1.Address, + Version: srv1.Version, + }, + }, + } + + twoServerResponse := &pbserverdiscovery.WatchServersResponse{ + Servers: []*pbserverdiscovery.Server{ + { + Id: srv1.ID, + Address: srv1.Address, + Version: srv1.Version, + }, + { + Id: srv2.ID, + Address: srv2.Address, + Version: srv2.Version, + }, + }, + } + + threeServerResponse := &pbserverdiscovery.WatchServersResponse{ + Servers: []*pbserverdiscovery.Server{ + { + Id: srv1.ID, + Address: srv1.Address, + Version: srv1.Version, + }, + { + Id: srv2.ID, + Address: srv2.Address, + Version: srv2.Version, + }, + { + Id: srv3.ID, + Address: srv3.Address, + Version: srv3.Version, + }, + }, + } + + // setup the event publisher and snapshot handler + handler, publisher := setupPublisher(t) + // we only expect this to be called once. For the rest of the + // test we ought to be able to resume the stream. + handler.expect(testACLToken, 0, 1, oneServerEventPayload) + handler.expect(testACLToken, 2, 3, twoServerEventPayload) + + // setup the mock ACLResolver and its expectations + // 2 times authorization should succeed and the third should fail. 
+ resolver := newMockACLResolver(t) + resolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). + Return(testutils.TestAuthorizerServiceWriteAny(t), nil).Twice() + + // add the token to the requests context + ctx := public.ContextWithToken(context.Background(), testACLToken) + + // setup the server + server := NewServer(Config{ + Publisher: publisher, + Logger: testutil.Logger(t), + ACLResolver: resolver, + }) + + // Run the server and get a test client for it + client := testClient(t, server) + + // 1. Open the WatchServers stream + serverStream, err := client.WatchServers(ctx, &pbserverdiscovery.WatchServersRequest{Wan: false}) + require.NoError(t, err) + + rspCh := handleReadyServersStream(t, serverStream) + + // 2. Observe the snapshot message is sent back through the stream. + rsp := mustGetServers(t, rspCh) + require.NotNil(t, rsp) + prototest.AssertDeepEqual(t, oneServerResponse, rsp) + + // 3. Publish an event that changes to 2 servers. + publisher.Publish([]stream.Event{ + { + Topic: autopilotevents.EventTopicReadyServers, + Index: 2, + Payload: twoServerEventPayload, + }, + }) + + // 4. See the corresponding message sent back through the stream. + rsp = mustGetServers(t, rspCh) + require.NotNil(t, rsp) + prototest.AssertDeepEqual(t, twoServerResponse, rsp) + + // 5. Send a NewCloseSubscriptionEvent for the token secret. + publisher.Publish([]stream.Event{ + stream.NewCloseSubscriptionEvent([]string{testACLToken}), + }) + + // 6. Observe another snapshot message + rsp = mustGetServers(t, rspCh) + require.NotNil(t, rsp) + prototest.AssertDeepEqual(t, twoServerResponse, rsp) + + // 7. Publish another event to move to 3 servers. + publisher.Publish([]stream.Event{ + { + Topic: autopilotevents.EventTopicReadyServers, + Index: 4, + Payload: threeServerEventPayload, + }, + }) + + // 8. Ensure that the message gets sent through the stream. 
Also + // this will validate that no other 1 or 2 server event is + // seen after stream reinitialization. + rsp = mustGetServers(t, rspCh) + require.NotNil(t, rsp) + prototest.AssertDeepEqual(t, threeServerResponse, rsp) +} + +func TestWatchServers_ACLToken_PermissionDenied(t *testing.T) { + // setup the event publisher and snapshot handler + _, publisher := setupPublisher(t) + + resolver := newMockACLResolver(t) + resolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). + Return(acl.DenyAll(), nil).Once() + + // add the token to the requests context + ctx := public.ContextWithToken(context.Background(), testACLToken) + + // setup the server + server := NewServer(Config{ + Publisher: publisher, + Logger: testutil.Logger(t), + ACLResolver: resolver, + }) + + // Run the server and get a test client for it + client := testClient(t, server) + + // 1. Open the WatchServers stream + serverStream, err := client.WatchServers(ctx, &pbserverdiscovery.WatchServersRequest{Wan: false}) + require.NoError(t, err) + rspCh := handleReadyServersStream(t, serverStream) + + // Expect to get a PermissionDenied error immediately. + err = mustGetError(t, rspCh) + require.Equal(t, codes.PermissionDenied.String(), status.Code(err).String()) +} + +func TestWatchServers_ACLToken_Unauthenticated(t *testing.T) { + // setup the event publisher and snapshot handler + _, publisher := setupPublisher(t) + + resolver := newMockACLResolver(t) + resolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything). + Return(nil, acl.ErrNotFound).Once() + + // add the token to the requests context + ctx := public.ContextWithToken(context.Background(), testACLToken) + + // setup the server + server := NewServer(Config{ + Publisher: publisher, + Logger: testutil.Logger(t), + ACLResolver: resolver, + }) + + // Run the server and get a test client for it + client := testClient(t, server) + + // 1. 
Open the WatchServers stream + serverStream, err := client.WatchServers(ctx, &pbserverdiscovery.WatchServersRequest{Wan: false}) + require.NoError(t, err) + rspCh := handleReadyServersStream(t, serverStream) + + // Expect to get an Unauthenticated error immediately. + err = mustGetError(t, rspCh) + require.Equal(t, codes.Unauthenticated.String(), status.Code(err).String()) +} + +func handleReadyServersStream(t *testing.T, stream pbserverdiscovery.ServerDiscoveryService_WatchServersClient) <-chan serversOrError { + t.Helper() + + rspCh := make(chan serversOrError) + go func() { + for { + rsp, err := stream.Recv() + if errors.Is(err, io.EOF) || + errors.Is(err, context.Canceled) || + errors.Is(err, context.DeadlineExceeded) { + return + } + rspCh <- serversOrError{ + rsp: rsp, + err: err, + } + } + }() + return rspCh +} + +func mustGetServers(t *testing.T, ch <-chan serversOrError) *pbserverdiscovery.WatchServersResponse { + t.Helper() + + select { + case rsp := <-ch: + require.NoError(t, rsp.err) + return rsp.rsp + case <-time.After(1 * time.Second): + t.Fatal("timeout waiting for WatchServersResponse") + return nil + } +} + +func mustGetError(t *testing.T, ch <-chan serversOrError) error { + t.Helper() + + select { + case rsp := <-ch: + require.Error(t, rsp.err) + return rsp.err + case <-time.After(1 * time.Second): + t.Fatal("timeout waiting for WatchServersResponse") + return nil + } +} + +type serversOrError struct { + rsp *pbserverdiscovery.WatchServersResponse + err error +} diff --git a/agent/grpc/public/testutils/acl.go b/agent/grpc/public/testutils/acl.go index 3bea248bae..8caacb1052 100644 --- a/agent/grpc/public/testutils/acl.go +++ b/agent/grpc/public/testutils/acl.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/consul/acl" ) -func TestAuthorizer(t *testing.T) acl.Authorizer { +func TestAuthorizerServiceWriteAny(t *testing.T) acl.Authorizer { t.Helper() policy, err := acl.NewPolicyFromSource(` @@ -23,3 +23,22 @@ func TestAuthorizer(t *testing.T) 
acl.Authorizer { return authz } + +func TestAuthorizerServiceRead(t *testing.T, serviceName string) acl.Authorizer { + t.Helper() + + aclRule := &acl.Policy{ + PolicyRules: acl.PolicyRules{ + Services: []*acl.ServiceRule{ + { + Name: serviceName, + Policy: acl.PolicyRead, + }, + }, + }, + } + authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{aclRule}, nil) + require.NoError(t, err) + + return authz +} diff --git a/agent/grpc/public/testutils/fsm.go b/agent/grpc/public/testutils/fsm.go new file mode 100644 index 0000000000..aea426a4ea --- /dev/null +++ b/agent/grpc/public/testutils/fsm.go @@ -0,0 +1,81 @@ +package testutils + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/stretchr/testify/require" +) + +func TestStateStore(t *testing.T, publisher state.EventPublisher) *state.Store { + t.Helper() + + gc, err := state.NewTombstoneGC(time.Second, time.Millisecond) + require.NoError(t, err) + + if publisher == nil { + publisher = stream.NoOpEventPublisher{} + } + + return state.NewStateStoreWithEventPublisher(gc, publisher) +} + +type Registrar func(*FakeFSM, *stream.EventPublisher) + +type FakeFSMConfig struct { + Register Registrar + Refresh []stream.Topic + publisher *stream.EventPublisher +} + +type FakeFSM struct { + config FakeFSMConfig + lock sync.Mutex + store *state.Store +} + +func newFakeFSM(t *testing.T, config FakeFSMConfig) *FakeFSM { + t.Helper() + + store := TestStateStore(t, config.publisher) + + fsm := &FakeFSM{store: store, config: config} + + config.Register(fsm, fsm.config.publisher) + + return fsm +} + +func (f *FakeFSM) GetStore() *state.Store { + f.lock.Lock() + defer f.lock.Unlock() + return f.store +} + +func (f *FakeFSM) ReplaceStore(store *state.Store) { + f.lock.Lock() + defer f.lock.Unlock() + oldStore := f.store + f.store = store + oldStore.Abandon() + for _, topic := range 
f.config.Refresh { + f.config.publisher.RefreshTopic(topic) + } +} + +func SetupFSMAndPublisher(t *testing.T, config FakeFSMConfig) (*FakeFSM, state.EventPublisher) { + t.Helper() + config.publisher = stream.NewEventPublisher(10 * time.Second) + + fsm := newFakeFSM(t, config) + + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + go config.publisher.Run(ctx) + + return fsm, config.publisher +} diff --git a/agent/grpc/public/testutils/server.go b/agent/grpc/public/testutils/server.go new file mode 100644 index 0000000000..53d779d912 --- /dev/null +++ b/agent/grpc/public/testutils/server.go @@ -0,0 +1,30 @@ +package testutils + +import ( + "net" + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +type GRPCService interface { + Register(*grpc.Server) +} + +func RunTestServer(t *testing.T, services ...GRPCService) net.Addr { + t.Helper() + + lis, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + grpcServer := grpc.NewServer() + for _, svc := range services { + svc.Register(grpcServer) + } + + go grpcServer.Serve(lis) + t.Cleanup(grpcServer.Stop) + + return lis.Addr() +} diff --git a/agent/grpc/public/utils.go b/agent/grpc/public/utils.go new file mode 100644 index 0000000000..70d7d1abb5 --- /dev/null +++ b/agent/grpc/public/utils.go @@ -0,0 +1,13 @@ +package public + +import "github.com/hashicorp/go-uuid" + +// We tag logs with a unique identifier to ease debugging. In the future this +// should probably be a real Open Telemetry trace ID. 
+func TraceID() string { + id, err := uuid.GenerateUUID() + if err != nil { + return "" + } + return id +} diff --git a/agent/health_endpoint.go b/agent/health_endpoint.go index 69e7777f68..7f904089d8 100644 --- a/agent/health_endpoint.go +++ b/agent/health_endpoint.go @@ -194,6 +194,8 @@ func (s *HTTPHandlers) healthServiceNodes(resp http.ResponseWriter, req *http.Re return nil, nil } + s.parsePeerName(req, &args) + // Check for tags params := req.URL.Query() if _, ok := params["tag"]; ok { diff --git a/agent/health_endpoint_test.go b/agent/health_endpoint_test.go index baa4c43423..8bf37835c1 100644 --- a/agent/health_endpoint_test.go +++ b/agent/health_endpoint_test.go @@ -607,129 +607,163 @@ func TestHealthServiceNodes(t *testing.T) { t.Parallel() a := NewTestAgent(t, "") - defer a.Shutdown() testrpc.WaitForTestAgent(t, a.RPC, "dc1") - req, _ := http.NewRequest("GET", "/v1/health/service/consul?dc=dc1", nil) - resp := httptest.NewRecorder() - obj, err := a.srv.HealthServiceNodes(resp, req) - if err != nil { - t.Fatalf("err: %v", err) + testingPeerNames := []string{"", "my-peer"} + + suffix := func(peerName string) string { + if peerName == "" { + return "" + } + // TODO(peering): after streaming works, remove the "&near=_agent" part + return "&peer=" + peerName + "&near=_agent" } - assertIndex(t, resp) - - // Should be 1 health check for consul - nodes := obj.(structs.CheckServiceNodes) - if len(nodes) != 1 { - t.Fatalf("bad: %v", obj) - } - - req, _ = http.NewRequest("GET", "/v1/health/service/nope?dc=dc1", nil) - resp = httptest.NewRecorder() - obj, err = a.srv.HealthServiceNodes(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - - assertIndex(t, resp) - - // Should be a non-nil empty list - nodes = obj.(structs.CheckServiceNodes) - if nodes == nil || len(nodes) != 0 { - t.Fatalf("bad: %v", obj) - } - - args := &structs.RegisterRequest{ - Datacenter: "dc1", - Node: "bar", - Address: "127.0.0.1", - Service: &structs.NodeService{ - ID: "test", - 
Service: "test", - }, - } - - var out struct{} - if err := a.RPC("Catalog.Register", args, &out); err != nil { - t.Fatalf("err: %v", err) - } - - req, _ = http.NewRequest("GET", "/v1/health/service/test?dc=dc1", nil) - resp = httptest.NewRecorder() - obj, err = a.srv.HealthServiceNodes(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - - assertIndex(t, resp) - - // Should be a non-nil empty list for checks - nodes = obj.(structs.CheckServiceNodes) - if len(nodes) != 1 || nodes[0].Checks == nil || len(nodes[0].Checks) != 0 { - t.Fatalf("bad: %v", obj) - } - - // Test caching - { - // List instances with cache enabled - req, _ := http.NewRequest("GET", "/v1/health/service/test?cached", nil) + for _, peerName := range testingPeerNames { + req, err := http.NewRequest("GET", "/v1/health/service/consul?dc=dc1"+suffix(peerName), nil) + require.NoError(t, err) resp := httptest.NewRecorder() obj, err := a.srv.HealthServiceNodes(resp, req) require.NoError(t, err) - nodes := obj.(structs.CheckServiceNodes) - assert.Len(t, nodes, 1) - // Should be a cache miss - assert.Equal(t, "MISS", resp.Header().Get("X-Cache")) + assertIndex(t, resp) + + nodes := obj.(structs.CheckServiceNodes) + if peerName == "" { + // Should be 1 health check for consul + require.Len(t, nodes, 1) + } else { + require.NotNil(t, nodes) + require.Len(t, nodes, 0) + } + + req, err = http.NewRequest("GET", "/v1/health/service/nope?dc=dc1"+suffix(peerName), nil) + require.NoError(t, err) + resp = httptest.NewRecorder() + obj, err = a.srv.HealthServiceNodes(resp, req) + require.NoError(t, err) + + assertIndex(t, resp) + + // Should be a non-nil empty list + nodes = obj.(structs.CheckServiceNodes) + require.NotNil(t, nodes) + require.Len(t, nodes, 0) } - { - // List instances with cache enabled - req, _ := http.NewRequest("GET", "/v1/health/service/test?cached", nil) + // TODO(peering): will have to seed this data differently in the future + originalRegister := 
make(map[string]*structs.RegisterRequest) + for _, peerName := range testingPeerNames { + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.1", + PeerName: peerName, + Service: &structs.NodeService{ + ID: "test", + Service: "test", + PeerName: peerName, + }, + } + + var out struct{} + require.NoError(t, a.RPC("Catalog.Register", args, &out)) + originalRegister[peerName] = args + } + + verify := func(t *testing.T, peerName string, nodes structs.CheckServiceNodes) { + require.Len(t, nodes, 1) + require.Equal(t, peerName, nodes[0].Node.PeerName) + require.Equal(t, "bar", nodes[0].Node.Node) + require.Equal(t, peerName, nodes[0].Service.PeerName) + require.Equal(t, "test", nodes[0].Service.Service) + require.NotNil(t, nodes[0].Checks) + require.Len(t, nodes[0].Checks, 0) + } + + for _, peerName := range testingPeerNames { + req, err := http.NewRequest("GET", "/v1/health/service/test?dc=dc1"+suffix(peerName), nil) + require.NoError(t, err) resp := httptest.NewRecorder() obj, err := a.srv.HealthServiceNodes(resp, req) require.NoError(t, err) - nodes := obj.(structs.CheckServiceNodes) - assert.Len(t, nodes, 1) - // Should be a cache HIT now! 
- assert.Equal(t, "HIT", resp.Header().Get("X-Cache")) + assertIndex(t, resp) + + // Should be a non-nil empty list for checks + nodes := obj.(structs.CheckServiceNodes) + verify(t, peerName, nodes) + + // Test caching + { + // List instances with cache enabled + req, err := http.NewRequest("GET", "/v1/health/service/test?cached"+suffix(peerName), nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + obj, err := a.srv.HealthServiceNodes(resp, req) + require.NoError(t, err) + nodes := obj.(structs.CheckServiceNodes) + verify(t, peerName, nodes) + + // Should be a cache miss + require.Equal(t, "MISS", resp.Header().Get("X-Cache")) + } + + { + // List instances with cache enabled + req, err := http.NewRequest("GET", "/v1/health/service/test?cached"+suffix(peerName), nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + obj, err := a.srv.HealthServiceNodes(resp, req) + require.NoError(t, err) + nodes := obj.(structs.CheckServiceNodes) + verify(t, peerName, nodes) + + // Should be a cache HIT now! 
+ require.Equal(t, "HIT", resp.Header().Get("X-Cache")) + } } // Ensure background refresh works { - // Register a new instance of the service - args2 := args - args2.Node = "baz" - args2.Address = "127.0.0.2" - require.NoError(t, a.RPC("Catalog.Register", args, &out)) + // TODO(peering): will have to seed this data differently in the future + for _, peerName := range testingPeerNames { + args := originalRegister[peerName] + // Register a new instance of the service + args2 := *args + args2.Node = "baz" + args2.Address = "127.0.0.2" + var out struct{} + require.NoError(t, a.RPC("Catalog.Register", &args2, &out)) + } - retry.Run(t, func(r *retry.R) { - // List it again - req, _ := http.NewRequest("GET", "/v1/health/service/test?cached", nil) - resp := httptest.NewRecorder() - obj, err := a.srv.HealthServiceNodes(resp, req) - r.Check(err) + for _, peerName := range testingPeerNames { + retry.Run(t, func(r *retry.R) { + // List it again + req, err := http.NewRequest("GET", "/v1/health/service/test?cached"+suffix(peerName), nil) + require.NoError(r, err) + resp := httptest.NewRecorder() + obj, err := a.srv.HealthServiceNodes(resp, req) + require.NoError(r, err) - nodes := obj.(structs.CheckServiceNodes) - if len(nodes) != 2 { - r.Fatalf("Want 2 nodes") - } - header := resp.Header().Get("X-Consul-Index") - if header == "" || header == "0" { - r.Fatalf("Want non-zero header: %q", header) - } - _, err = strconv.ParseUint(header, 10, 64) - r.Check(err) + nodes := obj.(structs.CheckServiceNodes) + require.Len(r, nodes, 2) - // Should be a cache hit! The data should've updated in the cache - // in the background so this should've been fetched directly from - // the cache. 
- if resp.Header().Get("X-Cache") != "HIT" { - r.Fatalf("should be a cache hit") - } - }) + header := resp.Header().Get("X-Consul-Index") + if header == "" || header == "0" { + r.Fatalf("Want non-zero header: %q", header) + } + _, err = strconv.ParseUint(header, 10, 64) + require.NoError(r, err) + + // Should be a cache hit! The data should've updated in the cache + // in the background so this should've been fetched directly from + // the cache. + if resp.Header().Get("X-Cache") != "HIT" { + r.Fatalf("should be a cache hit") + } + }) + } } } diff --git a/agent/http.go b/agent/http.go index 16a3a2150d..6be651c1a9 100644 --- a/agent/http.go +++ b/agent/http.go @@ -1105,6 +1105,12 @@ func (s *HTTPHandlers) parseSource(req *http.Request, source *structs.QuerySourc } } +func (s *HTTPHandlers) parsePeerName(req *http.Request, args *structs.ServiceSpecificRequest) { + if peer := req.URL.Query().Get("peer"); peer != "" { + args.PeerName = peer + } +} + // parseMetaFilter is used to parse the ?node-meta=key:value query parameter, used for // filtering results to nodes with the given metadata key/value func (s *HTTPHandlers) parseMetaFilter(req *http.Request) map[string]string { diff --git a/agent/http_register.go b/agent/http_register.go index 47cdfcf1f9..cbef7fa6cb 100644 --- a/agent/http_register.go +++ b/agent/http_register.go @@ -103,6 +103,10 @@ func init() { registerEndpoint("/v1/operator/autopilot/configuration", []string{"GET", "PUT"}, (*HTTPHandlers).OperatorAutopilotConfiguration) registerEndpoint("/v1/operator/autopilot/health", []string{"GET"}, (*HTTPHandlers).OperatorServerHealth) registerEndpoint("/v1/operator/autopilot/state", []string{"GET"}, (*HTTPHandlers).OperatorAutopilotState) + registerEndpoint("/v1/peering/token", []string{"POST"}, (*HTTPHandlers).PeeringGenerateToken) + registerEndpoint("/v1/peering/initiate", []string{"POST"}, (*HTTPHandlers).PeeringInitiate) + registerEndpoint("/v1/peering/", []string{"GET"}, (*HTTPHandlers).PeeringRead) + 
registerEndpoint("/v1/peerings", []string{"GET"}, (*HTTPHandlers).PeeringList) registerEndpoint("/v1/query", []string{"GET", "POST"}, (*HTTPHandlers).PreparedQueryGeneral) // specific prepared query endpoints have more complex rules for allowed methods, so // the prefix is registered with no methods. diff --git a/agent/peering_endpoint.go b/agent/peering_endpoint.go new file mode 100644 index 0000000000..6138a910c6 --- /dev/null +++ b/agent/peering_endpoint.go @@ -0,0 +1,118 @@ +package agent + +import ( + "fmt" + "net/http" + + "github.com/hashicorp/consul/lib" + "github.com/hashicorp/consul/proto/pbpeering" +) + +// PeeringRead fetches a peering that matches the request parameters. +func (s *HTTPHandlers) PeeringRead(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + name, err := getPathSuffixUnescaped(req.URL.Path, "/v1/peering/") + if err != nil { + return nil, err + } + if name == "" { + return nil, BadRequestError{Reason: "Must specify a name to fetch."} + } + + entMeta := s.agent.AgentEnterpriseMeta() + if err := s.parseEntMetaPartition(req, entMeta); err != nil { + return nil, err + } + + args := pbpeering.PeeringReadRequest{ + Name: name, + Datacenter: s.agent.config.Datacenter, + Partition: entMeta.PartitionOrEmpty(), // should be "" in OSS + } + + result, err := s.agent.rpcClientPeering.PeeringRead(req.Context(), &args) + if err != nil { + return nil, err + } + if result.Peering == nil { + return nil, NotFoundError{} + } + + // TODO(peering): replace with API types + return result.Peering, nil +} + +// PeeringList fetches all peerings in the datacenter in OSS or in a given partition in Consul Enterprise. 
+func (s *HTTPHandlers) PeeringList(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + entMeta := s.agent.AgentEnterpriseMeta() + if err := s.parseEntMetaPartition(req, entMeta); err != nil { + return nil, err + } + + args := pbpeering.PeeringListRequest{ + Datacenter: s.agent.config.Datacenter, + Partition: entMeta.PartitionOrEmpty(), // should be "" in OSS + } + + pbresp, err := s.agent.rpcClientPeering.PeeringList(req.Context(), &args) + if err != nil { + return nil, err + } + return pbresp.Peerings, nil +} + +// PeeringGenerateToken handles POSTs to the /v1/peering/token endpoint. The request +// will always be forwarded via RPC to the local leader. +func (s *HTTPHandlers) PeeringGenerateToken(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + args := pbpeering.GenerateTokenRequest{ + Datacenter: s.agent.config.Datacenter, + } + + if req.Body == nil { + return nil, BadRequestError{Reason: "The peering arguments must be provided in the body"} + } + + if err := lib.DecodeJSON(req.Body, &args); err != nil { + return nil, BadRequestError{Reason: fmt.Sprintf("Body decoding failed: %v", err)} + } + + if args.PeerName == "" { + return nil, BadRequestError{Reason: "PeerName is required in the payload when generating a new peering token."} + } + + entMeta := s.agent.AgentEnterpriseMeta() + if err := s.parseEntMetaPartition(req, entMeta); err != nil { + return nil, err + } + + if args.Partition == "" { + args.Partition = entMeta.PartitionOrEmpty() + } + + return s.agent.rpcClientPeering.GenerateToken(req.Context(), &args) +} + +// PeeringInitiate handles POSTs to the /v1/peering/initiate endpoint. The request +// will always be forwarded via RPC to the local leader. 
+func (s *HTTPHandlers) PeeringInitiate(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + args := pbpeering.InitiateRequest{ + Datacenter: s.agent.config.Datacenter, + } + + if req.Body == nil { + return nil, BadRequestError{Reason: "The peering arguments must be provided in the body"} + } + + if err := lib.DecodeJSON(req.Body, &args); err != nil { + return nil, BadRequestError{Reason: fmt.Sprintf("Body decoding failed: %v", err)} + } + + if args.PeerName == "" { + return nil, BadRequestError{Reason: "PeerName is required in the payload when initiating a peering."} + } + + if args.PeeringToken == "" { + return nil, BadRequestError{Reason: "PeeringToken is required in the payload when initiating a peering."} + } + + return s.agent.rpcClientPeering.Initiate(req.Context(), &args) +} diff --git a/agent/peering_endpoint_oss_test.go b/agent/peering_endpoint_oss_test.go new file mode 100644 index 0000000000..5a6fa1f286 --- /dev/null +++ b/agent/peering_endpoint_oss_test.go @@ -0,0 +1,45 @@ +//go:build !consulent +// +build !consulent + +package agent + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/testrpc" +) + +func TestHTTP_Peering_GenerateToken_OSS_Failure(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + + a := NewTestAgent(t, "") + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + t.Run("Doesn't allow partitions in OSS HTTP requests", func(t *testing.T) { + reqBody := &pbpeering.GenerateTokenRequest{ + PeerName: "peering-a", + } + reqBodyBytes, err := json.Marshal(reqBody) + require.NoError(t, err) + req, err := http.NewRequest("POST", "/v1/peering/token?partition=foo", + bytes.NewReader(reqBodyBytes)) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, 
resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "Partitions are a Consul Enterprise feature") + }) +} diff --git a/agent/peering_endpoint_test.go b/agent/peering_endpoint_test.go new file mode 100644 index 0000000000..0e1840cf0a --- /dev/null +++ b/agent/peering_endpoint_test.go @@ -0,0 +1,312 @@ +package agent + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/testrpc" +) + +var validCA = ` +-----BEGIN CERTIFICATE----- +MIICmDCCAj6gAwIBAgIBBzAKBggqhkjOPQQDAjAWMRQwEgYDVQQDEwtDb25zdWwg +Q0EgNzAeFw0xODA1MjExNjMzMjhaFw0yODA1MTgxNjMzMjhaMBYxFDASBgNVBAMT +C0NvbnN1bCBDQSA3MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAER0qlxjnRcMEr +iSGlH7G7dYU7lzBEmLUSMZkyBbClmyV8+e8WANemjn+PLnCr40If9cmpr7RnC9Qk +GTaLnLiF16OCAXswggF3MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/ +MGgGA1UdDgRhBF8xZjo5MTpjYTo0MTo4ZjphYzo2NzpiZjo1OTpjMjpmYTo0ZTo3 +NTo1YzpkODpmMDo1NTpkZTpiZTo3NTpiODozMzozMTpkNToyNDpiMDowNDpiMzpl +ODo5Nzo1Yjo3ZTBqBgNVHSMEYzBhgF8xZjo5MTpjYTo0MTo4ZjphYzo2NzpiZjo1 +OTpjMjpmYTo0ZTo3NTo1YzpkODpmMDo1NTpkZTpiZTo3NTpiODozMzozMTpkNToy +NDpiMDowNDpiMzplODo5Nzo1Yjo3ZTA/BgNVHREEODA2hjRzcGlmZmU6Ly8xMjRk +ZjVhMC05ODIwLTc2YzMtOWFhOS02ZjYyMTY0YmExYzIuY29uc3VsMD0GA1UdHgEB +/wQzMDGgLzAtgisxMjRkZjVhMC05ODIwLTc2YzMtOWFhOS02ZjYyMTY0YmExYzIu +Y29uc3VsMAoGCCqGSM49BAMCA0gAMEUCIQDzkkI7R+0U12a+zq2EQhP/n2mHmta+ +fs2hBxWIELGwTAIgLdO7RRw+z9nnxCIA6kNl//mIQb+PGItespiHZKAz74Q= +-----END CERTIFICATE----- +` + +func TestHTTP_Peering_GenerateToken(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + t.Run("No Body", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/token", nil) + 
require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "The peering arguments must be provided in the body") + }) + + t.Run("Body Invalid", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/token", bytes.NewReader([]byte("abc"))) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "Body decoding failed:") + }) + + t.Run("No Name", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/token", + bytes.NewReader([]byte(`{}`))) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "PeerName is required") + }) + + // TODO(peering): add more failure cases + + t.Run("Success", func(t *testing.T) { + body := &pbpeering.GenerateTokenRequest{ + PeerName: "peering-a", + } + + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + + req, err := http.NewRequest("POST", "/v1/peering/token", bytes.NewReader(bodyBytes)) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code, "expected 200, got %d: %v", resp.Code, resp.Body.String()) + + var r pbpeering.GenerateTokenResponse + require.NoError(t, json.NewDecoder(resp.Body).Decode(&r)) + + tokenJSON, err := base64.StdEncoding.DecodeString(r.PeeringToken) + require.NoError(t, err) + + var token structs.PeeringToken + require.NoError(t, json.Unmarshal(tokenJSON, &token)) + + require.Nil(t, token.CA) + require.Equal(t, []string{fmt.Sprintf("127.0.0.1:%d", a.config.ServerPort)}, token.ServerAddresses) + require.Equal(t, "server.dc1.consul", 
token.ServerName) + + // The PeerID in the token is randomly generated so we don't assert on its value. + require.NotEmpty(t, token.PeerID) + }) +} + +func TestHTTP_Peering_Initiate(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + t.Run("No Body", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/initiate", nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "The peering arguments must be provided in the body") + }) + + t.Run("Body Invalid", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/initiate", bytes.NewReader([]byte("abc"))) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "Body decoding failed:") + }) + + t.Run("No Name", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/initiate", + bytes.NewReader([]byte(`{}`))) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "PeerName is required") + }) + + t.Run("No Token", func(t *testing.T) { + req, err := http.NewRequest("POST", "/v1/peering/initiate", + bytes.NewReader([]byte(`{"PeerName": "peer1-usw1"}`))) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusBadRequest, resp.Code) + body, _ := io.ReadAll(resp.Body) + require.Contains(t, string(body), "PeeringToken is required") + }) + + // TODO(peering): add more failure cases + + t.Run("Success", func(t *testing.T) { + 
token := structs.PeeringToken{ + CA: []string{validCA}, + ServerName: "server.dc1.consul", + ServerAddresses: []string{fmt.Sprintf("1.2.3.4:%d", 443)}, + PeerID: "a0affd3e-f1c8-4bb9-9168-90fd902c441d", + } + tokenJSON, _ := json.Marshal(&token) + tokenB64 := base64.StdEncoding.EncodeToString(tokenJSON) + body := &pbpeering.InitiateRequest{ + PeerName: "peering-a", + PeeringToken: tokenB64, + } + + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + + req, err := http.NewRequest("POST", "/v1/peering/initiate", bytes.NewReader(bodyBytes)) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code, "expected 200, got %d: %v", resp.Code, resp.Body.String()) + + // success response does not currently return a value so {} is correct + require.Equal(t, "{}", resp.Body.String()) + }) +} + +func TestHTTP_Peering_Read(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Insert peerings directly to state store. + // Note that the state store holds reference to the underlying + // variables; do not modify them after writing. 
+ foo := &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_INITIAL, + PeerCAPems: nil, + PeerServerName: "fooservername", + PeerServerAddresses: []string{"addr1"}, + }, + } + _, err := a.rpcClientPeering.PeeringWrite(ctx, foo) + require.NoError(t, err) + bar := &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: "bar", + State: pbpeering.PeeringState_ACTIVE, + PeerCAPems: nil, + PeerServerName: "barservername", + PeerServerAddresses: []string{"addr1"}, + }, + } + _, err = a.rpcClientPeering.PeeringWrite(ctx, bar) + require.NoError(t, err) + + t.Run("return foo", func(t *testing.T) { + req, err := http.NewRequest("GET", "/v1/peering/foo", nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + // TODO(peering): replace with API types + var pbresp pbpeering.Peering + require.NoError(t, json.NewDecoder(resp.Body).Decode(&pbresp)) + + require.Equal(t, foo.Peering.Name, pbresp.Name) + }) + + t.Run("not found", func(t *testing.T) { + req, err := http.NewRequest("GET", "/v1/peering/baz", nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusNotFound, resp.Code) + }) +} + +func TestHTTP_Peering_List(t *testing.T) { + if testing.Short() { + t.Skip("too slow for testing.Short") + } + + t.Parallel() + a := NewTestAgent(t, "") + + testrpc.WaitForTestAgent(t, a.RPC, "dc1") + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Insert peerings directly to state store. + // Note that the state store holds reference to the underlying + // variables; do not modify them after writing. 
+ foo := &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_INITIAL, + PeerCAPems: nil, + PeerServerName: "fooservername", + PeerServerAddresses: []string{"addr1"}, + }, + } + _, err := a.rpcClientPeering.PeeringWrite(ctx, foo) + require.NoError(t, err) + bar := &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: "bar", + State: pbpeering.PeeringState_ACTIVE, + PeerCAPems: nil, + PeerServerName: "barservername", + PeerServerAddresses: []string{"addr1"}, + }, + } + _, err = a.rpcClientPeering.PeeringWrite(ctx, bar) + require.NoError(t, err) + + t.Run("return all", func(t *testing.T) { + req, err := http.NewRequest("GET", "/v1/peerings", nil) + require.NoError(t, err) + resp := httptest.NewRecorder() + a.srv.h.ServeHTTP(resp, req) + require.Equal(t, http.StatusOK, resp.Code) + + // TODO(peering): replace with API types + var pbresp []*pbpeering.Peering + require.NoError(t, json.NewDecoder(resp.Body).Decode(&pbresp)) + + require.Len(t, pbresp, 2) + }) +} diff --git a/agent/pool/pool.go b/agent/pool/pool.go index 179565dcf3..acfe73065e 100644 --- a/agent/pool/pool.go +++ b/agent/pool/pool.go @@ -31,7 +31,7 @@ type muxSession interface { // streamClient is used to wrap a stream with an RPC client type StreamClient struct { - stream net.Conn + stream *TimeoutConn codec rpc.ClientCodec } @@ -56,6 +56,36 @@ type Conn struct { clientLock sync.Mutex } +// TimeoutConn wraps net.Conn with a read timeout. +// When set, FirstReadTimeout only applies to the very next Read. +// DefaultTimeout is used for any other Read. 
+type TimeoutConn struct { + net.Conn + DefaultTimeout time.Duration + FirstReadTimeout time.Duration +} + +func (c *TimeoutConn) Read(b []byte) (int, error) { + timeout := c.DefaultTimeout + // Apply timeout to first read then zero it out + if c.FirstReadTimeout > 0 { + timeout = c.FirstReadTimeout + c.FirstReadTimeout = 0 + } + var deadline time.Time + if timeout > 0 { + deadline = time.Now().Add(timeout) + } + if err := c.Conn.SetReadDeadline(deadline); err != nil { + return 0, err + } + return c.Conn.Read(b) +} + +func (c *TimeoutConn) Write(b []byte) (int, error) { + return c.Conn.Write(b) +} + func (c *Conn) Close() error { return c.session.Close() } @@ -79,12 +109,14 @@ func (c *Conn) getClient() (*StreamClient, error) { return nil, err } + timeoutStream := &TimeoutConn{Conn: stream, DefaultTimeout: c.pool.Timeout} + // Create the RPC client - codec := msgpackrpc.NewCodecFromHandle(true, true, stream, structs.MsgpackHandle) + codec := msgpackrpc.NewCodecFromHandle(true, true, timeoutStream, structs.MsgpackHandle) // Return a new stream client sc := &StreamClient{ - stream: stream, + stream: timeoutStream, codec: codec, } return sc, nil @@ -101,7 +133,7 @@ func (c *Conn) returnClient(client *StreamClient) { // If this is a Yamux stream, shrink the internal buffers so that // we can GC the idle memory - if ys, ok := client.stream.(*yamux.Stream); ok { + if ys, ok := client.stream.Conn.(*yamux.Stream); ok { ys.Shrink() } } @@ -133,6 +165,13 @@ type ConnPool struct { // TODO: consider refactoring to accept a full yamux.Config instead of a logger Logger *log.Logger + // The default timeout for stream reads/writes + Timeout time.Duration + + // Used for calculating timeouts on RPC requests + MaxQueryTime time.Duration + DefaultQueryTime time.Duration + // The maximum time to keep a connection open MaxTime time.Duration @@ -325,7 +364,7 @@ func (p *ConnPool) dial( tlsRPCType RPCType, ) (net.Conn, HalfCloser, error) { // Try to dial the conn - d := 
&net.Dialer{LocalAddr: p.SrcAddr, Timeout: DefaultDialTimeout} + d := &net.Dialer{LocalAddr: p.SrcAddr, Timeout: p.Timeout} conn, err := d.Dial("tcp", addr.String()) if err != nil { return nil, nil, err @@ -590,6 +629,11 @@ func (p *ConnPool) rpc(dc string, nodeName string, addr net.Addr, method string, return fmt.Errorf("rpc error getting client: %w", err) } + // Use the zero value if the request doesn't implement RPCInfo + if info, ok := args.(structs.RPCInfo); ok { + sc.stream.FirstReadTimeout = info.Timeout(p.Timeout, p.MaxQueryTime, p.DefaultQueryTime) + } + // Make the RPC call err = msgpackrpc.CallWithCodec(sc.codec, method, args, reply) if err != nil { diff --git a/agent/rpc/peering/service.go b/agent/rpc/peering/service.go new file mode 100644 index 0000000000..288669a1f0 --- /dev/null +++ b/agent/rpc/peering/service.go @@ -0,0 +1,741 @@ +package peering + +import ( + "context" + "errors" + "fmt" + "io" + "strconv" + "strings" + "time" + + "github.com/armon/go-metrics" + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-memdb" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/anypb" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/dns" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/proto/pbservice" + "github.com/hashicorp/consul/proto/pbstatus" +) + +var ( + errPeeringTokenEmptyCA = errors.New("peering token CA value is empty") + errPeeringTokenInvalidCA = errors.New("peering token CA value is invalid") + errPeeringTokenEmptyServerAddresses = 
errors.New("peering token server addresses value is empty") + errPeeringTokenEmptyServerName = errors.New("peering token server name value is empty") + errPeeringTokenEmptyPeerID = errors.New("peering token peer ID value is empty") +) + +// errPeeringInvalidServerAddress is returned when an initiate request contains +// an invalid server address. +type errPeeringInvalidServerAddress struct { + addr string +} + +// Error implements the error interface +func (e *errPeeringInvalidServerAddress) Error() string { + return fmt.Sprintf("%s is not a valid peering server address", e.addr) +} + +// Service implements pbpeering.PeeringService to provide RPC operations for +// managing peering relationships. +type Service struct { + Backend Backend + logger hclog.Logger + streams *streamTracker +} + +func NewService(logger hclog.Logger, backend Backend) *Service { + return &Service{ + Backend: backend, + logger: logger, + streams: newStreamTracker(), + } +} + +var _ pbpeering.PeeringServiceServer = (*Service)(nil) + +// Backend defines the core integrations the Peering endpoint depends on. A +// functional implementation will integrate with various subcomponents of Consul +// such as the State store for reading and writing data, the CA machinery for +// providing access to CA data and the RPC system for forwarding requests to +// other servers. +type Backend interface { + // Forward should forward the request to the leader when necessary. + Forward(info structs.RPCInfo, f func(*grpc.ClientConn) error) (handled bool, err error) + + // GetAgentCACertificates returns the CA certificate to be returned in the peering token data + GetAgentCACertificates() ([]string, error) + + // GetServerAddresses returns the addresses used for establishing a peering connection + GetServerAddresses() ([]string, error) + + // GetServerName returns the SNI to be returned in the peering token data which + // will be used by peers when establishing peering connections over TLS. 
+ GetServerName() string + + // EncodeToken packages a peering token into a slice of bytes. + EncodeToken(tok *structs.PeeringToken) ([]byte, error) + + // DecodeToken unpackages a peering token from a slice of bytes. + DecodeToken([]byte) (*structs.PeeringToken, error) + + EnterpriseCheckPartitions(partition string) error + + Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) + + Store() Store + Apply() Apply +} + +// Store provides a read-only interface for querying Peering data. +type Store interface { + PeeringRead(ws memdb.WatchSet, q state.Query) (uint64, *pbpeering.Peering, error) + PeeringList(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.Peering, error) + ExportedServicesForPeer(ws memdb.WatchSet, peerID string) (uint64, []structs.ServiceName, error) + AbandonCh() <-chan struct{} +} + +// Apply provides a write-only interface for persisting Peering data. +type Apply interface { + PeeringWrite(req *pbpeering.PeeringWriteRequest) error + PeeringDelete(req *pbpeering.PeeringDeleteRequest) error + PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error +} + +// GenerateToken implements the PeeringService RPC method to generate a +// peering token which is the initial step in establishing a peering relationship +// with other Consul clusters. 
+func (s *Service) GenerateToken( + ctx context.Context, + req *pbpeering.GenerateTokenRequest, +) (*pbpeering.GenerateTokenResponse, error) { + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { + return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) + } + // validate prior to forwarding to the leader, this saves a network hop + if err := dns.ValidateLabel(req.PeerName); err != nil { + return nil, fmt.Errorf("%s is not a valid peer name: %w", req.PeerName, err) + } + + // TODO(peering): add metrics + // TODO(peering): add tracing + + resp := &pbpeering.GenerateTokenResponse{} + handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error { + var err error + resp, err = pbpeering.NewPeeringServiceClient(conn).GenerateToken(ctx, req) + return err + }) + if handled || err != nil { + return resp, err + } + + ca, err := s.Backend.GetAgentCACertificates() + if err != nil { + return nil, err + } + + serverAddrs, err := s.Backend.GetServerAddresses() + if err != nil { + return nil, err + } + + writeReq := pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: req.PeerName, + + // TODO(peering): Normalize from ACL token once this endpoint is guarded by ACLs. + Partition: req.PartitionOrDefault(), + }, + } + if err := s.Backend.Apply().PeeringWrite(&writeReq); err != nil { + return nil, fmt.Errorf("failed to write peering: %w", err) + } + + q := state.Query{ + Value: strings.ToLower(req.PeerName), + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition), + } + _, peering, err := s.Backend.Store().PeeringRead(nil, q) + if err != nil { + return nil, err + } + if peering == nil { + return nil, fmt.Errorf("peering was deleted while token generation request was in flight") + } + + tok := structs.PeeringToken{ + // Store the UUID so that we can do a global search when handling inbound streams. 
+ PeerID: peering.ID, + CA: ca, + ServerAddresses: serverAddrs, + ServerName: s.Backend.GetServerName(), + } + + encoded, err := s.Backend.EncodeToken(&tok) + if err != nil { + return nil, err + } + resp.PeeringToken = string(encoded) + return resp, err +} + +// Initiate implements the PeeringService RPC method to finalize peering +// registration. Given a valid token output from a peer's GenerateToken endpoint, +// a peering is registered. +func (s *Service) Initiate( + ctx context.Context, + req *pbpeering.InitiateRequest, +) (*pbpeering.InitiateResponse, error) { + // validate prior to forwarding to the leader, this saves a network hop + if err := dns.ValidateLabel(req.PeerName); err != nil { + return nil, fmt.Errorf("%s is not a valid peer name: %w", req.PeerName, err) + } + tok, err := s.Backend.DecodeToken([]byte(req.PeeringToken)) + if err != nil { + return nil, err + } + if err := validatePeeringToken(tok); err != nil { + return nil, err + } + + resp := &pbpeering.InitiateResponse{} + handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error { + var err error + resp, err = pbpeering.NewPeeringServiceClient(conn).Initiate(ctx, req) + return err + }) + if handled || err != nil { + return resp, err + } + + defer metrics.MeasureSince([]string{"peering", "initiate"}, time.Now()) + + // convert ServiceAddress values to strings + serverAddrs := make([]string, len(tok.ServerAddresses)) + for i, addr := range tok.ServerAddresses { + serverAddrs[i] = addr + } + + // as soon as a peering is written with a list of ServerAddresses that is + // non-empty, the leader routine will see the peering and attempt to establish + // a connection with the remote peer. 
+ writeReq := &pbpeering.PeeringWriteRequest{ + Peering: &pbpeering.Peering{ + Name: req.PeerName, + PeerCAPems: tok.CA, + PeerServerAddresses: serverAddrs, + PeerServerName: tok.ServerName, + // uncomment once #1613 lands + // PeerID: tok.PeerID, + }, + } + if err = s.Backend.Apply().PeeringWrite(writeReq); err != nil { + return nil, fmt.Errorf("failed to write peering: %w", err) + } + // resp.Status == 0 + return resp, nil +} + +func (s *Service) PeeringRead(ctx context.Context, req *pbpeering.PeeringReadRequest) (*pbpeering.PeeringReadResponse, error) { + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { + return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) + } + + var resp *pbpeering.PeeringReadResponse + handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error { + var err error + resp, err = pbpeering.NewPeeringServiceClient(conn).PeeringRead(ctx, req) + return err + }) + if handled || err != nil { + return resp, err + } + + defer metrics.MeasureSince([]string{"peering", "read"}, time.Now()) + // TODO(peering): ACL check request token + + // TODO(peering): handle blocking queries + q := state.Query{ + Value: strings.ToLower(req.Name), + EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition)} + _, peering, err := s.Backend.Store().PeeringRead(nil, q) + if err != nil { + return nil, err + } + return &pbpeering.PeeringReadResponse{Peering: peering}, nil +} + +func (s *Service) PeeringList(ctx context.Context, req *pbpeering.PeeringListRequest) (*pbpeering.PeeringListResponse, error) { + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { + return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) + } + + var resp *pbpeering.PeeringListResponse + handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error { + var err error + resp, err = pbpeering.NewPeeringServiceClient(conn).PeeringList(ctx, req) + return err + }) + if handled || err != nil { + return 
resp, err + } + + defer metrics.MeasureSince([]string{"peering", "list"}, time.Now()) + // TODO(peering): ACL check request token + + // TODO(peering): handle blocking queries + _, peerings, err := s.Backend.Store().PeeringList(nil, *structs.NodeEnterpriseMetaInPartition(req.Partition)) + if err != nil { + return nil, err + } + return &pbpeering.PeeringListResponse{Peerings: peerings}, nil +} + +// TODO(peering): As of writing, this method is only used in tests to set up Peerings in the state store. +// Consider removing if we can find another way to populate state store in peering_endpoint_test.go +func (s *Service) PeeringWrite(ctx context.Context, req *pbpeering.PeeringWriteRequest) (*pbpeering.PeeringWriteResponse, error) { + if err := s.Backend.EnterpriseCheckPartitions(req.Peering.Partition); err != nil { + return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) + } + + var resp *pbpeering.PeeringWriteResponse + handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error { + var err error + resp, err = pbpeering.NewPeeringServiceClient(conn).PeeringWrite(ctx, req) + return err + }) + if handled || err != nil { + return resp, err + } + + defer metrics.MeasureSince([]string{"peering", "write"}, time.Now()) + // TODO(peering): ACL check request token + + // TODO(peering): handle blocking queries + err = s.Backend.Apply().PeeringWrite(req) + if err != nil { + return nil, err + } + return &pbpeering.PeeringWriteResponse{}, nil +} + +func (s *Service) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDeleteRequest) (*pbpeering.PeeringDeleteResponse, error) { + if err := s.Backend.EnterpriseCheckPartitions(req.Partition); err != nil { + return nil, grpcstatus.Error(codes.InvalidArgument, err.Error()) + } + + var resp *pbpeering.PeeringDeleteResponse + handled, err := s.Backend.Forward(req, func(conn *grpc.ClientConn) error { + var err error + resp, err = pbpeering.NewPeeringServiceClient(conn).PeeringDelete(ctx, req) + return err + }) + 
if handled || err != nil { + return resp, err + } + + defer metrics.MeasureSince([]string{"peering", "delete"}, time.Now()) + // TODO(peering): ACL check request token + + // TODO(peering): handle blocking queries + err = s.Backend.Apply().PeeringDelete(req) + if err != nil { + return nil, err + } + return &pbpeering.PeeringDeleteResponse{}, nil +} + +type BidirectionalStream interface { + Send(*pbpeering.ReplicationMessage) error + Recv() (*pbpeering.ReplicationMessage, error) + Context() context.Context +} + +// StreamResources handles incoming streaming connections. +func (s *Service) StreamResources(stream pbpeering.PeeringService_StreamResourcesServer) error { + // Initial message on a new stream must be a new subscription request. + first, err := stream.Recv() + if err != nil { + s.logger.Error("failed to establish stream", "error", err) + return err + } + + // TODO(peering) Make request contain a list of resources, so that roots and services can be + // subscribed to with a single request. 
See: + // https://github.com/envoyproxy/data-plane-api/blob/main/envoy/service/discovery/v3/discovery.proto#L46 + req := first.GetRequest() + if req == nil { + return grpcstatus.Error(codes.InvalidArgument, "first message when initiating a peering must be a subscription request") + } + s.logger.Trace("received initial replication request from peer") + logTraceRecv(s.logger, req) + + if req.PeerID == "" { + return grpcstatus.Error(codes.InvalidArgument, "initial subscription request must specify a PeerID") + } + if req.Nonce != "" { + return grpcstatus.Error(codes.InvalidArgument, "initial subscription request must not contain a nonce") + } + if req.ResourceURL != pbpeering.TypeURLService { + return grpcstatus.Error(codes.InvalidArgument, fmt.Sprintf("subscription request to unknown resource URL: %s", req.ResourceURL)) + } + + // TODO(peering): Validate that a peering exists for this peer + // TODO(peering): If the peering is marked as deleted, send a Terminated message and return + // TODO(peering): Store subscription request so that an event publisher can separately handle pushing messages for it + s.logger.Info("accepted initial replication request from peer", "peer_id", req.PeerID) + + // For server peers both of these ID values are the same, because we generated a token with a local ID, + // and the client peer dials using that same ID. + return s.HandleStream(req.PeerID, req.PeerID, stream) +} + +// The localID provided is the locally-generated identifier for the peering. +// The remoteID is an identifier that the remote peer recognizes for the peering. 
+func (s *Service) HandleStream(localID, remoteID string, stream BidirectionalStream) error { + logger := s.logger.Named("stream").With("peer_id", localID) + logger.Trace("handling stream for peer") + + status, err := s.streams.connected(localID) + if err != nil { + return fmt.Errorf("failed to register stream: %v", err) + } + + // TODO(peering) Also need to clear subscriptions associated with the peer + defer s.streams.disconnected(localID) + + mgr := newSubscriptionManager(stream.Context(), logger, s.Backend) + subCh := mgr.subscribe(stream.Context(), localID) + + sub := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + PeerID: remoteID, + }, + }, + } + logTraceSend(logger, sub) + + if err := stream.Send(sub); err != nil { + if err == io.EOF { + logger.Info("stream ended by peer") + status.trackReceiveError(err.Error()) + return nil + } + // TODO(peering) Test error handling in calls to Send/Recv + status.trackSendError(err.Error()) + return fmt.Errorf("failed to send to stream: %v", err) + } + + // TODO(peering): Should this be buffered? + recvChan := make(chan *pbpeering.ReplicationMessage) + go func() { + defer close(recvChan) + for { + msg, err := stream.Recv() + if err == io.EOF { + logger.Info("stream ended by peer") + status.trackReceiveError(err.Error()) + return + } + if e, ok := grpcstatus.FromError(err); ok { + // Cancelling the stream is not an error, that means we or our peer intended to terminate the peering. + if e.Code() == codes.Canceled { + return + } + } + if err != nil { + logger.Error("failed to receive from stream", "error", err) + status.trackReceiveError(err.Error()) + return + } + + logTraceRecv(logger, msg) + recvChan <- msg + } + }() + + for { + select { + // When the doneCh is closed that means that the peering was deleted locally. 
+ case <-status.doneCh: + logger.Info("ending stream") + + term := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Terminated_{ + Terminated: &pbpeering.ReplicationMessage_Terminated{}, + }, + } + logTraceSend(logger, term) + + if err := stream.Send(term); err != nil { + status.trackSendError(err.Error()) + return fmt.Errorf("failed to send to stream: %v", err) + } + + logger.Trace("deleting stream status") + s.streams.deleteStatus(localID) + + return nil + + case msg, open := <-recvChan: + if !open { + // No longer receiving data on the stream. + return nil + } + + if req := msg.GetRequest(); req != nil { + switch { + case req.Nonce == "": + // TODO(peering): This can happen on a client peer since they don't try to receive subscriptions before entering HandleStream. + // Should change that behavior or only allow it that one time. + + case req.Error != nil && (req.Error.Code != int32(code.Code_OK) || req.Error.Message != ""): + logger.Warn("client peer was unable to apply resource", "code", req.Error.Code, "error", req.Error.Message) + status.trackNack(fmt.Sprintf("client peer was unable to apply resource: %s", req.Error.Message)) + + default: + status.trackAck() + } + + continue + } + + if resp := msg.GetResponse(); resp != nil { + req, err := processResponse(resp) + if err != nil { + logger.Error("failed to persist resource", "resourceURL", resp.ResourceURL, "resourceID", resp.ResourceID) + status.trackReceiveError(err.Error()) + } else { + status.trackReceiveSuccess() + } + + logTraceSend(logger, req) + if err := stream.Send(req); err != nil { + status.trackSendError(err.Error()) + return fmt.Errorf("failed to send to stream: %v", err) + } + + continue + } + + if term := msg.GetTerminated(); term != nil { + logger.Info("received peering termination message, cleaning up imported resources") + + // Once marked as terminated, a separate deferred deletion routine will clean up imported resources. 
+ if err := s.Backend.Apply().PeeringTerminateByID(&pbpeering.PeeringTerminateByIDRequest{ID: localID}); err != nil { + return err + } + return nil + } + + case update := <-subCh: + switch { + case strings.HasPrefix(update.CorrelationID, subExportedService): + if err := pushServiceResponse(logger, stream, status, update); err != nil { + return fmt.Errorf("failed to push data for %q: %w", update.CorrelationID, err) + } + + default: + logger.Warn("unrecognized update type from subscription manager: " + update.CorrelationID) + continue + } + } + } +} + +// pushService response handles sending exported service instance updates to the peer cluster. +// Each cache.UpdateEvent will contain all instances for a service name. +// If there are no instances in the event, we consider that to be a de-registration. +func pushServiceResponse(logger hclog.Logger, stream BidirectionalStream, status *lockableStreamStatus, update cache.UpdateEvent) error { + csn, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + if !ok { + logger.Error(fmt.Sprintf("invalid type for response: %T, expected *pbservice.IndexedCheckServiceNodes", update.Result)) + + // Skip this update to avoid locking up peering due to a bad service update. + return nil + } + serviceName := strings.TrimPrefix(update.CorrelationID, subExportedService) + + // If no nodes are present then it's due to one of: + // 1. The service is newly registered or exported and yielded a transient empty update. + // 2. All instances of the service were de-registered. + // 3. The service was un-exported. + // + // We don't distinguish when these three things occurred, but it's safe to send a DELETE Op in all cases, so we do that. + // Case #1 is a no-op for the importing peer. 
+ if len(csn.Nodes) == 0 { + resp := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Response_{ + Response: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + // TODO(peering): Nonce management + Nonce: "", + ResourceID: serviceName, + Operation: pbpeering.ReplicationMessage_Response_DELETE, + }, + }, + } + logTraceSend(logger, resp) + if err := stream.Send(resp); err != nil { + status.trackSendError(err.Error()) + return fmt.Errorf("failed to send to stream: %v", err) + } + return nil + } + + // If there are nodes in the response, we push them as an UPSERT operation. + any, err := ptypes.MarshalAny(csn) + if err != nil { + // Log the error and skip this response to avoid locking up peering due to a bad update event. + logger.Error("failed to marshal service endpoints", "error", err) + return nil + } + resp := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Response_{ + Response: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + // TODO(peering): Nonce management + Nonce: "", + ResourceID: serviceName, + Operation: pbpeering.ReplicationMessage_Response_UPSERT, + Resource: any, + }, + }, + } + logTraceSend(logger, resp) + if err := stream.Send(resp); err != nil { + status.trackSendError(err.Error()) + return fmt.Errorf("failed to send to stream: %v", err) + } + return nil +} + +func (s *Service) StreamStatus(peer string) (resp StreamStatus, found bool) { + return s.streams.streamStatus(peer) +} + +// ConnectedStreams returns a map of connected stream IDs to the corresponding channel for tearing them down. 
+func (s *Service) ConnectedStreams() map[string]chan struct{} { + return s.streams.connectedStreams() +} + +func makeReply(resourceURL, nonce string, errCode code.Code, errMsg string) *pbpeering.ReplicationMessage { + var rpcErr *pbstatus.Status + if errCode != code.Code_OK || errMsg != "" { + rpcErr = &pbstatus.Status{ + Code: int32(errCode), + Message: errMsg, + } + } + + msg := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: resourceURL, + Nonce: nonce, + Error: rpcErr, + }, + }, + } + return msg +} + +func processResponse(resp *pbpeering.ReplicationMessage_Response) (*pbpeering.ReplicationMessage, error) { + var ( + err error + errCode code.Code + errMsg string + ) + + if resp.ResourceURL != pbpeering.TypeURLService { + errCode = code.Code_INVALID_ARGUMENT + err = fmt.Errorf("received response for unknown resource type %q", resp.ResourceURL) + return makeReply(resp.ResourceURL, resp.Nonce, errCode, err.Error()), err + } + + switch resp.Operation { + case pbpeering.ReplicationMessage_Response_UPSERT: + err = handleUpsert(resp.ResourceURL, resp.Resource) + if err != nil { + errCode = code.Code_INTERNAL + errMsg = err.Error() + } + + case pbpeering.ReplicationMessage_Response_DELETE: + err = handleDelete(resp.ResourceURL, resp.ResourceID) + if err != nil { + errCode = code.Code_INTERNAL + errMsg = err.Error() + } + + default: + errCode = code.Code_INVALID_ARGUMENT + + op := pbpeering.ReplicationMessage_Response_Operation_name[int32(resp.Operation)] + if op == "" { + op = strconv.FormatInt(int64(resp.Operation), 10) + } + errMsg = fmt.Sprintf("unsupported operation: %q", op) + + err = errors.New(errMsg) + } + + return makeReply(resp.ResourceURL, resp.Nonce, errCode, errMsg), err +} + +func handleUpsert(resourceURL string, resource *anypb.Any) error { + // TODO(peering): implement + return nil +} + +func handleDelete(resourceURL string, resourceID string) error { + // 
TODO(peering): implement + return nil +} + +func logTraceRecv(logger hclog.Logger, pb proto.Message) { + logTraceProto(logger, pb, true) +} + +func logTraceSend(logger hclog.Logger, pb proto.Message) { + logTraceProto(logger, pb, false) +} + +func logTraceProto(logger hclog.Logger, pb proto.Message, received bool) { + if !logger.IsTrace() { + return + } + + dir := "sent" + if received { + dir = "received" + } + + m := jsonpb.Marshaler{ + Indent: " ", + } + out, err := m.MarshalToString(pb) + if err != nil { + out = "" + } + + logger.Trace("replication message", "direction", dir, "protobuf", out) +} diff --git a/agent/rpc/peering/service_oss_test.go b/agent/rpc/peering/service_oss_test.go new file mode 100644 index 0000000000..8c7633639a --- /dev/null +++ b/agent/rpc/peering/service_oss_test.go @@ -0,0 +1,39 @@ +//go:build !consulent +// +build !consulent + +package peering_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/proto/pbpeering" +) + +func TestPeeringService_RejectsPartition(t *testing.T) { + s := newTestServer(t, nil) + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + t.Run("read", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + req := &pbpeering.PeeringReadRequest{Name: "foo", Partition: "default"} + resp, err := client.PeeringRead(ctx, req) + require.Contains(t, err.Error(), "Partitions are a Consul Enterprise feature") + require.Nil(t, resp) + }) + + t.Run("list", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + req := &pbpeering.PeeringListRequest{Partition: "default"} + resp, err := client.PeeringList(ctx, req) + require.Contains(t, err.Error(), "Partitions are a Consul Enterprise feature") + require.Nil(t, resp) + }) +} diff --git a/agent/rpc/peering/service_test.go b/agent/rpc/peering/service_test.go new file mode 
100644 index 0000000000..a90df37f19 --- /dev/null +++ b/agent/rpc/peering/service_test.go @@ -0,0 +1,414 @@ +package peering_test + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "path" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + gogrpc "google.golang.org/grpc" + + grpc "github.com/hashicorp/consul/agent/grpc/private" + "github.com/hashicorp/consul/agent/grpc/private/resolver" + "github.com/hashicorp/consul/proto/prototest" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul" + "github.com/hashicorp/consul/agent/pool" + "github.com/hashicorp/consul/agent/router" + "github.com/hashicorp/consul/agent/rpc/middleware" + "github.com/hashicorp/consul/agent/rpc/peering" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/token" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/consul/testrpc" + "github.com/hashicorp/consul/tlsutil" + "github.com/hashicorp/consul/types" +) + +func TestPeeringService_GenerateToken(t *testing.T) { + dir := testutil.TempDir(t, "consul") + signer, _, _ := tlsutil.GeneratePrivateKey() + ca, _, _ := tlsutil.GenerateCA(tlsutil.CAOpts{Signer: signer}) + cafile := path.Join(dir, "cacert.pem") + require.NoError(t, ioutil.WriteFile(cafile, []byte(ca), 0600)) + + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, func(c *consul.Config) { + c.SerfLANConfig.MemberlistConfig.AdvertiseAddr = "127.0.0.1" + c.TLSConfig.InternalRPC.CAFile = cafile + c.DataDir = dir + }) + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + req := pbpeering.GenerateTokenRequest{PeerName: "peerB", Datacenter: "dc1"} + resp, err := 
client.GenerateToken(ctx, &req) + require.NoError(t, err) + + tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken) + require.NoError(t, err) + + token := &structs.PeeringToken{} + require.NoError(t, json.Unmarshal(tokenJSON, token)) + require.Equal(t, "server.dc1.consul", token.ServerName) + require.Len(t, token.ServerAddresses, 1) + require.Equal(t, "127.0.0.1:2345", token.ServerAddresses[0]) + require.Equal(t, []string{ca}, token.CA) + + require.NotEmpty(t, token.PeerID) + _, err = uuid.ParseUUID(token.PeerID) + require.NoError(t, err) + + _, peers, err := s.Server.FSM().State().PeeringList(nil, *structs.DefaultEnterpriseMetaInDefaultPartition()) + require.NoError(t, err) + require.Len(t, peers, 1) + + peers[0].ModifyIndex = 0 + peers[0].CreateIndex = 0 + + expect := &pbpeering.Peering{ + Name: "peerB", + Partition: acl.DefaultPartitionName, + ID: token.PeerID, + State: pbpeering.PeeringState_INITIAL, + } + require.Equal(t, expect, peers[0]) +} + +func TestPeeringService_Initiate(t *testing.T) { + validToken := peering.TestPeeringToken("83474a06-cca4-4ff4-99a4-4152929c8160") + validTokenJSON, _ := json.Marshal(&validToken) + validTokenB64 := base64.StdEncoding.EncodeToString(validTokenJSON) + + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, nil) + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + type testcase struct { + name string + req *pbpeering.InitiateRequest + expectResp *pbpeering.InitiateResponse + expectPeering *pbpeering.Peering + expectErr string + } + run := func(t *testing.T, tc testcase) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + resp, err := client.Initiate(ctx, tc.req) + if tc.expectErr != "" { + require.Contains(t, err.Error(), tc.expectErr) + return + } + require.NoError(t, err) + prototest.AssertDeepEqual(t, tc.expectResp, resp) + + // if a peering was expected to be written, try to read it back + if 
tc.expectPeering != nil { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + resp, err := client.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: tc.expectPeering.Name}) + require.NoError(t, err) + // check individual values we care about since we don't know exactly + // what the create/modify indexes will be + require.Equal(t, tc.expectPeering.Name, resp.Peering.Name) + require.Equal(t, tc.expectPeering.Partition, resp.Peering.Partition) + require.Equal(t, tc.expectPeering.State, resp.Peering.State) + require.Equal(t, tc.expectPeering.PeerCAPems, resp.Peering.PeerCAPems) + require.Equal(t, tc.expectPeering.PeerServerAddresses, resp.Peering.PeerServerAddresses) + require.Equal(t, tc.expectPeering.PeerServerName, resp.Peering.PeerServerName) + } + } + tcs := []testcase{ + { + name: "invalid peer name", + req: &pbpeering.InitiateRequest{PeerName: "--AA--"}, + expectErr: "--AA-- is not a valid peer name", + }, + { + name: "invalid token (base64)", + req: &pbpeering.InitiateRequest{ + PeerName: "peer1-usw1", + PeeringToken: "+++/+++", + }, + expectErr: "illegal base64 data", + }, + { + name: "invalid token (JSON)", + req: &pbpeering.InitiateRequest{ + PeerName: "peer1-usw1", + PeeringToken: "Cg==", // base64 of "-" + }, + expectErr: "unexpected end of JSON input", + }, + { + name: "invalid token (empty)", + req: &pbpeering.InitiateRequest{ + PeerName: "peer1-usw1", + PeeringToken: "e30K", // base64 of "{}" + }, + expectErr: "peering token CA value is empty", + }, + { + name: "success", + req: &pbpeering.InitiateRequest{ + PeerName: "peer1-usw1", + PeeringToken: validTokenB64, + }, + expectResp: &pbpeering.InitiateResponse{}, + expectPeering: peering.TestPeering( + "peer1-usw1", + pbpeering.PeeringState_INITIAL, + ), + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} +func TestPeeringService_Read(t *testing.T) { + // TODO(peering): see note on newTestServer, refactor to 
not use this + s := newTestServer(t, nil) + + // insert peering directly to state store + p := &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_INITIAL, + PeerCAPems: nil, + PeerServerName: "test", + PeerServerAddresses: []string{"addr1"}, + } + err := s.Server.FSM().State().PeeringWrite(10, p) + require.NoError(t, err) + + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + type testcase struct { + name string + req *pbpeering.PeeringReadRequest + expect *pbpeering.PeeringReadResponse + expectErr string + } + run := func(t *testing.T, tc testcase) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + resp, err := client.PeeringRead(ctx, tc.req) + if tc.expectErr != "" { + require.Contains(t, err.Error(), tc.expectErr) + return + } + require.NoError(t, err) + prototest.AssertDeepEqual(t, tc.expect, resp) + } + tcs := []testcase{ + { + name: "returns foo", + req: &pbpeering.PeeringReadRequest{Name: "foo"}, + expect: &pbpeering.PeeringReadResponse{Peering: p}, + expectErr: "", + }, + { + name: "bar not found", + req: &pbpeering.PeeringReadRequest{Name: "bar"}, + expect: &pbpeering.PeeringReadResponse{}, + expectErr: "", + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} + +func TestPeeringService_List(t *testing.T) { + // TODO(peering): see note on newTestServer, refactor to not use this + s := newTestServer(t, nil) + + // Insert peerings directly to state store. + // Note that the state store holds reference to the underlying + // variables; do not modify them after writing. 
+ foo := &pbpeering.Peering{ + Name: "foo", + State: pbpeering.PeeringState_INITIAL, + PeerCAPems: nil, + PeerServerName: "fooservername", + PeerServerAddresses: []string{"addr1"}, + } + require.NoError(t, s.Server.FSM().State().PeeringWrite(10, foo)) + bar := &pbpeering.Peering{ + Name: "bar", + State: pbpeering.PeeringState_ACTIVE, + PeerCAPems: nil, + PeerServerName: "barservername", + PeerServerAddresses: []string{"addr1"}, + } + require.NoError(t, s.Server.FSM().State().PeeringWrite(15, bar)) + + client := pbpeering.NewPeeringServiceClient(s.ClientConn(t)) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + resp, err := client.PeeringList(ctx, &pbpeering.PeeringListRequest{}) + require.NoError(t, err) + + expect := &pbpeering.PeeringListResponse{ + Peerings: []*pbpeering.Peering{bar, foo}, + } + prototest.AssertDeepEqual(t, expect, resp) +} + +// newTestServer is copied from partition/service_test.go, with the addition of certs/cas. +// TODO(peering): these are endpoint tests and should live in the agent/consul +// package. Instead, these can be written around a mock client (see testing.go) +// and a mock backend (future) +func newTestServer(t *testing.T, cb func(conf *consul.Config)) testingServer { + t.Helper() + conf := consul.DefaultConfig() + dir := testutil.TempDir(t, "consul") + + conf.Bootstrap = true + conf.Datacenter = "dc1" + conf.DataDir = dir + conf.RPCAddr = &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 2345} + conf.RaftConfig.ElectionTimeout = 200 * time.Millisecond + conf.RaftConfig.LeaderLeaseTimeout = 100 * time.Millisecond + conf.RaftConfig.HeartbeatTimeout = 200 * time.Millisecond + conf.TLSConfig.Domain = "consul" + + nodeID, err := uuid.GenerateUUID() + if err != nil { + t.Fatal(err) + } + conf.NodeID = types.NodeID(nodeID) + + if cb != nil { + cb(conf) + } + + // Apply config to copied fields because many tests only set the old + // values. 
+ conf.ACLResolverSettings.ACLsEnabled = conf.ACLsEnabled + conf.ACLResolverSettings.NodeName = conf.NodeName + conf.ACLResolverSettings.Datacenter = conf.Datacenter + conf.ACLResolverSettings.EnterpriseMeta = *conf.AgentEnterpriseMeta() + + deps := newDefaultDeps(t, conf) + server, err := consul.NewServer(conf, deps, gogrpc.NewServer()) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, server.Shutdown()) + }) + + testrpc.WaitForLeader(t, server.RPC, conf.Datacenter) + + backend := consul.NewPeeringBackend(server, deps.GRPCConnPool) + handler := &peering.Service{Backend: backend} + + grpcServer := gogrpc.NewServer() + pbpeering.RegisterPeeringServiceServer(grpcServer, handler) + + lis, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + t.Cleanup(func() { lis.Close() }) + + g := new(errgroup.Group) + g.Go(func() error { + return grpcServer.Serve(lis) + }) + t.Cleanup(func() { + if grpcServer.Stop(); err != nil { + t.Logf("grpc server shutdown: %v", err) + } + if err := g.Wait(); err != nil { + t.Logf("grpc server error: %v", err) + } + }) + + return testingServer{ + Server: server, + Backend: backend, + Addr: lis.Addr(), + } +} + +func (s testingServer) ClientConn(t *testing.T) *gogrpc.ClientConn { + t.Helper() + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + t.Cleanup(cancel) + + conn, err := gogrpc.DialContext(ctx, s.Addr.String(), gogrpc.WithInsecure()) + require.NoError(t, err) + t.Cleanup(func() { conn.Close() }) + return conn +} + +type testingServer struct { + Server *consul.Server + Addr net.Addr + Backend peering.Backend +} + +// TODO(peering): remove duplication between this and agent/consul tests +func newDefaultDeps(t *testing.T, c *consul.Config) consul.Deps { + t.Helper() + + logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{ + Name: c.NodeName, + Level: hclog.Debug, + Output: testutil.NewLogBuffer(t), + }) + + tls, err := tlsutil.NewConfigurator(c.TLSConfig, logger) + 
require.NoError(t, err, "failed to create tls configuration") + + r := router.NewRouter(logger, c.Datacenter, fmt.Sprintf("%s.%s", c.NodeName, c.Datacenter), nil) + builder := resolver.NewServerResolverBuilder(resolver.Config{}) + resolver.Register(builder) + + connPool := &pool.ConnPool{ + Server: false, + SrcAddr: c.RPCSrcAddr, + Logger: logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}), + MaxTime: 2 * time.Minute, + MaxStreams: 4, + TLSConfigurator: tls, + Datacenter: c.Datacenter, + } + + return consul.Deps{ + Logger: logger, + TLSConfigurator: tls, + Tokens: new(token.Store), + Router: r, + ConnPool: connPool, + GRPCConnPool: grpc.NewClientConnPool(grpc.ClientConnPoolConfig{ + Servers: builder, + TLSWrapper: grpc.TLSWrapper(tls.OutgoingRPCWrapper()), + UseTLSForDC: tls.UseTLS, + DialingFromServer: true, + DialingFromDatacenter: c.Datacenter, + }), + LeaderForwarder: builder, + EnterpriseDeps: newDefaultDepsEnterprise(t, logger, c), + NewRequestRecorderFunc: middleware.NewRequestRecorder, + GetNetRPCInterceptorFunc: middleware.GetNetRPCInterceptor, + } +} diff --git a/agent/rpc/peering/stream_test.go b/agent/rpc/peering/stream_test.go new file mode 100644 index 0000000000..65aa4c0f8b --- /dev/null +++ b/agent/rpc/peering/stream_test.go @@ -0,0 +1,810 @@ +package peering + +import ( + "context" + "io" + "testing" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/stretchr/testify/require" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/proto/pbservice" + "github.com/hashicorp/consul/proto/pbstatus" + "github.com/hashicorp/consul/proto/prototest" + "github.com/hashicorp/consul/sdk/testutil" + 
"github.com/hashicorp/consul/sdk/testutil/retry" +) + +func TestStreamResources_Server_FirstRequest(t *testing.T) { + type testCase struct { + name string + input *pbpeering.ReplicationMessage + wantErr error + } + + run := func(t *testing.T, tc testCase) { + srv := NewService(testutil.Logger(t), nil) + client := newMockClient(context.Background()) + + errCh := make(chan error, 1) + client.errCh = errCh + + go func() { + // Pass errors from server handler into errCh so that they can be seen by the client on Recv(). + // This matches gRPC's behavior when an error is returned by a server. + err := srv.StreamResources(client.replicationStream) + if err != nil { + errCh <- err + } + }() + + err := client.Send(tc.input) + require.NoError(t, err) + + msg, err := client.Recv() + require.Nil(t, msg) + require.Error(t, err) + require.EqualError(t, err, tc.wantErr.Error()) + } + + tt := []testCase{ + { + name: "unexpected response", + input: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Response_{ + Response: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + ResourceID: "api-service", + Nonce: "2", + }, + }, + }, + wantErr: status.Error(codes.InvalidArgument, "first message when initiating a peering must be a subscription request"), + }, + { + name: "missing peer id", + input: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{}, + }, + }, + wantErr: status.Error(codes.InvalidArgument, "initial subscription request must specify a PeerID"), + }, + { + name: "unexpected nonce", + input: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: "63b60245-c475-426b-b314-4588d210859d", + Nonce: "1", + }, + }, + }, + wantErr: status.Error(codes.InvalidArgument, "initial subscription request must not contain a nonce"), + }, + { + name: "unknown resource", + 
input: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: "63b60245-c475-426b-b314-4588d210859d", + ResourceURL: "nomad.Job", + }, + }, + }, + wantErr: status.Error(codes.InvalidArgument, "subscription request to unknown resource URL: nomad.Job"), + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } + +} + +func TestStreamResources_Server_Terminate(t *testing.T) { + publisher := stream.NewEventPublisher(10 * time.Second) + store := newStateStore(t, publisher) + + srv := NewService(testutil.Logger(t), &testStreamBackend{ + store: store, + pub: publisher, + }) + + it := incrementalTime{ + base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + } + srv.streams.timeNow = it.Now + + client := newMockClient(context.Background()) + + errCh := make(chan error, 1) + client.errCh = errCh + + go func() { + // Pass errors from server handler into errCh so that they can be seen by the client on Recv(). + // This matches gRPC's behavior when an error is returned by a server. 
+ if err := srv.StreamResources(client.replicationStream); err != nil { + errCh <- err + } + }() + + // Receive a subscription from a peer + peerID := "63b60245-c475-426b-b314-4588d210859d" + sub := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: peerID, + ResourceURL: pbpeering.TypeURLService, + }, + }, + } + err := client.Send(sub) + require.NoError(t, err) + + runStep(t, "new stream gets tracked", func(t *testing.T) { + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.True(r, status.Connected) + }) + }) + + // Receive subscription to my-peer-B's resources + receivedSub, err := client.Recv() + require.NoError(t, err) + + expect := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + PeerID: peerID, + }, + }, + } + prototest.AssertDeepEqual(t, expect, receivedSub) + + runStep(t, "terminate the stream", func(t *testing.T) { + done := srv.ConnectedStreams()[peerID] + close(done) + + retry.Run(t, func(r *retry.R) { + _, ok := srv.StreamStatus(peerID) + require.False(r, ok) + }) + }) + + receivedTerm, err := client.Recv() + require.NoError(t, err) + expect = &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Terminated_{ + Terminated: &pbpeering.ReplicationMessage_Terminated{}, + }, + } + prototest.AssertDeepEqual(t, expect, receivedTerm) +} + +func TestStreamResources_Server_StreamTracker(t *testing.T) { + publisher := stream.NewEventPublisher(10 * time.Second) + store := newStateStore(t, publisher) + + srv := NewService(testutil.Logger(t), &testStreamBackend{ + store: store, + pub: publisher, + }) + + it := incrementalTime{ + base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + } + srv.streams.timeNow = it.Now + + client := newMockClient(context.Background()) + + 
errCh := make(chan error, 1) + go func() { + errCh <- srv.StreamResources(client.replicationStream) + }() + + peerID := "63b60245-c475-426b-b314-4588d210859d" + sub := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: peerID, + ResourceURL: pbpeering.TypeURLService, + }, + }, + } + err := client.Send(sub) + require.NoError(t, err) + + runStep(t, "new stream gets tracked", func(t *testing.T) { + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.True(r, status.Connected) + }) + }) + + runStep(t, "client receives initial subscription", func(t *testing.T) { + ack, err := client.Recv() + require.NoError(t, err) + + expectAck := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + PeerID: peerID, + Nonce: "", + }, + }, + } + prototest.AssertDeepEqual(t, expectAck, ack) + }) + + var sequence uint64 + var lastSendSuccess time.Time + + runStep(t, "ack tracked as success", func(t *testing.T) { + ack := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: peerID, + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + + // Acks do not have an Error populated in the request + }, + }, + } + err := client.Send(ack) + require.NoError(t, err) + sequence++ + + lastSendSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC() + + expect := StreamStatus{ + Connected: true, + LastAck: lastSendSuccess, + } + + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.Equal(r, expect, status) + }) + }) + + var lastNack time.Time + var lastNackMsg string + + runStep(t, "nack tracked as error", func(t *testing.T) { + nack := &pbpeering.ReplicationMessage{ + Payload: 
&pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: peerID, + ResourceURL: pbpeering.TypeURLService, + Nonce: "2", + Error: &pbstatus.Status{ + Code: int32(code.Code_UNAVAILABLE), + Message: "bad bad not good", + }, + }, + }, + } + err := client.Send(nack) + require.NoError(t, err) + sequence++ + + lastNackMsg = "client peer was unable to apply resource: bad bad not good" + lastNack = it.base.Add(time.Duration(sequence) * time.Second).UTC() + + expect := StreamStatus{ + Connected: true, + LastAck: lastSendSuccess, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + } + + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.Equal(r, expect, status) + }) + }) + + var lastRecvSuccess time.Time + + runStep(t, "response applied locally", func(t *testing.T) { + resp := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Response_{ + Response: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + ResourceID: "api", + Nonce: "21", + Operation: pbpeering.ReplicationMessage_Response_UPSERT, + }, + }, + } + err := client.Send(resp) + require.NoError(t, err) + sequence++ + + ack, err := client.Recv() + require.NoError(t, err) + + expectAck := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "21", + }, + }, + } + prototest.AssertDeepEqual(t, expectAck, ack) + + lastRecvSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC() + + expect := StreamStatus{ + Connected: true, + LastAck: lastSendSuccess, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + LastReceiveSuccess: lastRecvSuccess, + } + + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.Equal(r, expect, status) + }) + }) + + var lastRecvError time.Time + var 
lastRecvErrorMsg string + + runStep(t, "response fails to apply locally", func(t *testing.T) { + resp := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Response_{ + Response: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + ResourceID: "web", + Nonce: "24", + + // Unknown operation gets NACKed + Operation: pbpeering.ReplicationMessage_Response_Unknown, + }, + }, + } + err := client.Send(resp) + require.NoError(t, err) + sequence++ + + ack, err := client.Recv() + require.NoError(t, err) + + expectNack := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "24", + Error: &pbstatus.Status{ + Code: int32(code.Code_INVALID_ARGUMENT), + Message: `unsupported operation: "Unknown"`, + }, + }, + }, + } + prototest.AssertDeepEqual(t, expectNack, ack) + + lastRecvError = it.base.Add(time.Duration(sequence) * time.Second).UTC() + lastRecvErrorMsg = `unsupported operation: "Unknown"` + + expect := StreamStatus{ + Connected: true, + LastAck: lastSendSuccess, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + LastReceiveSuccess: lastRecvSuccess, + LastReceiveError: lastRecvError, + LastReceiveErrorMessage: lastRecvErrorMsg, + } + + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.Equal(r, expect, status) + }) + }) + + runStep(t, "client disconnect marks stream as disconnected", func(t *testing.T) { + client.Close() + + sequence++ + lastRecvError := it.base.Add(time.Duration(sequence) * time.Second).UTC() + + sequence++ + disconnectTime := it.base.Add(time.Duration(sequence) * time.Second).UTC() + + expect := StreamStatus{ + Connected: false, + LastAck: lastSendSuccess, + LastNack: lastNack, + LastNackMessage: lastNackMsg, + DisconnectTime: disconnectTime, + LastReceiveSuccess: lastRecvSuccess, + LastReceiveErrorMessage: 
io.EOF.Error(), + LastReceiveError: lastRecvError, + } + + retry.Run(t, func(r *retry.R) { + status, ok := srv.StreamStatus(peerID) + require.True(r, ok) + require.Equal(r, expect, status) + }) + }) + + select { + case err := <-errCh: + // Client disconnect is not an error, but should make the handler return. + require.NoError(t, err) + case <-time.After(50 * time.Millisecond): + t.Fatalf("timed out waiting for handler to finish") + } +} + +func TestStreamResources_Server_ServiceUpdates(t *testing.T) { + publisher := stream.NewEventPublisher(10 * time.Second) + store := newStateStore(t, publisher) + + // Create a peering + var lastIdx uint64 = 1 + err := store.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "my-peering", + }) + require.NoError(t, err) + + _, p, err := store.PeeringRead(nil, state.Query{Value: "my-peering"}) + require.NoError(t, err) + require.NotNil(t, p) + + srv := NewService(testutil.Logger(t), &testStreamBackend{ + store: store, + pub: publisher, + }) + + client := newMockClient(context.Background()) + + errCh := make(chan error, 1) + client.errCh = errCh + + go func() { + // Pass errors from server handler into errCh so that they can be seen by the client on Recv(). + // This matches gRPC's behavior when an error is returned by a server. 
+ if err := srv.StreamResources(client.replicationStream); err != nil { + errCh <- err + } + }() + + // Issue a services subscription to server + init := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + PeerID: p.ID, + ResourceURL: pbpeering.TypeURLService, + }, + }, + } + require.NoError(t, client.Send(init)) + + // Receive a services subscription from server + receivedSub, err := client.Recv() + require.NoError(t, err) + + expect := &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + PeerID: p.ID, + }, + }, + } + prototest.AssertDeepEqual(t, expect, receivedSub) + + // Register a service that is not yet exported + mysql := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "foo", Address: "10.0.0.1"}, + Service: &structs.NodeService{ID: "mysql-1", Service: "mysql", Port: 5000}, + } + + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mysql.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "foo", mysql.Service)) + + runStep(t, "exporting mysql leads to an UPSERT event", func(t *testing.T) { + entry := &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "mysql", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + { + // Mongo does not get pushed because it does not have instances registered. 
+ Name: "mongo", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + }, + } + lastIdx++ + err = store.EnsureConfigEntry(lastIdx, entry) + require.NoError(t, err) + + retry.Run(t, func(r *retry.R) { + msg, err := client.RecvWithTimeout(100 * time.Millisecond) + require.NoError(r, err) + require.Equal(r, pbpeering.ReplicationMessage_Response_UPSERT, msg.GetResponse().Operation) + require.Equal(r, mysql.Service.CompoundServiceName().String(), msg.GetResponse().ResourceID) + + var nodes pbservice.IndexedCheckServiceNodes + require.NoError(r, ptypes.UnmarshalAny(msg.GetResponse().Resource, &nodes)) + require.Len(r, nodes.Nodes, 1) + }) + }) + + mongo := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "zip", Address: "10.0.0.3"}, + Service: &structs.NodeService{ID: "mongo-1", Service: "mongo", Port: 5000}, + } + + runStep(t, "registering mongo instance leads to an UPSERT event", func(t *testing.T) { + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mongo.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "zip", mongo.Service)) + + retry.Run(t, func(r *retry.R) { + msg, err := client.RecvWithTimeout(100 * time.Millisecond) + require.NoError(r, err) + require.Equal(r, pbpeering.ReplicationMessage_Response_UPSERT, msg.GetResponse().Operation) + require.Equal(r, mongo.Service.CompoundServiceName().String(), msg.GetResponse().ResourceID) + + var nodes pbservice.IndexedCheckServiceNodes + require.NoError(r, ptypes.UnmarshalAny(msg.GetResponse().Resource, &nodes)) + require.Len(r, nodes.Nodes, 1) + }) + }) + + runStep(t, "un-exporting mysql leads to a DELETE event for mysql", func(t *testing.T) { + entry := &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "mongo", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + }, + } + lastIdx++ + err = store.EnsureConfigEntry(lastIdx, entry) + require.NoError(t, err) + + 
retry.Run(t, func(r *retry.R) { + msg, err := client.RecvWithTimeout(100 * time.Millisecond) + require.NoError(r, err) + require.Equal(r, pbpeering.ReplicationMessage_Response_DELETE, msg.GetResponse().Operation) + require.Equal(r, mysql.Service.CompoundServiceName().String(), msg.GetResponse().ResourceID) + require.Nil(r, msg.GetResponse().Resource) + }) + }) + + runStep(t, "deleting the config entry leads to a DELETE event for mongo", func(t *testing.T) { + lastIdx++ + err = store.DeleteConfigEntry(lastIdx, structs.ExportedServices, "default", nil) + require.NoError(t, err) + + retry.Run(t, func(r *retry.R) { + msg, err := client.RecvWithTimeout(100 * time.Millisecond) + require.NoError(r, err) + require.Equal(r, pbpeering.ReplicationMessage_Response_DELETE, msg.GetResponse().Operation) + require.Equal(r, mongo.Service.CompoundServiceName().String(), msg.GetResponse().ResourceID) + require.Nil(r, msg.GetResponse().Resource) + }) + }) +} + +type testStreamBackend struct { + pub state.EventPublisher + store *state.Store +} + +func (b *testStreamBackend) Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) { + return b.pub.Subscribe(req) +} + +func (b *testStreamBackend) Store() Store { + return b.store +} + +func (b *testStreamBackend) Forward(info structs.RPCInfo, f func(conn *grpc.ClientConn) error) (handled bool, err error) { + return true, nil +} + +func (b *testStreamBackend) GetAgentCACertificates() ([]string, error) { + return []string{}, nil +} + +func (b *testStreamBackend) GetServerAddresses() ([]string, error) { + return []string{}, nil +} + +func (b *testStreamBackend) GetServerName() string { + return "" +} + +func (b *testStreamBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error) { + return nil, nil +} + +func (b *testStreamBackend) DecodeToken([]byte) (*structs.PeeringToken, error) { + return nil, nil +} + +func (b *testStreamBackend) EnterpriseCheckPartitions(partition string) error { + return nil +} + +func (b 
*testStreamBackend) Apply() Apply { + return nil +} + +func Test_processResponse(t *testing.T) { + type testCase struct { + name string + in *pbpeering.ReplicationMessage_Response + expect *pbpeering.ReplicationMessage + wantErr bool + } + + run := func(t *testing.T, tc testCase) { + reply, err := processResponse(tc.in) + if tc.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + require.Equal(t, tc.expect, reply) + } + + tt := []testCase{ + { + name: "valid upsert", + in: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + ResourceID: "api", + Nonce: "1", + Operation: pbpeering.ReplicationMessage_Response_UPSERT, + }, + expect: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + }, + }, + }, + wantErr: false, + }, + { + name: "valid delete", + in: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + ResourceID: "api", + Nonce: "1", + Operation: pbpeering.ReplicationMessage_Response_DELETE, + }, + expect: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + }, + }, + }, + wantErr: false, + }, + { + name: "invalid resource url", + in: &pbpeering.ReplicationMessage_Response{ + ResourceURL: "nomad.Job", + Nonce: "1", + Operation: pbpeering.ReplicationMessage_Response_Unknown, + }, + expect: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: "nomad.Job", + Nonce: "1", + Error: &pbstatus.Status{ + Code: int32(code.Code_INVALID_ARGUMENT), + Message: `received response for unknown resource type "nomad.Job"`, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "unknown operation", + in: &pbpeering.ReplicationMessage_Response{ 
+ ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + Operation: pbpeering.ReplicationMessage_Response_Unknown, + }, + expect: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + Error: &pbstatus.Status{ + Code: int32(code.Code_INVALID_ARGUMENT), + Message: `unsupported operation: "Unknown"`, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "out of range operation", + in: &pbpeering.ReplicationMessage_Response{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + Operation: pbpeering.ReplicationMessage_Response_Operation(100000), + }, + expect: &pbpeering.ReplicationMessage{ + Payload: &pbpeering.ReplicationMessage_Request_{ + Request: &pbpeering.ReplicationMessage_Request{ + ResourceURL: pbpeering.TypeURLService, + Nonce: "1", + Error: &pbstatus.Status{ + Code: int32(code.Code_INVALID_ARGUMENT), + Message: `unsupported operation: "100000"`, + }, + }, + }, + }, + wantErr: true, + }, + } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} diff --git a/agent/rpc/peering/stream_tracker.go b/agent/rpc/peering/stream_tracker.go new file mode 100644 index 0000000000..af2cbe1c28 --- /dev/null +++ b/agent/rpc/peering/stream_tracker.go @@ -0,0 +1,212 @@ +package peering + +import ( + "fmt" + "sync" + "time" +) + +// streamTracker contains a map of (PeerID -> StreamStatus). +// As streams are opened and closed we track details about their status. +type streamTracker struct { + mu sync.RWMutex + streams map[string]*lockableStreamStatus + + // timeNow is a shim for testing. + timeNow func() time.Time +} + +func newStreamTracker() *streamTracker { + return &streamTracker{ + streams: make(map[string]*lockableStreamStatus), + timeNow: time.Now, + } +} + +// connected registers a stream for a given peer, and marks it as connected. 
+// It also enforces that there is only one active stream for a peer. +func (t *streamTracker) connected(id string) (*lockableStreamStatus, error) { + t.mu.Lock() + defer t.mu.Unlock() + + status, ok := t.streams[id] + if !ok { + status = newLockableStreamStatus(t.timeNow) + t.streams[id] = status + return status, nil + } + + if status.connected() { + return nil, fmt.Errorf("there is an active stream for the given PeerID %q", id) + } + status.trackConnected() + + return status, nil +} + +// disconnected ensures that if a peer id's stream status is tracked, it is marked as disconnected. +func (t *streamTracker) disconnected(id string) { + t.mu.Lock() + defer t.mu.Unlock() + + if status, ok := t.streams[id]; ok { + status.trackDisconnected() + } +} + +func (t *streamTracker) streamStatus(id string) (resp StreamStatus, found bool) { + t.mu.RLock() + defer t.mu.RUnlock() + + s, ok := t.streams[id] + if !ok { + return StreamStatus{}, false + } + return s.status(), true +} + +func (t *streamTracker) connectedStreams() map[string]chan struct{} { + t.mu.RLock() + defer t.mu.RUnlock() + + resp := make(map[string]chan struct{}) + for peer, status := range t.streams { + if status.connected() { + resp[peer] = status.doneCh + } + } + return resp +} + +func (t *streamTracker) deleteStatus(id string) { + t.mu.Lock() + defer t.mu.Unlock() + + delete(t.streams, id) +} + +type lockableStreamStatus struct { + mu sync.RWMutex + + // timeNow is a shim for testing. + timeNow func() time.Time + + // doneCh allows for shutting down a stream gracefully by sending a termination message + // to the peer before the stream's context is cancelled. + doneCh chan struct{} + + StreamStatus +} + +// StreamStatus contains information about the replication stream to a peer cluster. +// TODO(peering): There's a lot of fields here... +type StreamStatus struct { + // Connected is true when there is an open stream for the peer. 
+ Connected bool + + // If the status is not connected, DisconnectTime tracks when the stream was closed. Else it's zero. + DisconnectTime time.Time + + // LastAck tracks the time we received the last ACK for a resource replicated TO the peer. + LastAck time.Time + + // LastNack tracks the time we received the last NACK for a resource replicated to the peer. + LastNack time.Time + + // LastNackMessage tracks the reported error message associated with the last NACK from a peer. + LastNackMessage string + + // LastSendError tracks the time of the last error sending into the stream. + LastSendError time.Time + + // LastSendErrorMessage tracks the last error message when sending into the stream. + LastSendErrorMessage string + + // LastReceiveSuccess tracks the time we last successfully stored a resource replicated FROM the peer. + LastReceiveSuccess time.Time + + // LastReceiveError tracks either: + // - The time we failed to store a resource replicated FROM the peer. + // - The time of the last error when receiving from the stream. + LastReceiveError time.Time + + // LastReceiveError tracks either: + // - The error message when we failed to store a resource replicated FROM the peer. + // - The last error message when receiving from the stream. 
+ LastReceiveErrorMessage string +} + +func newLockableStreamStatus(now func() time.Time) *lockableStreamStatus { + return &lockableStreamStatus{ + StreamStatus: StreamStatus{ + Connected: true, + }, + timeNow: now, + doneCh: make(chan struct{}), + } +} + +func (s *lockableStreamStatus) trackAck() { + s.mu.Lock() + s.LastAck = s.timeNow().UTC() + s.mu.Unlock() +} + +func (s *lockableStreamStatus) trackSendError(error string) { + s.mu.Lock() + s.LastSendError = s.timeNow().UTC() + s.LastSendErrorMessage = error + s.mu.Unlock() +} + +func (s *lockableStreamStatus) trackReceiveSuccess() { + s.mu.Lock() + s.LastReceiveSuccess = s.timeNow().UTC() + s.mu.Unlock() +} + +func (s *lockableStreamStatus) trackReceiveError(error string) { + s.mu.Lock() + s.LastReceiveError = s.timeNow().UTC() + s.LastReceiveErrorMessage = error + s.mu.Unlock() +} + +func (s *lockableStreamStatus) trackNack(msg string) { + s.mu.Lock() + s.LastNack = s.timeNow().UTC() + s.LastNackMessage = msg + s.mu.Unlock() +} + +func (s *lockableStreamStatus) trackConnected() { + s.mu.Lock() + s.Connected = true + s.DisconnectTime = time.Time{} + s.mu.Unlock() +} + +func (s *lockableStreamStatus) trackDisconnected() { + s.mu.Lock() + s.Connected = false + s.DisconnectTime = s.timeNow().UTC() + s.mu.Unlock() +} + +func (s *lockableStreamStatus) connected() bool { + var resp bool + + s.mu.RLock() + resp = s.Connected + s.mu.RUnlock() + + return resp +} + +func (s *lockableStreamStatus) status() StreamStatus { + s.mu.RLock() + copy := s.StreamStatus + s.mu.RUnlock() + + return copy +} diff --git a/agent/rpc/peering/stream_tracker_test.go b/agent/rpc/peering/stream_tracker_test.go new file mode 100644 index 0000000000..2c055865b4 --- /dev/null +++ b/agent/rpc/peering/stream_tracker_test.go @@ -0,0 +1,162 @@ +package peering + +import ( + "sort" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestStreamTracker_EnsureConnectedDisconnected(t *testing.T) { + tracker := newStreamTracker() + 
peerID := "63b60245-c475-426b-b314-4588d210859d" + + it := incrementalTime{ + base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + } + tracker.timeNow = it.Now + + var ( + statusPtr *lockableStreamStatus + err error + ) + + runStep(t, "new stream", func(t *testing.T) { + statusPtr, err = tracker.connected(peerID) + require.NoError(t, err) + + expect := StreamStatus{ + Connected: true, + } + + status, ok := tracker.streamStatus(peerID) + require.True(t, ok) + require.Equal(t, expect, status) + }) + + runStep(t, "duplicate gets rejected", func(t *testing.T) { + _, err := tracker.connected(peerID) + require.Error(t, err) + require.Contains(t, err.Error(), `there is an active stream for the given PeerID "63b60245-c475-426b-b314-4588d210859d"`) + }) + + var sequence uint64 + var lastSuccess time.Time + + runStep(t, "stream updated", func(t *testing.T) { + statusPtr.trackAck() + sequence++ + + status, ok := tracker.streamStatus(peerID) + require.True(t, ok) + + lastSuccess = it.base.Add(time.Duration(sequence) * time.Second).UTC() + expect := StreamStatus{ + Connected: true, + LastAck: lastSuccess, + } + require.Equal(t, expect, status) + }) + + runStep(t, "disconnect", func(t *testing.T) { + tracker.disconnected(peerID) + sequence++ + + expect := StreamStatus{ + Connected: false, + DisconnectTime: it.base.Add(time.Duration(sequence) * time.Second).UTC(), + LastAck: lastSuccess, + } + status, ok := tracker.streamStatus(peerID) + require.True(t, ok) + require.Equal(t, expect, status) + }) + + runStep(t, "re-connect", func(t *testing.T) { + _, err := tracker.connected(peerID) + require.NoError(t, err) + + expect := StreamStatus{ + Connected: true, + LastAck: lastSuccess, + + // DisconnectTime gets cleared on re-connect. 
+ } + + status, ok := tracker.streamStatus(peerID) + require.True(t, ok) + require.Equal(t, expect, status) + }) + + runStep(t, "delete", func(t *testing.T) { + tracker.deleteStatus(peerID) + + status, ok := tracker.streamStatus(peerID) + require.False(t, ok) + require.Zero(t, status) + }) +} + +func TestStreamTracker_connectedStreams(t *testing.T) { + type testCase struct { + name string + setup func(t *testing.T, s *streamTracker) + expect []string + } + + run := func(t *testing.T, tc testCase) { + tracker := newStreamTracker() + if tc.setup != nil { + tc.setup(t, tracker) + } + + streams := tracker.connectedStreams() + + var keys []string + for key := range streams { + keys = append(keys, key) + } + sort.Strings(keys) + + require.Equal(t, tc.expect, keys) + } + + tt := []testCase{ + { + name: "no streams", + expect: nil, + }, + { + name: "all streams active", + setup: func(t *testing.T, s *streamTracker) { + _, err := s.connected("foo") + require.NoError(t, err) + + _, err = s.connected("bar") + require.NoError(t, err) + }, + expect: []string{"bar", "foo"}, + }, + { + name: "mixed active and inactive", + setup: func(t *testing.T, s *streamTracker) { + status, err := s.connected("foo") + require.NoError(t, err) + + // Mark foo as disconnected to avoid showing it as an active stream + status.trackDisconnected() + + _, err = s.connected("bar") + require.NoError(t, err) + }, + expect: []string{"bar"}, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + run(t, tc) + }) + } +} diff --git a/agent/rpc/peering/subscription_manager.go b/agent/rpc/peering/subscription_manager.go new file mode 100644 index 0000000000..bd90168d7c --- /dev/null +++ b/agent/rpc/peering/subscription_manager.go @@ -0,0 +1,149 @@ +package peering + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-memdb" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/structs" + 
"github.com/hashicorp/consul/agent/submatview" + "github.com/hashicorp/consul/lib/retry" + "github.com/hashicorp/consul/proto/pbservice" +) + +type MaterializedViewStore interface { + Get(ctx context.Context, req submatview.Request) (submatview.Result, error) + Notify(ctx context.Context, req submatview.Request, cID string, ch chan<- cache.UpdateEvent) error +} + +type SubscriptionBackend interface { + Subscriber + Store() Store +} + +// subscriptionManager handlers requests to subscribe to events from an events publisher. +type subscriptionManager struct { + logger hclog.Logger + viewStore MaterializedViewStore + backend SubscriptionBackend + + // watchedServices is a map of exported services to a cancel function for their subscription notifier. + watchedServices map[structs.ServiceName]context.CancelFunc +} + +// TODO(peering): Maybe centralize so that there is a single manager per datacenter, rather than per peering. +func newSubscriptionManager(ctx context.Context, logger hclog.Logger, backend SubscriptionBackend) *subscriptionManager { + logger = logger.Named("subscriptions") + store := submatview.NewStore(logger.Named("viewstore")) + go store.Run(ctx) + + return &subscriptionManager{ + logger: logger, + viewStore: store, + backend: backend, + watchedServices: make(map[structs.ServiceName]context.CancelFunc), + } +} + +// subscribe returns a channel that will contain updates to exported service instances for a given peer. 
+func (m *subscriptionManager) subscribe(ctx context.Context, peerID string) <-chan cache.UpdateEvent { + updateCh := make(chan cache.UpdateEvent, 1) + go m.syncSubscriptions(ctx, peerID, updateCh) + + return updateCh +} + +func (m *subscriptionManager) syncSubscriptions(ctx context.Context, peerID string, updateCh chan<- cache.UpdateEvent) { + waiter := &retry.Waiter{ + MinFailures: 1, + Factor: 500 * time.Millisecond, + MaxWait: 60 * time.Second, + Jitter: retry.NewJitter(100), + } + + for { + if err := m.syncSubscriptionsAndBlock(ctx, peerID, updateCh); err != nil { + m.logger.Error("failed to sync subscriptions", "error", err) + } + + if err := waiter.Wait(ctx); err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { + m.logger.Error("failed to wait before re-trying sync", "error", err) + } + + select { + case <-ctx.Done(): + return + default: + } + } +} + +// syncSubscriptionsAndBlock ensures that the subscriptions to the subscription backend +// match the list of services exported to the peer. +func (m *subscriptionManager) syncSubscriptionsAndBlock(ctx context.Context, peerID string, updateCh chan<- cache.UpdateEvent) error { + store := m.backend.Store() + + ws := memdb.NewWatchSet() + ws.Add(store.AbandonCh()) + ws.Add(ctx.Done()) + + // Get exported services for peer id + _, services, err := store.ExportedServicesForPeer(ws, peerID) + if err != nil { + return fmt.Errorf("failed to watch exported services for peer %q: %w", peerID, err) + } + + // seen contains the set of exported service names and is used to reconcile the list of watched services. + seen := make(map[structs.ServiceName]struct{}) + + // Ensure there is a subscription for each service exported to the peer. + for _, svc := range services { + seen[svc] = struct{}{} + + if _, ok := m.watchedServices[svc]; ok { + // Exported service is already being watched, nothing to do. 
+ continue + } + + notifyCtx, cancel := context.WithCancel(ctx) + m.watchedServices[svc] = cancel + + if err := m.Notify(notifyCtx, svc, updateCh); err != nil { + m.logger.Error("failed to subscribe to service", "service", svc.String()) + continue + } + } + + // For every subscription without an exported service, call the associated cancel fn. + for svc, cancel := range m.watchedServices { + if _, ok := seen[svc]; !ok { + cancel() + + // Send an empty event to the stream handler to trigger sending a DELETE message. + // Cancelling the subscription context above is necessary, but does not yield a useful signal on its own. + updateCh <- cache.UpdateEvent{ + CorrelationID: subExportedService + svc.String(), + Result: &pbservice.IndexedCheckServiceNodes{}, + } + } + } + + // Block for any changes to the state store. + ws.WatchCh(ctx) + return nil +} + +const ( + subExportedService = "exported-service:" +) + +// Notify the given channel when there are updates to the requested service. +func (m *subscriptionManager) Notify(ctx context.Context, svc structs.ServiceName, updateCh chan<- cache.UpdateEvent) error { + sr := newExportedServiceRequest(m.logger, svc, m.backend) + return m.viewStore.Notify(ctx, sr, subExportedService+svc.String(), updateCh) +} diff --git a/agent/rpc/peering/subscription_manager_test.go b/agent/rpc/peering/subscription_manager_test.go new file mode 100644 index 0000000000..b8b06be6d0 --- /dev/null +++ b/agent/rpc/peering/subscription_manager_test.go @@ -0,0 +1,362 @@ +package peering + +import ( + "context" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" + "github.com/hashicorp/consul/proto/pbservice" + "github.com/hashicorp/consul/sdk/testutil/retry" +) + +type testSubscriptionBackend struct { + 
state.EventPublisher + store *state.Store +} + +func (b *testSubscriptionBackend) Store() Store { + return b.store +} + +func TestSubscriptionManager_RegisterDeregister(t *testing.T) { + publisher := stream.NewEventPublisher(10 * time.Second) + store := newStateStore(t, publisher) + + backend := testSubscriptionBackend{ + EventPublisher: publisher, + store: store, + } + + ctx := context.Background() + mgr := newSubscriptionManager(ctx, hclog.New(nil), &backend) + + // Create a peering + var lastIdx uint64 = 1 + err := store.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "my-peering", + }) + require.NoError(t, err) + + _, p, err := store.PeeringRead(nil, state.Query{Value: "my-peering"}) + require.NoError(t, err) + require.NotNil(t, p) + + id := p.ID + + subCh := mgr.subscribe(ctx, id) + + entry := &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "mysql", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + { + Name: "mongo", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-other-peering", + }, + }, + }, + }, + } + lastIdx++ + err = store.EnsureConfigEntry(lastIdx, entry) + require.NoError(t, err) + + mysql1 := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "foo", Address: "10.0.0.1"}, + Service: &structs.NodeService{ID: "mysql-1", Service: "mysql", Port: 5000}, + Checks: structs.HealthChecks{ + &structs.HealthCheck{CheckID: "mysql-check", ServiceID: "mysql-1", Node: "foo"}, + }, + } + + runStep(t, "registering exported service instance yields update", func(t *testing.T) { + + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mysql1.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "foo", mysql1.Service)) + + lastIdx++ + require.NoError(t, store.EnsureCheck(lastIdx, mysql1.Checks[0])) + + // Receive in a retry loop so that eventually we converge onto the expected CheckServiceNode. 
+ retry.Run(t, func(r *retry.R) { + select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + require.True(r, ok) + require.Equal(r, uint64(5), nodes.Index) + + require.Len(r, nodes.Nodes, 1) + require.Equal(r, "foo", nodes.Nodes[0].Node.Node) + require.Equal(r, "mysql-1", nodes.Nodes[0].Service.ID) + + require.Len(r, nodes.Nodes[0].Checks, 1) + require.Equal(r, "mysql-check", nodes.Nodes[0].Checks[0].CheckID) + + default: + r.Fatalf("invalid update") + } + }) + }) + + mysql2 := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "bar", Address: "10.0.0.2"}, + Service: &structs.NodeService{ID: "mysql-2", Service: "mysql", Port: 5000}, + Checks: structs.HealthChecks{ + &structs.HealthCheck{CheckID: "mysql-2-check", ServiceID: "mysql-2", Node: "bar"}, + }, + } + + runStep(t, "additional instances are returned when registered", func(t *testing.T) { + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mysql2.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "bar", mysql2.Service)) + + lastIdx++ + require.NoError(t, store.EnsureCheck(lastIdx, mysql2.Checks[0])) + + retry.Run(t, func(r *retry.R) { + select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + require.True(r, ok) + require.Equal(r, uint64(8), nodes.Index) + + require.Len(r, nodes.Nodes, 2) + require.Equal(r, "bar", nodes.Nodes[0].Node.Node) + require.Equal(r, "mysql-2", nodes.Nodes[0].Service.ID) + + require.Len(r, nodes.Nodes[0].Checks, 1) + require.Equal(r, "mysql-2-check", nodes.Nodes[0].Checks[0].CheckID) + + require.Equal(r, "foo", nodes.Nodes[1].Node.Node) + require.Equal(r, "mysql-1", nodes.Nodes[1].Service.ID) + + require.Len(r, nodes.Nodes[1].Checks, 1) + require.Equal(r, "mysql-check", nodes.Nodes[1].Checks[0].CheckID) + + default: + r.Fatalf("invalid update") + } + }) + }) + + runStep(t, "no updates are received for services not exported to my-peering", func(t *testing.T) { + 
mongo := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "zip", Address: "10.0.0.3"}, + Service: &structs.NodeService{ID: "mongo", Service: "mongo", Port: 5000}, + Checks: structs.HealthChecks{ + &structs.HealthCheck{CheckID: "mongo-check", ServiceID: "mongo", Node: "zip"}, + }, + } + + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mongo.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "zip", mongo.Service)) + + lastIdx++ + require.NoError(t, store.EnsureCheck(lastIdx, mongo.Checks[0])) + + // Receive from subCh times out. The retry in the last step already consumed all the mysql events. + select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + + if ok && len(nodes.Nodes) > 0 && nodes.Nodes[0].Node.Node == "zip" { + t.Fatalf("received update for mongo node zip") + } + + case <-time.After(100 * time.Millisecond): + // Expect this to fire + } + }) + + runStep(t, "deregister an instance and it gets removed from the output", func(t *testing.T) { + lastIdx++ + require.NoError(t, store.DeleteService(lastIdx, "foo", mysql1.Service.ID, nil, "")) + + select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + require.True(t, ok) + require.Equal(t, uint64(12), nodes.Index) + + require.Len(t, nodes.Nodes, 1) + require.Equal(t, "bar", nodes.Nodes[0].Node.Node) + require.Equal(t, "mysql-2", nodes.Nodes[0].Service.ID) + + require.Len(t, nodes.Nodes[0].Checks, 1) + require.Equal(t, "mysql-2-check", nodes.Nodes[0].Checks[0].CheckID) + + case <-time.After(100 * time.Millisecond): + t.Fatalf("timed out waiting for update") + } + }) + + runStep(t, "deregister the last instance and the output is empty", func(t *testing.T) { + lastIdx++ + require.NoError(t, store.DeleteService(lastIdx, "bar", mysql2.Service.ID, nil, "")) + + select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + require.True(t, ok) + 
require.Equal(t, uint64(13), nodes.Index) + require.Len(t, nodes.Nodes, 0) + + case <-time.After(100 * time.Millisecond): + t.Fatalf("timed out waiting for update") + } + }) +} + +func TestSubscriptionManager_InitialSnapshot(t *testing.T) { + publisher := stream.NewEventPublisher(10 * time.Second) + store := newStateStore(t, publisher) + + backend := testSubscriptionBackend{ + EventPublisher: publisher, + store: store, + } + + ctx := context.Background() + mgr := newSubscriptionManager(ctx, hclog.New(nil), &backend) + + // Create a peering + var lastIdx uint64 = 1 + err := store.PeeringWrite(lastIdx, &pbpeering.Peering{ + Name: "my-peering", + }) + require.NoError(t, err) + + _, p, err := store.PeeringRead(nil, state.Query{Value: "my-peering"}) + require.NoError(t, err) + require.NotNil(t, p) + + id := p.ID + + subCh := mgr.subscribe(ctx, id) + + // Register two services that are not yet exported + mysql := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "foo", Address: "10.0.0.1"}, + Service: &structs.NodeService{ID: "mysql-1", Service: "mysql", Port: 5000}, + } + + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mysql.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "foo", mysql.Service)) + + mongo := &structs.CheckServiceNode{ + Node: &structs.Node{Node: "zip", Address: "10.0.0.3"}, + Service: &structs.NodeService{ID: "mongo-1", Service: "mongo", Port: 5000}, + } + + lastIdx++ + require.NoError(t, store.EnsureNode(lastIdx, mongo.Node)) + + lastIdx++ + require.NoError(t, store.EnsureService(lastIdx, "zip", mongo.Service)) + + // No updates should be received, because neither service is exported. 
+ select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + + if ok && len(nodes.Nodes) > 0 { + t.Fatalf("received unexpected update") + } + + case <-time.After(100 * time.Millisecond): + // Expect this to fire + } + + runStep(t, "exporting the two services yields an update for both", func(t *testing.T) { + entry := &structs.ExportedServicesConfigEntry{ + Name: "default", + Services: []structs.ExportedService{ + { + Name: "mysql", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + { + Name: "mongo", + Consumers: []structs.ServiceConsumer{ + { + PeerName: "my-peering", + }, + }, + }, + }, + } + lastIdx++ + err = store.EnsureConfigEntry(lastIdx, entry) + require.NoError(t, err) + + var ( + sawMySQL bool + sawMongo bool + ) + + retry.Run(t, func(r *retry.R) { + select { + case update := <-subCh: + nodes, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) + require.True(r, ok) + require.Len(r, nodes.Nodes, 1) + + switch nodes.Nodes[0].Service.Service { + case "mongo": + sawMongo = true + case "mysql": + sawMySQL = true + } + if !sawMySQL || !sawMongo { + r.Fatalf("missing an update") + } + default: + r.Fatalf("invalid update") + } + }) + }) +} + +func newStateStore(t *testing.T, publisher *stream.EventPublisher) *state.Store { + gc, err := state.NewTombstoneGC(time.Second, time.Millisecond) + require.NoError(t, err) + + store := state.NewStateStoreWithEventPublisher(gc, publisher) + require.NoError(t, publisher.RegisterHandler(state.EventTopicServiceHealth, store.ServiceHealthSnapshot)) + require.NoError(t, publisher.RegisterHandler(state.EventTopicServiceHealthConnect, store.ServiceHealthSnapshot)) + go publisher.Run(context.Background()) + + return store +} diff --git a/agent/rpc/peering/subscription_view.go b/agent/rpc/peering/subscription_view.go new file mode 100644 index 0000000000..d6b48e923b --- /dev/null +++ b/agent/rpc/peering/subscription_view.go @@ -0,0 +1,141 @@ 
+package peering + +import ( + "fmt" + "sort" + + "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/submatview" + "github.com/hashicorp/consul/proto/pbservice" + "github.com/hashicorp/consul/proto/pbsubscribe" +) + +type Subscriber interface { + Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) +} + +type exportedServiceRequest struct { + logger hclog.Logger + req structs.ServiceSpecificRequest + sub Subscriber +} + +func newExportedServiceRequest(logger hclog.Logger, svc structs.ServiceName, sub Subscriber) *exportedServiceRequest { + req := structs.ServiceSpecificRequest{ + // TODO(peering): Need to subscribe to both Connect and not + Connect: false, + + ServiceName: svc.Name, + EnterpriseMeta: svc.EnterpriseMeta, + } + return &exportedServiceRequest{ + logger: logger, + req: req, + sub: sub, + } +} + +// CacheInfo implements submatview.Request +func (e *exportedServiceRequest) CacheInfo() cache.RequestInfo { + return e.req.CacheInfo() +} + +// NewMaterializer implements submatview.Request +func (e *exportedServiceRequest) NewMaterializer() (submatview.Materializer, error) { + reqFn := func(index uint64) *pbsubscribe.SubscribeRequest { + r := &pbsubscribe.SubscribeRequest{ + Topic: pbsubscribe.Topic_ServiceHealth, + Key: e.req.ServiceName, + Token: e.req.Token, + Datacenter: e.req.Datacenter, + Index: index, + Namespace: e.req.EnterpriseMeta.NamespaceOrEmpty(), + Partition: e.req.EnterpriseMeta.PartitionOrEmpty(), + } + if e.req.Connect { + r.Topic = pbsubscribe.Topic_ServiceHealthConnect + } + return r + } + deps := submatview.Deps{ + View: newExportedServicesView(), + Logger: e.logger, + Request: reqFn, + } + return submatview.NewLocalMaterializer(e.sub, deps), nil +} + +// Type implements submatview.Request +func (e *exportedServiceRequest) Type() string { + return 
"leader.peering.stream.exportedServiceRequest" +} + +// exportedServicesView implements submatview.View for storing the view state +// of an exported service's health result. We store it as a map to make updates and +// deletions a little easier but we could just store a result type +// (IndexedCheckServiceNodes) and update it in place for each event - that +// involves re-sorting each time etc. though. +// +// Unlike rpcclient.healthView, there is no need for a filter because for exported services +// we export all instances unconditionally. +type exportedServicesView struct { + state map[string]*pbservice.CheckServiceNode +} + +func newExportedServicesView() *exportedServicesView { + return &exportedServicesView{ + state: make(map[string]*pbservice.CheckServiceNode), + } +} + +// Reset implements submatview.View +func (s *exportedServicesView) Reset() { + s.state = make(map[string]*pbservice.CheckServiceNode) +} + +// Update implements submatview.View +func (s *exportedServicesView) Update(events []*pbsubscribe.Event) error { + for _, event := range events { + serviceHealth := event.GetServiceHealth() + if serviceHealth == nil { + return fmt.Errorf("unexpected event type for service health view: %T", + event.GetPayload()) + } + + id := serviceHealth.CheckServiceNode.UniqueID() + switch serviceHealth.Op { + case pbsubscribe.CatalogOp_Register: + s.state[id] = serviceHealth.CheckServiceNode + + case pbsubscribe.CatalogOp_Deregister: + delete(s.state, id) + } + } + return nil +} + +// Result returns the CheckServiceNodes stored by this view. 
+// Result implements submatview.View +func (s *exportedServicesView) Result(index uint64) interface{} { + result := pbservice.IndexedCheckServiceNodes{ + Nodes: make([]*pbservice.CheckServiceNode, 0, len(s.state)), + Index: index, + } + for _, node := range s.state { + result.Nodes = append(result.Nodes, node) + } + sortCheckServiceNodes(&result) + + return &result +} + +// sortCheckServiceNodes stable sorts the results to match memdb semantics. +func sortCheckServiceNodes(n *pbservice.IndexedCheckServiceNodes) { + sort.SliceStable(n.Nodes, func(i, j int) bool { + return n.Nodes[i].UniqueID() < n.Nodes[j].UniqueID() + }) +} diff --git a/agent/rpc/peering/subscription_view_test.go b/agent/rpc/peering/subscription_view_test.go new file mode 100644 index 0000000000..cbb9d071f3 --- /dev/null +++ b/agent/rpc/peering/subscription_view_test.go @@ -0,0 +1,338 @@ +package peering + +import ( + "context" + "math/rand" + "sort" + "sync" + "testing" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/submatview" + "github.com/hashicorp/consul/proto/pbservice" + "github.com/hashicorp/consul/proto/pbsubscribe" +) + +// TestExportedServiceSubscription tests the exported services view and the backing submatview.LocalMaterializer. 
+func TestExportedServiceSubscription(t *testing.T) {
+	s := &stateMap{
+		states: make(map[string]*serviceState),
+	}
+
+	sh := snapshotHandler{stateMap: s}
+	pub := stream.NewEventPublisher(10 * time.Millisecond)
+	pub.RegisterHandler(pbsubscribe.Topic_ServiceHealth, sh.Snapshot)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	go pub.Run(ctx)
+
+	apiSN := structs.NewServiceName("api", nil)
+	webSN := structs.NewServiceName("web", nil)
+
+	// List of updates to the state store:
+	// - api: {register api-1, register api-2, register api-3}
+	// - web: {register web-1, deregister web-1, register web-2}
+	events := []map[string]stream.Event{
+		{
+			apiSN.String(): stream.Event{
+				Topic: pbsubscribe.Topic_ServiceHealth,
+				Payload: state.EventPayloadCheckServiceNode{
+					Op: pbsubscribe.CatalogOp_Register,
+					Value: &structs.CheckServiceNode{
+						Service: &structs.NodeService{
+							ID:      "api-1",
+							Service: "api",
+						},
+					},
+				},
+			},
+			webSN.String(): stream.Event{
+				Topic: pbsubscribe.Topic_ServiceHealth,
+				Payload: state.EventPayloadCheckServiceNode{
+					Op: pbsubscribe.CatalogOp_Register,
+					Value: &structs.CheckServiceNode{
+						Service: &structs.NodeService{
+							ID:      "web-1",
+							Service: "web",
+						},
+					},
+				},
+			},
+		},
+		{
+			apiSN.String(): stream.Event{
+				Topic: pbsubscribe.Topic_ServiceHealth,
+				Payload: state.EventPayloadCheckServiceNode{
+					Op: pbsubscribe.CatalogOp_Register,
+					Value: &structs.CheckServiceNode{
+						Service: &structs.NodeService{
+							ID:      "api-2",
+							Service: "api",
+						},
+					},
+				},
+			},
+			webSN.String(): stream.Event{
+				Topic: pbsubscribe.Topic_ServiceHealth,
+				Payload: state.EventPayloadCheckServiceNode{
+					Op: pbsubscribe.CatalogOp_Deregister,
+					Value: &structs.CheckServiceNode{
+						Service: &structs.NodeService{
+							ID:      "web-1",
+							Service: "web",
+						},
+					},
+				},
+			},
+		},
+		{
+			apiSN.String(): stream.Event{
+				Topic: pbsubscribe.Topic_ServiceHealth,
+				Payload: state.EventPayloadCheckServiceNode{
+					Op: pbsubscribe.CatalogOp_Register,
+					Value:
&structs.CheckServiceNode{ + Service: &structs.NodeService{ + ID: "api-3", + Service: "api", + }, + }, + }, + }, + webSN.String(): stream.Event{ + Topic: pbsubscribe.Topic_ServiceHealth, + Payload: state.EventPayloadCheckServiceNode{ + Op: pbsubscribe.CatalogOp_Register, + Value: &structs.CheckServiceNode{ + Service: &structs.NodeService{ + ID: "web-2", + Service: "web", + }, + }, + }, + }, + }, + } + + // store represents Consul's memdb state store. + // A stream of event updates + store := store{stateMap: s, pub: pub} + + // This errgroup is used to issue simulate async updates to the state store, + // and also consume that fixed number of updates. + group, gctx := errgroup.WithContext(ctx) + group.Go(func() error { + store.simulateUpdates(gctx, events) + return nil + }) + + // viewStore is the store shared by the two service consumer's materializers. + // It is intentionally not run in the errgroup because it will block until the context is canceled. + viewStore := submatview.NewStore(hclog.New(nil)) + go viewStore.Run(ctx) + + // Each consumer represents a subscriber to exported service updates, and will consume + // stream events for the service name it is interested in. + consumers := make(map[string]*consumer) + for _, svc := range []structs.ServiceName{apiSN, webSN} { + c := &consumer{ + viewStore: viewStore, + publisher: pub, + seenByIndex: make(map[uint64][]string), + } + service := svc + group.Go(func() error { + return c.consume(gctx, service.Name, len(events)) + }) + consumers[service.String()] = c + } + + // Wait until all the events have been simulated and consumed. 
+	done := make(chan struct{})
+	go func() {
+		defer close(done)
+		_ = group.Wait()
+	}()
+
+	select {
+	case <-done:
+		// finished
+	case <-time.After(500 * time.Millisecond):
+		// timed out; the deferred cancel will unblock the errgroup's Wait on exit
+		t.Fatalf("timed out waiting for producers and consumers")
+	}
+
+	for svc, c := range consumers {
+		require.NotEmpty(t, c.seenByIndex)
+
+		// Note that store.states[svc].idsByIndex does not assert against a slice of expectations because
+		// the index that the different events will arrive in the simulation is not deterministic.
+		require.Equal(t, store.states[svc].idsByIndex, c.seenByIndex)
+	}
+}
+
+// stateMap is a map keyed by service to the state of the store at different indexes
+type stateMap struct {
+	mu     sync.Mutex
+	states map[string]*serviceState
+}
+
+type store struct {
+	*stateMap
+
+	pub *stream.EventPublisher
+}
+
+// simulateUpdates will publish events and also store the state at each index for later assertions.
+func (s *store) simulateUpdates(ctx context.Context, events []map[string]stream.Event) {
+	idx := uint64(0)
+
+	for _, m := range events {
+		if ctx.Err() != nil {
+			return
+		}
+
+		for svc, event := range m {
+			idx++
+			event.Index = idx
+			s.pub.Publish([]stream.Event{event})
+
+			s.stateMap.mu.Lock()
+			svcState, ok := s.states[svc]
+			if !ok {
+				svcState = &serviceState{
+					current:    make(map[string]*structs.CheckServiceNode),
+					idsByIndex: make(map[uint64][]string),
+				}
+				s.states[svc] = svcState
+			}
+			s.stateMap.mu.Unlock()
+
+			svcState.mu.Lock()
+			svcState.idx = idx
+
+			// Updating the svcState.current map allows us to capture snapshots from a stream of add/delete events.
+ payload := event.Payload.(state.EventPayloadCheckServiceNode) + switch payload.Op { + case pbsubscribe.CatalogOp_Register: + svcState.current[payload.Value.Service.ID] = payload.Value + default: + // If not a registration it must be a deregistration: + delete(svcState.current, payload.Value.Service.ID) + } + + svcState.idsByIndex[idx] = serviceIDsFromMap(svcState.current) + svcState.mu.Unlock() + + delay := time.Duration(rand.Intn(25)) * time.Millisecond + time.Sleep(5*time.Millisecond + delay) + } + } +} + +func serviceIDsFromMap(m map[string]*structs.CheckServiceNode) []string { + var result []string + for id := range m { + result = append(result, id) + } + sort.Strings(result) + return result +} + +type snapshotHandler struct { + *stateMap +} + +type serviceState struct { + mu sync.Mutex + idx uint64 + + // The current snapshot of data, given the observed events. + current map[string]*structs.CheckServiceNode + + // The list of service IDs seen at each index that an update was received for the given service name. + idsByIndex map[uint64][]string +} + +// Snapshot dumps the currently registered service instances. +// +// Snapshot implements stream.SnapshotFunc. 
+func (s *snapshotHandler) Snapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (index uint64, err error) { + s.stateMap.mu.Lock() + svcState, ok := s.states[req.Subject.String()] + if !ok { + svcState = &serviceState{ + current: make(map[string]*structs.CheckServiceNode), + idsByIndex: make(map[uint64][]string), + } + s.states[req.Subject.String()] = svcState + } + s.stateMap.mu.Unlock() + + svcState.mu.Lock() + defer svcState.mu.Unlock() + + for _, node := range svcState.current { + event := stream.Event{ + Topic: pbsubscribe.Topic_ServiceHealth, + Index: svcState.idx, + Payload: state.EventPayloadCheckServiceNode{ + Op: pbsubscribe.CatalogOp_Register, + Value: node, + }, + } + buf.Append([]stream.Event{event}) + } + return svcState.idx, nil +} + +type consumer struct { + viewStore *submatview.Store + publisher *stream.EventPublisher + seenByIndex map[uint64][]string +} + +func (c *consumer) consume(ctx context.Context, service string, countExpected int) error { + group, gctx := errgroup.WithContext(ctx) + updateCh := make(chan cache.UpdateEvent, 10) + + group.Go(func() error { + sr := newExportedServiceRequest(hclog.New(nil), structs.NewServiceName(service, nil), c.publisher) + return c.viewStore.Notify(gctx, sr, "", updateCh) + }) + group.Go(func() error { + var n int + for { + if n >= countExpected { + return nil + } + select { + case u := <-updateCh: + // Each update contains the current snapshot of registered services. 
+ c.seenByIndex[u.Meta.Index] = serviceIDsFromUpdates(u) + n++ + + case <-gctx.Done(): + return nil + } + } + }) + return group.Wait() +} + +func serviceIDsFromUpdates(u cache.UpdateEvent) []string { + var result []string + for _, node := range u.Result.(*pbservice.IndexedCheckServiceNodes).Nodes { + result = append(result, node.Service.ID) + } + sort.Strings(result) + return result +} diff --git a/agent/rpc/peering/testing.go b/agent/rpc/peering/testing.go new file mode 100644 index 0000000000..ffa24ea713 --- /dev/null +++ b/agent/rpc/peering/testing.go @@ -0,0 +1,199 @@ +package peering + +import ( + "context" + "io" + "sync" + "testing" + "time" + + "google.golang.org/grpc/metadata" + + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/proto/pbpeering" +) + +// same certificate that appears in our connect tests +var validCA = ` +-----BEGIN CERTIFICATE----- +MIICmDCCAj6gAwIBAgIBBzAKBggqhkjOPQQDAjAWMRQwEgYDVQQDEwtDb25zdWwg +Q0EgNzAeFw0xODA1MjExNjMzMjhaFw0yODA1MTgxNjMzMjhaMBYxFDASBgNVBAMT +C0NvbnN1bCBDQSA3MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAER0qlxjnRcMEr +iSGlH7G7dYU7lzBEmLUSMZkyBbClmyV8+e8WANemjn+PLnCr40If9cmpr7RnC9Qk +GTaLnLiF16OCAXswggF3MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/ +MGgGA1UdDgRhBF8xZjo5MTpjYTo0MTo4ZjphYzo2NzpiZjo1OTpjMjpmYTo0ZTo3 +NTo1YzpkODpmMDo1NTpkZTpiZTo3NTpiODozMzozMTpkNToyNDpiMDowNDpiMzpl +ODo5Nzo1Yjo3ZTBqBgNVHSMEYzBhgF8xZjo5MTpjYTo0MTo4ZjphYzo2NzpiZjo1 +OTpjMjpmYTo0ZTo3NTo1YzpkODpmMDo1NTpkZTpiZTo3NTpiODozMzozMTpkNToy +NDpiMDowNDpiMzplODo5Nzo1Yjo3ZTA/BgNVHREEODA2hjRzcGlmZmU6Ly8xMjRk +ZjVhMC05ODIwLTc2YzMtOWFhOS02ZjYyMTY0YmExYzIuY29uc3VsMD0GA1UdHgEB +/wQzMDGgLzAtgisxMjRkZjVhMC05ODIwLTc2YzMtOWFhOS02ZjYyMTY0YmExYzIu +Y29uc3VsMAoGCCqGSM49BAMCA0gAMEUCIQDzkkI7R+0U12a+zq2EQhP/n2mHmta+ +fs2hBxWIELGwTAIgLdO7RRw+z9nnxCIA6kNl//mIQb+PGItespiHZKAz74Q= +-----END CERTIFICATE----- +` +var invalidCA = ` +-----BEGIN CERTIFICATE----- +not valid +-----END CERTIFICATE----- +` + +var validAddress = "1.2.3.4:80" + +var validServerName = 
"server.consul" + +var validPeerID = "peer1" + +// TODO(peering): the test methods below are exposed to prevent duplication, +// these should be removed at same time tests in peering_test get refactored. +// XXX: we can't put the existing tests in service_test.go into the peering +// package because it causes an import cycle by importing the top-level consul +// package (which correctly imports the agent/rpc/peering package) + +// TestPeering is a test utility for generating a pbpeering.Peering with valid +// data along with the peerName, state and index. +func TestPeering(peerName string, state pbpeering.PeeringState) *pbpeering.Peering { + return &pbpeering.Peering{ + Name: peerName, + PeerCAPems: []string{validCA}, + PeerServerAddresses: []string{validAddress}, + PeerServerName: validServerName, + State: state, + // uncomment once #1613 lands + // PeerID: validPeerID + } +} + +// TestPeeringToken is a test utility for generating a valid peering token +// with the given peerID for use in test cases +func TestPeeringToken(peerID string) structs.PeeringToken { + return structs.PeeringToken{ + CA: []string{validCA}, + ServerAddresses: []string{validAddress}, + ServerName: validServerName, + PeerID: peerID, + } +} + +type mockClient struct { + mu sync.Mutex + errCh chan error + + replicationStream *mockStream +} + +func (c *mockClient) Send(r *pbpeering.ReplicationMessage) error { + c.replicationStream.recvCh <- r + return nil +} + +func (c *mockClient) Recv() (*pbpeering.ReplicationMessage, error) { + select { + case err := <-c.errCh: + return nil, err + case r := <-c.replicationStream.sendCh: + return r, nil + case <-time.After(10 * time.Millisecond): + return nil, io.EOF + } +} + +func (c *mockClient) RecvWithTimeout(dur time.Duration) (*pbpeering.ReplicationMessage, error) { + select { + case err := <-c.errCh: + return nil, err + case r := <-c.replicationStream.sendCh: + return r, nil + case <-time.After(dur): + return nil, io.EOF + } +} + +func (c *mockClient) 
Close() { + close(c.replicationStream.recvCh) +} + +func newMockClient(ctx context.Context) *mockClient { + return &mockClient{ + replicationStream: newTestReplicationStream(ctx), + } +} + +// mockStream mocks peering.PeeringService_StreamResourcesServer +type mockStream struct { + sendCh chan *pbpeering.ReplicationMessage + recvCh chan *pbpeering.ReplicationMessage + + ctx context.Context + mu sync.Mutex +} + +var _ pbpeering.PeeringService_StreamResourcesServer = (*mockStream)(nil) + +func newTestReplicationStream(ctx context.Context) *mockStream { + return &mockStream{ + sendCh: make(chan *pbpeering.ReplicationMessage, 1), + recvCh: make(chan *pbpeering.ReplicationMessage, 1), + ctx: ctx, + } +} + +// Send implements pbpeering.PeeringService_StreamResourcesServer +func (s *mockStream) Send(r *pbpeering.ReplicationMessage) error { + s.sendCh <- r + return nil +} + +// Recv implements pbpeering.PeeringService_StreamResourcesServer +func (s *mockStream) Recv() (*pbpeering.ReplicationMessage, error) { + r := <-s.recvCh + if r == nil { + return nil, io.EOF + } + return r, nil +} + +// Context implements grpc.ServerStream and grpc.ClientStream +func (s *mockStream) Context() context.Context { + return s.ctx +} + +// SendMsg implements grpc.ServerStream and grpc.ClientStream +func (s *mockStream) SendMsg(m interface{}) error { + return nil +} + +// RecvMsg implements grpc.ServerStream and grpc.ClientStream +func (s *mockStream) RecvMsg(m interface{}) error { + return nil +} + +// SetHeader implements grpc.ServerStream +func (s *mockStream) SetHeader(metadata.MD) error { + return nil +} + +// SendHeader implements grpc.ServerStream +func (s *mockStream) SendHeader(metadata.MD) error { + return nil +} + +// SetTrailer implements grpc.ServerStream +func (s *mockStream) SetTrailer(metadata.MD) {} + +type incrementalTime struct { + base time.Time + next uint64 +} + +func (t *incrementalTime) Now() time.Time { + t.next++ + return t.base.Add(time.Duration(t.next) * 
time.Second) +} + +func runStep(t *testing.T, name string, fn func(t *testing.T)) { + t.Helper() + if !t.Run(name, fn) { + t.FailNow() + } +} diff --git a/agent/rpc/peering/testutil_oss_test.go b/agent/rpc/peering/testutil_oss_test.go new file mode 100644 index 0000000000..4aac92cadd --- /dev/null +++ b/agent/rpc/peering/testutil_oss_test.go @@ -0,0 +1,16 @@ +//go:build !consulent +// +build !consulent + +package peering_test + +import ( + "testing" + + "github.com/hashicorp/consul/agent/consul" + "github.com/hashicorp/go-hclog" +) + +func newDefaultDepsEnterprise(t *testing.T, logger hclog.Logger, c *consul.Config) consul.EnterpriseDeps { + t.Helper() + return consul.EnterpriseDeps{} +} diff --git a/agent/rpc/peering/validate.go b/agent/rpc/peering/validate.go new file mode 100644 index 0000000000..466b61a6e3 --- /dev/null +++ b/agent/rpc/peering/validate.go @@ -0,0 +1,62 @@ +package peering + +import ( + "fmt" + "net" + "strconv" + + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/structs" + + // TODO: replace this with net/netip when we upgrade to go1.18 + "inet.af/netaddr" +) + +// validatePeeringToken ensures that the token has valid values. +func validatePeeringToken(tok *structs.PeeringToken) error { + if len(tok.CA) == 0 { + return errPeeringTokenEmptyCA + } + + // the CA values here should be valid x509 certs + for _, certStr := range tok.CA { + // TODO(peering): should we put these in a cert pool on the token? + // maybe there's a better place to do the parsing? 
+ if _, err := connect.ParseCert(certStr); err != nil { + return fmt.Errorf("peering token invalid CA: %w", err) + } + } + + if len(tok.ServerAddresses) == 0 { + return errPeeringTokenEmptyServerAddresses + } + for _, addr := range tok.ServerAddresses { + host, portRaw, err := net.SplitHostPort(addr) + if err != nil { + return &errPeeringInvalidServerAddress{addr} + } + + port, err := strconv.Atoi(portRaw) + if err != nil { + return &errPeeringInvalidServerAddress{addr} + } + if port < 1 || port > 65535 { + return &errPeeringInvalidServerAddress{addr} + } + if _, err := netaddr.ParseIP(host); err != nil { + return &errPeeringInvalidServerAddress{addr} + } + } + + // TODO(peering): validate name matches SNI? + // TODO(peering): validate name well formed? + if tok.ServerName == "" { + return errPeeringTokenEmptyServerName + } + + if tok.PeerID == "" { + return errPeeringTokenEmptyPeerID + } + + return nil +} diff --git a/agent/rpc/peering/validate_test.go b/agent/rpc/peering/validate_test.go new file mode 100644 index 0000000000..e3b1cbf7d2 --- /dev/null +++ b/agent/rpc/peering/validate_test.go @@ -0,0 +1,107 @@ +package peering + +import ( + "errors" + "testing" + + "github.com/hashicorp/consul/agent/structs" + "github.com/stretchr/testify/require" +) + +func TestValidatePeeringToken(t *testing.T) { + type testCase struct { + name string + token *structs.PeeringToken + wantErr error + } + + tt := []testCase{ + { + name: "empty", + token: &structs.PeeringToken{}, + wantErr: errPeeringTokenEmptyCA, + }, + { + name: "empty CA", + token: &structs.PeeringToken{ + CA: []string{}, + }, + wantErr: errPeeringTokenEmptyCA, + }, + { + name: "invalid CA", + token: &structs.PeeringToken{ + CA: []string{"notavalidcert"}, + }, + wantErr: errors.New("peering token invalid CA: no PEM-encoded data found"), + }, + { + name: "invalid CA cert", + token: &structs.PeeringToken{ + CA: []string{invalidCA}, + }, + wantErr: errors.New("peering token invalid CA: x509: malformed certificate"), 
+ }, + { + name: "invalid address port", + token: &structs.PeeringToken{ + CA: []string{validCA}, + ServerAddresses: []string{"1.2.3.4"}, + }, + wantErr: &errPeeringInvalidServerAddress{ + "1.2.3.4", + }, + }, + { + name: "invalid address IP", + token: &structs.PeeringToken{ + CA: []string{validCA}, + ServerAddresses: []string{"foo.bar.baz"}, + }, + wantErr: &errPeeringInvalidServerAddress{ + "foo.bar.baz", + }, + }, + { + name: "invalid server name", + token: &structs.PeeringToken{ + CA: []string{validCA}, + ServerAddresses: []string{"1.2.3.4:80"}, + }, + wantErr: errPeeringTokenEmptyServerName, + }, + { + name: "invalid peer ID", + token: &structs.PeeringToken{ + CA: []string{validCA}, + ServerAddresses: []string{validAddress}, + ServerName: validServerName, + }, + wantErr: errPeeringTokenEmptyPeerID, + }, + { + name: "valid token", + token: &structs.PeeringToken{ + CA: []string{validCA}, + ServerAddresses: []string{validAddress}, + ServerName: validServerName, + PeerID: validPeerID, + }, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + err := validatePeeringToken(tc.token) + if tc.wantErr != nil { + if err == nil { + t.Error("expected error but got nil") + return + } + require.Contains(t, err.Error(), tc.wantErr.Error()) + return + } + require.NoError(t, err) + }) + } +} diff --git a/agent/rpcclient/health/health.go b/agent/rpcclient/health/health.go index 004101144f..828e284945 100644 --- a/agent/rpcclient/health/health.go +++ b/agent/rpcclient/health/health.go @@ -133,15 +133,16 @@ func (r serviceRequest) Type() string { return "agent.rpcclient.health.serviceRequest" } -func (r serviceRequest) NewMaterializer() (*submatview.Materializer, error) { +func (r serviceRequest) NewMaterializer() (submatview.Materializer, error) { view, err := newHealthView(r.ServiceSpecificRequest) if err != nil { return nil, err } - return submatview.NewMaterializer(submatview.Deps{ + deps := submatview.Deps{ View: view, - Client: 
pbsubscribe.NewStateChangeSubscriptionClient(r.deps.Conn), Logger: r.deps.Logger, Request: newMaterializerRequest(r.ServiceSpecificRequest), - }), nil + } + + return submatview.NewRPCMaterializer(pbsubscribe.NewStateChangeSubscriptionClient(r.deps.Conn), deps), nil } diff --git a/agent/rpcclient/health/view_test.go b/agent/rpcclient/health/view_test.go index c2a7ea79b0..137a9986a0 100644 --- a/agent/rpcclient/health/view_test.go +++ b/agent/rpcclient/health/view_test.go @@ -537,17 +537,17 @@ type serviceRequestStub struct { streamClient submatview.StreamClient } -func (r serviceRequestStub) NewMaterializer() (*submatview.Materializer, error) { +func (r serviceRequestStub) NewMaterializer() (submatview.Materializer, error) { view, err := newHealthView(r.ServiceSpecificRequest) if err != nil { return nil, err } - return submatview.NewMaterializer(submatview.Deps{ + deps := submatview.Deps{ View: view, - Client: r.streamClient, Logger: hclog.New(nil), Request: newMaterializerRequest(r.ServiceSpecificRequest), - }), nil + } + return submatview.NewRPCMaterializer(r.streamClient, deps), nil } func newEventServiceHealthRegister(index uint64, nodeNum int, svc string) *pbsubscribe.Event { diff --git a/agent/setup.go b/agent/setup.go index 322f170b25..bbe54ae06e 100644 --- a/agent/setup.go +++ b/agent/setup.go @@ -169,11 +169,14 @@ func newConnPool(config *config.RuntimeConfig, logger hclog.Logger, tls *tlsutil } pool := &pool.ConnPool{ - Server: config.ServerMode, - SrcAddr: rpcSrcAddr, - Logger: logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}), - TLSConfigurator: tls, - Datacenter: config.Datacenter, + Server: config.ServerMode, + SrcAddr: rpcSrcAddr, + Logger: logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}), + TLSConfigurator: tls, + Datacenter: config.Datacenter, + Timeout: config.RPCHoldTimeout, + MaxQueryTime: config.MaxQueryTime, + DefaultQueryTime: config.DefaultQueryTime, } if config.ServerMode { pool.MaxTime = 2 * 
time.Minute diff --git a/agent/structs/config_entry_export_oss_test.go b/agent/structs/config_entry_export_oss_test.go new file mode 100644 index 0000000000..4015f5d714 --- /dev/null +++ b/agent/structs/config_entry_export_oss_test.go @@ -0,0 +1,62 @@ +//go:build !consulent +// +build !consulent + +package structs + +import ( + "testing" +) + +func TestExportedServicesConfigEntry_OSS(t *testing.T) { + cases := map[string]configEntryTestcase{ + "normalize: noop in oss": { + entry: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "web", + Consumers: []ServiceConsumer{ + { + PeerName: "bar", + }, + }, + }, + }, + }, + expected: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "web", + Namespace: "", + Consumers: []ServiceConsumer{ + { + PeerName: "bar", + }, + }, + }, + }, + }, + }, + "validate: empty name": { + entry: &ExportedServicesConfigEntry{ + Name: "", + }, + validateErr: `exported-services Name must be "default"`, + }, + "validate: wildcard name": { + entry: &ExportedServicesConfigEntry{ + Name: WildcardSpecifier, + }, + validateErr: `exported-services Name must be "default"`, + }, + "validate: other name": { + entry: &ExportedServicesConfigEntry{ + Name: "foo", + }, + validateErr: `exported-services Name must be "default"`, + }, + } + + testConfigEntryNormalizeAndValidate(t, cases) +} diff --git a/agent/structs/config_entry_exports.go b/agent/structs/config_entry_exports.go index 8a184cc396..e7e33c54d2 100644 --- a/agent/structs/config_entry_exports.go +++ b/agent/structs/config_entry_exports.go @@ -35,9 +35,14 @@ type ExportedService struct { } // ServiceConsumer represents a downstream consumer of the service to be exported. +// At most one of Partition or PeerName must be specified. type ServiceConsumer struct { // Partition is the admin partition to export the service to. + // Deprecated: PeerName should be used for both remote peers and local partitions. 
Partition string + + // PeerName is the name of the peer to export the service to. + PeerName string } func (e *ExportedServicesConfigEntry) ToMap() map[string]map[string][]string { @@ -99,37 +104,40 @@ func (e *ExportedServicesConfigEntry) Normalize() error { e.EnterpriseMeta.Normalize() for i := range e.Services { - e.Services[i].Namespace = acl.NamespaceOrDefault(e.Services[i].Namespace) + e.Services[i].Namespace = acl.NormalizeNamespace(e.Services[i].Namespace) } return nil } func (e *ExportedServicesConfigEntry) Validate() error { - if e.Name == "" { - return fmt.Errorf("Name is required") - } - if e.Name == WildcardSpecifier { - return fmt.Errorf("exported-services Name must be the name of a partition, and not a wildcard") - } - - if err := requireEnterprise(e.GetKind()); err != nil { + if err := validateExportedServicesName(e.Name); err != nil { return err } + if err := validateConfigEntryMeta(e.Meta); err != nil { return err } - for _, svc := range e.Services { + for i, svc := range e.Services { if svc.Name == "" { - return fmt.Errorf("service name cannot be empty") + return fmt.Errorf("Services[%d]: service name cannot be empty", i) + } + if svc.Namespace == WildcardSpecifier && svc.Name != WildcardSpecifier { + return fmt.Errorf("Services[%d]: service name must be wildcard if namespace is wildcard", i) } if len(svc.Consumers) == 0 { - return fmt.Errorf("service %q must have at least one consumer", svc.Name) + return fmt.Errorf("Services[%d]: must have at least one consumer", i) } - for _, consumer := range svc.Consumers { + for j, consumer := range svc.Consumers { + if consumer.PeerName != "" && consumer.Partition != "" { + return fmt.Errorf("Services[%d].Consumers[%d]: must define at most one of PeerName or Partition", i, j) + } if consumer.Partition == WildcardSpecifier { - return fmt.Errorf("exporting to all partitions (wildcard) is not yet supported") + return fmt.Errorf("Services[%d].Consumers[%d]: exporting to all partitions (wildcard) is not 
supported", i, j) + } + if consumer.PeerName == WildcardSpecifier { + return fmt.Errorf("Services[%d].Consumers[%d]: exporting to all peers (wildcard) is not supported", i, j) } } } diff --git a/agent/structs/config_entry_exports_test.go b/agent/structs/config_entry_exports_test.go new file mode 100644 index 0000000000..db0aadb91a --- /dev/null +++ b/agent/structs/config_entry_exports_test.go @@ -0,0 +1,94 @@ +package structs + +import ( + "testing" +) + +func TestExportedServicesConfigEntry(t *testing.T) { + cases := map[string]configEntryTestcase{ + "validate: empty service name": { + entry: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "", + }, + }, + }, + validateErr: `service name cannot be empty`, + }, + "validate: empty consumer list": { + entry: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "web", + }, + }, + }, + validateErr: `must have at least one consumer`, + }, + "validate: no wildcard in consumer partition": { + entry: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "api", + Consumers: []ServiceConsumer{ + { + Partition: "foo", + }, + }, + }, + { + Name: "web", + Consumers: []ServiceConsumer{ + { + Partition: "*", + }, + }, + }, + }, + }, + validateErr: `Services[1].Consumers[0]: exporting to all partitions (wildcard) is not supported`, + }, + "validate: no wildcard in consumer peername": { + entry: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "web", + Consumers: []ServiceConsumer{ + { + PeerName: "foo", + }, + { + PeerName: "*", + }, + }, + }, + }, + }, + validateErr: `Services[0].Consumers[1]: exporting to all peers (wildcard) is not supported`, + }, + "validate: cannot specify consumer with partition and peername": { + entry: &ExportedServicesConfigEntry{ + Name: "default", + Services: []ExportedService{ + { + Name: "web", + Consumers: []ServiceConsumer{ + { + 
Partition: "foo", + PeerName: "bar", + }, + }, + }, + }, + }, + validateErr: `Services[0].Consumers[0]: must define at most one of PeerName or Partition`, + }, + } + + testConfigEntryNormalizeAndValidate(t, cases) +} diff --git a/agent/structs/config_entry_oss.go b/agent/structs/config_entry_oss.go index 2cd1db7ac9..4bd3a93fcd 100644 --- a/agent/structs/config_entry_oss.go +++ b/agent/structs/config_entry_oss.go @@ -38,6 +38,9 @@ func validateInnerEnterpriseMeta(_, _ *acl.EnterpriseMeta) error { return nil } -func requireEnterprise(kind string) error { - return fmt.Errorf("Config entry kind %q requires Consul Enterprise", kind) +func validateExportedServicesName(name string) error { + if name != "default" { + return fmt.Errorf(`exported-services Name must be "default"`) + } + return nil } diff --git a/agent/structs/peering.go b/agent/structs/peering.go new file mode 100644 index 0000000000..16235fd862 --- /dev/null +++ b/agent/structs/peering.go @@ -0,0 +1,9 @@ +package structs + +// PeeringToken identifies a peer in order for a connection to be established. +type PeeringToken struct { + CA []string + ServerAddresses []string + ServerName string + PeerID string +} diff --git a/agent/structs/prepared_query.go b/agent/structs/prepared_query.go index b6028ceadf..440053f0b7 100644 --- a/agent/structs/prepared_query.go +++ b/agent/structs/prepared_query.go @@ -79,6 +79,10 @@ type ServiceQuery struct { // should be directly next to their services so this isn't an issue. Connect bool + // If not empty, PeerName represents the peer that the service + // was imported from. 
+ PeerName string + // EnterpriseMeta is the embedded enterprise metadata acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` } diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 765c039e78..88e5a9b210 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -14,18 +14,15 @@ import ( "strings" "time" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/duration" "github.com/golang/protobuf/ptypes/timestamp" - - "github.com/golang/protobuf/proto" + "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" "github.com/hashicorp/go-multierror" "github.com/hashicorp/serf/coordinate" "github.com/mitchellh/hashstructure" - "github.com/hashicorp/consul-net-rpc/go-msgpack/codec" - - ptypes "github.com/golang/protobuf/ptypes" - "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/api" @@ -80,6 +77,29 @@ const ( ServiceVirtualIPRequestType = 32 FreeVirtualIPRequestType = 33 KindServiceNamesType = 34 + PeeringWriteType = 35 + PeeringDeleteType = 36 + PeeringTerminateByIDType = 37 + PeeringTrustBundleWriteType = 38 + PeeringTrustBundleDeleteType = 39 +) + +const ( + // LocalPeerKeyword is a reserved keyword used for indexing in the state store for objects in the local peer. + LocalPeerKeyword = "internal" + + // DefaultPeerKeyword is the PeerName to use to refer to the local + // cluster's own data, rather than replicated peered data. + // + // This may internally be converted into LocalPeerKeyword, but external + // uses should not use that symbol directly in most cases. + DefaultPeerKeyword = "" + + // TODOPeerKeyword is the peer keyword to use if you aren't sure if the + // usage SHOULD be peering-aware yet. 
+ // + // TODO(peering): remove this in the future + TODOPeerKeyword = "" ) // if a new request type is added above it must be @@ -123,6 +143,10 @@ var requestTypeStrings = map[MessageType]string{ ServiceVirtualIPRequestType: "ServiceVirtualIP", FreeVirtualIPRequestType: "FreeVirtualIP", KindServiceNamesType: "KindServiceName", + PeeringWriteType: "Peering", + PeeringDeleteType: "PeeringDelete", + PeeringTrustBundleWriteType: "PeeringTrustBundle", + PeeringTrustBundleDeleteType: "PeeringTrustBundleDelete", } const ( @@ -217,6 +241,7 @@ type RPCInfo interface { TokenSecret() string SetTokenSecret(string) HasTimedOut(since time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) + Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) time.Duration } // QueryOptions is used to specify various flags for read queries @@ -315,18 +340,24 @@ func (q *QueryOptions) SetTokenSecret(s string) { q.Token = s } -func (q QueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { +func (q QueryOptions) Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) time.Duration { + // Match logic in Server.blockingQuery. if q.MinQueryIndex > 0 { if q.MaxQueryTime > maxQueryTime { q.MaxQueryTime = maxQueryTime } else if q.MaxQueryTime <= 0 { q.MaxQueryTime = defaultQueryTime } + // Timeout after maximum jitter has elapsed. 
q.MaxQueryTime += lib.RandomStagger(q.MaxQueryTime / JitterFraction) - return time.Since(start) > (q.MaxQueryTime + rpcHoldTimeout), nil + return q.MaxQueryTime + rpcHoldTimeout } - return time.Since(start) > rpcHoldTimeout, nil + return rpcHoldTimeout +} + +func (q QueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + return time.Since(start) > q.Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime), nil } type WriteRequest struct { @@ -353,7 +384,11 @@ func (w *WriteRequest) SetTokenSecret(s string) { } func (w WriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { - return time.Since(start) > rpcHoldTimeout, nil + return time.Since(start) > w.Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime), nil +} + +func (w WriteRequest) Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout } type QueryBackend int @@ -431,6 +466,8 @@ type RegisterRequest struct { // node portion of this update will not apply. 
SkipNodeUpdate bool + PeerName string + // EnterpriseMeta is the embedded enterprise metadata acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` @@ -461,6 +498,7 @@ func (r *RegisterRequest) ChangesNode(node *Node) bool { if r.ID != node.ID || !strings.EqualFold(r.Node, node.Node) || r.PartitionOrDefault() != node.PartitionOrDefault() || + r.PeerName != node.PeerName || r.Address != node.Address || r.Datacenter != node.Datacenter || !reflect.DeepEqual(r.TaggedAddresses, node.TaggedAddresses) || @@ -481,6 +519,7 @@ type DeregisterRequest struct { Node string ServiceID string CheckID types.CheckID + PeerName string acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` WriteRequest } @@ -544,6 +583,7 @@ type DCSpecificRequest struct { Datacenter string NodeMetaFilters map[string]string Source QuerySource + PeerName string acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -556,6 +596,7 @@ func (r *DCSpecificRequest) CacheInfo() cache.RequestInfo { info := cache.RequestInfo{ Token: r.Token, Datacenter: r.Datacenter, + PeerName: r.PeerName, MinIndex: r.MinQueryIndex, Timeout: r.MaxQueryTime, MaxAge: r.MaxAge, @@ -590,6 +631,7 @@ type ServiceDumpRequest struct { UseServiceKind bool Source QuerySource acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` + PeerName string QueryOptions } @@ -601,6 +643,7 @@ func (r *ServiceDumpRequest) CacheInfo() cache.RequestInfo { info := cache.RequestInfo{ Token: r.Token, Datacenter: r.Datacenter, + PeerName: r.PeerName, MinIndex: r.MinQueryIndex, Timeout: r.MaxQueryTime, MaxAge: r.MaxAge, @@ -637,7 +680,11 @@ func (r *ServiceDumpRequest) CacheMinIndex() uint64 { // ServiceSpecificRequest is used to query about a specific service type ServiceSpecificRequest struct { - Datacenter string + Datacenter string + + // The name of the peer that the requested service was imported from. 
+ PeerName string + NodeMetaFilters map[string]string ServiceName string ServiceKind ServiceKind @@ -698,6 +745,7 @@ func (r *ServiceSpecificRequest) CacheInfo() cache.RequestInfo { r.Connect, r.Filter, r.EnterpriseMeta, + r.PeerName, r.Ingress, r.ServiceKind, }, nil) @@ -719,6 +767,7 @@ func (r *ServiceSpecificRequest) CacheMinIndex() uint64 { type NodeSpecificRequest struct { Datacenter string Node string + PeerName string acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash"` QueryOptions } @@ -752,13 +801,14 @@ func (r *NodeSpecificRequest) CacheInfo() cache.RequestInfo { return info } -// ChecksInStateRequest is used to query for nodes in a state +// ChecksInStateRequest is used to query for checks in a state type ChecksInStateRequest struct { Datacenter string NodeMetaFilters map[string]string State string Source QuerySource + PeerName string acl.EnterpriseMeta `mapstructure:",squash"` QueryOptions } @@ -774,12 +824,17 @@ type Node struct { Address string Datacenter string Partition string `json:",omitempty"` + PeerName string `json:",omitempty"` TaggedAddresses map[string]string Meta map[string]string RaftIndex `bexpr:"-"` } +func (n *Node) PeerOrEmpty() string { + return n.PeerName +} + func (n *Node) GetEnterpriseMeta() *acl.EnterpriseMeta { return NodeEnterpriseMetaInPartition(n.Partition) } @@ -805,6 +860,7 @@ func (n *Node) IsSame(other *Node) bool { return n.ID == other.ID && strings.EqualFold(n.Node, other.Node) && n.PartitionOrDefault() == other.PartitionOrDefault() && + strings.EqualFold(n.PeerName, other.PeerName) && n.Address == other.Address && n.Datacenter == other.Datacenter && reflect.DeepEqual(n.TaggedAddresses, other.TaggedAddresses) && @@ -923,11 +979,18 @@ type ServiceNode struct { ServiceProxy ConnectProxyConfig ServiceConnect ServiceConnect + // If not empty, PeerName represents the peer that this ServiceNode was imported from. 
+ PeerName string `json:",omitempty"` + acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` RaftIndex `bexpr:"-"` } +func (s *ServiceNode) PeerOrEmpty() string { + return s.PeerName +} + // PartialClone() returns a clone of the given service node, minus the node- // related fields that get filled in later, Address and TaggedAddresses. func (s *ServiceNode) PartialClone() *ServiceNode { @@ -969,6 +1032,7 @@ func (s *ServiceNode) PartialClone() *ServiceNode { ModifyIndex: s.ModifyIndex, }, EnterpriseMeta: s.EnterpriseMeta, + PeerName: s.PeerName, } } @@ -988,6 +1052,7 @@ func (s *ServiceNode) ToNodeService() *NodeService { EnableTagOverride: s.ServiceEnableTagOverride, Proxy: s.ServiceProxy, Connect: s.ServiceConnect, + PeerName: s.PeerName, EnterpriseMeta: s.EnterpriseMeta, RaftIndex: RaftIndex{ CreateIndex: s.CreateIndex, @@ -1133,6 +1198,9 @@ type NodeService struct { acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` + // If not empty, PeerName represents the peer that the NodeService was imported from. + PeerName string + RaftIndex `bexpr:"-"` } @@ -1417,6 +1485,7 @@ func (s *NodeService) IsSame(other *NodeService) bool { s.Kind != other.Kind || !reflect.DeepEqual(s.Proxy, other.Proxy) || s.Connect != other.Connect || + s.PeerName != other.PeerName || !s.EnterpriseMeta.IsSame(&other.EnterpriseMeta) { return false } @@ -1488,6 +1557,7 @@ func (s *NodeService) ToServiceNode(node string) *ServiceNode { ServiceProxy: s.Proxy, ServiceConnect: s.Connect, EnterpriseMeta: s.EnterpriseMeta, + PeerName: s.PeerName, RaftIndex: RaftIndex{ CreateIndex: s.CreateIndex, ModifyIndex: s.ModifyIndex, @@ -1529,6 +1599,10 @@ type HealthCheck struct { // HTTP or GRPC health check of the service. ExposedPort int + // PeerName is the name of the peer the check was imported from. + // It is empty if the check was registered locally. 
+ PeerName string `json:",omitempty"` + Definition HealthCheckDefinition `bexpr:"-"` acl.EnterpriseMeta `hcl:",squash" mapstructure:",squash" bexpr:"-"` @@ -1536,6 +1610,10 @@ type HealthCheck struct { RaftIndex `bexpr:"-"` } +func (hc *HealthCheck) PeerOrEmpty() string { + return hc.PeerName +} + func (hc *HealthCheck) NodeIdentity() Identity { return Identity{ ID: hc.Node, @@ -1693,6 +1771,7 @@ func (c *HealthCheck) IsSame(other *HealthCheck) bool { c.ServiceName != other.ServiceName || !reflect.DeepEqual(c.ServiceTags, other.ServiceTags) || !reflect.DeepEqual(c.Definition, other.Definition) || + c.PeerName != other.PeerName || !c.EnterpriseMeta.IsSame(&other.EnterpriseMeta) { return false } @@ -1867,6 +1946,7 @@ type NodeInfo struct { ID types.NodeID Node string Partition string `json:",omitempty"` + PeerName string `json:",omitempty"` Address string TaggedAddresses map[string]string Meta map[string]string diff --git a/agent/structs/structs_filtering_test.go b/agent/structs/structs_filtering_test.go index b094cf5bdd..93d51c5b18 100644 --- a/agent/structs/structs_filtering_test.go +++ b/agent/structs/structs_filtering_test.go @@ -53,7 +53,7 @@ func TestPointerStructure(t *testing.T) { require.Equal(t, "1.1.1.1", val) } -/////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// // // NOTE: The tests within this file are designed to validate that the fields // that will be available for filtering for various data types in the @@ -61,7 +61,7 @@ func TestPointerStructure(t *testing.T) { // to update this file to get the tests passing again then you definitely // should update the documentation as well. 
// -/////////////////////////////////////////////////////////////////////////////// +// ///////////////////////////////////////////////////////////////////////////// type fieldConfigTest struct { dataType interface{} @@ -309,6 +309,11 @@ var expectedFieldConfigNode bexpr.FieldConfigurations = bexpr.FieldConfiguration CoerceFn: bexpr.CoerceString, SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, }, + "PeerName": &bexpr.FieldConfiguration{ + StructFieldName: "PeerName", + CoerceFn: bexpr.CoerceString, + SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, + }, "Address": &bexpr.FieldConfiguration{ StructFieldName: "Address", CoerceFn: bexpr.CoerceString, @@ -408,6 +413,11 @@ var expectedFieldConfigNodeService bexpr.FieldConfigurations = bexpr.FieldConfig StructFieldName: "ServiceConnect", SubFields: expectedFieldConfigServiceConnect, }, + "PeerName": &bexpr.FieldConfiguration{ + StructFieldName: "PeerName", + CoerceFn: bexpr.CoerceString, + SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, + }, } var expectedFieldConfigServiceNode bexpr.FieldConfigurations = bexpr.FieldConfigurations{ @@ -507,6 +517,11 @@ var expectedFieldConfigServiceNode bexpr.FieldConfigurations = bexpr.FieldConfig StructFieldName: "ServiceConnect", SubFields: expectedFieldConfigServiceConnect, }, + "PeerName": &bexpr.FieldConfiguration{ + StructFieldName: "PeerName", + CoerceFn: bexpr.CoerceString, + SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, + }, } var expectedFieldConfigHealthCheck bexpr.FieldConfigurations = bexpr.FieldConfigurations{ @@ -578,6 +593,11 @@ var 
expectedFieldConfigHealthCheck bexpr.FieldConfigurations = bexpr.FieldConfig SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual}, StructFieldName: "ExposedPort", }, + "PeerName": &bexpr.FieldConfiguration{ + CoerceFn: bexpr.CoerceString, + SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, + StructFieldName: "PeerName", + }, } var expectedFieldConfigCheckServiceNode bexpr.FieldConfigurations = bexpr.FieldConfigurations{ @@ -612,6 +632,11 @@ var expectedFieldConfigNodeInfo bexpr.FieldConfigurations = bexpr.FieldConfigura CoerceFn: bexpr.CoerceString, SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, }, + "PeerName": &bexpr.FieldConfiguration{ + StructFieldName: "PeerName", + CoerceFn: bexpr.CoerceString, + SupportedOperations: []bexpr.MatchOperator{bexpr.MatchEqual, bexpr.MatchNotEqual, bexpr.MatchIn, bexpr.MatchNotIn, bexpr.MatchMatches, bexpr.MatchNotMatches}, + }, "Address": &bexpr.FieldConfiguration{ StructFieldName: "Address", CoerceFn: bexpr.CoerceString, diff --git a/agent/structs/structs_test.go b/agent/structs/structs_test.go index 57711184bd..cee4cff2e0 100644 --- a/agent/structs/structs_test.go +++ b/agent/structs/structs_test.go @@ -1908,6 +1908,8 @@ func TestServiceDumpRequest_CacheInfoKey(t *testing.T) { var cacheInfoIgnoredFields = map[string]bool{ // Datacenter is part of the cache key added by the cache itself. "Datacenter": true, + // PeerName is part of the cache key added by the cache itself. + "PeerName": true, // QuerySource is always the same for every request from a single agent, so it // is excluded from the key. 
"Source": true, diff --git a/agent/submatview/local_materializer.go b/agent/submatview/local_materializer.go new file mode 100644 index 0000000000..b65c2f0744 --- /dev/null +++ b/agent/submatview/local_materializer.go @@ -0,0 +1,109 @@ +package submatview + +import ( + "context" + "errors" + + "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/consul/stream" + "github.com/hashicorp/consul/lib/retry" + "github.com/hashicorp/consul/proto/pbsubscribe" +) + +// LocalMaterializer is a materializer for a stream of events +// and manages the local subscription to the event publisher +// until the cache result is discarded when its TTL expires. +type LocalMaterializer struct { + deps Deps + backend LocalBackend + retryWaiter *retry.Waiter + handler eventHandler + + mat *materializer +} + +var _ Materializer = (*LocalMaterializer)(nil) + +type LocalBackend interface { + Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) +} + +func NewLocalMaterializer(backend LocalBackend, deps Deps) *LocalMaterializer { + m := LocalMaterializer{ + backend: backend, + deps: deps, + mat: newMaterializer(deps.Logger, deps.View, deps.Waiter), + } + return &m +} + +// Query implements Materializer +func (m *LocalMaterializer) Query(ctx context.Context, minIndex uint64) (Result, error) { + return m.mat.query(ctx, minIndex) +} + +// Run receives events from a local subscription backend and sends them to the View. +// It runs until ctx is cancelled, so it is expected to be run in a goroutine. +// Mirrors implementation of RPCMaterializer. 
+// +// Run implements Materializer +func (m *LocalMaterializer) Run(ctx context.Context) { + for { + req := m.deps.Request(m.mat.currentIndex()) + err := m.subscribeOnce(ctx, req) + if ctx.Err() != nil { + return + } + m.mat.handleError(req, err) + + if err := m.mat.retryWaiter.Wait(ctx); err != nil { + return + } + } +} + +// subscribeOnce opens a new subscription to a local backend and runs +// for its lifetime or until the view is closed. +func (m *LocalMaterializer) subscribeOnce(ctx context.Context, req *pbsubscribe.SubscribeRequest) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + m.handler = initialHandler(req.Index) + + entMeta := acl.NewEnterpriseMetaWithPartition(req.Partition, req.Namespace) + sub, err := m.backend.Subscribe(state.PBToStreamSubscribeRequest(req, entMeta)) + if err != nil { + return err + } + defer sub.Unsubscribe() + + for { + event, err := sub.Next(ctx) + switch { + case errors.Is(err, stream.ErrSubForceClosed): + m.deps.Logger.Trace("subscription reset by server") + return err + + case err != nil: + return err + } + + e := event.Payload.ToSubscriptionEvent(event.Index) + m.handler, err = m.handler(m, e) + if err != nil { + m.mat.reset() + return err + } + } +} + +// updateView implements viewState +func (m *LocalMaterializer) updateView(events []*pbsubscribe.Event, index uint64) error { + return m.mat.updateView(events, index) +} + +// reset implements viewState +func (m *LocalMaterializer) reset() { + m.mat.reset() +} diff --git a/agent/submatview/materializer.go b/agent/submatview/materializer.go index 3b870d9e17..aa312d060c 100644 --- a/agent/submatview/materializer.go +++ b/agent/submatview/materializer.go @@ -6,9 +6,6 @@ import ( "time" "github.com/hashicorp/go-hclog" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" "github.com/hashicorp/consul/lib/retry" "github.com/hashicorp/consul/proto/pbsubscribe" @@ -38,16 +35,27 @@ type View interface { Reset() } -// 
Materializer consumes the event stream, handling any framing events, and -// sends the events to View as they are received. -// -// Materializer is used as the cache.Result.State for a streaming -// cache type and manages the actual streaming RPC call to the servers behind -// the scenes until the cache result is discarded when TTL expires. -type Materializer struct { - deps Deps +// Result returned from the View. +type Result struct { + Index uint64 + Value interface{} + // Cached is true if the requested value was already available locally. If + // the value is false, it indicates that GetFromView had to wait for an update, + Cached bool +} + +type Deps struct { + View View + Logger hclog.Logger + Waiter *retry.Waiter + Request func(index uint64) *pbsubscribe.SubscribeRequest +} + +// materializer consumes the event stream, handling any framing events, and +// allows for querying the materialized view. +type materializer struct { retryWaiter *retry.Waiter - handler eventHandler + logger hclog.Logger // lock protects the mutable state - all fields below it must only be accessed // while holding lock. @@ -58,175 +66,22 @@ type Materializer struct { err error } -type Deps struct { - View View - Client StreamClient - Logger hclog.Logger - Waiter *retry.Waiter - Request func(index uint64) *pbsubscribe.SubscribeRequest -} - -// StreamClient provides a subscription to state change events. -type StreamClient interface { - Subscribe(ctx context.Context, in *pbsubscribe.SubscribeRequest, opts ...grpc.CallOption) (pbsubscribe.StateChangeSubscription_SubscribeClient, error) -} - -// NewMaterializer returns a new Materializer. Run must be called to start it. 
-func NewMaterializer(deps Deps) *Materializer { - v := &Materializer{ - deps: deps, - view: deps.View, - retryWaiter: deps.Waiter, +func newMaterializer(logger hclog.Logger, view View, waiter *retry.Waiter) *materializer { + m := materializer{ + view: view, + retryWaiter: waiter, + logger: logger, updateCh: make(chan struct{}), } - if v.retryWaiter == nil { - v.retryWaiter = &retry.Waiter{ - MinFailures: 1, - // Start backing off with small increments (200-400ms) which will double - // each attempt. (200-400, 400-800, 800-1600, 1600-3200, 3200-6000, 6000 - // after that). (retry.Wait applies Max limit after jitter right now). - Factor: 200 * time.Millisecond, - MinWait: 0, - MaxWait: 60 * time.Second, - Jitter: retry.NewJitter(100), - } + if m.retryWaiter == nil { + m.retryWaiter = defaultWaiter() } - return v + return &m } -// Run receives events from the StreamClient and sends them to the View. It runs -// until ctx is cancelled, so it is expected to be run in a goroutine. -func (m *Materializer) Run(ctx context.Context) { - for { - req := m.deps.Request(m.index) - err := m.runSubscription(ctx, req) - if ctx.Err() != nil { - return - } - - failures := m.retryWaiter.Failures() - if isNonTemporaryOrConsecutiveFailure(err, failures) { - m.lock.Lock() - m.notifyUpdateLocked(err) - m.lock.Unlock() - } - - m.deps.Logger.Error("subscribe call failed", - "err", err, - "topic", req.Topic, - "key", req.Key, - "failure_count", failures+1) - - if err := m.retryWaiter.Wait(ctx); err != nil { - return - } - } -} - -// isNonTemporaryOrConsecutiveFailure returns true if the error is not a -// temporary error or if failures > 0. -func isNonTemporaryOrConsecutiveFailure(err error, failures int) bool { - // temporary is an interface used by net and other std lib packages to - // show error types represent temporary/recoverable errors. 
- temp, ok := err.(interface { - Temporary() bool - }) - return !ok || !temp.Temporary() || failures > 0 -} - -// runSubscription opens a new subscribe streaming call to the servers and runs -// for it's lifetime or until the view is closed. -func (m *Materializer) runSubscription(ctx context.Context, req *pbsubscribe.SubscribeRequest) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - m.handler = initialHandler(req.Index) - - s, err := m.deps.Client.Subscribe(ctx, req) - if err != nil { - return err - } - - for { - event, err := s.Recv() - switch { - case isGrpcStatus(err, codes.Aborted): - m.reset() - return resetErr("stream reset requested") - case err != nil: - return err - } - - m.handler, err = m.handler(m, event) - if err != nil { - m.reset() - return err - } - } -} - -func isGrpcStatus(err error, code codes.Code) bool { - s, ok := status.FromError(err) - return ok && s.Code() == code -} - -// resetErr represents a server request to reset the subscription, it's typed so -// we can mark it as temporary and so attempt to retry first time without -// notifying clients. -type resetErr string - -// Temporary Implements the internal Temporary interface -func (e resetErr) Temporary() bool { - return true -} - -// Error implements error -func (e resetErr) Error() string { - return string(e) -} - -// reset clears the state ready to start a new stream from scratch. -func (m *Materializer) reset() { - m.lock.Lock() - defer m.lock.Unlock() - - m.view.Reset() - m.index = 0 -} - -func (m *Materializer) updateView(events []*pbsubscribe.Event, index uint64) error { - m.lock.Lock() - defer m.lock.Unlock() - - if err := m.view.Update(events); err != nil { - return err - } - m.index = index - m.notifyUpdateLocked(nil) - m.retryWaiter.Reset() - return nil -} - -// notifyUpdateLocked closes the current update channel and recreates a new -// one. It must be called while holding the s.lock lock. 
-func (m *Materializer) notifyUpdateLocked(err error) { - m.err = err - close(m.updateCh) - m.updateCh = make(chan struct{}) -} - -// Result returned from the View. -type Result struct { - Index uint64 - Value interface{} - // Cached is true if the requested value was already available locally. If - // the value is false, it indicates that getFromView had to wait for an update, - Cached bool -} - -// getFromView blocks until the index of the View is greater than opts.MinIndex, -//or the context is cancelled. -func (m *Materializer) getFromView(ctx context.Context, minIndex uint64) (Result, error) { +// Query blocks until the index of the View is greater than opts.MinIndex, +// or the context is cancelled. +func (m *materializer) query(ctx context.Context, minIndex uint64) (Result, error) { m.lock.Lock() result := Result{ @@ -278,3 +133,85 @@ func (m *Materializer) getFromView(ctx context.Context, minIndex uint64) (Result } } } + +func (m *materializer) currentIndex() uint64 { + var resp uint64 + + m.lock.Lock() + resp = m.index + m.lock.Unlock() + + return resp +} + +// notifyUpdateLocked closes the current update channel and recreates a new +// one. It must be called while holding the m.lock lock. +func (m *materializer) notifyUpdateLocked(err error) { + m.err = err + close(m.updateCh) + m.updateCh = make(chan struct{}) +} + +// reset clears the state ready to start a new stream from scratch. +func (m *materializer) reset() { + m.lock.Lock() + defer m.lock.Unlock() + + m.view.Reset() + m.index = 0 +} + +// updateView updates the view from a sequence of events and stores +// the corresponding Raft index. 
+func (m *materializer) updateView(events []*pbsubscribe.Event, index uint64) error { + m.lock.Lock() + defer m.lock.Unlock() + + if err := m.view.Update(events); err != nil { + return err + } + + m.index = index + m.notifyUpdateLocked(nil) + m.retryWaiter.Reset() + return nil +} + +func (m *materializer) handleError(req *pbsubscribe.SubscribeRequest, err error) { + failures := m.retryWaiter.Failures() + if isNonTemporaryOrConsecutiveFailure(err, failures) { + m.lock.Lock() + m.notifyUpdateLocked(err) + m.lock.Unlock() + } + + m.logger.Error("subscribe call failed", + "err", err, + "topic", req.Topic, + "key", req.Key, + "failure_count", failures+1) +} + +// isNonTemporaryOrConsecutiveFailure returns true if the error is not a +// temporary error or if failures > 0. +func isNonTemporaryOrConsecutiveFailure(err error, failures int) bool { + // temporary is an interface used by net and other std lib packages to + // show error types represent temporary/recoverable errors. + temp, ok := err.(interface { + Temporary() bool + }) + return !ok || !temp.Temporary() || failures > 0 +} + +func defaultWaiter() *retry.Waiter { + return &retry.Waiter{ + MinFailures: 1, + // Start backing off with small increments (200-400ms) which will double + // each attempt. (200-400, 400-800, 800-1600, 1600-3200, 3200-6000, 6000 + // after that). (retry.Wait applies Max limit after jitter right now). 
+ Factor: 200 * time.Millisecond, + MinWait: 0, + MaxWait: 60 * time.Second, + Jitter: retry.NewJitter(100), + } +} diff --git a/agent/submatview/rpc_materializer.go b/agent/submatview/rpc_materializer.go new file mode 100644 index 0000000000..3b379d4e84 --- /dev/null +++ b/agent/submatview/rpc_materializer.go @@ -0,0 +1,125 @@ +package submatview + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/hashicorp/consul/proto/pbsubscribe" +) + +// RPCMaterializer is a materializer for a streaming cache type +// and manages the actual streaming RPC call to the servers behind +// the scenes until the cache result is discarded when its TTL expires. +type RPCMaterializer struct { + deps Deps + client StreamClient + handler eventHandler + + mat *materializer +} + +var _ Materializer = (*RPCMaterializer)(nil) + +// StreamClient provides a subscription to state change events. +type StreamClient interface { + Subscribe(ctx context.Context, in *pbsubscribe.SubscribeRequest, opts ...grpc.CallOption) (pbsubscribe.StateChangeSubscription_SubscribeClient, error) +} + +// NewRPCMaterializer returns a new Materializer. Run must be called to start it. +func NewRPCMaterializer(client StreamClient, deps Deps) *RPCMaterializer { + m := RPCMaterializer{ + deps: deps, + client: client, + mat: newMaterializer(deps.Logger, deps.View, deps.Waiter), + } + return &m +} + +// Query implements Materializer +func (m *RPCMaterializer) Query(ctx context.Context, minIndex uint64) (Result, error) { + return m.mat.query(ctx, minIndex) +} + +// Run receives events from the StreamClient and sends them to the View. It runs +// until ctx is cancelled, so it is expected to be run in a goroutine. 
+// Mirrors implementation of LocalMaterializer +// +// Run implements Materializer +func (m *RPCMaterializer) Run(ctx context.Context) { + for { + req := m.deps.Request(m.mat.currentIndex()) + err := m.subscribeOnce(ctx, req) + if ctx.Err() != nil { + return + } + m.mat.handleError(req, err) + + if err := m.mat.retryWaiter.Wait(ctx); err != nil { + return + } + } +} + +// subscribeOnce opens a new subscribe streaming call to the servers and runs +// for its lifetime or until the view is closed. +func (m *RPCMaterializer) subscribeOnce(ctx context.Context, req *pbsubscribe.SubscribeRequest) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + m.handler = initialHandler(req.Index) + + s, err := m.client.Subscribe(ctx, req) + if err != nil { + return err + } + + for { + event, err := s.Recv() + switch { + case isGrpcStatus(err, codes.Aborted): + m.mat.reset() + return resetErr("stream reset requested") + case err != nil: + return err + } + + m.handler, err = m.handler(m, event) + if err != nil { + m.mat.reset() + return err + } + } +} + +func isGrpcStatus(err error, code codes.Code) bool { + s, ok := status.FromError(err) + return ok && s.Code() == code +} + +// resetErr represents a server request to reset the subscription, it's typed so +// we can mark it as temporary and so attempt to retry first time without +// notifying clients. 
+type resetErr string + +// Temporary Implements the internal Temporary interface +func (e resetErr) Temporary() bool { + return true +} + +// Error implements error +func (e resetErr) Error() string { + return string(e) +} + +// updateView implements viewState +func (m *RPCMaterializer) updateView(events []*pbsubscribe.Event, index uint64) error { + return m.mat.updateView(events, index) +} + +// reset implements viewState +func (m *RPCMaterializer) reset() { + m.mat.reset() +} diff --git a/agent/submatview/store.go b/agent/submatview/store.go index 07363f7403..0b27347934 100644 --- a/agent/submatview/store.go +++ b/agent/submatview/store.go @@ -34,8 +34,14 @@ type Store struct { idleTTL time.Duration } +// A Materializer maintains a materialized view of a subscription on an event stream. +type Materializer interface { + Run(ctx context.Context) + Query(ctx context.Context, minIndex uint64) (Result, error) +} + type entry struct { - materializer *Materializer + materializer Materializer expiry *ttlcache.Entry stop func() // requests is the count of active requests using this entry. This entry will @@ -100,7 +106,7 @@ type Request interface { // NewMaterializer will be called if there is no active materializer to fulfil // the request. It should return a Materializer appropriate for streaming // data to fulfil this request. - NewMaterializer() (*Materializer, error) + NewMaterializer() (Materializer, error) // Type should return a string which uniquely identifies this type of request. // The returned value is used as the prefix of the key used to index // entries in the Store. @@ -124,7 +130,7 @@ func (s *Store) Get(ctx context.Context, req Request) (Result, error) { defer cancel() } - result, err := materializer.getFromView(ctx, info.MinIndex) + result, err := materializer.Query(ctx, info.MinIndex) // context.DeadlineExceeded is translated to nil to match the timeout // behaviour of agent/cache.Cache.Get. 
if err == nil || errors.Is(err, context.DeadlineExceeded) { @@ -155,7 +161,7 @@ func (s *Store) Notify( index := info.MinIndex for { - result, err := materializer.getFromView(ctx, index) + result, err := materializer.Query(ctx, index) switch { case ctx.Err() != nil: return @@ -185,7 +191,7 @@ func (s *Store) Notify( // readEntry from the store, and increment the requests counter. releaseEntry // must be called when the request is finished to decrement the counter. -func (s *Store) readEntry(req Request) (string, *Materializer, error) { +func (s *Store) readEntry(req Request) (string, Materializer, error) { info := req.CacheInfo() key := makeEntryKey(req.Type(), info) diff --git a/agent/submatview/store_integration_test.go b/agent/submatview/store_integration_test.go index e8247b8185..72eed5a5f9 100644 --- a/agent/submatview/store_integration_test.go +++ b/agent/submatview/store_integration_test.go @@ -253,7 +253,6 @@ func (e *eventProducer) Produce(ctx context.Context, pub *stream.EventPublisher) }, }, } - } e.nodesLock.Lock() diff --git a/agent/submatview/store_test.go b/agent/submatview/store_test.go index b177380d31..bdbc576c78 100644 --- a/agent/submatview/store_test.go +++ b/agent/submatview/store_test.go @@ -24,7 +24,7 @@ func TestStore_Get(t *testing.T) { store := NewStore(hclog.New(nil)) go store.Run(ctx) - req := &fakeRequest{ + req := &fakeRPCRequest{ client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents( @@ -199,14 +199,14 @@ type resultOrError struct { Err error } -type fakeRequest struct { +type fakeRPCRequest struct { index uint64 timeout time.Duration key string client *TestStreamingClient } -func (r *fakeRequest) CacheInfo() cache.RequestInfo { +func (r *fakeRPCRequest) CacheInfo() cache.RequestInfo { key := r.key if key == "" { key = "key" @@ -220,10 +220,9 @@ func (r *fakeRequest) CacheInfo() cache.RequestInfo { } } -func (r *fakeRequest) NewMaterializer() (*Materializer, error) { - return 
NewMaterializer(Deps{ +func (r *fakeRPCRequest) NewMaterializer() (Materializer, error) { + deps := Deps{ View: &fakeView{srvs: make(map[string]*pbservice.CheckServiceNode)}, - Client: r.client, Logger: hclog.New(nil), Request: func(index uint64) *pbsubscribe.SubscribeRequest { req := &pbsubscribe.SubscribeRequest{ @@ -236,10 +235,11 @@ func (r *fakeRequest) NewMaterializer() (*Materializer, error) { } return req }, - }), nil + } + return NewRPCMaterializer(r.client, deps), nil } -func (r *fakeRequest) Type() string { +func (r *fakeRPCRequest) Type() string { return fmt.Sprintf("%T", r) } @@ -291,7 +291,7 @@ func TestStore_Notify(t *testing.T) { store := NewStore(hclog.New(nil)) go store.Run(ctx) - req := &fakeRequest{ + req := &fakeRPCRequest{ client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents( @@ -360,7 +360,7 @@ func TestStore_Notify_ManyRequests(t *testing.T) { store := NewStore(hclog.New(nil)) go store.Run(ctx) - req := &fakeRequest{ + req := &fakeRPCRequest{ client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents(newEndOfSnapshotEvent(2)) @@ -393,13 +393,13 @@ func TestStore_Notify_ManyRequests(t *testing.T) { assertRequestCount(r, store, req, 4) }) - var req2 *fakeRequest + var req2 *fakeRPCRequest runStep(t, "Get and Notify with a different key", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - req2 = &fakeRequest{client: req.client, key: "key2", index: 22} + req2 = &fakeRPCRequest{client: req.client, key: "key2", index: 22} require.NoError(t, store.Notify(ctx, req2, cID, ch1)) go func() { @@ -472,7 +472,7 @@ func TestStore_Run_ExpiresEntries(t *testing.T) { store.idleTTL = ttl go store.Run(ctx) - req := &fakeRequest{ + req := &fakeRPCRequest{ client: NewTestStreamingClient(pbcommon.DefaultEnterpriseMeta.Namespace), } req.client.QueueEvents(newEndOfSnapshotEvent(2)) diff --git a/agent/testagent.go b/agent/testagent.go index 
11ca9a5188..4dbf859bc8 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -87,8 +87,6 @@ type TestAgent struct { // NewTestAgent returns a started agent with the given configuration. It fails // the test if the Agent could not be started. -// The caller is responsible for calling Shutdown() to stop the agent and remove -// temporary directories. func NewTestAgent(t *testing.T, hcl string) *TestAgent { a := StartTestAgent(t, TestAgent{HCL: hcl}) t.Cleanup(func() { a.Shutdown() }) diff --git a/agent/xds/delta.go b/agent/xds/delta.go index 872ac31aa8..5475533dd5 100644 --- a/agent/xds/delta.go +++ b/agent/xds/delta.go @@ -470,16 +470,6 @@ func (t *xDSDeltaType) Recv(req *envoy_discovery_v3.DeltaDiscoveryRequest, sf su t.wildcard = len(req.ResourceNamesSubscribe) == 0 t.registered = true registeredThisTime = true - - if sf.ForceLDSandCDSToAlwaysUseWildcardsOnReconnect { - switch t.typeURL { - case xdscommon.ListenerType, xdscommon.ClusterType: - if !t.wildcard { - t.wildcard = true - logger.Trace("fixing Envoy bug fixed in 1.19.0 by inferring wildcard mode for type") - } - } - } } /* diff --git a/agent/xds/delta_test.go b/agent/xds/delta_test.go index c60f5fc021..c094a002bc 100644 --- a/agent/xds/delta_test.go +++ b/agent/xds/delta_test.go @@ -657,72 +657,6 @@ func TestServer_DeltaAggregatedResources_v3_SlowEndpointPopulation(t *testing.T) } } -func TestServer_DeltaAggregatedResources_v3_GetAllClusterAfterConsulRestarted(t *testing.T) { - // This illustrates a scenario related to https://github.com/hashicorp/consul/issues/11833 - - aclResolve := func(id string) (acl.Authorizer, error) { - // Allow all - return acl.RootAuthorizer("manage"), nil - } - scenario := newTestServerDeltaScenario(t, aclResolve, "web-sidecar-proxy", "", 0, false) - _, mgr, errCh, envoy := scenario.server, scenario.mgr, scenario.errCh, scenario.envoy - envoy.EnvoyVersion = "1.18.0" - - sid := structs.NewServiceID("web-sidecar-proxy", nil) - - // Register the proxy to create state needed 
to Watch() on - mgr.RegisterProxy(t, sid) - - var snap *proxycfg.ConfigSnapshot - runStep(t, "get into state after consul restarted", func(t *testing.T) { - snap = newTestSnapshot(t, nil, "") - - // Send initial cluster discover. - // This is to simulate the discovery request call from envoy after disconnected from consul ads stream. - // - // We need to force it to be an older version of envoy so that the logic shifts. - envoy.SendDeltaReq(t, xdscommon.ClusterType, &envoy_discovery_v3.DeltaDiscoveryRequest{ - ResourceNamesSubscribe: []string{ - "local_app", - "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul", - }, - InitialResourceVersions: map[string]string{ - "local_app": "a948904f2f0f479b8f8197694b30184b0d2ed1c1cd2a1ec0fb85d299a192a447", - "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul": "5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03", - }, - }) - - // Check no response sent yet - assertDeltaChanBlocked(t, envoy.deltaStream.sendCh) - - requireProtocolVersionGauge(t, scenario, "v3", 1) - - // Deliver a new snapshot - // the config contains 3 clusters: local_app, db, geo-cache. - // this is to simulate the fact that there is one additional (upstream) cluster gets added to the sidecar service - // during the time xds disconnected (consul restarted). 
- mgr.DeliverConfig(t, sid, snap) - - assertDeltaResponseSent(t, envoy.deltaStream.sendCh, &envoy_discovery_v3.DeltaDiscoveryResponse{ - TypeUrl: xdscommon.ClusterType, - Nonce: hexString(1), - Resources: makeTestResources(t, - makeTestCluster(t, snap, "tcp:local_app"), - makeTestCluster(t, snap, "tcp:db"), - makeTestCluster(t, snap, "tcp:geo-cache"), - ), - }) - }) - - envoy.Close() - select { - case err := <-errCh: - require.NoError(t, err) - case <-time.After(50 * time.Millisecond): - t.Fatalf("timed out waiting for handler to finish") - } -} - func TestServer_DeltaAggregatedResources_v3_BasicProtocol_TCP_clusterChangesImpactEndpoints(t *testing.T) { aclResolve := func(id string) (acl.Authorizer, error) { // Allow all diff --git a/agent/xds/envoy_versioning.go b/agent/xds/envoy_versioning.go index 1ee5798903..e0face0bdd 100644 --- a/agent/xds/envoy_versioning.go +++ b/agent/xds/envoy_versioning.go @@ -11,9 +11,7 @@ import ( var ( // minSupportedVersion is the oldest mainline version we support. This should always be // the zero'th point release of the last element of proxysupport.EnvoyVersions. - minSupportedVersion = version.Must(version.NewVersion("1.18.0")) - - minVersionToForceLDSandCDSToAlwaysUseWildcardsOnReconnect = version.Must(version.NewVersion("1.19.0")) + minSupportedVersion = version.Must(version.NewVersion("1.19.0")) specificUnsupportedVersions = []unsupportedVersion{} ) @@ -25,19 +23,8 @@ type unsupportedVersion struct { } type supportedProxyFeatures struct { - // Older versions of Envoy incorrectly exploded a wildcard subscription for - // LDS and CDS into specific line items on incremental xDS reconnect. They - // would populate both InitialResourceVersions and ResourceNamesSubscribe - // when they SHOULD have left ResourceNamesSubscribe empty (or used an - // explicit "*" in later Envoy versions) to imply wildcard mode. 
On - // reconnect, Consul interpreted the lack of the wildcard attribute as - // implying that the Envoy instance should not receive updates for any - // newly created listeners and clusters for the remaining life of that - // Envoy sidecar process. - // - // see: https://github.com/envoyproxy/envoy/issues/16063 - // see: https://github.com/envoyproxy/envoy/pull/16153 - ForceLDSandCDSToAlwaysUseWildcardsOnReconnect bool + // Put feature switches here when necessary. For reference, The most recent remove of a feature flag was removed in + // . } func determineSupportedProxyFeatures(node *envoy_core_v3.Node) (supportedProxyFeatures, error) { @@ -75,9 +62,7 @@ func determineSupportedProxyFeaturesFromVersion(version *version.Version) (suppo sf := supportedProxyFeatures{} - if version.LessThan(minVersionToForceLDSandCDSToAlwaysUseWildcardsOnReconnect) { - sf.ForceLDSandCDSToAlwaysUseWildcardsOnReconnect = true - } + // when feature flags necessary, populate here by calling version.LessThan(...) return sf, nil } diff --git a/agent/xds/envoy_versioning_test.go b/agent/xds/envoy_versioning_test.go index c11bcf76a4..8f93c90dda 100644 --- a/agent/xds/envoy_versioning_test.go +++ b/agent/xds/envoy_versioning_test.go @@ -118,10 +118,13 @@ func TestDetermineSupportedProxyFeaturesFromString(t *testing.T) { "1.16.4": {expectErr: "Envoy 1.16.4 " + errTooOld}, "1.16.5": {expectErr: "Envoy 1.16.5 " + errTooOld}, "1.16.6": {expectErr: "Envoy 1.16.6 " + errTooOld}, + "1.17.4": {expectErr: "Envoy 1.17.4 " + errTooOld}, + "1.18.6": {expectErr: "Envoy 1.18.6 " + errTooOld}, } // Insert a bunch of valid versions. // Populate feature flags here when appropriate. See consul 1.10.x for reference. 
+ /* Example from 1.18 for _, v := range []string{ "1.18.0", "1.18.1", "1.18.2", "1.18.3", "1.18.4", "1.18.5", "1.18.6", } { @@ -129,10 +132,12 @@ func TestDetermineSupportedProxyFeaturesFromString(t *testing.T) { ForceLDSandCDSToAlwaysUseWildcardsOnReconnect: true, }} } + */ for _, v := range []string{ "1.19.0", "1.19.1", "1.19.2", "1.19.3", "1.20.0", "1.20.1", "1.20.2", "1.21.1", + "1.22.0", } { cases[v] = testcase{expect: supportedProxyFeatures{}} } diff --git a/agent/xds/listeners.go b/agent/xds/listeners.go index b9b7855b90..5fcc83a912 100644 --- a/agent/xds/listeners.go +++ b/agent/xds/listeners.go @@ -18,8 +18,13 @@ import ( envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + envoy_grpc_http1_bridge_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_http1_bridge/v3" envoy_grpc_stats_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_stats/v3" + envoy_http_router_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" + envoy_original_dst_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/original_dst/v3" + envoy_tls_inspector_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/tls_inspector/v3" envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" + envoy_sni_cluster_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/sni_cluster/v3" envoy_tcp_proxy_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3" envoy_tls_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3" envoy_type_v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3" @@ -80,18 +85,19 @@ func (s *ResourceGenerator) 
listenersFromSnapshotConnectProxy(cfgSnap *proxycfg. port = cfgSnap.Proxy.TransparentProxy.OutboundListenerPort } + originalDstFilter, err := makeEnvoyListenerFilter("envoy.filters.listener.original_dst", &envoy_original_dst_v3.OriginalDst{}) + if err != nil { + return nil, err + } + outboundListener = makePortListener(OutboundListenerName, "127.0.0.1", port, envoy_core_v3.TrafficDirection_OUTBOUND) outboundListener.FilterChains = make([]*envoy_listener_v3.FilterChain, 0) outboundListener.ListenerFilters = []*envoy_listener_v3.ListenerFilter{ - { - // The original_dst filter is a listener filter that recovers the original destination - // address before the iptables redirection. This filter is needed for transparent - // proxies because they route to upstreams using filter chains that match on the - // destination IP address. If the filter is not present, no chain will match. - // - // TODO(tproxy): Hard-coded until we upgrade the go-control-plane library - Name: "envoy.filters.listener.original_dst", - }, + // The original_dst filter is a listener filter that recovers the original destination + // address before the iptables redirection. This filter is needed for transparent + // proxies because they route to upstreams using filter chains that match on the + // destination IP address. If the filter is not present, no chain will match. 
+ originalDstFilter, } } @@ -1058,9 +1064,15 @@ func (s *ResourceGenerator) makeTerminatingGatewayListener( if err != nil { return nil, err } + + sniCluster, err := makeSNIClusterFilter() + if err != nil { + return nil, err + } + fallback := &envoy_listener_v3.FilterChain{ Filters: []*envoy_listener_v3.Filter{ - {Name: "envoy.filters.network.sni_cluster"}, + sniCluster, tcpProxy, }, } @@ -1383,7 +1395,7 @@ func makeListenerFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, err } func makeTLSInspectorListenerFilter() (*envoy_listener_v3.ListenerFilter, error) { - return &envoy_listener_v3.ListenerFilter{Name: "envoy.filters.listener.tls_inspector"}, nil + return makeEnvoyListenerFilter("envoy.filters.listener.tls_inspector", &envoy_tls_inspector_v3.TlsInspector{}) } func makeSNIFilterChainMatch(sniMatches ...string) *envoy_listener_v3.FilterChainMatch { @@ -1393,8 +1405,7 @@ func makeSNIFilterChainMatch(sniMatches ...string) *envoy_listener_v3.FilterChai } func makeSNIClusterFilter() (*envoy_listener_v3.Filter, error) { - // This filter has no config which is why we are not calling make - return &envoy_listener_v3.Filter{Name: "envoy.filters.network.sni_cluster"}, nil + return makeFilter("envoy.filters.network.sni_cluster", &envoy_sni_cluster_v3.SniCluster{}) } func makeTCPProxyFilter(filterName, cluster, statPrefix string) (*envoy_listener_v3.Filter, error) { @@ -1413,13 +1424,16 @@ func makeStatPrefix(prefix, filterName string) string { } func makeHTTPFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) { + router, err := makeEnvoyHTTPFilter("envoy.filters.http.router", &envoy_http_router_v3.Router{}) + if err != nil { + return nil, err + } + cfg := &envoy_http_v3.HttpConnectionManager{ StatPrefix: makeStatPrefix(opts.statPrefix, opts.filterName), CodecType: envoy_http_v3.HttpConnectionManager_AUTO, HttpFilters: []*envoy_http_v3.HttpFilter{ - { - Name: "envoy.filters.http.router", - }, + router, }, Tracing: 
&envoy_http_v3.HttpConnectionManager_Tracing{ // Don't trace any requests by default unless the client application @@ -1508,10 +1522,13 @@ func makeHTTPFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) } if opts.protocol == "grpc" { - // Add grpc bridge before router and authz - cfg.HttpFilters = append([]*envoy_http_v3.HttpFilter{{ - Name: "envoy.filters.http.grpc_http1_bridge", - }}, cfg.HttpFilters...) + grpcHttp1Bridge, err := makeEnvoyHTTPFilter( + "envoy.filters.http.grpc_http1_bridge", + &envoy_grpc_http1_bridge_v3.Config{}, + ) + if err != nil { + return nil, err + } // In envoy 1.14.x the default value "stats_for_all_methods=true" was // deprecated, and was changed to "false" in 1.18.x. Avoid using the @@ -1527,14 +1544,28 @@ func makeHTTPFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) if err != nil { return nil, err } + + // Add grpc bridge before router and authz, and the stats in front of that. cfg.HttpFilters = append([]*envoy_http_v3.HttpFilter{ grpcStatsFilter, + grpcHttp1Bridge, }, cfg.HttpFilters...) 
} return makeFilter("envoy.filters.network.http_connection_manager", cfg) } +func makeEnvoyListenerFilter(name string, cfg proto.Message) (*envoy_listener_v3.ListenerFilter, error) { + any, err := ptypes.MarshalAny(cfg) + if err != nil { + return nil, err + } + return &envoy_listener_v3.ListenerFilter{ + Name: name, + ConfigType: &envoy_listener_v3.ListenerFilter_TypedConfig{TypedConfig: any}, + }, nil +} + func makeFilter(name string, cfg proto.Message) (*envoy_listener_v3.Filter, error) { any, err := ptypes.MarshalAny(cfg) if err != nil { diff --git a/agent/xds/proxysupport/proxysupport.go b/agent/xds/proxysupport/proxysupport.go index c3a9ba05fb..eb693e6328 100644 --- a/agent/xds/proxysupport/proxysupport.go +++ b/agent/xds/proxysupport/proxysupport.go @@ -7,8 +7,8 @@ package proxysupport // // see: https://www.consul.io/docs/connect/proxies/envoy#supported-versions var EnvoyVersions = []string{ + "1.22.0", "1.21.1", "1.20.2", "1.19.3", - "1.18.6", } diff --git a/agent/xds/testdata/listeners/connect-proxy-with-chain-and-overrides.latest.golden b/agent/xds/testdata/listeners/connect-proxy-with-chain-and-overrides.latest.golden index 1410130f83..4e524021f8 100644 --- a/agent/xds/testdata/listeners/connect-proxy-with-chain-and-overrides.latest.golden +++ b/agent/xds/testdata/listeners/connect-proxy-with-chain-and-overrides.latest.golden @@ -36,10 +36,16 @@ } }, { - "name": "envoy.filters.http.grpc_http1_bridge" + "name": "envoy.filters.http.grpc_http1_bridge", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.grpc_http1_bridge.v3.Config" + } }, { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/connect-proxy-with-grpc-chain.latest.golden b/agent/xds/testdata/listeners/connect-proxy-with-grpc-chain.latest.golden index 1410130f83..4e524021f8 100644 
--- a/agent/xds/testdata/listeners/connect-proxy-with-grpc-chain.latest.golden +++ b/agent/xds/testdata/listeners/connect-proxy-with-grpc-chain.latest.golden @@ -36,10 +36,16 @@ } }, { - "name": "envoy.filters.http.grpc_http1_bridge" + "name": "envoy.filters.http.grpc_http1_bridge", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.grpc_http1_bridge.v3.Config" + } }, { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/connect-proxy-with-http-chain.latest.golden b/agent/xds/testdata/listeners/connect-proxy-with-http-chain.latest.golden index 2ef5af99d4..0eed52477d 100644 --- a/agent/xds/testdata/listeners/connect-proxy-with-http-chain.latest.golden +++ b/agent/xds/testdata/listeners/connect-proxy-with-http-chain.latest.golden @@ -29,7 +29,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/connect-proxy-with-http2-chain.latest.golden b/agent/xds/testdata/listeners/connect-proxy-with-http2-chain.latest.golden index 6463f5b1ba..56d9ffd881 100644 --- a/agent/xds/testdata/listeners/connect-proxy-with-http2-chain.latest.golden +++ b/agent/xds/testdata/listeners/connect-proxy-with-http2-chain.latest.golden @@ -29,7 +29,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/expose-checks.latest.golden b/agent/xds/testdata/listeners/expose-checks.latest.golden index a893884f19..518285040d 100644 --- 
a/agent/xds/testdata/listeners/expose-checks.latest.golden +++ b/agent/xds/testdata/listeners/expose-checks.latest.golden @@ -57,7 +57,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/expose-paths-local-app-paths.latest.golden b/agent/xds/testdata/listeners/expose-paths-local-app-paths.latest.golden index 65bdb30d96..cb7b0d46e7 100644 --- a/agent/xds/testdata/listeners/expose-paths-local-app-paths.latest.golden +++ b/agent/xds/testdata/listeners/expose-paths-local-app-paths.latest.golden @@ -41,7 +41,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -96,7 +99,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/expose-paths-new-cluster-http2.latest.golden b/agent/xds/testdata/listeners/expose-paths-new-cluster-http2.latest.golden index fb860a535a..84ef190ad9 100644 --- a/agent/xds/testdata/listeners/expose-paths-new-cluster-http2.latest.golden +++ b/agent/xds/testdata/listeners/expose-paths-new-cluster-http2.latest.golden @@ -41,7 +41,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -99,7 +102,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/http-listener-with-timeouts.latest.golden b/agent/xds/testdata/listeners/http-listener-with-timeouts.latest.golden index 15997388c7..0cd9b242b7 100644 --- a/agent/xds/testdata/listeners/http-listener-with-timeouts.latest.golden +++ b/agent/xds/testdata/listeners/http-listener-with-timeouts.latest.golden @@ -101,7 +101,10 @@ } }, { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/http-public-listener.latest.golden b/agent/xds/testdata/listeners/http-public-listener.latest.golden index 85a5acb535..d0a676eff2 100644 --- a/agent/xds/testdata/listeners/http-public-listener.latest.golden +++ b/agent/xds/testdata/listeners/http-public-listener.latest.golden @@ -100,7 +100,10 @@ } }, { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/http-upstream.latest.golden b/agent/xds/testdata/listeners/http-upstream.latest.golden index 14046bfcf8..717877fcd7 100644 --- a/agent/xds/testdata/listeners/http-upstream.latest.golden +++ b/agent/xds/testdata/listeners/http-upstream.latest.golden @@ -41,7 +41,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/ingress-http-multiple-services.latest.golden b/agent/xds/testdata/listeners/ingress-http-multiple-services.latest.golden index 0ee1474c75..bcdf29c643 100644 --- 
a/agent/xds/testdata/listeners/ingress-http-multiple-services.latest.golden +++ b/agent/xds/testdata/listeners/ingress-http-multiple-services.latest.golden @@ -29,7 +29,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -72,7 +75,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/ingress-splitter-with-resolver-redirect.latest.golden b/agent/xds/testdata/listeners/ingress-splitter-with-resolver-redirect.latest.golden index 4f804bc74f..ae2f68556e 100644 --- a/agent/xds/testdata/listeners/ingress-splitter-with-resolver-redirect.latest.golden +++ b/agent/xds/testdata/listeners/ingress-splitter-with-resolver-redirect.latest.golden @@ -29,7 +29,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/ingress-with-sds-listener+service-level.latest.golden b/agent/xds/testdata/listeners/ingress-with-sds-listener+service-level.latest.golden index b5531913dd..02bcf8d36e 100644 --- a/agent/xds/testdata/listeners/ingress-with-sds-listener+service-level.latest.golden +++ b/agent/xds/testdata/listeners/ingress-with-sds-listener+service-level.latest.golden @@ -34,7 +34,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -96,7 +99,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": 
"envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -143,7 +149,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "OUTBOUND" diff --git a/agent/xds/testdata/listeners/ingress-with-sds-listener-gw-level-http.latest.golden b/agent/xds/testdata/listeners/ingress-with-sds-listener-gw-level-http.latest.golden index e858c2afba..5e197a36e5 100644 --- a/agent/xds/testdata/listeners/ingress-with-sds-listener-gw-level-http.latest.golden +++ b/agent/xds/testdata/listeners/ingress-with-sds-listener-gw-level-http.latest.golden @@ -29,7 +29,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/ingress-with-sds-service-level-mixed-no-tls.latest.golden b/agent/xds/testdata/listeners/ingress-with-sds-service-level-mixed-no-tls.latest.golden index dbfb9f40ed..bb017e85d6 100644 --- a/agent/xds/testdata/listeners/ingress-with-sds-service-level-mixed-no-tls.latest.golden +++ b/agent/xds/testdata/listeners/ingress-with-sds-service-level-mixed-no-tls.latest.golden @@ -34,7 +34,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -96,7 +99,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -111,7 +117,10 @@ ], 
"listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "OUTBOUND" diff --git a/agent/xds/testdata/listeners/ingress-with-sds-service-level.latest.golden b/agent/xds/testdata/listeners/ingress-with-sds-service-level.latest.golden index cc7c0c30cc..d89cb9eefb 100644 --- a/agent/xds/testdata/listeners/ingress-with-sds-service-level.latest.golden +++ b/agent/xds/testdata/listeners/ingress-with-sds-service-level.latest.golden @@ -34,7 +34,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -101,7 +104,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -148,7 +154,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "OUTBOUND" diff --git a/agent/xds/testdata/listeners/ingress-with-single-tls-listener.latest.golden b/agent/xds/testdata/listeners/ingress-with-single-tls-listener.latest.golden index 3c0d3cbec8..17fef5d07e 100644 --- a/agent/xds/testdata/listeners/ingress-with-single-tls-listener.latest.golden +++ b/agent/xds/testdata/listeners/ingress-with-single-tls-listener.latest.golden @@ -29,7 +29,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -72,7 +75,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/ingress-with-tls-min-version-listeners-gateway-defaults.latest.golden b/agent/xds/testdata/listeners/ingress-with-tls-min-version-listeners-gateway-defaults.latest.golden index 6cce4ea290..af876f8857 100644 --- a/agent/xds/testdata/listeners/ingress-with-tls-min-version-listeners-gateway-defaults.latest.golden +++ b/agent/xds/testdata/listeners/ingress-with-tls-min-version-listeners-gateway-defaults.latest.golden @@ -29,7 +29,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -99,7 +102,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -169,7 +175,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -239,7 +248,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -309,7 +321,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff 
--git a/agent/xds/testdata/listeners/ingress-with-tls-mixed-listeners.latest.golden b/agent/xds/testdata/listeners/ingress-with-tls-mixed-listeners.latest.golden index e5ba23e893..e504650b3e 100644 --- a/agent/xds/testdata/listeners/ingress-with-tls-mixed-listeners.latest.golden +++ b/agent/xds/testdata/listeners/ingress-with-tls-mixed-listeners.latest.golden @@ -29,7 +29,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -99,7 +102,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/ingress-with-tls-mixed-min-version-listeners.latest.golden b/agent/xds/testdata/listeners/ingress-with-tls-mixed-min-version-listeners.latest.golden index 47b7046a92..1347394a2f 100644 --- a/agent/xds/testdata/listeners/ingress-with-tls-mixed-min-version-listeners.latest.golden +++ b/agent/xds/testdata/listeners/ingress-with-tls-mixed-min-version-listeners.latest.golden @@ -29,7 +29,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -99,7 +102,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -169,7 +175,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git 
a/agent/xds/testdata/listeners/mesh-gateway-custom-addresses.latest.golden b/agent/xds/testdata/listeners/mesh-gateway-custom-addresses.latest.golden index 1d948062e8..7c293d43d8 100644 --- a/agent/xds/testdata/listeners/mesh-gateway-custom-addresses.latest.golden +++ b/agent/xds/testdata/listeners/mesh-gateway-custom-addresses.latest.golden @@ -65,7 +65,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -80,7 +83,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ] }, @@ -148,7 +154,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -163,7 +172,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ] }, @@ -231,7 +243,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -246,7 +261,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ] }, @@ -314,7 +332,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -329,7 +350,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ] } diff --git a/agent/xds/testdata/listeners/mesh-gateway-no-services.latest.golden b/agent/xds/testdata/listeners/mesh-gateway-no-services.latest.golden index f1d2001d38..40d5b919bd 100644 --- a/agent/xds/testdata/listeners/mesh-gateway-no-services.latest.golden +++ b/agent/xds/testdata/listeners/mesh-gateway-no-services.latest.golden @@ -14,7 +14,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -29,7 +32,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ] } diff --git a/agent/xds/testdata/listeners/mesh-gateway-tagged-addresses.latest.golden b/agent/xds/testdata/listeners/mesh-gateway-tagged-addresses.latest.golden index 99f09630fd..4c76beebb7 100644 --- a/agent/xds/testdata/listeners/mesh-gateway-tagged-addresses.latest.golden +++ b/agent/xds/testdata/listeners/mesh-gateway-tagged-addresses.latest.golden @@ -65,7 +65,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + 
"name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -80,7 +83,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ] }, @@ -148,7 +154,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -163,7 +172,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ] } diff --git a/agent/xds/testdata/listeners/mesh-gateway-using-federation-states.latest.golden b/agent/xds/testdata/listeners/mesh-gateway-using-federation-states.latest.golden index 092529f4c6..7505e7c201 100644 --- a/agent/xds/testdata/listeners/mesh-gateway-using-federation-states.latest.golden +++ b/agent/xds/testdata/listeners/mesh-gateway-using-federation-states.latest.golden @@ -65,7 +65,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -80,7 +83,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ] } diff 
--git a/agent/xds/testdata/listeners/mesh-gateway.latest.golden b/agent/xds/testdata/listeners/mesh-gateway.latest.golden index 092529f4c6..7505e7c201 100644 --- a/agent/xds/testdata/listeners/mesh-gateway.latest.golden +++ b/agent/xds/testdata/listeners/mesh-gateway.latest.golden @@ -65,7 +65,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -80,7 +83,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ] } diff --git a/agent/xds/testdata/listeners/splitter-with-resolver-redirect.latest.golden b/agent/xds/testdata/listeners/splitter-with-resolver-redirect.latest.golden index 2ef5af99d4..0eed52477d 100644 --- a/agent/xds/testdata/listeners/splitter-with-resolver-redirect.latest.golden +++ b/agent/xds/testdata/listeners/splitter-with-resolver-redirect.latest.golden @@ -29,7 +29,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { diff --git a/agent/xds/testdata/listeners/terminating-gateway-custom-and-tagged-addresses.latest.golden b/agent/xds/testdata/listeners/terminating-gateway-custom-and-tagged-addresses.latest.golden index 80e0a60453..26f56d3dd3 100644 --- a/agent/xds/testdata/listeners/terminating-gateway-custom-and-tagged-addresses.latest.golden +++ b/agent/xds/testdata/listeners/terminating-gateway-custom-and-tagged-addresses.latest.golden @@ -230,7 +230,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": 
"envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -245,7 +248,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "INBOUND" @@ -479,7 +485,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -494,7 +503,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "INBOUND" diff --git a/agent/xds/testdata/listeners/terminating-gateway-no-api-cert.latest.golden b/agent/xds/testdata/listeners/terminating-gateway-no-api-cert.latest.golden index 1c59ad859b..acbcc16b42 100644 --- a/agent/xds/testdata/listeners/terminating-gateway-no-api-cert.latest.golden +++ b/agent/xds/testdata/listeners/terminating-gateway-no-api-cert.latest.golden @@ -176,7 +176,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -191,7 +194,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "INBOUND" diff --git a/agent/xds/testdata/listeners/terminating-gateway-no-services.latest.golden b/agent/xds/testdata/listeners/terminating-gateway-no-services.latest.golden index 042527d73e..329a0cb6c3 100644 --- a/agent/xds/testdata/listeners/terminating-gateway-no-services.latest.golden +++ b/agent/xds/testdata/listeners/terminating-gateway-no-services.latest.golden @@ -14,7 +14,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -29,7 +32,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "INBOUND" diff --git a/agent/xds/testdata/listeners/terminating-gateway-service-subsets.latest.golden b/agent/xds/testdata/listeners/terminating-gateway-service-subsets.latest.golden index f68f73e654..c2ce2223ba 100644 --- a/agent/xds/testdata/listeners/terminating-gateway-service-subsets.latest.golden +++ b/agent/xds/testdata/listeners/terminating-gateway-service-subsets.latest.golden @@ -205,7 +205,10 @@ } }, { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -276,7 +279,10 @@ } }, { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -347,7 +353,10 @@ } }, { - "name": "envoy.filters.http.router" + "name": 
"envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -389,7 +398,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -404,7 +416,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "INBOUND" diff --git a/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-cipher-suites.latest.golden b/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-cipher-suites.latest.golden index 7b8a7eb8f2..b3a94c9478 100644 --- a/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-cipher-suites.latest.golden +++ b/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-cipher-suites.latest.golden @@ -242,7 +242,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -257,7 +260,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "INBOUND" diff --git a/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-max-version.latest.golden b/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-max-version.latest.golden index 
433a499029..e2554002e0 100644 --- a/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-max-version.latest.golden +++ b/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-max-version.latest.golden @@ -230,7 +230,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -245,7 +248,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "INBOUND" diff --git a/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-min-version.latest.golden b/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-min-version.latest.golden index 74a08b900e..daf9ca01fc 100644 --- a/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-min-version.latest.golden +++ b/agent/xds/testdata/listeners/terminating-gateway-with-tls-incoming-min-version.latest.golden @@ -230,7 +230,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -245,7 +248,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "INBOUND" diff --git a/agent/xds/testdata/listeners/terminating-gateway.latest.golden b/agent/xds/testdata/listeners/terminating-gateway.latest.golden index 
ab610fae3d..4af97913f8 100644 --- a/agent/xds/testdata/listeners/terminating-gateway.latest.golden +++ b/agent/xds/testdata/listeners/terminating-gateway.latest.golden @@ -230,7 +230,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -245,7 +248,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "INBOUND" diff --git a/agent/xds/testdata/listeners/transparent-proxy-catalog-destinations-only.latest.golden b/agent/xds/testdata/listeners/transparent-proxy-catalog-destinations-only.latest.golden index 394e4eb355..9d6d500378 100644 --- a/agent/xds/testdata/listeners/transparent-proxy-catalog-destinations-only.latest.golden +++ b/agent/xds/testdata/listeners/transparent-proxy-catalog-destinations-only.latest.golden @@ -59,7 +59,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.original_dst" + "name": "envoy.filters.listener.original_dst", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.original_dst.v3.OriginalDst" + } } ], "trafficDirection": "OUTBOUND" diff --git a/agent/xds/testdata/listeners/transparent-proxy-dial-instances-directly.latest.golden b/agent/xds/testdata/listeners/transparent-proxy-dial-instances-directly.latest.golden index e5afb75387..d6f3ea51d8 100644 --- a/agent/xds/testdata/listeners/transparent-proxy-dial-instances-directly.latest.golden +++ b/agent/xds/testdata/listeners/transparent-proxy-dial-instances-directly.latest.golden @@ -115,7 +115,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.original_dst" + "name": 
"envoy.filters.listener.original_dst", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.original_dst.v3.OriginalDst" + } } ], "trafficDirection": "OUTBOUND" diff --git a/agent/xds/testdata/listeners/transparent-proxy-http-upstream.latest.golden b/agent/xds/testdata/listeners/transparent-proxy-http-upstream.latest.golden index 612f4669b3..b6f00f2cdc 100644 --- a/agent/xds/testdata/listeners/transparent-proxy-http-upstream.latest.golden +++ b/agent/xds/testdata/listeners/transparent-proxy-http-upstream.latest.golden @@ -78,7 +78,10 @@ }, "httpFilters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -105,7 +108,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.original_dst" + "name": "envoy.filters.listener.original_dst", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.original_dst.v3.OriginalDst" + } } ], "trafficDirection": "OUTBOUND" diff --git a/agent/xds/testdata/listeners/transparent-proxy-terminating-gateway.latest.golden b/agent/xds/testdata/listeners/transparent-proxy-terminating-gateway.latest.golden index 5267c85583..949e2f8b0c 100644 --- a/agent/xds/testdata/listeners/transparent-proxy-terminating-gateway.latest.golden +++ b/agent/xds/testdata/listeners/transparent-proxy-terminating-gateway.latest.golden @@ -79,7 +79,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.original_dst" + "name": "envoy.filters.listener.original_dst", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.original_dst.v3.OriginalDst" + } } ], "trafficDirection": "OUTBOUND" diff --git a/agent/xds/testdata/listeners/transparent-proxy.latest.golden b/agent/xds/testdata/listeners/transparent-proxy.latest.golden index d390e3d9f7..ca8b75eb2d 100644 --- 
a/agent/xds/testdata/listeners/transparent-proxy.latest.golden +++ b/agent/xds/testdata/listeners/transparent-proxy.latest.golden @@ -75,7 +75,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.original_dst" + "name": "envoy.filters.listener.original_dst", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.original_dst.v3.OriginalDst" + } } ], "trafficDirection": "OUTBOUND" diff --git a/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway-with-service-resolvers.latest.golden b/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway-with-service-resolvers.latest.golden index a64481e5e6..32cfda1206 100644 --- a/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway-with-service-resolvers.latest.golden +++ b/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway-with-service-resolvers.latest.golden @@ -158,7 +158,10 @@ } }, { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -237,7 +240,10 @@ } }, { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -370,7 +376,10 @@ } }, { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -413,7 +422,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -428,7 +440,10 @@ ], "listenerFilters": [ { - "name": 
"envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "INBOUND" diff --git a/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.latest.golden b/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.latest.golden index dea85717fb..f415e40ab2 100644 --- a/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.latest.golden +++ b/agent/xds/testdata/serverless_plugin/listeners/lambda-terminating-gateway.latest.golden @@ -212,7 +212,10 @@ } }, { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ], "tracing": { @@ -255,7 +258,10 @@ { "filters": [ { - "name": "envoy.filters.network.sni_cluster" + "name": "envoy.filters.network.sni_cluster", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.network.sni_cluster.v3.SniCluster" + } }, { "name": "envoy.filters.network.tcp_proxy", @@ -270,7 +276,10 @@ ], "listenerFilters": [ { - "name": "envoy.filters.listener.tls_inspector" + "name": "envoy.filters.listener.tls_inspector", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.listener.tls_inspector.v3.TlsInspector" + } } ], "trafficDirection": "INBOUND" diff --git a/agent/xds/xds_protocol_helpers_test.go b/agent/xds/xds_protocol_helpers_test.go index c4d5f86738..544983141e 100644 --- a/agent/xds/xds_protocol_helpers_test.go +++ b/agent/xds/xds_protocol_helpers_test.go @@ -14,6 +14,7 @@ import ( envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" envoy_rbac_v3 "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3" envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" + envoy_http_router_v3 
"github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3" envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" envoy_network_rbac_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/rbac/v3" envoy_tcp_proxy_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3" @@ -306,6 +307,18 @@ func xdsNewFilter(t *testing.T, name string, cfg proto.Message) *envoy_listener_ return f } +func xdsNewListenerFilter(t *testing.T, name string, cfg proto.Message) *envoy_listener_v3.ListenerFilter { + f, err := makeEnvoyListenerFilter(name, cfg) + require.NoError(t, err) + return f +} + +func xdsNewHttpFilter(t *testing.T, name string, cfg proto.Message) *envoy_http_v3.HttpFilter { + f, err := makeEnvoyHTTPFilter(name, cfg) + require.NoError(t, err) + return f +} + func mustHashResource(t *testing.T, res proto.Message) string { v, err := hashResource(res) require.NoError(t, err) @@ -626,7 +639,7 @@ func makeTestListener(t *testing.T, snap *proxycfg.ConfigSnapshot, fixtureName s Filters: []*envoy_listener_v3.Filter{ xdsNewFilter(t, "envoy.filters.network.http_connection_manager", &envoy_http_v3.HttpConnectionManager{ HttpFilters: []*envoy_http_v3.HttpFilter{ - {Name: "envoy.filters.http.router"}, + xdsNewHttpFilter(t, "envoy.filters.http.router", &envoy_http_router_v3.Router{}), }, RouteSpecifier: &envoy_http_v3.HttpConnectionManager_RouteConfig{ RouteConfig: makeTestRoute(t, "http2:db:inline"), @@ -651,7 +664,7 @@ func makeTestListener(t *testing.T, snap *proxycfg.ConfigSnapshot, fixtureName s Filters: []*envoy_listener_v3.Filter{ xdsNewFilter(t, "envoy.filters.network.http_connection_manager", &envoy_http_v3.HttpConnectionManager{ HttpFilters: []*envoy_http_v3.HttpFilter{ - {Name: "envoy.filters.http.router"}, + xdsNewHttpFilter(t, "envoy.filters.http.router", &envoy_http_router_v3.Router{}), }, RouteSpecifier: 
&envoy_http_v3.HttpConnectionManager_Rds{ Rds: &envoy_http_v3.Rds{ @@ -679,7 +692,7 @@ func makeTestListener(t *testing.T, snap *proxycfg.ConfigSnapshot, fixtureName s Filters: []*envoy_listener_v3.Filter{ xdsNewFilter(t, "envoy.filters.network.http_connection_manager", &envoy_http_v3.HttpConnectionManager{ HttpFilters: []*envoy_http_v3.HttpFilter{ - {Name: "envoy.filters.http.router"}, + xdsNewHttpFilter(t, "envoy.filters.http.router", &envoy_http_router_v3.Router{}), }, RouteSpecifier: &envoy_http_v3.HttpConnectionManager_Rds{ Rds: &envoy_http_v3.Rds{ diff --git a/api/agent.go b/api/agent.go index 7bbe39ea79..f69c697c50 100644 --- a/api/agent.go +++ b/api/agent.go @@ -92,6 +92,7 @@ type AgentService struct { ContentHash string `json:",omitempty" bexpr:"-"` Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` Connect *AgentServiceConnect `json:",omitempty"` + PeerName string `json:",omitempty"` // NOTE: If we ever set the ContentHash outside of singular service lookup then we may need // to include the Namespace in the hash. When we do, then we are in for lots of fun with tests. // For now though, ignoring it works well enough. 
diff --git a/api/agent_test.go b/api/agent_test.go index 6b5c97e76b..0c1660b1e9 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -784,7 +784,7 @@ func TestAPI_AgentService(t *testing.T) { ID: "foo", Service: "foo", Tags: []string{"bar", "baz"}, - ContentHash: "f72563cae6924fb5", + ContentHash: "3e352f348d44f7eb", Port: 8000, Weights: AgentWeights{ Passing: 1, diff --git a/api/api_test.go b/api/api_test.go index 8af27f0261..3f4e4e3255 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -3,12 +3,14 @@ package api import ( crand "crypto/rand" "crypto/tls" + "crypto/x509" "fmt" "io/ioutil" "net" "net/http" "net/url" "os" + "path" "path/filepath" "reflect" "runtime" @@ -16,6 +18,8 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -589,9 +593,8 @@ func TestAPI_SetupTLSConfig(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - if len(cc.RootCAs.Subjects()) != 2 { - t.Fatalf("didn't load root CAs") - } + expectedCaPoolByDir := getExpectedCaPoolByDir(t) + assertDeepEqual(t, expectedCaPoolByDir, cc.RootCAs, cmpCertPool) // Load certs in-memory certPEM, err := ioutil.ReadFile("../test/hostname/Alice.crt") @@ -1098,3 +1101,35 @@ func TestAPI_GenerateEnvHTTPS(t *testing.T) { require.Equal(t, expected, c.GenerateEnv()) } + +func getExpectedCaPoolByDir(t *testing.T) *x509.CertPool { + pool := x509.NewCertPool() + entries, err := os.ReadDir("../test/ca_path") + require.NoError(t, err) + + for _, entry := range entries { + filename := path.Join("../test/ca_path", entry.Name()) + + data, err := ioutil.ReadFile(filename) + require.NoError(t, err) + + if !pool.AppendCertsFromPEM(data) { + t.Fatalf("could not add test ca %s to pool", filename) + } + } + + return pool +} + +// lazyCerts has a func field which can't be compared. 
+var cmpCertPool = cmp.Options{ + cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"), + cmp.AllowUnexported(x509.CertPool{}), +} + +func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { + t.Helper() + if diff := cmp.Diff(x, y, opts...); diff != "" { + t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) + } +} diff --git a/api/config_entry_exports.go b/api/config_entry_exports.go index ae9cb2ff62..14a021f649 100644 --- a/api/config_entry_exports.go +++ b/api/config_entry_exports.go @@ -44,9 +44,14 @@ type ExportedService struct { } // ServiceConsumer represents a downstream consumer of the service to be exported. +// At most one of Partition or PeerName must be specified. type ServiceConsumer struct { // Partition is the admin partition to export the service to. + // Deprecated: PeerName should be used for both remote peers and local partitions. Partition string + + // PeerName is the name of the peer to export the service to. + PeerName string } func (e *ExportedServicesConfigEntry) GetKind() string { return ExportedServices } diff --git a/api/config_entry_exports_test.go b/api/config_entry_exports_test.go new file mode 100644 index 0000000000..e1df48f752 --- /dev/null +++ b/api/config_entry_exports_test.go @@ -0,0 +1,102 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAPI_ConfigEntries_ExportedServices(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + entries := c.ConfigEntries() + + runStep(t, "set and get", func(t *testing.T) { + exports := &ExportedServicesConfigEntry{ + Name: PartitionDefaultName, + Partition: defaultPartition, + Meta: map[string]string{ + "gir": "zim", + }, + } + + _, wm, err := entries.Set(exports, nil) + require.NoError(t, err) + require.NotNil(t, wm) + require.NotEqual(t, 0, wm.RequestTime) + + entry, qm, err := entries.Get(ExportedServices, PartitionDefaultName, nil) + require.NoError(t, err) + 
require.NotNil(t, qm) + require.NotEqual(t, 0, qm.RequestTime) + + result, ok := entry.(*ExportedServicesConfigEntry) + require.True(t, ok) + + // ignore indexes + result.CreateIndex = 0 + result.ModifyIndex = 0 + require.Equal(t, exports, result) + }) + + runStep(t, "update", func(t *testing.T) { + updated := &ExportedServicesConfigEntry{ + Name: PartitionDefaultName, + Services: []ExportedService{ + { + Name: "db", + Namespace: defaultNamespace, + Consumers: []ServiceConsumer{ + { + PeerName: "alpha", + }, + }, + }, + }, + Meta: map[string]string{ + "foo": "bar", + "gir": "zim", + }, + Partition: defaultPartition, + } + + _, wm, err := entries.Set(updated, nil) + require.NoError(t, err) + require.NotNil(t, wm) + require.NotEqual(t, 0, wm.RequestTime) + + entry, qm, err := entries.Get(ExportedServices, PartitionDefaultName, nil) + require.NoError(t, err) + require.NotNil(t, qm) + require.NotEqual(t, 0, qm.RequestTime) + + result, ok := entry.(*ExportedServicesConfigEntry) + require.True(t, ok) + + // ignore indexes + result.CreateIndex = 0 + result.ModifyIndex = 0 + require.Equal(t, updated, result) + }) + + runStep(t, "list", func(t *testing.T) { + entries, qm, err := entries.List(ExportedServices, nil) + require.NoError(t, err) + require.NotNil(t, qm) + require.NotEqual(t, 0, qm.RequestTime) + require.Len(t, entries, 1) + }) + + runStep(t, "delete", func(t *testing.T) { + wm, err := entries.Delete(ExportedServices, PartitionDefaultName, nil) + require.NoError(t, err) + require.NotNil(t, wm) + require.NotEqual(t, 0, wm.RequestTime) + + // verify deletion + _, _, err = entries.Get(MeshConfig, PartitionDefaultName, nil) + require.Error(t, err) + }) +} diff --git a/api/go.mod b/api/go.mod index 6a37c10dd6..33d9c558c6 100644 --- a/api/go.mod +++ b/api/go.mod @@ -5,6 +5,7 @@ go 1.12 replace github.com/hashicorp/consul/sdk => ../sdk require ( + github.com/google/go-cmp v0.5.7 github.com/hashicorp/consul/sdk v0.8.0 github.com/hashicorp/go-cleanhttp v0.5.1 
github.com/hashicorp/go-hclog v0.12.0 diff --git a/api/go.sum b/api/go.sum index 45eae683fe..ebf25c8c86 100644 --- a/api/go.sum +++ b/api/go.sum @@ -12,6 +12,8 @@ github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= @@ -100,12 +102,12 @@ golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44 h1:Bli41pIlzTzf3KEY06n+xnzK/BESIg2ze4Pgfh/aI8c= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -114,6 +116,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/build-support/docker/Build-Go.dockerfile b/build-support/docker/Build-Go.dockerfile index 39fc6df5cc..21f508502d 100644 --- a/build-support/docker/Build-Go.dockerfile +++ b/build-support/docker/Build-Go.dockerfile @@ -1,4 +1,4 @@ -ARG GOLANG_VERSION=1.17.5 +ARG GOLANG_VERSION=1.18.1 FROM golang:${GOLANG_VERSION} RUN go install github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@master diff --git a/codecov.yml b/codecov.yml deleted file mode 100644 index 9e6e403021..0000000000 --- 
a/codecov.yml +++ /dev/null @@ -1,44 +0,0 @@ -# https://docs.codecov.io/docs/commit-status -coverage: - status: - # only measure diff changes, not unexpected project changes from flakiness - project: off - patch: - default: - # https://docs.codecov.io/docs/commit-status#section-informational - informational: true - # https://docs.codecov.io/docs/commit-status#section-excluding-tests-example- - # TODO: should any paths be excluded from coverage metrics? - # paths: - ui: - informational: true - # https://docs.codecov.io/docs/commit-status#section-changes-status - # TODO: enable after eliminating current unexpected coverage changes? - changes: off - -# https://docs.codecov.io/docs/codecov-yaml#section-default-yaml -# TODO: experiment with changing these values? -# parsers: -# gcov: -# branch_detection: -# conditional: yes -# loop: yes -# method: no -# macro: no - -# https://docs.codecov.io/docs/pull-request-comments -comment: false - -# https://docs.codecov.io/docs/flags -# TODO: split out test coverage for API, SDK, UI, website? 
-flags: - ui: - paths: /ui-v2/ - -github_checks: - annotations: false - -ignore: - - "agent/uiserver/bindata_assetfs.go" - - "vendor/**/*" - - "**/*.pb.go" diff --git a/command/connect/envoy/bootstrap_config.go b/command/connect/envoy/bootstrap_config.go index 777a02451e..9c94eaac70 100644 --- a/command/connect/envoy/bootstrap_config.go +++ b/command/connect/envoy/bootstrap_config.go @@ -686,7 +686,10 @@ func (c *BootstrapConfig) generateListenerConfig(args *BootstrapTplArgs, bindAdd }, "http_filters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ] } diff --git a/command/connect/envoy/bootstrap_config_test.go b/command/connect/envoy/bootstrap_config_test.go index 5a5e0d3dc0..f02ae04c5c 100644 --- a/command/connect/envoy/bootstrap_config_test.go +++ b/command/connect/envoy/bootstrap_config_test.go @@ -136,10 +136,13 @@ const ( ] } ] - }, - "http_filters": [ + }, + "http_filters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ] } @@ -194,10 +197,13 @@ const ( ] } ] - }, - "http_filters": [ + }, + "http_filters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ] } @@ -252,10 +258,13 @@ const ( ] } ] - }, - "http_filters": [ + }, + "http_filters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ] } @@ -313,7 +322,10 @@ const ( }, "http_filters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" 
+ } } ] } @@ -371,7 +383,10 @@ const ( }, "http_filters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ] } diff --git a/command/connect/envoy/testdata/ingress-gateway-address-specified.golden b/command/connect/envoy/testdata/ingress-gateway-address-specified.golden index 81a9c7e69d..d0b0be0dc5 100644 --- a/command/connect/envoy/testdata/ingress-gateway-address-specified.golden +++ b/command/connect/envoy/testdata/ingress-gateway-address-specified.golden @@ -121,7 +121,10 @@ }, "http_filters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ] } diff --git a/command/connect/envoy/testdata/ingress-gateway-no-auto-register.golden b/command/connect/envoy/testdata/ingress-gateway-no-auto-register.golden index d17a33844f..d3cedfb5a7 100644 --- a/command/connect/envoy/testdata/ingress-gateway-no-auto-register.golden +++ b/command/connect/envoy/testdata/ingress-gateway-no-auto-register.golden @@ -121,7 +121,10 @@ }, "http_filters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ] } diff --git a/command/connect/envoy/testdata/ingress-gateway-register-with-service-and-proxy-id.golden b/command/connect/envoy/testdata/ingress-gateway-register-with-service-and-proxy-id.golden index acee34c6e9..cb98077ebc 100644 --- a/command/connect/envoy/testdata/ingress-gateway-register-with-service-and-proxy-id.golden +++ b/command/connect/envoy/testdata/ingress-gateway-register-with-service-and-proxy-id.golden @@ -121,7 +121,10 @@ }, "http_filters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": 
"type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ] } diff --git a/command/connect/envoy/testdata/ingress-gateway-register-with-service-without-proxy-id.golden b/command/connect/envoy/testdata/ingress-gateway-register-with-service-without-proxy-id.golden index 005ab2244d..d23864619d 100644 --- a/command/connect/envoy/testdata/ingress-gateway-register-with-service-without-proxy-id.golden +++ b/command/connect/envoy/testdata/ingress-gateway-register-with-service-without-proxy-id.golden @@ -121,7 +121,10 @@ }, "http_filters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ] } diff --git a/command/connect/envoy/testdata/ingress-gateway.golden b/command/connect/envoy/testdata/ingress-gateway.golden index 148c56ed1d..24dd4392e4 100644 --- a/command/connect/envoy/testdata/ingress-gateway.golden +++ b/command/connect/envoy/testdata/ingress-gateway.golden @@ -121,7 +121,10 @@ }, "http_filters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ] } diff --git a/command/connect/envoy/testdata/prometheus-metrics.golden b/command/connect/envoy/testdata/prometheus-metrics.golden index d52d3e7b67..333f9872dc 100644 --- a/command/connect/envoy/testdata/prometheus-metrics.golden +++ b/command/connect/envoy/testdata/prometheus-metrics.golden @@ -121,7 +121,10 @@ }, "http_filters": [ { - "name": "envoy.filters.http.router" + "name": "envoy.filters.http.router", + "typedConfig": { + "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" + } } ] } diff --git a/connect/proxy/proxy.go b/connect/proxy/proxy.go index 8eb34c4999..86cc24d464 100644 --- a/connect/proxy/proxy.go +++ b/connect/proxy/proxy.go @@ -75,12 +75,7 @@ func (p *Proxy) Serve() error { tcfg := 
service.ServerTLSConfig() cert, _ := tcfg.GetCertificate(nil) leaf, _ := x509.ParseCertificate(cert.Certificate[0]) - roots, err := connect.CommonNamesFromCertPool(tcfg.RootCAs) - if err != nil { - p.logger.Error("Failed to parse root subjects", "error", err) - } else { - p.logger.Info("Parsed TLS identity", "uri", leaf.URIs[0], "roots", roots) - } + p.logger.Info("Parsed TLS identity", "uri", leaf.URIs[0]) // Only start a listener if we have a port set. This allows // the configuration to disable our public listener. diff --git a/connect/service_test.go b/connect/service_test.go index 1897a90973..e72b501ed7 100644 --- a/connect/service_test.go +++ b/connect/service_test.go @@ -9,6 +9,8 @@ import ( "io" "io/ioutil" "net/http" + "reflect" + "sort" "strings" "testing" "time" @@ -189,15 +191,15 @@ func TestService_ServerTLSConfig(t *testing.T) { // After some time, both root and leaves should be different but both should // still be correct. - oldRootSubjects := bytes.Join(tlsCfg.RootCAs.Subjects(), []byte(", ")) + oldRootSubjects := getSubjects(tlsCfg.RootCAs) oldLeafSerial := cert.SerialNumber oldLeafKeyID := cert.SubjectKeyId retry.Run(t, func(r *retry.R) { updatedCfg := service.ServerTLSConfig() // Wait until roots are different - rootSubjects := bytes.Join(updatedCfg.RootCAs.Subjects(), []byte(", ")) - if bytes.Equal(oldRootSubjects, rootSubjects) { + rootSubjects := getSubjects(updatedCfg.RootCAs) + if oldRootSubjects == rootSubjects { r.Fatalf("root certificates should have changed, got %s", rootSubjects) } @@ -288,3 +290,15 @@ func TestService_HasDefaultHTTPResolverFromAddr(t *testing.T) { require.NoError(t, err) require.Equal(t, expected, got) } + +func getSubjects(cp *x509.CertPool) string { + subjectsIter := reflect.ValueOf(cp).Elem().FieldByName("byName").MapRange() + subjects := []string{} + for subjectsIter.Next() { + k := subjectsIter.Key() + subjects = append(subjects, k.String()) + } + sort.Strings(subjects) + subjectList := strings.Join(subjects, 
",") + return subjectList +} diff --git a/connect/tls.go b/connect/tls.go index a79fe7c8a3..dd7fc1869e 100644 --- a/connect/tls.go +++ b/connect/tls.go @@ -3,8 +3,6 @@ package connect import ( "crypto/tls" "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" "errors" "fmt" "io/ioutil" @@ -111,33 +109,6 @@ func devTLSConfigFromFiles(caFile, certFile, return cfg, nil } -// PKIXNameFromRawSubject attempts to parse a DER encoded "Subject" as a PKIX -// Name. It's useful for inspecting root certificates in an x509.CertPool which -// only expose RawSubject via the Subjects method. -func PKIXNameFromRawSubject(raw []byte) (*pkix.Name, error) { - var subject pkix.RDNSequence - if _, err := asn1.Unmarshal(raw, &subject); err != nil { - return nil, err - } - var name pkix.Name - name.FillFromRDNSequence(&subject) - return &name, nil -} - -// CommonNamesFromCertPool returns the common names of the certificates in the -// cert pool. -func CommonNamesFromCertPool(p *x509.CertPool) ([]string, error) { - var names []string - for _, rawSubj := range p.Subjects() { - n, err := PKIXNameFromRawSubject(rawSubj) - if err != nil { - return nil, err - } - names = append(names, n.CommonName) - } - return names, nil -} - // CertURIFromConn is a helper to extract the service identifier URI from a // net.Conn. If the net.Conn is not a *tls.Conn then an error is always // returned. If the *tls.Conn didn't present a valid connect certificate, or is diff --git a/connect/tls_test.go b/connect/tls_test.go index 9659cf5be6..1f83072240 100644 --- a/connect/tls_test.go +++ b/connect/tls_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent" @@ -295,9 +296,11 @@ func requireEqualTLSConfig(t *testing.T, expect, got *tls.Config) { // cmpCertPool is a custom comparison for x509.CertPool, because CertPool.lazyCerts // has a func field which can't be compared. 
-var cmpCertPool = cmp.Comparer(func(x, y *x509.CertPool) bool { - return cmp.Equal(x.Subjects(), y.Subjects()) -}) +// lazyCerts has a func field which can't be compared. +var cmpCertPool = cmp.Options{ + cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"), + cmp.AllowUnexported(x509.CertPool{}), +} // requireCorrectVerifier invokes got.VerifyPeerCertificate and expects the // tls.Config arg to be returned on the provided channel. This ensures the diff --git a/go.mod b/go.mod index 8d7134af30..b3fb5807ed 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/fsnotify/fsnotify v1.5.1 github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.4.3 - github.com/google/go-cmp v0.5.6 + github.com/google/go-cmp v0.5.7 github.com/google/go-querystring v1.0.0 // indirect github.com/google/gofuzz v1.2.0 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22 @@ -53,7 +53,7 @@ require ( github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 github.com/hashicorp/memberlist v0.3.1 - github.com/hashicorp/raft v1.3.6 + github.com/hashicorp/raft v1.3.8 github.com/hashicorp/raft-autopilot v0.1.6 github.com/hashicorp/raft-boltdb v0.0.0-20211202195631-7d34b9fb3f42 // indirect github.com/hashicorp/raft-boltdb/v2 v2.2.2 @@ -89,13 +89,14 @@ require ( golang.org/x/net v0.0.0-20211216030914-fe4d6282115f golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20211013075003-97ac67df715c + golang.org/x/sys v0.0.0-20220412211240-33da011f77ad golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e google.golang.org/genproto v0.0.0-20200623002339-fbb79eadd5eb google.golang.org/grpc v1.36.0 google.golang.org/protobuf v1.25.0 gopkg.in/square/go-jose.v2 v2.5.1 gotest.tools/v3 v3.0.3 + inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 k8s.io/api v0.18.2 k8s.io/apimachinery v0.18.2 k8s.io/client-go v0.18.2 diff --git a/go.sum b/go.sum index 
9311ff4815..ca0e61768b 100644 --- a/go.sum +++ b/go.sum @@ -160,6 +160,7 @@ github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0 h1:ZoRgc53qJCfSLimXqJDrmBhnt5GChDsExMCK7t48o0Y= github.com/elazarl/go-bindata-assetfs v0.0.0-20160803192304-e1a2a7ec64b0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -253,8 +254,9 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -366,8 +368,8 @@ 
github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= -github.com/hashicorp/raft v1.3.6 h1:v5xW5KzByoerQlN/o31VJrFNiozgzGyDoMgDJgXpsto= -github.com/hashicorp/raft v1.3.6/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= +github.com/hashicorp/raft v1.3.8 h1:lrhx4wesQLOSv3ERX/pK4cwfzQ0J2RgzsvAkBxHe1bA= +github.com/hashicorp/raft v1.3.8/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft-autopilot v0.1.6 h1:C1q3RNF2FfXNZfHWbvVAu0QixaQK8K5pX4O5lh+9z4I= github.com/hashicorp/raft-autopilot v0.1.6/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= @@ -633,6 +635,10 @@ go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org/intern v0.0.0-20211027215823-ae77deb06f29 h1:UXLjNohABv4S58tHmeuIZDO6e3mHpW2Dx33gaNt03LE= +go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37 h1:Tx9kY6yUkLge/pFG7IEMwDZy6CS2ajFc9TvQdPCW0uA= +go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -772,7 +778,6 @@ golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -785,13 +790,15 @@ golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211013075003-97ac67df715c h1:taxlMj0D/1sOAuv/CbSD+MMDof2vbyPTqz5FNYKpXt8= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -852,8 +859,9 @@ golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -971,6 +979,8 @@ honnef.co/go/tools 
v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 h1:acCzuUSQ79tGsM/O50VRFySfMm19IoMKL+sZztZkCxw= +inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6/go.mod h1:y3MGhcFMlh0KZPMuXXow8mpjxxAk3yoDNsp4cQz54i8= k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA= diff --git a/internal/tools/proto-gen-rpc-glue/main.go b/internal/tools/proto-gen-rpc-glue/main.go index 0618b35b2a..57e932f50b 100644 --- a/internal/tools/proto-gen-rpc-glue/main.go +++ b/internal/tools/proto-gen-rpc-glue/main.go @@ -344,6 +344,14 @@ func (msg *%[1]s) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a t return msg.%[2]s.HasTimedOut(start, rpcHoldTimeout, a, b) } +// Timeout implements structs.RPCInfo +func (msg *%[1]s) Timeout(rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) time.Duration { + if msg == nil || msg.%[2]s == nil { + return 0 + } + return msg.%[2]s.Timeout(rpcHoldTimeout, a, b) +} + // IsRead implements structs.RPCInfo func (msg *%[1]s) IsRead() bool { return false @@ -392,6 +400,14 @@ func (msg *%[1]s) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a t return msg.%[2]s.HasTimedOut(start, rpcHoldTimeout, a, b) } +// Timeout implements structs.RPCInfo +func (msg *%[1]s) Timeout(rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) time.Duration { + if msg == nil || msg.%[2]s == nil { + return 0 + } + return msg.%[2]s.Timeout(rpcHoldTimeout, a, b) +} + // SetTokenSecret implements structs.RPCInfo func (msg *%[1]s) SetTokenSecret(s string) { // TODO: 
initialize if nil @@ -443,6 +459,15 @@ func (msg *%[1]s) HasTimedOut(start time.Time, rpcHoldTimeout time.Duration, a t } return msg.%[2]s.HasTimedOut(start, rpcHoldTimeout, a, b) } + +// Timeout implements structs.RPCInfo +func (msg *%[1]s) Timeout(rpcHoldTimeout time.Duration, a time.Duration, b time.Duration) time.Duration { + if msg == nil || msg.%[2]s == nil { + return 0 + } + return msg.%[2]s.Timeout(rpcHoldTimeout, a, b) +} + // SetTokenSecret implements structs.RPCInfo func (msg *%[1]s) SetTokenSecret(s string) { // TODO: initialize if nil diff --git a/logging/names.go b/logging/names.go index 17db364af7..015e4a0fc7 100644 --- a/logging/names.go +++ b/logging/names.go @@ -50,6 +50,7 @@ const ( Sentinel string = "sentinel" Snapshot string = "snapshot" Partition string = "partition" + Peering string = "peering" TerminatingGateway string = "terminating_gateway" TLSUtil string = "tlsutil" Transaction string = "txn" diff --git a/main_test.go b/main_test.go deleted file mode 100644 index 06ab7d0f9a..0000000000 --- a/main_test.go +++ /dev/null @@ -1 +0,0 @@ -package main diff --git a/package-lock.json b/package-lock.json deleted file mode 100644 index 29027cb00a..0000000000 --- a/package-lock.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "name": "consul", - "lockfileVersion": 2, - "requires": true, - "packages": {} -} diff --git a/proto-public/pbconnectca/ca.proto b/proto-public/pbconnectca/ca.proto index 216a6e43c8..029ebbea2b 100644 --- a/proto-public/pbconnectca/ca.proto +++ b/proto-public/pbconnectca/ca.proto @@ -2,20 +2,22 @@ syntax = "proto3"; package connectca; -option go_package = "github.com/hashicorp/consul/proto-public/pbconnectca"; - import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; +option go_package = "github.com/hashicorp/consul/proto-public/pbconnectca"; + service ConnectCAService { // WatchRoots provides a stream on which you can receive the list of active // Connect CA roots. 
Current roots are sent immediately at the start of the // stream, and new lists will be sent whenever the roots are rotated. - rpc WatchRoots(google.protobuf.Empty) returns (stream WatchRootsResponse) {}; + rpc WatchRoots(google.protobuf.Empty) returns (stream WatchRootsResponse) {} + // Sign a leaf certificate for the service or agent identified by the SPIFFE // ID in the given CSR's SAN. - rpc Sign(SignRequest) returns (SignResponse) {}; + rpc Sign(SignRequest) returns (SignResponse) {} + } message WatchRootsResponse { @@ -38,41 +40,41 @@ message WatchRootsResponse { } message CARoot { - // id is a globally unique ID (UUID) representing this CA root. - string id = 1; + // id is a globally unique ID (UUID) representing this CA root. + string id = 1; - // name is a human-friendly name for this CA root. This value is opaque to - // Consul and is not used for anything internally. - string name = 2; + // name is a human-friendly name for this CA root. This value is opaque to + // Consul and is not used for anything internally. + string name = 2; - // serial_number is the x509 serial number of the certificate. - uint64 serial_number = 3; + // serial_number is the x509 serial number of the certificate. + uint64 serial_number = 3; - // signing_key_id is the connect.HexString encoded id of the public key that - // corresponds to the private key used to sign leaf certificates in the - // local datacenter. - // - // The value comes from x509.Certificate.SubjectKeyId of the local leaf - // signing cert. - // - // See https://www.rfc-editor.org/rfc/rfc3280#section-4.2.1.1 for more detail. - string signing_key_id = 4; + // signing_key_id is the connect.HexString encoded id of the public key that + // corresponds to the private key used to sign leaf certificates in the + // local datacenter. + // + // The value comes from x509.Certificate.SubjectKeyId of the local leaf + // signing cert. + // + // See https://www.rfc-editor.org/rfc/rfc3280#section-4.2.1.1 for more detail. 
+ string signing_key_id = 4; - // root_cert is the PEM-encoded public certificate. - string root_cert = 5; + // root_cert is the PEM-encoded public certificate. + string root_cert = 5; - // intermediate_certs is a list of PEM-encoded intermediate certs to - // attach to any leaf certs signed by this CA. - repeated string intermediate_certs = 6; + // intermediate_certs is a list of PEM-encoded intermediate certs to + // attach to any leaf certs signed by this CA. + repeated string intermediate_certs = 6; - // active is true if this is the current active CA. This must only - // be true for exactly one CA. - bool active = 7; + // active is true if this is the current active CA. This must only + // be true for exactly one CA. + bool active = 7; - // rotated_out_at is the time at which this CA was removed from the state. - // This will only be set on roots that have been rotated out from being the - // active root. - google.protobuf.Timestamp rotated_out_at = 8; + // rotated_out_at is the time at which this CA was removed from the state. + // This will only be set on roots that have been rotated out from being the + // active root. 
+ google.protobuf.Timestamp rotated_out_at = 8; } message SignRequest { diff --git a/proto-public/pbdataplane/dataplane.pb.binary.go b/proto-public/pbdataplane/dataplane.pb.binary.go index aae9be9117..c08cd112ba 100644 --- a/proto-public/pbdataplane/dataplane.pb.binary.go +++ b/proto-public/pbdataplane/dataplane.pb.binary.go @@ -8,12 +8,12 @@ import ( ) // MarshalBinary implements encoding.BinaryMarshaler -func (msg *SupportedDataplaneFeaturesRequest) MarshalBinary() ([]byte, error) { +func (msg *GetSupportedDataplaneFeaturesRequest) MarshalBinary() ([]byte, error) { return proto.Marshal(msg) } // UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *SupportedDataplaneFeaturesRequest) UnmarshalBinary(b []byte) error { +func (msg *GetSupportedDataplaneFeaturesRequest) UnmarshalBinary(b []byte) error { return proto.Unmarshal(b, msg) } @@ -28,11 +28,31 @@ func (msg *DataplaneFeatureSupport) UnmarshalBinary(b []byte) error { } // MarshalBinary implements encoding.BinaryMarshaler -func (msg *SupportedDataplaneFeaturesResponse) MarshalBinary() ([]byte, error) { +func (msg *GetSupportedDataplaneFeaturesResponse) MarshalBinary() ([]byte, error) { return proto.Marshal(msg) } // UnmarshalBinary implements encoding.BinaryUnmarshaler -func (msg *SupportedDataplaneFeaturesResponse) UnmarshalBinary(b []byte) error { +func (msg *GetSupportedDataplaneFeaturesResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *GetEnvoyBootstrapParamsRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *GetEnvoyBootstrapParamsRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *GetEnvoyBootstrapParamsResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements 
encoding.BinaryUnmarshaler +func (msg *GetEnvoyBootstrapParamsResponse) UnmarshalBinary(b []byte) error { return proto.Unmarshal(b, msg) } diff --git a/proto-public/pbdataplane/dataplane.pb.go b/proto-public/pbdataplane/dataplane.pb.go index c5e48a2412..2fa239769f 100644 --- a/proto-public/pbdataplane/dataplane.pb.go +++ b/proto-public/pbdataplane/dataplane.pb.go @@ -16,6 +16,7 @@ import ( status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" sync "sync" ) @@ -83,14 +84,83 @@ func (DataplaneFeatures) EnumDescriptor() ([]byte, []int) { return file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP(), []int{0} } -type SupportedDataplaneFeaturesRequest struct { +type ServiceKind int32 + +const ( + // ServiceKind Typical is a typical, classic Consul service. This is + // represented by the absence of a value. This was chosen for ease of + // backwards compatibility: existing services in the catalog would + // default to the typical service. + ServiceKind_TYPICAL ServiceKind = 0 + // ServiceKind Connect Proxy is a proxy for the Connect feature. This + // service proxies another service within Consul and speaks the connect + // protocol. + ServiceKind_CONNECT_PROXY ServiceKind = 1 + // ServiceKind Mesh Gateway is a Mesh Gateway for the Connect feature. This + // service will proxy connections based off the SNI header set by other + // connect proxies. + ServiceKind_MESH_GATEWAY ServiceKind = 2 + // ServiceKind Terminating Gateway is a Terminating Gateway for the Connect + // feature. This service will proxy connections to services outside the mesh. + ServiceKind_TERMINATING_GATEWAY ServiceKind = 3 + // ServiceKind Ingress Gateway is an Ingress Gateway for the Connect feature. + // This service will ingress connections into the service mesh. 
+ ServiceKind_INGRESS_GATEWAY ServiceKind = 4 +) + +// Enum value maps for ServiceKind. +var ( + ServiceKind_name = map[int32]string{ + 0: "TYPICAL", + 1: "CONNECT_PROXY", + 2: "MESH_GATEWAY", + 3: "TERMINATING_GATEWAY", + 4: "INGRESS_GATEWAY", + } + ServiceKind_value = map[string]int32{ + "TYPICAL": 0, + "CONNECT_PROXY": 1, + "MESH_GATEWAY": 2, + "TERMINATING_GATEWAY": 3, + "INGRESS_GATEWAY": 4, + } +) + +func (x ServiceKind) Enum() *ServiceKind { + p := new(ServiceKind) + *p = x + return p +} + +func (x ServiceKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ServiceKind) Descriptor() protoreflect.EnumDescriptor { + return file_proto_public_pbdataplane_dataplane_proto_enumTypes[1].Descriptor() +} + +func (ServiceKind) Type() protoreflect.EnumType { + return &file_proto_public_pbdataplane_dataplane_proto_enumTypes[1] +} + +func (x ServiceKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ServiceKind.Descriptor instead. 
+func (ServiceKind) EnumDescriptor() ([]byte, []int) { + return file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP(), []int{1} +} + +type GetSupportedDataplaneFeaturesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } -func (x *SupportedDataplaneFeaturesRequest) Reset() { - *x = SupportedDataplaneFeaturesRequest{} +func (x *GetSupportedDataplaneFeaturesRequest) Reset() { + *x = GetSupportedDataplaneFeaturesRequest{} if protoimpl.UnsafeEnabled { mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -98,13 +168,13 @@ func (x *SupportedDataplaneFeaturesRequest) Reset() { } } -func (x *SupportedDataplaneFeaturesRequest) String() string { +func (x *GetSupportedDataplaneFeaturesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SupportedDataplaneFeaturesRequest) ProtoMessage() {} +func (*GetSupportedDataplaneFeaturesRequest) ProtoMessage() {} -func (x *SupportedDataplaneFeaturesRequest) ProtoReflect() protoreflect.Message { +func (x *GetSupportedDataplaneFeaturesRequest) ProtoReflect() protoreflect.Message { mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -116,8 +186,8 @@ func (x *SupportedDataplaneFeaturesRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use SupportedDataplaneFeaturesRequest.ProtoReflect.Descriptor instead. -func (*SupportedDataplaneFeaturesRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSupportedDataplaneFeaturesRequest.ProtoReflect.Descriptor instead. 
+func (*GetSupportedDataplaneFeaturesRequest) Descriptor() ([]byte, []int) { return file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP(), []int{0} } @@ -176,7 +246,7 @@ func (x *DataplaneFeatureSupport) GetSupported() bool { return false } -type SupportedDataplaneFeaturesResponse struct { +type GetSupportedDataplaneFeaturesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -184,8 +254,8 @@ type SupportedDataplaneFeaturesResponse struct { SupportedDataplaneFeatures []*DataplaneFeatureSupport `protobuf:"bytes,1,rep,name=supported_dataplane_features,json=supportedDataplaneFeatures,proto3" json:"supported_dataplane_features,omitempty"` } -func (x *SupportedDataplaneFeaturesResponse) Reset() { - *x = SupportedDataplaneFeaturesResponse{} +func (x *GetSupportedDataplaneFeaturesResponse) Reset() { + *x = GetSupportedDataplaneFeaturesResponse{} if protoimpl.UnsafeEnabled { mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -193,13 +263,13 @@ func (x *SupportedDataplaneFeaturesResponse) Reset() { } } -func (x *SupportedDataplaneFeaturesResponse) String() string { +func (x *GetSupportedDataplaneFeaturesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SupportedDataplaneFeaturesResponse) ProtoMessage() {} +func (*GetSupportedDataplaneFeaturesResponse) ProtoMessage() {} -func (x *SupportedDataplaneFeaturesResponse) ProtoReflect() protoreflect.Message { +func (x *GetSupportedDataplaneFeaturesResponse) ProtoReflect() protoreflect.Message { mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -211,64 +281,303 @@ func (x *SupportedDataplaneFeaturesResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use SupportedDataplaneFeaturesResponse.ProtoReflect.Descriptor instead. 
-func (*SupportedDataplaneFeaturesResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSupportedDataplaneFeaturesResponse.ProtoReflect.Descriptor instead. +func (*GetSupportedDataplaneFeaturesResponse) Descriptor() ([]byte, []int) { return file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP(), []int{2} } -func (x *SupportedDataplaneFeaturesResponse) GetSupportedDataplaneFeatures() []*DataplaneFeatureSupport { +func (x *GetSupportedDataplaneFeaturesResponse) GetSupportedDataplaneFeatures() []*DataplaneFeatureSupport { if x != nil { return x.SupportedDataplaneFeatures } return nil } +type GetEnvoyBootstrapParamsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to NodeSpec: + // *GetEnvoyBootstrapParamsRequest_NodeId + // *GetEnvoyBootstrapParamsRequest_NodeName + NodeSpec isGetEnvoyBootstrapParamsRequest_NodeSpec `protobuf_oneof:"node_spec"` + // The proxy service ID + ServiceId string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Partition string `protobuf:"bytes,4,opt,name=partition,proto3" json:"partition,omitempty"` + Namespace string `protobuf:"bytes,5,opt,name=namespace,proto3" json:"namespace,omitempty"` +} + +func (x *GetEnvoyBootstrapParamsRequest) Reset() { + *x = GetEnvoyBootstrapParamsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEnvoyBootstrapParamsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEnvoyBootstrapParamsRequest) ProtoMessage() {} + +func (x *GetEnvoyBootstrapParamsRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEnvoyBootstrapParamsRequest.ProtoReflect.Descriptor instead. +func (*GetEnvoyBootstrapParamsRequest) Descriptor() ([]byte, []int) { + return file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP(), []int{3} +} + +func (m *GetEnvoyBootstrapParamsRequest) GetNodeSpec() isGetEnvoyBootstrapParamsRequest_NodeSpec { + if m != nil { + return m.NodeSpec + } + return nil +} + +func (x *GetEnvoyBootstrapParamsRequest) GetNodeId() string { + if x, ok := x.GetNodeSpec().(*GetEnvoyBootstrapParamsRequest_NodeId); ok { + return x.NodeId + } + return "" +} + +func (x *GetEnvoyBootstrapParamsRequest) GetNodeName() string { + if x, ok := x.GetNodeSpec().(*GetEnvoyBootstrapParamsRequest_NodeName); ok { + return x.NodeName + } + return "" +} + +func (x *GetEnvoyBootstrapParamsRequest) GetServiceId() string { + if x != nil { + return x.ServiceId + } + return "" +} + +func (x *GetEnvoyBootstrapParamsRequest) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *GetEnvoyBootstrapParamsRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +type isGetEnvoyBootstrapParamsRequest_NodeSpec interface { + isGetEnvoyBootstrapParamsRequest_NodeSpec() +} + +type GetEnvoyBootstrapParamsRequest_NodeId struct { + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,oneof"` +} + +type GetEnvoyBootstrapParamsRequest_NodeName struct { + NodeName string `protobuf:"bytes,2,opt,name=node_name,json=nodeName,proto3,oneof"` +} + +func (*GetEnvoyBootstrapParamsRequest_NodeId) isGetEnvoyBootstrapParamsRequest_NodeSpec() {} + +func (*GetEnvoyBootstrapParamsRequest_NodeName) isGetEnvoyBootstrapParamsRequest_NodeSpec() {} + +type GetEnvoyBootstrapParamsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceKind 
ServiceKind `protobuf:"varint,1,opt,name=service_kind,json=serviceKind,proto3,enum=dataplane.ServiceKind" json:"service_kind,omitempty"` + // The destination service name + Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + Partition string `protobuf:"bytes,4,opt,name=partition,proto3" json:"partition,omitempty"` + Datacenter string `protobuf:"bytes,5,opt,name=datacenter,proto3" json:"datacenter,omitempty"` + Config *structpb.Struct `protobuf:"bytes,6,opt,name=config,proto3" json:"config,omitempty"` +} + +func (x *GetEnvoyBootstrapParamsResponse) Reset() { + *x = GetEnvoyBootstrapParamsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEnvoyBootstrapParamsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEnvoyBootstrapParamsResponse) ProtoMessage() {} + +func (x *GetEnvoyBootstrapParamsResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_public_pbdataplane_dataplane_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEnvoyBootstrapParamsResponse.ProtoReflect.Descriptor instead. 
+func (*GetEnvoyBootstrapParamsResponse) Descriptor() ([]byte, []int) { + return file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP(), []int{4} +} + +func (x *GetEnvoyBootstrapParamsResponse) GetServiceKind() ServiceKind { + if x != nil { + return x.ServiceKind + } + return ServiceKind_TYPICAL +} + +func (x *GetEnvoyBootstrapParamsResponse) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *GetEnvoyBootstrapParamsResponse) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *GetEnvoyBootstrapParamsResponse) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *GetEnvoyBootstrapParamsResponse) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +func (x *GetEnvoyBootstrapParamsResponse) GetConfig() *structpb.Struct { + if x != nil { + return x.Config + } + return nil +} + var File_proto_public_pbdataplane_dataplane_proto protoreflect.FileDescriptor var file_proto_public_pbdataplane_dataplane_proto_rawDesc = []byte{ 0x0a, 0x28, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x64, 0x61, 0x74, 0x61, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x22, 0x23, 0x0a, 0x21, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x78, 0x0a, 0x17, 0x44, 0x61, - 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, - 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x3f, 0x0a, 0x0c, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x64, 0x61, - 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 
0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, - 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x0b, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x22, 0x8a, 0x01, 0x0a, 0x22, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x1c, 0x73, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, - 0x6e, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, - 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x1a, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, - 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x2a, 0x77, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x57, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x45, 0x52, - 0x56, 0x45, 0x52, 0x53, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x44, 0x47, 0x45, 0x5f, 0x43, - 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, - 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x4e, 0x56, 0x4f, 0x59, - 0x5f, 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, - 0x47, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x32, 0x8f, 
0x01, 0x0a, 0x10, 0x44, - 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x7b, 0x0a, 0x1a, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, - 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x2c, 0x2e, - 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x26, 0x0a, 0x24, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x64, 0x61, - 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x36, 0x5a, 0x34, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, - 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, - 0x6c, 0x61, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x78, 0x0a, 0x17, 0x44, + 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x3f, 0x0a, 0x0c, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 
0x6e, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x0b, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x75, 0x70, 0x70, + 0x6f, 0x72, 0x74, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x64, 0x0a, 0x1c, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x1a, 0x73, 0x75, 0x70, 0x70, 0x6f, + 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xc2, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, + 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x6e, 0x6f, 0x64, + 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, 0x49, + 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x0b, 0x0a, + 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x83, 0x02, 0x0a, 0x1f, 0x47, + 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, + 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1e, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, + 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 
0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2a, 0x77, 0x0a, 0x11, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x57, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x53, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x44, 0x47, 0x45, 0x5f, 0x43, 0x45, + 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, + 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x4e, 0x56, 0x4f, 0x59, 0x5f, + 0x42, 0x4f, 0x4f, 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, + 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x2a, 0x6d, 0x0a, 0x0b, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x59, 0x50, 0x49, + 0x43, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x45, 0x53, 0x48, + 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x45, + 0x52, 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, + 0x59, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x47, + 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0x04, 0x32, 0x8d, 0x02, 0x0a, 0x10, 0x44, 0x61, 0x74, + 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x84, 0x01, + 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, + 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x2f, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 
0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, + 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x30, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, + 0x6e, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, + 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x29, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, + 0x6e, 0x76, 0x6f, 0x79, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x42, + 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, + 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, 0x62, 0x64, 0x61, 0x74, 0x61, 0x70, 0x6c, 0x61, 0x6e, 0x65, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -283,24 +592,32 @@ func file_proto_public_pbdataplane_dataplane_proto_rawDescGZIP() []byte { return file_proto_public_pbdataplane_dataplane_proto_rawDescData } -var file_proto_public_pbdataplane_dataplane_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_proto_public_pbdataplane_dataplane_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_proto_public_pbdataplane_dataplane_proto_enumTypes = 
make([]protoimpl.EnumInfo, 2) +var file_proto_public_pbdataplane_dataplane_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_proto_public_pbdataplane_dataplane_proto_goTypes = []interface{}{ - (DataplaneFeatures)(0), // 0: dataplane.DataplaneFeatures - (*SupportedDataplaneFeaturesRequest)(nil), // 1: dataplane.SupportedDataplaneFeaturesRequest - (*DataplaneFeatureSupport)(nil), // 2: dataplane.DataplaneFeatureSupport - (*SupportedDataplaneFeaturesResponse)(nil), // 3: dataplane.SupportedDataplaneFeaturesResponse + (DataplaneFeatures)(0), // 0: dataplane.DataplaneFeatures + (ServiceKind)(0), // 1: dataplane.ServiceKind + (*GetSupportedDataplaneFeaturesRequest)(nil), // 2: dataplane.GetSupportedDataplaneFeaturesRequest + (*DataplaneFeatureSupport)(nil), // 3: dataplane.DataplaneFeatureSupport + (*GetSupportedDataplaneFeaturesResponse)(nil), // 4: dataplane.GetSupportedDataplaneFeaturesResponse + (*GetEnvoyBootstrapParamsRequest)(nil), // 5: dataplane.GetEnvoyBootstrapParamsRequest + (*GetEnvoyBootstrapParamsResponse)(nil), // 6: dataplane.GetEnvoyBootstrapParamsResponse + (*structpb.Struct)(nil), // 7: google.protobuf.Struct } var file_proto_public_pbdataplane_dataplane_proto_depIdxs = []int32{ 0, // 0: dataplane.DataplaneFeatureSupport.feature_name:type_name -> dataplane.DataplaneFeatures - 2, // 1: dataplane.SupportedDataplaneFeaturesResponse.supported_dataplane_features:type_name -> dataplane.DataplaneFeatureSupport - 1, // 2: dataplane.DataplaneService.SupportedDataplaneFeatures:input_type -> dataplane.SupportedDataplaneFeaturesRequest - 3, // 3: dataplane.DataplaneService.SupportedDataplaneFeatures:output_type -> dataplane.SupportedDataplaneFeaturesResponse - 3, // [3:4] is the sub-list for method output_type - 2, // [2:3] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 3, // 1: 
dataplane.GetSupportedDataplaneFeaturesResponse.supported_dataplane_features:type_name -> dataplane.DataplaneFeatureSupport + 1, // 2: dataplane.GetEnvoyBootstrapParamsResponse.service_kind:type_name -> dataplane.ServiceKind + 7, // 3: dataplane.GetEnvoyBootstrapParamsResponse.config:type_name -> google.protobuf.Struct + 2, // 4: dataplane.DataplaneService.GetSupportedDataplaneFeatures:input_type -> dataplane.GetSupportedDataplaneFeaturesRequest + 5, // 5: dataplane.DataplaneService.GetEnvoyBootstrapParams:input_type -> dataplane.GetEnvoyBootstrapParamsRequest + 4, // 6: dataplane.DataplaneService.GetSupportedDataplaneFeatures:output_type -> dataplane.GetSupportedDataplaneFeaturesResponse + 6, // 7: dataplane.DataplaneService.GetEnvoyBootstrapParams:output_type -> dataplane.GetEnvoyBootstrapParamsResponse + 6, // [6:8] is the sub-list for method output_type + 4, // [4:6] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_proto_public_pbdataplane_dataplane_proto_init() } @@ -310,7 +627,7 @@ func file_proto_public_pbdataplane_dataplane_proto_init() { } if !protoimpl.UnsafeEnabled { file_proto_public_pbdataplane_dataplane_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SupportedDataplaneFeaturesRequest); i { + switch v := v.(*GetSupportedDataplaneFeaturesRequest); i { case 0: return &v.state case 1: @@ -334,7 +651,31 @@ func file_proto_public_pbdataplane_dataplane_proto_init() { } } file_proto_public_pbdataplane_dataplane_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SupportedDataplaneFeaturesResponse); i { + switch v := v.(*GetSupportedDataplaneFeaturesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_proto_public_pbdataplane_dataplane_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetEnvoyBootstrapParamsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_public_pbdataplane_dataplane_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetEnvoyBootstrapParamsResponse); i { case 0: return &v.state case 1: @@ -346,13 +687,17 @@ func file_proto_public_pbdataplane_dataplane_proto_init() { } } } + file_proto_public_pbdataplane_dataplane_proto_msgTypes[3].OneofWrappers = []interface{}{ + (*GetEnvoyBootstrapParamsRequest_NodeId)(nil), + (*GetEnvoyBootstrapParamsRequest_NodeName)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_proto_public_pbdataplane_dataplane_proto_rawDesc, - NumEnums: 1, - NumMessages: 3, + NumEnums: 2, + NumMessages: 5, NumExtensions: 0, NumServices: 1, }, @@ -379,7 +724,8 @@ const _ = grpc.SupportPackageIsVersion6 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type DataplaneServiceClient interface { - SupportedDataplaneFeatures(ctx context.Context, in *SupportedDataplaneFeaturesRequest, opts ...grpc.CallOption) (*SupportedDataplaneFeaturesResponse, error) + GetSupportedDataplaneFeatures(ctx context.Context, in *GetSupportedDataplaneFeaturesRequest, opts ...grpc.CallOption) (*GetSupportedDataplaneFeaturesResponse, error) + GetEnvoyBootstrapParams(ctx context.Context, in *GetEnvoyBootstrapParamsRequest, opts ...grpc.CallOption) (*GetEnvoyBootstrapParamsResponse, error) } type dataplaneServiceClient struct { @@ -390,9 +736,18 @@ func NewDataplaneServiceClient(cc grpc.ClientConnInterface) DataplaneServiceClie return &dataplaneServiceClient{cc} } -func (c *dataplaneServiceClient) SupportedDataplaneFeatures(ctx context.Context, in *SupportedDataplaneFeaturesRequest, opts ...grpc.CallOption) (*SupportedDataplaneFeaturesResponse, error) { - out := new(SupportedDataplaneFeaturesResponse) - err := c.cc.Invoke(ctx, "/dataplane.DataplaneService/SupportedDataplaneFeatures", in, out, opts...) +func (c *dataplaneServiceClient) GetSupportedDataplaneFeatures(ctx context.Context, in *GetSupportedDataplaneFeaturesRequest, opts ...grpc.CallOption) (*GetSupportedDataplaneFeaturesResponse, error) { + out := new(GetSupportedDataplaneFeaturesResponse) + err := c.cc.Invoke(ctx, "/dataplane.DataplaneService/GetSupportedDataplaneFeatures", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dataplaneServiceClient) GetEnvoyBootstrapParams(ctx context.Context, in *GetEnvoyBootstrapParamsRequest, opts ...grpc.CallOption) (*GetEnvoyBootstrapParamsResponse, error) { + out := new(GetEnvoyBootstrapParamsResponse) + err := c.cc.Invoke(ctx, "/dataplane.DataplaneService/GetEnvoyBootstrapParams", in, out, opts...) 
if err != nil { return nil, err } @@ -401,35 +756,57 @@ func (c *dataplaneServiceClient) SupportedDataplaneFeatures(ctx context.Context, // DataplaneServiceServer is the server API for DataplaneService service. type DataplaneServiceServer interface { - SupportedDataplaneFeatures(context.Context, *SupportedDataplaneFeaturesRequest) (*SupportedDataplaneFeaturesResponse, error) + GetSupportedDataplaneFeatures(context.Context, *GetSupportedDataplaneFeaturesRequest) (*GetSupportedDataplaneFeaturesResponse, error) + GetEnvoyBootstrapParams(context.Context, *GetEnvoyBootstrapParamsRequest) (*GetEnvoyBootstrapParamsResponse, error) } // UnimplementedDataplaneServiceServer can be embedded to have forward compatible implementations. type UnimplementedDataplaneServiceServer struct { } -func (*UnimplementedDataplaneServiceServer) SupportedDataplaneFeatures(context.Context, *SupportedDataplaneFeaturesRequest) (*SupportedDataplaneFeaturesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SupportedDataplaneFeatures not implemented") +func (*UnimplementedDataplaneServiceServer) GetSupportedDataplaneFeatures(context.Context, *GetSupportedDataplaneFeaturesRequest) (*GetSupportedDataplaneFeaturesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSupportedDataplaneFeatures not implemented") +} +func (*UnimplementedDataplaneServiceServer) GetEnvoyBootstrapParams(context.Context, *GetEnvoyBootstrapParamsRequest) (*GetEnvoyBootstrapParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetEnvoyBootstrapParams not implemented") } func RegisterDataplaneServiceServer(s *grpc.Server, srv DataplaneServiceServer) { s.RegisterService(&_DataplaneService_serviceDesc, srv) } -func _DataplaneService_SupportedDataplaneFeatures_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SupportedDataplaneFeaturesRequest) +func 
_DataplaneService_GetSupportedDataplaneFeatures_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSupportedDataplaneFeaturesRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(DataplaneServiceServer).SupportedDataplaneFeatures(ctx, in) + return srv.(DataplaneServiceServer).GetSupportedDataplaneFeatures(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/dataplane.DataplaneService/SupportedDataplaneFeatures", + FullMethod: "/dataplane.DataplaneService/GetSupportedDataplaneFeatures", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DataplaneServiceServer).SupportedDataplaneFeatures(ctx, req.(*SupportedDataplaneFeaturesRequest)) + return srv.(DataplaneServiceServer).GetSupportedDataplaneFeatures(ctx, req.(*GetSupportedDataplaneFeaturesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DataplaneService_GetEnvoyBootstrapParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetEnvoyBootstrapParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataplaneServiceServer).GetEnvoyBootstrapParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/dataplane.DataplaneService/GetEnvoyBootstrapParams", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataplaneServiceServer).GetEnvoyBootstrapParams(ctx, req.(*GetEnvoyBootstrapParamsRequest)) } return interceptor(ctx, in, info, handler) } @@ -439,8 +816,12 @@ var _DataplaneService_serviceDesc = grpc.ServiceDesc{ HandlerType: (*DataplaneServiceServer)(nil), Methods: []grpc.MethodDesc{ { - MethodName: "SupportedDataplaneFeatures", - Handler: 
_DataplaneService_SupportedDataplaneFeatures_Handler, + MethodName: "GetSupportedDataplaneFeatures", + Handler: _DataplaneService_GetSupportedDataplaneFeatures_Handler, + }, + { + MethodName: "GetEnvoyBootstrapParams", + Handler: _DataplaneService_GetEnvoyBootstrapParams_Handler, }, }, Streams: []grpc.StreamDesc{}, diff --git a/proto-public/pbdataplane/dataplane.proto b/proto-public/pbdataplane/dataplane.proto index 17789fc55e..dfc672bec1 100644 --- a/proto-public/pbdataplane/dataplane.proto +++ b/proto-public/pbdataplane/dataplane.proto @@ -4,29 +4,78 @@ syntax = "proto3"; package dataplane; +import "google/protobuf/struct.proto"; + option go_package = "github.com/hashicorp/consul/proto-public/pbdataplane"; - -message SupportedDataplaneFeaturesRequest {} +message GetSupportedDataplaneFeaturesRequest {} enum DataplaneFeatures { - UNKNOWN = 0; - WATCH_SERVERS = 1; - EDGE_CERTIFICATE_MANAGEMENT = 2; - ENVOY_BOOTSTRAP_CONFIGURATION = 3; + UNKNOWN = 0; + WATCH_SERVERS = 1; + EDGE_CERTIFICATE_MANAGEMENT = 2; + ENVOY_BOOTSTRAP_CONFIGURATION = 3; } - message DataplaneFeatureSupport { - DataplaneFeatures feature_name = 1; - bool supported = 2; + DataplaneFeatures feature_name = 1; + bool supported = 2; } -message SupportedDataplaneFeaturesResponse { - repeated DataplaneFeatureSupport supported_dataplane_features = 1; +message GetSupportedDataplaneFeaturesResponse { + repeated DataplaneFeatureSupport supported_dataplane_features = 1; } +message GetEnvoyBootstrapParamsRequest { + oneof node_spec { + string node_id = 1; + string node_name = 2; + } + // The proxy service ID + string service_id = 3; + string partition = 4; + string namespace = 5; +} + +enum ServiceKind { + // ServiceKind Typical is a typical, classic Consul service. This is + // represented by the absence of a value. This was chosen for ease of + // backwards compatibility: existing services in the catalog would + // default to the typical service. 
+ TYPICAL = 0; + + // ServiceKind Connect Proxy is a proxy for the Connect feature. This + // service proxies another service within Consul and speaks the connect + // protocol. + CONNECT_PROXY = 1; + + // ServiceKind Mesh Gateway is a Mesh Gateway for the Connect feature. This + // service will proxy connections based off the SNI header set by other + // connect proxies. + MESH_GATEWAY = 2; + + // ServiceKind Terminating Gateway is a Terminating Gateway for the Connect + // feature. This service will proxy connections to services outside the mesh. + TERMINATING_GATEWAY = 3; + + // ServiceKind Ingress Gateway is an Ingress Gateway for the Connect feature. + // This service will ingress connections into the service mesh. + INGRESS_GATEWAY = 4; +} + +message GetEnvoyBootstrapParamsResponse { + ServiceKind service_kind = 1; + // The destination service name + string service = 2; + string namespace = 3; + string partition = 4; + string datacenter = 5; + google.protobuf.Struct config = 6; +} service DataplaneService { - rpc SupportedDataplaneFeatures(SupportedDataplaneFeaturesRequest) returns (SupportedDataplaneFeaturesResponse) {}; -} \ No newline at end of file + rpc GetSupportedDataplaneFeatures(GetSupportedDataplaneFeaturesRequest) returns (GetSupportedDataplaneFeaturesResponse) {} + + rpc GetEnvoyBootstrapParams(GetEnvoyBootstrapParamsRequest) returns (GetEnvoyBootstrapParamsResponse) {} + +} diff --git a/proto-public/pbserverdiscovery/serverdiscovery.pb.binary.go b/proto-public/pbserverdiscovery/serverdiscovery.pb.binary.go new file mode 100644 index 0000000000..a2e291967a --- /dev/null +++ b/proto-public/pbserverdiscovery/serverdiscovery.pb.binary.go @@ -0,0 +1,38 @@ +// Code generated by protoc-gen-go-binary. DO NOT EDIT. 
+// source: proto-public/pbserverdiscovery/serverdiscovery.proto + +package pbserverdiscovery + +import ( + "github.com/golang/protobuf/proto" +) + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *WatchServersRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *WatchServersRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *WatchServersResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *WatchServersResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *Server) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *Server) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto-public/pbserverdiscovery/serverdiscovery.pb.go b/proto-public/pbserverdiscovery/serverdiscovery.pb.go new file mode 100644 index 0000000000..c6638e9fc2 --- /dev/null +++ b/proto-public/pbserverdiscovery/serverdiscovery.pb.go @@ -0,0 +1,437 @@ +// Package serverdiscovery provides a service on Consul servers to discover the set of servers +// currently able to handle incoming requests. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 +// source: proto-public/pbserverdiscovery/serverdiscovery.proto + +package pbserverdiscovery + +import ( + context "context" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type WatchServersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Wan being set to true will cause WAN addresses to be sent in the response + // instead of the LAN addresses which are the default + Wan bool `protobuf:"varint,1,opt,name=wan,proto3" json:"wan,omitempty"` +} + +func (x *WatchServersRequest) Reset() { + *x = WatchServersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_public_pbserverdiscovery_serverdiscovery_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WatchServersRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WatchServersRequest) ProtoMessage() {} + +func (x *WatchServersRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_public_pbserverdiscovery_serverdiscovery_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WatchServersRequest.ProtoReflect.Descriptor instead. +func (*WatchServersRequest) Descriptor() ([]byte, []int) { + return file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDescGZIP(), []int{0} +} + +func (x *WatchServersRequest) GetWan() bool { + if x != nil { + return x.Wan + } + return false +} + +type WatchServersResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Servers is the list of server address information. + Servers []*Server `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` +} + +func (x *WatchServersResponse) Reset() { + *x = WatchServersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_public_pbserverdiscovery_serverdiscovery_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WatchServersResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WatchServersResponse) ProtoMessage() {} + +func (x *WatchServersResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_public_pbserverdiscovery_serverdiscovery_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WatchServersResponse.ProtoReflect.Descriptor instead. 
+func (*WatchServersResponse) Descriptor() ([]byte, []int) { + return file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDescGZIP(), []int{1} +} + +func (x *WatchServersResponse) GetServers() []*Server { + if x != nil { + return x.Servers + } + return nil +} + +type Server struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // id is the unique string identifying this server for all time. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // address on the network of the server + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // the consul version of the server + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *Server) Reset() { + *x = Server{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_public_pbserverdiscovery_serverdiscovery_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Server) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Server) ProtoMessage() {} + +func (x *Server) ProtoReflect() protoreflect.Message { + mi := &file_proto_public_pbserverdiscovery_serverdiscovery_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Server.ProtoReflect.Descriptor instead. 
+func (*Server) Descriptor() ([]byte, []int) { + return file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDescGZIP(), []int{2} +} + +func (x *Server) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Server) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *Server) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +var File_proto_public_pbserverdiscovery_serverdiscovery_proto protoreflect.FileDescriptor + +var file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDesc = []byte{ + 0x0a, 0x34, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x2f, 0x70, + 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, + 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x22, 0x27, 0x0a, 0x13, 0x57, 0x61, 0x74, 0x63, 0x68, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, + 0x0a, 0x03, 0x77, 0x61, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x77, 0x61, 0x6e, + 0x22, 0x49, 0x0a, 0x14, 0x57, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x22, 0x4c, 0x0a, 0x06, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x32, 0x79, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x5f, 0x0a, 0x0c, 0x57, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x73, 0x12, 0x24, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, + 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x57, 0x61, 0x74, 0x63, + 0x68, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x30, 0x01, 0x42, 0x3c, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, + 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, + 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDescOnce sync.Once + file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDescData = file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDesc +) + +func file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDescGZIP() []byte { + file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDescOnce.Do(func() { + file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDescData) + }) + return 
file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDescData +} + +var file_proto_public_pbserverdiscovery_serverdiscovery_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_proto_public_pbserverdiscovery_serverdiscovery_proto_goTypes = []interface{}{ + (*WatchServersRequest)(nil), // 0: serverdiscovery.WatchServersRequest + (*WatchServersResponse)(nil), // 1: serverdiscovery.WatchServersResponse + (*Server)(nil), // 2: serverdiscovery.Server +} +var file_proto_public_pbserverdiscovery_serverdiscovery_proto_depIdxs = []int32{ + 2, // 0: serverdiscovery.WatchServersResponse.servers:type_name -> serverdiscovery.Server + 0, // 1: serverdiscovery.ServerDiscoveryService.WatchServers:input_type -> serverdiscovery.WatchServersRequest + 1, // 2: serverdiscovery.ServerDiscoveryService.WatchServers:output_type -> serverdiscovery.WatchServersResponse + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_proto_public_pbserverdiscovery_serverdiscovery_proto_init() } +func file_proto_public_pbserverdiscovery_serverdiscovery_proto_init() { + if File_proto_public_pbserverdiscovery_serverdiscovery_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_public_pbserverdiscovery_serverdiscovery_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WatchServersRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_public_pbserverdiscovery_serverdiscovery_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WatchServersResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + 
} + file_proto_public_pbserverdiscovery_serverdiscovery_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Server); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_public_pbserverdiscovery_serverdiscovery_proto_goTypes, + DependencyIndexes: file_proto_public_pbserverdiscovery_serverdiscovery_proto_depIdxs, + MessageInfos: file_proto_public_pbserverdiscovery_serverdiscovery_proto_msgTypes, + }.Build() + File_proto_public_pbserverdiscovery_serverdiscovery_proto = out.File + file_proto_public_pbserverdiscovery_serverdiscovery_proto_rawDesc = nil + file_proto_public_pbserverdiscovery_serverdiscovery_proto_goTypes = nil + file_proto_public_pbserverdiscovery_serverdiscovery_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ServerDiscoveryServiceClient is the client API for ServerDiscoveryService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ServerDiscoveryServiceClient interface { + // WatchServers will stream back sets of ready servers as they change such as + // when new servers are added or older ones removed. 
A ready server is one that + // should be considered ready for sending general RPC requests towards that would + // catalog queries, xDS proxy configurations and similar services. + WatchServers(ctx context.Context, in *WatchServersRequest, opts ...grpc.CallOption) (ServerDiscoveryService_WatchServersClient, error) +} + +type serverDiscoveryServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewServerDiscoveryServiceClient(cc grpc.ClientConnInterface) ServerDiscoveryServiceClient { + return &serverDiscoveryServiceClient{cc} +} + +func (c *serverDiscoveryServiceClient) WatchServers(ctx context.Context, in *WatchServersRequest, opts ...grpc.CallOption) (ServerDiscoveryService_WatchServersClient, error) { + stream, err := c.cc.NewStream(ctx, &_ServerDiscoveryService_serviceDesc.Streams[0], "/serverdiscovery.ServerDiscoveryService/WatchServers", opts...) + if err != nil { + return nil, err + } + x := &serverDiscoveryServiceWatchServersClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ServerDiscoveryService_WatchServersClient interface { + Recv() (*WatchServersResponse, error) + grpc.ClientStream +} + +type serverDiscoveryServiceWatchServersClient struct { + grpc.ClientStream +} + +func (x *serverDiscoveryServiceWatchServersClient) Recv() (*WatchServersResponse, error) { + m := new(WatchServersResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerDiscoveryServiceServer is the server API for ServerDiscoveryService service. +type ServerDiscoveryServiceServer interface { + // WatchServers will stream back sets of ready servers as they change such as + // when new servers are added or older ones removed. 
A ready server is one that + // should be considered ready for sending general RPC requests towards that would + // catalog queries, xDS proxy configurations and similar services. + WatchServers(*WatchServersRequest, ServerDiscoveryService_WatchServersServer) error +} + +// UnimplementedServerDiscoveryServiceServer can be embedded to have forward compatible implementations. +type UnimplementedServerDiscoveryServiceServer struct { +} + +func (*UnimplementedServerDiscoveryServiceServer) WatchServers(*WatchServersRequest, ServerDiscoveryService_WatchServersServer) error { + return status.Errorf(codes.Unimplemented, "method WatchServers not implemented") +} + +func RegisterServerDiscoveryServiceServer(s *grpc.Server, srv ServerDiscoveryServiceServer) { + s.RegisterService(&_ServerDiscoveryService_serviceDesc, srv) +} + +func _ServerDiscoveryService_WatchServers_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(WatchServersRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ServerDiscoveryServiceServer).WatchServers(m, &serverDiscoveryServiceWatchServersServer{stream}) +} + +type ServerDiscoveryService_WatchServersServer interface { + Send(*WatchServersResponse) error + grpc.ServerStream +} + +type serverDiscoveryServiceWatchServersServer struct { + grpc.ServerStream +} + +func (x *serverDiscoveryServiceWatchServersServer) Send(m *WatchServersResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _ServerDiscoveryService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "serverdiscovery.ServerDiscoveryService", + HandlerType: (*ServerDiscoveryServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "WatchServers", + Handler: _ServerDiscoveryService_WatchServers_Handler, + ServerStreams: true, + }, + }, + Metadata: "proto-public/pbserverdiscovery/serverdiscovery.proto", +} diff --git a/proto-public/pbserverdiscovery/serverdiscovery.proto 
b/proto-public/pbserverdiscovery/serverdiscovery.proto new file mode 100644 index 0000000000..203b25903b --- /dev/null +++ b/proto-public/pbserverdiscovery/serverdiscovery.proto @@ -0,0 +1,37 @@ +// Package serverdiscovery provides a service on Consul servers to discover the set of servers +// currently able to handle incoming requests. + +syntax = "proto3"; + +package serverdiscovery; + +option go_package = "github.com/hashicorp/consul/proto-public/pbserverdiscovery"; + +service ServerDiscoveryService { + // WatchServers will stream back sets of ready servers as they change such as + // when new servers are added or older ones removed. A ready server is one that + // should be considered ready for sending general RPC requests towards that would + // catalog queries, xDS proxy configurations and similar services. + rpc WatchServers(WatchServersRequest) returns (stream WatchServersResponse) {}; +} + +message WatchServersRequest { + // Wan being set to true will cause WAN addresses to be sent in the response + // instead of the LAN addresses which are the default + bool wan = 1; +} + +message WatchServersResponse{ + // Servers is the list of server address information. + repeated Server servers = 1; +} + +message Server { + // id is the unique string identifying this server for all time. 
+ string id = 1; + // address on the network of the server + string address = 2; + // the consul version of the server + string version = 3; +} + diff --git a/proto/pbautoconf/auto_config.go b/proto/pbautoconf/auto_config.go index 96ef19cad8..74a7cf4ab6 100644 --- a/proto/pbautoconf/auto_config.go +++ b/proto/pbautoconf/auto_config.go @@ -23,5 +23,9 @@ func (req *AutoConfigRequest) SetTokenSecret(token string) { } func (req *AutoConfigRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { - return time.Since(start) > rpcHoldTimeout, nil + return time.Since(start) > req.Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime), nil +} + +func (req *AutoConfigRequest) Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout } diff --git a/proto/pbcommon/common.go b/proto/pbcommon/common.go index 79b1592e55..faca038b6f 100644 --- a/proto/pbcommon/common.go +++ b/proto/pbcommon/common.go @@ -75,12 +75,16 @@ func (q *QueryOptions) SetStaleIfError(staleIfError time.Duration) { } func (q *QueryOptions) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + return time.Since(start) > q.Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime), nil +} + +func (q *QueryOptions) Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) time.Duration { maxTime := structs.DurationFromProto(q.MaxQueryTime) o := structs.QueryOptions{ MaxQueryTime: maxTime, MinQueryIndex: q.MinQueryIndex, } - return o.HasTimedOut(start, rpcHoldTimeout, maxQueryTime, defaultQueryTime) + return o.Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime) } // SetFilter is needed to implement the structs.QueryOptionsCompat interface @@ -113,8 +117,13 @@ func (w *WriteRequest) AllowStaleRead() bool { } // HasTimedOut implements structs.RPCInfo -func (w *WriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) 
(bool, error) { - return time.Since(start) > rpcHoldTimeout, nil +func (w *WriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + return time.Since(start) > w.Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime), nil +} + +// Timeout implements structs.RPCInfo +func (w *WriteRequest) Timeout(rpcHoldTimeout, _, _ time.Duration) time.Duration { + return rpcHoldTimeout } // IsRead implements structs.RPCInfo @@ -140,7 +149,12 @@ func (r *ReadRequest) SetTokenSecret(token string) { // HasTimedOut implements structs.RPCInfo func (r *ReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { - return time.Since(start) > rpcHoldTimeout, nil + return time.Since(start) > r.Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime), nil +} + +// Timeout implements structs.RPCInfo +func (r *ReadRequest) Timeout(rpcHoldTimeout, _, _ time.Duration) time.Duration { + return rpcHoldTimeout } // RequestDatacenter implements structs.RPCInfo diff --git a/proto/pbpeering/generate.go b/proto/pbpeering/generate.go new file mode 100644 index 0000000000..1f060a6cfe --- /dev/null +++ b/proto/pbpeering/generate.go @@ -0,0 +1,9 @@ +// TODO: files generated from this go:generate may fail the CI check because of relative source. +// Figure out a way to robustly use this file. +//go:generate protoc --gofast_out=. --gofast_opt=paths=source_relative --go-binary_out=. 
peering.proto +// requires: +// - protoc +// - github.com/gogo/protobuf/protoc-gen-gofast +// - github.com/hashicorp/protoc-gen-go-binary + +package pbpeering diff --git a/proto/pbpeering/peering.go b/proto/pbpeering/peering.go new file mode 100644 index 0000000000..5a7cea91b6 --- /dev/null +++ b/proto/pbpeering/peering.go @@ -0,0 +1,202 @@ +package pbpeering + +import "time" + +// TODO(peering): These are byproducts of not embedding +// types in our protobuf definitions and are temporary; +// Hoping to replace them with 1 or 2 methods per request +// using https://github.com/hashicorp/consul/pull/12507 + +func (msg *PeeringReadRequest) RequestDatacenter() string { + return msg.Datacenter +} + +func (msg *PeeringReadRequest) IsRead() bool { + return true +} + +func (msg *PeeringReadRequest) AllowStaleRead() bool { + return false +} + +func (msg *PeeringReadRequest) TokenSecret() string { + return "" +} + +func (msg *PeeringReadRequest) SetTokenSecret(s string) { + return +} + +func (msg *PeeringReadRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *PeeringReadRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout +} + +func (msg *PeeringListRequest) RequestDatacenter() string { + return msg.Datacenter +} + +func (msg *PeeringListRequest) IsRead() bool { + return true +} + +func (msg *PeeringListRequest) AllowStaleRead() bool { + return false +} + +func (msg *PeeringListRequest) TokenSecret() string { + return "" +} + +func (msg *PeeringListRequest) SetTokenSecret(s string) { + return +} + +func (msg *PeeringListRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements 
structs.RPCInfo +func (msg *PeeringListRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout +} + +func (msg *PeeringWriteRequest) RequestDatacenter() string { + return msg.Datacenter +} + +func (msg *PeeringWriteRequest) IsRead() bool { + return false +} + +func (msg *PeeringWriteRequest) AllowStaleRead() bool { + return false +} + +func (msg *PeeringWriteRequest) TokenSecret() string { + return "" +} + +func (msg *PeeringWriteRequest) SetTokenSecret(s string) { + return +} + +func (msg *PeeringWriteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *PeeringWriteRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout +} + +func (msg *PeeringDeleteRequest) RequestDatacenter() string { + return msg.Datacenter +} + +func (msg *PeeringDeleteRequest) IsRead() bool { + return false +} + +func (msg *PeeringDeleteRequest) AllowStaleRead() bool { + return false +} + +func (msg *PeeringDeleteRequest) TokenSecret() string { + return "" +} + +func (msg *PeeringDeleteRequest) SetTokenSecret(s string) { + return +} + +func (msg *PeeringDeleteRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *PeeringDeleteRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout +} + +// RequestDatacenter implements structs.RPCInfo +func (req *GenerateTokenRequest) RequestDatacenter() string { + return req.Datacenter +} + +// IsRead implements structs.RPCInfo +func (req *GenerateTokenRequest) 
IsRead() bool { + return false +} + +// AllowStaleRead implements structs.RPCInfo +func (req *GenerateTokenRequest) AllowStaleRead() bool { + return false +} + +// TokenSecret implements structs.RPCInfo +func (req *GenerateTokenRequest) TokenSecret() string { + return req.Token +} + +// SetTokenSecret implements structs.RPCInfo +func (req *GenerateTokenRequest) SetTokenSecret(token string) { + req.Token = token +} + +// HasTimedOut implements structs.RPCInfo +func (req *GenerateTokenRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *GenerateTokenRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout +} + +// RequestDatacenter implements structs.RPCInfo +func (req *InitiateRequest) RequestDatacenter() string { + return req.Datacenter +} + +// IsRead implements structs.RPCInfo +func (req *InitiateRequest) IsRead() bool { + return false +} + +// AllowStaleRead implements structs.RPCInfo +func (req *InitiateRequest) AllowStaleRead() bool { + return false +} + +// TokenSecret implements structs.RPCInfo +func (req *InitiateRequest) TokenSecret() string { + return req.Token +} + +// SetTokenSecret implements structs.RPCInfo +func (req *InitiateRequest) SetTokenSecret(token string) { + req.Token = token +} + +// HasTimedOut implements structs.RPCInfo +func (req *InitiateRequest) HasTimedOut(start time.Time, rpcHoldTimeout, _, _ time.Duration) (bool, error) { + return time.Since(start) > rpcHoldTimeout, nil +} + +// Timeout implements structs.RPCInfo +func (msg *InitiateRequest) Timeout(rpcHoldTimeout time.Duration, maxQueryTime time.Duration, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout +} + +// ShouldDial returns true when the peering was stored via the peering initiation endpoint, +// AND the peering is not 
marked as terminated by our peer. +// If we generated a token for this peer we did not store our server addresses under PeerServerAddresses. +// These server addresses are for dialing, and only the peer initiating the peering will do the dialing. +func (p *Peering) ShouldDial() bool { + return len(p.PeerServerAddresses) > 0 && p.State != PeeringState_TERMINATED +} diff --git a/proto/pbpeering/peering.pb.binary.go b/proto/pbpeering/peering.pb.binary.go new file mode 100644 index 0000000000..e7e7557781 --- /dev/null +++ b/proto/pbpeering/peering.pb.binary.go @@ -0,0 +1,248 @@ +// Code generated by protoc-gen-go-binary. DO NOT EDIT. +// source: proto/pbpeering/peering.proto + +package pbpeering + +import ( + "github.com/golang/protobuf/proto" +) + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *Peering) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *Peering) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTrustBundle) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTrustBundle) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringReadRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringReadRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringReadResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringReadResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// 
MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringListRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringListRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringListResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringListResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringWriteRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringWriteRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringWriteResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringWriteResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringDeleteRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringDeleteRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringDeleteResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringDeleteResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTerminateByIDRequest) 
MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTerminateByIDRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTerminateByIDResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTerminateByIDResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTrustBundleWriteRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTrustBundleWriteRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTrustBundleWriteResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTrustBundleWriteResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTrustBundleDeleteRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTrustBundleDeleteRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *PeeringTrustBundleDeleteResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *PeeringTrustBundleDeleteResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func 
(msg *GenerateTokenRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *GenerateTokenRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *GenerateTokenResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *GenerateTokenResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *InitiateRequest) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *InitiateRequest) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *InitiateResponse) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *InitiateResponse) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ReplicationMessage) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ReplicationMessage) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ReplicationMessage_Request) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ReplicationMessage_Request) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ReplicationMessage_Response) MarshalBinary() ([]byte, error) { + return 
proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ReplicationMessage_Response) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *ReplicationMessage_Terminated) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *ReplicationMessage_Terminated) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto/pbpeering/peering.pb.go b/proto/pbpeering/peering.pb.go new file mode 100644 index 0000000000..902b8c28c1 --- /dev/null +++ b/proto/pbpeering/peering.pb.go @@ -0,0 +1,2569 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 +// source: proto/pbpeering/peering.proto + +package pbpeering + +import ( + context "context" + proto "github.com/golang/protobuf/proto" + pbstatus "github.com/hashicorp/consul/proto/pbstatus" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// PeeringState enumerates all the states a peering can be in +type PeeringState int32 + +const ( + // Undefined represents an unset value for PeeringState during + // writes. 
+ PeeringState_UNDEFINED PeeringState = 0 + // Initial means a Peering has been initialized and is awaiting + // acknowledgement from a remote peer. + PeeringState_INITIAL PeeringState = 1 + // Active means that the peering connection is active and healthy. + PeeringState_ACTIVE PeeringState = 2 + // Failing means the peering connection has been interrupted but has not yet + // been terminated. + PeeringState_FAILING PeeringState = 3 + // Terminated means the peering relationship has been removed. + PeeringState_TERMINATED PeeringState = 4 +) + +// Enum value maps for PeeringState. +var ( + PeeringState_name = map[int32]string{ + 0: "UNDEFINED", + 1: "INITIAL", + 2: "ACTIVE", + 3: "FAILING", + 4: "TERMINATED", + } + PeeringState_value = map[string]int32{ + "UNDEFINED": 0, + "INITIAL": 1, + "ACTIVE": 2, + "FAILING": 3, + "TERMINATED": 4, + } +) + +func (x PeeringState) Enum() *PeeringState { + p := new(PeeringState) + *p = x + return p +} + +func (x PeeringState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PeeringState) Descriptor() protoreflect.EnumDescriptor { + return file_proto_pbpeering_peering_proto_enumTypes[0].Descriptor() +} + +func (PeeringState) Type() protoreflect.EnumType { + return &file_proto_pbpeering_peering_proto_enumTypes[0] +} + +func (x PeeringState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PeeringState.Descriptor instead. +func (PeeringState) EnumDescriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{0} +} + +// Operation enumerates supported operations for replicated resources. +type ReplicationMessage_Response_Operation int32 + +const ( + ReplicationMessage_Response_Unknown ReplicationMessage_Response_Operation = 0 + // UPSERT represents a create or update event. 
+ ReplicationMessage_Response_UPSERT ReplicationMessage_Response_Operation = 1 + // DELETE indicates the resource should be deleted. + // In DELETE operations no Resource will be returned. + // Deletion by an importing peer must be done with the type URL and ID. + ReplicationMessage_Response_DELETE ReplicationMessage_Response_Operation = 2 +) + +// Enum value maps for ReplicationMessage_Response_Operation. +var ( + ReplicationMessage_Response_Operation_name = map[int32]string{ + 0: "Unknown", + 1: "UPSERT", + 2: "DELETE", + } + ReplicationMessage_Response_Operation_value = map[string]int32{ + "Unknown": 0, + "UPSERT": 1, + "DELETE": 2, + } +) + +func (x ReplicationMessage_Response_Operation) Enum() *ReplicationMessage_Response_Operation { + p := new(ReplicationMessage_Response_Operation) + *p = x + return p +} + +func (x ReplicationMessage_Response_Operation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ReplicationMessage_Response_Operation) Descriptor() protoreflect.EnumDescriptor { + return file_proto_pbpeering_peering_proto_enumTypes[1].Descriptor() +} + +func (ReplicationMessage_Response_Operation) Type() protoreflect.EnumType { + return &file_proto_pbpeering_peering_proto_enumTypes[1] +} + +func (x ReplicationMessage_Response_Operation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ReplicationMessage_Response_Operation.Descriptor instead. +func (ReplicationMessage_Response_Operation) EnumDescriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{20, 1, 0} +} + +// Peering defines a peering relationship between two disparate Consul clusters +type Peering struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID is a datacenter-scoped UUID for the peering. + // The ID is generated when a peering is first written to the state store. 
+ ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + // Name is the local alias for the peering relationship. + Name string `protobuf:"bytes,2,opt,name=Name,proto3" json:"Name,omitempty"` + // Partition is the local partition connecting to the peer. + Partition string `protobuf:"bytes,3,opt,name=Partition,proto3" json:"Partition,omitempty"` + // State is one of the valid PeeringState values to represent the status of + // peering relationship. + State PeeringState `protobuf:"varint,4,opt,name=State,proto3,enum=pbpeering.PeeringState" json:"State,omitempty"` + // PeerID is the ID that our peer assigned to this peering. + // This ID is to be used when dialing the peer, so that it can know who dialed it. + PeerID string `protobuf:"bytes,5,opt,name=PeerID,proto3" json:"PeerID,omitempty"` + // PeerCAPems contains all the CA certificates for the remote peer. + PeerCAPems []string `protobuf:"bytes,6,rep,name=PeerCAPems,proto3" json:"PeerCAPems,omitempty"` + // PeerServerName is the name of the remote server as it relates to TLS. + PeerServerName string `protobuf:"bytes,7,opt,name=PeerServerName,proto3" json:"PeerServerName,omitempty"` + // PeerServerAddresses contains all the the connection addresses for the remote peer. + PeerServerAddresses []string `protobuf:"bytes,8,rep,name=PeerServerAddresses,proto3" json:"PeerServerAddresses,omitempty"` + // CreateIndex is the Raft index at which the Peering was created. + CreateIndex uint64 `protobuf:"varint,9,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty"` + // ModifyIndex is the latest Raft index at which the Peering. was modified. 
+ ModifyIndex uint64 `protobuf:"varint,10,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty"` +} + +func (x *Peering) Reset() { + *x = Peering{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Peering) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Peering) ProtoMessage() {} + +func (x *Peering) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Peering.ProtoReflect.Descriptor instead. +func (*Peering) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{0} +} + +func (x *Peering) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *Peering) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Peering) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *Peering) GetState() PeeringState { + if x != nil { + return x.State + } + return PeeringState_UNDEFINED +} + +func (x *Peering) GetPeerID() string { + if x != nil { + return x.PeerID + } + return "" +} + +func (x *Peering) GetPeerCAPems() []string { + if x != nil { + return x.PeerCAPems + } + return nil +} + +func (x *Peering) GetPeerServerName() string { + if x != nil { + return x.PeerServerName + } + return "" +} + +func (x *Peering) GetPeerServerAddresses() []string { + if x != nil { + return x.PeerServerAddresses + } + return nil +} + +func (x *Peering) GetCreateIndex() uint64 { + if x != nil { + return x.CreateIndex + } + return 0 +} + +func (x *Peering) GetModifyIndex() uint64 { + if x != nil { + return x.ModifyIndex + } + return 0 +} + 
+// PeeringTrustBundle holds the trust information for validating requests from a peer. +type PeeringTrustBundle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // TrustDomain is the domain for the bundle, example.com, foo.bar.gov for example. Note that this must not have a prefix such as "spiffe://". + TrustDomain string `protobuf:"bytes,1,opt,name=TrustDomain,proto3" json:"TrustDomain,omitempty"` + // PeerName associates the trust bundle with a peer. + PeerName string `protobuf:"bytes,2,opt,name=PeerName,proto3" json:"PeerName,omitempty"` + // Partition isolates the bundle from other trust bundles in separate partitions. + Partition string `protobuf:"bytes,3,opt,name=Partition,proto3" json:"Partition,omitempty"` + // RootPEMs holds ASN.1 DER encoded X.509 certificate data for the trust bundle. + RootPEMs []string `protobuf:"bytes,4,rep,name=RootPEMs,proto3" json:"RootPEMs,omitempty"` + // CreateIndex is the Raft index at which the trust domain was created. + CreateIndex uint64 `protobuf:"varint,5,opt,name=CreateIndex,proto3" json:"CreateIndex,omitempty"` + // ModifyIndex is the latest Raft index at which the trust bundle was modified. 
+ ModifyIndex uint64 `protobuf:"varint,6,opt,name=ModifyIndex,proto3" json:"ModifyIndex,omitempty"` +} + +func (x *PeeringTrustBundle) Reset() { + *x = PeeringTrustBundle{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTrustBundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTrustBundle) ProtoMessage() {} + +func (x *PeeringTrustBundle) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTrustBundle.ProtoReflect.Descriptor instead. +func (*PeeringTrustBundle) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{1} +} + +func (x *PeeringTrustBundle) GetTrustDomain() string { + if x != nil { + return x.TrustDomain + } + return "" +} + +func (x *PeeringTrustBundle) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + +func (x *PeeringTrustBundle) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *PeeringTrustBundle) GetRootPEMs() []string { + if x != nil { + return x.RootPEMs + } + return nil +} + +func (x *PeeringTrustBundle) GetCreateIndex() uint64 { + if x != nil { + return x.CreateIndex + } + return 0 +} + +func (x *PeeringTrustBundle) GetModifyIndex() uint64 { + if x != nil { + return x.ModifyIndex + } + return 0 +} + +type PeeringReadRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" 
json:"Partition,omitempty"` + Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (x *PeeringReadRequest) Reset() { + *x = PeeringReadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringReadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringReadRequest) ProtoMessage() {} + +func (x *PeeringReadRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringReadRequest.ProtoReflect.Descriptor instead. +func (*PeeringReadRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{2} +} + +func (x *PeeringReadRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PeeringReadRequest) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *PeeringReadRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +type PeeringReadResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Peering *Peering `protobuf:"bytes,1,opt,name=Peering,proto3" json:"Peering,omitempty"` +} + +func (x *PeeringReadResponse) Reset() { + *x = PeeringReadResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringReadResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringReadResponse) 
ProtoMessage() {} + +func (x *PeeringReadResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringReadResponse.ProtoReflect.Descriptor instead. +func (*PeeringReadResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{3} +} + +func (x *PeeringReadResponse) GetPeering() *Peering { + if x != nil { + return x.Peering + } + return nil +} + +type PeeringListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Partition string `protobuf:"bytes,1,opt,name=Partition,proto3" json:"Partition,omitempty"` + Datacenter string `protobuf:"bytes,2,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (x *PeeringListRequest) Reset() { + *x = PeeringListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringListRequest) ProtoMessage() {} + +func (x *PeeringListRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringListRequest.ProtoReflect.Descriptor instead. 
+func (*PeeringListRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{4} +} + +func (x *PeeringListRequest) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *PeeringListRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +type PeeringListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Peerings []*Peering `protobuf:"bytes,1,rep,name=Peerings,proto3" json:"Peerings,omitempty"` +} + +func (x *PeeringListResponse) Reset() { + *x = PeeringListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringListResponse) ProtoMessage() {} + +func (x *PeeringListResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringListResponse.ProtoReflect.Descriptor instead. +func (*PeeringListResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{5} +} + +func (x *PeeringListResponse) GetPeerings() []*Peering { + if x != nil { + return x.Peerings + } + return nil +} + +type PeeringWriteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Peering *Peering `protobuf:"bytes,1,opt,name=Peering,proto3" json:"Peering,omitempty"` + //TODO(peering): what to do with embedded write request? 
+ Datacenter string `protobuf:"bytes,2,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (x *PeeringWriteRequest) Reset() { + *x = PeeringWriteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringWriteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringWriteRequest) ProtoMessage() {} + +func (x *PeeringWriteRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringWriteRequest.ProtoReflect.Descriptor instead. +func (*PeeringWriteRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{6} +} + +func (x *PeeringWriteRequest) GetPeering() *Peering { + if x != nil { + return x.Peering + } + return nil +} + +func (x *PeeringWriteRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +// TODO(peering): Consider returning Peering if we keep this endpoint around +type PeeringWriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PeeringWriteResponse) Reset() { + *x = PeeringWriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringWriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringWriteResponse) ProtoMessage() {} + +func (x *PeeringWriteResponse) ProtoReflect() protoreflect.Message { + mi := 
&file_proto_pbpeering_peering_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringWriteResponse.ProtoReflect.Descriptor instead. +func (*PeeringWriteResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{7} +} + +type PeeringDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` + //TODO(peering): what to do with embedded write request? + Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (x *PeeringDeleteRequest) Reset() { + *x = PeeringDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringDeleteRequest) ProtoMessage() {} + +func (x *PeeringDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*PeeringDeleteRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{8} +} + +func (x *PeeringDeleteRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PeeringDeleteRequest) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *PeeringDeleteRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +type PeeringDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PeeringDeleteResponse) Reset() { + *x = PeeringDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringDeleteResponse) ProtoMessage() {} + +func (x *PeeringDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringDeleteResponse.ProtoReflect.Descriptor instead. 
+func (*PeeringDeleteResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{9} +} + +type PeeringTerminateByIDRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` +} + +func (x *PeeringTerminateByIDRequest) Reset() { + *x = PeeringTerminateByIDRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTerminateByIDRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTerminateByIDRequest) ProtoMessage() {} + +func (x *PeeringTerminateByIDRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTerminateByIDRequest.ProtoReflect.Descriptor instead. 
+func (*PeeringTerminateByIDRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{10} +} + +func (x *PeeringTerminateByIDRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +type PeeringTerminateByIDResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PeeringTerminateByIDResponse) Reset() { + *x = PeeringTerminateByIDResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTerminateByIDResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTerminateByIDResponse) ProtoMessage() {} + +func (x *PeeringTerminateByIDResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTerminateByIDResponse.ProtoReflect.Descriptor instead. +func (*PeeringTerminateByIDResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{11} +} + +type PeeringTrustBundleWriteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeeringTrustBundle *PeeringTrustBundle `protobuf:"bytes,1,opt,name=PeeringTrustBundle,proto3" json:"PeeringTrustBundle,omitempty"` + //TODO(peering): what to do with embedded write request? 
+ Datacenter string `protobuf:"bytes,2,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (x *PeeringTrustBundleWriteRequest) Reset() { + *x = PeeringTrustBundleWriteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTrustBundleWriteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTrustBundleWriteRequest) ProtoMessage() {} + +func (x *PeeringTrustBundleWriteRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTrustBundleWriteRequest.ProtoReflect.Descriptor instead. +func (*PeeringTrustBundleWriteRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{12} +} + +func (x *PeeringTrustBundleWriteRequest) GetPeeringTrustBundle() *PeeringTrustBundle { + if x != nil { + return x.PeeringTrustBundle + } + return nil +} + +func (x *PeeringTrustBundleWriteRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +type PeeringTrustBundleWriteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PeeringTrustBundleWriteResponse) Reset() { + *x = PeeringTrustBundleWriteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTrustBundleWriteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTrustBundleWriteResponse) ProtoMessage() {} + +func 
(x *PeeringTrustBundleWriteResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTrustBundleWriteResponse.ProtoReflect.Descriptor instead. +func (*PeeringTrustBundleWriteResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{13} +} + +type PeeringTrustBundleDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` + //TODO(peering): what to do with embedded write request? + Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` +} + +func (x *PeeringTrustBundleDeleteRequest) Reset() { + *x = PeeringTrustBundleDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTrustBundleDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTrustBundleDeleteRequest) ProtoMessage() {} + +func (x *PeeringTrustBundleDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTrustBundleDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*PeeringTrustBundleDeleteRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{14} +} + +func (x *PeeringTrustBundleDeleteRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PeeringTrustBundleDeleteRequest) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *PeeringTrustBundleDeleteRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +type PeeringTrustBundleDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PeeringTrustBundleDeleteResponse) Reset() { + *x = PeeringTrustBundleDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeeringTrustBundleDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeeringTrustBundleDeleteResponse) ProtoMessage() {} + +func (x *PeeringTrustBundleDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeeringTrustBundleDeleteResponse.ProtoReflect.Descriptor instead. +func (*PeeringTrustBundleDeleteResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{15} +} + +type GenerateTokenRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the remote peer. + PeerName string `protobuf:"bytes,1,opt,name=PeerName,proto3" json:"PeerName,omitempty"` + // Partition to to be peered. 
+ Partition string `protobuf:"bytes,2,opt,name=Partition,proto3" json:"Partition,omitempty"` + // these are common fields required for implementing structs.RPCInfo methods + // that are used to forward requests + Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` + Token string `protobuf:"bytes,4,opt,name=Token,proto3" json:"Token,omitempty"` +} + +func (x *GenerateTokenRequest) Reset() { + *x = GenerateTokenRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenerateTokenRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateTokenRequest) ProtoMessage() {} + +func (x *GenerateTokenRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateTokenRequest.ProtoReflect.Descriptor instead. 
+func (*GenerateTokenRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{16} +} + +func (x *GenerateTokenRequest) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + +func (x *GenerateTokenRequest) GetPartition() string { + if x != nil { + return x.Partition + } + return "" +} + +func (x *GenerateTokenRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +func (x *GenerateTokenRequest) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +type GenerateTokenResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // PeeringToken is an opaque string provided to the remote peer for it to complete + // the peering initialization handshake. + PeeringToken string `protobuf:"bytes,1,opt,name=PeeringToken,proto3" json:"PeeringToken,omitempty"` +} + +func (x *GenerateTokenResponse) Reset() { + *x = GenerateTokenResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GenerateTokenResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateTokenResponse) ProtoMessage() {} + +func (x *GenerateTokenResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateTokenResponse.ProtoReflect.Descriptor instead. 
+func (*GenerateTokenResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{17} +} + +func (x *GenerateTokenResponse) GetPeeringToken() string { + if x != nil { + return x.PeeringToken + } + return "" +} + +type InitiateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name of the remote peer. + PeerName string `protobuf:"bytes,1,opt,name=PeerName,proto3" json:"PeerName,omitempty"` + // The peering token returned from the peer's GenerateToken endpoint. + PeeringToken string `protobuf:"bytes,2,opt,name=PeeringToken,proto3" json:"PeeringToken,omitempty"` + // these are common fields required for implementing structs.RPCInfo methods + // that are used to forward requests + Datacenter string `protobuf:"bytes,3,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` + Token string `protobuf:"bytes,4,opt,name=Token,proto3" json:"Token,omitempty"` +} + +func (x *InitiateRequest) Reset() { + *x = InitiateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitiateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitiateRequest) ProtoMessage() {} + +func (x *InitiateRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitiateRequest.ProtoReflect.Descriptor instead. 
+func (*InitiateRequest) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{18} +} + +func (x *InitiateRequest) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + +func (x *InitiateRequest) GetPeeringToken() string { + if x != nil { + return x.PeeringToken + } + return "" +} + +func (x *InitiateRequest) GetDatacenter() string { + if x != nil { + return x.Datacenter + } + return "" +} + +func (x *InitiateRequest) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +type InitiateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // this is just a placeholder to avoid returning google.protobuf.Empty + // (and consequently gogo.protobuf.types that it will be replaced with) + Status uint32 `protobuf:"varint,1,opt,name=Status,proto3" json:"Status,omitempty"` +} + +func (x *InitiateResponse) Reset() { + *x = InitiateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitiateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitiateResponse) ProtoMessage() {} + +func (x *InitiateResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitiateResponse.ProtoReflect.Descriptor instead. 
+func (*InitiateResponse) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{19} +} + +func (x *InitiateResponse) GetStatus() uint32 { + if x != nil { + return x.Status + } + return 0 +} + +type ReplicationMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: + // *ReplicationMessage_Request_ + // *ReplicationMessage_Response_ + // *ReplicationMessage_Terminated_ + Payload isReplicationMessage_Payload `protobuf_oneof:"Payload"` +} + +func (x *ReplicationMessage) Reset() { + *x = ReplicationMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReplicationMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicationMessage) ProtoMessage() {} + +func (x *ReplicationMessage) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicationMessage.ProtoReflect.Descriptor instead. 
+func (*ReplicationMessage) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{20} +} + +func (m *ReplicationMessage) GetPayload() isReplicationMessage_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *ReplicationMessage) GetRequest() *ReplicationMessage_Request { + if x, ok := x.GetPayload().(*ReplicationMessage_Request_); ok { + return x.Request + } + return nil +} + +func (x *ReplicationMessage) GetResponse() *ReplicationMessage_Response { + if x, ok := x.GetPayload().(*ReplicationMessage_Response_); ok { + return x.Response + } + return nil +} + +func (x *ReplicationMessage) GetTerminated() *ReplicationMessage_Terminated { + if x, ok := x.GetPayload().(*ReplicationMessage_Terminated_); ok { + return x.Terminated + } + return nil +} + +type isReplicationMessage_Payload interface { + isReplicationMessage_Payload() +} + +type ReplicationMessage_Request_ struct { + Request *ReplicationMessage_Request `protobuf:"bytes,1,opt,name=request,proto3,oneof"` +} + +type ReplicationMessage_Response_ struct { + Response *ReplicationMessage_Response `protobuf:"bytes,2,opt,name=response,proto3,oneof"` +} + +type ReplicationMessage_Terminated_ struct { + Terminated *ReplicationMessage_Terminated `protobuf:"bytes,3,opt,name=terminated,proto3,oneof"` +} + +func (*ReplicationMessage_Request_) isReplicationMessage_Payload() {} + +func (*ReplicationMessage_Response_) isReplicationMessage_Payload() {} + +func (*ReplicationMessage_Terminated_) isReplicationMessage_Payload() {} + +// A Request requests to subscribe to a resource of a given type. +type ReplicationMessage_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An identifier for the peer making the request. + // This identifier is provisioned by the serving peer prior to the request from the dialing peer. 
+ PeerID string `protobuf:"bytes,1,opt,name=PeerID,proto3" json:"PeerID,omitempty"` + // Nonce corresponding to that of the response being ACKed or NACKed. + // Initial subscription requests will have an empty nonce. + // The nonce is generated and incremented by the exporting peer. + Nonce string `protobuf:"bytes,2,opt,name=Nonce,proto3" json:"Nonce,omitempty"` + // The type URL for the resource being requested or ACK/NACKed. + ResourceURL string `protobuf:"bytes,3,opt,name=ResourceURL,proto3" json:"ResourceURL,omitempty"` + // The error if the previous response was not applied successfully. + // This field is empty in the first subscription request. + Error *pbstatus.Status `protobuf:"bytes,4,opt,name=Error,proto3" json:"Error,omitempty"` +} + +func (x *ReplicationMessage_Request) Reset() { + *x = ReplicationMessage_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReplicationMessage_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicationMessage_Request) ProtoMessage() {} + +func (x *ReplicationMessage_Request) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicationMessage_Request.ProtoReflect.Descriptor instead. 
+func (*ReplicationMessage_Request) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *ReplicationMessage_Request) GetPeerID() string { + if x != nil { + return x.PeerID + } + return "" +} + +func (x *ReplicationMessage_Request) GetNonce() string { + if x != nil { + return x.Nonce + } + return "" +} + +func (x *ReplicationMessage_Request) GetResourceURL() string { + if x != nil { + return x.ResourceURL + } + return "" +} + +func (x *ReplicationMessage_Request) GetError() *pbstatus.Status { + if x != nil { + return x.Error + } + return nil +} + +// A Response contains resources corresponding to a subscription request. +type ReplicationMessage_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Nonce identifying a response in a stream. + Nonce string `protobuf:"bytes,1,opt,name=Nonce,proto3" json:"Nonce,omitempty"` + // The type URL of resource being returned. + ResourceURL string `protobuf:"bytes,2,opt,name=ResourceURL,proto3" json:"ResourceURL,omitempty"` + // An identifier for the resource being returned. + // This could be the SPIFFE ID of the service. + ResourceID string `protobuf:"bytes,3,opt,name=ResourceID,proto3" json:"ResourceID,omitempty"` + // The resource being returned. + Resource *anypb.Any `protobuf:"bytes,4,opt,name=Resource,proto3" json:"Resource,omitempty"` + // REQUIRED. The operation to be performed in relation to the resource. 
+ Operation ReplicationMessage_Response_Operation `protobuf:"varint,5,opt,name=operation,proto3,enum=pbpeering.ReplicationMessage_Response_Operation" json:"operation,omitempty"` +} + +func (x *ReplicationMessage_Response) Reset() { + *x = ReplicationMessage_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReplicationMessage_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicationMessage_Response) ProtoMessage() {} + +func (x *ReplicationMessage_Response) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicationMessage_Response.ProtoReflect.Descriptor instead. +func (*ReplicationMessage_Response) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{20, 1} +} + +func (x *ReplicationMessage_Response) GetNonce() string { + if x != nil { + return x.Nonce + } + return "" +} + +func (x *ReplicationMessage_Response) GetResourceURL() string { + if x != nil { + return x.ResourceURL + } + return "" +} + +func (x *ReplicationMessage_Response) GetResourceID() string { + if x != nil { + return x.ResourceID + } + return "" +} + +func (x *ReplicationMessage_Response) GetResource() *anypb.Any { + if x != nil { + return x.Resource + } + return nil +} + +func (x *ReplicationMessage_Response) GetOperation() ReplicationMessage_Response_Operation { + if x != nil { + return x.Operation + } + return ReplicationMessage_Response_Unknown +} + +// Terminated is sent when a peering is deleted locally. 
+// This message signals to the peer that they should clean up their local state about the peering. +type ReplicationMessage_Terminated struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReplicationMessage_Terminated) Reset() { + *x = ReplicationMessage_Terminated{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbpeering_peering_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReplicationMessage_Terminated) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicationMessage_Terminated) ProtoMessage() {} + +func (x *ReplicationMessage_Terminated) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbpeering_peering_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicationMessage_Terminated.ProtoReflect.Descriptor instead. 
+func (*ReplicationMessage_Terminated) Descriptor() ([]byte, []int) { + return file_proto_pbpeering_peering_proto_rawDescGZIP(), []int{20, 2} +} + +var File_proto_pbpeering_peering_proto protoreflect.FileDescriptor + +var file_proto_pbpeering_peering_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x09, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xd0, 0x02, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x0e, + 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, + 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x2d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x17, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x12, 0x1e, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x41, 0x50, 0x65, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x50, 0x65, 0x65, + 0x72, 0x43, 0x41, 0x50, 0x65, 0x6d, 0x73, 0x12, 0x26, 
0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x30, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x50, 0x65, + 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xd0, 0x01, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, + 0x54, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x54, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a, + 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, + 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x52, 0x6f, 0x6f, 0x74, + 0x50, 0x45, 0x4d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x52, 0x6f, 0x6f, 0x74, + 0x50, 0x45, 0x4d, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x43, 0x72, 0x65, 0x61, 
0x74, + 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x20, 0x0a, 0x0b, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x4d, 0x6f, 0x64, + 0x69, 0x66, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x66, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x22, 0x43, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x50, 0x65, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x22, 0x52, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, + 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x50, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, + 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, + 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x45, 0x0a, 0x13, 0x50, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2e, 0x0a, 0x08, 0x50, 
0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, + 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x73, + 0x22, 0x63, 0x0a, 0x13, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x50, 0x65, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x16, 0x0a, 0x14, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x68, 0x0a, + 0x14, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, + 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, + 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, + 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x17, 0x0a, 0x15, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x2d, 0x0a, 0x1b, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x74, 0x65, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x12, + 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x22, + 0x1e, 0x0a, 0x1c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x74, 0x65, 0x42, 0x79, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x8f, 0x01, 0x0a, 0x1e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, + 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x12, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x12, 0x50, + 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, + 0x72, 0x22, 0x21, 0x0a, 0x1f, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, + 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x73, 0x0a, 0x1f, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, + 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, + 0x61, 
0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, + 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x22, 0x22, 0x0a, 0x20, 0x50, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x54, 0x72, 0x75, 0x73, 0x74, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x86, 0x01, + 0x0a, 0x14, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, + 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x3b, 0x0a, 0x15, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x22, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x22, 0x87, 0x01, 0x0a, 0x0f, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x0c, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, + 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, + 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x2a, 0x0a, + 0x10, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x9c, 0x05, 0x0a, 0x12, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x41, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, + 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0a, 0x74, 0x65, 0x72, + 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 
0x54, 0x65, 0x72, + 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x61, 0x74, 0x65, 0x64, 0x1a, 0x7f, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x50, 0x65, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x6e, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, + 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, + 0x12, 0x24, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x96, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x4c, 0x12, 0x1e, 0x0a, 0x0a, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x44, 0x12, 0x30, 0x0a, 0x08, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x4e, 0x0a, + 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x30, 0x2e, 
0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x30, 0x0a, + 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, + 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x53, 0x45, 0x52, + 0x54, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x1a, + 0x0c, 0x0a, 0x0a, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x42, 0x09, 0x0a, + 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2a, 0x53, 0x0a, 0x0c, 0x50, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, + 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x49, 0x54, 0x49, + 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, + 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x45, 0x44, 0x10, 0x04, 0x32, 0xbf, 0x04, + 0x0a, 0x0e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x52, 0x0a, 0x0d, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x1f, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x49, 
0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, + 0x12, 0x1a, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x49, 0x6e, 0x69, + 0x74, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, + 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x50, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x12, 0x1d, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x50, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1d, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0d, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x0c, 0x50, 0x65, 0x65, + 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x1e, 0x2e, 0x70, 0x62, 0x70, 
0x65, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x62, 0x70, 0x65, + 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x0f, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x1d, 0x2e, + 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1d, 0x2e, 0x70, + 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, + 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, + 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x70, 0x65, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_pbpeering_peering_proto_rawDescOnce sync.Once + file_proto_pbpeering_peering_proto_rawDescData = file_proto_pbpeering_peering_proto_rawDesc +) + +func file_proto_pbpeering_peering_proto_rawDescGZIP() []byte { + file_proto_pbpeering_peering_proto_rawDescOnce.Do(func() { + file_proto_pbpeering_peering_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbpeering_peering_proto_rawDescData) + }) + return file_proto_pbpeering_peering_proto_rawDescData +} + +var file_proto_pbpeering_peering_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_proto_pbpeering_peering_proto_msgTypes = make([]protoimpl.MessageInfo, 24) +var file_proto_pbpeering_peering_proto_goTypes = []interface{}{ + (PeeringState)(0), // 0: pbpeering.PeeringState + 
(ReplicationMessage_Response_Operation)(0), // 1: pbpeering.ReplicationMessage.Response.Operation + (*Peering)(nil), // 2: pbpeering.Peering + (*PeeringTrustBundle)(nil), // 3: pbpeering.PeeringTrustBundle + (*PeeringReadRequest)(nil), // 4: pbpeering.PeeringReadRequest + (*PeeringReadResponse)(nil), // 5: pbpeering.PeeringReadResponse + (*PeeringListRequest)(nil), // 6: pbpeering.PeeringListRequest + (*PeeringListResponse)(nil), // 7: pbpeering.PeeringListResponse + (*PeeringWriteRequest)(nil), // 8: pbpeering.PeeringWriteRequest + (*PeeringWriteResponse)(nil), // 9: pbpeering.PeeringWriteResponse + (*PeeringDeleteRequest)(nil), // 10: pbpeering.PeeringDeleteRequest + (*PeeringDeleteResponse)(nil), // 11: pbpeering.PeeringDeleteResponse + (*PeeringTerminateByIDRequest)(nil), // 12: pbpeering.PeeringTerminateByIDRequest + (*PeeringTerminateByIDResponse)(nil), // 13: pbpeering.PeeringTerminateByIDResponse + (*PeeringTrustBundleWriteRequest)(nil), // 14: pbpeering.PeeringTrustBundleWriteRequest + (*PeeringTrustBundleWriteResponse)(nil), // 15: pbpeering.PeeringTrustBundleWriteResponse + (*PeeringTrustBundleDeleteRequest)(nil), // 16: pbpeering.PeeringTrustBundleDeleteRequest + (*PeeringTrustBundleDeleteResponse)(nil), // 17: pbpeering.PeeringTrustBundleDeleteResponse + (*GenerateTokenRequest)(nil), // 18: pbpeering.GenerateTokenRequest + (*GenerateTokenResponse)(nil), // 19: pbpeering.GenerateTokenResponse + (*InitiateRequest)(nil), // 20: pbpeering.InitiateRequest + (*InitiateResponse)(nil), // 21: pbpeering.InitiateResponse + (*ReplicationMessage)(nil), // 22: pbpeering.ReplicationMessage + (*ReplicationMessage_Request)(nil), // 23: pbpeering.ReplicationMessage.Request + (*ReplicationMessage_Response)(nil), // 24: pbpeering.ReplicationMessage.Response + (*ReplicationMessage_Terminated)(nil), // 25: pbpeering.ReplicationMessage.Terminated + (*pbstatus.Status)(nil), // 26: status.Status + (*anypb.Any)(nil), // 27: google.protobuf.Any +} +var 
file_proto_pbpeering_peering_proto_depIdxs = []int32{ + 0, // 0: pbpeering.Peering.State:type_name -> pbpeering.PeeringState + 2, // 1: pbpeering.PeeringReadResponse.Peering:type_name -> pbpeering.Peering + 2, // 2: pbpeering.PeeringListResponse.Peerings:type_name -> pbpeering.Peering + 2, // 3: pbpeering.PeeringWriteRequest.Peering:type_name -> pbpeering.Peering + 3, // 4: pbpeering.PeeringTrustBundleWriteRequest.PeeringTrustBundle:type_name -> pbpeering.PeeringTrustBundle + 23, // 5: pbpeering.ReplicationMessage.request:type_name -> pbpeering.ReplicationMessage.Request + 24, // 6: pbpeering.ReplicationMessage.response:type_name -> pbpeering.ReplicationMessage.Response + 25, // 7: pbpeering.ReplicationMessage.terminated:type_name -> pbpeering.ReplicationMessage.Terminated + 26, // 8: pbpeering.ReplicationMessage.Request.Error:type_name -> status.Status + 27, // 9: pbpeering.ReplicationMessage.Response.Resource:type_name -> google.protobuf.Any + 1, // 10: pbpeering.ReplicationMessage.Response.operation:type_name -> pbpeering.ReplicationMessage.Response.Operation + 18, // 11: pbpeering.PeeringService.GenerateToken:input_type -> pbpeering.GenerateTokenRequest + 20, // 12: pbpeering.PeeringService.Initiate:input_type -> pbpeering.InitiateRequest + 4, // 13: pbpeering.PeeringService.PeeringRead:input_type -> pbpeering.PeeringReadRequest + 6, // 14: pbpeering.PeeringService.PeeringList:input_type -> pbpeering.PeeringListRequest + 10, // 15: pbpeering.PeeringService.PeeringDelete:input_type -> pbpeering.PeeringDeleteRequest + 8, // 16: pbpeering.PeeringService.PeeringWrite:input_type -> pbpeering.PeeringWriteRequest + 22, // 17: pbpeering.PeeringService.StreamResources:input_type -> pbpeering.ReplicationMessage + 19, // 18: pbpeering.PeeringService.GenerateToken:output_type -> pbpeering.GenerateTokenResponse + 21, // 19: pbpeering.PeeringService.Initiate:output_type -> pbpeering.InitiateResponse + 5, // 20: pbpeering.PeeringService.PeeringRead:output_type -> 
pbpeering.PeeringReadResponse + 7, // 21: pbpeering.PeeringService.PeeringList:output_type -> pbpeering.PeeringListResponse + 11, // 22: pbpeering.PeeringService.PeeringDelete:output_type -> pbpeering.PeeringDeleteResponse + 9, // 23: pbpeering.PeeringService.PeeringWrite:output_type -> pbpeering.PeeringWriteResponse + 22, // 24: pbpeering.PeeringService.StreamResources:output_type -> pbpeering.ReplicationMessage + 18, // [18:25] is the sub-list for method output_type + 11, // [11:18] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_proto_pbpeering_peering_proto_init() } +func file_proto_pbpeering_peering_proto_init() { + if File_proto_pbpeering_peering_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_pbpeering_peering_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Peering); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTrustBundle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringReadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringReadResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_proto_pbpeering_peering_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringWriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringWriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTerminateByIDRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[11].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*PeeringTerminateByIDResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTrustBundleWriteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTrustBundleWriteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTrustBundleDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeeringTrustBundleDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GenerateTokenRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GenerateTokenResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[18].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*InitiateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitiateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicationMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicationMessage_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicationMessage_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbpeering_peering_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplicationMessage_Terminated); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_proto_pbpeering_peering_proto_msgTypes[20].OneofWrappers = []interface{}{ + (*ReplicationMessage_Request_)(nil), + (*ReplicationMessage_Response_)(nil), + (*ReplicationMessage_Terminated_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbpeering_peering_proto_rawDesc, + NumEnums: 2, + 
NumMessages: 24, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_pbpeering_peering_proto_goTypes, + DependencyIndexes: file_proto_pbpeering_peering_proto_depIdxs, + EnumInfos: file_proto_pbpeering_peering_proto_enumTypes, + MessageInfos: file_proto_pbpeering_peering_proto_msgTypes, + }.Build() + File_proto_pbpeering_peering_proto = out.File + file_proto_pbpeering_peering_proto_rawDesc = nil + file_proto_pbpeering_peering_proto_goTypes = nil + file_proto_pbpeering_peering_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// PeeringServiceClient is the client API for PeeringService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type PeeringServiceClient interface { + GenerateToken(ctx context.Context, in *GenerateTokenRequest, opts ...grpc.CallOption) (*GenerateTokenResponse, error) + Initiate(ctx context.Context, in *InitiateRequest, opts ...grpc.CallOption) (*InitiateResponse, error) + PeeringRead(ctx context.Context, in *PeeringReadRequest, opts ...grpc.CallOption) (*PeeringReadResponse, error) + PeeringList(ctx context.Context, in *PeeringListRequest, opts ...grpc.CallOption) (*PeeringListResponse, error) + PeeringDelete(ctx context.Context, in *PeeringDeleteRequest, opts ...grpc.CallOption) (*PeeringDeleteResponse, error) + // TODO(peering): As of writing, this method is only used in tests to set up Peerings in the state store. 
+ // Consider removing if we can find another way to populate state store in peering_endpoint_test.go + PeeringWrite(ctx context.Context, in *PeeringWriteRequest, opts ...grpc.CallOption) (*PeeringWriteResponse, error) + // StreamResources opens an event stream for resources to share between peers, such as services. + // Events are streamed as they happen. + StreamResources(ctx context.Context, opts ...grpc.CallOption) (PeeringService_StreamResourcesClient, error) +} + +type peeringServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewPeeringServiceClient(cc grpc.ClientConnInterface) PeeringServiceClient { + return &peeringServiceClient{cc} +} + +func (c *peeringServiceClient) GenerateToken(ctx context.Context, in *GenerateTokenRequest, opts ...grpc.CallOption) (*GenerateTokenResponse, error) { + out := new(GenerateTokenResponse) + err := c.cc.Invoke(ctx, "/pbpeering.PeeringService/GenerateToken", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *peeringServiceClient) Initiate(ctx context.Context, in *InitiateRequest, opts ...grpc.CallOption) (*InitiateResponse, error) { + out := new(InitiateResponse) + err := c.cc.Invoke(ctx, "/pbpeering.PeeringService/Initiate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *peeringServiceClient) PeeringRead(ctx context.Context, in *PeeringReadRequest, opts ...grpc.CallOption) (*PeeringReadResponse, error) { + out := new(PeeringReadResponse) + err := c.cc.Invoke(ctx, "/pbpeering.PeeringService/PeeringRead", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *peeringServiceClient) PeeringList(ctx context.Context, in *PeeringListRequest, opts ...grpc.CallOption) (*PeeringListResponse, error) { + out := new(PeeringListResponse) + err := c.cc.Invoke(ctx, "/pbpeering.PeeringService/PeeringList", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *peeringServiceClient) PeeringDelete(ctx context.Context, in *PeeringDeleteRequest, opts ...grpc.CallOption) (*PeeringDeleteResponse, error) { + out := new(PeeringDeleteResponse) + err := c.cc.Invoke(ctx, "/pbpeering.PeeringService/PeeringDelete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *peeringServiceClient) PeeringWrite(ctx context.Context, in *PeeringWriteRequest, opts ...grpc.CallOption) (*PeeringWriteResponse, error) { + out := new(PeeringWriteResponse) + err := c.cc.Invoke(ctx, "/pbpeering.PeeringService/PeeringWrite", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *peeringServiceClient) StreamResources(ctx context.Context, opts ...grpc.CallOption) (PeeringService_StreamResourcesClient, error) { + stream, err := c.cc.NewStream(ctx, &_PeeringService_serviceDesc.Streams[0], "/pbpeering.PeeringService/StreamResources", opts...) + if err != nil { + return nil, err + } + x := &peeringServiceStreamResourcesClient{stream} + return x, nil +} + +type PeeringService_StreamResourcesClient interface { + Send(*ReplicationMessage) error + Recv() (*ReplicationMessage, error) + grpc.ClientStream +} + +type peeringServiceStreamResourcesClient struct { + grpc.ClientStream +} + +func (x *peeringServiceStreamResourcesClient) Send(m *ReplicationMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *peeringServiceStreamResourcesClient) Recv() (*ReplicationMessage, error) { + m := new(ReplicationMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// PeeringServiceServer is the server API for PeeringService service. 
+type PeeringServiceServer interface { + GenerateToken(context.Context, *GenerateTokenRequest) (*GenerateTokenResponse, error) + Initiate(context.Context, *InitiateRequest) (*InitiateResponse, error) + PeeringRead(context.Context, *PeeringReadRequest) (*PeeringReadResponse, error) + PeeringList(context.Context, *PeeringListRequest) (*PeeringListResponse, error) + PeeringDelete(context.Context, *PeeringDeleteRequest) (*PeeringDeleteResponse, error) + // TODO(peering): As of writing, this method is only used in tests to set up Peerings in the state store. + // Consider removing if we can find another way to populate state store in peering_endpoint_test.go + PeeringWrite(context.Context, *PeeringWriteRequest) (*PeeringWriteResponse, error) + // StreamResources opens an event stream for resources to share between peers, such as services. + // Events are streamed as they happen. + StreamResources(PeeringService_StreamResourcesServer) error +} + +// UnimplementedPeeringServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedPeeringServiceServer struct { +} + +func (*UnimplementedPeeringServiceServer) GenerateToken(context.Context, *GenerateTokenRequest) (*GenerateTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GenerateToken not implemented") +} +func (*UnimplementedPeeringServiceServer) Initiate(context.Context, *InitiateRequest) (*InitiateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Initiate not implemented") +} +func (*UnimplementedPeeringServiceServer) PeeringRead(context.Context, *PeeringReadRequest) (*PeeringReadResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PeeringRead not implemented") +} +func (*UnimplementedPeeringServiceServer) PeeringList(context.Context, *PeeringListRequest) (*PeeringListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PeeringList not implemented") +} +func (*UnimplementedPeeringServiceServer) PeeringDelete(context.Context, *PeeringDeleteRequest) (*PeeringDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PeeringDelete not implemented") +} +func (*UnimplementedPeeringServiceServer) PeeringWrite(context.Context, *PeeringWriteRequest) (*PeeringWriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PeeringWrite not implemented") +} +func (*UnimplementedPeeringServiceServer) StreamResources(PeeringService_StreamResourcesServer) error { + return status.Errorf(codes.Unimplemented, "method StreamResources not implemented") +} + +func RegisterPeeringServiceServer(s *grpc.Server, srv PeeringServiceServer) { + s.RegisterService(&_PeeringService_serviceDesc, srv) +} + +func _PeeringService_GenerateToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GenerateTokenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(PeeringServiceServer).GenerateToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pbpeering.PeeringService/GenerateToken", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeeringServiceServer).GenerateToken(ctx, req.(*GenerateTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PeeringService_Initiate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InitiateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PeeringServiceServer).Initiate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pbpeering.PeeringService/Initiate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeeringServiceServer).Initiate(ctx, req.(*InitiateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PeeringService_PeeringRead_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PeeringReadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PeeringServiceServer).PeeringRead(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pbpeering.PeeringService/PeeringRead", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeeringServiceServer).PeeringRead(ctx, req.(*PeeringReadRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PeeringService_PeeringList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PeeringListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(PeeringServiceServer).PeeringList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pbpeering.PeeringService/PeeringList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeeringServiceServer).PeeringList(ctx, req.(*PeeringListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PeeringService_PeeringDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PeeringDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PeeringServiceServer).PeeringDelete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pbpeering.PeeringService/PeeringDelete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeeringServiceServer).PeeringDelete(ctx, req.(*PeeringDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PeeringService_PeeringWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PeeringWriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PeeringServiceServer).PeeringWrite(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/pbpeering.PeeringService/PeeringWrite", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PeeringServiceServer).PeeringWrite(ctx, req.(*PeeringWriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _PeeringService_StreamResources_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(PeeringServiceServer).StreamResources(&peeringServiceStreamResourcesServer{stream}) +} + +type PeeringService_StreamResourcesServer interface { + 
Send(*ReplicationMessage) error + Recv() (*ReplicationMessage, error) + grpc.ServerStream +} + +type peeringServiceStreamResourcesServer struct { + grpc.ServerStream +} + +func (x *peeringServiceStreamResourcesServer) Send(m *ReplicationMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *peeringServiceStreamResourcesServer) Recv() (*ReplicationMessage, error) { + m := new(ReplicationMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _PeeringService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "pbpeering.PeeringService", + HandlerType: (*PeeringServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GenerateToken", + Handler: _PeeringService_GenerateToken_Handler, + }, + { + MethodName: "Initiate", + Handler: _PeeringService_Initiate_Handler, + }, + { + MethodName: "PeeringRead", + Handler: _PeeringService_PeeringRead_Handler, + }, + { + MethodName: "PeeringList", + Handler: _PeeringService_PeeringList_Handler, + }, + { + MethodName: "PeeringDelete", + Handler: _PeeringService_PeeringDelete_Handler, + }, + { + MethodName: "PeeringWrite", + Handler: _PeeringService_PeeringWrite_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamResources", + Handler: _PeeringService_StreamResources_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "proto/pbpeering/peering.proto", +} diff --git a/proto/pbpeering/peering.proto b/proto/pbpeering/peering.proto new file mode 100644 index 0000000000..e0c126caf0 --- /dev/null +++ b/proto/pbpeering/peering.proto @@ -0,0 +1,283 @@ +syntax = "proto3"; + +package pbpeering; + +option go_package = "github.com/hashicorp/consul/proto/pbpeering"; + +import "google/protobuf/any.proto"; + +// TODO(peering): Handle this some other way +import "proto/pbstatus/status.proto"; + +// PeeringService handles operations for establishing peering relationships +// between disparate Consul clusters. 
+service PeeringService { + rpc GenerateToken(GenerateTokenRequest) returns (GenerateTokenResponse); + rpc Initiate(InitiateRequest) returns (InitiateResponse); + rpc PeeringRead(PeeringReadRequest) returns (PeeringReadResponse); + rpc PeeringList(PeeringListRequest) returns (PeeringListResponse); + rpc PeeringDelete(PeeringDeleteRequest) returns (PeeringDeleteResponse); + + // TODO(peering): As of writing, this method is only used in tests to set up Peerings in the state store. + // Consider removing if we can find another way to populate state store in peering_endpoint_test.go + rpc PeeringWrite(PeeringWriteRequest) returns (PeeringWriteResponse); + + // StreamResources opens an event stream for resources to share between peers, such as services. + // Events are streamed as they happen. + rpc StreamResources(stream ReplicationMessage) returns (stream ReplicationMessage); +} + +// PeeringState enumerates all the states a peering can be in +enum PeeringState { + // Undefined represents an unset value for PeeringState during + // writes. + UNDEFINED = 0; + + // Initial means a Peering has been initialized and is awaiting + // acknowledgement from a remote peer. + INITIAL = 1; + + // Active means that the peering connection is active and healthy. + ACTIVE = 2; + + // Failing means the peering connection has been interrupted but has not yet + // been terminated. + FAILING = 3; + + // Terminated means the peering relationship has been removed. + TERMINATED = 4; +} + +// Peering defines a peering relationship between two disparate Consul clusters +message Peering { + // ID is a datacenter-scoped UUID for the peering. + // The ID is generated when a peering is first written to the state store. + string ID = 1; + + // Name is the local alias for the peering relationship. + string Name = 2; + + // Partition is the local partition connecting to the peer. 
+ string Partition = 3; + + // State is one of the valid PeeringState values to represent the status of + // peering relationship. + PeeringState State = 4; + + // PeerID is the ID that our peer assigned to this peering. + // This ID is to be used when dialing the peer, so that it can know who dialed it. + string PeerID = 5; + + // PeerCAPems contains all the CA certificates for the remote peer. + repeated string PeerCAPems = 6; + + // PeerServerName is the name of the remote server as it relates to TLS. + string PeerServerName = 7; + + // PeerServerAddresses contains all the the connection addresses for the remote peer. + repeated string PeerServerAddresses = 8; + + // CreateIndex is the Raft index at which the Peering was created. + uint64 CreateIndex = 9; + + // ModifyIndex is the latest Raft index at which the Peering. was modified. + uint64 ModifyIndex = 10; +} + +// PeeringTrustBundle holds the trust information for validating requests from a peer. +message PeeringTrustBundle { + // TrustDomain is the domain for the bundle, example.com, foo.bar.gov for example. Note that this must not have a prefix such as "spiffe://". + string TrustDomain = 1; + + // PeerName associates the trust bundle with a peer. + string PeerName = 2; + + // Partition isolates the bundle from other trust bundles in separate partitions. + string Partition = 3; + + // RootPEMs holds ASN.1 DER encoded X.509 certificate data for the trust bundle. + repeated string RootPEMs = 4; + + // CreateIndex is the Raft index at which the trust domain was created. + uint64 CreateIndex = 5; + + // ModifyIndex is the latest Raft index at which the trust bundle was modified. 
+ uint64 ModifyIndex = 6; +} + +message PeeringReadRequest { + string Name = 1; + string Partition = 2; + + string Datacenter = 3; + + //TODO(peering) query metadata +} + +message PeeringReadResponse { + Peering Peering = 1; + + //TODO(peering) query metadata +} + +message PeeringListRequest { + string Partition = 1; + + string Datacenter = 2; + + //TODO(peering) query metadata +} + +message PeeringListResponse { + repeated Peering Peerings = 1; + + //TODO(peering) query metadata +} + +message PeeringWriteRequest { + Peering Peering = 1; + + //TODO(peering): what to do with embedded write request? + string Datacenter = 2; +} + +// TODO(peering): Consider returning Peering if we keep this endpoint around +message PeeringWriteResponse{} + +message PeeringDeleteRequest { + string Name = 1; + + string Partition = 2; + + //TODO(peering): what to do with embedded write request? + string Datacenter = 3; +} + +message PeeringDeleteResponse {} + +message PeeringTerminateByIDRequest { + string ID = 1; +} + +message PeeringTerminateByIDResponse {} + +message PeeringTrustBundleWriteRequest { + PeeringTrustBundle PeeringTrustBundle = 1; + + //TODO(peering): what to do with embedded write request? + string Datacenter = 2; +} + +message PeeringTrustBundleWriteResponse{} + +message PeeringTrustBundleDeleteRequest { + string Name = 1; + + string Partition = 2; + + //TODO(peering): what to do with embedded write request? + string Datacenter = 3; +} + +message PeeringTrustBundleDeleteResponse{} + +message GenerateTokenRequest { + // Name of the remote peer. + string PeerName = 1; + + // Partition to to be peered. + string Partition = 2; + + // these are common fields required for implementing structs.RPCInfo methods + // that are used to forward requests + string Datacenter = 3; + string Token = 4; +} + +message GenerateTokenResponse { + // PeeringToken is an opaque string provided to the remote peer for it to complete + // the peering initialization handshake. 
+ string PeeringToken = 1; +} + +message InitiateRequest { + // Name of the remote peer. + string PeerName = 1; + + // The peering token returned from the peer's GenerateToken endpoint. + string PeeringToken = 2; + + // these are common fields required for implementing structs.RPCInfo methods + // that are used to forward requests + string Datacenter = 3; + string Token = 4; +} + +message InitiateResponse { + // this is just a placeholder to avoid returning google.protobuf.Empty + // (and consequently gogo.protobuf.types that it will be replaced with) + uint32 Status = 1; +} + +message ReplicationMessage { + oneof Payload { + Request request = 1; + Response response = 2; + Terminated terminated = 3; + } + + // A Request requests to subscribe to a resource of a given type. + message Request { + // An identifier for the peer making the request. + // This identifier is provisioned by the serving peer prior to the request from the dialing peer. + string PeerID = 1; + + // Nonce corresponding to that of the response being ACKed or NACKed. + // Initial subscription requests will have an empty nonce. + // The nonce is generated and incremented by the exporting peer. + string Nonce = 2; + + // The type URL for the resource being requested or ACK/NACKed. + string ResourceURL = 3; + + // The error if the previous response was not applied successfully. + // This field is empty in the first subscription request. + status.Status Error = 4; + } + + // A Response contains resources corresponding to a subscription request. + message Response { + // Nonce identifying a response in a stream. + string Nonce = 1; + + // The type URL of resource being returned. + string ResourceURL = 2; + + // An identifier for the resource being returned. + // This could be the SPIFFE ID of the service. + string ResourceID = 3; + + // The resource being returned. + google.protobuf.Any Resource = 4; + + // Operation enumerates supported operations for replicated resources. 
+ enum Operation { + Unknown = 0; + + // UPSERT represents a create or update event. + UPSERT = 1; + + // DELETE indicates the resource should be deleted. + // In DELETE operations no Resource will be returned. + // Deletion by an importing peer must be done with the type URL and ID. + DELETE = 2; + } + + // REQUIRED. The operation to be performed in relation to the resource. + Operation operation = 5; + } + + // Terminated is sent when a peering is deleted locally. + // This message signals to the peer that they should clean up their local state about the peering. + message Terminated {} +} diff --git a/proto/pbpeering/peering_oss.go b/proto/pbpeering/peering_oss.go new file mode 100644 index 0000000000..d5e5b4a896 --- /dev/null +++ b/proto/pbpeering/peering_oss.go @@ -0,0 +1,16 @@ +//go:build !consulent +// +build !consulent + +package pbpeering + +func (r *GenerateTokenRequest) PartitionOrDefault() string { + return "" +} + +func (p *Peering) PartitionOrDefault() string { + return "" +} + +func (ptb *PeeringTrustBundle) PartitionOrDefault() string { + return "" +} diff --git a/proto/pbpeering/types.go b/proto/pbpeering/types.go new file mode 100644 index 0000000000..3e6b092e2e --- /dev/null +++ b/proto/pbpeering/types.go @@ -0,0 +1,5 @@ +package pbpeering + +const ( + TypeURLService = "type.googleapis.com/consul.api.Service" +) diff --git a/proto/pbservice/healthcheck.gen.go b/proto/pbservice/healthcheck.gen.go index a38fd30c2f..4eef24bef7 100644 --- a/proto/pbservice/healthcheck.gen.go +++ b/proto/pbservice/healthcheck.gen.go @@ -93,6 +93,7 @@ func HealthCheckToStructs(s *HealthCheck, t *structs.HealthCheck) { t.Interval = s.Interval t.Timeout = s.Timeout t.ExposedPort = int(s.ExposedPort) + t.PeerName = s.PeerName if s.Definition != nil { HealthCheckDefinitionToStructs(s.Definition, &t.Definition) } @@ -116,6 +117,7 @@ func HealthCheckFromStructs(t *structs.HealthCheck, s *HealthCheck) { s.Interval = t.Interval s.Timeout = t.Timeout s.ExposedPort = 
int32(t.ExposedPort) + s.PeerName = t.PeerName { var x HealthCheckDefinition HealthCheckDefinitionFromStructs(&t.Definition, &x) diff --git a/proto/pbservice/healthcheck.pb.go b/proto/pbservice/healthcheck.pb.go index d28ed185c3..3620a1aa2b 100644 --- a/proto/pbservice/healthcheck.pb.go +++ b/proto/pbservice/healthcheck.pb.go @@ -59,6 +59,7 @@ type HealthCheck struct { ExposedPort int32 `protobuf:"varint,14,opt,name=ExposedPort,proto3" json:"ExposedPort,omitempty"` Interval string `protobuf:"bytes,15,opt,name=Interval,proto3" json:"Interval,omitempty"` Timeout string `protobuf:"bytes,16,opt,name=Timeout,proto3" json:"Timeout,omitempty"` + PeerName string `protobuf:"bytes,17,opt,name=PeerName,proto3" json:"PeerName,omitempty"` } func (x *HealthCheck) Reset() { @@ -205,6 +206,13 @@ func (x *HealthCheck) GetTimeout() string { return "" } +func (x *HealthCheck) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + type HeaderValue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -802,7 +810,7 @@ var file_proto_pbservice_healthcheck_proto_rawDesc = []byte{ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x96, 0x04, 0x0a, 0x0b, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb2, 0x04, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, @@ -836,149 +844,151 @@ var file_proto_pbservice_healthcheck_proto_rawDesc = []byte{ 0x0a, 0x08, 0x49, 0x6e, 
0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x22, 0x23, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb2, 0x07, 0x0a, 0x15, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, - 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, - 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x12, 0x44, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x65, 0x66, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x12, 0x12, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x44, 0x69, 
0x73, 0x61, 0x62, 0x6c, 0x65, - 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x54, 0x43, 0x50, 0x12, 0x35, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x0d, 0x4f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, - 0x12, 0x33, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x65, 0x6f, 0x75, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, + 0x22, 0x23, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb2, 0x07, 0x0a, 0x15, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, + 0x54, 0x54, 0x50, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, + 0x53, 
0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, + 0x44, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x42, 0x6f, 0x64, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x42, 0x6f, 0x64, + 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x44, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x10, 0x0a, + 0x03, 0x54, 0x43, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12, + 0x35, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x4f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 
0x53, 0x69, 0x7a, 0x65, 0x12, 0x33, 0x0a, 0x07, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, + 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, + 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, + 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, + 0x66, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, + 0x67, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x41, 0x72, 0x67, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x48, 0x32, 0x50, 0x49, + 0x4e, 0x47, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, + 0x12, 0x22, 0x0a, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, + 0x65, 0x54, 0x4c, 0x53, 0x12, 0x12, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x18, 0x0d, 
0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x1e, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, + 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x47, 0x52, + 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x1c, 0x0a, 0x09, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x03, 0x54, 0x54, + 0x4c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x1a, 0x51, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd4, 0x09, 0x0a, 0x09, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x4e, 0x6f, 
0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x4e, + 0x6f, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, + 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x38, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x48, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x42, 0x6f, + 0x64, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2a, + 0x0a, 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x43, + 0x50, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12, 0x35, 0x0a, 0x08, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, + 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x48, 0x32, 0x50, + 0x49, 0x4e, 0x47, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, + 0x47, 0x12, 0x22, 0x0a, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, + 0x53, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, + 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x12, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x1e, 0x0a, 0x0a, 0x47, 0x52, 0x50, + 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x47, + 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 
0x6e, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x54, 0x54, + 0x4c, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x32, 0x0a, 0x14, 0x53, 0x75, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x18, + 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x65, + 0x66, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, 0x34, 0x0a, 0x15, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, + 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x46, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, + 0x67, 0x12, 0x36, 0x0a, 0x16, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, + 0x6f, 0x72, 0x65, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x18, 0x16, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x16, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x72, 0x6f, + 0x78, 0x79, 0x48, 0x54, 0x54, 0x50, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x48, 0x54, 0x54, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, + 0x47, 0x52, 0x50, 0x43, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x72, 0x6f, 0x78, + 0x79, 0x47, 0x52, 0x50, 0x43, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 
0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1e, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x6f, 0x63, 0x6b, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, 0x12, 0x16, 0x0a, 0x06, - 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x48, 0x32, - 0x50, 0x49, 0x4e, 0x47, 0x12, 0x22, 0x0a, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, 0x73, - 0x65, 0x54, 0x4c, 0x53, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x48, 0x32, 0x50, 0x69, - 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x12, 0x0a, 0x04, 0x47, 0x52, 0x50, 0x43, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x1e, 0x0a, 0x0a, - 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x1c, 0x0a, 0x09, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x10, 0x20, 0x01, 
0x28, 0x09, - 0x52, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2b, - 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x1a, 0x51, 0x0a, 0x0b, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd4, - 0x09, 0x0a, 0x09, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x38, 0x0a, 0x06, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, - 0x62, 0x73, 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x79, - 0x70, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, - 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x12, - 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x42, 0x6f, - 0x64, 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x44, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x10, - 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x54, 0x43, 0x50, - 0x12, 0x35, 0x0a, 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x49, - 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x1c, 0x0a, 0x09, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x6f, 0x63, - 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x68, 0x65, 0x6c, 0x6c, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x68, 
0x65, 0x6c, 0x6c, 0x12, 0x16, 0x0a, - 0x06, 0x48, 0x32, 0x50, 0x49, 0x4e, 0x47, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x48, - 0x32, 0x50, 0x49, 0x4e, 0x47, 0x12, 0x22, 0x0a, 0x0c, 0x48, 0x32, 0x50, 0x69, 0x6e, 0x67, 0x55, - 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x48, 0x32, 0x50, - 0x69, 0x6e, 0x67, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x12, 0x0a, 0x04, 0x47, 0x52, 0x50, - 0x43, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x47, 0x52, 0x50, 0x43, 0x12, 0x1e, 0x0a, - 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x55, 0x73, 0x65, 0x54, 0x4c, 0x53, 0x12, 0x24, 0x0a, - 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x1b, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x4c, 0x53, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x54, 0x4c, 0x53, 0x53, - 0x6b, 0x69, 0x70, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2b, - 0x0a, 0x03, 0x54, 0x54, 0x4c, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x54, 0x54, 0x4c, 0x12, 0x32, 0x0a, 0x14, 0x53, - 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x73, 0x73, - 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, 0x53, 0x75, 0x63, 0x63, 0x65, - 
0x73, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x12, - 0x34, 0x0a, 0x15, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, - 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, - 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x57, 0x61, - 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x16, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, - 0x73, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x18, - 0x16, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x42, - 0x65, 0x66, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x12, 0x1c, 0x0a, - 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x48, 0x54, 0x54, 0x50, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x48, 0x54, 0x54, 0x50, 0x12, 0x1c, 0x0a, 0x09, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x47, 0x52, 0x50, 0x43, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x50, 0x72, 0x6f, 0x78, 0x79, 0x47, 0x52, 0x50, 0x43, 0x12, 0x61, 0x0a, 0x1e, 0x44, 0x65, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1e, 0x44, 0x65, - 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x69, 0x74, 0x69, 0x63, 0x61, 0x6c, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, - 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x19, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, - 0x7a, 0x65, 0x1a, 0x51, 0x0a, 0x0b, 
0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, - 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x63, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0d, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x51, + 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git 
a/proto/pbservice/healthcheck.proto b/proto/pbservice/healthcheck.proto index 67629ba98d..afda5dc255 100644 --- a/proto/pbservice/healthcheck.proto +++ b/proto/pbservice/healthcheck.proto @@ -40,6 +40,7 @@ message HealthCheck { string Interval = 15; string Timeout = 16; + string PeerName = 17; } message HeaderValue { diff --git a/proto/pbservice/node.gen.go b/proto/pbservice/node.gen.go index cadf2c7e96..f231ea836a 100644 --- a/proto/pbservice/node.gen.go +++ b/proto/pbservice/node.gen.go @@ -13,6 +13,7 @@ func NodeToStructs(s *Node, t *structs.Node) { t.Address = s.Address t.Datacenter = s.Datacenter t.Partition = s.Partition + t.PeerName = s.PeerName t.TaggedAddresses = s.TaggedAddresses t.Meta = s.Meta t.RaftIndex = RaftIndexToStructs(s.RaftIndex) @@ -26,6 +27,7 @@ func NodeFromStructs(t *structs.Node, s *Node) { s.Address = t.Address s.Datacenter = t.Datacenter s.Partition = t.Partition + s.PeerName = t.PeerName s.TaggedAddresses = t.TaggedAddresses s.Meta = t.Meta s.RaftIndex = NewRaftIndexFromStructs(t.RaftIndex) @@ -53,6 +55,7 @@ func NodeServiceToStructs(s *NodeService, t *structs.NodeService) { } t.LocallyRegisteredAsSidecar = s.LocallyRegisteredAsSidecar t.EnterpriseMeta = EnterpriseMetaToStructs(s.EnterpriseMeta) + t.PeerName = s.PeerName t.RaftIndex = RaftIndexToStructs(s.RaftIndex) } func NodeServiceFromStructs(t *structs.NodeService, s *NodeService) { @@ -82,5 +85,6 @@ func NodeServiceFromStructs(t *structs.NodeService, s *NodeService) { } s.LocallyRegisteredAsSidecar = t.LocallyRegisteredAsSidecar s.EnterpriseMeta = NewEnterpriseMetaFromStructs(t.EnterpriseMeta) + s.PeerName = t.PeerName s.RaftIndex = NewRaftIndexFromStructs(t.RaftIndex) } diff --git a/proto/pbservice/node.pb.binary.go b/proto/pbservice/node.pb.binary.go index b7533436fb..f1e2bec68c 100644 --- a/proto/pbservice/node.pb.binary.go +++ b/proto/pbservice/node.pb.binary.go @@ -7,6 +7,16 @@ import ( "github.com/golang/protobuf/proto" ) +// MarshalBinary implements encoding.BinaryMarshaler 
+func (msg *IndexedCheckServiceNodes) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *IndexedCheckServiceNodes) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} + // MarshalBinary implements encoding.BinaryMarshaler func (msg *CheckServiceNode) MarshalBinary() ([]byte, error) { return proto.Marshal(msg) diff --git a/proto/pbservice/node.pb.go b/proto/pbservice/node.pb.go index 44340c9aa6..1e76b0e0dd 100644 --- a/proto/pbservice/node.pb.go +++ b/proto/pbservice/node.pb.go @@ -26,6 +26,62 @@ const ( // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 +// IndexedCheckServiceNodes is used to return multiple instances for a given service. +type IndexedCheckServiceNodes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Index uint64 `protobuf:"varint,1,opt,name=Index,proto3" json:"Index,omitempty"` + Nodes []*CheckServiceNode `protobuf:"bytes,2,rep,name=Nodes,proto3" json:"Nodes,omitempty"` +} + +func (x *IndexedCheckServiceNodes) Reset() { + *x = IndexedCheckServiceNodes{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbservice_node_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IndexedCheckServiceNodes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IndexedCheckServiceNodes) ProtoMessage() {} + +func (x *IndexedCheckServiceNodes) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbservice_node_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IndexedCheckServiceNodes.ProtoReflect.Descriptor instead. 
+func (*IndexedCheckServiceNodes) Descriptor() ([]byte, []int) { + return file_proto_pbservice_node_proto_rawDescGZIP(), []int{0} +} + +func (x *IndexedCheckServiceNodes) GetIndex() uint64 { + if x != nil { + return x.Index + } + return 0 +} + +func (x *IndexedCheckServiceNodes) GetNodes() []*CheckServiceNode { + if x != nil { + return x.Nodes + } + return nil +} + // CheckServiceNode is used to provide the node, its service // definition, as well as a HealthCheck that is associated. type CheckServiceNode struct { @@ -41,7 +97,7 @@ type CheckServiceNode struct { func (x *CheckServiceNode) Reset() { *x = CheckServiceNode{} if protoimpl.UnsafeEnabled { - mi := &file_proto_pbservice_node_proto_msgTypes[0] + mi := &file_proto_pbservice_node_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -54,7 +110,7 @@ func (x *CheckServiceNode) String() string { func (*CheckServiceNode) ProtoMessage() {} func (x *CheckServiceNode) ProtoReflect() protoreflect.Message { - mi := &file_proto_pbservice_node_proto_msgTypes[0] + mi := &file_proto_pbservice_node_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -67,7 +123,7 @@ func (x *CheckServiceNode) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckServiceNode.ProtoReflect.Descriptor instead. 
func (*CheckServiceNode) Descriptor() ([]byte, []int) { - return file_proto_pbservice_node_proto_rawDescGZIP(), []int{0} + return file_proto_pbservice_node_proto_rawDescGZIP(), []int{1} } func (x *CheckServiceNode) GetNode() *Node { @@ -107,6 +163,7 @@ type Node struct { ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` Node string `protobuf:"bytes,2,opt,name=Node,proto3" json:"Node,omitempty"` Partition string `protobuf:"bytes,8,opt,name=Partition,proto3" json:"Partition,omitempty"` + PeerName string `protobuf:"bytes,9,opt,name=PeerName,proto3" json:"PeerName,omitempty"` Address string `protobuf:"bytes,3,opt,name=Address,proto3" json:"Address,omitempty"` Datacenter string `protobuf:"bytes,4,opt,name=Datacenter,proto3" json:"Datacenter,omitempty"` TaggedAddresses map[string]string `protobuf:"bytes,5,rep,name=TaggedAddresses,proto3" json:"TaggedAddresses,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -118,7 +175,7 @@ type Node struct { func (x *Node) Reset() { *x = Node{} if protoimpl.UnsafeEnabled { - mi := &file_proto_pbservice_node_proto_msgTypes[1] + mi := &file_proto_pbservice_node_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -131,7 +188,7 @@ func (x *Node) String() string { func (*Node) ProtoMessage() {} func (x *Node) ProtoReflect() protoreflect.Message { - mi := &file_proto_pbservice_node_proto_msgTypes[1] + mi := &file_proto_pbservice_node_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -144,7 +201,7 @@ func (x *Node) ProtoReflect() protoreflect.Message { // Deprecated: Use Node.ProtoReflect.Descriptor instead. 
func (*Node) Descriptor() ([]byte, []int) { - return file_proto_pbservice_node_proto_rawDescGZIP(), []int{1} + return file_proto_pbservice_node_proto_rawDescGZIP(), []int{2} } func (x *Node) GetID() string { @@ -168,6 +225,13 @@ func (x *Node) GetPartition() string { return "" } +func (x *Node) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + func (x *Node) GetAddress() string { if x != nil { return x.Address @@ -267,6 +331,7 @@ type NodeService struct { LocallyRegisteredAsSidecar bool `protobuf:"varint,13,opt,name=LocallyRegisteredAsSidecar,proto3" json:"LocallyRegisteredAsSidecar,omitempty"` // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,16,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"` + PeerName string `protobuf:"bytes,18,opt,name=PeerName,proto3" json:"PeerName,omitempty"` // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,14,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"` } @@ -274,7 +339,7 @@ type NodeService struct { func (x *NodeService) Reset() { *x = NodeService{} if protoimpl.UnsafeEnabled { - mi := &file_proto_pbservice_node_proto_msgTypes[2] + mi := &file_proto_pbservice_node_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -287,7 +352,7 @@ func (x *NodeService) String() string { func (*NodeService) ProtoMessage() {} func (x *NodeService) ProtoReflect() protoreflect.Message { - mi := &file_proto_pbservice_node_proto_msgTypes[2] + mi := &file_proto_pbservice_node_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -300,7 +365,7 @@ func (x *NodeService) ProtoReflect() protoreflect.Message { // Deprecated: Use NodeService.ProtoReflect.Descriptor instead. 
func (*NodeService) Descriptor() ([]byte, []int) { - return file_proto_pbservice_node_proto_rawDescGZIP(), []int{2} + return file_proto_pbservice_node_proto_rawDescGZIP(), []int{3} } func (x *NodeService) GetKind() string { @@ -408,6 +473,13 @@ func (x *NodeService) GetEnterpriseMeta() *pbcommon.EnterpriseMeta { return nil } +func (x *NodeService) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + func (x *NodeService) GetRaftIndex() *pbcommon.RaftIndex { if x != nil { return x.RaftIndex @@ -426,100 +498,110 @@ var file_proto_pbservice_node_proto_rawDesc = []byte{ 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x99, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x04, 0x4e, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x4e, 0x6f, 0x64, 0x65, - 0x12, 0x30, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, - 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x48, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x06, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x22, 0xaf, 0x03, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x49, - 0x44, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x4e, - 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, - 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, 0x63, - 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, 0x74, - 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, - 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, - 0x65, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, - 0x64, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, 0x61, - 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x67, 0x67, 0x65, - 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, 0x4d, - 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc9, 0x06, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x12, 0x55, 0x0a, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x70, 0x62, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 
0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, - 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x50, 0x6f, 0x72, - 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, - 0x68, 0x12, 0x2c, 0x0a, 0x07, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x57, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x52, 0x07, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x12, - 0x2c, 0x0a, 0x11, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x67, 0x4f, 0x76, 0x65, 0x72, - 0x72, 0x69, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x45, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x54, 0x61, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x33, 0x0a, - 0x05, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, - 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x50, 0x72, 0x6f, - 0x78, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x07, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x3e, 0x0a, 0x1a, 0x4c, 0x6f, 0x63, 0x61, 0x6c, - 0x6c, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, 0x73, 0x53, 0x69, - 0x64, 0x65, 0x63, 0x61, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x67, 
0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, 0x73, - 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x12, 0x3e, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x16, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x18, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, + 0x64, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, + 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x31, 0x0a, 0x05, 0x4e, 0x6f, 0x64, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x10, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, + 0x12, 0x23, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, + 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x30, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x43, 0x68, 
0x65, 0x63, 0x6b, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, + 0x06, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0xcb, 0x03, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, + 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, + 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x61, 0x74, 0x61, + 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x44, 0x61, + 0x74, 0x61, 0x63, 0x65, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0f, 0x54, 0x61, 0x67, 0x67, + 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x04, 
0x4d, 0x65, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, - 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x5d, 0x0a, 0x14, 0x54, 0x61, 0x67, 0x67, + 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, - 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, + 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe5, 0x06, 0x0a, 0x0b, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x12, 0x55, 0x0a, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x70, 0x62, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, + 0x0a, 0x04, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x50, 0x6f, + 0x72, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x53, 0x6f, 0x63, 
0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x2c, 0x0a, 0x07, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x52, 0x07, 0x57, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, + 0x12, 0x2c, 0x0a, 0x11, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x67, 0x4f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x54, 0x61, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x33, + 0x0a, 0x05, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x50, 0x72, + 0x6f, 0x78, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, + 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x3e, 0x0a, 0x1a, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, 0x73, 0x53, + 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x4c, 0x6f, + 0x63, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x41, + 0x73, 0x53, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x12, 0x3e, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72, 
0x70, + 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, 0x61, 0x66, 0x74, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x5d, 0x0a, 0x14, 0x54, 0x61, 0x67, 0x67, 0x65, 0x64, 0x41, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2d, 0x5a, + 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -534,43 +616,45 @@ func file_proto_pbservice_node_proto_rawDescGZIP() []byte { return 
file_proto_pbservice_node_proto_rawDescData } -var file_proto_pbservice_node_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_proto_pbservice_node_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_proto_pbservice_node_proto_goTypes = []interface{}{ - (*CheckServiceNode)(nil), // 0: pbservice.CheckServiceNode - (*Node)(nil), // 1: pbservice.Node - (*NodeService)(nil), // 2: pbservice.NodeService - nil, // 3: pbservice.Node.TaggedAddressesEntry - nil, // 4: pbservice.Node.MetaEntry - nil, // 5: pbservice.NodeService.TaggedAddressesEntry - nil, // 6: pbservice.NodeService.MetaEntry - (*HealthCheck)(nil), // 7: pbservice.HealthCheck - (*pbcommon.RaftIndex)(nil), // 8: common.RaftIndex - (*Weights)(nil), // 9: pbservice.Weights - (*ConnectProxyConfig)(nil), // 10: pbservice.ConnectProxyConfig - (*ServiceConnect)(nil), // 11: pbservice.ServiceConnect - (*pbcommon.EnterpriseMeta)(nil), // 12: common.EnterpriseMeta - (*ServiceAddress)(nil), // 13: pbservice.ServiceAddress + (*IndexedCheckServiceNodes)(nil), // 0: pbservice.IndexedCheckServiceNodes + (*CheckServiceNode)(nil), // 1: pbservice.CheckServiceNode + (*Node)(nil), // 2: pbservice.Node + (*NodeService)(nil), // 3: pbservice.NodeService + nil, // 4: pbservice.Node.TaggedAddressesEntry + nil, // 5: pbservice.Node.MetaEntry + nil, // 6: pbservice.NodeService.TaggedAddressesEntry + nil, // 7: pbservice.NodeService.MetaEntry + (*HealthCheck)(nil), // 8: pbservice.HealthCheck + (*pbcommon.RaftIndex)(nil), // 9: common.RaftIndex + (*Weights)(nil), // 10: pbservice.Weights + (*ConnectProxyConfig)(nil), // 11: pbservice.ConnectProxyConfig + (*ServiceConnect)(nil), // 12: pbservice.ServiceConnect + (*pbcommon.EnterpriseMeta)(nil), // 13: common.EnterpriseMeta + (*ServiceAddress)(nil), // 14: pbservice.ServiceAddress } var file_proto_pbservice_node_proto_depIdxs = []int32{ - 1, // 0: pbservice.CheckServiceNode.Node:type_name -> pbservice.Node - 2, // 1: pbservice.CheckServiceNode.Service:type_name -> 
pbservice.NodeService - 7, // 2: pbservice.CheckServiceNode.Checks:type_name -> pbservice.HealthCheck - 3, // 3: pbservice.Node.TaggedAddresses:type_name -> pbservice.Node.TaggedAddressesEntry - 4, // 4: pbservice.Node.Meta:type_name -> pbservice.Node.MetaEntry - 8, // 5: pbservice.Node.RaftIndex:type_name -> common.RaftIndex - 5, // 6: pbservice.NodeService.TaggedAddresses:type_name -> pbservice.NodeService.TaggedAddressesEntry - 6, // 7: pbservice.NodeService.Meta:type_name -> pbservice.NodeService.MetaEntry - 9, // 8: pbservice.NodeService.Weights:type_name -> pbservice.Weights - 10, // 9: pbservice.NodeService.Proxy:type_name -> pbservice.ConnectProxyConfig - 11, // 10: pbservice.NodeService.Connect:type_name -> pbservice.ServiceConnect - 12, // 11: pbservice.NodeService.EnterpriseMeta:type_name -> common.EnterpriseMeta - 8, // 12: pbservice.NodeService.RaftIndex:type_name -> common.RaftIndex - 13, // 13: pbservice.NodeService.TaggedAddressesEntry.value:type_name -> pbservice.ServiceAddress - 14, // [14:14] is the sub-list for method output_type - 14, // [14:14] is the sub-list for method input_type - 14, // [14:14] is the sub-list for extension type_name - 14, // [14:14] is the sub-list for extension extendee - 0, // [0:14] is the sub-list for field type_name + 1, // 0: pbservice.IndexedCheckServiceNodes.Nodes:type_name -> pbservice.CheckServiceNode + 2, // 1: pbservice.CheckServiceNode.Node:type_name -> pbservice.Node + 3, // 2: pbservice.CheckServiceNode.Service:type_name -> pbservice.NodeService + 8, // 3: pbservice.CheckServiceNode.Checks:type_name -> pbservice.HealthCheck + 4, // 4: pbservice.Node.TaggedAddresses:type_name -> pbservice.Node.TaggedAddressesEntry + 5, // 5: pbservice.Node.Meta:type_name -> pbservice.Node.MetaEntry + 9, // 6: pbservice.Node.RaftIndex:type_name -> common.RaftIndex + 6, // 7: pbservice.NodeService.TaggedAddresses:type_name -> pbservice.NodeService.TaggedAddressesEntry + 7, // 8: pbservice.NodeService.Meta:type_name -> 
pbservice.NodeService.MetaEntry + 10, // 9: pbservice.NodeService.Weights:type_name -> pbservice.Weights + 11, // 10: pbservice.NodeService.Proxy:type_name -> pbservice.ConnectProxyConfig + 12, // 11: pbservice.NodeService.Connect:type_name -> pbservice.ServiceConnect + 13, // 12: pbservice.NodeService.EnterpriseMeta:type_name -> common.EnterpriseMeta + 9, // 13: pbservice.NodeService.RaftIndex:type_name -> common.RaftIndex + 14, // 14: pbservice.NodeService.TaggedAddressesEntry.value:type_name -> pbservice.ServiceAddress + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_proto_pbservice_node_proto_init() } @@ -582,7 +666,7 @@ func file_proto_pbservice_node_proto_init() { file_proto_pbservice_service_proto_init() if !protoimpl.UnsafeEnabled { file_proto_pbservice_node_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckServiceNode); i { + switch v := v.(*IndexedCheckServiceNodes); i { case 0: return &v.state case 1: @@ -594,7 +678,7 @@ func file_proto_pbservice_node_proto_init() { } } file_proto_pbservice_node_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Node); i { + switch v := v.(*CheckServiceNode); i { case 0: return &v.state case 1: @@ -606,6 +690,18 @@ func file_proto_pbservice_node_proto_init() { } } file_proto_pbservice_node_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Node); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_pbservice_node_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*NodeService); i { case 0: return &v.state @@ -624,7 +720,7 @@ func 
file_proto_pbservice_node_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_proto_pbservice_node_proto_rawDesc, NumEnums: 0, - NumMessages: 7, + NumMessages: 8, NumExtensions: 0, NumServices: 0, }, diff --git a/proto/pbservice/node.proto b/proto/pbservice/node.proto index 4b83893520..eb55ac2e35 100644 --- a/proto/pbservice/node.proto +++ b/proto/pbservice/node.proto @@ -8,6 +8,12 @@ import "proto/pbcommon/common.proto"; import "proto/pbservice/healthcheck.proto"; import "proto/pbservice/service.proto"; +// IndexedCheckServiceNodes is used to return multiple instances for a given service. +message IndexedCheckServiceNodes { + uint64 Index = 1; + repeated CheckServiceNode Nodes = 2; +} + // CheckServiceNode is used to provide the node, its service // definition, as well as a HealthCheck that is associated. message CheckServiceNode { @@ -29,6 +35,7 @@ message Node { string Node = 2; string Partition = 8; + string PeerName = 9; string Address = 3; string Datacenter = 4; map TaggedAddresses = 5; @@ -105,6 +112,8 @@ message NodeService { // mog: func-to=EnterpriseMetaToStructs func-from=NewEnterpriseMetaFromStructs common.EnterpriseMeta EnterpriseMeta = 16; + string PeerName = 18; + // mog: func-to=RaftIndexToStructs func-from=NewRaftIndexFromStructs common.RaftIndex RaftIndex = 14; } diff --git a/proto/pbstatus/status.pb.binary.go b/proto/pbstatus/status.pb.binary.go new file mode 100644 index 0000000000..f1dcfbf66b --- /dev/null +++ b/proto/pbstatus/status.pb.binary.go @@ -0,0 +1,18 @@ +// Code generated by protoc-gen-go-binary. DO NOT EDIT. 
+// source: proto/pbstatus/status.proto + +package pbstatus + +import ( + "github.com/golang/protobuf/proto" +) + +// MarshalBinary implements encoding.BinaryMarshaler +func (msg *Status) MarshalBinary() ([]byte, error) { + return proto.Marshal(msg) +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler +func (msg *Status) UnmarshalBinary(b []byte) error { + return proto.Unmarshal(b, msg) +} diff --git a/proto/pbstatus/status.pb.go b/proto/pbstatus/status.pb.go new file mode 100644 index 0000000000..0b56e62b6e --- /dev/null +++ b/proto/pbstatus/status.pb.go @@ -0,0 +1,204 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.15.8 +// source: proto/pbstatus/status.proto + +package pbstatus + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
+ Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_pbstatus_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_proto_pbstatus_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_proto_pbstatus_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_proto_pbstatus_status_proto protoreflect.FileDescriptor + +var file_proto_pbstatus_status_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x54, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, + 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_pbstatus_status_proto_rawDescOnce sync.Once + file_proto_pbstatus_status_proto_rawDescData = file_proto_pbstatus_status_proto_rawDesc +) + +func file_proto_pbstatus_status_proto_rawDescGZIP() []byte { + file_proto_pbstatus_status_proto_rawDescOnce.Do(func() { + file_proto_pbstatus_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_pbstatus_status_proto_rawDescData) + }) + return file_proto_pbstatus_status_proto_rawDescData +} + +var file_proto_pbstatus_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_proto_pbstatus_status_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: status.Status + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_proto_pbstatus_status_proto_depIdxs = []int32{ + 1, // 0: status.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, 
// [0:1] is the sub-list for field type_name +} + +func init() { file_proto_pbstatus_status_proto_init() } +func file_proto_pbstatus_status_proto_init() { + if File_proto_pbstatus_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_pbstatus_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_pbstatus_status_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_pbstatus_status_proto_goTypes, + DependencyIndexes: file_proto_pbstatus_status_proto_depIdxs, + MessageInfos: file_proto_pbstatus_status_proto_msgTypes, + }.Build() + File_proto_pbstatus_status_proto = out.File + file_proto_pbstatus_status_proto_rawDesc = nil + file_proto_pbstatus_status_proto_goTypes = nil + file_proto_pbstatus_status_proto_depIdxs = nil +} diff --git a/proto/pbstatus/status.proto b/proto/pbstatus/status.proto new file mode 100644 index 0000000000..eefc29f62f --- /dev/null +++ b/proto/pbstatus/status.proto @@ -0,0 +1,47 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package status; +option go_package = "github.com/hashicorp/consul/proto/pbstatus"; + +import "google/protobuf/any.proto"; + +option cc_enable_arenas = true; +option java_multiple_files = true; +option java_outer_classname = "StatusProto"; +option java_package = "com.google.rpc"; +option objc_class_prefix = "RPC"; + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +message Status { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + int32 code = 1; + + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + string message = 2; + + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
+ repeated google.protobuf.Any details = 3; +} \ No newline at end of file diff --git a/proto/pbsubscribe/subscribe.go b/proto/pbsubscribe/subscribe.go index 961c3c1cdb..05a34a0f03 100644 --- a/proto/pbsubscribe/subscribe.go +++ b/proto/pbsubscribe/subscribe.go @@ -29,5 +29,10 @@ func (req *SubscribeRequest) SetTokenSecret(token string) { // HasTimedOut implements structs.RPCInfo func (req *SubscribeRequest) HasTimedOut(start time.Time, rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) (bool, error) { - return time.Since(start) > rpcHoldTimeout, nil + return time.Since(start) > req.Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime), nil +} + +// Timeout implements structs.RPCInfo +func (req *SubscribeRequest) Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) time.Duration { + return rpcHoldTimeout } diff --git a/proto/pbsubscribe/subscribe.pb.go b/proto/pbsubscribe/subscribe.pb.go index 851991ed01..00245d0243 100644 --- a/proto/pbsubscribe/subscribe.pb.go +++ b/proto/pbsubscribe/subscribe.pb.go @@ -170,6 +170,8 @@ type SubscribeRequest struct { // // Partition is an enterprise-only feature. Partition string `protobuf:"bytes,7,opt,name=Partition,proto3" json:"Partition,omitempty"` + // TODO(peering): docs + PeerName string `protobuf:"bytes,8,opt,name=PeerName,proto3" json:"PeerName,omitempty"` } func (x *SubscribeRequest) Reset() { @@ -253,6 +255,13 @@ func (x *SubscribeRequest) GetPartition() string { return "" } +func (x *SubscribeRequest) GetPeerName() string { + if x != nil { + return x.PeerName + } + return "" +} + // Event describes a streaming update on a subscription. Events are used both to // describe the current "snapshot" of the result as well as ongoing mutations to // that snapshot. 
@@ -501,7 +510,7 @@ var file_proto_pbsubscribe_subscribe_proto_rawDesc = []byte{ 0x69, 0x62, 0x65, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x1a, 0x1a, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, - 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd4, 0x01, 0x0a, 0x10, 0x53, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf0, 0x01, 0x0a, 0x10, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, @@ -515,51 +524,53 @@ var file_proto_pbsubscribe_subscribe_proto_rawDesc = []byte{ 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x85, 0x02, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x12, 0x26, 0x0a, 0x0d, 0x45, 0x6e, 0x64, 0x4f, 0x66, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x45, 0x6e, 0x64, 0x4f, - 0x66, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x32, 0x0a, 0x13, 0x4e, 0x65, 0x77, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x6f, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x4e, 0x65, 0x77, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x6f, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 
0x37, 0x0a, - 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x0a, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, - 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x42, 0x09, - 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x36, 0x0a, 0x0a, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x28, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, - 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x73, 0x22, 0x84, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x02, 0x4f, 0x70, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, - 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x52, 0x02, 0x4f, 0x70, 0x12, - 0x47, 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, - 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 
0x4e, 0x6f, 0x64, 0x65, 0x2a, 0x41, 0x0a, 0x05, 0x54, 0x6f, 0x70, 0x69, - 0x63, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x11, - 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x10, - 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x10, 0x02, 0x2a, 0x29, 0x0a, 0x09, 0x43, - 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x10, 0x01, 0x32, 0x59, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x3e, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x1b, - 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, - 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x75, - 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, - 0x01, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, - 0x62, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x85, 0x02, + 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x26, 0x0a, + 
0x0d, 0x45, 0x6e, 0x64, 0x4f, 0x66, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0d, 0x45, 0x6e, 0x64, 0x4f, 0x66, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x32, 0x0a, 0x13, 0x4e, 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x54, 0x6f, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x4e, 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x54, 0x6f, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x37, 0x0a, 0x0a, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x46, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x50, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x36, 0x0a, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, + 0x74, 0x63, 0x68, 0x12, 0x28, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x84, 0x01, + 0x0a, 0x13, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x02, 0x4f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x14, 0x2e, 0x73, 0x75, 
0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x43, 0x61, + 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4f, 0x70, 0x52, 0x02, 0x4f, 0x70, 0x12, 0x47, 0x0a, 0x10, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x62, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4e, 0x6f, 0x64, 0x65, 0x2a, 0x41, 0x0a, 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x0b, 0x0a, + 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x10, 0x01, 0x12, 0x18, 0x0a, + 0x14, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x10, 0x02, 0x2a, 0x29, 0x0a, 0x09, 0x43, 0x61, 0x74, 0x61, 0x6c, + 0x6f, 0x67, 0x4f, 0x70, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x65, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, + 0x10, 0x01, 0x32, 0x59, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, + 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x12, 0x1b, 0x2e, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x62, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x00, 0x30, 0x01, 0x42, 0x2f, 0x5a, + 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, + 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 
0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto/pbsubscribe/subscribe.proto b/proto/pbsubscribe/subscribe.proto index a860b874bd..be98a6f7df 100644 --- a/proto/pbsubscribe/subscribe.proto +++ b/proto/pbsubscribe/subscribe.proto @@ -84,6 +84,9 @@ message SubscribeRequest { // // Partition is an enterprise-only feature. string Partition = 7; + + // TODO(peering): docs + string PeerName = 8; } // Event describes a streaming update on a subscription. Events are used both to diff --git a/sdk/go.mod b/sdk/go.mod index 6d543bcecc..18b289a0e8 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -11,7 +11,7 @@ require ( github.com/mattn/go-isatty v0.0.12 // indirect github.com/pkg/errors v0.8.1 github.com/stretchr/testify v1.4.0 - golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 + golang.org/x/sys v0.0.0-20220412211240-33da011f77ad gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/yaml.v2 v2.2.8 // indirect ) diff --git a/sdk/go.sum b/sdk/go.sum index b9840a6414..94ec34ba4f 100644 --- a/sdk/go.sum +++ b/sdk/go.sum @@ -34,8 +34,8 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/terraform/README.md b/terraform/README.md deleted file mode 100644 index 3b503b209e..0000000000 --- a/terraform/README.md +++ /dev/null @@ -1,14 +0,0 @@ -These Terraform modules were removed from GitHub in [GH-5085](https://github.com/hashicorp/consul/pull/5085). - -These are not currently being maintained and tested, and were created prior to the existence of the Terraform Module Registry, which is the more appropriate way to share and distribute modules. - -In an effort to limit confusion of the purpose of these modules and not encourage usage of something we aren't confident about, this removes them from this repository. - -You can still access these modules if you depend on them by pinning to a specific ref in Git. 
It is recommended you pin against a recent major version where these modules existed: - -module "consul-aws" { - source = "git::https://github.com/hashicorp/consul.git//terraform/aws?ref=v1.4.0" -} -More detail about module sources can be found on this page: - -https://www.terraform.io/docs/modules/sources.html \ No newline at end of file diff --git a/test/ca/certindex b/test/ca/certindex index 8c478a407a..4610614a1c 100644 --- a/test/ca/certindex +++ b/test/ca/certindex @@ -5,3 +5,4 @@ V 160526220537Z 0D unknown /CN=test.internal/ST=CA/C=US/emailAddress=test@inter V 170604185910Z 0E unknown /CN=testco.internal/ST=California/C=US/emailAddress=test@testco.com/O=Hashicorp Test Cert/OU=Beta V 180606021919Z 0F unknown /CN=testco.internal/ST=California/C=US/emailAddress=james@hashicorp.com/O=End Point/OU=Testing V 21180418091009Z 10 unknown /CN=testco.internal/ST=California/C=US/emailAddress=james@hashicorp.com/O=End Point/OU=Testing +V 21220322142538Z 11 unknown /CN=testco.internal/ST=California/C=US/emailAddress=do-not-reply@hashicorp.com/O=End Point/OU=Testing diff --git a/test/ca/myca.conf b/test/ca/myca.conf index 067beb240a..810e06ba3c 100644 --- a/test/ca/myca.conf +++ b/test/ca/myca.conf @@ -13,7 +13,7 @@ database = certindex private_key = privkey.pem serial = serialfile default_days = 36500 -default_md = sha1 +default_md = sha512 policy = myca_policy x509_extensions = myca_extensions diff --git a/test/ca/serialfile b/test/ca/serialfile index b4de394767..48082f72f0 100644 --- a/test/ca/serialfile +++ b/test/ca/serialfile @@ -1 +1 @@ -11 +12 diff --git a/test/hostname/Alice.cfg b/test/hostname/Alice.cfg index 6bba3002dd..dcee8e5f2a 100644 --- a/test/hostname/Alice.cfg +++ b/test/hostname/Alice.cfg @@ -9,7 +9,7 @@ ST = California L = Los Angeles O = End Point OU = Testing -emailAddress = james@hashicorp.com +emailAddress = do-not-reply@hashicorp.com CN = Alice [v3_req] diff --git a/test/hostname/Alice.crt b/test/hostname/Alice.crt index 074e8b5b4b..42d54de0f4 
100644 --- a/test/hostname/Alice.crt +++ b/test/hostname/Alice.crt @@ -1,23 +1,23 @@ -----BEGIN CERTIFICATE----- -MIIDyzCCArOgAwIBAgIBGjANBgkqhkiG9w0BAQUFADCBmTELMAkGA1UEBhMCVVMx +MIID0zCCArugAwIBAgIBLjANBgkqhkiG9w0BAQ0FADCBmTELMAkGA1UEBhMCVVMx EzARBgNVBAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC0xvcyBBbmdlbGVzMRkwFwYD VQQKExBIYWhpQ29ycCBUZXN0IENBMQ0wCwYDVQQLEwRUZXN0MREwDwYDVQQDEwhD -ZXJ0QXV0aDEiMCAGCSqGSIb3DQEJARYTamFtZXNAaGFzaGljb3JwLmNvbTAgFw0x -ODA1MTIwOTA0MzJaGA8yMTE4MDQxODA5MDQzMlowfDEOMAwGA1UEAwwFQWxpY2Ux -EzARBgNVBAgMCkNhbGlmb3JuaWExCzAJBgNVBAYTAlVTMSIwIAYJKoZIhvcNAQkB -FhNqYW1lc0BoYXNoaWNvcnAuY29tMRIwEAYDVQQKDAlFbmQgUG9pbnQxEDAOBgNV -BAsMB1Rlc3RpbmcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDjzkhi -7DQSMX6CBeIJtX3K508fTlvNxs9gYKMGIybyTrWSc5gT76QA7ntnETpcParyoF7K -N7LJnmTZr9uYOxJ9ZkYHzeAoBVbYjvm2jgMt8lTHwqept0ASIYhhe1RBhkIJH9eN -hoY6LgYefelj/leTYu55TUGfPD0kRNs4bG5XCl8TFbACOxKKdcY3uZQTaOXYl/Uv -Nl2Pp9h3v72/WL680Y9kGnmU9wcvBU5RewOTZKtdGe6y3hRmYz16nKxo733KH5Px -RDy2GyJ9mKC7QiyL8TYc7BRSp9FePeAXx5RQOYTL6Z5pgirwOnZkiWyaKBud9T5t -FxeT9QJdd1NsAURdAgMBAAGjODA2MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMBwG -A1UdEQQVMBOCEXNlcnZlci5kYzEuY29uc3VsMA0GCSqGSIb3DQEBBQUAA4IBAQBN -xFFMhWl2UtZYrQ5f3GrqTRncoe/oDqXxuAiiBRDo3Gz/XDkz9aFwwK2z7rjaYVrQ -8ZksrA4T/Zr5nGCXCpFjVMzw3eFRWqWbGRFi/nfcifvk5EW7uobT84SOYQ5jrv6y -3kmsd6f2pnYKgWEX7J94XVIE/BeVSHZMHephrK6KC3Gdy66xNk6othKymY6veNxn -70qQbw0yRrud6svdPNmD6GCauz2i3blb7xW1FZMrJqtN0Mw5W2QHMyS1MQFeSeaC -TDv/Os3tocLFtdsoLAECLAqYAL9wAvvm8eNNOWPnFpy644lE2uLupWB8z5m0GbGp -utZXHATEkmGoFKC+dNml +ZXJ0QXV0aDEiMCAGCSqGSIb3DQEJARYTamFtZXNAaGFzaGljb3JwLmNvbTAgFw0y +MjA0MTQyMjQxNTBaGA8yMTIyMDMyMTIyNDE1MFowgYMxDjAMBgNVBAMMBUFsaWNl +MRMwEQYDVQQIDApDYWxpZm9ybmlhMQswCQYDVQQGEwJVUzEpMCcGCSqGSIb3DQEJ +ARYaZG8tbm90LXJlcGx5QGhhc2hpY29ycC5jb20xEjAQBgNVBAoMCUVuZCBQb2lu +dDEQMA4GA1UECwwHVGVzdGluZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAMadRkCC0SC4FcOGn+Y7L4lTyZpjywbgim081BdWXEQCG/gPlcxZGQEPn2ZA +cglvuNbRlsDYqDs88cn9Nm9xWRJOh3x79erN/1k8AChNSj69nvTzg3cBUYx3Tz8I 
+5MvuG1XBp6cOK7cyUizllQVF1YX3vM5wZSP3hEe8jxGpMxS6+cKh2MHbhDNtUV78 +t7VFiDqvkT4H85VIiHyBTzK/1lMmHed820Aam8b8b0WSsdVFUZZcxUKuaKAqg6Np +YQE49IPMGJ8zidVZhEm/vZP1K9+uNJSq4mnClFBua+06Z6F7gj2MjGyNn6MlMOKs +EMAmntg4jgm/DznSng0t95XtVKUCAwEAAaM4MDYwCQYDVR0TBAIwADALBgNVHQ8E +BAMCBeAwHAYDVR0RBBUwE4IRc2VydmVyLmRjMS5jb25zdWwwDQYJKoZIhvcNAQEN +BQADggEBAED+jxV3/dWdqUF4O6J0MbJ0i60XRpFHvP9W7ukt8L+cMgVsWTqWPt+d +819gp0L+OAgwAVW0jFXpywi3LkdqurTFMMeG/yc9H4ryuLBAmg6TQSAexaYGznhE +jXZYJR04Wi8ct2e62GRZdAUGCzg9ZxAEr3wPRg+XW1jkYvJvPPFerG5kQPdx1bq/ +C3AQh3ONSK+ZTv1hxWumixkJbHh0aQpnPvy1Mq4AV+mHXlPlJocXfhCFh9gZag3q +DpDQ3Q56fZmDmssRQO9TLd0/+lfZ22aM94DmJyU78Dq+rpLfC4Guh8DfhLGtCK9M +60ixhLIOonbE5/Q0T8fKxW2di6DR+kc= -----END CERTIFICATE----- diff --git a/test/hostname/Alice.key b/test/hostname/Alice.key index 64f286c92a..482cb33829 100644 --- a/test/hostname/Alice.key +++ b/test/hostname/Alice.key @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDjzkhi7DQSMX6C -BeIJtX3K508fTlvNxs9gYKMGIybyTrWSc5gT76QA7ntnETpcParyoF7KN7LJnmTZ -r9uYOxJ9ZkYHzeAoBVbYjvm2jgMt8lTHwqept0ASIYhhe1RBhkIJH9eNhoY6LgYe -felj/leTYu55TUGfPD0kRNs4bG5XCl8TFbACOxKKdcY3uZQTaOXYl/UvNl2Pp9h3 -v72/WL680Y9kGnmU9wcvBU5RewOTZKtdGe6y3hRmYz16nKxo733KH5PxRDy2GyJ9 -mKC7QiyL8TYc7BRSp9FePeAXx5RQOYTL6Z5pgirwOnZkiWyaKBud9T5tFxeT9QJd -d1NsAURdAgMBAAECggEALevYZbCNspktklJTRXfDetJz/bCTCwEnrprsYgFWCYRa -T8JjhqlJGzL3x0gOxqdbvXscgJEHxmLam5M6pg5KZOLn/QzAQfEJl7ACoI0yEOIH -uxj/KVQaY01FK7lru6WvzB0SG6JhjnrWmvDwykpsJvbLccJkFxBSluwWcOJSv9Kj -CQMExsy9s2aVyUcA19aob8tQunBpAZfqIAO/wQxGUbxo7Bk6/o+/jYSoedzm0viY -M7xskskE0CMglC4AkbpWBLAR/aKlgtFiniYm3wp4k7Nbf0WMkESfCfvQtqsBgp0W -vuL2QbVouzxiGtj9XyGA3WqsJDVFL4CD5Aoap+kmgQKBgQDyQYmyOlifQvB95ZWO -GVQ0W4bOqzxOHuQYswIPj2yRjeoD7dYcCQD8i3nKzxrSljIezal49dio3+yBJwY6 -jomzrq7HPtmKMt4eZN1l9Tljiz9+5cxyKc2/qGJoEBkBccBlZXAFVJ99wSfcKQQw -zT4NbVHuXK5lZol6Wjvk/fVXIQKBgQDwut+wKCmsYgsVIJ17I1iHlcJUspPkWB4c -+iES1uGM49Iem2lMNSdRKzlkB5c6+JjIbmhLvh0+PH/7/vkVIrelbLCi4qe3E6m8 
-gTOVq8pHohzLJJQAEWG6JlkjxBj+Orgc5qos4eO71yJProGk+xMZARz5n0EKmkpP -Zju/T/7RvQKBgQDyOBMsT+hCPRTmXEIflTW7L/Rm+ZFPbtWT2I/r7PSZyDI+gXQ+ -Dcadu/sni9H+0swEPo//cJiTqWj4bYNt0wzdyn/Ymf+6jUfHTgSMKBecbyMqhyvW -zfN5eSwDbm0CI7FB8J2Dxuu9Of7Xw278OIqdtDtiP+rjWhWFb2lJeZ7v4QKBgQCt -XRdMyI/CelUa4QMos/rEoiByWKzTLHZ7TdNVuvRyP3uJ2UhKvpjTBrrtA95wdKmq -5oAr0/1BXdaZxzTgeMEi3BSVKX+5A+sgOzfIGRCy59euoGgJaHsl0QovDMEnDWic -P63cZs1X8IXgNn9dLgfB4SBZ0MvJc/YCGlD65QRRTQKBgFxqEn90iOZr4AZKYoIR -0qQM0MA8W8Vi1EoKU7O/onuZrBA1rMfOGMjdtGmnTozVDbi/VKR6sjd4IpsIDH9L -WMn7Jm8Y5KYIEs9/DVv+/jPoPa/fQ680h8+QmRrz8P95Ap3xd17s+10qbUtrQdzI -w4xzB0gF0vOT/dCAmN66h/rv +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDGnUZAgtEguBXD +hp/mOy+JU8maY8sG4IptPNQXVlxEAhv4D5XMWRkBD59mQHIJb7jW0ZbA2Kg7PPHJ +/TZvcVkSTod8e/Xqzf9ZPAAoTUo+vZ7084N3AVGMd08/COTL7htVwaenDiu3MlIs +5ZUFRdWF97zOcGUj94RHvI8RqTMUuvnCodjB24QzbVFe/Le1RYg6r5E+B/OVSIh8 +gU8yv9ZTJh3nfNtAGpvG/G9FkrHVRVGWXMVCrmigKoOjaWEBOPSDzBifM4nVWYRJ +v72T9SvfrjSUquJpwpRQbmvtOmehe4I9jIxsjZ+jJTDirBDAJp7YOI4Jvw850p4N +LfeV7VSlAgMBAAECggEAVGkRK2etk5dJAKFdoc6zpEys2OXiqpiRnF2G0ihM7EZt +Np8BDikrvEy0dROco7AMwZev12P9E7gSFsN7+B8XOPWRFXHlkRZdmMIsWvSGQmX6 +gaZg0BoKW4V1c5fHDXizu12jcBdQsvo6/IPSMrx8RASHBMG16nROjuJvd5UOdZdF +wKTlCjLvJNnZsaW9HVsO3YsR1w2m1NbUdx+UYeRzy7sYzKc4Wnm2qA6r1UkzjC1S +6ho3NAy9mX4HlSyYq4qWwLT9ByIqgzjWdWJUbyJ2+ZvCRmbagONoBjGc323Og8LB +6PRswo2nlQSjxHA4G2llQaKp5TAbiwr9tPG+DaupnQKBgQDoQUkwa144jT6Bjn35 +36xl/s00vhT5dfFyEK4RPThTPP7FBKHPp8PO2kPz7z6CoL7xoV/yR2OENZ3ZxIgX +xBIw7cu25B0yp9+yH9wZGd3hNDGiXlnaJKy4oXYrs8pkh9bUXXAUsa+3Jcc29Y5I +MiboquzFMf0JRwHapx0SE9/G7wKBgQDa64Z4DlubX/QhdrQBlyJA/bT7NfTHkRoH +Oo0b9POqrh7ZiVMp8mlNDQY9VZo44UOJRrttNLzNCCl7Sce6H3GA38fp/OcszZTw +Crlfzk0Fm3D9zUUog8BO3sH1WBC1ws2cONIa5AeuNj6GuWE0UueULcGeb7bp5VYe +kN/Cx7u9qwKBgFIMPj7Mr0xrGVnLbNWJHu4pRXUMcXxvHgydt+B/MBa3xYj0SfWB +3rqEgNz796lOACZ8S9jbP1zFVC5KL4m1yndeikjh7S6n/259stNrP+b++UnS6wsV +Sa8v2v81VJqPImWDXMTywJCC2A2iUdFPZk9rkplXP3y3iQAlaS+ptbQhAoGARM49 
+x0IL/LudyV67mLxdobubxFDjDE5ItfjrHNxSMVTkkU6d+tMb0YHEckaTYEk8psq6 +YcpvhKmKEBvSUGdNj0nGVX6NUgGTTBayyK/YeWivjLWVhPRT3vYYU/pH1jjR0sXx +E06UM2cNI97j9EQSUPpInnlSLhVMifLSwS9xjF0CgYEAsUd2Cy/sw1D+bv/6ktlz +5FYjAwJA2dcYCQMJ6Cds7yTgkmS0sFcb3bFOMtkTwzS6YShc07kiPlGs5d6P2uIA +cYBQLl/NNxVthCLmz8roY+g9wQH+0Bmwiigbn+vTriN+xA9F11Nie4vn6ZzQqncq +71BwLZBtCekeJsRU+ml+dSI= -----END PRIVATE KEY----- diff --git a/test/hostname/Betty.crt b/test/hostname/Betty.crt index 62dae6fb48..bba0776cec 100644 --- a/test/hostname/Betty.crt +++ b/test/hostname/Betty.crt @@ -1,23 +1,23 @@ -----BEGIN CERTIFICATE----- -MIID7DCCAtSgAwIBAgIBHDANBgkqhkiG9w0BAQUFADCBmTELMAkGA1UEBhMCVVMx +MIID7DCCAtSgAwIBAgIBMDANBgkqhkiG9w0BAQ0FADCBmTELMAkGA1UEBhMCVVMx EzARBgNVBAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC0xvcyBBbmdlbGVzMRkwFwYD VQQKExBIYWhpQ29ycCBUZXN0IENBMQ0wCwYDVQQLEwRUZXN0MREwDwYDVQQDEwhD -ZXJ0QXV0aDEiMCAGCSqGSIb3DQEJARYTamFtZXNAaGFzaGljb3JwLmNvbTAgFw0x -OTEyMTEyMTQzMzlaGA8yMTE5MTExNzIxNDMzOVowgYMxDjAMBgNVBAMMBUJldHR5 +ZXJ0QXV0aDEiMCAGCSqGSIb3DQEJARYTamFtZXNAaGFzaGljb3JwLmNvbTAgFw0y +MjA0MTQyMjQxNTBaGA8yMTIyMDMyMTIyNDE1MFowgYMxDjAMBgNVBAMMBUJldHR5 MRMwEQYDVQQIDApDYWxpZm9ybmlhMQswCQYDVQQGEwJVUzEpMCcGCSqGSIb3DQEJ ARYaZG8tbm90LXJlcGx5QGhhc2hpY29ycC5jb20xEjAQBgNVBAoMCUVuZCBQb2lu dDEQMA4GA1UECwwHVGVzdGluZzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBAPU04u9/94fgQZMwCHR9gX6yBfJV6m7hTxR7rQv8GXaaCYVVisH2NmW6KcrZ -hjUqsvPpm63vEZasYC2blqlLnQCmJyOemnx9v0WEX9SLM3w8ihjbGhSq6VqaCeGH -s3jaxe9Bx8anR1tWiz2AoEEP1SzHgBQv08swDdWZsFKqnXntwqKqZcegIQMelxW+ -iofAtSRZcwhbQUrpgaarxStuvpxqt1y/rbS27H1cf9U4CLysKClOIIJE3l7rqKCb -R5uYyQd07nZC+R7/83TX1AGFvk55QujB9Pm9p6RbjHJWZ5CLPtpiQhpMwYw1JluN -1KSwnpDDreCWMw+yEchlAnpw3/cCAwEAAaNRME8wCQYDVR0TBAIwADALBgNVHQ8E +ggEBALcRmiQ3lifgSuD1f6Spc6iHSvX1ilRXlo9FJ9MuROyg1ByuxQliU4Wz4XUw +CbpzUncb3B7Sg0Dg2dfMZJoafkVcVi4k6Rv689uasp4LIciK53sL33QfZEXLw5Nt +LizfDM4IV5tb2m7s057ObVwdjjg0ICRkpgXWQgGb2OhkU3ZliPeuo4RnODCThRLR +9SvHenpn8TJldNGunQGERPRFEeF6ekNgSbfM3vjimBUGzHdlqd9L327u+fRrVC/E 
+k0YXTM7Ummc1NUButUaFFiA9uBFhqU5tI1NnrD+dAmsboXrzI62HsipOmWskoSAX +66gkeyi/yoDsXENd09WiqTxOaqECAwEAAaNRME8wCQYDVR0TBAIwADALBgNVHQ8E BAMCBeAwNQYDVR0RBC4wLIIRc2VydmVyLmRjMi5jb25zdWyCF2JldHR5LnNlcnZl -ci5kYzIuY29uc3VsMA0GCSqGSIb3DQEBBQUAA4IBAQBvGhMpUHmw3j7+sj0D+mCz -+bBhZH6HEpy6TLjS1GfO0/fyO2DIcPMHNTdNqmoDTt33scS53155jEhCI8Wtb6LY -Mvoo0wwnQtGvuqyscnJldAQ++08N2bjJq9iQoG1gB9oPWOxRe4tjbSoJNl1X3a0u -jwjKwOl0HX23WMy3S5mIKuOBuT79/nY/rVlFP1fsna4TKO1ocXjK5JnQ9TKdGTRH -9STT/RPIIQvWg+zeDS+ZlMocZEq7NT63d2BzH2ZiV6VRZM0PSyEixE0fqfxPxA2D -+fqeDl8iRR4tPIifkDFZLoMiHDa7Ciqh1hgdMUk1tkPZpxy+XP+AzI/K/3Tnceer +ci5kYzIuY29uc3VsMA0GCSqGSIb3DQEBDQUAA4IBAQB+hMHxwzY7KpFe/mKhiUCE +bOrVBvXAp/98F5UPoMGbF8Qe4/nNPAhhFGvkG28lAyeai1j7HX+gqx0qxcOMHGMw +uIL/XcpetQijOazNzvCaXo8MqPMjkiFDWkdaJVR7D1BU2kwwDpHJnNpnjynBW3vl +OPkANoo9WX825vErdZ4bEHQRRZziU7v2auqxZuxB8uBf9NJiDQvyUvkDUkQkHu+a +8QAdifeavxCkcIE6aKzPAEfDgVIHylzOjAGQUpZgmaA5344jH5CltTlZ54zh33jO +MmmzFj5e0MCdFJY3JBqKcEfXswTnDdXZIvdF3Iu/kWXTmd8Fnkx2ektNd70MFK0F -----END CERTIFICATE----- diff --git a/test/hostname/Betty.key b/test/hostname/Betty.key index 6b12937c48..2d1fb541b9 100644 --- a/test/hostname/Betty.key +++ b/test/hostname/Betty.key @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQD1NOLvf/eH4EGT -MAh0fYF+sgXyVepu4U8Ue60L/Bl2mgmFVYrB9jZluinK2YY1KrLz6Zut7xGWrGAt -m5apS50Apicjnpp8fb9FhF/UizN8PIoY2xoUqulamgnhh7N42sXvQcfGp0dbVos9 -gKBBD9Usx4AUL9PLMA3VmbBSqp157cKiqmXHoCEDHpcVvoqHwLUkWXMIW0FK6YGm -q8Urbr6cardcv620tux9XH/VOAi8rCgpTiCCRN5e66igm0ebmMkHdO52Qvke//N0 -19QBhb5OeULowfT5vaekW4xyVmeQiz7aYkIaTMGMNSZbjdSksJ6Qw63gljMPshHI -ZQJ6cN/3AgMBAAECggEARpwMHVuENCRnvbK/PIxHlew9uiLzdyp6UzOqCSF3U6fX -xgV0B5aW44RQNJGfDABXt9U1d0i4j+Ljbz62i9myRFWUP7WUVvT+902/Kr1K/iOQ -wMeXIwx0Vhq1bbReAhc9mEAg/xt8eNjbD8LSYunkQRjR0P5UxtX3peKz25o17r3w -U5lpvbYzm/k376Dhr2RBr30jrrf2rh06+FQCc2dF2mK1j7+YKbIHK+BKQYtQeVyg -XYpJfJTsuHFojwZNGXEuidkGApuokTS0HiAuAjrCQsn4cUftXnUtE2HJgsCum/Bp 
-Kb74ahBbZCITXCRSKZCi6p9oFcHQ30JDCCz4Qy9HgQKBgQD/dzWYKzI29ihQmeLN -ntHRl4RTjO4LfCs6lr8ul5nFOcgGwSwaFaTbqq0oJefCqEH+wmH1Jbd5nfRi7PWr -uGibeZnLdiseHHMsvN8l6PY3tVCm3kJL5Ze2TY+n8/7eUPcmH60CFikqO53ahjV3 -9PtUBr5BUe1xUJ6T4zegqZKWbwKBgQD1uC8PfrIMGLmq9l3x3T2pAbmfz0N3DfUs -ncY+JCQRkBkWJk3oW8ITBZagCwvg4AnhbGvNgbAaPGEQ9HL7f19ieJeHxEaVtTY2 -kKDwelPHT06oCu2AZ8h1Zqfn55O/HtGO/MuTdFa9IKjGYJTUvSBy3VVd8gnK9MBV -fhUtEqNS+QKBgQC33NR18KDzbbcWS6sw0l2wu5xBhezN11BFmrl+jx3dFPkh42Ya -X/mHIBAAFUf4kaDt+nkGN18V6Nk7WdB3BwJC5AIMrb/arB8407bHUiPjdFvXvZ95 -gITwcGI0PyfwWdWHWsTp+4klHENAQ9e3vlok37WOzahXJe78AUzIFUOrgQKBgQCb -qC3Htw67Mv6LGr6wdOKWqY0Ze4bVaHYj6V6oBuUCazI5IdLAmz/6JNQiVl0T+1jH -AJPZ/4m7VPx4bSJZx3p5OsNjMic0tzK8pioNrLBd1hORyDpj2VrXZEyBT+X8cF14 -IxQjONOpw4KnCI+/pH9lxGhLtwQVGa6tec2YW/IyoQKBgQCMr00Z1/+edBh/s+Ho -p87Wwf3vRtRZLniVdc1jVk9raK6azrFS+vBzpkWZatLu5Grtwl/9HYNTu+AnfKGP -jyRkCx0i5qgEQobYkiAJeFocyDVbzaDdZBhTAINN9uaSDH1JpGNlIBxIflzT0adf -OCBbgQ6SaTH+MWvYJ1KJPsQVkw== +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC3EZokN5Yn4Erg +9X+kqXOoh0r19YpUV5aPRSfTLkTsoNQcrsUJYlOFs+F1MAm6c1J3G9we0oNA4NnX +zGSaGn5FXFYuJOkb+vPbmrKeCyHIiud7C990H2RFy8OTbS4s3wzOCFebW9pu7NOe +zm1cHY44NCAkZKYF1kIBm9joZFN2ZYj3rqOEZzgwk4US0fUrx3p6Z/EyZXTRrp0B +hET0RRHhenpDYEm3zN744pgVBsx3ZanfS99u7vn0a1QvxJNGF0zO1JpnNTVAbrVG +hRYgPbgRYalObSNTZ6w/nQJrG6F68yOth7IqTplrJKEgF+uoJHsov8qA7FxDXdPV +oqk8TmqhAgMBAAECggEAcaGcYtSaAIBpGf9oTmXb44Su08KoLTf8vUs4sA1tPM+L +OY6FwRBmNXx0+k9qCnBghIwncn5KeC/ZJ+i3nSvKqvTojVXd179KNEpuikjwnFET +47134tVFYUlcSRsg6Ts98HkfH9DA4c9gf5c0LFQwHdTFCrHql37pk6QP3BfB8p9/ +BHojrxF6dFV04XR5bMTHO0w1b4OstnROiKynZYxP9nxpeMDAWG8A1/7RSCFuaXHO +2m1V7ChAsfGsF26cAcVTBQaQBonlPAaswCOVQUSqVr+PtmjhaT96EJ2mS9Wbz8Wt +Wn/opHuRbmOp7wxJKXgvroD8t0738PyHo9H+EPthQQKBgQDywIXyClZFjDjLIH0c +YhS/ceRcUuhMxI1ZPwAQG72Fxv/HMUa3neSSKKLBKRQqPFxvvfjS0IT0737WtLjK +Mst2ojEKWkveRN54VVwINwXkLTrmM0vpQrBuye7c2le+Pj6QriV4OgIlB6P6huBM +AjiksbGj217H+n67cxwLgWdVOQKBgQDBDz8lc9DpV4WH+rfOx7vBSCqqvleQ3pxV 
+hO/BndW6/sFo7Go6zRc7mGmEeaJUVATAMTHRsvKZ7VQQH57l8JS88V27uxAepyjz +GV8kRYbfV5mtyoq+3owcaeUbBxVv0wCNB9I5m+SuRgfwe2/FHoV8EMl44LLB+sue +x5i1sDhoqQKBgGKa+43GzyZu//7a0pc97Amb/NPXxY6xZG01HxRsAD8gB3PlO0GI +vHj2Uq49vngtjqrBMxqHIwrPYela/Zj8qxMkbOE0ro650Nh2a+zWVOlLyhoKCjLV +KQ/HrOQ/ONcJN6bTZlsAzTA3e09fjCqz2Ehl+a+Cg2yd/u6rol+2D8BZAoGALc6u +Tvah9Ru9JTyJ7Fhb5kp3RTgQkuEe+vOl56zJj6ruvTSLKBSNlKhfMP2jVJry3Z9O +kNEC2x8CuSinjSt+Py6N7QM/meZTwwqcFoEgtVGVtzS9ovgvCnbd04Hkxjmsgcn/ +SYgBxI/9RkQjiwPo7D0XcMTv5TLaqXv2cfW0DLECgYEAkf+V/kSb667hAr4MNOKn +h030GAnmuvcm/ErbqWFXC7b9VyPDr+SU8tXr+ZZIzoH53ua9gxqTxYjFkUGMIZqM +yhRv3jYpG1ar1xs7Lo7qDCggPsBlZaIUkjZSlT0YX5SZ7U8DFowh7gRID0HUiELe +aqwXam2T6fIjLBVLhkuTWjw= -----END PRIVATE KEY----- diff --git a/test/hostname/Bob.crt b/test/hostname/Bob.crt index d5950f971b..7f22c71717 100644 --- a/test/hostname/Bob.crt +++ b/test/hostname/Bob.crt @@ -1,23 +1,23 @@ -----BEGIN CERTIFICATE----- -MIID6DCCAtCgAwIBAgIBGzANBgkqhkiG9w0BAQUFADCBmTELMAkGA1UEBhMCVVMx +MIID6DCCAtCgAwIBAgIBLzANBgkqhkiG9w0BAQ0FADCBmTELMAkGA1UEBhMCVVMx EzARBgNVBAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC0xvcyBBbmdlbGVzMRkwFwYD VQQKExBIYWhpQ29ycCBUZXN0IENBMQ0wCwYDVQQLEwRUZXN0MREwDwYDVQQDEwhD -ZXJ0QXV0aDEiMCAGCSqGSIb3DQEJARYTamFtZXNAaGFzaGljb3JwLmNvbTAgFw0x -OTEyMDQyMDMzMjhaGA8yMTE5MTExMDIwMzMyOFowgYExDDAKBgNVBAMMA0JvYjET +ZXJ0QXV0aDEiMCAGCSqGSIb3DQEJARYTamFtZXNAaGFzaGljb3JwLmNvbTAgFw0y +MjA0MTQyMjQxNTBaGA8yMTIyMDMyMTIyNDE1MFowgYExDDAKBgNVBAMMA0JvYjET MBEGA1UECAwKQ2FsaWZvcm5pYTELMAkGA1UEBhMCVVMxKTAnBgkqhkiG9w0BCQEW GmRvLW5vdC1yZXBseUBoYXNoaWNvcnAuY29tMRIwEAYDVQQKDAlFbmQgUG9pbnQx EDAOBgNVBAsMB1Rlc3RpbmcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQC+TMR+iyWgqvEmaqDTS7AaK5M18oPF47dDPm/o6/RbPRDO1KfcXXaJCk14tTd2 -BbgUPHyuOf5CfEQIBc3JgI8Aj4nTY56Fo7Zz0igEOd2tXBe0scx0dXZPrRnnUfg1 -tG8kBJGYL4wR7Bd8N0xCpZK4+6NWyEkGmiTCI+NoVevhadGDrTlLbs+1GvzuufUB -OnVsam28beDfFI7JoGFpV/wbu93C3BUs2yg7wvHrAw2uvA0K5A05Vk+w61gW9bKW -HNGvOzTIr5ZWYFLYO2xNq/9vbmnX/teYiMWd7OkZbwTssbV2L9NJ0hML7fd48Rb9 
-3jjXAXCqHQgliqUZ45aTQEqlAgMBAAGjTzBNMAkGA1UdEwQCMAAwCwYDVR0PBAQD +AQDD0RkklDCznxVHo2/jXbeBkCDJBs8CqDeJuGwscPFIZuozR5LoL0ElScYudVwC +nvECbjcwwV0fMUIfRKao+6Akyvpd1zZpUYpifHkULzSnjm0x2ea/1fscIEuoQp+2 +eNDeQ7UAgqUpE2sgtSKuXa1l0zC8xX9eeZ3tKXl+6gXe9gfuFpRCijKt8o21EVnq +Gf8fMYZpLxKcTmf7KdZGKVzqY9JO84xfukBbWvhxpFFgka6NasSxmuqIps1AFs3V +pi2cDGixgJUGsVY4HJzEp/dU5bbr6Uke01VEmNpMn35rkxJtAWkEeWdYuq5ZaVEU +0Wd2i8mOiYtSi4i43wAlh2QjAgMBAAGjTzBNMAkGA1UdEwQCMAAwCwYDVR0PBAQD AgXgMDMGA1UdEQQsMCqCEXNlcnZlci5kYzEuY29uc3VsghVib2Iuc2VydmVyLmRj -MS5jb25zdWwwDQYJKoZIhvcNAQEFBQADggEBAGx4NH6cUIfLf4e/lvBDZFmd2qI9 -+uYC0kjdbf8mZuyVvpbtaWHqVUdfGRXjYJUi6+T7MSzhx5hhtXEwkKRDQWO3DPkE -kOOh+NEfeWm0Qsz41TlEJmZnpZP4sF37qO8uquFL4gVO4fHlybjL43XoaUiGsJ6o -jDQWqPZTArUDKz3SfvRc00VLc2TQ0neLVcAl24m5t3MNaN1UZ4PI2cXfC6HtAiVz -9V7IgRtM38YTYe8MzkiXCwFUVubTSyOOexxtoY8TuYEvyGcUocsz+G+SzK3gieB7 -D4MxZbgQzSOGtlDx9G7K5AWw+rqzReehDuzkI9itFXBAHKjudycE25a3xUQ= +MS5jb25zdWwwDQYJKoZIhvcNAQENBQADggEBAKvTAJffhOQVrm/R0p+YJpSZxis+ +Gg5nH//a6TSuj2qPp56q1V+cbiU/eDGIPkNUMLw1yl/0lhvKrHGR1QysdDcMEtph +bXga4ZwNgwQ4BLJi28J0WJDMCDq/GRhK/6KFd0uLLiOppTe470O59qSHSa2JMjKr +uzs9153jbY461IES147/MY3GDVC5pWGgnsCxmTuFf7vqV+CpRq62QlK+ZlekHaDL +cpwG/bMSUHdHcjCUusASSDyRcbCebGvMYO91zEMGkn7+DOd8ZkQyIX709/ue1uhJ +bqSWozgd383bL8ChNqnZqItOS63PCzbLhquLZJ4oQ8pq4YddGT8PD+Bmqks= -----END CERTIFICATE----- diff --git a/test/hostname/Bob.key b/test/hostname/Bob.key index ee2ebee185..5efa481dd3 100644 --- a/test/hostname/Bob.key +++ b/test/hostname/Bob.key @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC+TMR+iyWgqvEm -aqDTS7AaK5M18oPF47dDPm/o6/RbPRDO1KfcXXaJCk14tTd2BbgUPHyuOf5CfEQI -Bc3JgI8Aj4nTY56Fo7Zz0igEOd2tXBe0scx0dXZPrRnnUfg1tG8kBJGYL4wR7Bd8 -N0xCpZK4+6NWyEkGmiTCI+NoVevhadGDrTlLbs+1GvzuufUBOnVsam28beDfFI7J -oGFpV/wbu93C3BUs2yg7wvHrAw2uvA0K5A05Vk+w61gW9bKWHNGvOzTIr5ZWYFLY -O2xNq/9vbmnX/teYiMWd7OkZbwTssbV2L9NJ0hML7fd48Rb93jjXAXCqHQgliqUZ -45aTQEqlAgMBAAECggEAWrzeAHy2r1py699x2e5ABOp8IgAF5wjCbHTMBaLke9Ct 
-QAHUHFYQXB2mfQTjcgoeEMAarzSF0QvRoIWr7wW2qgzHKh1ZC93Y9Hbjj8hLtAqy -Xv1cQLd1d15ReKP0Fx920xS+m3Moda8+L4NqgGjUghGye4G6mERNfKiCGVDGzU7F -5ayIHR60BRiwsODJ7jr5ajcXoTHQ34gRLz/hB6S72sLAwEjGedpqpd79LNXkSdiP -axEW9nJVodc286WToR2YSSDezvIKgpZDy9onvBFmIyZIKuALmk10YNTrL1SfgR2C -wIjeHmfukgnlWzNFLB8bx0PBnaINSgxfdDa6ZYaaIQKBgQDmFWvmXUcW+SOidjUV -QTS5gjejYdDmB49y9x4eUffyHwA0wJWpiXE9TCy+PjLi1WIineHiaAmNngEU/IHF -NBi127opbU6CftvW7dGdv2IJxaN2IePSmlsLD8XItD+ZbhcZnHy4bLF8gIdttxXS -GZPHzesY0EqKCyb5ygjQ1wjZmQKBgQDTvCj6cLmVbV89wJMB2rSTglD9B2iwJnHX -wiX7bedc579odjUpTOmbPTxn9aI1MJeE9aKFuQP6NspOSXKQqlXjheXCs4d4jWmD -EQpL8dtHzXVdZf/2+RtuCYafpMRXFvraQjg5TdHT7ezQco74tW3CW2YUVdKyslNn -R1EWlzyY7QKBgQCotlyAdzWBqv5uSq9x/nZi8RFLRJahljmh24LCSOi/KexEwlL8 -FkRq5kiI16MIod9r8smH8zHOHmY8tUuTBzh3Yb+IURaYqd0aJRjny0ZgVAQgw4kD -DRxlaBNnsIRSRV+67/ykX09mM/kagn4Fqaurf1s8vr9pqfPShksgmA1tQQKBgE98 -lLmn9dOl8ppBIC8TBrVVt8e1r1RpqlVAOngQQ0n6aj3yGnT9vbkcnP++E/351vgA -KtoeoeKeDQakxhCPEZ1Pl/im4xWbqN+eVwo5qoNjG0tLznLOA8EkbFikR10WcGfd -cjP5BeuUp1F9oDS4D5NmMoUxzt5s2ais+kEL16DlAoGBAKoyjZDTv8mG0YCv4W92 -Quv8+KxE5+7qGjckDZh1tZGQjU6br1QccPAXZmlRbAJD1c90uUO+Kkx27FFJrB4t -A9jCUpXUv78PyvqX3IUW8H555n/a0M37A0xnkqm91LddkKmAbkQvt6oel5rNbt2+ -QeYzS1O8PX+zTLGf64h8Ajwt +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDD0RkklDCznxVH +o2/jXbeBkCDJBs8CqDeJuGwscPFIZuozR5LoL0ElScYudVwCnvECbjcwwV0fMUIf +RKao+6Akyvpd1zZpUYpifHkULzSnjm0x2ea/1fscIEuoQp+2eNDeQ7UAgqUpE2sg +tSKuXa1l0zC8xX9eeZ3tKXl+6gXe9gfuFpRCijKt8o21EVnqGf8fMYZpLxKcTmf7 +KdZGKVzqY9JO84xfukBbWvhxpFFgka6NasSxmuqIps1AFs3Vpi2cDGixgJUGsVY4 +HJzEp/dU5bbr6Uke01VEmNpMn35rkxJtAWkEeWdYuq5ZaVEU0Wd2i8mOiYtSi4i4 +3wAlh2QjAgMBAAECggEBAJjqspLRMxMieXC/XkIVTpfcYO375i8yBGUFY1x1OseK +rvwqubueI2amLSDcv2TAkH+QaIMnjbwtMHDQoMG39sCkzk34IeKvLb1pbPhpQNpU +rEtQ2hUXWokFY2/bz3Ok95+LCk+Cts3T/0XEjnfSafXprdbM+IFjwgOR7vJAeSM6 +auXQ24FHxSFBMt0m3Zhgp1P1qLBMJCxrEM0PTDKSnfI/HURLmUwJShu9x7+RzEb8 +pMGYGqFrXgkPOMT9gmBTjjgljMNiAYU2NZWsmP1dx3tB8lUHCd8iIp8bwNjqc8Q/ 
+rSNtniZ9NI+DNTu5xrAHz9KnM2AQgn92bHaerBZ+TikCgYEA53wGSaf4qW9cNBPP +p7DVAcz0FxaFEdzT4RI89KhZvKySj2bmsmywhW41dRpHhqKtBCvPgewTaI6YWGxf +YtUN8AmzrymfXZxPl+A1T6zbfV0AdP9LI/MDj0BcgkAyhdPmo4/jzAFwsRqe6HHi +dcqXBdqimuKottqWixpsJHOyITcCgYEA2I4ObI6Y4xdZfVPqj8xiMq9g1MUNRXLG +palvDGUHV1jl8oBdBCsjWZdd7cIWqLGiMxdWM42AWPZ71QKqEhd4K2z7fzeaXOZb +6Dsruxu3bG6/RNxRuCfTL6YOIpz/m5iIHcFD9g7OnODtKkvAum/8bTYiTjunHQxp +zInhKjYGenUCgYB0ywS5IQC7LC6PL/ezmeq+79Ov2nLlYk+c3YDXyCEOqtt/cuGu +4Fvn1oUuQkYYTfeRhTE7Ugsw09FVu8gcq3ZOx/ZayFtZ9cXK0RrUylgr1kKmxS/6 +QWBoZIbISO+0ygcyOyUqBwf+s3m9ucgSulacY4VrNDT/nSYbpQcvFRio3QKBgH5z +7U5MslAhKVt6rgSMJ2dLa7Ky4j4EeKTx3GuTbwb9XUEO3cH6YqkFL48Pf+W/3GQT +I331CdZpEARhiugHll9dQzqVQGkxBsgEDVQ0KhaCUOQn9vwfHT28rJJftQ6psgoV ++dZr7RBEc5j4JTF5BSDMtJmUUdAvmHQcq0PXyk6lAoGBANPIxjhoJ2tEEQq6aXxF +VeHK/zQAPTbTXE8cS7tf4AU9u5WJ8noKm8KG9NwLJcN8kNKfKj3f8VXaFh4Pg0Be +rDopAQJtk5JJlAv8RKA8Igf8ilLLw5/6AYspZZLrPNSrYYvKVGN6rszbTk83sgOi +qhTPPWMwC0CmLJQ7LDdhM4T3 -----END PRIVATE KEY----- diff --git a/test/hostname/Bonnie.crt b/test/hostname/Bonnie.crt index c13d6def2f..bc30780fbe 100644 --- a/test/hostname/Bonnie.crt +++ b/test/hostname/Bonnie.crt @@ -1,24 +1,24 @@ -----BEGIN CERTIFICATE----- -MIID7jCCAtagAwIBAgIBHTANBgkqhkiG9w0BAQUFADCBmTELMAkGA1UEBhMCVVMx +MIID7jCCAtagAwIBAgIBMTANBgkqhkiG9w0BAQ0FADCBmTELMAkGA1UEBhMCVVMx EzARBgNVBAgTCkNhbGlmb3JuaWExFDASBgNVBAcTC0xvcyBBbmdlbGVzMRkwFwYD VQQKExBIYWhpQ29ycCBUZXN0IENBMQ0wCwYDVQQLEwRUZXN0MREwDwYDVQQDEwhD -ZXJ0QXV0aDEiMCAGCSqGSIb3DQEJARYTamFtZXNAaGFzaGljb3JwLmNvbTAgFw0x -OTEyMTIyMTA0MzZaGA8yMTE5MTExODIxMDQzNlowgYQxDzANBgNVBAMMBkJvbm5p +ZXJ0QXV0aDEiMCAGCSqGSIb3DQEJARYTamFtZXNAaGFzaGljb3JwLmNvbTAgFw0y +MjA0MTQyMjQxNTBaGA8yMTIyMDMyMTIyNDE1MFowgYQxDzANBgNVBAMMBkJvbm5p ZTETMBEGA1UECAwKQ2FsaWZvcm5pYTELMAkGA1UEBhMCVVMxKTAnBgkqhkiG9w0B CQEWGmRvLW5vdC1yZXBseUBoYXNoaWNvcnAuY29tMRIwEAYDVQQKDAlFbmQgUG9p bnQxEDAOBgNVBAsMB1Rlc3RpbmcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDpxsOS28WlhI9l3rXf1m8hRJEM/OW3o1Nt/s/G7yCGu+VORmIWZrwgX+TY 
-B7p7/uWSA20pOPfJjgSxQOY5V3w3Tj6JfjEiknPY0iVVBXT6/EMw8DkXUe4N8Txc -YHV59VeMBg0IwPDQg5RqvAmzCGIqn5wky0DQXu1f62qzotpwTtEvy61MAovoZOCQ -myX4M6eg/eWTG50A9X56ZTuUUo/5teVFZy/7xDt5qASZl00h9vOZ4VAmMpaxOGYh -abPX6pfhROtyjwb28w+f5hoEP0p+FChF5NJL10iFQBXxAnL+Sty1fL8+2Wt2bG3L -iA2PyRCSpTXnS/Z6yBw6b8OUNCzpAgMBAAGjUjBQMAkGA1UdEwQCMAAwCwYDVR0P +AoIBAQDEG4RQDIbyPVcEyX8+uUcc6y5OeS95QlxkDbP5kXDaroV0VLmW3P/bkEtO +10omfCx9PFeedNI+cK6g1q6ByBkcwPmBIcfjWMmlfK5wIqb/smQJwUS2mLtSbkj/ +Eiilh6PR9mNQzosUoYFLKhQbkwYLJl+uHzMZyPVFyg/xbLBpTBkKB+2K4DAw4nAg +pojylOZo/QoC5CCUkprdLWIcvADP0rgqK/sZDrfrNltKM0I4oH2xBKPxICOMWuZD +euIKBB4y13BSMT4rWt1NfnIaVwXOhYcF9D6JXs5oXUymTR3FONm7P4QkMLdNUllW +9WZoSr7WzJ4UoZo2dqCv211t0zvNAgMBAAGjUjBQMAkGA1UdEwQCMAAwCwYDVR0P BAQDAgXgMDYGA1UdEQQvMC2CEXNlcnZlci5kYzMuY29uc3Vsghhib25uaWUuc2Vy -dmVyLmRjMy5jb25zdWwwDQYJKoZIhvcNAQEFBQADggEBABQhVUQmuqwqPLcSOumf -wl0Zr271DM8s/L1+DuOpqhlxRk1EwoEE/ADpy6bQ5RrASx/SEK8ufMu+0Dwil+xR -Oq+JyIreOuRNRrs2vwj9bB/flOm14URjqOo04tOnyfp0EhUlFLGTjBP8tIzZlXqq -CTePLBJi/Lwjo13Q7zGdB8jJ9FC5PS7A3SbeW8dzZyiL/vW5UpNY20tNSnCr8zj4 -/7e9lA5PTW1CLOUEndIhmWb0CKaxikDZiQX/9GK2O6M9+Pi6URVQ3NHP3CEoVrKs -0icCXKyetRx98ipGEEPFQENDx0obZ0Sp93LkFwy9sjSFcV9i/rK4PnNJtNmEFRY9 -SqA= +dmVyLmRjMy5jb25zdWwwDQYJKoZIhvcNAQENBQADggEBADhkLBmIaeSLDOWvRq13 +7Ja3ufHgTIKR8dmyLU/Lmn5KIWOlz7dAk8eP4wbnxUrp3Rn4Mw7esrHtMihSBTVq +XjwpwH6BzGkT50iraaoWdv+3FojySlG10BBTDiyqoKSTfj9g5RQn65tebcxueRMO +KFlKHr3LJui/5PrkFFUDrXNqior2Qbx3ZZOhbpH0J//Rt00tJ5GDPiPV59nWcIgq +4huF7hS4F4hFf8F+Pfs535//zF0EmJYE1J7twU6RC8+XRfLBQaXHNuvLxJbZOD7X +ViqK23b1t4Xn3wYVohAkKo2OoDXj8hoKXxg9ZYPqK2BGEOWCbTPIZjz5EWX0k5vh +yrA= -----END CERTIFICATE----- diff --git a/test/hostname/Bonnie.key b/test/hostname/Bonnie.key index 00ba5d6206..82799dffef 100644 --- a/test/hostname/Bonnie.key +++ b/test/hostname/Bonnie.key @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDpxsOS28WlhI9l -3rXf1m8hRJEM/OW3o1Nt/s/G7yCGu+VORmIWZrwgX+TYB7p7/uWSA20pOPfJjgSx 
-QOY5V3w3Tj6JfjEiknPY0iVVBXT6/EMw8DkXUe4N8TxcYHV59VeMBg0IwPDQg5Rq -vAmzCGIqn5wky0DQXu1f62qzotpwTtEvy61MAovoZOCQmyX4M6eg/eWTG50A9X56 -ZTuUUo/5teVFZy/7xDt5qASZl00h9vOZ4VAmMpaxOGYhabPX6pfhROtyjwb28w+f -5hoEP0p+FChF5NJL10iFQBXxAnL+Sty1fL8+2Wt2bG3LiA2PyRCSpTXnS/Z6yBw6 -b8OUNCzpAgMBAAECggEBAJg3A0CsOJT9KyF5UZLdXJ6ctpVuVWSsw7XrI/6z1Mnl -rfi5e0R6wCOUTL0cyx/RaEkaUgl1PmHORt/jEgRkIk0gdTexIu0Pzr5ulkA1vWVu -u5Ex2PqGLiqF2HeNlvBB/y79AZ+hgStDgW+939LisohuRIzwitMh/A1oi6FLeE+G -w3JpkomArYWpGDWY9UF5WU/LquriLYDlgqsLFwLiVg73qFHCroN80ZYmQ1DF6x9W -JD6t4INk7brAoDzb2XiJtr246xcz9Fy7bPR59706vQvGS4vUB0W/+x8glVDlSYqi -2gnqHpTRl/0r/MwMwN28tqDd6TNB+qpYOUoCpVnEYcECgYEA/yRhV1wUZh46Bi89 -nq2RRTFsPnNfR/abMwNw3Jz6L3RftqlC6oqUjR9twV3mEYP+X9fHYa/MVASi9YTD -0hIeGcMX6nker0YfYxacc/cfh+8jrh/rMFbfng1fKWESMgyT8v2ZhCAFiqow4JWe -JgKpc9TlnlBSUg+QtaO15gREL4cCgYEA6o/+IKU7eMgEFjBO+BxgYETroBPZwAxy -RTjdISl9qafn8bJw3jfQPRGdEtAhXxniSVatkN3XhQD3kda+g0tMi4L3GgbJci3m -hoaO+YSErX93Grk5KHkyBXhiYp5eezWZgzj3bvXW+AjG9l1/2zOtXEB08vabrrYq -a6mo+bVQfA8CgYAVnU9hxzszK3xe6cGen7We1wEEiWGZBxs+xxvYlLPdMU6qesxg -cj0Dd5Yku3+yHCyAkhch/3Kf64SiqAyuzzodlJmCE6C12IrwKgo3CMhzC59KMQoL -nlBzY06cgsy30Lj37OQEfXH6vVBGtmYfcjpIxdBuCP/wU8E+og/W3KWqIQKBgBDf -Lp5WhEKZ0IFwFB4QnHYQPwqSovUHcCi+gFlkTJ+pbiuadDfnt9jMrYmu4teeWwJ/ -c5iZ/GiauzQISEfVYLogP3nlxxOpbY17nEkiWRDJvF11uUDKBukR0bHaRl2Ca3pe -3J1knYPVzcnmy82OmAesszzOtEAK5l48I+ViP1nNAoGAJMB2QRhrYQqjQtC4oHdp -6tGRBIS2ElgdILlXdHhvUyxvUONWEoymODeBXABMUUgiwLU/kGyt6pcjMMOStJnR -gTfGhjYIT+L5C7Ivz0OQU+CYV2i0W+c4xgjBKbuscisgg2CurS3cF1tera1Qve/Y -UtVBRnZ8/v/NisLFbtum/+Y= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDEG4RQDIbyPVcE +yX8+uUcc6y5OeS95QlxkDbP5kXDaroV0VLmW3P/bkEtO10omfCx9PFeedNI+cK6g +1q6ByBkcwPmBIcfjWMmlfK5wIqb/smQJwUS2mLtSbkj/Eiilh6PR9mNQzosUoYFL +KhQbkwYLJl+uHzMZyPVFyg/xbLBpTBkKB+2K4DAw4nAgpojylOZo/QoC5CCUkprd +LWIcvADP0rgqK/sZDrfrNltKM0I4oH2xBKPxICOMWuZDeuIKBB4y13BSMT4rWt1N +fnIaVwXOhYcF9D6JXs5oXUymTR3FONm7P4QkMLdNUllW9WZoSr7WzJ4UoZo2dqCv 
+211t0zvNAgMBAAECggEASeIgGFSP5uXrSfdsREJjo2aLnK8VjrbMvjlXbBg6j3ke ++HQPM+JxVtRnOv3rhtJTpJ8+V4mlyDaF5DzdpDGyHF4r4lXKzEGgPwPZaI/1oPIh +tHcnHzAtquG4CLlbrPmMY8dTJZebWJt8bcgdqUHcDglYFO7WPsmydqepGAkd8Z07 +6Ze6tGhKbxoXYSDLEJ+twjLmWO52gWMUIAlDBawTikJ+ig9uzIO1EmxO7C2dUAHS +eImqoWgo4/uMKWgaFIjwFCvgLPItPmd8ej/tpAx/ASHBzAJbyOpbZM26h3+3RPte +RXwISe8Q9WSk5UnjFvy2+UKZ1KH4B1RutKGK2NM6tQKBgQDrqbzgAnCRbsW+Xy/D +jYh3JLJNpy6t+igMrV3Zzsaxf6rugJrcokvLcySeTjSD41pzkDX2JobJjP90/bAs +CFORm7e8a+M1z71JA/Hk6+nJwSlFpFynfIjPZsjTi7LgupBp+g0EGgv8kpm76l9T +lQRN96VBuePgvx62TtV4EagGGwKBgQDVB+s045fEa/KDAOJWsI1Wf589q+sztifE +02KNXWfgLGUzTDZbkGA5I+A/WTL+btRmUWlr+T9dALX8bI49Jho3CqVwGqkK1HA6 +7UykslGKaySMTcw4jU5jS64MPxCSklbeIAq9myYUESTQX7tidDjT8zMl/egiO7Ye +xMMWx/yENwKBgGPrLOzhu+69w9Pqseq8+K5jfcIU72LOnOp7Gz69QFuD4OqM1pxT +p8VURaNlTzjYTcKP04FRZnbQdIObCHYy9ZPYLTgTmlt3gC8UIBzKte5YGvKvNKXC +1JLzZpTjN223TIHShnBFxu1JDyuwvMhId3HDXWsXsPnart/nXvUwr0gfAoGBAMZm +dZvwSyxYDKgNbr6l9zwT586cCpVClI8u/54A2/lf33CDDi0ArV0KGJNnE6L6vT39 +nF0+6NBJFTReNaqljcytUZ6ydbTsXQnEb5kDqgVr+8Hfws74a4T2usYVBe479EMz +PE2R7UjLHqoiPnZXH4Xl/kDn1AXt7pOBVOAmqPrJAoGAMILgsmVBydvWvoQCYZRH +utnrswIYzDw9DeaLgktI/Sc9QJr1rswbW13Q/xMSb+czsmOV8jhUS7Vk+Xz6gzii +qtoOS9b5ojJP1Y+7gVY8sKVMsEJXm1sTA01HPd6WiN7gzbMpPzO64YzCFHYYZX1E +C+CDwDatWgLJeomG/2UNr68= -----END PRIVATE KEY----- diff --git a/test/hostname/certindex b/test/hostname/certindex index d6a932c66f..56c177d369 100644 --- a/test/hostname/certindex +++ b/test/hostname/certindex @@ -1,6 +1,4 @@ -V 180512061548Z 18 unknown /CN=Alice/ST=California/C=US/emailAddress=james@hashicorp.com/O=End Point/OU=Testing -V 190512090339Z 19 unknown /CN=Alice/ST=California/C=US/emailAddress=james@hashicorp.com/O=End Point/OU=Testing -V 21180418090432Z 1A unknown /CN=Alice/ST=California/C=US/emailAddress=james@hashicorp.com/O=End Point/OU=Testing -V 21191110203328Z 1B unknown /CN=Bob/ST=California/C=US/emailAddress=do-not-reply@hashicorp.com/O=End Point/OU=Testing -V 21191117214339Z 1C unknown 
/CN=Betty/ST=California/C=US/emailAddress=do-not-reply@hashicorp.com/O=End Point/OU=Testing -V 21191118210436Z 1D unknown /CN=Bonnie/ST=California/C=US/emailAddress=do-not-reply@hashicorp.com/O=End Point/OU=Testing +V 21220321224150Z 2E unknown /CN=Alice/ST=California/C=US/emailAddress=do-not-reply@hashicorp.com/O=End Point/OU=Testing +V 21220321224150Z 2F unknown /CN=Bob/ST=California/C=US/emailAddress=do-not-reply@hashicorp.com/O=End Point/OU=Testing +V 21220321224150Z 30 unknown /CN=Betty/ST=California/C=US/emailAddress=do-not-reply@hashicorp.com/O=End Point/OU=Testing +V 21220321224150Z 31 unknown /CN=Bonnie/ST=California/C=US/emailAddress=do-not-reply@hashicorp.com/O=End Point/OU=Testing diff --git a/test/hostname/myca.conf b/test/hostname/myca.conf index 5d7538043b..593f092b76 100644 --- a/test/hostname/myca.conf +++ b/test/hostname/myca.conf @@ -13,7 +13,7 @@ database = certindex private_key = privkey.pem serial = serialfile default_days = 36500 -default_md = sha1 +default_md = sha512 policy = myca_policy x509_extensions = myca_extensions diff --git a/test/hostname/serialfile b/test/hostname/serialfile index e28e17eb7c..f5c89552bd 100644 --- a/test/hostname/serialfile +++ b/test/hostname/serialfile @@ -1 +1 @@ -1E +32 diff --git a/test/integration/connect/envoy/run-tests.sh b/test/integration/connect/envoy/run-tests.sh index 67127c89f4..ec1a3b89ce 100755 --- a/test/integration/connect/envoy/run-tests.sh +++ b/test/integration/connect/envoy/run-tests.sh @@ -10,7 +10,7 @@ readonly HASHICORP_DOCKER_PROXY="docker.mirror.hashicorp.services" DEBUG=${DEBUG:-} # ENVOY_VERSION to run each test against -ENVOY_VERSION=${ENVOY_VERSION:-"1.21.1"} +ENVOY_VERSION=${ENVOY_VERSION:-"1.22.0"} export ENVOY_VERSION export DOCKER_BUILDKIT=1 diff --git a/test/integration/consul-container/go.mod b/test/integration/consul-container/go.mod new file mode 100644 index 0000000000..886be129b6 --- /dev/null +++ b/test/integration/consul-container/go.mod @@ -0,0 +1,30 @@ +module 
github.com/hashicorp/consul/integration/consul-container + +go 1.16 + +require ( + github.com/armon/go-metrics v0.3.10 // indirect + github.com/docker/docker v20.10.11+incompatible + github.com/hashicorp/consul/api v1.11.0 + github.com/hashicorp/consul/sdk v0.8.0 + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-hclog v0.16.2 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/go-uuid v1.0.2 + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/hashicorp/memberlist v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/mitchellh/mapstructure v1.4.2 // indirect + github.com/stretchr/testify v1.7.0 + github.com/testcontainers/testcontainers-go v0.13.0 + golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect + google.golang.org/grpc v1.41.0 // indirect +) + +replace github.com/hashicorp/consul/api => ../../../api + +replace github.com/hashicorp/consul/sdk => ../../../sdk diff --git a/test/integration/consul-container/go.sum b/test/integration/consul-container/go.sum new file mode 100644 index 0000000000..40ef073b97 --- /dev/null +++ b/test/integration/consul-container/go.sum @@ -0,0 +1,1110 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod 
h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm 
v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= 
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.23 h1:47MSwtKGXet80aIn+7h4YI6fwPmwIghAnsx2aOUrG2M= +github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag 
v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks 
v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod 
h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= 
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= 
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4= +github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity 
v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt 
v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod 
h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod 
h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.11+incompatible h1:OqzI/g/W54LczvhnccGqniFoQghHx3pklbLuhfXpqGo= +github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= 
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= 
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod 
h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg= +github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf 
v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf 
v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid 
v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= 
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.1 
h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= +github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= 
+github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= +github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod 
h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec 
v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete 
v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize 
v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod 
h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= 
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/testcontainers/testcontainers-go v0.13.0 h1:OUujSlEGsXVo/ykPVZk3KanBNGN0TYb/7oKIPVn15JA= +github.com/testcontainers/testcontainers-go v0.13.0/go.mod h1:z1abufU633Eb/FmSBTzV6ntZAC1eZBYPtaFsn4nPuDk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= 
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yvasiyarov/go-metrics 
v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp 
v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod 
h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e 
h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf 
v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod 
h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/gotestsum v1.7.0/go.mod h1:V1m4Jw3eBerhI/A6qCxUE07RnCg7ACkKj9BYcAm09V8= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 
v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/test/integration/consul-container/libs/cluster/cluster.go b/test/integration/consul-container/libs/cluster/cluster.go new file mode 100644 index 0000000000..990114b8cc --- /dev/null +++ b/test/integration/consul-container/libs/cluster/cluster.go @@ -0,0 +1,88 @@ +package cluster + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/consul/integration/consul-container/libs/node" +) + +// Cluster provides an interface for creating and controlling a Consul cluster +// in integration tests, with nodes running in containers. +type Cluster struct { + Nodes []node.Node +} + +// New creates a Consul cluster. A node will be started for each of the given +// configs and joined to the cluster. 
+func New(configs []node.Config) (*Cluster, error) { + cluster := Cluster{} + + nodes := make([]node.Node, len(configs)) + for idx, c := range configs { + n, err := node.NewConsulContainer(context.Background(), c) + if err != nil { + return nil, err + } + nodes[idx] = n + } + if err := cluster.AddNodes(nodes); err != nil { + return nil, err + } + return &cluster, nil +} + +// AddNodes joins the given nodes to the cluster. +func (c *Cluster) AddNodes(nodes []node.Node) error { + var joinAddr string + if len(c.Nodes) >= 1 { + joinAddr, _ = c.Nodes[0].GetAddr() + } else if len(nodes) >= 1 { + joinAddr, _ = nodes[0].GetAddr() + } + + for _, node := range nodes { + err := node.GetClient().Agent().Join(joinAddr, false) + if err != nil { + return err + } + c.Nodes = append(c.Nodes, node) + } + return nil +} + +// Terminate will attempt to terminate all nodes in the cluster. If any node +// termination fails, Terminate will abort and return an error. +func (c *Cluster) Terminate() error { + for _, n := range c.Nodes { + err := n.Terminate() + if err != nil { + return err + } + } + return nil +} + +// Leader returns the cluster leader node, or an error if no leader is +// available. 
+func (c *Cluster) Leader() (node.Node, error) { + if len(c.Nodes) < 1 { + return nil, fmt.Errorf("no node available") + } + n0 := c.Nodes[0] + leaderAdd, err := n0.GetClient().Status().Leader() + if err != nil { + return nil, err + } + if leaderAdd == "" { + return nil, fmt.Errorf("no leader available") + } + for _, n := range c.Nodes { + addr, _ := n.GetAddr() + if strings.Contains(leaderAdd, addr) { + return n, nil + } + } + return nil, fmt.Errorf("leader not found") +} diff --git a/test/integration/consul-container/libs/node/consul-container.go b/test/integration/consul-container/libs/node/consul-container.go new file mode 100644 index 0000000000..bf5b99421e --- /dev/null +++ b/test/integration/consul-container/libs/node/consul-container.go @@ -0,0 +1,149 @@ +package node + +import ( + "context" + "fmt" + "os" + "strconv" + "time" + + "github.com/docker/docker/pkg/ioutils" + "github.com/hashicorp/consul/api" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + + "github.com/hashicorp/consul/integration/consul-container/libs/utils" +) + +const bootLogLine = "Consul agent running" +const disableRYUKEnv = "TESTCONTAINERS_RYUK_DISABLED" + +// consulContainerNode implements the Node interface by running a Consul node +// in a container. +type consulContainerNode struct { + ctx context.Context + client *api.Client + container testcontainers.Container + ip string + port int +} + +// NewConsulContainer starts a Consul node in a container with the given config. 
+func NewConsulContainer(ctx context.Context, config Config) (Node, error) { + + license, err := readLicense() + if err != nil { + return nil, err + } + name := utils.RandName("consul-") + tmpDir, err := ioutils.TempDir("", name) + if err != nil { + return nil, err + } + err = os.Chmod(tmpDir, 0777) + if err != nil { + return nil, err + } + err = os.Mkdir(tmpDir+"/config", 0777) + if err != nil { + return nil, err + } + configFile := tmpDir + "/config/config.hcl" + err = os.WriteFile(configFile, []byte(config.HCL), 0644) + if err != nil { + return nil, err + } + skipReaper := isRYUKDisabled() + req := testcontainers.ContainerRequest{ + Image: "consul:" + config.Version, + ExposedPorts: []string{"8500/tcp"}, + WaitingFor: wait.ForLog(bootLogLine).WithStartupTimeout(10 * time.Second), + AutoRemove: false, + Name: name, + Mounts: testcontainers.ContainerMounts{testcontainers.ContainerMount{Source: testcontainers.DockerBindMountSource{HostPath: configFile}, Target: "/consul/config/config.hcl"}}, + Cmd: config.Cmd, + SkipReaper: skipReaper, + Env: map[string]string{"CONSUL_LICENSE": license}, + } + container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) + if err != nil { + return nil, err + } + + localIP, err := container.Host(ctx) + if err != nil { + return nil, err + } + + mappedPort, err := container.MappedPort(ctx, "8500") + if err != nil { + return nil, err + } + + ip, err := container.ContainerIP(ctx) + if err != nil { + return nil, err + } + + uri := fmt.Sprintf("http://%s:%s", localIP, mappedPort.Port()) + c := new(consulContainerNode) + c.container = container + c.ip = ip + c.port = mappedPort.Int() + apiConfig := api.DefaultConfig() + apiConfig.Address = uri + c.client, err = api.NewClient(apiConfig) + c.ctx = ctx + + if err != nil { + return nil, err + } + return c, nil +} + +// GetClient returns an API client that can be used to communicate with the Node. 
+func (c *consulContainerNode) GetClient() *api.Client { + return c.client +} + +// GetAddr return the network address associated with the Node. +func (c *consulContainerNode) GetAddr() (string, int) { + return c.ip, c.port +} + +// Terminate attempts to terminate the container. On failure, an error will be +// returned and the reaper process (RYUK) will handle cleanup. +func (c *consulContainerNode) Terminate() error { + return c.container.Terminate(c.ctx) +} + +// isRYUKDisabled returns whether the reaper process (RYUK) has been disabled +// by an environment variable. +// +// https://github.com/testcontainers/moby-ryuk +func isRYUKDisabled() bool { + skipReaperStr := os.Getenv(disableRYUKEnv) + skipReaper, err := strconv.ParseBool(skipReaperStr) + if err != nil { + return false + } + return skipReaper +} + +func readLicense() (string, error) { + license := os.Getenv("CONSUL_LICENSE") + if license == "" { + licensePath := os.Getenv("CONSUL_LICENSE_PATH") + if licensePath != "" { + licenseBytes, err := os.ReadFile(licensePath) + if err != nil { + return "", err + } + license = string(licenseBytes) + } + } + return license, nil +} diff --git a/test/integration/consul-container/libs/node/node.go b/test/integration/consul-container/libs/node/node.go new file mode 100644 index 0000000000..80a1c9e92c --- /dev/null +++ b/test/integration/consul-container/libs/node/node.go @@ -0,0 +1,17 @@ +package node + +import "github.com/hashicorp/consul/api" + +// Node represent a Consul node abstraction +type Node interface { + Terminate() error + GetClient() *api.Client + GetAddr() (string, int) +} + +// Config is a set of configurations required to create a Node +type Config struct { + HCL string + Version string + Cmd []string +} diff --git a/test/integration/consul-container/libs/utils/utils.go b/test/integration/consul-container/libs/utils/utils.go new file mode 100644 index 0000000000..552b85d0b7 --- /dev/null +++ b/test/integration/consul-container/libs/utils/utils.go @@ 
-0,0 +1,13 @@ +package utils + +import ( + "github.com/hashicorp/go-uuid" +) + +func RandName(name string) string { + generateUUID, err := uuid.GenerateUUID() + if err != nil { + return "" + } + return name + generateUUID +} diff --git a/test/integration/consul-container/upgrade/README.md b/test/integration/consul-container/upgrade/README.md new file mode 100644 index 0000000000..9d2e52f46c --- /dev/null +++ b/test/integration/consul-container/upgrade/README.md @@ -0,0 +1,6 @@ +# Consul Upgrade Integration tests +## Local run +- run `make dev-docker` +- run the tests. + +To specify targets and latest image pass `target-version` and `latest-version` to the tests. By default, it uses the `consul` docker image with respectively `local` and `latest` tags. \ No newline at end of file diff --git a/test/integration/consul-container/upgrade/healthcheck_test.go b/test/integration/consul-container/upgrade/healthcheck_test.go new file mode 100644 index 0000000000..2653262fac --- /dev/null +++ b/test/integration/consul-container/upgrade/healthcheck_test.go @@ -0,0 +1,274 @@ +package consul_container + +import ( + "context" + "flag" + "fmt" + "testing" + "time" + + "github.com/hashicorp/consul/api" + + "github.com/hashicorp/consul/integration/consul-container/libs/cluster" + "github.com/hashicorp/consul/integration/consul-container/libs/node" + + "github.com/hashicorp/consul/integration/consul-container/libs/utils" + "github.com/hashicorp/consul/sdk/testutil/retry" + + "github.com/stretchr/testify/require" +) + +var targetImage = flag.String("target-version", "local", "docker image to be used as UUT (unit under test)") +var latestImage = flag.String("latest-version", "latest", "docker image to be used as latest") + +const retryTimeout = 10 * time.Second +const retryFrequency = 500 * time.Millisecond + +// Test health check GRPC call using Current Servers and Latest GA Clients +func TestCurrentServersWithLatestGAClients(t *testing.T) { + t.Parallel() + numServers := 3 + cluster, 
err := serversCluster(t, numServers, *targetImage) + require.NoError(t, err) + defer Terminate(t, cluster) + numClients := 1 + + clients, err := clientsCreate(numClients) + client := cluster.Nodes[0].GetClient() + err = cluster.AddNodes(clients) + retry.RunWith(&retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}, t, func(r *retry.R) { + leader, err := cluster.Leader() + require.NoError(r, err) + require.NotEmpty(r, leader) + members, err := client.Agent().Members(false) + require.Len(r, members, 4) + }) + serviceName := "api" + err, index := serviceCreate(t, client, serviceName) + + ch := make(chan []*api.ServiceEntry) + errCh := make(chan error) + + go func() { + service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index}) + if q.QueryBackend != api.QueryBackendStreaming { + err = fmt.Errorf("invalid backend for this test %s", q.QueryBackend) + } + if err != nil { + errCh <- err + } else { + ch <- service + } + }() + err = client.Agent().ServiceRegister(&api.AgentServiceRegistration{Name: serviceName, Port: 9998}) + timer := time.NewTimer(1 * time.Second) + select { + case err := <-errCh: + require.NoError(t, err) + case service := <-ch: + require.Len(t, service, 1) + require.Equal(t, serviceName, service[0].Service.Service) + require.Equal(t, 9998, service[0].Service.Port) + case <-timer.C: + t.Fatalf("test timeout") + } +} + +// Test health check GRPC call using Mixed (majority latest) Servers and Latest GA Clients +func TestMixedServersMajorityLatestGAClient(t *testing.T) { + t.Parallel() + var configs []node.Config + configs = append(configs, + node.Config{ + HCL: `node_name="` + utils.RandName("consul-server") + `" + log_level="TRACE" + server=true`, + Cmd: []string{"agent", "-client=0.0.0.0"}, + Version: *targetImage, + }) + + for i := 1; i < 3; i++ { + configs = append(configs, + node.Config{ + HCL: `node_name="` + utils.RandName("consul-server") + `" + log_level="TRACE" + bootstrap_expect=3 + server=true`, + 
Cmd: []string{"agent", "-client=0.0.0.0"}, + Version: *latestImage, + }) + + } + + cluster, err := cluster.New(configs) + require.NoError(t, err) + defer Terminate(t, cluster) + + numClients := 1 + clients, err := clientsCreate(numClients) + client := clients[0].GetClient() + err = cluster.AddNodes(clients) + retry.RunWith(&retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}, t, func(r *retry.R) { + leader, err := cluster.Leader() + require.NoError(r, err) + require.NotEmpty(r, leader) + members, err := client.Agent().Members(false) + require.Len(r, members, 4) + }) + + serviceName := "api" + err, index := serviceCreate(t, client, serviceName) + + ch := make(chan []*api.ServiceEntry) + errCh := make(chan error) + go func() { + service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index}) + if q.QueryBackend != api.QueryBackendStreaming { + err = fmt.Errorf("invalid backend for this test %s", q.QueryBackend) + } + if err != nil { + errCh <- err + } else { + ch <- service + } + }() + err = client.Agent().ServiceRegister(&api.AgentServiceRegistration{Name: serviceName, Port: 9998}) + timer := time.NewTimer(1 * time.Second) + select { + case err := <-errCh: + require.NoError(t, err) + case service := <-ch: + require.Len(t, service, 1) + require.Equal(t, serviceName, service[0].Service.Service) + require.Equal(t, 9998, service[0].Service.Port) + case <-timer.C: + t.Fatalf("test timeout") + } +} + +// Test health check GRPC call using Mixed (majority current) Servers and Latest GA Clients +func TestMixedServersMajorityCurrentGAClient(t *testing.T) { + t.Parallel() + var configs []node.Config + for i := 0; i < 2; i++ { + configs = append(configs, + node.Config{ + HCL: `node_name="` + utils.RandName("consul-server") + `" + log_level="TRACE" + bootstrap_expect=3 + server=true`, + Cmd: []string{"agent", "-client=0.0.0.0"}, + Version: *targetImage, + }) + + } + configs = append(configs, + node.Config{ + HCL: `node_name="` + 
utils.RandName("consul-server") + `" + log_level="TRACE" + server=true`, + Cmd: []string{"agent", "-client=0.0.0.0"}, + Version: *latestImage, + }) + + cluster, err := cluster.New(configs) + require.NoError(t, err) + defer Terminate(t, cluster) + + numClients := 1 + clients, err := clientsCreate(numClients) + client := clients[0].GetClient() + err = cluster.AddNodes(clients) + retry.RunWith(&retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}, t, func(r *retry.R) { + leader, err := cluster.Leader() + require.NoError(r, err) + require.NotEmpty(r, leader) + members, err := client.Agent().Members(false) + require.Len(r, members, 4) + }) + + serviceName := "api" + err, index := serviceCreate(t, client, serviceName) + + ch := make(chan []*api.ServiceEntry) + errCh := make(chan error) + go func() { + service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index}) + if q.QueryBackend != api.QueryBackendStreaming { + err = fmt.Errorf("invalid backend for this test %s", q.QueryBackend) + } + if err != nil { + errCh <- err + } else { + ch <- service + } + }() + err = client.Agent().ServiceRegister(&api.AgentServiceRegistration{Name: serviceName, Port: 9998}) + timer := time.NewTimer(1 * time.Second) + select { + case err := <-errCh: + require.NoError(t, err) + case service := <-ch: + require.Len(t, service, 1) + require.Equal(t, serviceName, service[0].Service.Service) + require.Equal(t, 9998, service[0].Service.Port) + case <-timer.C: + t.Fatalf("test timeout") + } +} + +func clientsCreate(numClients int) ([]node.Node, error) { + clients := make([]node.Node, numClients) + var err error + for i := 0; i < numClients; i++ { + clients[i], err = node.NewConsulContainer(context.Background(), + node.Config{ + HCL: `node_name="` + utils.RandName("consul-client") + `" + log_level="TRACE"`, + Cmd: []string{"agent", "-client=0.0.0.0"}, + Version: *targetImage, + }) + } + return clients, err +} + +func serviceCreate(t *testing.T, client 
*api.Client, serviceName string) (error, uint64) { + err := client.Agent().ServiceRegister(&api.AgentServiceRegistration{Name: serviceName, Port: 9999}) + require.NoError(t, err) + service, meta, err := client.Catalog().Service(serviceName, "", &api.QueryOptions{}) + require.NoError(t, err) + require.Len(t, service, 1) + require.Equal(t, serviceName, service[0].ServiceName) + require.Equal(t, 9999, service[0].ServicePort) + return err, meta.LastIndex +} + +func serversCluster(t *testing.T, numServers int, image string) (*cluster.Cluster, error) { + var err error + var configs []node.Config + for i := 0; i < numServers; i++ { + configs = append(configs, node.Config{ + HCL: `node_name="` + utils.RandName("consul-server") + `" + log_level="TRACE" + bootstrap_expect=3 + server=true`, + Cmd: []string{"agent", "-client=0.0.0.0"}, + Version: image, + }) + } + cluster, err := cluster.New(configs) + require.NoError(t, err) + retry.RunWith(&retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}, t, func(r *retry.R) { + leader, err := cluster.Leader() + require.NoError(r, err) + require.NotEmpty(r, leader) + members, err := cluster.Nodes[0].GetClient().Agent().Members(false) + require.Len(r, members, numServers) + }) + return cluster, err +} + +func Terminate(t *testing.T, cluster *cluster.Cluster) { + err := cluster.Terminate() + require.NoError(t, err) +} diff --git a/test/key/ourdomain.cer b/test/key/ourdomain.cer index 52d13b59e1..bae6090851 100644 --- a/test/key/ourdomain.cer +++ b/test/key/ourdomain.cer @@ -1,25 +1,26 @@ -----BEGIN CERTIFICATE----- -MIIERjCCAy6gAwIBAgIBEDANBgkqhkiG9w0BAQUFADCBmDELMAkGA1UEBhMCVVMx +MIIETTCCAzWgAwIBAgIBETANBgkqhkiG9w0BAQ0FADCBmDELMAkGA1UEBhMCVVMx CzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRwwGgYDVQQKExNI YXNoaUNvcnAgVGVzdCBDZXJ0MQwwCgYDVQQLEwNEZXYxFjAUBgNVBAMTDXRlc3Qu -aW50ZXJuYWwxIDAeBgkqhkiG9w0BCQEWEXRlc3RAaW50ZXJuYWwuY29tMCAXDTE4 -MDUxMjA5MTAwOVoYDzIxMTgwNDE4MDkxMDA5WjCBhjEYMBYGA1UEAwwPdGVzdGNv 
-LmludGVybmFsMRMwEQYDVQQIDApDYWxpZm9ybmlhMQswCQYDVQQGEwJVUzEiMCAG -CSqGSIb3DQEJARYTamFtZXNAaGFzaGljb3JwLmNvbTESMBAGA1UECgwJRW5kIFBv -aW50MRAwDgYDVQQLDAdUZXN0aW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEA0X9Ft3q7EbTgyt4W0BwGtZ/kdDw+k2VEXs9GXRh7BG0sjWIu4szAbkau -igKwAdCcAHfZe4fRNTtzlUb7RnYSLB9SJZEbvwM07mfesR1ZpxtIKsCFZ8DjJ6Wo -eAvc+2JTIcWZLXuDIIIMZ6plvPbHN8RRnC5H4fw9Z8L+qmyyn0o7+4SClkhf2AZa -6WmoZCMbrSLMQdhx1MZTO86GeUJpIG0l3XJLb7wnfn5WDG/GZB8TGAycRD1EP5mx -wzgNqJLvL3TgL0d9NIwC0rpQC4qeP6pzngdr0KV0vgFyYoSBLHiU77+HL1C8QFN4 -fWGoBjEfkVPjHKOk323OgJKWizB34wIDAQABo4GoMIGlMAkGA1UdEwQCMAAwHQYD -VR0OBBYEFHJwH4f2QlFTTll+bnNiZZBo1oheMB8GA1UdIwQYMBaAFKP56zn6r23n -tlZJVSiBZckBG7iVMAsGA1UdDwQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYI -KwYBBQUHAwIwLAYDVR0fBCUwIzAhoB+gHYYbaHR0cDovL3BhdGgudG8uY3JsL215 -Y2EuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQA0ICTh1Dli9siCA5heDl51YCjoCVGa -B7OfoJStOW3BjesingD6kpQUPdbjr0qFzvSsn7IVd8v9IGr/hknBy9gjroPmwoct -gTgTuZpRm727AQiA6KSANnOz+dwb4r0ckdDqIrUTmk4lV7Pdk0lPONtGxfa8c3gY -QjaML7GK9QRU56RmYar+5VV2wI24lqz6cwpwTCa0gpZTRRKorpBONjSpZY4myGT4 -rWRkGTu59XX0POvQxg4i2CL5Lu6WE43APoFRJBCYIQoTqOi7KwlaYqJZG7pa8LU0 -mjDUjW3cNxthYLk2q3cZ4+Or5hbUZGBFhD716+FnChZ/531lgrGWLLMN +aW50ZXJuYWwxIDAeBgkqhkiG9w0BCQEWEXRlc3RAaW50ZXJuYWwuY29tMCAXDTIy +MDQxNTE0MjUzOFoYDzIxMjIwMzIyMTQyNTM4WjCBjTEYMBYGA1UEAwwPdGVzdGNv +LmludGVybmFsMRMwEQYDVQQIDApDYWxpZm9ybmlhMQswCQYDVQQGEwJVUzEpMCcG +CSqGSIb3DQEJARYaZG8tbm90LXJlcGx5QGhhc2hpY29ycC5jb20xEjAQBgNVBAoM +CUVuZCBQb2ludDEQMA4GA1UECwwHVGVzdGluZzCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAK6g0P0eGgLw2B63xyXjRhy5WnbYegJoQJtpnJ0NvgJyZfCz +G6vIw/xjtriyW2rcw9qoEQ7aerN93UdUQaECe3J4QalobFbw9VCGIPJEblBBBKAk +Y8Ek3Ldv6WWO0hWhho11JgjhpGfpFJtDKKs9vZ/tDwiU549ra5tTppMvyZIce+nW +SVkQAlq7zFUshgBu0k1tliU9bOUwZlRnT5xnDTHhKAqyBNGX5pVxhLXv+FM9UMHw +UbCbbucWb3oF1wbARTtLnDsmI0V9PPsUVAbc+sZ4ZQKcNmq92zKq3MjB93Kitfx+ +IdSGCJ+bP8mTQCTDrqDVVCsCaeOZ4Ufa+9kRvcsCAwEAAaOBqDCBpTAJBgNVHRME +AjAAMB0GA1UdDgQWBBTCmo+KBFV1kXzUsiC7xtwK8I9udjAfBgNVHSMEGDAWgBSj 
++es5+q9t57ZWSVUogWXJARu4lTALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB +BQUHAwEGCCsGAQUFBwMCMCwGA1UdHwQlMCMwIaAfoB2GG2h0dHA6Ly9wYXRoLnRv +LmNybC9teWNhLmNybDANBgkqhkiG9w0BAQ0FAAOCAQEAr/evKySRc48PNzFovBbx +vWtHgIunJ9JOE8vJyomiuup99AaLvUkRvDIdQjLRac/0rgCD3NXjqQIb5QZPmuVy +w4obNwQaqfJdLys+pQUo1Ly0nPTs5ValIyICDAUf066lcMKNIh6oSn9y9kp/DqBP +feucrJLpwVKHsuUddDCbDPUNwgSbOC6mGvDfA2Q5bd9DMDuBWeRrU7qnfyNCVbem +V2mljJl5TOEc/Yn2vES7rFv987QXOhZGw1Eerhxazi+gwJvxiC1oE5urNk9k1UL/ +byayC5BQiDSee9oyE0YDvKRD9lcvQuk7hVLBv2rY1rqNsPaJKncrnXTtJBqMQHVA +cg== -----END CERTIFICATE----- diff --git a/test/key/ourdomain.cfg b/test/key/ourdomain.cfg index 1b1bbbc927..f20f98b787 100644 --- a/test/key/ourdomain.cfg +++ b/test/key/ourdomain.cfg @@ -8,5 +8,5 @@ ST = California L = Los Angeles O = End Point OU = Testing -emailAddress = james@hashicorp.com +emailAddress = do-not-reply@hashicorp.com CN = testco.internal diff --git a/test/key/ourdomain.key b/test/key/ourdomain.key index d997337c13..7a02cadd06 100644 --- a/test/key/ourdomain.key +++ b/test/key/ourdomain.key @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDRf0W3ersRtODK -3hbQHAa1n+R0PD6TZURez0ZdGHsEbSyNYi7izMBuRq6KArAB0JwAd9l7h9E1O3OV -RvtGdhIsH1IlkRu/AzTuZ96xHVmnG0gqwIVnwOMnpah4C9z7YlMhxZkte4Mgggxn -qmW89sc3xFGcLkfh/D1nwv6qbLKfSjv7hIKWSF/YBlrpaahkIxutIsxB2HHUxlM7 -zoZ5QmkgbSXdcktvvCd+flYMb8ZkHxMYDJxEPUQ/mbHDOA2oku8vdOAvR300jALS -ulALip4/qnOeB2vQpXS+AXJihIEseJTvv4cvULxAU3h9YagGMR+RU+Mco6Tfbc6A -kpaLMHfjAgMBAAECggEAJeSNaaiLWaKL3mXZXn8TP5rSKawT7XktqrB3G7On3J8a -peASdvdt/wRN4aymxU1ESlljPxLL5oMAXwndvVrx4oUvyJe8mworcsva3dJfOviW -TxVPi/q5m5w9IqmSqO2Z98vT7wQeLa0YLVAG4u0ID7A0yrkcS2XifXgptA3BKUpi -QwukeaVLFJQDIUnokyvNLKryQh6wRd3+qKlKLJCxKVHRBIXafYo+gYarKI9Npjex -3jbf2cTpIEBTOc8vKsUGfJIJg0E6y6LGgCL2I7YUOh3WCJEKag64ufpSvwGcpmi8 -/u2H1YWJn0HzCeWfy+8q9iamLlkc+DcbxV/T5pPqgQKBgQDxCZUmQC3/NBiT11Hr -PT8k8TAW2BbvwIsBa/PhnkRUGHyUZAw/dqoQZzy42g4xa2Rl8ZOCVOEFB726RzOo 
-KzOIqVUxZFrt6upyU6UB1ypETz0l3dmRwh0pA/7Ko5kxSE0Jy4CJl7d706uVGCTf -5/6KRL2aMxVgCZH9tomCfWJ+wwKBgQDegHiiwUSPgbJwGMPc1OdTSOy6Zn7702T9 -GRDgEzXDRJqFrOh3GkUDRUYXXGWuP9ZydD8Bpah2OE2SzPNQf9SYzu84KLivUUkP -jE/IHx8Avjx+Sj3EvUNuONfWD/Ch043nqpsEQ6WJZuumf3DVu6fJk49o+4n241U6 -pI2mmKDQYQKBgBhYCmtJkhuzTEQqPAjRL75waZX1DyP5w1BKceA4ltgTfQmTrTT/ -rB9p/dUBmOte2E3/fxFrtypF5OCablouus6zo3oQk6pxzmnrjr/H1mn9wsQ/SskQ -3NcWozYeHcu/bKBvoDTFUO+9qhetz5OZn7ihRrD7Nc50SP1h4TN/rGH3AoGBAIvE -iAM1BKxg/IYOCHsgAm/+zzYITJxEHpwesssPRiZzYd220BCBH/j9+xmRoQ3kbAFZ -pHqUZU5d79zXgcB/jDyxQPQ2IE2A8jQiH7vGUONWnQl3+XUsrr7+VhbRzIbbLbjp -Ipd7JvE5Ba6BP5ADYVLurpdz6yZ7h35e/9w25E4BAoGAN6OGNF3wKP9gGMKgxpOu -SemLp6v8WGOTuqbqkhfsbLCd4IR6apYh5AWn2aiIq0cJvkUfgb8/yGAbP/fqsMXd -IvVqiOGKoMHfB4bb6grJk3CdpgHcaOtNowFRDKzXNuXH7f7xNNxSABIdXk6aSmkI -NEBFopxmFg7bQdfXMaciFBE= +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCuoND9HhoC8Nge +t8cl40YcuVp22HoCaECbaZydDb4CcmXwsxuryMP8Y7a4sltq3MPaqBEO2nqzfd1H +VEGhAntyeEGpaGxW8PVQhiDyRG5QQQSgJGPBJNy3b+lljtIVoYaNdSYI4aRn6RSb +QyirPb2f7Q8IlOePa2ubU6aTL8mSHHvp1klZEAJau8xVLIYAbtJNbZYlPWzlMGZU +Z0+cZw0x4SgKsgTRl+aVcYS17/hTPVDB8FGwm27nFm96BdcGwEU7S5w7JiNFfTz7 +FFQG3PrGeGUCnDZqvdsyqtzIwfdyorX8fiHUhgifmz/Jk0Akw66g1VQrAmnjmeFH +2vvZEb3LAgMBAAECggEBAIVRvXwhKRaprTX2dJIWa4auc8RcDPodgYWlIa49uZzA +ndcfxrZltkrQDcYAVQ7GsLgUq8E9R4QVSYSVbO1xqbGA4hBl6qCNoZvyauDLIbUf +jlp8rbPdYqyhQf0JBpcBFGqWz4zkR9IU/mhy42+o2UZpg5q3o/m4txSEzp18VFW5 +KiuArjFmnfrIMvIcL/CEYUCbr1hhPoySOikQZbAObNXgOKurGXR52TgUztMwt32A +oJUAmtLu+ti4Q/ffKhaV0pfRhB0Byqsbm/ONDVTdKUwKDiMrBV8CqRquXunHWJ7D +sqBi8EnM6Ke08ZLqobqQWKmjVMT41rQYAk/1FqEGWJECgYEA3UfggUMX6nysXzVs +MZ7RNbjoSwISa/4I2QGSf3moW7N7G2NTcHPjOM2qhZ2IqR5QTQ8Q9zpPpeOtWquj +3+G45fGCT8aB1A6Pt1d/oTU1g5s7cHn0DzDNB4+5iWwPE+x5XdaZU1kzm6hTS6H6 +Roby7/Hm1PUw+0SXyKSQ9vw4HTcCgYEAygcO5SB0LqjTumRjhAM0OQSRU+rADaJA +X6g5IkjFMm20AH9cA/rg7ZVw3D3ZPZg0yd+qqhE5rIFvVyLUhkaFobZ17i+pUKcf +GH9m5BIuYyg7n78uy/0F9RTZbv2U0nHObUdg5jK4/9PQvhLihEIBEbl7X4RWlgtU 
+3oZKsP36zg0CgYABUDzn54ML1EOdqQ6EWOH7BKb0UwXS+EYLK7Q353v1V2Jirjs/ +jqCJpMbfVikKf/CQFIfQP9tbK7fKsvwdBxT24HEakh4RKSj3OKC8TzmLF2/J4h9t +u6dr5RF/3FFWl++8e9qbIQtqYBxmdYarxn9Ip1Hsb6wjwat4+GkX3jVjDwKBgQDH +CMkd5ylPRrjBa2G3j0iF3AApQp9PT0hIdX1ET5kno3iw/Mh0i1fJ+W6lLLG3wxpO +wHJs9mdxkltU51WlrBi/RvlMXdxbPyqdgfamP1tACUUkjr/V7ENQPugwNtfFtKWA +d8/5OoOUVuPSPty3HCfdhHUNl12OmT9Vs8wmLzJGiQKBgQDDC8MU/Llbg/Lhl76j +VuOrOci4p6fc5ICagFYBUyms0wsyP7RWseCgFQtthDPuBCArewY4j7AqmRgkDrqa +gOCrJeptdKT2oCkn6AlxBc9kP2Y0N5vLEkwkkrQAbP/3iG/d4raOSTKMOT8voXpv +f4HT3Zolz4FPrtFKVOi0VA3Z5A== -----END PRIVATE KEY----- diff --git a/testrpc/wait.go b/testrpc/wait.go index 230f2f38a8..d6b72749e2 100644 --- a/testrpc/wait.go +++ b/testrpc/wait.go @@ -12,6 +12,9 @@ import ( type rpcFn func(string, interface{}, interface{}) error // WaitForLeader ensures we have a leader and a node registration. +// +// Most uses of this would be better served in the agent/consul package by +// using waitForLeaderEstablishment() instead. func WaitForLeader(t *testing.T, rpc rpcFn, dc string, options ...waitOption) { t.Helper() diff --git a/tlsutil/config.go b/tlsutil/config.go index bf4e9f6c6e..da85c2e721 100644 --- a/tlsutil/config.go +++ b/tlsutil/config.go @@ -447,15 +447,19 @@ func (c *Configurator) Base() Config { // find bugs. By accepting a varargs of slices we remove the need for the // caller to append the groups, which should prevent any such bugs. 
func newX509CertPool(groups ...[]string) (*x509.CertPool, error) { + var haveCerts bool pool := x509.NewCertPool() for _, group := range groups { for _, pem := range group { if !pool.AppendCertsFromPEM([]byte(pem)) { return nil, fmt.Errorf("failed to parse PEM %s", pem) } + if len(pem) > 0 { + haveCerts = true + } } } - if len(pool.Subjects()) == 0 { + if !haveCerts { return nil, nil } return pool, nil diff --git a/tlsutil/config_test.go b/tlsutil/config_test.go index b49bd66bcc..75fa839458 100644 --- a/tlsutil/config_test.go +++ b/tlsutil/config_test.go @@ -7,6 +7,8 @@ import ( "io" "io/ioutil" "net" + "os" + "path" "path/filepath" "testing" @@ -1018,15 +1020,16 @@ func TestConfigurator_LoadCAs(t *testing.T) { shouldErr bool isNil bool count int + expectedCaPool *x509.CertPool } variants := []variant{ - {"", "", false, true, 0}, - {"bogus", "", true, true, 0}, - {"", "bogus", true, true, 0}, - {"", "../test/bin", true, true, 0}, - {"../test/ca/root.cer", "", false, false, 1}, - {"", "../test/ca_path", false, false, 2}, - {"../test/ca/root.cer", "../test/ca_path", false, false, 1}, + {"", "", false, true, 0, nil}, + {"bogus", "", true, true, 0, nil}, + {"", "bogus", true, true, 0, nil}, + {"", "../test/bin", true, true, 0, nil}, + {"../test/ca/root.cer", "", false, false, 1, getExpectedCaPoolByFile(t)}, + {"", "../test/ca_path", false, false, 2, getExpectedCaPoolByDir(t)}, + {"../test/ca/root.cer", "../test/ca_path", false, false, 1, getExpectedCaPoolByFile(t)}, } for i, v := range variants { pems, err1 := LoadCAs(v.cafile, v.capath) @@ -1045,7 +1048,7 @@ func TestConfigurator_LoadCAs(t *testing.T) { } else { require.NotEmpty(t, pems, info) require.NotNil(t, pool, info) - require.Len(t, pool.Subjects(), v.count, info) + assertDeepEqual(t, v.expectedCaPool, pool, cmpCertPool) require.Len(t, pems, v.count, info) } } @@ -1325,7 +1328,7 @@ func TestConfigurator_AutoEncryptCert(t *testing.T) { cert, err = loadKeyPair("../test/key/ourdomain.cer", 
"../test/key/ourdomain.key") require.NoError(t, err) c.autoTLS.cert = cert - require.Equal(t, int64(4679716209), c.AutoEncryptCert().NotAfter.Unix()) + require.Equal(t, int64(4803632738), c.AutoEncryptCert().NotAfter.Unix()) } func TestConfigurator_AuthorizeInternalRPCServerConn(t *testing.T) { @@ -1570,3 +1573,51 @@ func loadFile(t *testing.T, path string) string { require.NoError(t, err) return string(data) } + +func getExpectedCaPoolByFile(t *testing.T) *x509.CertPool { + pool := x509.NewCertPool() + data, err := ioutil.ReadFile("../test/ca/root.cer") + if err != nil { + t.Fatal("could not open test file ../test/ca/root.cer for reading") + } + if !pool.AppendCertsFromPEM(data) { + t.Fatal("could not add test ca ../test/ca/root.cer to pool") + } + return pool +} + +func getExpectedCaPoolByDir(t *testing.T) *x509.CertPool { + pool := x509.NewCertPool() + entries, err := os.ReadDir("../test/ca_path") + if err != nil { + t.Fatal("could not open test dir ../test/ca_path for reading") + } + + for _, entry := range entries { + filename := path.Join("../test/ca_path", entry.Name()) + + data, err := ioutil.ReadFile(filename) + if err != nil { + t.Fatalf("could not open test file %s for reading", filename) + } + + if !pool.AppendCertsFromPEM(data) { + t.Fatalf("could not add test ca %s to pool", filename) + } + } + + return pool +} + +// lazyCerts has a func field which can't be compared. 
+var cmpCertPool = cmp.Options{ + cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"), + cmp.AllowUnexported(x509.CertPool{}), +} + +func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) { + t.Helper() + if diff := cmp.Diff(x, y, opts...); diff != "" { + t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff) + } +} diff --git a/ui/packages/consul-ui/app/abilities/license.js b/ui/packages/consul-ui/app/abilities/license.js index d1061e4e78..fb4d425c6a 100644 --- a/ui/packages/consul-ui/app/abilities/license.js +++ b/ui/packages/consul-ui/app/abilities/license.js @@ -1,6 +1,13 @@ import BaseAbility from './base'; +import { inject as service } from '@ember/service'; export default class LicenseAbility extends BaseAbility { resource = 'operator'; segmented = false; + + @service('env') env; + + get canRead() { + return this.env.var('CONSUL_NSPACES_ENABLED') && super.canRead; + } } diff --git a/ui/packages/consul-ui/app/styles/routes/dc/overview/serverstatus.scss b/ui/packages/consul-ui/app/styles/routes/dc/overview/serverstatus.scss index 7b36bec251..47e8f12888 100644 --- a/ui/packages/consul-ui/app/styles/routes/dc/overview/serverstatus.scss +++ b/ui/packages/consul-ui/app/styles/routes/dc/overview/serverstatus.scss @@ -33,6 +33,11 @@ section[data-route='dc.show.serverstatus'] { border-bottom: var(--decor-border-100); border-color: rgb(var(--tone-border)); } +%server-failure-tolerance > header a { + float: right; + margin-top: 4px; + font-weight: var(--typo-weight-semibold); +} %server-failure-tolerance header em { @extend %pill-200; font-size: 0.812rem; /* 13px */ diff --git a/ui/packages/consul-ui/app/templates/application.hbs b/ui/packages/consul-ui/app/templates/application.hbs index d1c8a3a234..7932cb0bde 100644 --- a/ui/packages/consul-ui/app/templates/application.hbs +++ b/ui/packages/consul-ui/app/templates/application.hbs @@ -50,7 +50,9 @@ as |source|> {{! 
until we get to the dc route we don't know any permissions }} {{! as we don't know the dc, any inital permission based }} {{! redirects are in the dc.show route}} - {{did-insert (route-action 'replaceWith' 'dc.show' + +{{! 2022-04-15: Temporarily reverting the services page to the default }} + {{did-insert (route-action 'replaceWith' 'dc.services.index' (hash dc=(env 'CONSUL_DATACENTER_LOCAL') ) diff --git a/ui/packages/consul-ui/app/templates/dc/show/serverstatus.hbs b/ui/packages/consul-ui/app/templates/dc/show/serverstatus.hbs index 36fa0c86e8..dd2472ec80 100644 --- a/ui/packages/consul-ui/app/templates/dc/show/serverstatus.hbs +++ b/ui/packages/consul-ui/app/templates/dc/show/serverstatus.hbs @@ -87,6 +87,9 @@ as |item|}} >
+ {{compute (fn route.t 'tolerance.link' (hash + htmlSafe=true + ))}}

{{compute (fn route.t 'tolerance.header')}}

diff --git a/ui/packages/consul-ui/tests/acceptance/dc/forwarding.feature b/ui/packages/consul-ui/tests/acceptance/dc/forwarding.feature index 6b73278c85..b5f6db0e55 100644 --- a/ui/packages/consul-ui/tests/acceptance/dc/forwarding.feature +++ b/ui/packages/consul-ui/tests/acceptance/dc/forwarding.feature @@ -10,4 +10,4 @@ Feature: dc / forwarding --- dc: datacenter --- - Then the url should be /datacenter/overview/server-status + Then the url should be /datacenter/services diff --git a/ui/packages/consul-ui/tests/acceptance/index-forwarding.feature b/ui/packages/consul-ui/tests/acceptance/index-forwarding.feature index d21902c00a..1405295abe 100644 --- a/ui/packages/consul-ui/tests/acceptance/index-forwarding.feature +++ b/ui/packages/consul-ui/tests/acceptance/index-forwarding.feature @@ -4,4 +4,4 @@ Feature: index-forwarding Scenario: Arriving at the index page when there is only one datacenter Given 1 datacenter model with the value "dc1" When I visit the index page - Then the url should be /dc1/overview/server-status + Then the url should be /dc1/services diff --git a/ui/packages/consul-ui/tests/acceptance/page-navigation.feature b/ui/packages/consul-ui/tests/acceptance/page-navigation.feature index 72b6a971b0..73f57caad1 100644 --- a/ui/packages/consul-ui/tests/acceptance/page-navigation.feature +++ b/ui/packages/consul-ui/tests/acceptance/page-navigation.feature @@ -10,7 +10,7 @@ Feature: page-navigation --- dc: dc1 --- - Then the url should be /dc1/overview/server-status + Then the url should be /dc1/services Scenario: Clicking [Link] in the navigation takes me to [URL] When I visit the services page for yaml --- diff --git a/ui/packages/consul-ui/tests/unit/abilities/-test.js b/ui/packages/consul-ui/tests/unit/abilities/-test.js index 3ac3cf0724..cf7acf5c78 100644 --- a/ui/packages/consul-ui/tests/unit/abilities/-test.js +++ b/ui/packages/consul-ui/tests/unit/abilities/-test.js @@ -52,8 +52,12 @@ module('Unit | Ability | *', function(hooks) { // TODO: We 
currently hardcode KVs to always be true assert.equal(true, ability[`can${perm}`], `Expected ${item}.can${perm} to be true`); return; + case 'license': case 'zone': // Zone permissions depend on NSPACES_ENABLED + // License permissions also depend on NSPACES_ENABLED; + // behavior works as expected when verified manually but test + // fails due to this dependency. -@evrowe 2022-04-18 return; } assert.equal( diff --git a/ui/packages/consul-ui/translations/routes/en-us.yaml b/ui/packages/consul-ui/translations/routes/en-us.yaml index 735a33f963..9739203edb 100644 --- a/ui/packages/consul-ui/translations/routes/en-us.yaml +++ b/ui/packages/consul-ui/translations/routes/en-us.yaml @@ -5,6 +5,8 @@ dc: title: Server status unassigned: Unassigned Zones tolerance: + link: | + Learn more header: Server fault tolerance immediate: header: Immediate @@ -35,17 +37,17 @@ dc: body: |
  • - + License expiration
  • - + Renewing a license
  • - + Applying a new license
  • diff --git a/ui/packages/consul-ui/vendor/consul-ui/routes.js b/ui/packages/consul-ui/vendor/consul-ui/routes.js index 99b3cfda5d..b5bec8a609 100644 --- a/ui/packages/consul-ui/vendor/consul-ui/routes.js +++ b/ui/packages/consul-ui/vendor/consul-ui/routes.js @@ -7,32 +7,32 @@ index: { _options: { path: '/', - redirect: '../show/serverstatus', + redirect: '../services', }, }, show: { _options: { path: '/overview', - abilities: ['access overview'] + abilities: ['access overview'], }, serverstatus: { _options: { path: '/server-status', - abilities: ['access overview', 'read zones'] + abilities: ['access overview', 'read zones'], }, }, cataloghealth: { _options: { path: '/catalog-health', - abilities: ['access overview'] + abilities: ['access overview'], }, }, license: { _options: { path: '/license', - abilities: ['access overview', 'read licence'] + abilities: ['access overview', 'read licence'], }, - } + }, }, services: { _options: { path: '/services' }, diff --git a/website/Dockerfile b/website/Dockerfile deleted file mode 100644 index 8d826168a7..0000000000 --- a/website/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM docker.mirror.hashicorp.services/node:14.17.0-alpine -RUN apk add --update --no-cache git make g++ automake autoconf libtool nasm libpng-dev - -COPY ./package.json /website/package.json -COPY ./package-lock.json /website/package-lock.json -WORKDIR /website -RUN npm install diff --git a/website/components/basic-hero/img/right-arrow-icon.svg b/website/components/basic-hero/img/right-arrow-icon.svg deleted file mode 100644 index 01a621cdce..0000000000 --- a/website/components/basic-hero/img/right-arrow-icon.svg +++ /dev/null @@ -1 +0,0 @@ - diff --git a/website/components/basic-hero/index.jsx b/website/components/basic-hero/index.jsx deleted file mode 100644 index a33bcb0c8c..0000000000 --- a/website/components/basic-hero/index.jsx +++ /dev/null @@ -1,56 +0,0 @@ -import Button from '@hashicorp/react-button' - -export default function BasicHero({ - 
heading, - content, - links, - brand, - backgroundImage, -}) { - return ( -
    -
    -

    {heading}

    - {content &&

    {content}

    } - {links && links.length > 0 && ( - <> -
    - {links.slice(0, 2).map((link, stableIdx) => { - const buttonVariant = stableIdx === 0 ? 'primary' : 'secondary' - return ( -
    - {links[2] && ( -
    -
    - )} - - )} -
    -
    - ) -} diff --git a/website/components/basic-hero/style.css b/website/components/basic-hero/style.css deleted file mode 100644 index 257d6680f4..0000000000 --- a/website/components/basic-hero/style.css +++ /dev/null @@ -1,76 +0,0 @@ -.g-basic-hero { - padding: 88px 0; - - & .g-type-display-1 { - color: var(--gray-1); - text-align: center; - margin-left: auto; - margin-right: auto; - margin-top: 0; - max-width: 14em; - } - - & .g-type-body-large { - color: var(--gray-2); - margin: 0 auto 0 auto; - text-align: center; - max-width: 40em; - } - - & .links { - display: flex; - flex-wrap: wrap; - justify-content: center; - - /* - * Margins here compensate for extra 8px margin on buttons - * which are needed to center and space properly regardless of whether - * buttons are wrapping to multiple lines or not. - */ - margin-top: calc(32px - 8px); - margin-bottom: -8px; - @media (--large) { - margin-top: calc(40px - 8px); - } - - & .g-btn { - /* - * This ensures 16px between buttons at all times, while maintaining proper centering - * when buttons wrap to multiple lines. - * There will be an extra 8px space on all sides of the button group. - * The top and bottom are accounted for by the -8px adjustment on `.action` margins. - * The left and right excess is left as is - it's needed for proper centering when wrapping. 
- */ - margin: 8px; - } - } - - & .third-link { - display: flex; - justify-content: center; - margin-top: 32px; - & a { - color: var(--gray-2); - } - & svg * { - stroke: var(--gray-2) !important; - } - } - - &.has-background { - background-repeat: no-repeat; - background-color: var(--gray-6); - background-image: url(/img/hero/pattern-desktop.svg); - width: 100%; - background-size: cover; - background-position: center; - - @media (max-width: 800px) { - background-image: url(/img/hero/pattern-mobile.svg); - } - - & .g-btn { - background: var(--gray-6); - } - } -} diff --git a/website/components/block-list/index.tsx b/website/components/block-list/index.tsx deleted file mode 100644 index 07e10fffb6..0000000000 --- a/website/components/block-list/index.tsx +++ /dev/null @@ -1,29 +0,0 @@ -import s from './style.module.css' - -interface Block { - title: string - description: string - image: string -} - -interface BlockListProps { - blocks: Block[] -} - -export default function BlockList({ blocks }: BlockListProps) { - return ( -
    - {blocks.map(({ image, title, description }) => ( -
    -
    - {title} -
    -
    -

    {title}

    -

    {description}

    -
    -
    - ))} -
    - ) -} diff --git a/website/components/block-list/style.module.css b/website/components/block-list/style.module.css deleted file mode 100644 index d6f2938262..0000000000 --- a/website/components/block-list/style.module.css +++ /dev/null @@ -1,23 +0,0 @@ -.blocksContainer { - display: grid; - row-gap: 64px; - - & .block { - display: flex; - - & .imageContainer { - margin-right: 40px; - } - } -} - -.title { - composes: g-type-display-5 from global; - margin: 0 0 16px 0; -} - -.description { - composes: g-type-body-small from global; - margin: 0; - color: var(--gray-2); -} diff --git a/website/components/callout-blade/CalloutBlade.module.css b/website/components/callout-blade/CalloutBlade.module.css deleted file mode 100644 index cb2e50b609..0000000000 --- a/website/components/callout-blade/CalloutBlade.module.css +++ /dev/null @@ -1,131 +0,0 @@ -.calloutBlade { - padding-top: 56px; - padding-bottom: 56px; - - --shadow-level-3: 0 16px 28px rgba(37, 38, 45, 0.12); - - & .contentWrapper { - & > h3 { - margin-top: 0; - margin-bottom: 48px; - @media (max-width: 1000px) { - margin-bottom: 28px; - } - } - } -} - -.contentWrapper { - composes: g-grid-container from global; -} - -.callouts { - display: grid; - list-style: none; - margin: 0; - padding: 0; - - &.twoUp { - grid-template-columns: 1fr 1fr; - grid-gap: 32px; - - & .linkWrap { - padding: 64px 32px; - display: flex; - flex-direction: row; - background: var(--gray-6); - &:hover { - background: var(--gray-5); - box-shadow: var(--shadow-level-3); - } - - & .icon { - margin-right: 48px; - } - @media (max-width: 1200px) { - padding: 48px 32px; - flex-direction: column; - & .icon { - margin-right: 0; - } - } - } - @media (max-width: 900px) { - grid-template-columns: 1fr; - } - } - - &.threeUp { - grid-template-columns: 1fr 1fr 1fr; - grid-gap: 32px; - - & .linkWrap { - padding: 64px 32px; - border: 1px solid var(--gray-5); - border-radius: 2px; - &:hover { - background: var(--gray-6); - box-shadow: 
var(--shadow-level-3); - border-color: var(--gray-6); - } - } - - @media (max-width: 1220px) { - grid-template-columns: 1fr; - & .linkWrap { - padding: 48px 32px; - } - } - } - - & .linkWrap { - color: inherit; - height: 100%; - transition: all 0.3s ease; - display: flex; - flex-direction: column; - - & .icon { - margin-bottom: 16px; - & svg { - height: 50px; - } - } - - & .flexWrapper { - display: flex; - flex-direction: column; - flex-grow: 1; - justify-content: space-between; - - & .infoWrapper { - display: flex; - flex-direction: column; - - & > h5 { - margin-top: 0; - margin-bottom: 16px; - } - - & > p { - color: var(--gray-3); - margin-top: 0; - margin-bottom: 48px; - } - } - - & .linkWrapper { - & .eyebrow { - margin-bottom: 8px; - } - & :global(.g-btn) { - text-align: left; - } - } - } - } -} - -.eyebrow { - composes: g-type-label from global; -} diff --git a/website/components/callout-blade/index.jsx b/website/components/callout-blade/index.jsx deleted file mode 100644 index e86ad03b9e..0000000000 --- a/website/components/callout-blade/index.jsx +++ /dev/null @@ -1,50 +0,0 @@ -import classNames from 'classnames' -import styles from './CalloutBlade.module.css' -import Button from '@hashicorp/react-button' -import InlineSvg from '@hashicorp/react-inline-svg' - -export default function CalloutBlade({ title, callouts }) { - return ( -
    -
    -

    {title}

    - -
    -
    - ) -} diff --git a/website/components/card-list/index.tsx b/website/components/card-list/index.tsx deleted file mode 100644 index 7c6a8a0687..0000000000 --- a/website/components/card-list/index.tsx +++ /dev/null @@ -1,44 +0,0 @@ -import s from './style.module.css' - -interface Card { - heading: string - description: string - url: string - eyebrow: string -} - -interface CardListProps { - title: string - cards: Card[] - className?: string -} - -export default function CardList({ title, cards, className }: CardListProps) { - return ( -
    -

    {title}

    -
    - {cards.map(({ heading, description, url, eyebrow }) => ( - -
    - {eyebrow} - {heading} -

    {description}

    -
    - consul-icon -
    - ))} -
    -
    - ) -} diff --git a/website/components/card-list/style.module.css b/website/components/card-list/style.module.css deleted file mode 100644 index 37defd3633..0000000000 --- a/website/components/card-list/style.module.css +++ /dev/null @@ -1,61 +0,0 @@ -.cardsWrapper { - display: grid; - column-gap: 40px; - row-gap: 40px; - grid-template-columns: repeat(auto-fill, minmax(218px, 1fr)); - - & .card { - border: 1px solid var(--gray-5); - box-shadow: 0 2px 3px rgba(37, 41, 55, 0.08); - border-radius: 1px; - transition: box-shadow 0.25s, transform 0.25s; - display: flex; - flex-direction: column; - padding: 24px 24px 28px; - justify-content: space-between; - - &:hover { - box-shadow: 0 16px 28px rgba(37, 38, 45, 0.12); - transform: translateY(-4px); - } - - & .cardContent { - display: flex; - flex-direction: column; - } - - & .icon { - width: 12px; - } - } -} - -.title { - composes: g-type-display-3 from global; - margin-top: 0; - margin-bottom: 46px; -} - -.eyebrow { - display: flex; - width: 100%; - justify-content: space-between; - align-items: center; - composes: g-type-label from global; - color: var(--gray-2); - margin-bottom: 14px; -} - -.heading { - composes: g-type-display-6 from global; - margin-top: 0; - margin-bottom: 8px; - color: var(--black); -} - -.description { - composes: g-type-body from global; - color: var(--gray-1); - margin-top: 0; - margin-bottom: 17px; -} diff --git a/website/components/case-study-carousel/case-study-slide.jsx b/website/components/case-study-carousel/case-study-slide.jsx deleted file mode 100644 index 315a8504d8..0000000000 --- a/website/components/case-study-carousel/case-study-slide.jsx +++ /dev/null @@ -1,44 +0,0 @@ -import InlineSvg from '@hashicorp/react-inline-svg' -import Image from '@hashicorp/react-image' -import Button from '@hashicorp/react-button' -import QuoteMarksIcon from './img/quote.svg?include' - -export default function CaseStudySlide({ - caseStudy: { person, quote, company, caseStudyURL }, -}) { - return ( -
    - -

    {quote}

    -
    -
    - {`${person.firstName} -
    -
    - {person.firstName} {person.lastName} -
    -

    - {person.title}, {company.name} -

    -
    -
    - {company.name} -
    -
    - ) -} diff --git a/website/components/case-study-carousel/img/active-control-dot.svg b/website/components/case-study-carousel/img/active-control-dot.svg deleted file mode 100644 index ee15572f4f..0000000000 --- a/website/components/case-study-carousel/img/active-control-dot.svg +++ /dev/null @@ -1 +0,0 @@ - diff --git a/website/components/case-study-carousel/img/inactive-control-dot.svg b/website/components/case-study-carousel/img/inactive-control-dot.svg deleted file mode 100644 index c28dc96807..0000000000 --- a/website/components/case-study-carousel/img/inactive-control-dot.svg +++ /dev/null @@ -1 +0,0 @@ - diff --git a/website/components/case-study-carousel/img/left-arrow-control.svg b/website/components/case-study-carousel/img/left-arrow-control.svg deleted file mode 100644 index 0cec5c4b62..0000000000 --- a/website/components/case-study-carousel/img/left-arrow-control.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/website/components/case-study-carousel/img/quote.svg b/website/components/case-study-carousel/img/quote.svg deleted file mode 100644 index 4e597a2319..0000000000 --- a/website/components/case-study-carousel/img/quote.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - \ No newline at end of file diff --git a/website/components/case-study-carousel/img/right-arrow-control.svg b/website/components/case-study-carousel/img/right-arrow-control.svg deleted file mode 100644 index eb390dd7c8..0000000000 --- a/website/components/case-study-carousel/img/right-arrow-control.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/website/components/case-study-carousel/index.jsx b/website/components/case-study-carousel/index.jsx deleted file mode 100644 index 0d2a9358ae..0000000000 --- a/website/components/case-study-carousel/index.jsx +++ /dev/null @@ -1,99 +0,0 @@ -import { useState } from 'react' -import { isIE } from 'react-device-detect' - -import Carousel from 'nuka-carousel' -import CaseSlide from 
'./case-study-slide' -import Image from '@hashicorp/react-image' -import InlineSvg from '@hashicorp/react-inline-svg' -import ActiveControlDot from './img/active-control-dot.svg?include' -import InactiveControlDot from './img/inactive-control-dot.svg?include' -import LeftArrow from './img/left-arrow-control.svg?include' -import RightArrow from './img/right-arrow-control.svg?include' - -export default function CaseStudyCarousel({ - caseStudies, - title, - logoSection = { grayBackground: false, featuredLogos: [] }, -}) { - const [slideIndex, setSlideIndex] = useState(0) - const { grayBackground, featuredLogos } = logoSection - - const caseStudySlides = caseStudies.map((caseStudy) => ( - - )) - const logoRows = featuredLogos && Math.ceil(featuredLogos.length / 3) - - function renderControls() { - return ( -
    - {caseStudies.map((caseStudy, stableIdx) => { - return ( - - ) - })} -
    - ) - } - - function sideControls(icon, direction) { - return ( - - ) - } - - return ( -
    -

    {title}

    - {!isIE ? ( - renderControls()} - renderCenterLeftControls={({ previousSlide }) => { - return sideControls(LeftArrow, previousSlide) - }} - renderCenterRightControls={({ nextSlide }) => { - return sideControls(RightArrow, nextSlide) - }} - afterSlide={(slideIndex) => setSlideIndex(slideIndex)} - > - {caseStudySlides} - - ) : null} -
    - {featuredLogos && featuredLogos.length > 0 && ( -
    - {featuredLogos.map((featuredLogo) => ( - {featuredLogo.companyName} - ))} -
    - )} -
    -
    - ) -} diff --git a/website/components/case-study-carousel/style.css b/website/components/case-study-carousel/style.css deleted file mode 100644 index e7adad1307..0000000000 --- a/website/components/case-study-carousel/style.css +++ /dev/null @@ -1,283 +0,0 @@ -.g-case-carousel { - display: flex; - flex-direction: column; - align-items: center; - position: relative; - padding-top: 0 !important; - padding-bottom: 0 !important; - - & h2 { - margin-bottom: 30px; - max-width: 600px; - text-align: center; - white-space: pre-wrap; - - @media (max-width: 800px) { - margin-top: 64px; - white-space: initial; - margin-left: 24px; - margin-right: 24px; - } - } - - &::after { - content: ''; - width: 100%; - height: var(--background-height); - position: absolute; - bottom: 0; - z-index: -1; - } - - &.has-background { - &::after { - content: ''; - background: var(--gray-6); - } - - & .background-section { - background: var(--gray-6); - padding-bottom: 64px; - } - } - - & .background-section { - width: 100%; - - & .mono-logos { - align-items: baseline; - display: flex; - justify-content: center; - max-width: 750px; - margin: 0 auto; - margin-top: 70px; - flex-wrap: wrap; - - & > img { - height: 100%; - max-height: 40px; - width: 33.33%; - padding: 0 30px; - margin: 24px 0; - - @media (max-width: 800px) { - padding: 0 20px; - max-height: 28px; - } - } - - & > picture { - max-height: 40px; - width: 33.33%; - padding: 0 30px; - margin: 24px 0; - - @media (max-width: 800px) { - padding: 0 20px; - max-height: 28px; - } - - & > img { - width: 100%; - height: auto; - display: flex; - } - } - } - } - - & .slider-control-bottomcenter { - bottom: -35px !important; - } - - /* Begin `nuka-carousel` styles */ - & .slider { - max-width: 1200px; - - &:focus { - outline: none !important; - } - - @media (max-width: 800px) { - width: calc(100% - 48px) !important; - } - - & .slider-list { - margin-bottom: 50px !important; - - @media (max-width: 800px) { - margin-bottom: 30px !important; - } - } 
- - & .slider-frame:focus { - outline: none !important; - } - - & .slider-slide:focus { - outline: none !important; - } - } - - /* End `nuka-carousel` styles */ - - & .side-control { - border: none; - background: none; - margin: 20px; - - &:focus { - outline: none; - } - - &:hover:not([disabled]) { - cursor: pointer; - } - - @media (max-width: 991px) { - display: none; - } - - & svg path { - stroke: var(--gray-2); - } - - &:disabled svg path { - stroke: var(--gray-4); - } - } - - & .case-slide { - display: flex; - flex-wrap: wrap; - width: 100%; - background: var(--white); - padding: 64px; - box-shadow: 0 8px 22px #dedede; - - @media (max-width: 800px) { - box-shadow: none; - border: 1px solid var(--gray-5); - padding: 48px; - } - - @media (--medium-up) { - max-width: 750px; - } - - & button { - margin-top: 24px; - } - - & .quotes { - display: flex; - margin-bottom: 24px; - } - - & h4 { - margin: 0; - - &.case { - min-height: 130px; - margin-bottom: 24px; - color: var(--gray-2); - - @media (max-width: 800px) { - min-height: 155px; - font-size: 22px; - } - - @media (max-width: 650px) { - min-height: 190px; - } - - @media (max-width: 550px) { - font-size: 20px; - } - - @media (max-width: 400px) { - font-size: 18px; - line-height: 28px; - } - } - } - - & p { - margin: 0; - } - - & a { - margin-top: 24px; - } - - & .case-content { - display: flex; - justify-content: space-between; - width: 100%; - align-items: center; - } - - & .person-container { - display: flex; - align-items: center; - - & picture { - display: flex; - } - - & .person-photo { - border-radius: 50%; - max-height: 72px; - margin-right: 24px; - } - - & .person-name { - padding-right: 16px; - - & h5 { - margin: 0; - - @media (max-width: 400px) { - font-size: 16px; - } - } - - @media (max-width: 400px) { - & p { - font-size: 15px; - } - } - } - } - - & .company-logo { - max-height: 40px; - max-width: 180px; - - @media (max-width: 800px) { - display: none; - } - } - - & .case { - color: var(--gray-4); - 
font-size: 24px; - line-height: 31px; /* Called for within the design, no custom property seemed appropriate */ - } - } - - & .carousel-controls { - width: 100%; - display: flex; - flex-wrap: nowrap; - justify-content: center; - - & .carousel-controls-button { - height: 20px; - background: transparent; - box-shadow: none; - cursor: pointer; - border: none; - } - } -} diff --git a/website/components/cloud-offerings-list/index.jsx b/website/components/cloud-offerings-list/index.jsx deleted file mode 100644 index 7a32e1e9ff..0000000000 --- a/website/components/cloud-offerings-list/index.jsx +++ /dev/null @@ -1,28 +0,0 @@ -import Button from '@hashicorp/react-button' - -export default function CloudOfferingsList({ offerings }) { - return ( - - ) -} diff --git a/website/components/cloud-offerings-list/style.css b/website/components/cloud-offerings-list/style.css deleted file mode 100644 index a1b0317cd7..0000000000 --- a/website/components/cloud-offerings-list/style.css +++ /dev/null @@ -1,57 +0,0 @@ -ul.g-cloud-offerings-list { - list-style: none; - padding: 0; - margin: -16px; - display: flex; - - @media (width < 769px) { - flex-direction: column; - } - - & li { - flex-grow: 1; - margin: 16px; - background: var(--white); - border: 1px solid var(--gray-5); - border-radius: 2px; - text-align: center; - transition: box-shadow 0.25s, transform 0.25s, -webkit-transform 0.25s; - - &:hover { - box-shadow: 0 16px 28px rgba(37, 38, 45, 0.12); - transform: translateY(-4px); - cursor: pointer; - } - - & > a { - display: block; - padding: 32px; - color: inherit; - - & > img { - display: block; - width: 400px; - max-width: 100%; - margin-left: auto; - margin-right: auto; - margin-bottom: 14px; - } - - & > span { - color: var(--gray-3); - } - - & > h4 { - text-decoration: none; - margin-top: 8px; - margin-bottom: 0; - } - - & > p { - font-size: 19px; - margin-top: 8px; - margin-bottom: 24px; - } - } - } -} diff --git a/website/components/config-entry-reference/index.jsx 
b/website/components/config-entry-reference/index.jsx deleted file mode 100644 index 329092371a..0000000000 --- a/website/components/config-entry-reference/index.jsx +++ /dev/null @@ -1,200 +0,0 @@ -import Tabs, { Tab } from '@hashicorp/react-tabs' -import EnterpriseAlertBase from '@hashicorp/react-enterprise-alert' - -/** - * ConfigEntryReference renders the reference docs for a config entry. - * It creates two tabs, one for HCL docs and one for Kubernetes docs. - * - * @param {array} keys Array of objects, that describe all - * keys that can be set for this config entry. - * @param {boolean} topLevel Indicates this is a reference block that contains - * the top level keys vs a reference block that documents - * nested keys and that is separated out for clarity. - * - * The objects in the keys array support the following keys: - * - name : the name of the HCL key, e.g. Name, Listener. This case sensitive. - * - description : the description of the key. If this key has different descriptions - * for HCL vs. Kube YAML then description can be an object: - * description: { - * hcl: 'HCL description', - * yaml: 'YAML description' - * } - * - hcl : a boolean to indicate if this key should be shown in the HCL - * documentation. Defaults to true. - * - yaml : a boolean to indicate if this key should be shown in the YAML - * documentation. Defaults to true. - * - enterprise : a boolean to indicate if this key is Consul Enterprise - * only. Defaults to false. - * - children : accepts an array of keys that must be set under this key. - * The schema for these keys is the same as the top level keys. - * - type : the type and default of this key, e.g. string: "default". - */ -export default function ConfigEntryReference({ keys, topLevel = true }) { - // Kube needs to have its non-top-level keys nested under a "spec" key. - const kubeKeys = topLevel ? 
toKubeKeys(keys) : keys - return ( - - {renderKeys(keys, true)} - {renderKeys(kubeKeys, false)} - - ) -} - -/** - * Renders keys as HTML. It works recursively through all keys. - * @param {array} keys - * @param {boolean} isHCLTab - * @returns {JSX.Element|null} - */ -function renderKeys(keys, isHCLTab) { - if (!keys) return null - return
      {keys.map((key) => renderKey(key, isHCLTab))}
    -} - -/** - * Renders a single key as its HTML element. - * - * @param {object} key - * @param {boolean} isHCLTab - * @returns {JSX.Element|null} - */ -function renderKey(key, isHCLTab) { - if (!key.name) return null - if (isHCLTab && key.hcl === false) return null - if (!isHCLTab && key.yaml === false) return null - - const keyName = isHCLTab ? key.name : toYAMLKeyName(key.name) - - let description = '' - if (key.description) { - if (typeof key.description === 'string') { - description = key.description - } else if (!isHCLTab && key.description.yaml) { - description = key.description.yaml - } else if (key.description.hcl) { - description = key.description.hcl - } - } - - const htmlDescription = description && markdownToHtml(' - ' + description) - const type = key.type && {`(${key.type})`} - const enterpriseAlert = key.enterprise && - const keyLower = keyName.toLowerCase() - - // NOTE: This code copies from https://github.com/hashicorp/remark-plugins/blob/df606efc844319a2532ec54e4cf6ff2d575108ff/plugins/anchor-links/index.js - // to ensure the styling of each bullet is correct. The two locations should be kept - // in sync. - return ( -
  • - -

    - - {keyName} - {' '} - {type} - {enterpriseAlert} - -

    - {renderKeys(key.children, isHCLTab)} -
  • - ) -} - -/** - * Constructs a keys object for Kubernetes out of HCL keys. - * Really all this entails is nesting the correct keys under the Kubernetes - * 'spec' key since in HCL there is no 'spec' key. - * - * @param {array} keys - * @returns {array} - */ -function toKubeKeys(keys) { - const topLevelKeys = keys.filter((key) => isTopLevelKubeKey(key.name)) - const keysUnderSpec = keys.filter((key) => !isTopLevelKubeKey(key.name)) - return topLevelKeys.concat([{ name: 'spec', children: keysUnderSpec }]) -} - -/** - * Converts an HCL key name to a kube yaml key name. - * - * Examples: - * - Protocol => protocol - * - MeshGateway => meshGateway - * - ACLToken => aclToken - * - HTTP => http - * - * @param {string} hclKey - * @returns {string} - */ -function toYAMLKeyName(hclKey) { - // Handle something like HTTP. - if (hclKey.toUpperCase() === hclKey) { - return hclKey.toLowerCase() - } - - let indexFirstLowercaseChar = hclKey - .split('') - .findIndex((c) => c === c.toLowerCase()) - // Special case to handle something like ACLToken => aclToken. - if (indexFirstLowercaseChar > 1) { - indexFirstLowercaseChar-- - } - - let lowercasePortion = '' - for (let i = 0; i < indexFirstLowercaseChar; i++) { - lowercasePortion += hclKey[i].toLowerCase() - } - return ( - lowercasePortion + hclKey.split('').slice(indexFirstLowercaseChar).join('') - ) -} - -/** - * Converts a markdown string to its HTML representation. - * Currently it only supports inline code blocks (e.g. `code here`) and - * links (e.g. [link text](http://link-url) because these were the most - * commonly used markdown features in the key descriptions. - * - * @param {string} markdown the input markdown - * @returns {string} - */ -function markdownToHtml(markdown) { - let html = markdown - - // Replace inline code blocks defined by backticks with . 
- while (html.indexOf('`') > 0) { - html = html.replace('`', '') - if (html.indexOf('`') <= 0) { - throw new Error(`'${markdown} does not have matching '\`' characters`) - } - html = html.replace('`', '') - } - - // Replace links, e.g. [link text](http://link-url), - // with link text. - return html.replace(/\[(.*?)]\((.*?)\)/g, '$1') -} - -/** - * Returns true if key is a key used at the top level of a CRD. By top level we - * mean not nested under any other key. - * - * @param {string} name name of the key - * - * @return {boolean} - */ -function isTopLevelKubeKey(name) { - return ( - name.toLowerCase() === 'metadata' || - name.toLowerCase() === 'kind' || - name.toLowerCase() === 'apiversion' - ) -} - -function EnterpriseAlert(props) { - return -} diff --git a/website/components/consul-on-kubernetes-hero/images/bg-dots.svg b/website/components/consul-on-kubernetes-hero/images/bg-dots.svg deleted file mode 100644 index 4e0be2d9a9..0000000000 --- a/website/components/consul-on-kubernetes-hero/images/bg-dots.svg +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/website/components/consul-on-kubernetes-hero/images/bg-right.svg b/website/components/consul-on-kubernetes-hero/images/bg-right.svg deleted file mode 100644 index ed32816ae4..0000000000 --- a/website/components/consul-on-kubernetes-hero/images/bg-right.svg +++ /dev/null @@ -1,19 +0,0 @@ - - - - - - - - - - - - - - - - - - - diff --git a/website/components/consul-on-kubernetes-hero/images/bg-top.svg b/website/components/consul-on-kubernetes-hero/images/bg-top.svg deleted file mode 100644 index 877abcb9f3..0000000000 --- a/website/components/consul-on-kubernetes-hero/images/bg-top.svg +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - - - - - diff --git a/website/components/consul-on-kubernetes-hero/index.tsx b/website/components/consul-on-kubernetes-hero/index.tsx deleted file mode 100644 index 78c062cba9..0000000000 --- a/website/components/consul-on-kubernetes-hero/index.tsx +++ /dev/null @@ -1,98 +0,0 @@ -import 
Button from '@hashicorp/react-button' -import ReactPlayer from 'react-player' -import s from './style.module.css' - -interface Cta { - url: string - text: string -} - -interface ConsulOnKubernetesHeroProps { - title: string - description: string - ctas: Cta[] - video: { - src: string - poster: string - } -} - -export default function ConsulOnKubernetesHero({ - title, - description, - ctas, - video, -}: ConsulOnKubernetesHeroProps) { - return ( -
    -
    -
    -

    {title}

    -

    {description}

    -
    - {ctas.map(({ text, url }, idx) => ( -
    -
    -
    - background top - background right - background bottom - background left -
    - - - - - } - /> -
    -
    -
    -
    - ) -} diff --git a/website/components/consul-on-kubernetes-hero/style.module.css b/website/components/consul-on-kubernetes-hero/style.module.css deleted file mode 100644 index 6aae256a08..0000000000 --- a/website/components/consul-on-kubernetes-hero/style.module.css +++ /dev/null @@ -1,162 +0,0 @@ -.ckHero { - background-color: var(--black); - color: var(--white); - padding-top: 130px; - padding-bottom: 142px; - overflow: hidden; - - @media (--medium) { - padding-top: 78px; - padding-bottom: 104px; - } - - @media (--small) { - padding-top: 56px; - padding-bottom: 80px; - } -} - -.contentWrapper { - --columns: 1; - - column-gap: 32px; - composes: g-grid-container from global; - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - row-gap: 48px; - - @media (--medium-up) { - --columns: 12; - } - - & .headline { - text-align: center; - grid-column: 1 / -1; - margin: 0 auto; - - @media (--large) { - margin: 0; - text-align: left; - grid-column: 1 / 6; - } - - & .buttons { - display: flex; - flex-wrap: wrap; - align-items: center; - justify-content: center; - - @media (--large) { - justify-content: flex-start; - } - - & .button:not(:last-of-type) { - margin-right: 30px; - } - } - } - - & .media { - position: relative; - grid-column: 1 / -1; - - @media (--medium) { - grid-column: 3 / 11; - } - - @media (--large) { - grid-column: 7 / -1; - } - - & > div { - border: 1px var(--gray-3) solid; - border-radius: 4px; - } - - & .video { - background-color: var(--black); - position: relative; - padding-top: 56.25%; - width: 100%; - - & .player { - position: absolute; - top: 0; - left: 0; - - & div { - border-radius: 4px; - } - } - - & iframe { - border-radius: 4px; - } - - & > * { - bottom: 0; - height: 100%; - left: 0; - position: absolute; - right: 0; - top: 0; - width: 100%; - } - } - } -} - -.title { - composes: g-type-display-1 from global; - margin: 0; -} - -.description { - composes: g-type-body-large from global; - margin-top: 16px; - 
margin-bottom: 40px; - color: var(--gray-5); - max-width: 500px; - - @media (--large) { - max-width: 385px; - } -} - -.backgroundImage { - height: auto; - position: absolute; -} - -.bgTop { - composes: backgroundImage; - left: auto; - right: 0; - top: -130px; - display: none; - width: 75%; - - @media (--large) { - display: block; - } -} - -.bgRight { - composes: backgroundImage; - top: 20%; - left: 99.5%; -} - -.bgBottom { - composes: backgroundImage; - width: auto; - top: 80%; - left: 8%; -} - -.bgLeft { - composes: backgroundImage; - width: auto; - top: 86px; - left: -77px; -} diff --git a/website/components/cta-hero/img/consul-stack.svg b/website/components/cta-hero/img/consul-stack.svg deleted file mode 100644 index 34335e7acc..0000000000 --- a/website/components/cta-hero/img/consul-stack.svg +++ /dev/null @@ -1,32 +0,0 @@ - - - - Consul Stack - Created with Sketch. - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/website/components/cta-hero/index.jsx b/website/components/cta-hero/index.jsx deleted file mode 100644 index 9043245e93..0000000000 --- a/website/components/cta-hero/index.jsx +++ /dev/null @@ -1,38 +0,0 @@ -import TextSplit from '@hashicorp/react-text-split' -import Button from '@hashicorp/react-button' -import s from './style.module.css' -import InlineSvg from '@hashicorp/react-inline-svg' -import ConsulStack from './img/consul-stack.svg?include' - -export default function CtaHero({ title, description, links, cta }) { - return ( -
    - - - -
    - ) -} - -function CTA({ title, description, link }) { - return ( -
    - -

    {title}

    -

    {description}

    -
    - ) -} diff --git a/website/components/cta-hero/style.module.css b/website/components/cta-hero/style.module.css deleted file mode 100644 index 3bad47d84a..0000000000 --- a/website/components/cta-hero/style.module.css +++ /dev/null @@ -1,123 +0,0 @@ -.ctaHero { - & :global(.g-text-split) :global(.g-grid-container) { - @media (width < 1120px) { - flex-direction: column-reverse; - } - - & > div { - @media (768px < width < 1120px) { - width: 40em; - } - - &:last-child { - @media (width < 1120px) { - margin-bottom: 64px; - text-align: center; - } - - & p { - @media (width < 1120px) { - margin: 16px auto; - } - } - } - } - - /** - * HACK: - * Overrides the H2 with styling from - * our global g-type-display-1 class. - * - * This was because there's no way to - * override the heading in - * with the designed h1 styling. - * - * TODO: - * Address this at the component - * level or revert to just using h2 - * as is default. - */ - & h2 { - font-size: 2.125rem; - letter-spacing: -0.008em; - line-height: 1.265em; - - @media (--medium-up) { - font-size: 2.625rem; - letter-spacing: -0.01em; - line-height: 1.19em; - } - - @media (--large) { - font-size: 3.125rem; - line-height: 1.2em; - } - } - - & p { - max-width: 440px; - } - } -} - -.cta { - max-width: 525px; - border: var(--gray-5) 1px solid; - padding: 32px; - position: relative; - margin-top: 44px; - margin-left: auto; - margin-right: auto; - - @media (min-width: 1120px) { - margin-top: unset; - margin-left: unset; - margin-right: unset; - - /* Pull this down on Desktop to line the - * buttons up with the other CTA buttons */ - margin-bottom: -38px; - } - - & > h3 { - margin-top: 0; - margin-bottom: 16px; - max-width: 135px; - } - - & > p { - color: var(--gray-3); - margin-top: 28px; - margin-bottom: 40px; - } - - & .stackIcon { - display: block; - margin-left: auto; - margin-right: auto; - - & svg { - position: absolute; - max-width: 100%; - left: 209px; - top: -34px; - - @media (max-width: 600px) { - right: 20px; - 
left: unset; - } - - @media (max-width: 470px) { - position: unset; - display: block; - margin-left: auto; - margin-right: auto; - margin-bottom: 18px; - } - } - } -} - -.description { - composes: .g-type-body-small from global; -} diff --git a/website/components/docs-list/index.tsx b/website/components/docs-list/index.tsx deleted file mode 100644 index 21366492b3..0000000000 --- a/website/components/docs-list/index.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import Button from '@hashicorp/react-button' -import s from './style.module.css' -interface Doc { - icon: { - src: string - alt: string - } - description: string - cta: { - text: string - url: string - } -} - -interface DocsListProps { - title: string - docs: Doc[] - className?: string -} - -export default function DocsList({ title, docs, className }: DocsListProps) { - return ( -
    -

    {title}

    -
    - {docs.map(({ icon, description, cta }) => ( -
    -
    - {icon.alt} -
    -

    {description}

    -
    - ))} -
    -
    - ) -} diff --git a/website/components/docs-list/style.module.css b/website/components/docs-list/style.module.css deleted file mode 100644 index 94314758fe..0000000000 --- a/website/components/docs-list/style.module.css +++ /dev/null @@ -1,32 +0,0 @@ -.docsList { - display: grid; - row-gap: 48px; -} - -.title { - composes: g-type-display-3 from global; - margin-top: 0; - margin-bottom: 46px; -} - -.image { - border: 1px solid var(--gray-5); - box-sizing: border-box; - border-radius: 3px; - width: 64px; - height: 64px; - display: flex; - align-items: center; - justify-content: center; - - & img { - width: 40px; - height: 40px; - } -} - -.description { - composes: g-body from global; - margin-top: 12px; - margin-bottom: 24px; -} diff --git a/website/components/downloads-props/index.jsx b/website/components/downloads-props/index.jsx deleted file mode 100644 index 047db15a09..0000000000 --- a/website/components/downloads-props/index.jsx +++ /dev/null @@ -1,92 +0,0 @@ -import Button from '@hashicorp/react-button' -import s from '../../pages/downloads/style.module.css' - -export default function DownloadsProps(preMerchandisingSlot) { - return { - getStartedDescription: - 'Follow step-by-step tutorials on the essentials of Consul.', - getStartedLinks: [ - { - label: 'CLI Quickstart', - href: 'https://learn.hashicorp.com/collections/consul/getting-started', - }, - { - label: 'HCP Consul', - href: - 'https://learn.hashicorp.com/collections/consul/cloud-get-started', - }, - { - label: 'HCS on Azure', - href: 'https://learn.hashicorp.com/collections/consul/hcs-azure', - }, - { - label: 'Kubernetes Quickstart', - href: - 'https://learn.hashicorp.com/collections/consul/gs-consul-service-mesh', - }, - { - label: 'View all Consul tutorials', - href: 'https://learn.hashicorp.com/consul', - }, - ], - tutorialLink: { - href: 'https://learn.hashicorp.com/consul', - label: 'View Tutorials at HashiCorp Learn', - }, - logo: ( - Consul - ), - merchandisingSlot: ( - <> - 
{preMerchandisingSlot && preMerchandisingSlot} -
    -
    -

    - Looking for a way to secure and automate application networking - without the added complexity of managing the infrastructure? -

    -
    -
    - -

    - » Download Consul Tools -

    - -
    -

    Note for ARM users:

    - -
      -
    • Use Armelv5 for all 32-bit armel systems
    • -
    • Use Armhfv6 for all armhf systems with v6+ architecture
    • -
    • Use Arm64 for all v8 64-bit architectures
    • -
    - -

    - The following commands can help determine the right version for your - system: -

    - - $ uname -m -
    - - $ readelf -a /proc/self/exe | grep -q -c Tag_ABI_VFP_args && echo - "armhf" || echo "armel" - -
    - - ), - } -} diff --git a/website/components/enterprise-comparison/consul/img/enterprise_complexity_1.svg b/website/components/enterprise-comparison/consul/img/enterprise_complexity_1.svg deleted file mode 100644 index 8cf3786cb1..0000000000 --- a/website/components/enterprise-comparison/consul/img/enterprise_complexity_1.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - - - - - - - - diff --git a/website/components/enterprise-comparison/consul/img/enterprise_complexity_2.svg b/website/components/enterprise-comparison/consul/img/enterprise_complexity_2.svg deleted file mode 100644 index fc1f1d5de0..0000000000 --- a/website/components/enterprise-comparison/consul/img/enterprise_complexity_2.svg +++ /dev/null @@ -1,27 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/website/components/enterprise-comparison/consul/index.jsx b/website/components/enterprise-comparison/consul/index.jsx deleted file mode 100644 index 7231adbb60..0000000000 --- a/website/components/enterprise-comparison/consul/index.jsx +++ /dev/null @@ -1,44 +0,0 @@ -import EnterpriseComparison from '../../enterprise-comparison' - -export default function ConsulEnterpriseComparison() { - return ( - - ) -} diff --git a/website/components/enterprise-comparison/img/arrow.svg b/website/components/enterprise-comparison/img/arrow.svg deleted file mode 100644 index 0e395bd6fd..0000000000 --- a/website/components/enterprise-comparison/img/arrow.svg +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/website/components/enterprise-comparison/img/complexity-advanced.png b/website/components/enterprise-comparison/img/complexity-advanced.png deleted file mode 100644 index eeac1f3b96..0000000000 Binary files a/website/components/enterprise-comparison/img/complexity-advanced.png and /dev/null differ diff --git a/website/components/enterprise-comparison/img/complexity-basic.png b/website/components/enterprise-comparison/img/complexity-basic.png deleted file mode 100644 index 9a35d60b14..0000000000 
Binary files a/website/components/enterprise-comparison/img/complexity-basic.png and /dev/null differ diff --git a/website/components/enterprise-comparison/index.jsx b/website/components/enterprise-comparison/index.jsx deleted file mode 100644 index 810f2acca7..0000000000 --- a/website/components/enterprise-comparison/index.jsx +++ /dev/null @@ -1,62 +0,0 @@ -import Image from '@hashicorp/react-image' -import Button from '@hashicorp/react-button' -import InlineSvg from '@hashicorp/react-inline-svg' -import ArrowIcon from './img/arrow.svg?include' - -export default function EnterpriseComparison({ - title, - itemOne, - itemTwo, - brand, -}) { - return ( -
    -
    -

    {title}

    - -
    -
    - -
    {itemOne.label}
    -

    {itemOne.title}

    - -

    {itemOne.description}

    - - {itemOne.links.map((link) => ( -
    -
    - ))} -
    -
    -
    - -
    -
    - -
    {itemTwo.label}
    -

    {itemTwo.title}

    - -

    {itemTwo.description}

    - {itemTwo.links.map((link) => ( -
    -
    - ))} -
    -
    -
    -
    - ) -} diff --git a/website/components/enterprise-comparison/style.css b/website/components/enterprise-comparison/style.css deleted file mode 100644 index ac9946b44d..0000000000 --- a/website/components/enterprise-comparison/style.css +++ /dev/null @@ -1,92 +0,0 @@ -.g-enterprise-comparison { - padding-top: 128px; - padding-bottom: 128px; - background: var(--gray-6); - - & h2 { - text-align: center; - } - - @media (max-width: 800px) { - padding-top: 64px; - padding-bottom: 64px; - } - - & .content-container { - display: flex; - justify-content: space-between; - margin: 0 auto 64px auto; - - @media (max-width: 800px) { - flex-wrap: wrap; - } - & .item { - flex-basis: 50%; - justify-content: center; - text-align: center; - margin-top: 64px; - - @media (max-width: 800px) { - margin-top: 64px; - flex-basis: 100%; - } - - & .g-type-label-strong { - margin-top: 64px; - - @media (max-width: 800px) { - margin-top: 32px; - } - } - - & h4 { - white-space: pre; - margin-top: 24px; - margin-bottom: 8px; - - @media (max-width: 800px) { - margin-top: 16px; - } - } - - & picture { - display: inline-block; - } - - & img { - max-width: 160px; - max-height: 98px; - } - & p { - margin-top: 0; - margin-bottom: 24px; - - @media (max-width: 800px) { - max-width: 600px; - margin-right: auto; - margin-left: auto; - } - } - } - - & .spacer { - & .vertical-spacer { - height: 93px; - } - - & .arrow { - display: flex; - align-items: center; - } - - @media (max-width: 800px) { - display: none; - } - } - } - - & .more-features-link { - display: flex; - justify-content: center; - } -} diff --git a/website/components/features-list/feature.tsx b/website/components/features-list/feature.tsx deleted file mode 100644 index 6be7add94d..0000000000 --- a/website/components/features-list/feature.tsx +++ /dev/null @@ -1,66 +0,0 @@ -import { ReactNode } from 'react' -import Button from '@hashicorp/react-button' -import s from './style.module.css' - -interface InfoSection { - heading: string - content: 
ReactNode -} - -interface Cta { - text: string - url: string -} - -export interface FeatureProps { - number: number - title: string - subtitle: string - infoSections: InfoSection[] - cta: Cta - image: string -} - -export default function Feature({ - number, - title, - subtitle, - infoSections, - cta, - image, -}: FeatureProps) { - return ( -
    -
    - {title} -
    -
    -
    - {number} -
    -
    -

    {title}

    -

    {subtitle}

    -
    - {infoSections.map(({ heading, content }) => ( -
    -

    {heading}

    - {content} -
    - ))} -
    -
    -
    -
    - ) -} diff --git a/website/components/features-list/images/bottom-left-design.svg b/website/components/features-list/images/bottom-left-design.svg deleted file mode 100644 index a899b8c2a4..0000000000 --- a/website/components/features-list/images/bottom-left-design.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - diff --git a/website/components/features-list/images/top-right-design.svg b/website/components/features-list/images/top-right-design.svg deleted file mode 100644 index f20ec362cd..0000000000 --- a/website/components/features-list/images/top-right-design.svg +++ /dev/null @@ -1,13 +0,0 @@ - - - - - - - - - - - - - diff --git a/website/components/features-list/index.tsx b/website/components/features-list/index.tsx deleted file mode 100644 index 8599ec739f..0000000000 --- a/website/components/features-list/index.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import Feature from './feature' -import s from './style.module.css' -import { FeatureProps } from './feature' - -interface FeaturesListProps { - title: string - features: Omit[] -} - -export default function FeaturesList({ title, features }: FeaturesListProps) { - return ( -
    -
    -

    {title}

    -
    - {features.map((feature, i) => ( - - ))} -
    -
    -
    - ) -} diff --git a/website/components/features-list/style.module.css b/website/components/features-list/style.module.css deleted file mode 100644 index f8479110f4..0000000000 --- a/website/components/features-list/style.module.css +++ /dev/null @@ -1,109 +0,0 @@ -.featureListContainer { - background-color: #000; - padding-top: 128px; - background-position: right top, left bottom; - background-repeat: no-repeat; - color: #fff; -} - -.contentWrapper { - composes: g-grid-container from global; -} - -.featureContainer { - display: flex; - align-items: center; - flex-wrap: wrap; - - & .featureTextContainer { - display: flex; - } - - & .imageContainer { - max-width: 490px; - margin: 0 auto; - padding-bottom: 40px; - - & img { - max-width: 100%; - } - } - - @media (--large) { - flex-wrap: nowrap; - flex-direction: row-reverse; - justify-content: space-between; - - & .featureTextContainer { - margin-right: 60px; - } - - & .featureText { - max-width: 488px; - } - - & .imageContainer { - margin: 0; - } - } -} - -.featuresContainer { - padding-top: 157px; - padding-bottom: 394px; - display: grid; - row-gap: 120px; -} - -.title { - composes: g-type-display-1 from global; - max-width: 488px; - margin: 0; -} - -.listNumber { - composes: g-type-display-5 from global; - min-width: 40px; - height: 40px; - background-color: var(--consul); - display: flex; - justify-content: center; - align-items: center; - margin-right: 64px; - margin-top: 10px; - - @media (--small) { - margin-right: 30px; - margin-top: 6px; - } -} - -.featureTitle { - composes: g-type-display-2 from global; - margin: 0; -} - -.featureSubtitle { - composes: g-type-body-large from global; - margin-top: 16px; - margin-bottom: 0; -} - -.infoTitle { - composes: g-type-display-5 from global; - margin-top: 0; - margin-bottom: 8px; -} - -.infoSection { - composes: g-type-body from global; - margin-top: 32px; - margin-bottom: 40px; - display: grid; - row-gap: 24px; - - & p, - & ul { - margin: 0; - } -} diff --git 
a/website/components/footer/index.jsx b/website/components/footer/index.jsx deleted file mode 100644 index 6df5112338..0000000000 --- a/website/components/footer/index.jsx +++ /dev/null @@ -1,32 +0,0 @@ -import Link from 'next/link' - -export default function Footer({ openConsentManager }) { - return ( - - ) -} diff --git a/website/components/footer/style.css b/website/components/footer/style.css deleted file mode 100644 index 5f04743afd..0000000000 --- a/website/components/footer/style.css +++ /dev/null @@ -1,32 +0,0 @@ -.g-footer { - padding: 25px 0 17px 0; - flex-shrink: 0; - display: flex; - - & .g-grid-container { - display: flex; - justify-content: space-between; - flex-wrap: wrap; - } - - & a { - color: black; - opacity: 0.5; - transition: opacity 0.25s ease; - cursor: pointer; - display: inline-block; - - &:hover { - opacity: 1; - } - } - - & .left > a { - margin-right: 20px; - margin-bottom: 8px; - - &:last-child { - margin-right: 0; - } - } -} diff --git a/website/components/hcp-callout-section/HCPCalloutSection.module.css b/website/components/hcp-callout-section/HCPCalloutSection.module.css deleted file mode 100644 index 461213ad22..0000000000 --- a/website/components/hcp-callout-section/HCPCalloutSection.module.css +++ /dev/null @@ -1,82 +0,0 @@ -.hcpCalloutSection { - composes: g-grid-container from global; - padding-top: 88px; - padding-bottom: 88px; - - & .header { - display: flex; - justify-content: center; - margin-bottom: 88px; - @media (max-width: 1120px) { - margin-bottom: 48px; - } - & h2 { - margin: 0; - text-align: center; - max-width: 450px; - } - } - - & .content { - display: flex; - flex-direction: row; - justify-content: space-between; - - @media (max-width: 1120px) { - flex-direction: column-reverse; - - & .info { - margin-top: 32px; - } - } - - & .info { - max-width: 488px; - margin-right: 32px; - - & h1 { - margin-top: 0; - margin-bottom: 8px; - } - & .chin { - color: var(--gray-3); - } - & .description { - color: var(--gray-2); - 
margin-top: 28px; - margin-bottom: 0; - - @media (max-width: 900px) { - margin-top: 18px; - } - } - & .links { - margin-top: 32px; - display: inline-flex; - flex-direction: column; - - & > * { - &:not(:last-child) { - margin-bottom: 24px; - } - } - } - } - - & > img { - align-self: center; - margin-right: -48px; - @media (max-width: 670px) { - max-width: 100%; - } - } - } -} - -.chin { - composes: g-type-label from global; -} - -.description { - composes: g-type-long-body from global; -} diff --git a/website/components/hcp-callout-section/index.jsx b/website/components/hcp-callout-section/index.jsx deleted file mode 100644 index 605429be3b..0000000000 --- a/website/components/hcp-callout-section/index.jsx +++ /dev/null @@ -1,44 +0,0 @@ -import styles from './HCPCalloutSection.module.css' -import Button from '@hashicorp/react-button' - -export default function HcpCalloutSection({ - id, - header, - title, - description, - chin, - image, - links, -}) { - return ( -
    -
    -

    {header}

    -
    - -
    -
    -

    {title}

    - {chin} -

    {description}

    -
    - {links.map((link, index) => { - const variant = index === 0 ? 'primary' : 'tertiary' - return ( -
    -
    - ) - })} -
    -
    - {title} -
    -
    - ) -} diff --git a/website/components/homepage-hero/index.jsx b/website/components/homepage-hero/index.jsx deleted file mode 100644 index cadbe232c2..0000000000 --- a/website/components/homepage-hero/index.jsx +++ /dev/null @@ -1,61 +0,0 @@ -import s from './style.module.css' -import Hero from '@hashicorp/react-hero' - -export default function HomepageHero({ - title, - description, - links, - uiVideo, - cliVideo, - alert, - image, -}) { - return ( -
    - -
    - ) -} diff --git a/website/components/homepage-hero/style.module.css b/website/components/homepage-hero/style.module.css deleted file mode 100644 index 4aa5d4a9e8..0000000000 --- a/website/components/homepage-hero/style.module.css +++ /dev/null @@ -1,18 +0,0 @@ -.consulHero { - /* Customize the branding */ - & :global(.carousel .controls .control) { - color: var(--gray-2); - & :global(.progress-bar) { - background: var(--gray-5); - & span { - background: var(--consul); - } - } - } - & :global(.g-hero .carousel) { - & :global(.video-wrapper.is-active) { - /* Padding % modifier differs slightly from react-hero to accommodate video heights */ - padding-top: calc((100% * 0.57) + 28px); /* !important; */ - } - } -} diff --git a/website/components/io-card-container/index.tsx b/website/components/io-card-container/index.tsx deleted file mode 100644 index e71ab886e0..0000000000 --- a/website/components/io-card-container/index.tsx +++ /dev/null @@ -1,82 +0,0 @@ -import * as React from 'react' -import classNames from 'classnames' -import Button from '@hashicorp/react-button' -import IoCard, { IoCardProps } from 'components/io-card' -import s from './style.module.css' - -interface IoCardContaianerProps { - theme?: 'light' | 'dark' - heading?: string - description?: string - label?: string - cta?: { - url: string - text: string - } - cardsPerRow: 3 | 4 - cards: Array -} - -export default function IoCardContaianer({ - theme = 'light', - heading, - description, - label, - cta, - cardsPerRow = 3, - cards, -}: IoCardContaianerProps): React.ReactElement { - return ( -
    - {heading || description ? ( -
    - {heading ?

    {heading}

    : null} - {description ?

    {description}

    : null} -
    - ) : null} - {cards.length ? ( - <> - {label || cta ? ( -
    - {label ?

    {label}

    : null} - {cta ? ( -
    - ) : null} -
      - {cards.map((card, index) => { - return ( - // Index is stable - // eslint-disable-next-line react/no-array-index-key -
    • - -
    • - ) - })} -
    - - ) : null} -
    - ) -} diff --git a/website/components/io-card-container/style.module.css b/website/components/io-card-container/style.module.css deleted file mode 100644 index b7b9b08d28..0000000000 --- a/website/components/io-card-container/style.module.css +++ /dev/null @@ -1,114 +0,0 @@ -.cardContainer { - position: relative; - - & + .cardContainer { - margin-top: 64px; - - @media (--medium-up) { - margin-top: 132px; - } - } -} - -.header { - margin: 0 auto 64px; - text-align: center; - max-width: 600px; -} - -.heading { - margin: 0; - composes: g-type-display-2 from global; - - @nest .dark & { - color: var(--white); - } -} - -.description { - margin: 8px 0 0; - composes: g-type-body-large from global; - - @nest .dark & { - color: var(--gray-5); - } -} - -.subHeader { - margin: 0 0 32px; - display: flex; - align-items: center; - justify-content: space-between; - - @nest .dark & { - color: var(--gray-5); - } -} - -.label { - margin: 0; - composes: g-type-display-4 from global; -} - -.cardList { - list-style: none; - - --minCol: 250px; - --columns: var(--length); - - position: relative; - gap: 32px; - padding: 0; - - @media (--small) { - display: flex; - overflow-x: auto; - -ms-overflow-style: none; - scrollbar-width: none; - margin: 0; - padding: 6px 24px; - left: 50%; - margin-left: -50vw; - width: 100vw; - - /* This is to ensure there is overflow padding right on mobile. 
*/ - &::after { - content: ''; - display: block; - width: 1px; - flex-shrink: 0; - } - } - - @media (--medium-up) { - display: grid; - grid-template-columns: repeat(var(--columns), minmax(var(--minCol), 1fr)); - } - - &.threeUp { - @media (--medium-up) { - --columns: 3; - --minCol: 0; - } - } - - &.fourUp { - @media (--medium-up) { - --columns: 3; - --minCol: 0; - } - - @media (--large) { - --columns: 4; - } - } - - & > li { - display: flex; - - @media (--small) { - flex-shrink: 0; - width: 250px; - } - } -} diff --git a/website/components/io-card/index.tsx b/website/components/io-card/index.tsx deleted file mode 100644 index 64baa40816..0000000000 --- a/website/components/io-card/index.tsx +++ /dev/null @@ -1,124 +0,0 @@ -import * as React from 'react' -import Link from 'next/link' -import InlineSvg from '@hashicorp/react-inline-svg' -import classNames from 'classnames' -import { IconArrowRight24 } from '@hashicorp/flight-icons/svg-react/arrow-right-24' -import { IconExternalLink24 } from '@hashicorp/flight-icons/svg-react/external-link-24' -import { productLogos } from './product-logos' -import s from './style.module.css' - -export interface IoCardProps { - variant?: 'light' | 'gray' | 'dark' - products?: Array<{ - name: keyof typeof productLogos - }> - link: { - url: string - type: 'inbound' | 'outbound' - } - inset?: 'none' | 'sm' | 'md' - eyebrow?: string - heading?: string - description?: string - children?: React.ReactNode -} - -function IoCard({ - variant = 'light', - products, - link, - inset = 'md', - eyebrow, - heading, - description, - children, -}: IoCardProps): React.ReactElement { - const LinkWrapper = ({ className, children }) => - link.type === 'inbound' ? ( - - {children} - - ) : ( - - {children} - - ) - - return ( -
    - - {children ? ( - children - ) : ( - <> - {eyebrow ? {eyebrow} : null} - {heading ? {heading} : null} - {description ? {description} : null} - - )} -
    - {products && ( -
      - {products.map(({ name }, index) => { - const key = name.toLowerCase() - const version = variant === 'dark' ? 'neutral' : 'color' - return ( - // eslint-disable-next-line react/no-array-index-key -
    • - -
    • - ) - })} -
    - )} - - {link.type === 'inbound' ? ( - - ) : ( - - )} - -
    -
    -
    - ) -} - -interface EyebrowProps { - children: string -} - -function Eyebrow({ children }: EyebrowProps) { - return

    {children}

    -} - -interface HeadingProps { - as?: 'h2' | 'h3' | 'h4' - children: React.ReactNode -} - -function Heading({ as: Component = 'h2', children }: HeadingProps) { - return {children} -} - -interface DescriptionProps { - children: string -} - -function Description({ children }: DescriptionProps) { - return

    {children}

    -} - -IoCard.Eyebrow = Eyebrow -IoCard.Heading = Heading -IoCard.Description = Description - -export default IoCard diff --git a/website/components/io-card/product-logos.ts b/website/components/io-card/product-logos.ts deleted file mode 100644 index 9c24e3bf47..0000000000 --- a/website/components/io-card/product-logos.ts +++ /dev/null @@ -1,34 +0,0 @@ -export const productLogos = { - boundary: { - color: require('@hashicorp/mktg-logos/product/boundary/logomark/color.svg?include'), - neutral: require('@hashicorp/mktg-logos/product/boundary/logomark/white.svg?include'), - }, - consul: { - color: require('@hashicorp/mktg-logos/product/consul/logomark/color.svg?include'), - neutral: require('@hashicorp/mktg-logos/product/consul/logomark/white.svg?include'), - }, - nomad: { - color: require('@hashicorp/mktg-logos/product/nomad/logomark/color.svg?include'), - neutral: require('@hashicorp/mktg-logos/product/nomad/logomark/white.svg?include'), - }, - packer: { - color: require('@hashicorp/mktg-logos/product/packer/logomark/color.svg?include'), - neutral: require('@hashicorp/mktg-logos/product/packer/logomark/white.svg?include'), - }, - terraform: { - color: require('@hashicorp/mktg-logos/product/terraform/logomark/color.svg?include'), - neutral: require('@hashicorp/mktg-logos/product/terraform/logomark/white.svg?include'), - }, - vagrant: { - color: require('@hashicorp/mktg-logos/product/vagrant/logomark/color.svg?include'), - neutral: require('@hashicorp/mktg-logos/product/vagrant/logomark/white.svg?include'), - }, - vault: { - color: require('@hashicorp/mktg-logos/product/vault/logomark/color.svg?include'), - neutral: require('@hashicorp/mktg-logos/product/vault/logomark/white.svg?include'), - }, - waypoint: { - color: require('@hashicorp/mktg-logos/product/waypoint/logomark/color.svg?include'), - neutral: require('@hashicorp/mktg-logos/product/waypoint/logomark/white.svg?include'), - }, -} diff --git a/website/components/io-card/style.module.css 
b/website/components/io-card/style.module.css deleted file mode 100644 index 44df36cedf..0000000000 --- a/website/components/io-card/style.module.css +++ /dev/null @@ -1,148 +0,0 @@ -.card { - /* Radii */ - --token-radius: 6px; - - /* Spacing */ - --token-spacing-03: 8px; - --token-spacing-04: 16px; - --token-spacing-05: 24px; - --token-spacing-06: 32px; - - /* Elevations */ - --token-elevation-mid: 0 2px 3px rgba(101, 106, 118, 0.1), - 0 8px 16px -10px rgba(101, 106, 118, 0.2); - --token-elevation-high: 0 2px 3px rgba(101, 106, 118, 0.15), - 0 16px 16px -10px rgba(101, 106, 118, 0.2); - - /* Transition */ - --token-transition: ease-in-out 0.2s; - - display: flex; - flex-direction: column; - flex-grow: 1; - min-height: 300px; - - & a { - display: flex; - flex-direction: column; - flex-grow: 1; - border-radius: var(--token-radius); - box-shadow: 0 0 0 1px rgba(38, 53, 61, 0.1), var(--token-elevation-mid); - transition: var(--token-transition); - transition-property: background-color, box-shadow; - - &:hover { - box-shadow: 0 0 0 2px rgba(38, 53, 61, 0.15), var(--token-elevation-high); - cursor: pointer; - } - - /* Variants */ - &.dark { - background-color: var(--gray-1); - - &:hover { - background-color: var(--gray-2); - } - } - - &.gray { - background-color: #f9f9fa; - } - - &.light { - background-color: var(--white); - } - - /* Spacing */ - &.none { - padding: 0; - } - - &.sm { - padding: var(--token-spacing-05); - } - - &.md { - padding: var(--token-spacing-06); - } - } -} - -.eyebrow { - margin: 0; - composes: g-type-label-small from global; - color: var(--gray-3); - - @nest .dark & { - color: var(--gray-5); - } -} - -.heading { - margin: 0; - composes: g-type-display-5 from global; - color: var(--black); - - @nest * + & { - margin-top: var(--token-spacing-05); - } - - @nest .dark & { - color: var(--white); - } -} - -.description { - margin: 0; - composes: g-type-body-small from global; - color: var(--gray-3); - - @nest * + & { - margin-top: 
var(--token-spacing-03); - } - - @nest .dark & { - color: var(--gray-5); - } -} - -.footer { - margin-top: auto; - display: flex; - justify-content: space-between; - align-items: flex-end; - padding-top: 32px; -} - -.products { - display: flex; - gap: 8px; - margin: 0; - padding: 0; - - & > li { - width: 32px; - height: 32px; - display: grid; - place-items: center; - } - - & .logo { - display: flex; - - & svg { - width: 32px; - height: 32px; - } - } -} - -.linkType { - margin-left: auto; - display: flex; - color: var(--black); - - @nest .dark & { - color: var(--white); - } -} diff --git a/website/components/io-dialog/index.tsx b/website/components/io-dialog/index.tsx deleted file mode 100644 index 14298b3053..0000000000 --- a/website/components/io-dialog/index.tsx +++ /dev/null @@ -1,47 +0,0 @@ -import * as React from 'react' -import { DialogOverlay, DialogContent, DialogOverlayProps } from '@reach/dialog' -import { AnimatePresence, motion } from 'framer-motion' -import s from './style.module.css' - -export interface IoDialogProps extends DialogOverlayProps { - label: string -} - -export default function IoDialog({ - isOpen, - onDismiss, - children, - label, -}: IoDialogProps): React.ReactElement { - const AnimatedDialogOverlay = motion(DialogOverlay) - return ( - - {isOpen && ( - -
    - - - - {children} - - -
    -
    - )} -
    - ) -} diff --git a/website/components/io-dialog/style.module.css b/website/components/io-dialog/style.module.css deleted file mode 100644 index 306619ac8a..0000000000 --- a/website/components/io-dialog/style.module.css +++ /dev/null @@ -1,62 +0,0 @@ -.dialogOverlay { - background-color: rgba(0, 0, 0, 0.75); - height: 100%; - left: 0; - overflow-y: auto; - position: fixed; - top: 0; - width: 100%; - z-index: 666666667 /* higher than global nav */; -} - -.dialogWrapper { - display: grid; - min-height: 100vh; - padding: 24px; - place-items: center; -} - -.dialogContent { - background-color: var(--gray-1); - color: var(--white); - max-width: 800px; - outline: none; - overflow-y: auto; - padding: 24px; - position: relative; - width: 100%; - - @media (min-width: 768px) { - padding: 48px; - } -} - -.dialogClose { - appearance: none; - background-color: transparent; - border: 0; - composes: g-type-display-5 from global; - cursor: pointer; - margin: 0; - padding: 0; - position: absolute; - color: var(--white); - right: 24px; - top: 24px; - z-index: 1; - - @media (min-width: 768px) { - right: 48px; - top: 48px; - } - - @nest html[dir='rtl'] & { - left: 24px; - right: auto; - - @media (min-width: 768px) { - left: 48px; - right: auto; - } - } -} diff --git a/website/components/io-home-call-to-action/index.tsx b/website/components/io-home-call-to-action/index.tsx deleted file mode 100644 index 7296b361b1..0000000000 --- a/website/components/io-home-call-to-action/index.tsx +++ /dev/null @@ -1,39 +0,0 @@ -import ReactCallToAction from '@hashicorp/react-call-to-action' -import { Products } from '@hashicorp/platform-product-meta' -import s from './style.module.css' - -interface IoHomeCallToActionProps { - brand: Products - heading: string - content: string - links: Array<{ - text: string - url: string - }> -} - -export default function IoHomeCallToAction({ - brand, - heading, - content, - links, -}: IoHomeCallToActionProps) { - return ( -
    - { - return { - text, - url, - type: index === 1 ? 'inbound' : null, - } - })} - /> -
    - ) -} diff --git a/website/components/io-home-call-to-action/style.module.css b/website/components/io-home-call-to-action/style.module.css deleted file mode 100644 index 76cb034469..0000000000 --- a/website/components/io-home-call-to-action/style.module.css +++ /dev/null @@ -1,12 +0,0 @@ -.callToAction { - margin: 60px auto; - background-image: linear-gradient(52.3deg, #2c2d2f 39.83%, #626264 96.92%); - - @media (--medium-up) { - margin: 120px auto; - } - - & > * { - background-color: transparent; - } -} diff --git a/website/components/io-home-case-studies/index.tsx b/website/components/io-home-case-studies/index.tsx deleted file mode 100644 index 3155749e27..0000000000 --- a/website/components/io-home-case-studies/index.tsx +++ /dev/null @@ -1,81 +0,0 @@ -import * as React from 'react' -import Image from 'next/image' -import { IconExternalLink16 } from '@hashicorp/flight-icons/svg-react/external-link-16' -import { IconArrowRight16 } from '@hashicorp/flight-icons/svg-react/arrow-right-16' -import s from './style.module.css' - -interface IoHomeCaseStudiesProps { - isInternalLink: (link: string) => boolean - heading: string - description: string - primary: Array<{ - thumbnail: { - url: string - alt: string - } - link: string - heading: string - }> - secondary: Array<{ - link: string - heading: string - }> -} - -export default function IoHomeCaseStudies({ - isInternalLink, - heading, - description, - primary, - secondary, -}: IoHomeCaseStudiesProps): React.ReactElement { - return ( -
    -
    -
    -

    {heading}

    -

    {description}

    -
    -
    - - - -
    -
    -
    - ) -} diff --git a/website/components/io-home-case-studies/style.module.css b/website/components/io-home-case-studies/style.module.css deleted file mode 100644 index 63ff3102fd..0000000000 --- a/website/components/io-home-case-studies/style.module.css +++ /dev/null @@ -1,170 +0,0 @@ -.root { - position: relative; - margin: 60px auto; - max-width: 1600px; - - @media (--medium-up) { - margin: 120px auto; - } -} - -.container { - composes: g-grid-container from global; -} - -.header { - margin-bottom: 32px; - - @media (--medium-up) { - max-width: calc(100% * 5 / 12); - } -} - -.heading { - margin: 0; - composes: g-type-display-3 from global; -} - -.description { - margin: 8px 0 0; - composes: g-type-body from global; - color: var(--gray-3); -} - -.caseStudies { - --columns: 1; - - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 32px; - - @media (--medium-up) { - --columns: 12; - } -} - -.primary { - --columns: 1; - - grid-column: 1 / -1; - list-style: none; - margin: 0; - padding: 0; - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 32px; - - @media (--medium-up) { - --columns: 2; - } - - @media (--large) { - grid-column: 1 / 9; - } -} - -.primaryItem { - display: flex; -} - -.card { - position: relative; - overflow: hidden; - display: flex; - flex-direction: column; - flex-grow: 1; - justify-content: flex-end; - padding: 32px; - box-shadow: 0 8px 16px -10px rgba(101, 106, 118, 0.2); - background-color: #000; - border-radius: 6px; - color: var(--white); - transition: ease-in-out 0.2s; - transition-property: box-shadow; - min-height: 300px; - - &::before { - content: ''; - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - z-index: 10; - border-radius: 6px; - background-image: linear-gradient( - to bottom, - rgba(0, 0, 0, 0), - rgba(0, 0, 0, 0.45) - ); - transition: opacity ease-in-out 0.2s; - } - - &:hover { - box-shadow: 0 2px 3px rgba(101, 106, 118, 0.15), - 0 
16px 16px -10px rgba(101, 106, 118, 0.2); - - &::before { - opacity: 0.75; - } - } -} - -.cardThumbnail { - transition: transform 0.4s; - - @nest .card:hover & { - transform: scale(1.04); - } -} - -.cardHeading { - margin: 0; - composes: g-type-display-4 from global; - z-index: 10; -} - -.secondary { - grid-column: 1 / -1; - list-style: none; - margin: 0; - padding: 0; - - @media (--large) { - margin-top: -32px; - grid-column: 9 / -1; - } -} - -.secondaryItem { - border-bottom: 1px solid var(--gray-5); -} - -.link { - display: flex; - width: 100%; - color: var(--black); -} - -.linkInner { - display: flex; - width: 100%; - justify-content: space-between; - padding-top: 32px; - padding-bottom: 32px; - transition: transform ease-in-out 0.2s; - - @nest .link:hover & { - transform: translateX(4px); - } - - & svg { - margin-top: 6px; - flex-shrink: 0; - } -} - -.linkHeading { - margin: 0 32px 0 0; - composes: g-type-display-6 from global; -} diff --git a/website/components/io-home-feature/index.tsx b/website/components/io-home-feature/index.tsx deleted file mode 100644 index f3e910fcdd..0000000000 --- a/website/components/io-home-feature/index.tsx +++ /dev/null @@ -1,80 +0,0 @@ -import * as React from 'react' -import Image from 'next/image' -import Link from 'next/link' -import { IconArrowRight16 } from '@hashicorp/flight-icons/svg-react/arrow-right-16' -import s from './style.module.css' - -export interface IoHomeFeatureProps { - isInternalLink: (link: string) => boolean - link?: string - image: { - url: string - alt: string - } - heading: string - description: string -} - -export default function IoHomeFeature({ - isInternalLink, - link, - image, - heading, - description, -}: IoHomeFeatureProps): React.ReactElement { - return ( - -
    - {image.alt} -
    -
    -

    {heading}

    -

    {description}

    - {link ? ( - - Learn more{' '} - - - - - ) : null} -
    -
    - ) -} - -interface IoHomeFeatureWrapProps { - isInternalLink: (link: string) => boolean - href: string - children: React.ReactNode -} - -function IoHomeFeatureWrap({ - isInternalLink, - href, - children, -}: IoHomeFeatureWrapProps) { - if (!href) { - return
    {children}
    - } - - if (isInternalLink(href)) { - return ( - - {children} - - ) - } - - return ( - - {children} - - ) -} diff --git a/website/components/io-home-feature/style.module.css b/website/components/io-home-feature/style.module.css deleted file mode 100644 index 70c2cc5103..0000000000 --- a/website/components/io-home-feature/style.module.css +++ /dev/null @@ -1,79 +0,0 @@ -.feature { - display: flex; - align-items: center; - flex-direction: column; - padding: 32px; - gap: 24px 64px; - border-radius: 6px; - background-color: #f9f9fa; - color: var(--black); - box-shadow: 0 2px 3px rgba(101, 106, 118, 0.1), - 0 8px 16px -10px rgba(101, 106, 118, 0.2); - - @media (--medium-up) { - flex-direction: row; - } -} - -.featureLink { - transition: box-shadow ease-in-out 0.2s; - - &:hover { - box-shadow: 0 2px 3px rgba(101, 106, 118, 0.15), - 0 16px 16px -10px rgba(101, 106, 118, 0.2); - } -} - -.featureMedia { - flex-shrink: 0; - display: flex; - width: 100%; - border-radius: 6px; - overflow: hidden; - border: 1px solid var(--gray-5); - - @media (--medium-up) { - width: 300px; - } - - @media (--large) { - width: 400px; - } - - & > * { - width: 100%; - } -} - -.featureContent { - max-width: 520px; -} - -.featureHeading { - margin: 0; - composes: g-type-display-4 from global; -} - -.featureDescription { - margin: 8px 0 24px; - composes: g-type-body-small from global; - color: var(--gray-3); -} - -.featureCta { - display: inline-flex; - align-items: center; - - & > span { - display: flex; - margin-left: 12px; - - & > svg { - transition: transform 0.2s; - } - } - - @nest .feature:hover & span svg { - transform: translateX(2px); - } -} diff --git a/website/components/io-home-hero/index.tsx b/website/components/io-home-hero/index.tsx deleted file mode 100644 index fabaafd37c..0000000000 --- a/website/components/io-home-hero/index.tsx +++ /dev/null @@ -1,135 +0,0 @@ -import * as React from 'react' -import { Products } from '@hashicorp/platform-product-meta' -import Button from 
'@hashicorp/react-button' -import classNames from 'classnames' -import s from './style.module.css' - -interface IoHomeHeroProps { - pattern: string - brand: Products | 'neutral' - heading: string - description: string - ctas: Array<{ - title: string - link: string - }> - cards: Array -} - -export default function IoHomeHero({ - pattern, - brand, - heading, - description, - ctas, - cards, -}: IoHomeHeroProps) { - const [loaded, setLoaded] = React.useState(false) - - React.useEffect(() => { - setTimeout(() => { - setLoaded(true) - }, 250) - }, []) - - return ( -
    - -
    -
    -

    {heading}

    -

    {description}

    - {ctas && ( -
    - {ctas.map((cta, index) => { - return ( -
    - )} -
    - {cards && ( -
    - {cards.map((card, index) => { - return ( - - ) - })} -
    - )} -
    -
    - ) -} - -interface IoHomeHeroCardProps { - index?: number - heading: string - description: string - cta: { - title: string - link: string - brand?: 'neutral' | Products - } - subText: string -} - -function IoHomeHeroCard({ - index, - heading, - description, - cta, - subText, -}: IoHomeHeroCardProps): React.ReactElement { - return ( -
    -

    {heading}

    -

    {description}

    -
    - ) -} diff --git a/website/components/io-home-hero/style.module.css b/website/components/io-home-hero/style.module.css deleted file mode 100644 index c7f47026ff..0000000000 --- a/website/components/io-home-hero/style.module.css +++ /dev/null @@ -1,148 +0,0 @@ -.hero { - position: relative; - padding-top: 64px; - padding-bottom: 64px; - background: linear-gradient(180deg, #f9f9fa 0%, #fff 28.22%, #fff 100%); - - @media (--medium-up) { - padding-top: 128px; - padding-bottom: 128px; - } -} - -.pattern { - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - max-width: 1600px; - width: 100%; - margin: auto; - - @media (--medium-up) { - background-image: var(--pattern); - background-repeat: no-repeat; - background-position: top right; - } -} - -.container { - --columns: 1; - - composes: g-grid-container from global; - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 48px 32px; - - @media (--medium-up) { - --columns: 12; - } -} - -.content { - grid-column: 1 / -1; - - @media (--medium-up) { - grid-column: 1 / 6; - } - - & > * { - max-width: 415px; - } -} - -.heading { - margin: 0; - composes: g-type-display-1 from global; -} - -.description { - margin: 8px 0 0; - composes: g-type-body-small from global; - color: var(--gray-3); -} - -.ctas { - margin-top: 24px; - display: flex; - flex-direction: column; - align-items: flex-start; - gap: 24px; -} - -.cards { - --columns: 1; - - grid-column: 1 / -1; - align-self: start; - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 32px; - - @media (min-width: 600px) { - --columns: 2; - } - - @media (--medium-up) { - --columns: 1; - - grid-column: 7 / -1; - } - - @media (--large) { - --columns: 2; - - grid-column: 6 / -1; - } -} - -.card { - --token-radius: 6px; - --token-elevation-mid: 0 2px 3px rgba(101, 106, 118, 0.1), - 0 8px 16px -10px rgba(101, 106, 118, 0.2); - - opacity: 0; - padding: 40px 32px; - display: flex; - align-items: 
flex-start; - flex-direction: column; - flex-grow: 1; - background-color: var(--white); - border-radius: var(--token-radius); - box-shadow: 0 0 0 1px rgba(38, 53, 61, 0.1), var(--token-elevation-mid); - - @nest .loaded & { - animation-name: slideIn; - animation-duration: 0.5s; - animation-delay: calc(var(--index) * 0.1s); - animation-fill-mode: forwards; - } -} - -.cardHeading { - margin: 0; - composes: g-type-display-4 from global; -} - -.cardDescription { - margin: 8px 0 16px; - composes: g-type-display-6 from global; -} - -.cardSubText { - margin: 32px 0 0; - composes: g-type-body-small from global; - color: var(--gray-3); -} - -@keyframes slideIn { - from { - opacity: 0; - transform: translateY(50px); - } - to { - opacity: 1; - transform: translateY(0); - } -} diff --git a/website/components/io-home-in-practice/index.tsx b/website/components/io-home-in-practice/index.tsx deleted file mode 100644 index 6e145b2e95..0000000000 --- a/website/components/io-home-in-practice/index.tsx +++ /dev/null @@ -1,86 +0,0 @@ -import * as React from 'react' -import Image from 'next/image' -import Button from '@hashicorp/react-button' -import { Products } from '@hashicorp/platform-product-meta' -import { IoCardProps } from 'components/io-card' -import IoCardContainer from 'components/io-card-container' -import s from './style.module.css' - -interface IoHomeInPracticeProps { - brand: Products - pattern: string - heading: string - description: string - cards: Array - cta: { - heading: string - description: string - link: string - image: { - url: string - alt: string - width: number - height: number - } - } -} - -export default function IoHomeInPractice({ - brand, - pattern, - heading, - description, - cards, - cta, -}: IoHomeInPracticeProps) { - return ( -
    -
    - - - {cta.heading ? ( -
    -
    -

    {cta.heading}

    - {cta.description ? ( -

    {cta.description}

    - ) : null} - {cta.link ? ( -
    - {cta.image?.url ? ( -
    - {cta.image.alt} -
    - ) : null} -
    - ) : null} -
    -
    - ) -} diff --git a/website/components/io-home-in-practice/style.module.css b/website/components/io-home-in-practice/style.module.css deleted file mode 100644 index 13ed2bfd99..0000000000 --- a/website/components/io-home-in-practice/style.module.css +++ /dev/null @@ -1,98 +0,0 @@ -.inPractice { - position: relative; - margin: 60px auto; - padding: 64px 0; - max-width: 1600px; - - @media (--medium-up) { - padding: 80px 0; - margin: 120px auto; - } - - &::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background: var(--black); - background-image: var(--pattern); - background-repeat: no-repeat; - background-size: 50%; - background-position: top 200px left; - - @media (--large) { - border-radius: 6px; - left: 24px; - right: 24px; - background-size: 35%; - background-position: top 64px left; - } - } -} - -.container { - composes: g-grid-container from global; -} - -.inPracticeCta { - --columns: 1; - - position: relative; - margin-top: 64px; - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 64px 32px; - - @media (--medium-up) { - --columns: 12; - } - - &::before { - content: ''; - position: absolute; - top: 0; - left: 0; - width: 100%; - bottom: -64px; - background-image: radial-gradient( - 42.33% 42.33% at 50% 100%, - #363638 0%, - #000 100% - ); - - @media (--medium-up) { - bottom: -80px; - } - } -} - -.inPracticeCtaContent { - position: relative; - grid-column: 1 / -1; - - @media (--medium-up) { - grid-column: 1 / 5; - } -} - -.inPracticeCtaMedia { - grid-column: 1 / -1; - - @media (--medium-up) { - grid-column: 6 / -1; - } -} - -.inPracticeCtaHeading { - margin: 0; - color: var(--white); - composes: g-type-display-3 from global; -} - -.inPracticeCtaDescription { - margin: 8px 0 32px; - color: var(--gray-5); - composes: g-type-body from global; -} diff --git a/website/components/io-home-intro/index.tsx b/website/components/io-home-intro/index.tsx deleted file mode 100644 index 
c8081b4f79..0000000000 --- a/website/components/io-home-intro/index.tsx +++ /dev/null @@ -1,155 +0,0 @@ -import * as React from 'react' -import Image from 'next/image' -import classNames from 'classnames' -import { Products } from '@hashicorp/platform-product-meta' -import Button from '@hashicorp/react-button' -import IoVideoCallout, { - IoHomeVideoCalloutProps, -} from 'components/io-video-callout' -import IoHomeFeature, { IoHomeFeatureProps } from 'components/io-home-feature' -import s from './style.module.css' - -interface IoHomeIntroProps { - isInternalLink: (link: string) => boolean - brand: Products - heading: string - description: string - features?: Array - offerings?: { - image: { - src: string - width: number - height: number - alt: string - } - list: Array<{ - heading: string - description: string - }> - cta?: { - title: string - link: string - } - } - video?: IoHomeVideoCalloutProps -} - -export default function IoHomeIntro({ - isInternalLink, - brand, - heading, - description, - features, - offerings, - video, -}: IoHomeIntroProps) { - return ( -
    -
    -
    -
    -

    {heading}

    -

    {description}

    -
    -
    -
    - - {features ? ( -
      - {features.map((feature, index) => { - return ( - // Index is stable - // eslint-disable-next-line react/no-array-index-key -
    • -
      - -
      -
    • - ) - })} -
    - ) : null} - - {offerings ? ( -
    - {offerings.image ? ( -
    - {offerings.image.alt} -
    - ) : null} -
    -
      - {offerings.list.map((offering, index) => { - return ( - // Index is stable - // eslint-disable-next-line react/no-array-index-key -
    • -

      - {offering.heading} -

      -

      - {offering.description} -

      -
    • - ) - })} -
    - {offerings.cta ? ( -
    -
    - ) : null} -
    -
    - ) : null} - - {video ? ( -
    - -
    - ) : null} -
    - ) -} diff --git a/website/components/io-home-intro/style.module.css b/website/components/io-home-intro/style.module.css deleted file mode 100644 index 6227a49ba3..0000000000 --- a/website/components/io-home-intro/style.module.css +++ /dev/null @@ -1,169 +0,0 @@ -.root { - position: relative; - margin-bottom: 60px; - - @media (--medium-up) { - margin-bottom: 120px; - } - - &.withOfferings:not(.withFeatures)::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background-image: radial-gradient( - 93.55% 93.55% at 50% 0%, - var(--gray-6) 0%, - rgba(242, 242, 243, 0) 100% - ); - - @media (--large) { - border-radius: 6px; - left: 24px; - right: 24px; - } - } -} - -.container { - composes: g-grid-container from global; -} - -.header { - padding-top: 64px; - padding-bottom: 64px; - text-align: center; - - @nest .withFeatures & { - background-color: var(--brand); - } - - @nest .withFeatures.consul & { - color: var(--white); - } -} - -.headerInner { - margin: auto; - - @media (--medium-up) { - max-width: calc(100% * 7 / 12); - } -} - -.heading { - margin: 0; - composes: g-type-display-2 from global; -} - -.description { - margin: 24px 0 0; - composes: g-type-body-large from global; - - @nest .withOfferings:not(.withFeatures) & { - color: var(--gray-3); - } -} - -/* - * Features - */ - -.features { - list-style: none; - margin: 0; - padding: 0; - display: grid; - gap: 32px; - - & li:first-of-type { - background-image: linear-gradient( - to bottom, - var(--brand) 50%, - var(--white) 50% - ); - } -} - -/* - * Offerings - */ - -.offerings { - --columns: 1; - - composes: g-grid-container from global; - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 64px 32px; - - @media (--medium-up) { - --columns: 12; - } - - @nest .features + & { - margin-top: 60px; - - @media (--medium-up) { - margin-top: 120px; - } - } -} - -.offeringsMedia { - grid-column: 1 / -1; - - @media (--medium-up) { - 
grid-column: 1 / 6; - } -} - -.offeringsContent { - grid-column: 1 / -1; - - @media (--medium-up) { - grid-column: 7 / -1; - } -} - -.offeringsList { - list-style: none; - margin: 0; - padding: 0; - display: grid; - grid-template-columns: repeat(2, 1fr); - gap: 32px; - - @media (--small) { - grid-template-columns: repeat(1, 1fr); - } -} - -.offeringsListHeading { - margin: 0; - composes: g-type-display-4 from global; -} - -.offeringsListDescription { - margin: 16px 0 0; - composes: g-type-body-small from global; -} - -.offeringsCta { - margin-top: 48px; -} - -/* - * Video - */ - -.video { - margin-top: 60px; - composes: g-grid-container from global; - - @media (--medium-up) { - margin-top: 120px; - } -} diff --git a/website/components/io-home-pre-footer/index.tsx b/website/components/io-home-pre-footer/index.tsx deleted file mode 100644 index 98127443dd..0000000000 --- a/website/components/io-home-pre-footer/index.tsx +++ /dev/null @@ -1,79 +0,0 @@ -import * as React from 'react' -import classNames from 'classnames' -import { Products } from '@hashicorp/platform-product-meta' -import { IconArrowRight16 } from '@hashicorp/flight-icons/svg-react/arrow-right-16' -import s from './style.module.css' - -interface IoHomePreFooterProps { - brand: Products - heading: string - description: string - ctas: [IoHomePreFooterCard, IoHomePreFooterCard, IoHomePreFooterCard] -} - -export default function IoHomePreFooter({ - brand, - heading, - description, - ctas, -}: IoHomePreFooterProps) { - return ( -
    -
    -
    -

    {heading}

    -

    {description}

    -
    -
    - {ctas.map((cta, index) => { - return ( - - ) - })} -
    -
    -
    - ) -} - -interface IoHomePreFooterCard { - brand?: string - link: string - heading: string - description: string - cta: string -} - -function IoHomePreFooterCard({ - brand, - link, - heading, - description, - cta, -}: IoHomePreFooterCard): React.ReactElement { - return ( - -

    {heading}

    -

    {description}

    - - {cta} - -
    - ) -} diff --git a/website/components/io-home-pre-footer/style.module.css b/website/components/io-home-pre-footer/style.module.css deleted file mode 100644 index 1273e2087d..0000000000 --- a/website/components/io-home-pre-footer/style.module.css +++ /dev/null @@ -1,122 +0,0 @@ -.preFooter { - margin: 60px auto; -} - -.container { - --columns: 1; - - composes: g-grid-container from global; - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 32px; - - @media (--medium-up) { - --columns: 12; - } -} - -.content { - grid-column: 1 / -1; - - @media (--medium-up) { - grid-column: 1 / 6; - } - - @media (--large) { - grid-column: 1 / 4; - } -} - -.heading { - margin: 0; - composes: g-type-display-1 from global; -} - -.description { - margin: 24px 0 0; - composes: g-type-body from global; - color: var(--gray-3); -} - -.cards { - grid-column: 1 / -1; - - --columns: 1; - - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 32px; - - @media (--medium-up) { - --columns: 3; - - grid-column: 1 / -1; - } - - @media (--large) { - grid-column: 5 / -1; - } -} - -.card { - display: flex; - flex-direction: column; - flex-grow: 1; - padding: 32px 24px; - background-color: var(--primary); - color: var(--black); - border-radius: 6px; - box-shadow: 0 2px 3px rgba(101, 106, 118, 0.1), - 0 8px 16px -10px rgba(101, 106, 118, 0.2); - transition: ease-in-out 0.2s; - transition-property: box-shadow; - - &:hover { - box-shadow: 0 2px 3px rgba(101, 106, 118, 0.15), - 0 16px 16px -10px rgba(101, 106, 118, 0.2); - } - - &:nth-of-type(1) { - color: var(--white); - - @nest .vault & { - color: var(--black); - } - } - - &:nth-of-type(2) { - background-color: var(--secondary); - } - - &:nth-of-type(3) { - background-color: var(--gray-6); - } -} - -.cardHeading { - margin: 0; - composes: g-type-display-4 from global; -} - -.cardDescription { - margin: 8px 0 0; - padding-bottom: 48px; - color: inherit; - composes: g-type-display-6 
from global; -} - -.cardCta { - margin-top: auto; - display: inline-flex; - align-items: center; - composes: g-type-buttons-and-standalone-links from global; - - & svg { - margin-left: 12px; - transition: transform 0.2s; - } - - @nest .card:hover & svg { - transform: translate(2px); - } -} diff --git a/website/components/io-usecase-call-to-action/index.tsx b/website/components/io-usecase-call-to-action/index.tsx deleted file mode 100644 index 252be27f18..0000000000 --- a/website/components/io-usecase-call-to-action/index.tsx +++ /dev/null @@ -1,69 +0,0 @@ -import Image from 'next/image' -import * as React from 'react' -import classNames from 'classnames' -import Button from '@hashicorp/react-button' -import s from './style.module.css' - -interface IoUsecaseCallToActionProps { - brand: string - theme?: 'light' | 'dark' - heading: string - description: string - links: Array<{ - text: string - url: string - }> - pattern: string -} - -export default function IoUsecaseCallToAction({ - brand, - theme, - heading, - description, - links, - pattern, -}: IoUsecaseCallToActionProps): React.ReactElement { - return ( -
    -

    {heading}

    -
    -

    {description}

    -
    - {links.map((link, index) => { - return ( -
    -
    -
    - -
    -
    - ) -} diff --git a/website/components/io-usecase-call-to-action/style.module.css b/website/components/io-usecase-call-to-action/style.module.css deleted file mode 100644 index 1afcb903d6..0000000000 --- a/website/components/io-usecase-call-to-action/style.module.css +++ /dev/null @@ -1,66 +0,0 @@ -.callToAction { - --columns: 1; - - position: relative; - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 0 32px; - padding: 32px; - background-color: var(--background-color); - border-radius: 6px; - - &.light { - color: var(--black); - } - - &.dark { - color: var(--white); - } - - @media (--medium-up) { - --columns: 12; - - padding: 0; - } -} - -.heading { - grid-column: 1 / -1; - margin: 0 0 16px; - composes: g-type-display-3 from global; - - @media (--medium-up) { - grid-column: 1 / 6; - padding: 88px 32px 88px 64px; - } -} - -.content { - grid-column: 1 / -1; - - @media (--medium-up) { - grid-column: 6 / 11; - padding: 88px 0; - } -} - -.description { - margin: 0 0 32px; - composes: g-type-body-large from global; -} - -.links { - display: flex; - flex-wrap: wrap; - gap: 16px 32px; -} - -.pattern { - position: relative; - display: none; - - @media (--medium-up) { - grid-column: 11 / -1; - display: flex; - } -} diff --git a/website/components/io-usecase-customer/index.tsx b/website/components/io-usecase-customer/index.tsx deleted file mode 100644 index 288b953b85..0000000000 --- a/website/components/io-usecase-customer/index.tsx +++ /dev/null @@ -1,86 +0,0 @@ -import * as React from 'react' -import Image from 'next/image' -import Button from '@hashicorp/react-button' -import s from './style.module.css' - -interface IoUsecaseCustomerProps { - media: { - src: string - width: string - height: string - alt: string - } - logo: { - src: string - width: string - height: string - alt: string - } - heading: string - description: string - stats?: Array<{ - value: string - key: string - }> - link: string -} - -export default function 
IoUsecaseCustomer({ - media, - logo, - heading, - description, - stats, - link, -}: IoUsecaseCustomerProps): React.ReactElement { - return ( -
    -
    -
    -
    - {/* eslint-disable-next-line jsx-a11y/alt-text */} - -
    -
    -
    -
    - {/* eslint-disable-next-line jsx-a11y/alt-text */} - -
    - Customer case study -
    -

    {heading}

    -

    {description}

    - {link ? ( -
    -
    - ) : null} -
    -
    - {stats.length > 0 ? ( -
      - {stats.map(({ key, value }, index) => { - return ( - // Index is stable - // eslint-disable-next-line react/no-array-index-key -
    • -

      {value}

      -

      {key}

      -
    • - ) - })} -
    - ) : null} -
    -
    - ) -} diff --git a/website/components/io-usecase-customer/style.module.css b/website/components/io-usecase-customer/style.module.css deleted file mode 100644 index b881560735..0000000000 --- a/website/components/io-usecase-customer/style.module.css +++ /dev/null @@ -1,119 +0,0 @@ -.customer { - position: relative; - background-color: var(--black); - color: var(--white); - padding-bottom: 64px; - - @media (--medium-up) { - padding-bottom: 132px; - } -} - -.container { - composes: g-grid-container from global; -} - -.columns { - --columns: 1; - - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 64px 32px; - - @media (--medium-up) { - --columns: 12; - } -} - -.media { - margin-top: -64px; - grid-column: 1 / -1; - - @media (--medium-up) { - grid-column: 1 / 7; - } -} - -.content { - grid-column: 1 / -1; - - @media (--medium-up) { - padding-top: 64px; - grid-column: 8 / -1; - } -} - -.eyebrow { - display: flex; - align-items: center; -} - -.eyebrowLogo { - display: flex; - max-width: 120px; -} - -.eyebrowLabel { - padding-top: 8px; - padding-bottom: 8px; - padding-left: 12px; - margin-left: 12px; - border-left: 1px solid var(--gray-5); - align-self: center; - composes: g-type-label-small-strong from global; -} - -.heading { - margin: 32px 0 24px; - composes: g-type-display-2 from global; -} - -.description { - margin: 0; - composes: g-type-body from global; -} - -.cta { - margin-top: 32px; -} - -.stats { - --columns: 1; - - list-style: none; - margin: 64px 0 0; - padding: 0; - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 32px; - - @media (--medium-up) { - --columns: 12; - - margin-top: 132px; - } - - & > li { - border-top: 1px solid var(--gray-2); - grid-column: span 4; - } -} - -.value { - margin: 0; - padding-top: 32px; - font-family: var(--font-display); - font-size: 50px; - font-weight: 700; - line-height: 1; - - @media (--large) { - font-size: 80px; - } -} - -.key { - margin: 12px 
0 0; - composes: g-type-display-4 from global; - color: var(--gray-3); -} diff --git a/website/components/io-usecase-hero/index.tsx b/website/components/io-usecase-hero/index.tsx deleted file mode 100644 index 4838678e8f..0000000000 --- a/website/components/io-usecase-hero/index.tsx +++ /dev/null @@ -1,41 +0,0 @@ -import * as React from 'react' -import Image from 'next/image' -import s from './style.module.css' - -interface IoUsecaseHeroProps { - eyebrow: string - heading: string - description: string - pattern?: string -} - -export default function IoUsecaseHero({ - eyebrow, - heading, - description, - pattern, -}: IoUsecaseHeroProps): React.ReactElement { - return ( -
    -
    -
    - {pattern ? ( - - ) : null} -
    -
    -

    {eyebrow}

    -

    {heading}

    -

    {description}

    -
    -
    -
    - ) -} diff --git a/website/components/io-usecase-hero/pattern.svg b/website/components/io-usecase-hero/pattern.svg deleted file mode 100644 index f4b1ef3af0..0000000000 --- a/website/components/io-usecase-hero/pattern.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/website/components/io-usecase-hero/style.module.css b/website/components/io-usecase-hero/style.module.css deleted file mode 100644 index 5fd729c8e8..0000000000 --- a/website/components/io-usecase-hero/style.module.css +++ /dev/null @@ -1,83 +0,0 @@ -.hero { - position: relative; - max-width: 1600px; - margin-right: auto; - margin-left: auto; - - &::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background-image: radial-gradient( - 95.97% 95.97% at 50% 100%, - #f2f2f3 0%, - rgba(242, 242, 243, 0) 100% - ); - - @media (--medium-up) { - border-radius: 6px; - left: 24px; - right: 24px; - } - } -} - -.container { - @media (--medium-up) { - display: grid; - grid-template-columns: 1fr max-content 1fr; - gap: 32px; - } -} - -.pattern { - margin-left: 24px; - transform: translateY(24px); - position: relative; - display: flex; - flex-direction: column; - justify-content: flex-end; - - @media (--small) { - display: none; - } - - @media (--medium) { - & > * { - display: none !important; - } - } -} - -.content { - position: relative; - max-width: 520px; - width: 100%; - margin-right: auto; - margin-left: auto; - padding: 64px 24px; - - @media (--medium-up) { - padding-top: 132px; - padding-bottom: 132px; - } -} - -.eyebrow { - margin: 0; - composes: g-type-label-strong from global; -} - -.heading { - margin: 24px 0; - composes: g-type-display-1 from global; -} - -.description { - margin: 0; - composes: g-type-body-large from global; - color: var(--gray-2); -} diff --git a/website/components/io-usecase-section/index.tsx b/website/components/io-usecase-section/index.tsx deleted file mode 100644 index 11ed7917f6..0000000000 --- 
a/website/components/io-usecase-section/index.tsx +++ /dev/null @@ -1,81 +0,0 @@ -import * as React from 'react' -import { Products } from '@hashicorp/platform-product-meta' -import classNames from 'classnames' -import Image from 'next/image' -import Button from '@hashicorp/react-button' -import s from './style.module.css' - -interface IoUsecaseSectionProps { - brand?: Products | 'neutral' - bottomIsFlush?: boolean - eyebrow: string - heading: string - description: string - media?: { - src: string - width: string - height: string - alt: string - } - cta?: { - text: string - link: string - } -} - -export default function IoUsecaseSection({ - brand = 'neutral', - bottomIsFlush = false, - eyebrow, - heading, - description, - media, - cta, -}: IoUsecaseSectionProps): React.ReactElement { - return ( -
    -
    -

    {eyebrow}

    -
    -
    -

    {heading}

    - {media?.src ? ( -
    - ) : null} - {cta?.link && cta?.text ? ( -
    -
    - ) : null} -
    -
    - {media?.src ? ( - // eslint-disable-next-line jsx-a11y/alt-text - - ) : ( -
    - )} -
    -
    -
    -
    - ) -} diff --git a/website/components/io-usecase-section/style.module.css b/website/components/io-usecase-section/style.module.css deleted file mode 100644 index a2b56d1f58..0000000000 --- a/website/components/io-usecase-section/style.module.css +++ /dev/null @@ -1,106 +0,0 @@ -.section { - position: relative; - max-width: 1600px; - margin-right: auto; - margin-left: auto; - padding-top: 64px; - padding-bottom: 64px; - - @media (--medium-up) { - padding-top: 132px; - padding-bottom: 132px; - } - - & + .section { - padding-bottom: 132px; - - &::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background-color: var(--gray-6); - opacity: 0.4; - - @media (--medium-up) { - border-radius: 6px; - left: 24px; - right: 24px; - } - } - } - - &.isFlush { - padding-bottom: 96px; - - @media (--medium-up) { - padding-bottom: 164px; - } - - &::before { - border-bottom-left-radius: 0; - border-bottom-right-radius: 0; - } - } -} - -.container { - composes: g-grid-container from global; -} - -.columns { - --columns: 1; - - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 32px; - - @media (--medium-up) { - --columns: 12; - } -} - -.column { - &:nth-child(1) { - @media (--medium-up) { - grid-column: 1 / 7; - } - } - - &:nth-child(2) { - @media (--medium-up) { - grid-column: 8 / -1; - padding-top: 16px; - } - } -} - -.eyebrow { - margin: 0; - composes: g-type-display-5 from global; -} - -.heading { - margin: 16px 0 32px; - padding-bottom: 32px; - composes: g-type-display-3 from global; - border-bottom: 1px solid var(--black); -} - -.description { - composes: g-type-body from global; - - & > p { - margin: 0; - - & + p { - margin-top: 16px; - } - } -} - -.cta { - margin-top: 32px; -} diff --git a/website/components/io-video-callout/index.tsx b/website/components/io-video-callout/index.tsx deleted file mode 100644 index 7889348d86..0000000000 --- a/website/components/io-video-callout/index.tsx +++ 
/dev/null @@ -1,80 +0,0 @@ -import * as React from 'react' -import Image from 'next/image' -import ReactPlayer from 'react-player' -import VisuallyHidden from '@reach/visually-hidden' -import IoDialog from 'components/io-dialog' -import PlayIcon from './play-icon' -import s from './style.module.css' - -export interface IoHomeVideoCalloutProps { - youtubeId: string - thumbnail: string - heading: string - description: string - person: { - avatar: string - name: string - description: string - } -} - -export default function IoVideoCallout({ - youtubeId, - thumbnail, - heading, - description, - person, -}: IoHomeVideoCalloutProps): React.ReactElement { - const [showDialog, setShowDialog] = React.useState(false) - const showVideo = () => setShowDialog(true) - const hideVideo = () => setShowDialog(false) - return ( - <> -
    - -
    -

    {heading}

    -

    {description}

    - {person && ( -
    - {person.avatar ? ( -
    - {`${person.name} -
    - ) : null} -
    -

    {person.name}

    -

    {person.description}

    -
    -
    - )} -
    -
    - -

    {heading}

    -
    - -
    -
    - - ) -} diff --git a/website/components/io-video-callout/play-icon.tsx b/website/components/io-video-callout/play-icon.tsx deleted file mode 100644 index 37395ba2b6..0000000000 --- a/website/components/io-video-callout/play-icon.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import * as React from 'react' - -export default function PlayIcon(): React.ReactElement { - return ( - - - - - ) -} diff --git a/website/components/io-video-callout/style.module.css b/website/components/io-video-callout/style.module.css deleted file mode 100644 index 815601ff0f..0000000000 --- a/website/components/io-video-callout/style.module.css +++ /dev/null @@ -1,128 +0,0 @@ -.videoCallout { - --columns: 1; - - margin: 0; - display: grid; - grid-template-columns: repeat(var(--columns), minmax(0, 1fr)); - gap: 32px; - background-color: var(--black); - border-radius: 6px; - overflow: hidden; - - @media (--medium-up) { - --columns: 12; - } -} - -.thumbnail { - position: relative; - display: grid; - place-items: center; - grid-column: 1 / -1; - background-color: transparent; - border: 0; - cursor: pointer; - padding: 96px 32px; - min-height: 300px; - - @media (--medium-up) { - grid-column: 1 / 7; - } - - @media (--large) { - grid-column: 1 / 9; - } - - & > svg { - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - z-index: 1; - - @media (--small) { - width: 52px; - height: 52px; - } - } - - &::after { - content: ''; - position: absolute; - top: 0; - left: 0; - width: 100%; - height: 100%; - background-color: #000; - opacity: 0.45; - transition: opacity ease-in-out 0.2s; - } - - &:hover::after { - opacity: 0.2; - } -} - -.content { - padding: 32px; - grid-column: 1 / -1; - - @media (--medium-up) { - padding: 80px 32px; - grid-column: 7 / -1; - } - - @media (--large) { - grid-column: 9 / -1; - } -} - -.heading { - margin: 0; - composes: g-type-display-4 from global; - color: var(--white); -} - -.description { - margin: 8px 0 0; - composes: g-type-body-small from global; 
- color: var(--white); -} - -.person { - margin-top: 64px; - display: flex; - align-items: center; - gap: 16px; -} - -.personThumbnail { - display: flex; - border-radius: 9999px; - overflow: hidden; -} - -.personName { - margin: 0; - composes: g-type-body-strong from global; - color: var(--white); -} - -.personDescription { - margin: 4px 0 0; - composes: g-type-label-strong from global; - color: var(--gray-3); -} - -.videoHeading { - margin-top: 0; - margin-bottom: 32px; - padding-right: 100px; - composes: g-type-display-4 from global; -} - -.video { - position: relative; - background-color: var(--gray-2); - aspect-ratio: 16 / 9; -} diff --git a/website/components/mini-cta/index.jsx b/website/components/mini-cta/index.jsx deleted file mode 100644 index 02d5faff33..0000000000 --- a/website/components/mini-cta/index.jsx +++ /dev/null @@ -1,23 +0,0 @@ -import Button from '@hashicorp/react-button' - -export default function MiniCTA({ title, description, link }) { - return ( -
    -
    -
    -
    {title}
    - {description &&

    {description}

    } -
    -
    - ) -} diff --git a/website/components/mini-cta/style.css b/website/components/mini-cta/style.css deleted file mode 100644 index 8f76813ac1..0000000000 --- a/website/components/mini-cta/style.css +++ /dev/null @@ -1,36 +0,0 @@ -.g-mini-cta { - background: var(--gray-6); - text-align: center; - padding-bottom: 64px; - padding-top: 48px; - - & hr { - width: 64px; - color: var(--gray-4); - margin: 0 auto 64px auto; - - @media (max-width: 800px) { - margin: 0 auto 24px auto; - } - } - - & h5 { - margin: 0; - margin-bottom: 14px; - } - - & p { - margin: 0; - margin-bottom: 24px; - - @media (max-width: 800px) { - margin-bottom: 16px; - } - } - - & .g-btn { - & span { - font-weight: 500; - } - } -} diff --git a/website/components/prefooter-cta/index.jsx b/website/components/prefooter-cta/index.jsx deleted file mode 100644 index 6180587289..0000000000 --- a/website/components/prefooter-cta/index.jsx +++ /dev/null @@ -1,25 +0,0 @@ -import CallToAction from '@hashicorp/react-call-to-action' - -export default function PrefooterCTA() { - return ( - - ) -} diff --git a/website/components/side-by-side/index.tsx b/website/components/side-by-side/index.tsx deleted file mode 100644 index c1a29b0ff4..0000000000 --- a/website/components/side-by-side/index.tsx +++ /dev/null @@ -1,21 +0,0 @@ -import { ReactNode } from 'react' -import classNames from 'classnames' -import s from './style.module.css' - -interface SideBySideProps { - left: ReactNode - right: ReactNode -} - -export default function SideBySide({ left, right }: SideBySideProps) { - return ( -
    -
    -
    {left}
    -
    -
    -
    {right}
    -
    -
    - ) -} diff --git a/website/components/side-by-side/style.module.css b/website/components/side-by-side/style.module.css deleted file mode 100644 index 683a7de24e..0000000000 --- a/website/components/side-by-side/style.module.css +++ /dev/null @@ -1,61 +0,0 @@ -.sideBySide { - display: flex; - flex-wrap: wrap; - - @media (--large) { - flex-wrap: nowrap; - } - - & .sideWrapper { - padding: 105px 0; - width: 100%; - - @media (--large) { - width: 50%; - padding-bottom: 176px; - } - - &.leftSide { - background: var(--consul-secondary); - - @media (--large) { - padding-left: 48px; - padding-right: 104px; - } - } - - &.rightSide { - @media (--large) { - padding-right: 48px; - padding-left: 75px; - } - } - - & .side { - margin: 0 auto; - - @media (--small) { - max-width: 616px; - padding-left: 24px; - padding-right: 24px; - } - - @media (--medium) { - max-width: 944px; - padding-left: 40px; - padding-right: 40px; - } - - @media (--large) { - margin: 0; - max-width: 490px; - } - } - - &:first-child .side { - @media (--large) { - float: right; - } - } - } -} diff --git a/website/components/static-dynamic-diagram/before-after-diagram.jsx b/website/components/static-dynamic-diagram/before-after-diagram.jsx deleted file mode 100644 index c81124ed4c..0000000000 --- a/website/components/static-dynamic-diagram/before-after-diagram.jsx +++ /dev/null @@ -1,80 +0,0 @@ -import Image from '@hashicorp/react-image' -import InlineSvg from '@hashicorp/react-inline-svg' -import alertIcon from 'public/img/static-dynamic-diagram/alert.svg?include' -import checkIcon from 'public/img/static-dynamic-diagram/check.svg?include' -import s from './before-after-diagram.module.css' - -export default function BeforeAfterDiagram({ - beforeHeadline, - beforeContent, - beforeImage, - afterHeadline, - afterContent, - afterImage, -}) { - return ( -
    -
    -
    -
    - -
    -
    -
    - - - - -
    - {beforeHeadline && ( -

    - )} - {beforeContent && ( -
    - )} -
    -

    -
    -
    -
    -
    - -
    -
    -
    - - - -
    - {afterHeadline && ( -

    - )} - {afterContent && ( -
    - )} -
    -

    -
    -
    - ) -} diff --git a/website/components/static-dynamic-diagram/before-after-diagram.module.css b/website/components/static-dynamic-diagram/before-after-diagram.module.css deleted file mode 100644 index 49796a3c28..0000000000 --- a/website/components/static-dynamic-diagram/before-after-diagram.module.css +++ /dev/null @@ -1,351 +0,0 @@ -.beforeAfterDiagram { - /* CSS custom properties to control theming */ - --product-color: var(--black); - --gray-6-transparent: rgba(210, 212, 219, 0); - --after-bullet-background: url('/img/static-dynamic-diagram/check-square.svg'); - --after-bullet-height: 18px; - - display: flex; - flex-wrap: wrap; - margin: 0 -16px; - position: relative; - - @media (max-width: 1023px) { - margin-left: -12px; - margin-right: -12px; - } - - @media (max-width: 767px) { - flex-direction: column; - margin-left: 40px; - margin-right: 0; - } - - --after-bullet-background: url('/img/static-dynamic-diagram/check-square-consul.svg'); - --after-bullet-height: 19px; -} - -/* Before and after columns */ - -.side { - display: flex; - flex-direction: column; - margin: 0 16px; - position: relative; - width: calc(50% - 32px); - - @media (max-width: 1023px) { - margin: 0 12px; - width: calc(50% - 24px); - } - - @media (max-width: 767px) { - margin: 0; - width: 100%; - } -} - -.beforeSide { - composes: side; - @media (max-width: 767px) { - margin-bottom: 62px; - } -} - -.afterSide { - composes: side; -} - -/* Diagram images */ - -.image { - align-items: flex-end; - display: flex; - height: 320px; - justify-content: center; - margin-bottom: 96px; - - @media (max-width: 767px) { - margin-bottom: 40px; - } - - @media (max-width: 640px) { - height: 284px; - } - - @media (max-width: 540px) { - height: 238px; - } - - @media (max-width: 480px) { - height: 211px; - } - - @media (max-width: 375px) { - height: 163px; - } - - & div { - height: 100%; - text-align: center; - width: 100%; - } - - & picture { - height: 100%; - } - & img, - & svg { - height: 100%; - max-width: 
100%; - object-fit: contain; - } - - @media (--medium-up) { - height: unset; - - & div { - height: unset; - } - - & picture { - height: unset; - } - & img, - & svg { - height: unset; - } - } -} - -/* icon / line container above content */ - -.iconLineContainer { - padding: 0; - position: absolute; - right: 0; - top: -75px; - width: 100%; - - @media (max-width: 767px) { - height: 100%; - left: -28px; - right: auto; - top: 28px; - width: auto; - } -} - -/* Line segment above content (before side only) */ - -.lineSegment { - background: black; - display: block; - height: 2px; - left: calc(50% + 30px); - position: absolute; - top: 12px; - width: calc(100% - 24px); - - @media (max-width: 767px) { - height: calc(100% + 375px); - left: auto; - top: 38px; - width: 2px; - } - - @media (max-width: 640px) { - height: calc(100% + 339px); - } - - @media (max-width: 540px) { - height: calc(100% + 293px); - } - - @media (max-width: 480px) { - height: calc(100% + 266px); - } - - @media (max-width: 375px) { - height: calc(100% + 218px); - } - - &::before { - border-radius: 100%; - border-style: solid; - border-width: 5.5px 0 5.5px 8px; - border-width: 2px; - content: ''; - height: 8px; - left: -8px; - position: absolute; - top: -3px; - width: 8px; - - @media (max-width: 767px) { - left: -3px; - top: -8px; - } - } - - &::after { - border-color: transparent transparent transparent var(--product-color); - border-style: solid; - border-width: 6px 0 6px 8px; - content: ''; - height: 0; - position: absolute; - right: -8px; - top: -5px; - width: 0; - - @media (max-width: 767px) { - bottom: -8px; - right: -4px; - top: auto; - transform: rotate(90deg); - } - } -} - -/* Icon above each content container */ - -.contentIcon { - & svg { - left: 50%; - margin: 0 0 0 -11px; - position: absolute; - } -} - -.beforeIcon { - composes: contentIcon; -} - -.afterIcon { - composes: contentIcon; - & svg path:first-child { - fill: var(--product-color); - stroke: var(--product-color); - } -} - -/* Content 
container */ - -.contentContainer { - border: 1px solid var(--gray-5); - flex-grow: 1; - padding: 24px 32px 20px; - position: relative; - - @media (max-width: 1023px) { - padding-left: 24px; - padding-right: 24px; - } - - @media (max-width: 767px) { - padding-left: 20px; - padding-right: 20px; - } - - &::before, - &::after { - border: solid transparent; - bottom: 100%; - content: ''; - height: 0; - left: 50%; - pointer-events: none; - position: absolute; - width: 0; - } - - &::before { - border-color: rgba(229, 230, 235, 0); - border-bottom-color: var(--gray-5); - border-width: 18px; - margin-left: -18px; - } - - &::after { - border-color: rgba(255, 255, 255, 0); - border-bottom-color: var(--white); - border-width: 17px; - margin-left: -17px; - } - - & > div { - height: 100%; - - & > div { - @media (min-width: 768px) { - margin: 0 auto; - max-width: 480px; - } - } - } -} - -/* Content headline */ - -.contentHeadline { - border-bottom: 1px solid var(--gray-5); - color: var(--black); - composes: g-type-display-3 from global; - margin: 0 0 24px; - padding-bottom: 24px; - text-align: center; -} - -/* Content styles (for rendered markdown) */ - -.content { - & :global(.__permalink-h) { - display: none; - } - - & :global(.g-type-label) { - margin: 24px 0 26px 0; - } - - & ul, - & ol { - list-style: none; - padding-left: 32px; - position: relative; - } - - & li { - margin: 8px 0; - - &::before { - background-repeat: no-repeat; - content: ''; - left: 0; - position: absolute; - } - } -} - -.beforeContent { - composes: content; - - & li::before { - background: url('/img/static-dynamic-diagram/alert-check.svg'); - background-repeat: no-repeat; - height: var(--after-bullet-height); - margin-top: 3px; - width: 20px; - } -} - -.afterContent { - composes: content; - - & li::before { - background: var(--after-bullet-background); - height: var(--after-bullet-height); - margin-top: 4px; - width: 18px; - } -} diff --git a/website/components/static-dynamic-diagram/index.tsx 
b/website/components/static-dynamic-diagram/index.tsx deleted file mode 100644 index 9b5cf3fad1..0000000000 --- a/website/components/static-dynamic-diagram/index.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import BeforeAfterDiagram from './before-after-diagram' -import s from './style.module.css' - -export default function StaticDynamicDiagram({ - heading, - description, - diagrams, -}) { - return ( -
    -
    -

    {heading}

    - {description &&

    {description}

    } -
    - -
    - ) -} diff --git a/website/components/static-dynamic-diagram/style.module.css b/website/components/static-dynamic-diagram/style.module.css deleted file mode 100644 index 2b1a7ff2d6..0000000000 --- a/website/components/static-dynamic-diagram/style.module.css +++ /dev/null @@ -1,19 +0,0 @@ -.staticDynamic { - composes: g-grid-container from global; - display: grid; - grid-gap: 64px; - justify-items: center; -} - -.content { - max-width: 784px; - text-align: center; -} -.description { - composes: g-type-body-large from global; - color: var(--gray-2); -} -.heading { - composes: g-type-display-2 from global; - margin: 0; -} diff --git a/website/components/subnav/index.jsx b/website/components/subnav/index.jsx deleted file mode 100644 index 4a70b89c1a..0000000000 --- a/website/components/subnav/index.jsx +++ /dev/null @@ -1,38 +0,0 @@ -import Subnav from '@hashicorp/react-subnav' -import { useRouter } from 'next/router' -import s from './style.module.css' - -export default function ConsulSubnav({ menuItems }) { - const router = useRouter() - return ( - - ) -} diff --git a/website/components/subnav/style.module.css b/website/components/subnav/style.module.css deleted file mode 100644 index 5cb3cbccd8..0000000000 --- a/website/components/subnav/style.module.css +++ /dev/null @@ -1,3 +0,0 @@ -.subnav { - border-top: 1px solid transparent; -} diff --git a/website/components/use-cases-layout/index.jsx b/website/components/use-cases-layout/index.jsx deleted file mode 100644 index 2459107113..0000000000 --- a/website/components/use-cases-layout/index.jsx +++ /dev/null @@ -1,47 +0,0 @@ -import BasicHero from 'components/basic-hero' -import PrefooterCTA from 'components/prefooter-cta' -import ConsulEnterpriseComparison from 'components/enterprise-comparison/consul' -import Head from 'next/head' -import HashiHead from '@hashicorp/react-head' - -export default function UseCaseLayout({ - title, - description, - guideLink, - children, -}) { - const pageTitle = `Consul ${title}` 
- return ( - <> - - - - -
    - -
    -

    Features

    -
    - {children} - - -
    - - ) -} diff --git a/website/components/use-cases-layout/style.css b/website/components/use-cases-layout/style.css deleted file mode 100644 index e51ec7e4c1..0000000000 --- a/website/components/use-cases-layout/style.css +++ /dev/null @@ -1,30 +0,0 @@ -#p-use-case { - & .features-header { - text-align: center; - margin-bottom: 0; - } - - /* Overriding the g-text-split component to have - * a header size closer to a h3 than a h2, as within - * the context of this page this text-split is more deeply - * nested within the page than we normally have it. - * */ - & .g-text-split { - & h2 { - font-size: 1.5rem; - letter-spacing: -0.004em; - line-height: 1.375em; - - @media (--medium-up) { - font-size: 1.75rem; - line-height: 1.321em; - } - - @media (--large) { - font-size: 2rem; - letter-spacing: -0.006em; - line-height: 1.313em; - } - } - } -} diff --git a/website/content/docs/api-gateway/common-errors.mdx b/website/content/docs/api-gateway/common-errors.mdx index d3ba51dcaa..f49c9fefc5 100644 --- a/website/content/docs/api-gateway/common-errors.mdx +++ b/website/content/docs/api-gateway/common-errors.mdx @@ -64,4 +64,4 @@ Install the required CRDs by using the command in Step 1 of the [Consul API Gate ---> [consul-common-errors]: /docs/troubleshoot/common-errors [troubleshooting]: https://learn.hashicorp.com/consul/day-2-operations/advanced-operations/troubleshooting -[install-instructions]: /docs/api-gateway/api-gateway-usage#installation \ No newline at end of file +[install-instructions]: /docs/api-gateway/consul-api-gateway-install#installation \ No newline at end of file diff --git a/website/content/docs/api-gateway/api-gateway-usage.mdx b/website/content/docs/api-gateway/consul-api-gateway-install.mdx similarity index 98% rename from website/content/docs/api-gateway/api-gateway-usage.mdx rename to website/content/docs/api-gateway/consul-api-gateway-install.mdx index ac84748928..814959a1f5 100644 --- a/website/content/docs/api-gateway/api-gateway-usage.mdx 
+++ b/website/content/docs/api-gateway/consul-api-gateway-install.mdx @@ -1,17 +1,17 @@ --- layout: docs -page_title: Consul API Gateway Usage +page_title: Consul API Gateway Install description: >- - Using Consul API gateway functionality + Installing Consul API Gateway --- -# Consul API Gateway Usage +# Installing Consul API Gateway This topic describes how to use the Consul API Gateway add-on module. It includes instructions for installation and configuration. ## Requirements -Refer to [Technical Specifications](/docs/api-gateway/tech-specs) for minimum software requirements. +Ensure that the environment you are deploying Consul API Gateway in meets the requirements listed in the [Technical Specifications](/docs/api-gateway/tech-specs). This includes validating that the requirements for minimum versions of software are met. ## Installation @@ -305,7 +305,6 @@ spec: - ### MeshService The `MeshService` configuration holds a reference to an externally-managed Consul service mesh service and can be used as a `backendRef` for a [`Route`](#route). diff --git a/website/content/docs/connect/proxies/envoy.mdx b/website/content/docs/connect/proxies/envoy.mdx index 35d21fad27..0262ede404 100644 --- a/website/content/docs/connect/proxies/envoy.mdx +++ b/website/content/docs/connect/proxies/envoy.mdx @@ -36,14 +36,12 @@ Consul supports **four major Envoy releases** at the beginning of each major Con | Consul Version | Compatible Envoy Versions | | ------------------- | -----------------------------------------------------------------------------------| -| 1.12.x | 1.21.1, 1.20.2, 1.19.3, 1.18.6 | +| 1.12.x | 1.22.0, 1.21.1, 1.20.2, 1.19.3 | | 1.11.x | 1.20.2, 1.19.3, 1.18.6, 1.17.41 | | 1.10.x | 1.18.6, 1.17.41, 1.16.51 , 1.15.51 | -| 1.9.x | 1.16.51, 1.15.51, 1.14.71,2, 1.13.71,2 | 1. 
Envoy 1.20.1 and earlier are vulnerable to [CVE-2022-21654](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21654) and [CVE-2022-21655](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-21655). Both CVEs were patched in Envoy versions 1.18.6, 1.19.3, and 1.20.2. Envoy 1.16.x and older releases are no longer supported (see [HCSEC-2022-07](https://discuss.hashicorp.com/t/hcsec-2022-07-consul-s-connect-service-mesh-affected-by-recent-envoy-security-releases/36332)). Consul 1.9.x clusters should be upgraded to 1.10.x and Envoy upgraded to the latest supported Envoy version for that release, 1.18.6. -1. Use Consul 1.9.0+ with Envoy 1.15.0+ to ensure that intention enforcement is updated as quickly as possible after any changes. [Additional information](https://github.com/envoyproxy/envoy/pull/10662). ## Getting Started diff --git a/website/content/docs/enterprise/admin-partitions.mdx b/website/content/docs/enterprise/admin-partitions.mdx index 6fd1bf8d96..2ccd24e517 100644 --- a/website/content/docs/enterprise/admin-partitions.mdx +++ b/website/content/docs/enterprise/admin-partitions.mdx @@ -8,8 +8,7 @@ description: Consul Enterprise enables you to create partitions that can be admi This feature requires{' '} - Consul Enterprise{' '} - with the Governance and Policy module. + Consul Enterprise{' '}. This topic provides and overview of admin partitions, which are entities that define one or more administrative boundaries for single Consul deployments. diff --git a/website/content/docs/enterprise/audit-logging.mdx b/website/content/docs/enterprise/audit-logging.mdx index 27382043e0..8ee3a90ded 100644 --- a/website/content/docs/enterprise/audit-logging.mdx +++ b/website/content/docs/enterprise/audit-logging.mdx @@ -9,8 +9,7 @@ description: >- This feature requires{' '} - Consul Enterprise{' '} - with the Governance and Policy module. + Consul Enterprise{' '}. 
Consul Enterprise v1.8.0 adds audit logging as a feature that captures a clear and diff --git a/website/content/docs/enterprise/federation.mdx b/website/content/docs/enterprise/federation.mdx index 9c00e6471a..e1cc46b30e 100644 --- a/website/content/docs/enterprise/federation.mdx +++ b/website/content/docs/enterprise/federation.mdx @@ -11,8 +11,7 @@ description: >- This feature requires{' '} - Consul Enterprise{' '} - with the Global Visibility, Routing, and Scale module. + Consul Enterprise{' '}. Consul's core federation capability uses the same gossip mechanism that is used diff --git a/website/content/docs/enterprise/namespaces.mdx b/website/content/docs/enterprise/namespaces.mdx index 34bca58627..05785d2d56 100644 --- a/website/content/docs/enterprise/namespaces.mdx +++ b/website/content/docs/enterprise/namespaces.mdx @@ -8,8 +8,7 @@ description: Consul Enterprise enables data isolation with Namespaces. This feature requires{' '} - Consul Enterprise{' '} - with the Governance and Policy module. + Consul Enterprise{' '}. With Consul Enterprise v1.7.0, data for different users or teams diff --git a/website/content/docs/enterprise/network-segments.mdx b/website/content/docs/enterprise/network-segments.mdx index d4a1ffedad..bd3e3a3eac 100644 --- a/website/content/docs/enterprise/network-segments.mdx +++ b/website/content/docs/enterprise/network-segments.mdx @@ -11,7 +11,7 @@ description: |- This feature requires{' '} Consul Enterprise{' '} - version 0.9.3+ with the Global Visibility, Routing, and Scale module. + version 0.9.3+. 
Consul requires full connectivity between all agents (servers and clients) in a diff --git a/website/content/docs/enterprise/read-scale.mdx b/website/content/docs/enterprise/read-scale.mdx index 3665db34e4..be80838036 100644 --- a/website/content/docs/enterprise/read-scale.mdx +++ b/website/content/docs/enterprise/read-scale.mdx @@ -10,8 +10,7 @@ description: >- This feature requires{' '} - Consul Enterprise{' '} - with the Global Visibility, Routing, and Scale module. + Consul Enterprise{' '}. Consul Enterprise provides the ability to scale clustered Consul servers diff --git a/website/content/docs/enterprise/redundancy.mdx b/website/content/docs/enterprise/redundancy.mdx index 73c0ced82f..0ac76f9c29 100644 --- a/website/content/docs/enterprise/redundancy.mdx +++ b/website/content/docs/enterprise/redundancy.mdx @@ -10,8 +10,7 @@ description: >- This feature requires{' '} - Consul Enterprise{' '} - with the Global Visibility, Routing, and Scale module. + Consul Enterprise{' '}. Consul Enterprise redundancy zones provide diff --git a/website/content/docs/enterprise/sentinel.mdx b/website/content/docs/enterprise/sentinel.mdx index 5e20f7af5e..04353c5262 100644 --- a/website/content/docs/enterprise/sentinel.mdx +++ b/website/content/docs/enterprise/sentinel.mdx @@ -11,8 +11,7 @@ description: >- This feature requires{' '} - Consul Enterprise{' '} - with the Governance and Policy module. + Consul Enterprise{' '}. 
Sentinel policies extend the ACL system in Consul beyond static "read", "write", diff --git a/website/content/docs/k8s/annotations-and-labels.mdx b/website/content/docs/k8s/annotations-and-labels.mdx index e995aabc58..26e61ea585 100644 --- a/website/content/docs/k8s/annotations-and-labels.mdx +++ b/website/content/docs/k8s/annotations-and-labels.mdx @@ -125,6 +125,15 @@ Resource annotations could be used on the Kubernetes pod to control connnect-inj consul.hashicorp.com/envoy-extra-args: '--log-level debug --disable-hot-restart' ``` +- `consul.hashicorp.com/kubernetes-service` - Specifies the name of the Kubernetes service used for Consul service registration. + This is useful when multiple Kubernetes services reference the same deployment. Any service that does not match the name + specified in this annotation is ignored. When not specified no service is ignored. + + ```yaml + annotations: + consul.hashicorp.com/kubernetes-service: 'service-name-to-use' + ``` + - `consul.hashicorp.com/service-tags` - A comma separated list of tags that will be applied to the Consul service and its sidecar. @@ -172,6 +181,7 @@ Resource annotations could be used on the Kubernetes pod to control connnect-inj - `consul.hashicorp.com/prometheus-scrape-path` - Override the default Helm value [`connectInject.metrics.defaultPrometheusScrapePath`](/docs/k8s/helm#v-connectinject-metrics-defaultprometheusscrapepath). - `consul.hashicorp.com/service-metrics-port` - Set the port where the Connect service exposes metrics. - `consul.hashicorp.com/service-metrics-path` - Set the path where the Connect service exposes metrics. +- `consul.hashicorp.com/connect-inject-mount-volume` - Comma separated list of container names to mount the connect-inject volume into. The volume will be mounted at `/consul/connect-inject`. The connect-inject volume contains Consul internals data needed by the other sidecar containers, for example the `consul` binary, and the Pod's Consul ACL token. 
This data can be valuable for advanced use-cases, such as making requests to the Consul API from within application containers. ## Labels diff --git a/website/content/docs/k8s/connect/connect-ca-provider.mdx b/website/content/docs/k8s/connect/connect-ca-provider.mdx index 0fe9d664a2..8cd84531c2 100644 --- a/website/content/docs/k8s/connect/connect-ca-provider.mdx +++ b/website/content/docs/k8s/connect/connect-ca-provider.mdx @@ -21,7 +21,7 @@ To configure an external CA provider via the Consul Helm chart, you need to foll 1. Create a Kubernetes secret containing the configuration file. 1. Reference the Kubernetes secret in the [`server.extraVolumes`](/docs/k8s/helm#v-server-extravolumes) value in the Helm chart. -To configure the Vault Connect Provider please see [Vault as the Service Mesh Certificate Provider on Kubernetes](/docs/k8s/installation/vault/connect-ca). +To configure the Vault Connect Provider please see [Vault as the Service Mesh Certificate Provider on Kubernetes](/docs/k8s/installation/vault/data-integration/connect-ca). ~> **NOTE:** The following instructions are only valid for Consul-k8s 0.37.0 and prior. 
diff --git a/website/content/docs/k8s/installation/compatibility.mdx b/website/content/docs/k8s/installation/compatibility.mdx index 701025b4e1..2d4658ab77 100644 --- a/website/content/docs/k8s/installation/compatibility.mdx +++ b/website/content/docs/k8s/installation/compatibility.mdx @@ -16,7 +16,8 @@ Starting with Consul Kubernetes 0.33.0, Consul Kubernetes versions all of its co | Consul Version | Compatible consul-k8s Versions | | -------------- | ------------------------------- | -| 1.11.x | 0.39.0 - latest | +| 1.12.x | 0.43.0 - latest | +| 1.11.x | 0.39.0 - 0.42.0 | | 1.10.x | 0.33.0 - 0.38.0 | ### Prior to version 0.33.0 @@ -35,13 +36,6 @@ Prior to Consul Kubernetes 0.33.0, a separately versioned Consul Helm chart was Supported versions of Envoy for Consul versions are also found in [Envoy - Supported Versions](/docs/connect/proxies/envoy#supported-versions). The recommended best practice is to use the default version of Envoy that is provided in the Helm values.yml file, as that is the version that has been tested with the default Consul and Consul Kubernetes binaries for a given Helm chart. -## Red Hat OpenShift compatibility - -Consul Kubernetes delivered Red Hat OpenShift support starting with Consul Helm chart version 0.25.0 for Consul 1.8.4. Please note the following details regarding OpenShift support. - -- Red Hat OpenShift is only supported for OpenShift 4.4.x and above. -- Only the default CNI Plugin, [OpenShift SDN CNI Plugin](https://docs.openshift.com/container-platform/4.9/networking/openshift_sdn/about-openshift-sdn.html) is currently supported. - ## Vault as a Secrets Backend compatibility Starting with Consul K8s 0.39.0 and Consul 1.11.x, Consul Kubernetes supports the ability to utilize Vault as the secrets backend for all the secrets utilized by Consul on Kubernetes. 
@@ -50,3 +44,20 @@ Starting with Consul K8s 0.39.0 and Consul 1.11.x, Consul Kubernetes supports th | ------------------------ | --------------------------| ----------------------------- | | 0.39.0 - latest | 1.9.0 - latest | 0.14.0 - latest | +## Platform specific compatibility notes + +### Red Hat OpenShift + +Consul Kubernetes delivered Red Hat OpenShift support starting with Consul Helm chart version 0.25.0 for Consul 1.8.4. Please note the following details regarding OpenShift support. + +- Red Hat OpenShift is only supported for OpenShift 4.4.x and above. +- Only the default CNI Plugin, [OpenShift SDN CNI Plugin](https://docs.openshift.com/container-platform/4.9/networking/openshift_sdn/about-openshift-sdn.html) is currently supported. + +### VMware Tanzu Kubernetes Grid and Tanzu Kubernetes Grid Integrated Edition + +Consul Kubernetes is [certified](https://marketplace.cloud.vmware.com/services/details/hashicorp-consul-1?slug=true) for both VMware Tanzu Kubernetes Grid, and VMware Tanzu Kubernetes Grid Integrated Edition. + +- Tanzu Kubernetes Grid is certified for version 1.3.0 and above. Only Calico is supported as the CNI Plugin. +- Tanzu Kubernetes Grid Integrated Edition is supported for version 1.11.1 and above. [NSX-T CNI Plugin v3.1.2](https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/rn/NSX-Container-Plugin-312-Release-Notes.html) and greater should be used and configured with the `enable_hostport_snat` setting set to `true`. 
+ + diff --git a/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx b/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx index cf2d47df4a..5a7643ec18 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/bootstrap-token.mdx @@ -8,7 +8,7 @@ description: >- # Storing the ACL Bootstrap Token in Vault ## Prerequisites -Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: +Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: 1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). 2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). @@ -19,18 +19,18 @@ To use an ACL bootstrap token stored in Vault, we will follow the steps outlined 1. Store the secret in Vault. 1. Create a Vault policy that authorizes the desired level of access to the secret. - + ### Setup per Consul datacenter 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. 
## One time setup in Vault -### Store the Secret in Vault +### Generate and Store the Secret in Vault -First, store the ACL bootstrap token in Vault: +First, generate and store the ACL bootstrap token in Vault: ```shell-session -$ vault kv put secret/consul/boostrap-token token="" +$ vault kv put secret/consul/bootstrap-token token="$(uuidgen | tr '[:upper:]' '[:lower:]')" ``` ### Create a Vault policy that authorizes the desired level of access to the secret diff --git a/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx b/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx index 704797564d..0bc1d318e3 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/partition-token.mdx @@ -8,7 +8,7 @@ description: >- # Storing the ACL Partition Token in Vault ## Prerequisites -Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: +Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: 1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). 2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). @@ -24,12 +24,12 @@ To use an ACL partition token stored in Vault, we will follow the steps outlined 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. 
## One time setup in Vault -### Store the Secret in Vault +### Generate and Store the Secret in Vault -First, store the ACL partition token in Vault: +First, generate and store the ACL partition token in Vault: ```shell-session -$ vault kv put secret/consul/partition-token token="" +$ vault kv put secret/consul/partition-token token="$(uuidgen | tr '[:upper:]' '[:lower:]')" ``` ### Create a Vault policy that authorizes the desired level of access to the secret diff --git a/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx b/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx index 90534df40f..6ac7518a2c 100644 --- a/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx +++ b/website/content/docs/k8s/installation/vault/data-integration/replication-token.mdx @@ -8,7 +8,7 @@ description: >- # Storing the ACL Replication Token in Vault ## Prerequisites -Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: +Prior to setting up the data integration between Vault and Consul on Kubernetes, you will need to have: 1. Read and completed the steps in the [Systems Integration](/docs/k8s/installation/vault/systems-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). 2. Read the [Data Integration Overview](/docs/k8s/installation/vault/data-integration) section of [Vault as a Secrets Backend](/docs/k8s/installation/vault). @@ -24,12 +24,12 @@ To use an ACL replication token stored in Vault, we will follow the steps outlin 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. 
## One time setup in Vault -### Store the Secret in Vault +### Generate and Store the Secret in Vault -First, store the ACL replication token in Vault: +First, generate and store the ACL replication token in Vault: ```shell-session -$ vault kv put secret/consul/replication-token token="" +$ vault kv put secret/consul/replication-token token="$(uuidgen | tr '[:upper:]' '[:lower:]')" ``` ### Create a Vault policy that authorizes the desired level of access to the secret diff --git a/website/content/docs/k8s/installation/vault/systems-integration.mdx b/website/content/docs/k8s/installation/vault/systems-integration.mdx index a34b3f0f22..429456548f 100644 --- a/website/content/docs/k8s/installation/vault/systems-integration.mdx +++ b/website/content/docs/k8s/installation/vault/systems-integration.mdx @@ -93,7 +93,8 @@ Before installing the Vault Injector and configuring the Vault Kubernetes Auth M ```shell-session $ export VAULT_ADDR=http://${VAULT_SERVER_HOST}:8200 ``` --> **Note:** If your vault installation is current exposed using SSL, this address will need to use `https` instead of `http`. You will also need to setup the [`VAULT_CACERT`](https://www.vaultproject.io/docs/commands#vault_cacert) environment variable. + + -> **Note:** If your vault installation is current exposed using SSL, this address will need to use `https` instead of `http`. You will also need to setup the [`VAULT_CACERT`](https://www.vaultproject.io/docs/commands#vault_cacert) environment variable. #### VAULT_TOKEN diff --git a/website/content/docs/k8s/installation/vault/wan-federation.mdx b/website/content/docs/k8s/installation/vault/wan-federation.mdx new file mode 100644 index 0000000000..e0fcc24b42 --- /dev/null +++ b/website/content/docs/k8s/installation/vault/wan-federation.mdx @@ -0,0 +1,680 @@ +--- +layout: docs +page_title: Federation Between Kubernetes Clusters with Vault as Secrets Backend +description: >- + Federating multiple Kubernetes clusters using Vault as secrets backend. 
+--- + +# Federation Between Kubernetes Clusters with Vault as Secrets Backend + +~> **Note**: This topic requires familiarity with [Mesh Gateways](/docs/connect/gateways/mesh-gateway/service-to-service-traffic-datacenters), [WAN Federation Via Mesh Gateways](/docs/connect/gateways/mesh-gateway/wan-federation-via-mesh-gateways). + +This page describes how you can federate multiple Kubernetes clusters using Vault as the secrets backend. See the [Multi-Cluster Overview](/docs/k8s/installation/multi-cluster) for more information on use cases and how it works. + +## Differences Between Using Kubernetes Secrets vs. Vault +The [Federation Between Kubernetes Clusters](/docs/k8s/installation/multi-cluster/kubernetes) page provides an overview of WAN Federation using Mesh Gateways with Kubernetes secrets as the secret backend. When using Vault as the secrets backend, there are different systems and data integration configurations that will be explained in the [Usage](#usage) section of this page. The other main difference is that when using Vault, there is no need for you to export and import a [Federation Secret](/docs/k8s/installation/multi-cluster/kubernetes#federation-secret) in each datacenter. + +## Usage + +The expected use case is to create WAN Federation on Kubernetes clusters. The following procedure will result in a WAN Federation with Vault as the secrets backend between two clusters, dc1 and dc2. dc1 will act as the primary Consul cluster and will also contain the Vault server installation. dc2 will be the secondary Consul cluster. + +![Consul on Kubernetes with Vault as the Secrets Backend](/img/k8s/consul-vault-wan-federation-topology.svg 'Consul on Kubernetes with Vault as the Secrets Backend') + +The Vault Injectors in each cluster will ensure that every pod in the cluster has a Vault agent injected into the pod. 
+ +![Vault Injectors inject Vault agents into pods](/img/k8s/consul-vault-wan-federation-vault-injector.svg 'Vault Injectors inject Vault agents into pods') + +The Vault Agents on each Consul pod will communicate directly with Vault on its externally accessible endpoint. Consul pods are also configured with Vault annotations that configure the secrets that the pod needs as well as the path that the Vault agent should locally store those secrets. + +![Vault agent and server communication](/img/k8s/consul-vault-wan-federation-vault-communication.svg 'Vault agent and server communication') + +The two data centers will be federated using mesh gateways. This communication topology is also described in the [WAN Federation Via Mesh Gateways](/docs/k8s/installation/multi-cluster#wan-federation-via-mesh-gateways) section of [Multi-Cluster Federation Overview](/docs/k8s/installation/multi-cluster). + +![Mesh Federation via Mesh Gateways](/img/k8s/consul-vault-wan-federation-mesh-communication.svg 'Mesh Federation via Mesh Gateways') + +### Install Vault + +In this setup, you will deploy Vault server in the primary datacenter (dc1) Kubernetes cluster, which is also the primary Consul datacenter. You will configure your Vault Helm installation in the secondary datacenter (dc2) Kubernetes cluster to use it as an external server. This way there will be a single vault server cluster that will be used by both Consul datacenters. + +~> **Note**: For demonstration purposes, you will deploy a Vault server in dev mode. For production installations, this is not recommended. Please visit the [Vault Deployment Guide](https://learn.hashicorp.com/tutorials/vault/raft-deployment-guide?in=vault/day-one-raft) for guidance on how to install Vault in a production setting. + +1. Change your current Kubernetes context to target the primary datacenter (dc1). + + ```shell-session + $ kubectl config use-context + ``` + +1. Now, use the values files below for your Helm install. 
+ + + + ```yaml + server: + dev: + enabled: true + service: + enabled: true + type: LoadBalancer + ui: + enabled: true + ``` + + + ```shell-session + $ helm install vault-dc1 --values vault-dc1.yaml hashicorp/vault --wait + ``` + +### Configuring your local environment + +1. Install Consul locally so that you can generate the gossip key. Please see the [Precompiled Binaries](/docs/install#precompiled-binaries) section of the [Install Consul page](/docs/install#precompiled-binaries). + +1. Set the VAULT_TOKEN with a default value. + + ```shell-session + $ export VAULT_TOKEN=root + ``` + +1. Get the external IP or DNS name of the Vault server's load balancer. + + + + On EKS, you can get the IP address of the Vault server's load balancer with the following command: + + ```shell-session + $ export VAULT_SERVER_HOST=$(kubectl get svc vault-dc1 -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') + ``` + + + + + + On GKE, you can get the IP address of the Vault server's load balancer with the following command: + + ```shell-session + $ export VAULT_SERVER_HOST=$(kubectl get svc vault-dc1 -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + ``` + + + + + + On AKS, you can get the IP address of the Vault server's load balancer with the following command: + + ```shell-session + $ export VAULT_SERVER_HOST=$(kubectl get svc vault-dc1 --output jsonpath='{.status.loadBalancer.ingress[0].ip}') + ``` + + + + +1. Set the VAULT_ADDR environment variable. + + ```shell-session + $ export VAULT_ADDR=http://${VAULT_SERVER_HOST}:8200 + ``` + +## Systems Integration +### Overview +To use Vault as the Service Mesh Certificate Provider in Kubernetes, you must complete the following systems integration actions: + +- One time setup in Vault + 1. Enabling Vault KV Secrets Engine - Version 2. + 1. Enabling Vault PKI Engine. +- Setup per Consul datacenter + 1. Installing the Vault Injector within the Consul datacenter installation + 1. 
Configuring a Kubernetes Auth Method in Vault to authenticate and authorize operations from the Consul datacenter + 1. Enable Vault as the Secrets Backend in the Consul datacenter +### One time setup on Vault +1. Enable [Vault KV secrets engine - Version 2](https://www.vaultproject.io/docs/secrets/kv/kv-v2) in order to store the [Gossip Encryption Key](/docs/k8s/helm#v-global-acls-replicationtoken) and the ACL Replication token ([`global.acls.replicationToken`](/docs/k8s/helm#v-global-acls-replicationtoken)). + + ```shell-session + $ vault secrets enable -path=consul kv-v2 + ``` + +1. Enable Vault PKI Engine in order to leverage Vault for issuing Consul Server TLS certificates. + + ```shell-session + $ vault secrets enable pki + ``` + + ```shell-session + $ vault secrets tune -max-lease-ttl=87600h pki + ``` + +### Setup per Consul datacenter +#### Primary Datacenter (dc1) +1. Install the Vault Injector in your Consul Kubernetes cluster (dc1), which is used for accessing secrets from Vault. + + -> **Note**: In the primary datacenter (dc1), you will not have to configure `injector.externalvaultaddr` value because the Vault server is in the same primary datacenter (dc1) cluster. + + + + ```yaml + server: + dev: + enabled: true + service: + enabled: true + type: LoadBalancer + injector: + enabled: true + authPath: auth/kubernetes-dc1 + ui: + enabled: true + ``` + + + Next, install Vault in the Kubernetes cluster. + + ```shell-session + $ helm upgrade vault-dc1 --values vault-dc1.yaml hashicorp/vault --wait + ``` + +1. Configure the Kubernetes Auth Method in Vault for the primary datacenter (dc1). + + ```shell-session + $ vault auth enable -path=kubernetes-dc1 kubernetes + ``` + + Because Consul is in the same datacenter cluster as Vault, the Vault Auth Method can use its own CA Cert and JWT to authenticate Consul dc1 service account requests. Therefore, you do not need to set `token_reviewer` and `kubernetes_ca_cert` on the dc1 Kubernetes Auth Method. + +1. 
Configure Auth Method with Kubernetes API host + + ```shell-session + $ vault write auth/kubernetes-dc1/config kubernetes_host=https://kubernetes.default.svc + ``` + +1. Enable Vault as the secrets backend in the primary datacenter (dc1). However, you will not yet apply the Helm install command. You will issue the Helm upgrade command after the [Data Integration](/docs/k8s/installation/vault/wan-federation#setup-per-consul-datacenter-1) section. + + + + ```yaml + global: + secretsBackend: + vault: + enabled: true + ``` + + + + +#### Secondary Datacenter (dc2) +1. Install the Vault Injector in the secondary datacenter (dc2). + + In the secondary datacenter (dc2), you will configure the `externalvaultaddr` value to point to the external address of the Vault server in the primary datacenter (dc1). + + Change your Kubernetes context to target the secondary datacenter (dc2): + + ```shell-session + $ kubectl config use-context + ``` + + + + ```yaml + server: + enabled: false + injector: + enabled: true + externalVaultAddr: ${VAULT_ADDR} + authPath: auth/kubernetes-dc2 + ``` + + + + Next, install Vault in the Kubernetes cluster. + ```shell-session + $ helm install vault-dc2 --values vault-dc2.yaml hashicorp/vault --wait + ``` + +1. Configure the Kubernetes Auth Method in Vault for the datacenter. + + ```shell-session + $ vault auth enable -path=kubernetes-dc2 kubernetes + ``` + +1. Create a service account with access to the Kubernetes API in the secondary datacenter (dc2). For the secondary datacenter (dc2) auth method, you first need to create a service account that allows the Vault server in the primary datacenter (dc1) cluster to talk to the Kubernetes API in the secondary datacenter (dc2) cluster. 
+ + ```shell-session + $ cat <<EOF > auth-method-serviceaccount.yaml + # auth-method.yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vault-dc2-auth-method + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator + subjects: + - kind: ServiceAccount + name: vault-dc2-auth-method + namespace: default + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vault-dc2-auth-method + namespace: default + EOF + ``` + + ```shell-session + $ kubectl apply --filename auth-method-serviceaccount.yaml + ``` + +1. Next, you will need to get the token and CA cert from that service account secret. + + ```shell-session + $ export K8S_DC2_CA_CERT="$(kubectl get secret `kubectl get serviceaccounts vault-dc2-auth-method --output jsonpath='{.secrets[0].name}'` --output jsonpath='{.data.ca\.crt}' | base64 --decode)" + ``` + + ```shell-session + $ export K8S_DC2_JWT_TOKEN="$(kubectl get secret `kubectl get serviceaccounts vault-dc2-auth-method --output jsonpath='{.secrets[0].name}'` --output jsonpath='{.data.token}' | base64 --decode)" + ``` + +1. Configure Auth Method with the JWT token of the service account. You will have to get the externally reachable address of the secondary Consul datacenter (dc2) in the secondary Kubernetes cluster and set `kubernetes_host` within the Auth Method configuration. + + ```shell-session + $ export KUBE_API_URL_DC2=$(kubectl config view --output jsonpath="{.clusters[?(@.name == \"$(kubectl config current-context)\")].cluster.server}") + ``` + + ```shell-session + $ vault write auth/kubernetes-dc2/config \ + kubernetes_host="${KUBE_API_URL_DC2}" \ + token_reviewer_jwt="${K8S_DC2_JWT_TOKEN}" \ + kubernetes_ca_cert="${K8S_DC2_CA_CERT}" + ``` + +1. Enable Vault as the secrets backend in the secondary Consul datacenter (dc2). However, you will not yet apply the Helm install command. 
You will issue the Helm upgrade command after the [Data Integration](/docs/k8s/installation/vault/wan-federation#setup-per-consul-datacenter-1) section. + + + + ```yaml + global: + secretsBackend: + vault: + enabled: true + ``` + + + +## Data Integration +### Overview +To use Vault as the Service Mesh Certificate Provider in Kubernetes, you must complete following data integration actions: + + +- One time setup in Vault + 1. Store the secrets in Vault. + 1. Create a Vault policy that authorizes the desired level of access to the secrets. +- Setup per Consul datacenter + 1. Create Vault Kubernetes auth roles that link the policy to each Consul on Kubernetes service account that requires access. + 1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes helm chart. + +### One time setup in Vault +1. Store the ACL Replication Token, Gossip Encryption Key, and Root CA certificate secrets in Vault. + + ```shell-session + $ vault kv put consul/secret/gossip key="$(consul keygen)" + ``` + + ```shell-session + $ vault kv put consul/secret/replication token="$(uuidgen | tr '[:upper:]' '[:lower:]')" + ``` + ```shell-session + $ vault write pki/root/generate/internal common_name="Consul CA" ttl=87600h + ``` + +1. Create Vault policies that authorize the desired level of access to the secrets. + + ```shell-session + $ vault policy write gossip - < + ``` +#### Primary Datacenter (dc1) +1. 
Create Server TLS and Service Mesh Cert Policies + + ```shell-session + $ vault policy write consul-cert-dc1 - < + + ```yaml + global: + datacenter: "dc1" + name: consul + secretsBackend: + vault: + enabled: true + consulServerRole: consul-server + consulClientRole: consul-client + consulCARole: consul-ca + manageSystemACLsRole: server-acl-init + connectCA: + address: http://vault-dc1.default:8200 + rootPKIPath: connect_root/ + intermediatePKIPath: dc1/connect_inter/ + authMethodPath: kubernetes-dc1 + tls: + enabled: true + enableAutoEncrypt: true + caCert: + secretName: pki/cert/ca + federation: + enabled: true + acls: + manageSystemACLs: true + replicationToken: + secretName: consul/data/secret/replication + secretKey: token + gossipEncryption: + secretName: consul/data/secret/gossip + secretKey: key + server: + replicas: 1 + serverCert: + secretName: "pki/issue/consul-cert-dc1" + connectInject: + replicas: 1 + enabled: true + controller: + enabled: true + meshGateway: + enabled: true + replicas: 1 + ``` + + + + Next, install Consul in the primary Kubernetes cluster (dc1). + ```shell-session + $ helm install consul-dc1 --values consul-dc1.yaml hashicorp/consul + ``` + +#### Pre-installation for Secondary Datacenter (dc2) +1. Configure the Vault Kubernetes auth role in the Consul on Kubernetes Helm chart. For secondary datacenter (dc2), you will need to get the address of the mesh gateway from the **primary datacenter (dc1)** cluster. + + Keep your Kubernetes context targeting dc1 and set the `MESH_GW_HOST` environment variable that you will use in the Consul Helm chart for secondary datacenter (dc2). 
1. Change your Kubernetes context to target the secondary datacenter (dc2):
Next, install Consul in the secondary Kubernetes cluster (dc2).
provide pre-signed AWS API requests.
The auth method compares the Amazon +Resource Name (ARN) of the client with the `BoundIAMPrincipalARNs` list to determine if the client +is permitted to login. + +## Config Parameters + +The following are the auth method [`Config`](/api-docs/acl/auth-methods#config) +parameters for an auth method of type `aws-iam`: + +- `BoundIAMPrincipalARNs` `(array: )` - The list of IAM role or IAM user ARNs + which are permitted to login. A client authenticating to Consul must have an ARN that matches one + of the ARNs in this list. + - If `EnableIAMEntityDetails=false`, then bound ARNs must not contain the full path of the role + or user, and wildcards are not supported. For example, + `arn:aws:iam::123456789012:user/MyUserName` will permit the IAM user named "MyUserName" to log + in, and `arn:aws:iam::123456789012:role/MyRoleName` will permit the IAM role named "MyRoleName" + to log in. + - If `EnableIAMEntityDetails=true`, then bound ARNs with the full path must be used, such as, + `arn:aws:iam::123456789012:role/path/to/MyRoleName`. Additionally, ARNs may contain a single + trailing wildcard. For example, `arn:aws:iam::123456789012:*` will permit any role or user in + the account `123456789012` to login, while `arn:aws:iam::123456789012:role/path/to/roles/*` + will permit only roles at the path `/path/to/roles/`. +- `EnableIAMEntityDetails` `(bool: )` - This enables the auth method to fetch the IAM role or + IAM user details, including tags and the full role or user path. If enabled, clients must pass the + `-aws-include-entity` option to `consul login`. Additionally, an IAM role or user attempting to + login must have an `iam:GetRole` or `iam:GetUser` permission, respectively, to retrieve itself. + This setting is required in order to fetch the full path and tags of the IAM user or role and in + order to use wildcards in the `BoundIAMPrincipalARNs`. 
evaluates to the empty string in binding rules.
`entity_tags.` | Value of the tag on the IAM role or user
an `iam:GetUser` permission to fetch its own user.
no other credentials or permissions are required to make the request.
+ This response is not a guarantee of the client's identity - any role or user name may have been + included in the request - so the auth method requires the IAM role or user to have a [unique + id](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-unique-ids) + match with the `sts:GetCallerIdentity` response. +- Finally, the auth method makes an authentication decision. If the client's IAM role or user ARN + matches one of the configured `BoundIAMPrincipalARNs`, then the client is permitted to login. diff --git a/website/content/docs/security/acl/auth-methods/index.mdx b/website/content/docs/security/acl/auth-methods/index.mdx index 0c47504862..b2062fcf14 100644 --- a/website/content/docs/security/acl/auth-methods/index.mdx +++ b/website/content/docs/security/acl/auth-methods/index.mdx @@ -39,6 +39,7 @@ service mesh with minimal operator intervention. | [`kubernetes`](/docs/security/acl/auth-methods/kubernetes) | 1.5.0+ | | [`jwt`](/docs/security/acl/auth-methods/jwt) | 1.8.0+ | | [`oidc`](/docs/security/acl/auth-methods/oidc) | 1.8.0+ | +| [`aws-iam`](/docs/security/acl/auth-methods/aws-iam) | 1.12.0+ | ## Operator Configuration diff --git a/website/content/docs/security/acl/index.mdx b/website/content/docs/security/acl/index.mdx index 5ef4544b7b..b32176306d 100644 --- a/website/content/docs/security/acl/index.mdx +++ b/website/content/docs/security/acl/index.mdx @@ -7,7 +7,7 @@ description: >- # Access Control List (ACL) Overview -This topic describes core concepts associated with the optioal access control list (ACL) system shipped with Consul. ACLs authenticate requests and authorize access to resources. They also control access to the Consul UI, API, and CLI, as well as secure service-to-service and agent-to-agent communication. +This topic describes core concepts associated with the optional access control list (ACL) system shipped with Consul. ACLs authenticate requests and authorize access to resources. 
They also control access to the Consul UI, API, and CLI, as well as secure service-to-service and agent-to-agent communication. Refer to the following tutorials for step-by-step instructions on how to get started using ACLs: diff --git a/website/data/alert-banner.js b/website/data/alert-banner.js deleted file mode 100644 index 6fe9887949..0000000000 --- a/website/data/alert-banner.js +++ /dev/null @@ -1,13 +0,0 @@ -export const ALERT_BANNER_ACTIVE = false - -// https://github.com/hashicorp/web-components/tree/master/packages/alert-banner -export default { - tag: 'Blog post', - url: 'https://www.hashicorp.com/blog/a-new-chapter-for-hashicorp', - text: - 'HashiCorp shares have begun trading on the Nasdaq. Read the blog from our founders, Mitchell Hashimoto and Armon Dadgar.', - linkText: 'Read the post', - // Set the expirationDate prop with a datetime string (e.g. '2020-01-31T12:00:00-07:00') - // if you'd like the component to stop showing at or after a certain date - expirationDate: '2021-12-17T23:00:00-07:00', -} diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 0ec76b42b5..81a138333d 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -382,8 +382,8 @@ "path": "api-gateway" }, { - "title": "Usage", - "path": "api-gateway/api-gateway-usage" + "title": "Installation", + "path": "api-gateway/consul-api-gateway-install" }, { "title": "Technical Specifications", @@ -539,6 +539,10 @@ "path": "k8s/installation/vault/data-integration/snapshot-agent-config" } ] + }, + { + "title": "WAN Federation", + "path": "k8s/installation/vault/wan-federation" } ] }, @@ -969,6 +973,10 @@ { "title": "OIDC", "path": "security/acl/auth-methods/oidc" + }, + { + "title": "AWS IAM", + "path": "security/acl/auth-methods/aws-iam" } ] } diff --git a/website/data/metadata.js b/website/data/metadata.js deleted file mode 100644 index 646d7fcec0..0000000000 --- a/website/data/metadata.js +++ /dev/null @@ -1,2 +0,0 @@ -export 
const productName = 'Consul' -export const productSlug = 'consul' diff --git a/website/data/version.js b/website/data/version.js deleted file mode 100644 index a5beaddf65..0000000000 --- a/website/data/version.js +++ /dev/null @@ -1 +0,0 @@ -export default '1.11.5' diff --git a/website/global.d.ts b/website/global.d.ts deleted file mode 100644 index 110e4fff2d..0000000000 --- a/website/global.d.ts +++ /dev/null @@ -1 +0,0 @@ -/// diff --git a/website/layouts/standard/index.tsx b/website/layouts/standard/index.tsx deleted file mode 100644 index 38bb3f8c56..0000000000 --- a/website/layouts/standard/index.tsx +++ /dev/null @@ -1,76 +0,0 @@ -import query from './query.graphql' -import ProductSubnav from 'components/subnav' -import Footer from 'components/footer' -import { open } from '@hashicorp/react-consent-manager' - -export default function StandardLayout(props: Props): React.ReactElement { - const { useCaseNavItems } = props.data - - return ( - <> - { - return { - text: item.text, - url: `/use-cases/${item.url}`, - } - }), - ].sort((a, b) => a.text.localeCompare(b.text)), - }, - { - text: 'Enterprise', - url: - 'https://www.hashicorp.com/products/consul/?utm_source=oss&utm_medium=header-nav&utm_campaign=consul', - type: 'outbound', - }, - 'divider', - { - text: 'Tutorials', - url: 'https://learn.hashicorp.com/consul', - type: 'outbound', - }, - { - text: 'Docs', - url: '/docs', - type: 'inbound', - }, - { - text: 'API', - url: '/api-docs', - type: 'inbound', - }, - { - text: 'CLI', - url: '/commands', - type: 'inbound,', - }, - { - text: 'Community', - url: '/community', - type: 'inbound', - }, - ]} - /> - {props.children} -