Mirror of https://github.com/status-im/consul.git (synced 2025-01-22 03:29:43 +00:00)

Merge branch 'main' into japple-rel-notes-reorg

commit f9a695286e
.changelog/11500.txt | 4 (new file)
@@ -0,0 +1,4 @@
```release-note:bugfix
rpc: Adds a deadline to client RPC calls, so that streams will no longer hang
indefinitely in unstable network conditions. [[GH-8504](https://github.com/hashicorp/consul/issues/8504)]
```

.changelog/12805.txt | 3 (new file)
@@ -0,0 +1,3 @@
```release-note:improvement
connect: Add Envoy 1.22.0 to support matrix, remove 1.18.6
```

.changelog/12807.txt | 3 (new file)
@@ -0,0 +1,3 @@
```release-note:improvement
acl: Clarify node/service identities must be lowercase
```

.changelog/12808.txt | 3 (new file)
@@ -0,0 +1,3 @@
```release-note:note
dependency: Upgrade to use Go 1.18.1
```

.changelog/12819.txt | 3 (new file)
@@ -0,0 +1,3 @@
```release-note:improvement
grpc: Add a new ServerDiscovery.WatchServers gRPC endpoint for being notified when the set of ready servers has changed.
```

.changelog/12820.txt | 3 (new file)
@@ -0,0 +1,3 @@
```release-note:bug
ca: fix a bug that caused a non blocking leaf cert query after a blocking leaf cert query to block
```

.changelog/12825.txt | 3 (new file)
@@ -0,0 +1,3 @@
```release-note:feature
grpc: New gRPC endpoint to return envoy bootstrap parameters.
```

.changelog/12844.txt | 3 (new file)
@@ -0,0 +1,3 @@
```release-note:bug
raft: upgrade to v1.3.8 which fixes a bug where non cluster member can still be able to participate in an election.
```

.changelog/12846.txt | 3 (new file)
@@ -0,0 +1,3 @@
```release-note:note
ci: change action to pull v1 instead of main
```

.changelog/_12855.txt | 3 (new file)
@@ -0,0 +1,3 @@
```release-note:bug
snapshot-agent: **(Enterprise only)** Fix a bug where providing the ACL token to the snapshot agent via a CLI or ENV variable without a license configured results in an error during license auto-retrieval.
```

.changelog/_1679.txt | 3 (new file)
@@ -0,0 +1,3 @@
```release-note:breaking-change
config-entry: Exporting a specific service name across all namespace is invalid.
```
@@ -12,18 +12,8 @@ parameters:
description: "Boolean whether to run the load test workflow"

references:
images:
# When updating the Go version, remember to also update the versions in the
# workflows section for go-test-lib jobs.
go: &GOLANG_IMAGE docker.mirror.hashicorp.services/cimg/go:1.17.5
ember: &EMBER_IMAGE docker.mirror.hashicorp.services/circleci/node:14-browsers

paths:
test-results: &TEST_RESULTS_DIR /tmp/test-results

cache:
yarn: &YARN_CACHE_KEY consul-ui-v7-{{ checksum "ui/yarn.lock" }}

environment: &ENVIRONMENT
TEST_RESULTS_DIR: *TEST_RESULTS_DIR
EMAIL: noreply@hashicorp.com

@@ -32,6 +22,14 @@ references:
S3_ARTIFACT_BUCKET: consul-dev-artifacts-v2
BASH_ENV: .circleci/bash_env.sh
VAULT_BINARY_VERSION: 1.9.4
GO_VERSION: 1.18.1

images:
# When updating the Go version, remember to also update the versions in the
# workflows section for go-test-lib jobs.
go: &GOLANG_IMAGE docker.mirror.hashicorp.services/cimg/go:1.18.1
ember: &EMBER_IMAGE docker.mirror.hashicorp.services/circleci/node:14-browsers

cache:
yarn: &YARN_CACHE_KEY consul-ui-v7-{{ checksum "ui/yarn.lock" }}

steps:
install-gotestsum: &install-gotestsum

@@ -188,7 +186,7 @@ jobs:
name: Install golangci-lint
command: |
download=https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh
wget -O- -q $download | sh -x -s -- -d -b /home/circleci/go/bin v1.40.1
wget -O- -q $download | sh -x -s -- -d -b /home/circleci/go/bin v1.45.2
- run: go mod download
- run:
name: lint

@@ -257,8 +255,8 @@ jobs:
- run:
command: |
sudo rm -rf /usr/local/go
wget https://golang.org/dl/go1.17.5.linux-arm64.tar.gz
sudo tar -C /usr/local -xzvf go1.17.5.linux-arm64.tar.gz
wget https://dl.google.com/go/go${GO_VERSION}.linux-arm64.tar.gz
sudo tar -C /usr/local -xzvf go${GO_VERSION}.linux-arm64.tar.gz
- run: *install-gotestsum
- run: go mod download
- run:
@@ -596,50 +594,6 @@ jobs:
NOMAD_VERSION: main
steps: *NOMAD_INTEGRATION_TEST_STEPS

build-website-docker-image:
docker:
- image: docker.mirror.hashicorp.services/circleci/buildpack-deps
shell: /usr/bin/env bash -euo pipefail -c
steps:
- checkout
- setup_remote_docker
- run:
name: Build Docker Image if Necessary
command: |
# Ignore job if running an enterprise build
IMAGE_TAG=$(cat website/Dockerfile website/package-lock.json | sha256sum | awk '{print $1;}')
echo "Using $IMAGE_TAG"
if [ "$CIRCLE_REPOSITORY_URL" != "git@github.com:hashicorp/consul.git" ]; then
echo "Not Consul OSS Repo, not building website docker image"
elif curl https://hub.docker.com/v2/repositories/hashicorp/consul-website/tags/$IMAGE_TAG -fsL > /dev/null; then
echo "Dependencies have not changed, not building a new website docker image."
else
cd website/
docker build -t hashicorp/consul-website:$IMAGE_TAG .
docker tag hashicorp/consul-website:$IMAGE_TAG hashicorp/consul-website:latest
docker login -u $WEBSITE_DOCKER_USER -p $WEBSITE_DOCKER_PASS
docker push hashicorp/consul-website
fi
- run: *notify-slack-failure

algolia-index:
docker:
- image: docker.mirror.hashicorp.services/node:14
steps:
- checkout
- run:
name: Push content to Algolia Index
command: |
if [ "$CIRCLE_REPOSITORY_URL" != "git@github.com:hashicorp/consul.git" ]; then
echo "Not Consul OSS Repo, not indexing Algolia"
exit 0
fi
cd website/
npm install -g npm@latest
npm install
node scripts/index_search_content.js
- run: *notify-slack-failure

# build frontend yarn cache
frontend-cache:
docker:

@@ -846,14 +800,62 @@ jobs:
working_directory: ui/packages/consul-ui
command: make test-coverage-ci
- run: *notify-slack-failure

envoy-integration-test-1_18_6: &ENVOY_TESTS

compatibility-integration-test:
machine:
image: ubuntu-2004:202101-01
docker_layer_caching: true
parallelism: 1
steps:
- checkout
# Get go binary from workspace
- attach_workspace:
at: .
# Build the consul-dev image from the already built binary
- run:
command: |
sudo rm -rf /usr/local/go
wget https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz
sudo tar -C /usr/local -xzvf go${GO_VERSION}.linux-amd64.tar.gz
environment:
<<: *ENVIRONMENT
- run: *install-gotestsum
- run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile .
- run:
name: Compatibility Integration Tests
command: |
subtests=$(ls -d test/integration/consul-container/*/ | grep -v libs | xargs -n 1 basename | circleci tests split)
echo "Running $(echo $subtests | wc -w) subtests"
echo "$subtests"
subtests_pipe_sepr=$(echo "$subtests" | xargs | sed 's/ /|/g')
mkdir -p /tmp/test-results/
docker run consul:local consul version
cd ./test/integration/consul-container
gotestsum -- -timeout=30m ./$subtests_pipe_sepr --target-version local --latest-version latest
ls -lrt
environment:
# this is needed because of incompatibility between RYUK container and circleci
GOTESTSUM_JUNITFILE: /tmp/test-results/results.xml
GOTESTSUM_FORMAT: standard-verbose
COMPOSE_INTERACTIVE_NO_CLI: 1
# tput complains if this isn't set to something.
TERM: ansi
- store_artifacts:
path: ./test/integration/consul-container/upgrade/workdir/logs
destination: container-logs
- store_test_results:
path: *TEST_RESULTS_DIR
- store_artifacts:
path: *TEST_RESULTS_DIR
- run: *notify-slack-failure

envoy-integration-test-1_19_3: &ENVOY_TESTS
machine:
image: ubuntu-2004:202101-01
parallelism: 4
resource_class: medium
environment:
ENVOY_VERSION: "1.18.6"
ENVOY_VERSION: "1.19.3"
steps: &ENVOY_INTEGRATION_TEST_STEPS
- checkout
# Get go binary from workspace
@@ -886,11 +888,6 @@ jobs:
path: *TEST_RESULTS_DIR
- run: *notify-slack-failure

envoy-integration-test-1_19_3:
<<: *ENVOY_TESTS
environment:
ENVOY_VERSION: "1.19.3"

envoy-integration-test-1_20_2:
<<: *ENVOY_TESTS
environment:

@@ -901,6 +898,11 @@
environment:
ENVOY_VERSION: "1.21.1"

envoy-integration-test-1_22_0:
<<: *ENVOY_TESTS
environment:
ENVOY_VERSION: "1.22.0"

# run integration tests for the connect ca providers
test-connect-ca-providers:
docker:

@@ -1060,26 +1062,26 @@ workflows:
- dev-build: *filter-ignore-non-go-branches
- go-test:
requires: [ dev-build ]
- go-test-lib:
name: "go-test-api go1.16"
path: api
go-version: "1.16"
requires: [ dev-build ]
- go-test-lib:
name: "go-test-api go1.17"
path: api
go-version: "1.17"
requires: [ dev-build ]
- go-test-lib:
name: "go-test-sdk go1.16"
path: sdk
go-version: "1.16"
<<: *filter-ignore-non-go-branches
name: "go-test-api go1.18"
path: api
go-version: "1.18"
requires: [ dev-build ]
- go-test-lib:
name: "go-test-sdk go1.17"
path: sdk
go-version: "1.17"
<<: *filter-ignore-non-go-branches
- go-test-lib:
name: "go-test-sdk go1.18"
path: sdk
go-version: "1.18"
<<: *filter-ignore-non-go-branches
- go-test-race: *filter-ignore-non-go-branches
- go-test-32bit: *filter-ignore-non-go-branches
build-distros:

@@ -1142,9 +1144,6 @@ workflows:
- nomad-integration-0_8:
requires:
- dev-build
- envoy-integration-test-1_18_6:
requires:
- dev-build
- envoy-integration-test-1_19_3:
requires:
- dev-build

@@ -1154,21 +1153,13 @@
- envoy-integration-test-1_21_1:
requires:
- dev-build
- envoy-integration-test-1_22_0:
requires:
- dev-build
- compatibility-integration-test:
requires:
- dev-build

website:
unless: << pipeline.parameters.trigger-load-test >>
jobs:
- build-website-docker-image:
context: website-docker-image
filters:
branches:
only:
- main
- algolia-index:
filters:
branches:
only:
- stable-website
frontend:
unless: << pipeline.parameters.trigger-load-test >>
jobs:
.github/pull_request_template.md | 19 (new file, vendored)
@@ -0,0 +1,19 @@
### Description

Describe why you're making this change, in plain English.

### Testing & Reproduction steps

* In the case of bugs, describe how to replicate
* If any manual tests were done, document the steps and the conditions to replicate
* Call out any important/ relevant unit tests, e2e tests or integration tests you have added or are adding

### Links

Include any links here that might be helpful for people reviewing your PR (Tickets, GH issues, API docs, external benchmarks, tools docs, etc). If there are none, feel free to delete this section.

Please be mindful not to leak any customer or confidential information. HashiCorp employees may want to use our internal URL shortener to obfuscate links.

### PR Checklist

* [ ] updated test coverage
* [ ] external facing docs updated
* [ ] not a security concern
* [ ] checklist [folder](./../docs/config) consulted
.github/workflows/build.yml | 22 (vendored)
@@ -49,7 +49,7 @@ jobs:
uses: actions/checkout@v2
- name: Generate metadata file
id: generate-metadata-file
uses: hashicorp/actions-generate-metadata@main
uses: hashicorp/actions-generate-metadata@v1
with:
version: ${{ needs.get-product-version.outputs.product-version }}
product: ${{ env.PKG_NAME }}

@@ -65,15 +65,15 @@ jobs:
strategy:
matrix:
include:
- {go: "1.17.5", goos: "linux", goarch: "386"}
- {go: "1.17.5", goos: "linux", goarch: "amd64"}
- {go: "1.17.5", goos: "linux", goarch: "arm"}
- {go: "1.17.5", goos: "linux", goarch: "arm64"}
- {go: "1.17.5", goos: "freebsd", goarch: "386"}
- {go: "1.17.5", goos: "freebsd", goarch: "amd64"}
- {go: "1.17.5", goos: "windows", goarch: "386"}
- {go: "1.17.5", goos: "windows", goarch: "amd64"}
- {go: "1.17.5", goos: "solaris", goarch: "amd64"}
- {go: "1.18.1", goos: "linux", goarch: "386"}
- {go: "1.18.1", goos: "linux", goarch: "amd64"}
- {go: "1.18.1", goos: "linux", goarch: "arm"}
- {go: "1.18.1", goos: "linux", goarch: "arm64"}
- {go: "1.18.1", goos: "freebsd", goarch: "386"}
- {go: "1.18.1", goos: "freebsd", goarch: "amd64"}
- {go: "1.18.1", goos: "windows", goarch: "386"}
- {go: "1.18.1", goos: "windows", goarch: "amd64"}
- {go: "1.18.1", goos: "solaris", goarch: "amd64"}
fail-fast: true

name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build

@@ -173,7 +173,7 @@ jobs:
matrix:
goos: [ darwin ]
goarch: [ "amd64", "arm64" ]
go: [ "1.17.5" ]
go: [ "1.18.1" ]
fail-fast: true

name: Go ${{ matrix.go }} ${{ matrix.goos }} ${{ matrix.goarch }} build
.gitignore | 1 (vendored)
@@ -13,6 +13,7 @@ bin/
changelog.tmp
exit-code
Thumbs.db
.idea

# MacOS
.DS_Store
@@ -84,8 +84,21 @@ event "notarize-darwin-amd64" {
}
}

event "notarize-windows-386" {
event "notarize-darwin-arm64" {
depends = ["notarize-darwin-amd64"]
action "notarize-darwin-arm64" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "notarize-darwin-arm64"
}

notification {
on = "fail"
}
}

event "notarize-windows-386" {
depends = ["notarize-darwin-arm64"]
action "notarize-windows-386" {
organization = "hashicorp"
repository = "crt-workflows-common"
CHANGELOG.md | 103
@@ -1,3 +1,106 @@
## 1.12.0 (April 20, 2022)

BREAKING CHANGES:

* sdk: several changes to the testutil configuration structs (removed `ACLMasterToken`, renamed `Master` to `InitialManagement`, and `AgentMaster` to `AgentRecovery`) [[GH-11827](https://github.com/hashicorp/consul/issues/11827)]
* telemetry: the disable_compat_1.9 option now defaults to true. 1.9 style `consul.http...` metrics can still be enabled by setting `disable_compat_1.9 = false`. However, we will remove these metrics in 1.13. [[GH-12675](https://github.com/hashicorp/consul/issues/12675)]

FEATURES:

* acl: Add token information to PermissionDeniedErrors [[GH-12567](https://github.com/hashicorp/consul/issues/12567)]
* acl: Added an AWS IAM auth method that allows authenticating to Consul using AWS IAM identities [[GH-12583](https://github.com/hashicorp/consul/issues/12583)]
* ca: Root certificates can now be consumed from a gRPC streaming endpoint: `WatchRoots` [[GH-12678](https://github.com/hashicorp/consul/issues/12678)]
* cli: The `token read` command now supports the `-expanded` flag to display detailed role and policy information for the token. [[GH-12670](https://github.com/hashicorp/consul/issues/12670)]
* config: automatically reload config when a file changes using the `auto-reload-config` CLI flag or `auto_reload_config` config option. [[GH-12329](https://github.com/hashicorp/consul/issues/12329)]
* server: Ensure that service-defaults `Meta` is returned with the response to the `ConfigEntry.ResolveServiceConfig` RPC. [[GH-12529](https://github.com/hashicorp/consul/issues/12529)]
* server: discovery chains now include a response field named "Default" to indicate if they were not constructed from any service-resolver, service-splitter, or service-router config entries [[GH-12511](https://github.com/hashicorp/consul/issues/12511)]
* server: ensure that service-defaults meta is incorporated into the discovery chain response [[GH-12511](https://github.com/hashicorp/consul/issues/12511)]
* tls: it is now possible to configure TLS differently for each of Consul's listeners (i.e. HTTPS, gRPC and the internal multiplexed RPC listener) using the `tls` stanza [[GH-12504](https://github.com/hashicorp/consul/issues/12504)]
* ui: Added support for AWS IAM Auth Methods [[GH-12786](https://github.com/hashicorp/consul/issues/12786)]
* ui: Support connect-native services in the Topology view. [[GH-12098](https://github.com/hashicorp/consul/issues/12098)]
* xds: Add the ability to invoke AWS Lambdas through terminating gateways. [[GH-12681](https://github.com/hashicorp/consul/issues/12681)]
* xds: adding control of the mesh-wide min/max TLS versions and cipher suites from the mesh config entry [[GH-12601](https://github.com/hashicorp/consul/issues/12601)]

IMPROVEMENTS:

* Refactor ACL denied error code and start improving error details [[GH-12308](https://github.com/hashicorp/consul/issues/12308)]
* acl: Provide fuller detail in the error messsage when an ACL denies access. [[GH-12470](https://github.com/hashicorp/consul/issues/12470)]
* agent: Allow client agents to perform keyring operations [[GH-12442](https://github.com/hashicorp/consul/issues/12442)]
* agent: add additional validation to TLS config [[GH-12522](https://github.com/hashicorp/consul/issues/12522)]
* agent: add support for specifying TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 and TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 cipher suites [[GH-12522](https://github.com/hashicorp/consul/issues/12522)]
* agent: bump default min version for connections to TLS 1.2 [[GH-12522](https://github.com/hashicorp/consul/issues/12522)]
* api: add QueryBackend to QueryMeta so an api user can determine if a query was served using which backend (streaming or blocking query). [[GH-12791](https://github.com/hashicorp/consul/issues/12791)]
* ci: include 'enhancement' entry type in IMPROVEMENTS section of changelog. [[GH-12376](https://github.com/hashicorp/consul/issues/12376)]
* ui: Exclude Service Instance Health from Health Check reporting on the Node listing page. The health icons on each individual row now only reflect Node health. [[GH-12248](https://github.com/hashicorp/consul/issues/12248)]
* ui: Improve usability of Topology warning/information panels [[GH-12305](https://github.com/hashicorp/consul/issues/12305)]
* ui: Slightly improve usability of main navigation [[GH-12334](https://github.com/hashicorp/consul/issues/12334)]
* ui: Use @hashicorp/flight icons for all our icons. [[GH-12209](https://github.com/hashicorp/consul/issues/12209)]
* Removed impediments to using a namespace prefixed IntermediatePKIPath
in a CA definition. [[GH-12655](https://github.com/hashicorp/consul/issues/12655)]
* acl: Improve handling of region-specific endpoints in the AWS IAM auth method. As part of this, the `STSRegion` field was removed from the auth method config. [[GH-12774](https://github.com/hashicorp/consul/issues/12774)]
* api: Improve error message if service or health check not found by stating that the entity must be referred to by ID, not name [[GH-10894](https://github.com/hashicorp/consul/issues/10894)]
* autopilot: Autopilot state is now tracked on Raft followers in addition to the leader.
Stale queries may be used to query for the non-leaders state. [[GH-12617](https://github.com/hashicorp/consul/issues/12617)]
* autopilot: The `autopilot.healthy` and `autopilot.failure_tolerance` metrics are now
regularly emitted by all servers. [[GH-12617](https://github.com/hashicorp/consul/issues/12617)]
* ci: Enable security scanning for CRT [[GH-11956](https://github.com/hashicorp/consul/issues/11956)]
* connect: Add Envoy 1.21.1 to support matrix, remove 1.17.4 [[GH-12777](https://github.com/hashicorp/consul/issues/12777)]
* connect: Add Envoy 1.22.0 to support matrix, remove 1.18.6 [[GH-12805](https://github.com/hashicorp/consul/issues/12805)]
* connect: reduce raft apply on CA configuration when no change is performed [[GH-12298](https://github.com/hashicorp/consul/issues/12298)]
* deps: update to latest go-discover to fix vulnerable transitive jwt-go dependency [[GH-12739](https://github.com/hashicorp/consul/issues/12739)]
* grpc, xds: improved reliability of grpc and xds servers by adding recovery-middleware to return and log error in case of panic. [[GH-10895](https://github.com/hashicorp/consul/issues/10895)]
* http: if a GET request has a non-empty body, log a warning that suggests a possible problem (parameters were meant for the query string, but accidentally placed in the body) [[GH-11821](https://github.com/hashicorp/consul/issues/11821)]
* metrics: The `consul.raft.boltdb.writeCapacity` metric was added and indicates a theoretical number of writes/second that can be performed to Consul. [[GH-12646](https://github.com/hashicorp/consul/issues/12646)]
* sdk: Add support for `Partition` and `RetryJoin` to the TestServerConfig struct. [[GH-12126](https://github.com/hashicorp/consul/issues/12126)]
* telemetry: Add new `leader` label to `consul.rpc.server.call` and optional `target_datacenter`, `locality`,
`allow_stale`, and `blocking` optional labels. [[GH-12727](https://github.com/hashicorp/consul/issues/12727)]
* ui: In the datacenter selector order Datacenters by Primary, Local then alpanumerically [[GH-12478](https://github.com/hashicorp/consul/issues/12478)]
* ui: Include details on ACL policy dispositions required for unauthorized views [[GH-12354](https://github.com/hashicorp/consul/issues/12354)]
* ui: Move icons away from depending on a CSS preprocessor [[GH-12461](https://github.com/hashicorp/consul/issues/12461)]
* version: Improved performance of the version.GetHumanVersion function by 50% on memory allocation. [[GH-11507](https://github.com/hashicorp/consul/issues/11507)]

DEPRECATIONS:

* acl: The `consul.acl.ResolveTokenToIdentity` metric is no longer reported. The values that were previous reported as part of this metric will now be part of the `consul.acl.ResolveToken` metric. [[GH-12166](https://github.com/hashicorp/consul/issues/12166)]
* agent: deprecate older syntax for specifying TLS min version values [[GH-12522](https://github.com/hashicorp/consul/issues/12522)]
* agent: remove support for specifying insecure TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 and TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 cipher suites [[GH-12522](https://github.com/hashicorp/consul/issues/12522)]
* config: setting `cert_file`, `key_file`, `ca_file`, `ca_path`, `tls_min_version`, `tls_cipher_suites`, `verify_incoming`, `verify_incoming_rpc`, `verify_incoming_https`, `verify_outgoing` and `verify_server_hostname` at the top-level is now deprecated, use the `tls` stanza instead [[GH-12504](https://github.com/hashicorp/consul/issues/12504)]

BUG FIXES:

* acl: Fix parsing of IAM user and role tags in IAM auth method [[GH-12797](https://github.com/hashicorp/consul/issues/12797)]
* dns: allow max of 63 character DNS labels instead of 64 per RFC 1123 [[GH-12535](https://github.com/hashicorp/consul/issues/12535)]
* logging: fix a bug with incorrect severity syslog messages (all messages were sent with NOTICE severity). [[GH-12079](https://github.com/hashicorp/consul/issues/12079)]
* ui: Added Tags tab to gateways(just like exists for non-gateway services) [[GH-12400](https://github.com/hashicorp/consul/issues/12400)]

NOTES:

* Forked net/rpc to add middleware support: https://github.com/hashicorp/consul-net-rpc/ . [[GH-12311](https://github.com/hashicorp/consul/issues/12311)]
* dependency: Upgrade to use Go 1.18.1 [[GH-12808](https://github.com/hashicorp/consul/issues/12808)]

## 1.11.5 (April 13, 2022)

SECURITY:

* agent: Added a new check field, `disable_redirects`, that allows for disabling the following of redirects for HTTP checks. The intention is to default this to true in a future release so that redirects must explicitly be enabled. [[GH-12685](https://github.com/hashicorp/consul/issues/12685)]
* connect: Properly set SNI when configured for services behind a terminating gateway. [[GH-12672](https://github.com/hashicorp/consul/issues/12672)]

IMPROVEMENTS:

* agent: improve log messages when a service with a critical health check is deregistered due to exceeding the deregister_critical_service_after timeout [[GH-12725](https://github.com/hashicorp/consul/issues/12725)]
* xds: ensure that all connect timeout configs can apply equally to tproxy direct dial connections [[GH-12711](https://github.com/hashicorp/consul/issues/12711)]

BUG FIXES:

* acl: **(Enterprise Only)** fixes a bug preventing ACL policies configured with datacenter restrictions from being created if the cluster had been upgraded to Consul 1.11+ from an earlier version.
* connect/ca: cancel old Vault renewal on CA configuration. Provide a 1 - 6 second backoff on repeated token renewal requests to prevent overwhelming Vault. [[GH-12607](https://github.com/hashicorp/consul/issues/12607)]
* namespace: **(Enterprise Only)** Unreserve `consul` namespace to allow K8s namespace mirroring when deploying in `consul` K8s namespace .
* raft: upgrade to v1.3.6 which fixes a bug where a read replica node could attempt bootstrapping raft and prevent other nodes from bootstrapping at all [[GH-12496](https://github.com/hashicorp/consul/issues/12496)]
* replication: Fixed a bug which could prevent ACL replication from continuing successfully after a leader election. [[GH-12565](https://github.com/hashicorp/consul/issues/12565)]
* server: fix spurious blocking query suppression for discovery chains [[GH-12512](https://github.com/hashicorp/consul/issues/12512)]
* ui: Fixes a visual bug where our loading icon can look cut off [[GH-12479](https://github.com/hashicorp/consul/issues/12479)]
* usagemetrics: **(Enterprise only)** Fix a bug where Consul usage metrics stopped being reported when upgrading servers from 1.10 to 1.11 or later.

## 1.11.4 (February 28, 2022)

FEATURES:
@@ -5,8 +5,8 @@ SHELL = bash
GOTOOLS = \
github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@master \
github.com/hashicorp/go-bindata/go-bindata@master \
github.com/vektra/mockery/cmd/mockery@master \
github.com/golangci/golangci-lint/cmd/golangci-lint@v1.40.1 \
github.com/vektra/mockery/v2@latest \
github.com/golangci/golangci-lint/cmd/golangci-lint@v1.45.2 \
github.com/hashicorp/lint-consul-retry@master

PROTOC_VERSION=3.15.8

@@ -15,7 +15,7 @@ PROTOC_VERSION=3.15.8
# MOG_VERSION can be either a valid string for "go install <module>@<version>"
# or the string @DEV to imply use whatever is currently installed locally.
###
MOG_VERSION='v0.2.0'
MOG_VERSION='v0.3.0'
###
# PROTOC_GO_INJECT_TAG_VERSION can be either a valid string for "go install <module>@<version>"
# or the string @DEV to imply use whatever is currently installed locally.

@@ -158,7 +158,8 @@ dev-docker: linux
@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
@DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
# 'consul:local' tag is needed to run the integration tests
@DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' -t 'consul:local' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile

# In CircleCI, the linux binary will be attached from a previous step at bin/. This make target
# should only run in CI and not locally.
@@ -1 +0,0 @@
Moved to [docs/README.md](./docs/README.md).
@@ -3,7 +3,9 @@

package acl

const DefaultPartitionName = ""
const (
DefaultPartitionName = ""
)

// Reviewer Note: This is a little bit strange; one might want it to be "" like partition name
// However in consul/structs/intention.go we define IntentionDefaultNamespace as 'default' and so

@@ -106,3 +106,7 @@ func NewEnterpriseMetaWithPartition(_, _ string) EnterpriseMeta {

// FillAuthzContext stub
func (_ *EnterpriseMeta) FillAuthzContext(_ *AuthorizerContext) {}

func NormalizeNamespace(_ string) string {
return ""
}
@@ -1142,6 +1142,41 @@ func TestACL_HTTP(t *testing.T) {
_, err := a.srv.ACLTokenCreate(resp, req)
require.Error(t, err)
})

t.Run("Create with uppercase node identity", func(t *testing.T) {
tokenInput := &structs.ACLToken{
Description: "agent token for foo node",
NodeIdentities: []*structs.ACLNodeIdentity{
{
NodeName: "FOO",
Datacenter: "bar",
},
},
}

req, _ := http.NewRequest("PUT", "/v1/acl/token?token=root", jsonBody(tokenInput))
resp := httptest.NewRecorder()
_, err := a.srv.ACLTokenCreate(resp, req)
require.Error(t, err)
testutil.RequireErrorContains(t, err, "Only lowercase alphanumeric")
})

t.Run("Create with uppercase service identity", func(t *testing.T) {
tokenInput := &structs.ACLToken{
Description: "token for service identity foo",
ServiceIdentities: []*structs.ACLServiceIdentity{
{
ServiceName: "FOO",
},
},
}

req, _ := http.NewRequest("PUT", "/v1/acl/token?token=root", jsonBody(tokenInput))
resp := httptest.NewRecorder()
_, err := a.srv.ACLTokenCreate(resp, req)
require.Error(t, err)
testutil.RequireErrorContains(t, err, "Only lowercase alphanumeric")
})
})
}
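The two new subtests above exercise the rule called out in .changelog/12807.txt: node and service identity names must be lowercase. As a rough illustration only (a hypothetical standalone helper, not Consul's actual validation code), the check they expect behaves roughly like this:

```go
package main

import (
	"fmt"
	"regexp"
)

// validIdentityName is a hypothetical stand-in for the rule the tests above
// assert: only lowercase alphanumeric characters (and dashes) are accepted.
var validIdentityName = regexp.MustCompile(`^[a-z0-9\-]+$`)

func checkIdentityName(name string) error {
	if !validIdentityName.MatchString(name) {
		return fmt.Errorf("invalid identity %q: Only lowercase alphanumeric characters are allowed", name)
	}
	return nil
}

func main() {
	fmt.Println(checkIdentityName("foo")) // <nil>
	fmt.Println(checkIdentityName("FOO")) // error, as the tests above expect
}
```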
@@ -20,6 +20,7 @@ import (

"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/go-connlimit"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb"

@@ -357,6 +358,8 @@ type Agent struct {
// into Agent, which will allow us to remove this field.
rpcClientHealth *health.Client

rpcClientPeering pbpeering.PeeringServiceClient

// routineManager is responsible for managing longer running go routines
// run by the Agent
routineManager *routine.Manager

@@ -434,6 +437,8 @@ func New(bd BaseDeps) (*Agent, error) {
QueryOptionDefaults: config.ApplyDefaultQueryOptions(a.config),
}

a.rpcClientPeering = pbpeering.NewPeeringServiceClient(conn)

a.serviceManager = NewServiceManager(&a)

// We used to do this in the Start method. However it doesn't need to go

@@ -3901,6 +3906,8 @@ func (a *Agent) reloadConfigInternal(newCfg *config.RuntimeConfig) error {
ConfigEntryBootstrap: newCfg.ConfigEntryBootstrap,
RaftSnapshotThreshold: newCfg.RaftSnapshotThreshold,
RaftSnapshotInterval: newCfg.RaftSnapshotInterval,
HeartbeatTimeout: newCfg.ConsulRaftHeartbeatTimeout,
ElectionTimeout: newCfg.ConsulRaftElectionTimeout,
RaftTrailingLogs: newCfg.RaftTrailingLogs,
}
if err := a.delegate.ReloadConfig(cc); err != nil {
@@ -6202,6 +6202,101 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) {
}
}

func TestAgentConnectCALeafCert_nonBlockingQuery_after_blockingQuery_shouldNotBlock(t *testing.T) {
// see: https://github.com/hashicorp/consul/issues/12048

runStep := func(t *testing.T, name string, fn func(t *testing.T)) {
t.Helper()
if !t.Run(name, fn) {
t.FailNow()
}
}

if testing.Short() {
t.Skip("too slow for testing.Short")
}

t.Parallel()

a := NewTestAgent(t, "")
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
testrpc.WaitForActiveCARoot(t, a.RPC, "dc1", nil)

{
// Register a local service
args := &structs.ServiceDefinition{
ID: "foo",
Name: "test",
Address: "127.0.0.1",
Port: 8000,
Check: structs.CheckType{
TTL: 15 * time.Second,
},
}
req := httptest.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args))
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
if !assert.Equal(t, 200, resp.Code) {
t.Log("Body: ", resp.Body.String())
}
}

var (
serialNumber string
index string
issued structs.IssuedCert
)
runStep(t, "do initial non-blocking query", func(t *testing.T) {
req := httptest.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)

dec := json.NewDecoder(resp.Body)
require.NoError(t, dec.Decode(&issued))
serialNumber = issued.SerialNumber

require.Equal(t, "MISS", resp.Header().Get("X-Cache"),
"for the leaf cert cache type these are always MISS")
index = resp.Header().Get("X-Consul-Index")
})

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
// launch goroutine for blocking query
req := httptest.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?index="+index, nil).Clone(ctx)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
}()

// We just need to ensure that the above blocking query is in-flight before
// the next step, so do a little sleep.
time.Sleep(50 * time.Millisecond)

// The initial non-blocking query populated the leaf cert cache entry
// implicitly. The agent cache doesn't prune entries very often at all, so
// in between both of these steps the data should still be there, causing
// this to be a HIT that completes in less than 10m (the default inner leaf
// cert blocking query timeout).
runStep(t, "do a non-blocking query that should not block", func(t *testing.T) {
req := httptest.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)

var issued2 structs.IssuedCert
dec := json.NewDecoder(resp.Body)
require.NoError(t, dec.Decode(&issued2))

require.Equal(t, "HIT", resp.Header().Get("X-Cache"))

// If this is actually returning a cached result, the serial number
// should be unchanged.
require.Equal(t, serialNumber, issued2.SerialNumber)

require.Equal(t, issued, issued2)
})
}

func TestAgentConnectCALeafCert_Vault_doesNotChurnLeafCertsAtIdle(t *testing.T) {
ca.SkipIfVaultNotPresent(t)
@@ -16,6 +16,7 @@ import (
"net/http/httptest"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"

@@ -24,6 +25,8 @@ import (
"time"

"github.com/golang/protobuf/jsonpb"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/tcpproxy"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/serf/coordinate"

@@ -3931,9 +3934,11 @@ func TestAgent_ReloadConfigOutgoingRPCConfig(t *testing.T) {
a := NewTestAgent(t, hcl)
defer a.Shutdown()
tlsConf := a.tlsConfigurator.OutgoingRPCConfig()

require.True(t, tlsConf.InsecureSkipVerify)
require.Len(t, tlsConf.ClientCAs.Subjects(), 1)
require.Len(t, tlsConf.RootCAs.Subjects(), 1)
expectedCaPoolByFile := getExpectedCaPoolByFile(t)
assertDeepEqual(t, expectedCaPoolByFile, tlsConf.RootCAs, cmpCertPool)
assertDeepEqual(t, expectedCaPoolByFile, tlsConf.ClientCAs, cmpCertPool)

hcl = `
data_dir = "` + dataDir + `"

@@ -3946,9 +3951,11 @@ func TestAgent_ReloadConfigOutgoingRPCConfig(t *testing.T) {
c := TestConfig(testutil.Logger(t), config.FileSource{Name: t.Name(), Format: "hcl", Data: hcl})
require.NoError(t, a.reloadConfigInternal(c))
tlsConf = a.tlsConfigurator.OutgoingRPCConfig()

require.False(t, tlsConf.InsecureSkipVerify)
require.Len(t, tlsConf.RootCAs.Subjects(), 2)
require.Len(t, tlsConf.ClientCAs.Subjects(), 2)
expectedCaPoolByDir := getExpectedCaPoolByDir(t)
assertDeepEqual(t, expectedCaPoolByDir, tlsConf.RootCAs, cmpCertPool)
assertDeepEqual(t, expectedCaPoolByDir, tlsConf.ClientCAs, cmpCertPool)
}

func TestAgent_ReloadConfigAndKeepChecksStatus(t *testing.T) {

@@ -4018,8 +4025,9 @@ func TestAgent_ReloadConfigIncomingRPCConfig(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, tlsConf)
require.True(t, tlsConf.InsecureSkipVerify)
require.Len(t, tlsConf.ClientCAs.Subjects(), 1)
require.Len(t, tlsConf.RootCAs.Subjects(), 1)
expectedCaPoolByFile := getExpectedCaPoolByFile(t)
assertDeepEqual(t, expectedCaPoolByFile, tlsConf.RootCAs, cmpCertPool)
assertDeepEqual(t, expectedCaPoolByFile, tlsConf.ClientCAs, cmpCertPool)

hcl = `
data_dir = "` + dataDir + `"

@@ -4034,8 +4042,9 @@ func TestAgent_ReloadConfigIncomingRPCConfig(t *testing.T) {
tlsConf, err = tlsConf.GetConfigForClient(nil)
require.NoError(t, err)
require.False(t, tlsConf.InsecureSkipVerify)
require.Len(t, tlsConf.ClientCAs.Subjects(), 2)
require.Len(t, tlsConf.RootCAs.Subjects(), 2)
expectedCaPoolByDir := getExpectedCaPoolByDir(t)
assertDeepEqual(t, expectedCaPoolByDir, tlsConf.RootCAs, cmpCertPool)
assertDeepEqual(t, expectedCaPoolByDir, tlsConf.ClientCAs, cmpCertPool)
}

func TestAgent_ReloadConfigTLSConfigFailure(t *testing.T) {

@@ -4066,8 +4075,10 @@ func TestAgent_ReloadConfigTLSConfigFailure(t *testing.T) {
tlsConf, err := tlsConf.GetConfigForClient(nil)
require.NoError(t, err)
require.Equal(t, tls.NoClientCert, tlsConf.ClientAuth)
require.Len(t, tlsConf.ClientCAs.Subjects(), 1)
require.Len(t, tlsConf.RootCAs.Subjects(), 1)

expectedCaPoolByFile := getExpectedCaPoolByFile(t)
assertDeepEqual(t, expectedCaPoolByFile, tlsConf.RootCAs, cmpCertPool)
assertDeepEqual(t, expectedCaPoolByFile, tlsConf.ClientCAs, cmpCertPool)
}

func TestAgent_consulConfig_AutoEncryptAllowTLS(t *testing.T) {

@@ -5845,3 +5856,45 @@ func Test_coalesceTimerTwoPeriods(t *testing.T) {
})

}

func getExpectedCaPoolByFile(t *testing.T) *x509.CertPool {
pool := x509.NewCertPool()
data, err := ioutil.ReadFile("../test/ca/root.cer")
require.NoError(t, err)
if !pool.AppendCertsFromPEM(data) {
t.Fatal("could not add test ca ../test/ca/root.cer to pool")
}
return pool
}

func getExpectedCaPoolByDir(t *testing.T) *x509.CertPool {
pool := x509.NewCertPool()
entries, err := os.ReadDir("../test/ca_path")
require.NoError(t, err)

for _, entry := range entries {
filename := path.Join("../test/ca_path", entry.Name())

data, err := ioutil.ReadFile(filename)
require.NoError(t, err)

if !pool.AppendCertsFromPEM(data) {
t.Fatalf("could not add test ca %s to pool", filename)
}
}

return pool
}

// lazyCerts has a func field which can't be compared.
var cmpCertPool = cmp.Options{
cmpopts.IgnoreFields(x509.CertPool{}, "lazyCerts"),
cmp.AllowUnexported(x509.CertPool{}),
}

func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) {
t.Helper()
if diff := cmp.Diff(x, y, opts...); diff != "" {
t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
}
}
@@ -27,7 +27,7 @@ type DirectRPC interface {
// agent/cache.Cache struct that we care about
type Cache interface {
Notify(ctx context.Context, t string, r cache.Request, correlationID string, ch chan<- cache.UpdateEvent) error
Prepopulate(t string, result cache.FetchResult, dc string, token string, key string) error
Prepopulate(t string, result cache.FetchResult, dc string, peerName string, token string, key string) error
}

// ServerProvider is an interface that can be used to find one server in the local DC known to

@@ -137,7 +137,7 @@ func (m *mockCache) Notify(ctx context.Context, t string, r cache.Request, corre
return err
}

func (m *mockCache) Prepopulate(t string, result cache.FetchResult, dc string, token string, key string) error {
func (m *mockCache) Prepopulate(t string, result cache.FetchResult, dc string, peerName string, token string, key string) error {
var restore string
cert, ok := result.Value.(*structs.IssuedCert)
if ok {

@@ -147,7 +147,7 @@ func (m *mockCache) Prepopulate(t string, result cache.FetchResult, dc string, t
cert.PrivateKeyPEM = "redacted"
}

ret := m.Called(t, result, dc, token, key)
ret := m.Called(t, result, dc, peerName, token, key)

if ok && restore != "" {
cert.PrivateKeyPEM = restore

@@ -304,6 +304,7 @@ func (m *mockedConfig) expectInitialTLS(t *testing.T, agentName, datacenter, tok
rootRes,
datacenter,
"",
"",
rootsReq.CacheInfo().Key,
).Return(nil).Once()

@@ -330,6 +331,7 @@ func (m *mockedConfig) expectInitialTLS(t *testing.T, agentName, datacenter, tok
cachetype.ConnectCALeafName,
leafRes,
datacenter,
"",
token,
leafReq.Key(),
).Return(nil).Once()

@@ -96,7 +96,7 @@ func (ac *AutoConfig) populateCertificateCache(certs *structs.SignedResponse) er
rootRes := cache.FetchResult{Value: &certs.ConnectCARoots, Index: certs.ConnectCARoots.QueryMeta.Index}
rootsReq := ac.caRootsRequest()
// getting the roots doesn't require a token so in order to potentially share the cache with another
if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCARootName, rootRes, ac.config.Datacenter, "", rootsReq.CacheInfo().Key); err != nil {
if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCARootName, rootRes, ac.config.Datacenter, structs.DefaultPeerKeyword, "", rootsReq.CacheInfo().Key); err != nil {
return err
}

@@ -108,7 +108,7 @@ func (ac *AutoConfig) populateCertificateCache(certs *structs.SignedResponse) er
Index: certs.IssuedCert.RaftIndex.ModifyIndex,
State: cachetype.ConnectCALeafSuccess(connect.EncodeSigningKeyID(cert.AuthorityKeyId)),
}
if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCALeafName, certRes, leafReq.Datacenter, leafReq.Token, leafReq.Key()); err != nil {
if err := ac.acConfig.Cache.Prepopulate(cachetype.ConnectCALeafName, certRes, leafReq.Datacenter, structs.DefaultPeerKeyword, leafReq.Token, leafReq.Key()); err != nil {
return err
}
@@ -5,10 +5,11 @@ import (
"testing"
"time"

"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
)

func TestCatalogListServices(t *testing.T) {

@@ -104,7 +105,7 @@ func TestCatalogListServices_IntegrationWithCache_NotModifiedResponse(t *testing
},
}

err := c.Prepopulate(CatalogListServicesName, last, "dc1", "token", req.CacheInfo().Key)
err := c.Prepopulate(CatalogListServicesName, last, "dc1", "", "token", req.CacheInfo().Key)
require.NoError(t, err)

ctx, cancel := context.WithCancel(context.Background())
agent/cache-types/mock_Agent.go | 92 (new file)
@@ -0,0 +1,92 @@
// Code generated by mockery v2.11.0. DO NOT EDIT.

package cachetype

import (
local "github.com/hashicorp/consul/agent/local"
memdb "github.com/hashicorp/go-memdb"

mock "github.com/stretchr/testify/mock"

structs "github.com/hashicorp/consul/agent/structs"

testing "testing"

time "time"
)

// MockAgent is an autogenerated mock type for the Agent type
type MockAgent struct {
mock.Mock
}

// LocalBlockingQuery provides a mock function with given fields: alwaysBlock, hash, wait, fn
func (_m *MockAgent) LocalBlockingQuery(alwaysBlock bool, hash string, wait time.Duration, fn func(memdb.WatchSet) (string, interface{}, error)) (string, interface{}, error) {
ret := _m.Called(alwaysBlock, hash, wait, fn)

var r0 string
if rf, ok := ret.Get(0).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) string); ok {
r0 = rf(alwaysBlock, hash, wait, fn)
} else {
r0 = ret.Get(0).(string)
}

var r1 interface{}
if rf, ok := ret.Get(1).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) interface{}); ok {
r1 = rf(alwaysBlock, hash, wait, fn)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(interface{})
}
}

var r2 error
if rf, ok := ret.Get(2).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) error); ok {
r2 = rf(alwaysBlock, hash, wait, fn)
} else {
r2 = ret.Error(2)
}

return r0, r1, r2
}

// LocalState provides a mock function with given fields:
func (_m *MockAgent) LocalState() *local.State {
ret := _m.Called()

var r0 *local.State
if rf, ok := ret.Get(0).(func() *local.State); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*local.State)
}
}

return r0
}

// ServiceHTTPBasedChecks provides a mock function with given fields: id
func (_m *MockAgent) ServiceHTTPBasedChecks(id structs.ServiceID) []structs.CheckType {
ret := _m.Called(id)

var r0 []structs.CheckType
if rf, ok := ret.Get(0).(func(structs.ServiceID) []structs.CheckType); ok {
r0 = rf(id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]structs.CheckType)
}
}

return r0
}

// NewMockAgent creates a new instance of MockAgent. It also registers a cleanup function to assert the mocks expectations.
func NewMockAgent(t testing.TB) *MockAgent {
mock := &MockAgent{}

t.Cleanup(func() { mock.AssertExpectations(t) })

return mock
}
@@ -1,7 +1,12 @@
// Code generated by mockery v1.0.0
// Code generated by mockery v2.11.0. DO NOT EDIT.

package cachetype

import mock "github.com/stretchr/testify/mock"
import (
testing "testing"

mock "github.com/stretchr/testify/mock"
)

// MockRPC is an autogenerated mock type for the RPC type
type MockRPC struct {

@@ -21,3 +26,12 @@ func (_m *MockRPC) RPC(method string, args interface{}, reply interface{}) error

return r0
}

// NewMockRPC creates a new instance of MockRPC. It also registers a cleanup function to assert the mocks expectations.
func NewMockRPC(t testing.TB) *MockRPC {
mock := &MockRPC{}

t.Cleanup(func() { mock.AssertExpectations(t) })

return mock
}
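The regenerated mockery v2 mocks now ship a constructor that wires testify's expectation assertions into t.Cleanup. A minimal usage sketch (the test name and the RPC endpoint string are illustrative, not part of this change):

```go
package cachetype

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestNewMockRPCUsage(t *testing.T) {
	// NewMockRPC registers mock.AssertExpectations via t.Cleanup, so an
	// unmet expectation fails the test without an explicit assert call.
	rpc := NewMockRPC(t)
	rpc.On("RPC", "Health.ServiceNodes", mock.Anything, mock.Anything).Return(nil)

	require.NoError(t, rpc.RPC("Health.ServiceNodes", nil, nil))
}
```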
@@ -1,6 +1,6 @@
package cachetype

//go:generate mockery -all -inpkg
//go:generate mockery --all --inpackage

// RPC is an interface that an RPC client must implement. This is a helper
// interface that is implemented by the agent delegate so that Type
agent/cache/cache.go | 23 (vendored)
@@ -33,7 +33,7 @@ import (
"github.com/hashicorp/consul/lib/ttlcache"
)

//go:generate mockery -all -inpkg
//go:generate mockery --all --inpackage

// TODO(kit): remove the namespace from these once the metrics themselves change
var Gauges = []prometheus.GaugeDefinition{

@@ -91,7 +91,7 @@ const (
// struct in agent/structs. This API makes cache usage a mostly drop-in
// replacement for non-cached RPC calls.
//
// The cache is partitioned by ACL and datacenter. This allows the cache
// The cache is partitioned by ACL and datacenter/peer. This allows the cache
// to be safe for multi-DC queries and for queries where the data is modified
// due to ACLs all without the cache having to have any clever logic, at
// the slight expense of a less perfect cache.

@@ -376,6 +376,13 @@ func (c *Cache) getEntryLocked(
// Check if re-validate is requested. If so the first time round the
// loop is not a hit but subsequent ones should be treated normally.
if !tEntry.Opts.Refresh && info.MustRevalidate {
if entry.Fetching {
// There is an active blocking query for this data, which has not
// returned. We can logically deduce that the contents of the cache
// are actually current, and we can simply return this while
// leaving the blocking query alone.
return true, true, entry
}
return true, false, entry
}

@@ -399,7 +406,7 @@ func (c *Cache) getWithIndex(ctx context.Context, r getOptions) (interface{}, Re
return result.Value, ResultMeta{}, err
}

key := makeEntryKey(r.TypeEntry.Name, r.Info.Datacenter, r.Info.Token, r.Info.Key)
key := makeEntryKey(r.TypeEntry.Name, r.Info.Datacenter, r.Info.PeerName, r.Info.Token, r.Info.Key)

// First time through
first := true

@@ -519,7 +526,11 @@ RETRY_GET:
}
}

func makeEntryKey(t, dc, token, key string) string {
func makeEntryKey(t, dc, peerName, token, key string) string {
// TODO(peering): figure out if this is the desired format
if peerName != "" {
return fmt.Sprintf("%s/%s/%s/%s", t, "peer:"+peerName, token, key)
}
return fmt.Sprintf("%s/%s/%s/%s", t, dc, token, key)
}

@@ -877,8 +888,8 @@ func (c *Cache) Close() error {
// on startup. It is used to set the ConnectRootCA and AgentLeafCert when
// AutoEncrypt.TLS is turned on. The cache itself cannot fetch that the first
// time because it requires a special RPCType. Subsequent runs are fine though.
func (c *Cache) Prepopulate(t string, res FetchResult, dc, token, k string) error {
key := makeEntryKey(t, dc, token, k)
func (c *Cache) Prepopulate(t string, res FetchResult, dc, peerName, token, k string) error {
key := makeEntryKey(t, dc, peerName, token, k)
newEntry := cacheEntry{
Valid: true,
Value: res.Value,
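The key construction change above is the core of the peer-aware partitioning: when a peer name is present, it takes the place of the datacenter segment in the entry key. A self-contained sketch of the same logic, copied from the hunk above into a runnable example:

```go
package main

import "fmt"

// makeEntryKey mirrors the updated helper in agent/cache/cache.go: a non-empty
// peer name replaces the datacenter segment, so entries for a peered cluster
// can never collide with local-datacenter entries.
func makeEntryKey(t, dc, peerName, token, key string) string {
	if peerName != "" {
		return fmt.Sprintf("%s/%s/%s/%s", t, "peer:"+peerName, token, key)
	}
	return fmt.Sprintf("%s/%s/%s/%s", t, dc, token, key)
}

func main() {
	fmt.Println(makeEntryKey("catalog-services", "dc1", "", "token1", "v1"))
	// catalog-services/dc1/token1/v1
	fmt.Println(makeEntryKey("catalog-services", "dc1", "cluster-02", "token1", "v1"))
	// catalog-services/peer:cluster-02/token1/v1
}
```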
agent/cache/cache_test.go | 6 (vendored)
@@ -1545,7 +1545,7 @@ func TestCacheReload(t *testing.T) {
c.entriesLock.Lock()
tEntry, ok := c.types["t1"]
require.True(t, ok)
keyName := makeEntryKey("t1", "", "", "hello1")
keyName := makeEntryKey("t1", "", "", "", "hello1")
ok, entryValid, entry := c.getEntryLocked(tEntry, keyName, RequestInfo{})
require.True(t, ok)
require.True(t, entryValid)

@@ -1687,7 +1687,7 @@ func TestCache_Prepopulate(t *testing.T) {
c := New(Options{})
c.RegisterType("t", typ)

c.Prepopulate("t", FetchResult{Value: 17, Index: 1}, "dc1", "token", "v1")
c.Prepopulate("t", FetchResult{Value: 17, Index: 1}, "dc1", "", "token", "v1")

ctx := context.Background()
req := fakeRequest{

@@ -1740,7 +1740,7 @@ func TestCache_RefreshLifeCycle(t *testing.T) {
c := New(Options{})
c.RegisterType("t", typ)

key := makeEntryKey("t", "dc1", "token", "v1")
key := makeEntryKey("t", "dc1", "", "token", "v1")

ctx, cancel := context.WithCancel(context.Background())
defer cancel()
agent/cache/mock_Request.go | 17 (vendored)
@@ -1,8 +1,12 @@
// Code generated by mockery v1.0.0. DO NOT EDIT.
// Code generated by mockery v2.11.0. DO NOT EDIT.

package cache

import mock "github.com/stretchr/testify/mock"
import (
testing "testing"

mock "github.com/stretchr/testify/mock"
)

// MockRequest is an autogenerated mock type for the Request type
type MockRequest struct {

@@ -22,3 +26,12 @@ func (_m *MockRequest) CacheInfo() RequestInfo {

return r0
}

// NewMockRequest creates a new instance of MockRequest. It also registers a cleanup function to assert the mocks expectations.
func NewMockRequest(t testing.TB) *MockRequest {
mock := &MockRequest{}

t.Cleanup(func() { mock.AssertExpectations(t) })

return mock
}
agent/cache/mock_Type.go | 17 (vendored)
@@ -1,8 +1,12 @@
// Code generated by mockery v1.0.0. DO NOT EDIT.
// Code generated by mockery v2.11.0. DO NOT EDIT.

package cache

import mock "github.com/stretchr/testify/mock"
import (
testing "testing"

mock "github.com/stretchr/testify/mock"
)

// MockType is an autogenerated mock type for the Type type
type MockType struct {

@@ -43,3 +47,12 @@ func (_m *MockType) RegisterOptions() RegisterOptions {

return r0
}

// NewMockType creates a new instance of MockType. It also registers a cleanup function to assert the mocks expectations.
func NewMockType(t testing.TB) *MockType {
mock := &MockType{}

t.Cleanup(func() { mock.AssertExpectations(t) })

return mock
}
12
agent/cache/request.go
vendored
@ -16,6 +16,9 @@ type Request interface {
|
||||
// RequestInfo represents cache information for a request. The caching
|
||||
// framework uses this to control the behavior of caching and to determine
|
||||
// cacheability.
|
||||
//
|
||||
// TODO(peering): finish ensuring everything that sets a Datacenter sets or doesn't set PeerName.
|
||||
// TODO(peering): also make sure the peer name is present in the cache key likely in lieu of the datacenter somehow.
|
||||
type RequestInfo struct {
|
||||
// Key is a unique cache key for this request. This key should
|
||||
// be globally unique to identify this request, since any conflicting
|
||||
@ -28,14 +31,17 @@ type RequestInfo struct {
|
||||
//
|
||||
// Datacenter is the datacenter that the request is targeting.
|
||||
//
|
||||
// Both of these values are used to partition the cache. The cache framework
|
||||
// PeerName is the peer that the request is targeting.
|
||||
//
|
||||
// All of these values are used to partition the cache. The cache framework
|
||||
// today partitions data on these values to simplify behavior: by
|
||||
// partitioning ACL tokens, the cache doesn't need to be smart about
|
||||
// filtering results. By filtering datacenter results, the cache can
|
||||
// service the multi-DC nature of Consul. This comes at the expense of
|
||||
// filtering results. By filtering datacenter/peer results, the cache can
|
||||
// service the multi-DC/multi-peer nature of Consul. This comes at the expense of
|
||||
// working set size, but in general the effect is minimal.
|
||||
Token string
|
||||
Datacenter string
|
||||
PeerName string
|
||||
|
||||
// MinIndex is the minimum index being queried. This is used to
|
||||
// determine if we already have data satisfying the query or if we need
|
||||
|
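To make the partitioning comment above concrete, the trimmed-down stand-in below shows how two otherwise identical requests land in different cache partitions once PeerName is set; the struct here is a simplified copy of RequestInfo, and the values are placeholders.

```go
package main

import "fmt"

// requestInfo is a reduced stand-in for cache.RequestInfo carrying only the
// fields the cache partitions on: Token, Datacenter, and the new PeerName.
type requestInfo struct {
	Key        string
	Token      string
	Datacenter string
	PeerName   string
}

func main() {
	local := requestInfo{Key: "web", Token: "token1", Datacenter: "dc1"}
	peered := requestInfo{Key: "web", Token: "token1", Datacenter: "dc1", PeerName: "cluster-02"}

	// Same Key and Token, but the peered request is cached separately because
	// PeerName participates in the partition just like Datacenter does.
	fmt.Printf("local:  %+v\n", local)
	fmt.Printf("peered: %+v\n", peered)
}
```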
@ -1,11 +1,13 @@
|
||||
// Code generated by mockery v1.0.0. DO NOT EDIT.
|
||||
// Code generated by mockery v2.11.0. DO NOT EDIT.
|
||||
|
||||
package ca
|
||||
|
||||
import (
|
||||
x509 "crypto/x509"
|
||||
testing "testing"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
x509 "crypto/x509"
|
||||
)
|
||||
|
||||
// MockProvider is an autogenerated mock type for the Provider type
|
||||
@ -245,3 +247,12 @@ func (_m *MockProvider) SupportsCrossSigning() (bool, error) {
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// NewMockProvider creates a new instance of MockProvider. It also registers a cleanup function to assert the mocks expectations.
|
||||
func NewMockProvider(t testing.TB) *MockProvider {
|
||||
mock := &MockProvider{}
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
||||
|
@ -5,7 +5,7 @@ import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
//go:generate mockery -name Provider -inpkg
|
||||
//go:generate mockery --name Provider --inpackage
|
||||
|
||||
// ErrRateLimited is a sentinel error value Providers may return from any method
|
||||
// to indicate that the operation can't complete due to a temporary rate limit.
|
||||
|
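The mockery v2 regeneration above also adds NewMockProvider, which registers AssertExpectations via t.Cleanup so tests no longer have to do that bookkeeping themselves. A small illustrative test, assuming it lives in the same ca package as the mock; the expectation shown is arbitrary:

```go
package ca

import "testing"

func TestMockProviderCleanup(t *testing.T) {
	// NewMockProvider wires AssertExpectations into t.Cleanup, so this test
	// fails automatically if the expected call below never happens.
	provider := NewMockProvider(t)
	provider.On("SupportsCrossSigning").Return(true, nil)

	ok, err := provider.SupportsCrossSigning()
	if err != nil || !ok {
		t.Fatalf("unexpected result: ok=%v err=%v", ok, err)
	}
}
```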
@ -3,6 +3,7 @@ package agent
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
@ -288,8 +289,13 @@ func TestConnectCARoots_PEMEncoding(t *testing.T) {
|
||||
|
||||
data, err := ioutil.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
pool := x509.NewCertPool()
|
||||
require.True(t, pool.AppendCertsFromPEM(data))
|
||||
|
||||
// expecting the root cert from dc1 and an intermediate in dc2
|
||||
require.Len(t, pool.Subjects(), 2)
|
||||
block, rest := pem.Decode(data)
|
||||
_, err = x509.ParseCertificate(block.Bytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
block, _ = pem.Decode(rest)
|
||||
_, err = x509.ParseCertificate(block.Bytes)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
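The test now walks the PEM bundle with pem.Decode instead of counting pool.Subjects(), presumably because x509.CertPool.Subjects is deprecated as of the Go 1.18 upgrade noted in the changelog. The same decode loop, as a standalone sketch over an arbitrary bundle file (the file name is a placeholder):

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
)

func main() {
	// Read a PEM bundle from disk; the file name is a placeholder.
	data, err := os.ReadFile("bundle.pem")
	if err != nil {
		log.Fatal(err)
	}

	// Decode and parse every certificate block in the bundle, the same way the
	// test above validates the root and intermediate returned by the endpoint.
	count := 0
	for block, rest := pem.Decode(data); block != nil; block, rest = pem.Decode(rest) {
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			log.Fatal(err)
		}
		count++
		fmt.Println(cert.Subject)
	}
	fmt.Println("certificates:", count)
}
```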
@ -1174,7 +1174,21 @@ func (r *ACLResolver) ACLsEnabled() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *ACLResolver) ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (ACLResolveResult, error) {
|
||||
// TODO(peering): fix all calls to use the new signature and rename it back
|
||||
func (r *ACLResolver) ResolveTokenAndDefaultMeta(
|
||||
token string,
|
||||
entMeta *acl.EnterpriseMeta,
|
||||
authzContext *acl.AuthorizerContext,
|
||||
) (ACLResolveResult, error) {
|
||||
return r.ResolveTokenAndDefaultMetaWithPeerName(token, entMeta, structs.DefaultPeerKeyword, authzContext)
|
||||
}
|
||||
|
||||
func (r *ACLResolver) ResolveTokenAndDefaultMetaWithPeerName(
|
||||
token string,
|
||||
entMeta *acl.EnterpriseMeta,
|
||||
peerName string,
|
||||
authzContext *acl.AuthorizerContext,
|
||||
) (ACLResolveResult, error) {
|
||||
result, err := r.ResolveToken(token)
|
||||
if err != nil {
|
||||
return ACLResolveResult{}, err
|
||||
@ -1186,9 +1200,19 @@ func (r *ACLResolver) ResolveTokenAndDefaultMeta(token string, entMeta *acl.Ente
|
||||
|
||||
// Default the EnterpriseMeta based on the Tokens meta or actual defaults
|
||||
// in the case of unknown identity
|
||||
if result.ACLIdentity != nil {
|
||||
switch {
|
||||
case peerName == "" && result.ACLIdentity != nil:
|
||||
entMeta.Merge(result.ACLIdentity.EnterpriseMetadata())
|
||||
} else {
|
||||
case result.ACLIdentity != nil:
|
||||
// We _do not_ normalize the enterprise meta from the token when a peer
|
||||
// name was specified because namespaces across clusters are not
|
||||
// equivalent. A local namespace is _never_ correct for a remote query.
|
||||
entMeta.Merge(
|
||||
structs.DefaultEnterpriseMetaInPartition(
|
||||
result.ACLIdentity.EnterpriseMetadata().PartitionOrDefault(),
|
||||
),
|
||||
)
|
||||
default:
|
||||
entMeta.Merge(structs.DefaultEnterpriseMetaInDefaultPartition())
|
||||
}
|
||||
|
||||
|
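Existing callers keep working because the old name now just forwards with structs.DefaultPeerKeyword. The toy program below reduces the new switch to plain strings to show the behavioural difference it encodes: with a peer name set, the token's local namespace is deliberately not applied. Names and values here are placeholders, not Consul APIs.

```go
package main

import "fmt"

// mergeDefaults reduces the switch added above to strings: no peer name means
// the token's own namespace is used as the default; a peer name means only the
// partition-level default applies, since a local namespace is never correct
// for a remote query; no identity at all falls back to the global default.
func mergeDefaults(peerName, tokenNamespace string) string {
	switch {
	case peerName == "" && tokenNamespace != "":
		return tokenNamespace
	case tokenNamespace != "":
		return "default"
	default:
		return "default"
	}
}

func main() {
	fmt.Println(mergeDefaults("", "team-ns"))           // team-ns
	fmt.Println(mergeDefaults("cluster-02", "team-ns")) // default
}
```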
@ -770,7 +770,7 @@ func (a *ACL) tokenSetInternal(args *structs.ACLTokenSetRequest, reply *structs.
|
||||
return fmt.Errorf("Service identity %q cannot specify a list of datacenters on a local token", svcid.ServiceName)
|
||||
}
|
||||
if !isValidServiceIdentityName(svcid.ServiceName) {
|
||||
return fmt.Errorf("Service identity %q has an invalid name. Only alphanumeric characters, '-' and '_' are allowed", svcid.ServiceName)
|
||||
return fmt.Errorf("Service identity %q has an invalid name. Only lowercase alphanumeric characters, '-' and '_' are allowed", svcid.ServiceName)
|
||||
}
|
||||
}
|
||||
token.ServiceIdentities = dedupeServiceIdentities(token.ServiceIdentities)
|
||||
@ -783,7 +783,7 @@ func (a *ACL) tokenSetInternal(args *structs.ACLTokenSetRequest, reply *structs.
|
||||
return fmt.Errorf("Node identity is missing the datacenter field on this token")
|
||||
}
|
||||
if !isValidNodeIdentityName(nodeid.NodeName) {
|
||||
return fmt.Errorf("Node identity has an invalid name. Only alphanumeric characters, '-' and '_' are allowed")
|
||||
return fmt.Errorf("Node identity has an invalid name. Only lowercase alphanumeric characters, '-' and '_' are allowed")
|
||||
}
|
||||
}
|
||||
token.NodeIdentities = dedupeNodeIdentities(token.NodeIdentities)
|
||||
@ -1682,7 +1682,7 @@ func (a *ACL) RoleSet(args *structs.ACLRoleSetRequest, reply *structs.ACLRole) e
|
||||
return fmt.Errorf("Service identity is missing the service name field on this role")
|
||||
}
|
||||
if !isValidServiceIdentityName(svcid.ServiceName) {
|
||||
return fmt.Errorf("Service identity %q has an invalid name. Only alphanumeric characters, '-' and '_' are allowed", svcid.ServiceName)
|
||||
return fmt.Errorf("Service identity %q has an invalid name. Only lowercase alphanumeric characters, '-' and '_' are allowed", svcid.ServiceName)
|
||||
}
|
||||
}
|
||||
role.ServiceIdentities = dedupeServiceIdentities(role.ServiceIdentities)
|
||||
@ -1695,7 +1695,7 @@ func (a *ACL) RoleSet(args *structs.ACLRoleSetRequest, reply *structs.ACLRole) e
|
||||
return fmt.Errorf("Node identity is missing the datacenter field on this role")
|
||||
}
|
||||
if !isValidNodeIdentityName(nodeid.NodeName) {
|
||||
return fmt.Errorf("Node identity has an invalid name. Only alphanumeric characters, '-' and '_' are allowed")
|
||||
return fmt.Errorf("Node identity has an invalid name. Only lowercase alphanumeric characters, '-' and '_' are allowed")
|
||||
}
|
||||
}
|
||||
role.NodeIdentities = dedupeNodeIdentities(role.NodeIdentities)
|
||||
|
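These messages now match the documented restriction (see the acl changelog entry above) that identity names must be lowercase. The validators themselves are not part of this diff, so the snippet below is only a plausible regular-expression sketch of such a check, not Consul's actual isValidServiceIdentityName/isValidNodeIdentityName implementation.

```go
package main

import (
	"fmt"
	"regexp"
)

// validIdentityName is a hypothetical stand-in for the identity-name checks:
// lowercase alphanumeric characters, '-' and '_' only.
var validIdentityName = regexp.MustCompile(`^[a-z0-9\-_]+$`)

func main() {
	for _, name := range []string{"web", "web-frontend", "Web", "web.frontend"} {
		fmt.Printf("%-14s valid=%v\n", name, validIdentityName.MatchString(name))
	}
}
```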
@ -11,12 +11,11 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
|
||||
"github.com/hashicorp/memberlist"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
|
||||
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest"
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
autopilot "github.com/hashicorp/raft-autopilot"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
|
||||
"github.com/hashicorp/consul/agent/consul/autopilotevents"
|
||||
"github.com/hashicorp/consul/agent/metadata"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/logging"
|
||||
@ -29,7 +30,8 @@ var AutopilotGauges = []prometheus.GaugeDefinition{
|
||||
|
||||
// AutopilotDelegate is a Consul delegate for autopilot operations.
|
||||
type AutopilotDelegate struct {
|
||||
server *Server
|
||||
server *Server
|
||||
readyServersPublisher *autopilotevents.ReadyServersEventPublisher
|
||||
}
|
||||
|
||||
func (d *AutopilotDelegate) AutopilotConfig() *autopilot.Config {
|
||||
@ -51,6 +53,8 @@ func (d *AutopilotDelegate) NotifyState(state *autopilot.State) {
|
||||
} else {
|
||||
metrics.SetGauge([]string{"autopilot", "healthy"}, 0)
|
||||
}
|
||||
|
||||
d.readyServersPublisher.PublishReadyServersEvents(state)
|
||||
}
|
||||
|
||||
func (d *AutopilotDelegate) RemoveFailedServer(srv *autopilot.Server) {
|
||||
@ -63,7 +67,13 @@ func (d *AutopilotDelegate) RemoveFailedServer(srv *autopilot.Server) {
|
||||
}
|
||||
|
||||
func (s *Server) initAutopilot(config *Config) {
|
||||
apDelegate := &AutopilotDelegate{s}
|
||||
apDelegate := &AutopilotDelegate{
|
||||
server: s,
|
||||
readyServersPublisher: autopilotevents.NewReadyServersEventPublisher(autopilotevents.Config{
|
||||
Publisher: s.publisher,
|
||||
GetStore: func() autopilotevents.StateStore { return s.fsm.State() },
|
||||
}),
|
||||
}
|
||||
|
||||
s.autopilot = autopilot.New(
|
||||
s.raft,
|
||||
@ -74,6 +84,9 @@ func (s *Server) initAutopilot(config *Config) {
|
||||
autopilot.WithPromoter(s.autopilotPromoter()),
|
||||
autopilot.WithReconciliationDisabled(),
|
||||
)
|
||||
|
||||
// registers a snapshot handler for the event publisher to send as the first event for a new stream
|
||||
s.publisher.RegisterHandler(autopilotevents.EventTopicReadyServers, apDelegate.readyServersPublisher.HandleSnapshot)
|
||||
}
|
||||
|
||||
func (s *Server) autopilotServers() map[raft.ServerID]*autopilot.Server {
|
||||
@ -129,7 +142,7 @@ func (s *Server) autopilotServerFromMetadata(srv *metadata.Server) (*autopilot.S
|
||||
// populate the node meta if there is any. When a node first joins or if
|
||||
// there are ACL issues then this could be empty if the server has not
|
||||
// yet been able to register itself in the catalog
|
||||
_, node, err := s.fsm.State().GetNodeID(types.NodeID(srv.ID), structs.NodeEnterpriseMetaInDefaultPartition())
|
||||
_, node, err := s.fsm.State().GetNodeID(types.NodeID(srv.ID), structs.NodeEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error retrieving node from state store: %w", err)
|
||||
}
|
||||
|
@ -2,6 +2,7 @@ package consul
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
@ -10,6 +11,8 @@ import (
|
||||
"github.com/hashicorp/serf/serf"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/agent/consul/autopilotevents"
|
||||
"github.com/hashicorp/consul/agent/consul/stream"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
@ -522,3 +525,99 @@ func TestAutopilot_MinQuorum(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestAutopilot_EventPublishing(t *testing.T) {
|
||||
// This is really an integration level test. The general flow this test will follow is:
|
||||
//
|
||||
// 1. Start a 3 server cluster
|
||||
// 2. Subscribe to the ready server events
|
||||
// 3. Observe the first event, which should arrive almost immediately as it is the
|
||||
// snapshot event.
|
||||
// 4. Wait for multiple iterations of the autopilot state updater and ensure no
|
||||
// other events are seen. The state update interval is 50ms for tests unless
|
||||
// overridden.
|
||||
// 5. Add a fourth server.
|
||||
// 6. Wait for an event to be emitted containing 4 ready servers.
|
||||
|
||||
// 1. create the test cluster
|
||||
cluster := newTestCluster(t, &testClusterConfig{
|
||||
Servers: 3,
|
||||
ServerConf: testServerACLConfig,
|
||||
// We want to wait until each server has registered itself in the Catalog. Otherwise
|
||||
// the first snapshot event we see might have no servers in it while things are being
|
||||
// initialized. Doing this wait ensures that things are in the right state to start
|
||||
// the subscription.
|
||||
})
|
||||
|
||||
// 2. subscribe to ready server events
|
||||
req := stream.SubscribeRequest{
|
||||
Topic: autopilotevents.EventTopicReadyServers,
|
||||
Subject: stream.SubjectNone,
|
||||
Token: TestDefaultInitialManagementToken,
|
||||
}
|
||||
sub, err := cluster.Servers[0].publisher.Subscribe(&req)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(sub.Unsubscribe)
|
||||
|
||||
// 3. Observe that an event was generated which should be the snapshot event.
|
||||
// As we have just bootstrapped the cluster with 3 servers we expect to
|
||||
// see those 3 here.
|
||||
validatePayload(t, 3, mustGetEventWithTimeout(t, sub, 50*time.Millisecond))
|
||||
|
||||
// TODO - it's kind of annoying that the EventPublisher doesn't have a mode where
|
||||
// it knows each event is a full state of the world. The ramifications are that
|
||||
// we have to expect/ignore the framing events for EndOfSnapshot.
|
||||
event := mustGetEventWithTimeout(t, sub, 10*time.Millisecond)
|
||||
require.True(t, event.IsFramingEvent())
|
||||
|
||||
// 4. Wait for 3 iterations of the ServerHealthInterval to ensure no events
|
||||
// are being published when the autopilot state is not changing.
|
||||
eventNotEmitted(t, sub, 150*time.Millisecond)
|
||||
|
||||
// 5. Add a fourth server
|
||||
_, srv := testServerWithConfig(t, testServerACLConfig, func(c *Config) {
|
||||
c.Bootstrap = false
|
||||
c.BootstrapExpect = 0
|
||||
})
|
||||
joinLAN(t, srv, cluster.Servers[0])
|
||||
|
||||
// 6. Now wait for the event for the fourth server being added. This may take a little
|
||||
// while as the joinLAN operation above doesn't wait for the server to actually get
|
||||
// added to Raft.
|
||||
validatePayload(t, 4, mustGetEventWithTimeout(t, sub, time.Second))
|
||||
}
|
||||
|
||||
// mustGetEventWithTimeout is a helper function for validating that a Subscription.Next call will return
|
||||
// an event within the given time. It also validates that no error is returned.
|
||||
func mustGetEventWithTimeout(t *testing.T, subscription *stream.Subscription, timeout time.Duration) stream.Event {
|
||||
t.Helper()
|
||||
event, err := getEventWithTimeout(t, subscription, timeout)
|
||||
require.NoError(t, err)
|
||||
return event
|
||||
}
|
||||
|
||||
// getEventWithTimeout is a helper function for retrieving a Event from a Subscription within the specified timeout.
|
||||
func getEventWithTimeout(t *testing.T, subscription *stream.Subscription, timeout time.Duration) (stream.Event, error) {
|
||||
t.Helper()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
event, err := subscription.Next(ctx)
|
||||
return event, err
|
||||
}
|
||||
|
||||
// eventNotEmitted is a helper to validate that no Event is emitted for the given Subscription
|
||||
func eventNotEmitted(t *testing.T, subscription *stream.Subscription, timeout time.Duration) {
|
||||
t.Helper()
|
||||
var event stream.Event
|
||||
var err error
|
||||
event, err = getEventWithTimeout(t, subscription, timeout)
|
||||
require.Equal(t, context.DeadlineExceeded, err, fmt.Sprintf("event:%v", event))
|
||||
}
|
||||
|
||||
func validatePayload(t *testing.T, expectedNumServers int, event stream.Event) {
|
||||
t.Helper()
|
||||
require.Equal(t, autopilotevents.EventTopicReadyServers, event.Topic)
|
||||
readyServers, ok := event.Payload.(autopilotevents.EventPayloadReadyServers)
|
||||
require.True(t, ok)
|
||||
require.Len(t, readyServers, expectedNumServers)
|
||||
}
|
||||
|
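Outside of tests, a consumer would follow the same subscribe/Next pattern and can use ExtractEventPayload (defined later in this diff) to get typed results. A rough sketch, assuming the publisher is the agent's stream.EventPublisher and that the caller supplies a suitable ACL token; error handling is abbreviated and identifiers are placeholders:

```go
package example

import (
	"context"

	"github.com/hashicorp/consul/agent/consul/autopilotevents"
	"github.com/hashicorp/consul/agent/consul/stream"
)

// watchReadyServers subscribes to the ready-servers topic and returns the
// payload of the first non-framing event, i.e. the current snapshot.
func watchReadyServers(ctx context.Context, publisher *stream.EventPublisher, token string) (autopilotevents.EventPayloadReadyServers, error) {
	sub, err := publisher.Subscribe(&stream.SubscribeRequest{
		Topic:   autopilotevents.EventTopicReadyServers,
		Subject: stream.SubjectNone,
		Token:   token,
	})
	if err != nil {
		return nil, err
	}
	defer sub.Unsubscribe()

	for {
		event, err := sub.Next(ctx)
		if err != nil {
			return nil, err
		}
		// Skip EndOfSnapshot and other framing events, as the test above does.
		if event.IsFramingEvent() {
			continue
		}
		return autopilotevents.ExtractEventPayload(event)
	}
}
```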
29
agent/consul/autopilotevents/mock_Publisher_test.go
Normal file
@ -0,0 +1,29 @@
|
||||
// Code generated by mockery v2.11.0. DO NOT EDIT.
|
||||
|
||||
package autopilotevents
|
||||
|
||||
import (
|
||||
testing "testing"
|
||||
|
||||
stream "github.com/hashicorp/consul/agent/consul/stream"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// MockPublisher is an autogenerated mock type for the Publisher type
|
||||
type MockPublisher struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// Publish provides a mock function with given fields: _a0
|
||||
func (_m *MockPublisher) Publish(_a0 []stream.Event) {
|
||||
_m.Called(_a0)
|
||||
}
|
||||
|
||||
// NewMockPublisher creates a new instance of MockPublisher. It also registers a cleanup function to assert the mocks expectations.
|
||||
func NewMockPublisher(t testing.TB) *MockPublisher {
|
||||
mock := &MockPublisher{}
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
58
agent/consul/autopilotevents/mock_StateStore_test.go
Normal file
@ -0,0 +1,58 @@
|
||||
// Code generated by mockery v2.11.0. DO NOT EDIT.
|
||||
|
||||
package autopilotevents
|
||||
|
||||
import (
|
||||
acl "github.com/hashicorp/consul/acl"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
structs "github.com/hashicorp/consul/agent/structs"
|
||||
|
||||
testing "testing"
|
||||
|
||||
types "github.com/hashicorp/consul/types"
|
||||
)
|
||||
|
||||
// MockStateStore is an autogenerated mock type for the StateStore type
|
||||
type MockStateStore struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// GetNodeID provides a mock function with given fields: _a0, _a1, _a2
|
||||
func (_m *MockStateStore) GetNodeID(_a0 types.NodeID, _a1 *acl.EnterpriseMeta, _a2 string) (uint64, *structs.Node, error) {
|
||||
ret := _m.Called(_a0, _a1, _a2)
|
||||
|
||||
var r0 uint64
|
||||
if rf, ok := ret.Get(0).(func(types.NodeID, *acl.EnterpriseMeta, string) uint64); ok {
|
||||
r0 = rf(_a0, _a1, _a2)
|
||||
} else {
|
||||
r0 = ret.Get(0).(uint64)
|
||||
}
|
||||
|
||||
var r1 *structs.Node
|
||||
if rf, ok := ret.Get(1).(func(types.NodeID, *acl.EnterpriseMeta, string) *structs.Node); ok {
|
||||
r1 = rf(_a0, _a1, _a2)
|
||||
} else {
|
||||
if ret.Get(1) != nil {
|
||||
r1 = ret.Get(1).(*structs.Node)
|
||||
}
|
||||
}
|
||||
|
||||
var r2 error
|
||||
if rf, ok := ret.Get(2).(func(types.NodeID, *acl.EnterpriseMeta, string) error); ok {
|
||||
r2 = rf(_a0, _a1, _a2)
|
||||
} else {
|
||||
r2 = ret.Error(2)
|
||||
}
|
||||
|
||||
return r0, r1, r2
|
||||
}
|
||||
|
||||
// NewMockStateStore creates a new instance of MockStateStore. It also registers a cleanup function to assert the mocks expectations.
|
||||
func NewMockStateStore(t testing.TB) *MockStateStore {
|
||||
mock := &MockStateStore{}
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
39
agent/consul/autopilotevents/mock_timeProvider_test.go
Normal file
@ -0,0 +1,39 @@
|
||||
// Code generated by mockery v2.11.0. DO NOT EDIT.
|
||||
|
||||
package autopilotevents
|
||||
|
||||
import (
|
||||
testing "testing"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
time "time"
|
||||
)
|
||||
|
||||
// mockTimeProvider is an autogenerated mock type for the timeProvider type
|
||||
type mockTimeProvider struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// Now provides a mock function with given fields:
|
||||
func (_m *mockTimeProvider) Now() time.Time {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 time.Time
|
||||
if rf, ok := ret.Get(0).(func() time.Time); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Get(0).(time.Time)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// newMockTimeProvider creates a new instance of mockTimeProvider. It also registers a cleanup function to assert the mocks expectations.
|
||||
func newMockTimeProvider(t testing.TB) *mockTimeProvider {
|
||||
mock := &mockTimeProvider{}
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
311
agent/consul/autopilotevents/ready_servers_events.go
Normal file
@ -0,0 +1,311 @@
|
||||
package autopilotevents
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
autopilot "github.com/hashicorp/raft-autopilot"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/stream"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/proto/pbsubscribe"
|
||||
"github.com/hashicorp/consul/types"
|
||||
)
|
||||
|
||||
const (
|
||||
EventTopicReadyServers stream.StringTopic = "ready-servers"
|
||||
)
|
||||
|
||||
// ReadyServerInfo includes information about a server that is ready
|
||||
// to handle incoming requests.
|
||||
type ReadyServerInfo struct {
|
||||
ID string
|
||||
Address string
|
||||
TaggedAddresses map[string]string
|
||||
Version string
|
||||
}
|
||||
|
||||
func (info *ReadyServerInfo) Equal(other *ReadyServerInfo) bool {
|
||||
if info.ID != other.ID {
|
||||
return false
|
||||
}
|
||||
|
||||
if info.Version != other.Version {
|
||||
return false
|
||||
}
|
||||
|
||||
if info.Address != other.Address {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(info.TaggedAddresses) != len(other.TaggedAddresses) {
|
||||
return false
|
||||
}
|
||||
|
||||
for tag, infoAddr := range info.TaggedAddresses {
|
||||
if otherAddr, ok := other.TaggedAddresses[tag]; !ok || infoAddr != otherAddr {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// EventPayloadReadyServers
|
||||
type EventPayloadReadyServers []ReadyServerInfo
|
||||
|
||||
func (e EventPayloadReadyServers) Subject() stream.Subject { return stream.SubjectNone }
|
||||
|
||||
func (e EventPayloadReadyServers) HasReadPermission(authz acl.Authorizer) bool {
|
||||
// Any service in the mesh will need access to where the servers live. Therefore
|
||||
// we check if the authorizer grants permissions on any service and if so then
|
||||
// we allow seeing where the servers are.
|
||||
var authzContext acl.AuthorizerContext
|
||||
structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier).
|
||||
FillAuthzContext(&authzContext)
|
||||
|
||||
return authz.ServiceWriteAny(&authzContext) == acl.Allow
|
||||
}
|
||||
|
||||
func (e EventPayloadReadyServers) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event {
|
||||
// TODO(peering) is this right?
|
||||
// TODO(agentless) is this right?
|
||||
panic("EventPayloadReadyServers does not implement ToSubscriptionEvent")
|
||||
}
|
||||
|
||||
func ExtractEventPayload(event stream.Event) (EventPayloadReadyServers, error) {
|
||||
if event.Topic != EventTopicReadyServers {
|
||||
return nil, fmt.Errorf("unexpected topic (%q) for a %q event", event.Topic, EventTopicReadyServers)
|
||||
}
|
||||
|
||||
if payload, ok := event.Payload.(EventPayloadReadyServers); ok {
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unexpected payload type %T for %q event", event.Payload, EventTopicReadyServers)
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
GetStore func() StateStore
|
||||
Publisher Publisher
|
||||
timeProvider timeProvider
|
||||
}
|
||||
|
||||
// ReadyServersEventPublisher is capable of tracking changes to ready servers
|
||||
// between consecutive calls to PublishReadyServersEvents. It will then publish
|
||||
// "ready-servers" events as necessary.
|
||||
type ReadyServersEventPublisher struct {
|
||||
Config
|
||||
previous EventPayloadReadyServers
|
||||
|
||||
snapshotLock sync.RWMutex
|
||||
snapshot []stream.Event
|
||||
}
|
||||
|
||||
func NewReadyServersEventPublisher(config Config) *ReadyServersEventPublisher {
|
||||
return &ReadyServersEventPublisher{
|
||||
Config: config,
|
||||
snapshot: []stream.Event{
|
||||
{
|
||||
Topic: EventTopicReadyServers,
|
||||
Index: 0,
|
||||
Payload: EventPayloadReadyServers{},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
//go:generate mockery --name StateStore --inpackage --testonly
|
||||
type StateStore interface {
|
||||
GetNodeID(types.NodeID, *acl.EnterpriseMeta, string) (uint64, *structs.Node, error)
|
||||
}
|
||||
|
||||
//go:generate mockery --name Publisher --inpackage --testonly
|
||||
type Publisher interface {
|
||||
Publish([]stream.Event)
|
||||
}
|
||||
|
||||
//go:generate mockery --name timeProvider --inpackage --testonly
|
||||
type timeProvider interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
// PublishReadyServersEvents will publish a "ready-servers" event if the list of
|
||||
// ready servers has changed since the last time events were published.
|
||||
func (r *ReadyServersEventPublisher) PublishReadyServersEvents(state *autopilot.State) {
|
||||
if events, ok := r.readyServersEvents(state); ok {
|
||||
// update the latest snapshot so that any new event subscription will see
|
||||
// the latest state.
|
||||
r.snapshotLock.Lock()
|
||||
r.snapshot = events
|
||||
r.snapshotLock.Unlock()
|
||||
|
||||
// if the event publisher were not able to keep up with processing events
|
||||
// then it's possible this blocks. It could cause autopilot to not update its
|
||||
// state as often as it should. However if this blocks for over 10s then
|
||||
// not updating the autopilot state as quickly is likely the least of our
|
||||
// concerns. If we need to make this async then we probably need to single
|
||||
// flight these to ensure proper event ordering.
|
||||
r.Publisher.Publish(events)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ReadyServersEventPublisher) readyServersEvents(state *autopilot.State) ([]stream.Event, bool) {
|
||||
// First, we need to pull all the ready servers out from the autopilot state.
|
||||
servers := r.autopilotStateToReadyServers(state)
|
||||
|
||||
// Next, we sort the servers list to make comparison easier later on. We do
|
||||
// this outside of the next length check conditional block to ensure that all
|
||||
// values of previousReadyServers we store will be sorted and the future
|
||||
// comparisons will remain valid.
|
||||
sort.Slice(servers, func(i, j int) bool {
|
||||
// no two servers can have the same id so this is sufficient
|
||||
return servers[i].ID < servers[j].ID
|
||||
})
|
||||
|
||||
// If the number of ready servers hasn't changed then we need to inspect individual
|
||||
// servers to see if there are differences. If the number of servers has changed
|
||||
// we know that an event should be generated and sent.
|
||||
if len(r.previous) == len(servers) {
|
||||
diff := false
|
||||
// We are relying on the fact that both of the slices will be sorted and that
|
||||
// we don't care what the actual differences are but instead just that they
|
||||
// have differences.
|
||||
for i := 0; i < len(servers); i++ {
|
||||
if !r.previous[i].Equal(&servers[i]) {
|
||||
diff = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// The list of ready servers is identical to the previous ones. Therefore
|
||||
// we will not send any event.
|
||||
if !diff {
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
r.previous = servers
|
||||
|
||||
return []stream.Event{r.newReadyServersEvent(servers)}, true
|
||||
}
|
||||
|
||||
// autopilotStateToReadyServers will iterate through all servers in the autopilot
|
||||
// state and compile a list of servers which are "ready". Readiness means that
|
||||
// they would be an acceptable target for stale queries.
|
||||
func (r *ReadyServersEventPublisher) autopilotStateToReadyServers(state *autopilot.State) EventPayloadReadyServers {
|
||||
var servers EventPayloadReadyServers
|
||||
for _, srv := range state.Servers {
|
||||
// All healthy servers are caught up enough to be included in the ready servers list.
|
||||
// Servers with voting rights that are still healthy according to Serf are
|
||||
// also included as they have likely just fallen behind the leader a little
|
||||
// after initially replicating state. They are still acceptable targets
|
||||
// for most stale queries and clients can bound the staleness if necessary.
|
||||
// Including them is a means to prevent flapping the list of servers we
|
||||
// advertise as ready and flooding the network with notifications to all
|
||||
// dataplanes of server updates.
|
||||
//
|
||||
// TODO (agentless) for a non-voting server that is still alive but fell
|
||||
// behind, should we cause it to be removed? For voters we know they were caught
|
||||
// up at some point but for non-voters we cannot know the same thing.
|
||||
if srv.Health.Healthy || (srv.HasVotingRights() && srv.Server.NodeStatus == autopilot.NodeAlive) {
|
||||
// autopilot information contains addresses in the <host>:<port> form. We only care about the
|
||||
// host so we parse it out here and discard the port.
|
||||
host, err := extractHost(string(srv.Server.Address))
|
||||
if err != nil || host == "" {
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
servers = append(servers, ReadyServerInfo{
|
||||
ID: string(srv.Server.ID),
|
||||
Address: host,
|
||||
Version: srv.Server.Version,
|
||||
TaggedAddresses: r.getTaggedAddresses(srv),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return servers
|
||||
}
|
||||
|
||||
// getTaggedAddresses will get the tagged addresses for the given server or return nil
|
||||
// if it encounters an error or an unregistered server.
|
||||
func (r *ReadyServersEventPublisher) getTaggedAddresses(srv *autopilot.ServerState) map[string]string {
|
||||
// we have no callback to look up the tagged addresses so we can return early
|
||||
if r.GetStore == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Assuming we have been provided a callback to get a state store implementation, then
|
||||
// we will attempt to look up the node for the autopilot server. We use this to get the
|
||||
// tagged addresses so that consumers of these events will be able to distinguish LAN
|
||||
// vs WAN addresses as well as IP protocol differentiation. At first I thought we may
|
||||
// need to hook into catalog events so that if the tagged addresses change then
|
||||
// we can synthesize new events. That would be pretty complex so this code does not
|
||||
// deal with that. The reasoning why that is probably okay is that autopilot will
|
||||
// send us the state at least once every 30s. That means that we will grab the nodes
|
||||
// from the catalog that often and publish the events. So while it's not quite
|
||||
// as responsive as actually watching for the Catalog changes, it's MUCH simpler to
|
||||
// code and reason about and having those addresses be updated within 30s is good enough.
|
||||
_, node, err := r.GetStore().GetNodeID(types.NodeID(srv.Server.ID), structs.NodeEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
|
||||
if err != nil || node == nil {
|
||||
// no catalog information means we should return a nil address map
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(node.TaggedAddresses) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
addrs := make(map[string]string)
|
||||
for tag, address := range node.TaggedAddresses {
|
||||
// just like for the Node's main Address, we only care about the IPs and not the
|
||||
// port so we parse the host out and discard the port.
|
||||
host, err := extractHost(address)
|
||||
if err != nil || host == "" {
|
||||
continue
|
||||
}
|
||||
addrs[tag] = host
|
||||
}
|
||||
|
||||
return addrs
|
||||
}
|
||||
|
||||
// newReadyServersEvent will create a stream.Event with the provided ready server info.
|
||||
func (r *ReadyServersEventPublisher) newReadyServersEvent(servers EventPayloadReadyServers) stream.Event {
|
||||
now := time.Now()
|
||||
if r.timeProvider != nil {
|
||||
now = r.timeProvider.Now()
|
||||
}
|
||||
return stream.Event{
|
||||
Topic: EventTopicReadyServers,
|
||||
Index: uint64(now.UnixMicro()),
|
||||
Payload: servers,
|
||||
}
|
||||
}
|
||||
|
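Rather than a Raft index, the event Index here is the publish time in microseconds. That is why the snapshot-handler test later in this diff expects Index 1649933760000000 for testTime (2022-04-14 10:56:00 UTC); a quick check:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	testTime := time.Date(2022, 4, 14, 10, 56, 0, 0, time.UTC)
	// 1649933760 seconds since the epoch * 1e6 = 1649933760000000,
	// the Index asserted in TestReadyServerEventsSnapshotHandler.
	fmt.Println(uint64(testTime.UnixMicro()))
}
```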
||||
// HandleSnapshot is the EventPublisher callback to generate a snapshot for the "ready-servers" event streams.
|
||||
func (r *ReadyServersEventPublisher) HandleSnapshot(_ stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
|
||||
r.snapshotLock.RLock()
|
||||
defer r.snapshotLock.RUnlock()
|
||||
buf.Append(r.snapshot)
|
||||
return r.snapshot[0].Index, nil
|
||||
}
|
||||
|
||||
// extractHost is a small convenience function to catch errors regarding
|
||||
// missing ports from the net.SplitHostPort function.
|
||||
func extractHost(addr string) (string, error) {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err == nil {
|
||||
return host, nil
|
||||
}
|
||||
if ae, ok := err.(*net.AddrError); ok && ae.Err == "missing port in address" {
|
||||
return addr, nil
|
||||
}
|
||||
return "", err
|
||||
}
|
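For reference, the two address shapes that appear in the example autopilot state in the test file below (host:port and a bare host) both pass through extractHost cleanly. The program here copies the function verbatim to show its outputs.

```go
package main

import (
	"fmt"
	"net"
)

// extractHost is copied from the function above: it strips a port when one is
// present and passes bare addresses through unchanged.
func extractHost(addr string) (string, error) {
	host, _, err := net.SplitHostPort(addr)
	if err == nil {
		return host, nil
	}
	if ae, ok := err.(*net.AddrError); ok && ae.Err == "missing port in address" {
		return addr, nil
	}
	return "", err
}

func main() {
	for _, addr := range []string{"198.18.0.2:8300", "198.18.0.6"} {
		host, err := extractHost(addr)
		fmt.Printf("%-18s -> %q err=%v\n", addr, host, err)
	}
}
```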
647
agent/consul/autopilotevents/ready_servers_events_test.go
Normal file
@ -0,0 +1,647 @@
|
||||
package autopilotevents
|
||||
|
||||
import (
|
||||
"testing"
|
||||
time "time"
|
||||
|
||||
"github.com/hashicorp/raft"
|
||||
autopilot "github.com/hashicorp/raft-autopilot"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/stream"
|
||||
structs "github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/proto/pbsubscribe"
|
||||
types "github.com/hashicorp/consul/types"
|
||||
)
|
||||
|
||||
var testTime = time.Date(2022, 4, 14, 10, 56, 00, 0, time.UTC)
|
||||
|
||||
var exampleState = &autopilot.State{
|
||||
Servers: map[raft.ServerID]*autopilot.ServerState{
|
||||
"792ae13c-d765-470b-852c-e073fdb6e849": {
|
||||
Health: autopilot.ServerHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
State: autopilot.RaftLeader,
|
||||
Server: autopilot.Server{
|
||||
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||
Address: "198.18.0.2:8300",
|
||||
Version: "v1.12.0",
|
||||
NodeStatus: autopilot.NodeAlive,
|
||||
},
|
||||
},
|
||||
"65e79ff4-bbce-467b-a9d6-725c709fa985": {
|
||||
Health: autopilot.ServerHealth{
|
||||
Healthy: true,
|
||||
},
|
||||
State: autopilot.RaftVoter,
|
||||
Server: autopilot.Server{
|
||||
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||
Address: "198.18.0.3:8300",
|
||||
Version: "v1.12.0",
|
||||
NodeStatus: autopilot.NodeAlive,
|
||||
},
|
||||
},
|
||||
// this server is up according to Serf but is unhealthy
|
||||
// due to having an index that is behind
|
||||
"db11f0ac-0cbe-4215-80cc-b4e843f4df1e": {
|
||||
Health: autopilot.ServerHealth{
|
||||
Healthy: false,
|
||||
},
|
||||
State: autopilot.RaftVoter,
|
||||
Server: autopilot.Server{
|
||||
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
|
||||
Address: "198.18.0.4:8300",
|
||||
Version: "v1.12.0",
|
||||
NodeStatus: autopilot.NodeAlive,
|
||||
},
|
||||
},
|
||||
// this server is up according to Serf but is unhealthy
|
||||
// due to having an index that is behind. It is a non-voter
|
||||
// and thus will be filtered out
|
||||
"4c48a154-8176-4e14-ba5d-20bf1f784a7e": {
|
||||
Health: autopilot.ServerHealth{
|
||||
Healthy: false,
|
||||
},
|
||||
State: autopilot.RaftNonVoter,
|
||||
Server: autopilot.Server{
|
||||
ID: "4c48a154-8176-4e14-ba5d-20bf1f784a7e",
|
||||
Address: "198.18.0.5:8300",
|
||||
Version: "v1.12.0",
|
||||
NodeStatus: autopilot.NodeAlive,
|
||||
},
|
||||
},
|
||||
// this is a voter that has died
|
||||
"7a22eec8-de85-43a6-a76e-00b427ef6627": {
|
||||
Health: autopilot.ServerHealth{
|
||||
Healthy: false,
|
||||
},
|
||||
State: autopilot.RaftVoter,
|
||||
Server: autopilot.Server{
|
||||
ID: "7a22eec8-de85-43a6-a76e-00b427ef6627",
|
||||
Address: "198.18.0.6",
|
||||
Version: "v1.12.0",
|
||||
NodeStatus: autopilot.NodeFailed,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestEventPayloadReadyServers_HasReadPermission(t *testing.T) {
|
||||
t.Run("no service:write", func(t *testing.T) {
|
||||
hasRead := EventPayloadReadyServers{}.HasReadPermission(acl.DenyAll())
|
||||
require.False(t, hasRead)
|
||||
})
|
||||
|
||||
t.Run("has service:write", func(t *testing.T) {
|
||||
policy, err := acl.NewPolicyFromSource(`
|
||||
service "foo" {
|
||||
policy = "write"
|
||||
}
|
||||
`, acl.SyntaxCurrent, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
authz, err := acl.NewPolicyAuthorizerWithDefaults(acl.DenyAll(), []*acl.Policy{policy}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
hasRead := EventPayloadReadyServers{}.HasReadPermission(authz)
|
||||
require.True(t, hasRead)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAutopilotStateToReadyServers(t *testing.T) {
|
||||
expected := EventPayloadReadyServers{
|
||||
{
|
||||
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||
Address: "198.18.0.2",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||
Address: "198.18.0.3",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
|
||||
Address: "198.18.0.4",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
}
|
||||
|
||||
r := ReadyServersEventPublisher{}
|
||||
|
||||
actual := r.autopilotStateToReadyServers(exampleState)
|
||||
require.ElementsMatch(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestAutopilotStateToReadyServersWithTaggedAddresses(t *testing.T) {
|
||||
expected := EventPayloadReadyServers{
|
||||
{
|
||||
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||
Address: "198.18.0.2",
|
||||
TaggedAddresses: map[string]string{"wan": "5.4.3.2"},
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||
Address: "198.18.0.3",
|
||||
TaggedAddresses: map[string]string{"wan": "1.2.3.4"},
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
|
||||
Address: "198.18.0.4",
|
||||
TaggedAddresses: map[string]string{"wan": "9.8.7.6"},
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
}
|
||||
|
||||
store := &MockStateStore{}
|
||||
t.Cleanup(func() { store.AssertExpectations(t) })
|
||||
store.On("GetNodeID",
|
||||
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
|
||||
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||
structs.DefaultPeerKeyword,
|
||||
).Once().Return(
|
||||
uint64(0),
|
||||
&structs.Node{TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
|
||||
nil,
|
||||
)
|
||||
|
||||
store.On("GetNodeID",
|
||||
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
|
||||
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||
structs.DefaultPeerKeyword,
|
||||
).Once().Return(
|
||||
uint64(0),
|
||||
&structs.Node{TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
|
||||
nil,
|
||||
)
|
||||
|
||||
store.On("GetNodeID",
|
||||
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
|
||||
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||
structs.DefaultPeerKeyword,
|
||||
).Once().Return(
|
||||
uint64(0),
|
||||
&structs.Node{TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
|
||||
nil,
|
||||
)
|
||||
|
||||
r := NewReadyServersEventPublisher(Config{
|
||||
GetStore: func() StateStore { return store },
|
||||
})
|
||||
|
||||
actual := r.autopilotStateToReadyServers(exampleState)
|
||||
require.ElementsMatch(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestAutopilotReadyServersEvents(t *testing.T) {
|
||||
// we have already tested the ReadyServerInfo extraction within the
|
||||
// TestAutopilotStateToReadyServers test. Therefore this test is going
|
||||
// to focus only on the change detection.
|
||||
//
|
||||
// * - added server
|
||||
// * - removed server
|
||||
// * - server with address changed
|
||||
// * - upgraded server with version change
|
||||
|
||||
expectedServers := EventPayloadReadyServers{
|
||||
{
|
||||
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||
Address: "198.18.0.3",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||
Address: "198.18.0.2",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
|
||||
Address: "198.18.0.4",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
// The elements of this slice must already be sorted
|
||||
previous EventPayloadReadyServers
|
||||
changeDetected bool
|
||||
}
|
||||
|
||||
cases := map[string]testCase{
|
||||
"no-change": {
|
||||
previous: EventPayloadReadyServers{
|
||||
{
|
||||
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||
Address: "198.18.0.3",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||
Address: "198.18.0.2",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
|
||||
Address: "198.18.0.4",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
},
|
||||
changeDetected: false,
|
||||
},
|
||||
"server-added": {
|
||||
previous: EventPayloadReadyServers{
|
||||
{
|
||||
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||
Address: "198.18.0.3",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||
Address: "198.18.0.2",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
// server with id db11f0ac-0cbe-4215-80cc-b4e843f4df1e will be added.
|
||||
},
|
||||
changeDetected: true,
|
||||
},
|
||||
"server-removed": {
|
||||
previous: EventPayloadReadyServers{
|
||||
{
|
||||
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||
Address: "198.18.0.3",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||
Address: "198.18.0.2",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
// this server isn't present in the state and will be removed
|
||||
{
|
||||
ID: "7e3235de-8a75-4c8d-9ec3-847ca87d07e8",
|
||||
Address: "198.18.0.5",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
|
||||
Address: "198.18.0.4",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
},
|
||||
changeDetected: true,
|
||||
},
|
||||
"address-change": {
|
||||
previous: EventPayloadReadyServers{
|
||||
{
|
||||
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||
// this value is different from the state and should
|
||||
// cause an event to be generated
|
||||
Address: "198.18.0.9",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||
Address: "198.18.0.2",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
|
||||
Address: "198.18.0.4",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
},
|
||||
changeDetected: true,
|
||||
},
|
||||
"upgraded-version": {
|
||||
previous: EventPayloadReadyServers{
|
||||
{
|
||||
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||
Address: "198.18.0.3",
|
||||
// This is v1.12.0 in the state and therefore an
|
||||
// event should be generated
|
||||
Version: "v1.11.4",
|
||||
},
|
||||
{
|
||||
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||
Address: "198.18.0.2",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
|
||||
Address: "198.18.0.4",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
},
|
||||
changeDetected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tcase := range cases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
r := ReadyServersEventPublisher{
|
||||
previous: tcase.previous,
|
||||
}
|
||||
events, changeDetected := r.readyServersEvents(exampleState)
|
||||
require.Equal(t, tcase.changeDetected, changeDetected, "servers: %+v", events)
|
||||
if tcase.changeDetected {
|
||||
require.Len(t, events, 1)
|
||||
require.Equal(t, EventTopicReadyServers, events[0].Topic)
|
||||
payload, ok := events[0].Payload.(EventPayloadReadyServers)
|
||||
require.True(t, ok)
|
||||
require.ElementsMatch(t, expectedServers, payload)
|
||||
} else {
|
||||
require.Empty(t, events)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAutopilotPublishReadyServersEvents(t *testing.T) {
|
||||
t.Run("publish", func(t *testing.T) {
|
||||
pub := &MockPublisher{}
|
||||
pub.On("Publish", []stream.Event{
|
||||
{
|
||||
Topic: EventTopicReadyServers,
|
||||
Index: uint64(testTime.UnixMicro()),
|
||||
Payload: EventPayloadReadyServers{
|
||||
{
|
||||
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||
Address: "198.18.0.3",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||
Address: "198.18.0.2",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
|
||||
Address: "198.18.0.4",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
mtime := &mockTimeProvider{}
|
||||
mtime.On("Now").Return(testTime).Once()
|
||||
|
||||
t.Cleanup(func() {
|
||||
mtime.AssertExpectations(t)
|
||||
pub.AssertExpectations(t)
|
||||
})
|
||||
|
||||
r := NewReadyServersEventPublisher(Config{
|
||||
Publisher: pub,
|
||||
timeProvider: mtime,
|
||||
})
|
||||
|
||||
r.PublishReadyServersEvents(exampleState)
|
||||
})
|
||||
|
||||
t.Run("suppress", func(t *testing.T) {
|
||||
pub := &MockPublisher{}
|
||||
mtime := &mockTimeProvider{}
|
||||
|
||||
t.Cleanup(func() {
|
||||
mtime.AssertExpectations(t)
|
||||
pub.AssertExpectations(t)
|
||||
})
|
||||
|
||||
r := NewReadyServersEventPublisher(Config{
|
||||
Publisher: pub,
|
||||
timeProvider: mtime,
|
||||
})
|
||||
|
||||
r.previous = EventPayloadReadyServers{
|
||||
{
|
||||
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||
Address: "198.18.0.3",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||
Address: "198.18.0.2",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
|
||||
Address: "198.18.0.4",
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
}
|
||||
|
||||
r.PublishReadyServersEvents(exampleState)
|
||||
})
|
||||
}
|
||||
|
||||
type MockAppender struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
func (m *MockAppender) Append(events []stream.Event) {
|
||||
m.Called(events)
|
||||
}
|
||||
|
||||
func TestReadyServerEventsSnapshotHandler(t *testing.T) {
|
||||
buf := MockAppender{}
|
||||
buf.On("Append", []stream.Event{
|
||||
{
|
||||
Topic: EventTopicReadyServers,
|
||||
Index: 0,
|
||||
Payload: EventPayloadReadyServers{},
|
||||
},
|
||||
})
|
||||
buf.On("Append", []stream.Event{
|
||||
{
|
||||
Topic: EventTopicReadyServers,
|
||||
Index: 1649933760000000,
|
||||
Payload: EventPayloadReadyServers{
|
||||
{
|
||||
ID: "65e79ff4-bbce-467b-a9d6-725c709fa985",
|
||||
Address: "198.18.0.3",
|
||||
TaggedAddresses: map[string]string{"wan": "1.2.3.4"},
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "792ae13c-d765-470b-852c-e073fdb6e849",
|
||||
Address: "198.18.0.2",
|
||||
TaggedAddresses: map[string]string{"wan": "5.4.3.2"},
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
{
|
||||
ID: "db11f0ac-0cbe-4215-80cc-b4e843f4df1e",
|
||||
Address: "198.18.0.4",
|
||||
TaggedAddresses: map[string]string{"wan": "9.8.7.6"},
|
||||
Version: "v1.12.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
}).Once()
|
||||
|
||||
mtime := mockTimeProvider{}
|
||||
mtime.On("Now").Return(testTime).Once()
|
||||
|
||||
store := &MockStateStore{}
|
||||
t.Cleanup(func() { store.AssertExpectations(t) })
|
||||
store.On("GetNodeID",
|
||||
types.NodeID("792ae13c-d765-470b-852c-e073fdb6e849"),
|
||||
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||
structs.DefaultPeerKeyword,
|
||||
).Once().Return(
|
||||
uint64(0),
|
||||
&structs.Node{TaggedAddresses: map[string]string{"wan": "5.4.3.2"}},
|
||||
nil,
|
||||
)
|
||||
|
||||
store.On("GetNodeID",
|
||||
types.NodeID("65e79ff4-bbce-467b-a9d6-725c709fa985"),
|
||||
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||
structs.DefaultPeerKeyword,
|
||||
).Once().Return(
|
||||
uint64(0),
|
||||
&structs.Node{TaggedAddresses: map[string]string{"wan": "1.2.3.4"}},
|
||||
nil,
|
||||
)
|
||||
|
||||
store.On("GetNodeID",
|
||||
types.NodeID("db11f0ac-0cbe-4215-80cc-b4e843f4df1e"),
|
||||
structs.NodeEnterpriseMetaInDefaultPartition(),
|
||||
structs.DefaultPeerKeyword,
|
||||
).Once().Return(
|
||||
uint64(0),
|
||||
&structs.Node{TaggedAddresses: map[string]string{"wan": "9.8.7.6"}},
|
||||
nil,
|
||||
)
|
||||
|
||||
t.Cleanup(func() {
|
||||
buf.AssertExpectations(t)
|
||||
store.AssertExpectations(t)
|
||||
mtime.AssertExpectations(t)
|
||||
})
|
||||
|
||||
r := NewReadyServersEventPublisher(Config{
|
||||
GetStore: func() StateStore { return store },
|
||||
timeProvider: &mtime,
|
||||
})
|
||||
|
||||
req := stream.SubscribeRequest{
|
||||
Topic: EventTopicReadyServers,
|
||||
Subject: stream.SubjectNone,
|
||||
}
|
||||
|
||||
// get the first snapshot that should have the zero value event
|
||||
_, err := r.HandleSnapshot(req, &buf)
|
||||
require.NoError(t, err)
|
||||
|
||||
// setup the value to be returned by the snapshot handler
|
||||
r.snapshot, _ = r.readyServersEvents(exampleState)
|
||||
|
||||
// now get the second snapshot which has actual servers
|
||||
_, err = r.HandleSnapshot(req, &buf)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
type fakePayload struct{}
|
||||
|
||||
func (e fakePayload) Subject() stream.Subject { return stream.SubjectNone }
|
||||
|
||||
func (e fakePayload) HasReadPermission(authz acl.Authorizer) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (e fakePayload) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event {
|
||||
panic("fakePayload does not implement ToSubscriptionEvent")
|
||||
}
|
||||
|
||||
func TestExtractEventPayload(t *testing.T) {
|
||||
t.Run("wrong-topic", func(t *testing.T) {
|
||||
payload, err := ExtractEventPayload(stream.NewCloseSubscriptionEvent([]string{"foo"}))
|
||||
require.Nil(t, payload)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "unexpected topic")
|
||||
})
|
||||
|
||||
t.Run("unexpected-payload", func(t *testing.T) {
|
||||
payload, err := ExtractEventPayload(stream.Event{
|
||||
Topic: EventTopicReadyServers,
|
||||
Payload: fakePayload{},
|
||||
})
|
||||
require.Nil(t, payload)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "unexpected payload type")
|
||||
})
|
||||
|
||||
t.Run("success", func(t *testing.T) {
|
||||
expected := EventPayloadReadyServers{
|
||||
{
|
||||
ID: "a7c340ae-ce17-47da-895c-af2509767b3d",
|
||||
Address: "198.18.0.1",
|
||||
Version: "1.2.3",
|
||||
},
|
||||
}
|
||||
actual, err := ExtractEventPayload(stream.Event{
|
||||
Topic: EventTopicReadyServers,
|
||||
Payload: expected,
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestReadyServerInfo_Equal(t *testing.T) {
|
||||
base := func() *ReadyServerInfo {
|
||||
return &ReadyServerInfo{
|
||||
ID: "0356e5ae-ed6b-4024-b953-e1b6a8f0f81b",
|
||||
Version: "1.12.0",
|
||||
Address: "198.18.0.1",
|
||||
TaggedAddresses: map[string]string{
|
||||
"wan": "1.2.3.4",
|
||||
},
|
||||
}
|
||||
}
|
||||
type testCase struct {
|
||||
modify func(i *ReadyServerInfo)
|
||||
equal bool
|
||||
}
|
||||
|
||||
cases := map[string]testCase{
|
||||
"unmodified": {
|
||||
equal: true,
|
||||
},
|
||||
"id-mod": {
|
||||
modify: func(i *ReadyServerInfo) { i.ID = "30f8f451-e54b-4c7e-a533-b55dddb51be6" },
|
||||
},
|
||||
"version-mod": {
|
||||
modify: func(i *ReadyServerInfo) { i.Version = "1.12.1" },
|
||||
},
|
||||
"address-mod": {
|
||||
modify: func(i *ReadyServerInfo) { i.Address = "198.18.0.2" },
|
||||
},
|
||||
"tagged-addresses-added": {
|
||||
modify: func(i *ReadyServerInfo) { i.TaggedAddresses["wan_ipv4"] = "1.2.3.4" },
|
||||
},
|
||||
"tagged-addresses-mod": {
|
||||
modify: func(i *ReadyServerInfo) { i.TaggedAddresses["wan"] = "4.3.2.1" },
|
||||
},
|
||||
}
|
||||
|
||||
for name, tcase := range cases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
original := base()
|
||||
modified := base()
|
||||
if tcase.modify != nil {
|
||||
tcase.modify(modified)
|
||||
}
|
||||
|
||||
require.Equal(t, tcase.equal, original.Equal(modified))
|
||||
|
||||
})
|
||||
}
|
||||
}
|
@ -133,7 +133,7 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error
|
||||
}
|
||||
|
||||
// Check the complete register request against the given ACL policy.
|
||||
_, ns, err := state.NodeServices(nil, args.Node, entMeta)
|
||||
_, ns, err := state.NodeServices(nil, args.Node, entMeta, args.PeerName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Node lookup failed: %v", err)
|
||||
}
|
||||
@ -367,7 +367,7 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e
|
||||
|
||||
var ns *structs.NodeService
|
||||
if args.ServiceID != "" {
|
||||
_, ns, err = state.NodeService(args.Node, args.ServiceID, &args.EnterpriseMeta)
|
||||
_, ns, err = state.NodeService(args.Node, args.ServiceID, &args.EnterpriseMeta, args.PeerName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Service lookup failed: %v", err)
|
||||
}
|
||||
@ -375,7 +375,7 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e
|
||||
|
||||
var nc *structs.HealthCheck
|
||||
if args.CheckID != "" {
|
||||
_, nc, err = state.NodeCheck(args.Node, args.CheckID, &args.EnterpriseMeta)
|
||||
_, nc, err = state.NodeCheck(args.Node, args.CheckID, &args.EnterpriseMeta, args.PeerName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Check lookup failed: %v", err)
|
||||
}
|
||||
@ -486,9 +486,9 @@ func (c *Catalog) ListNodes(args *structs.DCSpecificRequest, reply *structs.Inde
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
var err error
|
||||
if len(args.NodeMetaFilters) > 0 {
|
||||
reply.Index, reply.Nodes, err = state.NodesByMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta)
|
||||
reply.Index, reply.Nodes, err = state.NodesByMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
|
||||
} else {
|
||||
reply.Index, reply.Nodes, err = state.Nodes(ws, &args.EnterpriseMeta)
|
||||
reply.Index, reply.Nodes, err = state.Nodes(ws, &args.EnterpriseMeta, args.PeerName)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
@ -546,9 +546,9 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
var err error
|
||||
if len(args.NodeMetaFilters) > 0 {
|
||||
reply.Index, reply.Services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta)
|
||||
reply.Index, reply.Services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
|
||||
} else {
|
||||
reply.Index, reply.Services, err = state.Services(ws, &args.EnterpriseMeta)
|
||||
reply.Index, reply.Services, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
@ -584,7 +584,7 @@ func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.In
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
index, services, err := state.ServiceList(ws, &args.EnterpriseMeta)
|
||||
index, services, err := state.ServiceList(ws, &args.EnterpriseMeta, args.PeerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -611,13 +611,13 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
|
||||
switch {
|
||||
case args.Connect:
|
||||
f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.ServiceNodes, error) {
|
||||
return s.ConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta)
|
||||
return s.ConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName)
|
||||
}
|
||||
|
||||
default:
|
||||
f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.ServiceNodes, error) {
|
||||
if args.ServiceAddress != "" {
|
||||
return s.ServiceAddressNodes(ws, args.ServiceAddress, &args.EnterpriseMeta)
|
||||
return s.ServiceAddressNodes(ws, args.ServiceAddress, &args.EnterpriseMeta, args.PeerName)
|
||||
}
|
||||
|
||||
if args.TagFilter {
|
||||
@ -630,10 +630,10 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
|
||||
tags = []string{args.ServiceTag}
|
||||
}
|
||||
|
||||
return s.ServiceTagNodes(ws, args.ServiceName, tags, &args.EnterpriseMeta)
|
||||
return s.ServiceTagNodes(ws, args.ServiceName, tags, &args.EnterpriseMeta, args.PeerName)
|
||||
}
|
||||
|
||||
return s.ServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta)
|
||||
return s.ServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName)
|
||||
}
|
||||
}
|
||||
|
||||
@ -768,7 +768,7 @@ func (c *Catalog) NodeServices(args *structs.NodeSpecificRequest, reply *structs
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
index, services, err := state.NodeServices(ws, args.Node, &args.EnterpriseMeta)
|
||||
index, services, err := state.NodeServices(ws, args.Node, &args.EnterpriseMeta, args.PeerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -824,7 +824,7 @@ func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *stru
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
index, services, err := state.NodeServiceList(ws, args.Node, &args.EnterpriseMeta)
|
||||
index, services, err := state.NodeServiceList(ws, args.Node, &args.EnterpriseMeta, args.PeerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1650,6 +1650,7 @@ func TestCatalog_ListServices_Stale(t *testing.T) {
|
||||
c.PrimaryDatacenter = "dc1" // Enable ACLs!
|
||||
c.ACLsEnabled = true
|
||||
c.Bootstrap = false // Disable bootstrap
|
||||
c.RPCHoldTimeout = 10 * time.Millisecond
|
||||
})
|
||||
defer os.RemoveAll(dir2)
|
||||
defer s2.Shutdown()
|
||||
|
@ -291,20 +291,26 @@ TRY:
}

// Move off to another server, and see if we can retry.
c.logger.Error("RPC failed to server",
"method", method,
"server", server.Addr,
"error", rpcErr,
)
metrics.IncrCounterWithLabels([]string{"client", "rpc", "failed"}, 1, []metrics.Label{{Name: "server", Value: server.Name}})
manager.NotifyFailedServer(server)

// Use the zero value for RPCInfo if the request doesn't implement RPCInfo
info, _ := args.(structs.RPCInfo)
if retry := canRetry(info, rpcErr, firstCheck, c.config); !retry {
c.logger.Error("RPC failed to server",
"method", method,
"server", server.Addr,
"error", rpcErr,
)
metrics.IncrCounterWithLabels([]string{"client", "rpc", "failed"}, 1, []metrics.Label{{Name: "server", Value: server.Name}})
return rpcErr
}

c.logger.Warn("Retrying RPC to server",
"method", method,
"server", server.Addr,
"error", rpcErr,
)

// We can wait a bit and retry!
jitter := lib.RandomStagger(c.config.RPCHoldTimeout / structs.JitterFraction)
select {
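
The hunk above retries a failed RPC against another server only when the error is retryable, and sleeps for a randomized fraction of the hold timeout before the next attempt. A minimal sketch of that jittered-retry pattern; the helper names here (randomStagger, errRetryable) are stand-ins for the real lib/structs helpers, not their actual signatures:

```go
package rpcretrysketch

import (
	"errors"
	"math/rand"
	"time"
)

var errRetryable = errors.New("retryable rpc error")

// randomStagger returns a random duration in [0, d); a stand-in for lib.RandomStagger.
func randomStagger(d time.Duration) time.Duration {
	return time.Duration(rand.Int63n(int64(d)))
}

// callWithRetry sketches the loop: try one server, and on a retryable error
// wait a jittered fraction of holdTimeout before moving on to the next one.
func callWithRetry(calls []func() error, holdTimeout time.Duration, jitterFraction int64) error {
	var lastErr error
	for _, call := range calls {
		if lastErr = call(); lastErr == nil {
			return nil
		}
		if !errors.Is(lastErr, errRetryable) { // stand-in for canRetry(info, err, ...)
			return lastErr
		}
		time.Sleep(randomStagger(holdTimeout / time.Duration(jitterFraction)))
	}
	return lastErr
}
```
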
@ -48,6 +48,7 @@ func testClientConfig(t *testing.T) (string, *Config) {
|
||||
config.SerfLANConfig.MemberlistConfig.ProbeTimeout = 200 * time.Millisecond
|
||||
config.SerfLANConfig.MemberlistConfig.ProbeInterval = time.Second
|
||||
config.SerfLANConfig.MemberlistConfig.GossipInterval = 100 * time.Millisecond
|
||||
config.RPCHoldTimeout = 10 * time.Second
|
||||
return dir, config
|
||||
}
|
||||
|
||||
@ -72,7 +73,7 @@ func testClientWithConfigWithErr(t *testing.T, cb func(c *Config)) (string, *Cli
|
||||
}
|
||||
|
||||
// Apply config to copied fields because many tests only set the old
|
||||
//values.
|
||||
// values.
|
||||
config.ACLResolverSettings.ACLsEnabled = config.ACLsEnabled
|
||||
config.ACLResolverSettings.NodeName = config.NodeName
|
||||
config.ACLResolverSettings.Datacenter = config.Datacenter
|
||||
@ -509,7 +510,7 @@ func newDefaultDeps(t *testing.T, c *Config) Deps {
|
||||
|
||||
logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
|
||||
Name: c.NodeName,
|
||||
Level: testutil.TestLogLevel,
|
||||
Level: hclog.Trace,
|
||||
Output: testutil.NewLogBuffer(t),
|
||||
})
|
||||
|
||||
@ -521,13 +522,16 @@ func newDefaultDeps(t *testing.T, c *Config) Deps {
|
||||
resolver.Register(builder)
|
||||
|
||||
connPool := &pool.ConnPool{
|
||||
Server: false,
|
||||
SrcAddr: c.RPCSrcAddr,
|
||||
Logger: logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}),
|
||||
MaxTime: 2 * time.Minute,
|
||||
MaxStreams: 4,
|
||||
TLSConfigurator: tls,
|
||||
Datacenter: c.Datacenter,
|
||||
Server: false,
|
||||
SrcAddr: c.RPCSrcAddr,
|
||||
Logger: logger.StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true}),
|
||||
MaxTime: 2 * time.Minute,
|
||||
MaxStreams: 4,
|
||||
TLSConfigurator: tls,
|
||||
Datacenter: c.Datacenter,
|
||||
Timeout: c.RPCHoldTimeout,
|
||||
DefaultQueryTime: c.DefaultQueryTime,
|
||||
MaxQueryTime: c.MaxQueryTime,
|
||||
}
|
||||
|
||||
return Deps{
|
||||
@ -853,3 +857,67 @@ func TestClient_ShortReconnectTimeout(t *testing.T) {
50*time.Millisecond,
"The client node was not reaped within the alotted time")
}

type waiter struct {
duration time.Duration
}

func (w *waiter) Wait(struct{}, *struct{}) error {
time.Sleep(w.duration)
return nil
}

func TestClient_RPC_Timeout(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()

_, s1 := testServerWithConfig(t)

_, c1 := testClientWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.NodeName = uniqueNodeName(t.Name())
c.RPCHoldTimeout = 10 * time.Millisecond
c.DefaultQueryTime = 100 * time.Millisecond
c.MaxQueryTime = 200 * time.Millisecond
})
joinLAN(t, c1, s1)

retry.Run(t, func(r *retry.R) {
var out struct{}
if err := c1.RPC("Status.Ping", struct{}{}, &out); err != nil {
r.Fatalf("err: %v", err)
}
})

// waiter will sleep for 50ms
require.NoError(t, s1.RegisterEndpoint("Wait", &waiter{duration: 50 * time.Millisecond}))

// Requests with QueryOptions have a default timeout of RPCHoldTimeout (10ms)
// so we expect the RPC call to timeout.
var out struct{}
err := c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{}, &out)
require.Error(t, err)
require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached")

// Blocking requests have a longer timeout (100ms) so this should pass
out = struct{}{}
err = c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{
QueryOptions: structs.QueryOptions{
MinQueryIndex: 1,
},
}, &out)
require.NoError(t, err)

// We pass in a custom MaxQueryTime (20ms) through QueryOptions which should fail
out = struct{}{}
err = c1.RPC("Wait.Wait", &structs.NodeSpecificRequest{
QueryOptions: structs.QueryOptions{
MinQueryIndex: 1,
MaxQueryTime: 20 * time.Millisecond,
},
}, &out)
require.Error(t, err)
require.Contains(t, err.Error(), "rpc error making call: i/o deadline reached")
}
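
The test above pins down how the new client-side deadline behaves: plain RPCs are cut off around RPCHoldTimeout, while blocking queries get their (defaulted and capped) wait time plus some padding. A minimal sketch of a deadline-selection rule that is consistent with those three cases; this is an inferred illustration, not the exact production logic:

```go
package rpctimeoutsketch

import "time"

// QueryOptions carries only the fields the test exercises.
type QueryOptions struct {
	MinQueryIndex uint64
	MaxQueryTime  time.Duration
}

// rpcDeadline picks a per-call timeout consistent with TestClient_RPC_Timeout:
// plain RPCs get the short hold timeout; blocking queries get their requested
// wait (defaulted, then capped) plus the hold timeout as padding.
func rpcDeadline(q QueryOptions, holdTimeout, defaultQueryTime, maxQueryTime time.Duration) time.Duration {
	if q.MinQueryIndex == 0 {
		// Not a blocking query.
		return holdTimeout
	}
	wait := q.MaxQueryTime
	if wait == 0 {
		wait = defaultQueryTime
	}
	if wait > maxQueryTime {
		wait = maxQueryTime
	}
	return wait + holdTimeout
}
```

With the test's configuration (10ms hold, 100ms default, 200ms max), a plain call against the 50ms waiter times out, a default blocking call (110ms budget) succeeds, and a 20ms MaxQueryTime blocking call (30ms budget) fails, matching the assertions above.
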
@ -604,6 +604,8 @@ type ReloadableConfig struct {
|
||||
RaftSnapshotThreshold int
|
||||
RaftSnapshotInterval time.Duration
|
||||
RaftTrailingLogs int
|
||||
HeartbeatTimeout time.Duration
|
||||
ElectionTimeout time.Duration
|
||||
}
|
||||
|
||||
type RaftBoltDBConfig struct {
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"github.com/hashicorp/consul/agent/consul/state"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
)
|
||||
|
||||
var CommandsSummaries = []prometheus.SummaryDefinition{
|
||||
@ -93,6 +94,10 @@ var CommandsSummaries = []prometheus.SummaryDefinition{
|
||||
Name: []string{"fsm", "system_metadata"},
|
||||
Help: "Measures the time it takes to apply a system metadata operation to the FSM.",
|
||||
},
|
||||
{
|
||||
Name: []string{"fsm", "peering"},
|
||||
Help: "Measures the time it takes to apply a peering operation to the FSM.",
|
||||
},
|
||||
// TODO(kit): We generate the config-entry fsm summaries by reading off of the request. It is
|
||||
// possible to statically declare these when we know all of the names, but I didn't get to it
|
||||
// in this patch. Config-entries are known though and we should add these in the future.
|
||||
@ -131,6 +136,11 @@ func init() {
|
||||
registerCommand(structs.ACLAuthMethodDeleteRequestType, (*FSM).applyACLAuthMethodDeleteOperation)
|
||||
registerCommand(structs.FederationStateRequestType, (*FSM).applyFederationStateOperation)
|
||||
registerCommand(structs.SystemMetadataRequestType, (*FSM).applySystemMetadataOperation)
|
||||
registerCommand(structs.PeeringWriteType, (*FSM).applyPeeringWrite)
|
||||
registerCommand(structs.PeeringDeleteType, (*FSM).applyPeeringDelete)
|
||||
registerCommand(structs.PeeringTerminateByIDType, (*FSM).applyPeeringTerminate)
|
||||
registerCommand(structs.PeeringTrustBundleWriteType, (*FSM).applyPeeringTrustBundleWrite)
|
||||
registerCommand(structs.PeeringTrustBundleDeleteType, (*FSM).applyPeeringTrustBundleDelete)
|
||||
}
|
||||
|
||||
func (c *FSM) applyRegister(buf []byte, index uint64) interface{} {
|
||||
@ -159,17 +169,17 @@ func (c *FSM) applyDeregister(buf []byte, index uint64) interface{} {
|
||||
// here is also baked into vetDeregisterWithACL() in acl.go, so if you
|
||||
// make changes here, be sure to also adjust the code over there.
|
||||
if req.ServiceID != "" {
|
||||
if err := c.state.DeleteService(index, req.Node, req.ServiceID, &req.EnterpriseMeta); err != nil {
|
||||
if err := c.state.DeleteService(index, req.Node, req.ServiceID, &req.EnterpriseMeta, req.PeerName); err != nil {
|
||||
c.logger.Warn("DeleteNodeService failed", "error", err)
|
||||
return err
|
||||
}
|
||||
} else if req.CheckID != "" {
|
||||
if err := c.state.DeleteCheck(index, req.Node, req.CheckID, &req.EnterpriseMeta); err != nil {
|
||||
if err := c.state.DeleteCheck(index, req.Node, req.CheckID, &req.EnterpriseMeta, req.PeerName); err != nil {
|
||||
c.logger.Warn("DeleteNodeCheck failed", "error", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := c.state.DeleteNode(index, req.Node, &req.EnterpriseMeta); err != nil {
|
||||
if err := c.state.DeleteNode(index, req.Node, &req.EnterpriseMeta, req.PeerName); err != nil {
|
||||
c.logger.Warn("DeleteNode failed", "error", err)
|
||||
return err
|
||||
}
|
||||
@ -679,3 +689,73 @@ func (c *FSM) applySystemMetadataOperation(buf []byte, index uint64) interface{}
|
||||
return fmt.Errorf("invalid system metadata operation type: %v", req.Op)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *FSM) applyPeeringWrite(buf []byte, index uint64) interface{} {
var req pbpeering.PeeringWriteRequest
if err := structs.DecodeProto(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode peering write request: %v", err))
}

defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering"}, time.Now(),
[]metrics.Label{{Name: "op", Value: "write"}})

return c.state.PeeringWrite(index, req.Peering)
}

// TODO(peering): replace with deferred deletion since this operation
// should involve cleanup of data associated with the peering.
func (c *FSM) applyPeeringDelete(buf []byte, index uint64) interface{} {
var req pbpeering.PeeringDeleteRequest
if err := structs.DecodeProto(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode peering delete request: %v", err))
}

defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering"}, time.Now(),
[]metrics.Label{{Name: "op", Value: "delete"}})

q := state.Query{
Value: req.Name,
EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition),
}
return c.state.PeeringDelete(index, q)
}

func (c *FSM) applyPeeringTerminate(buf []byte, index uint64) interface{} {
var req pbpeering.PeeringTerminateByIDRequest
if err := structs.DecodeProto(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode peering delete request: %v", err))
}

defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering"}, time.Now(),
[]metrics.Label{{Name: "op", Value: "terminate"}})

return c.state.PeeringTerminateByID(index, req.ID)
}

func (c *FSM) applyPeeringTrustBundleWrite(buf []byte, index uint64) interface{} {
var req pbpeering.PeeringTrustBundleWriteRequest
if err := structs.DecodeProto(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode peering trust bundle write request: %v", err))
}

defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering_trust_bundle"}, time.Now(),
[]metrics.Label{{Name: "op", Value: "write"}})

return c.state.PeeringTrustBundleWrite(index, req.PeeringTrustBundle)
}

func (c *FSM) applyPeeringTrustBundleDelete(buf []byte, index uint64) interface{} {
var req pbpeering.PeeringTrustBundleDeleteRequest
if err := structs.DecodeProto(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode peering trust bundle delete request: %v", err))
}

defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering_trust_bundle"}, time.Now(),
[]metrics.Label{{Name: "op", Value: "delete"}})

q := state.Query{
Value: req.Name,
EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(req.Partition),
}
return c.state.PeeringTrustBundleDelete(index, q)
}
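
These apply functions are wired up through the registerCommand calls added to init() in the hunk further up. A minimal sketch of that type-to-handler dispatch pattern, simplified to plain functions rather than the real (*FSM) method values, with hypothetical names:

```go
package fsmsketch

import "fmt"

type MessageType byte

type applyFn func(buf []byte, index uint64) interface{}

// commands maps a raft log message type to the handler that applies it,
// mirroring the registerCommand calls added for the peering types above.
var commands = map[MessageType]applyFn{}

func registerCommand(t MessageType, fn applyFn) {
	if _, ok := commands[t]; ok {
		panic(fmt.Sprintf("command %d already registered", t))
	}
	commands[t] = fn
}

// apply looks up and runs the handler for a decoded raft log entry.
func apply(t MessageType, buf []byte, index uint64) interface{} {
	fn, ok := commands[t]
	if !ok {
		return fmt.Errorf("unknown message type %d", t)
	}
	return fn(buf, index)
}
```
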
@ -69,7 +69,7 @@ func TestFSM_RegisterNode(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify we are registered
|
||||
_, node, err := fsm.state.GetNode("foo", nil)
|
||||
_, node, err := fsm.state.GetNode("foo", nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -81,7 +81,7 @@ func TestFSM_RegisterNode(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify service registered
|
||||
_, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition())
|
||||
_, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -128,7 +128,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify we are registered
|
||||
_, node, err := fsm.state.GetNode("foo", nil)
|
||||
_, node, err := fsm.state.GetNode("foo", nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -137,7 +137,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify service registered
|
||||
_, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition())
|
||||
_, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -146,7 +146,7 @@ func TestFSM_RegisterNode_Service(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify check
|
||||
_, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition())
|
||||
_, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -200,7 +200,7 @@ func TestFSM_DeregisterService(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify we are registered
|
||||
_, node, err := fsm.state.GetNode("foo", nil)
|
||||
_, node, err := fsm.state.GetNode("foo", nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -209,7 +209,7 @@ func TestFSM_DeregisterService(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify service not registered
|
||||
_, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition())
|
||||
_, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -263,7 +263,7 @@ func TestFSM_DeregisterCheck(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify we are registered
|
||||
_, node, err := fsm.state.GetNode("foo", nil)
|
||||
_, node, err := fsm.state.GetNode("foo", nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -272,7 +272,7 @@ func TestFSM_DeregisterCheck(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify check not registered
|
||||
_, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition())
|
||||
_, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -332,7 +332,7 @@ func TestFSM_DeregisterNode(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify we are not registered
|
||||
_, node, err := fsm.state.GetNode("foo", nil)
|
||||
_, node, err := fsm.state.GetNode("foo", nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -341,7 +341,7 @@ func TestFSM_DeregisterNode(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify service not registered
|
||||
_, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition())
|
||||
_, services, err := fsm.state.NodeServices(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -350,7 +350,7 @@ func TestFSM_DeregisterNode(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify checks not registered
|
||||
_, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition())
|
||||
_, checks, err := fsm.state.NodeChecks(nil, "foo", structs.DefaultEnterpriseMetaInDefaultPartition(), "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -1468,7 +1468,7 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
|
||||
|
||||
// Verify we are not registered
|
||||
for i := 0; i < 10; i++ {
|
||||
_, node, err := fsm.state.GetNode(fmt.Sprintf("foo%d", i), nil)
|
||||
_, node, err := fsm.state.GetNode(fmt.Sprintf("foo%d", i), nil, "")
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, node)
|
||||
}
|
||||
@ -1491,7 +1491,7 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
|
||||
|
||||
// Verify we are still not registered
|
||||
for i := 0; i < 10; i++ {
|
||||
_, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil)
|
||||
_, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil, "")
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, node)
|
||||
}
|
||||
@ -1515,19 +1515,19 @@ func TestFSM_Chunking_Lifecycle(t *testing.T) {
|
||||
|
||||
// Verify we are registered
|
||||
for i := 0; i < 10; i++ {
|
||||
_, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil)
|
||||
_, node, err := fsm2.state.GetNode(fmt.Sprintf("foo%d", i), nil, "")
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, node)
|
||||
|
||||
// Verify service registered
|
||||
_, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i), structs.DefaultEnterpriseMetaInDefaultPartition())
|
||||
_, services, err := fsm2.state.NodeServices(nil, fmt.Sprintf("foo%d", i), structs.DefaultEnterpriseMetaInDefaultPartition(), "")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, services)
|
||||
_, ok := services.Services["db"]
|
||||
assert.True(t, ok)
|
||||
|
||||
// Verify check
|
||||
_, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i), nil)
|
||||
_, checks, err := fsm2.state.NodeChecks(nil, fmt.Sprintf("foo%d", i), nil, "")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, checks)
|
||||
assert.Equal(t, string(checks[0].CheckID), "db")
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/hashicorp/consul/agent/consul/state"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -35,6 +36,8 @@ func init() {
|
||||
registerRestorer(structs.SystemMetadataRequestType, restoreSystemMetadata)
|
||||
registerRestorer(structs.ServiceVirtualIPRequestType, restoreServiceVirtualIP)
|
||||
registerRestorer(structs.FreeVirtualIPRequestType, restoreFreeVirtualIP)
|
||||
registerRestorer(structs.PeeringWriteType, restorePeering)
|
||||
registerRestorer(structs.PeeringTrustBundleWriteType, restorePeeringTrustBundle)
|
||||
}
|
||||
|
||||
func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) error {
|
||||
@ -86,6 +89,12 @@ func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) err
|
||||
if err := s.persistIndex(sink, encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.persistPeerings(sink, encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.persistPeeringTrustBundles(sink, encoder); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -112,6 +121,7 @@ func (s *snapshot) persistNodes(sink raft.SnapshotSink,
|
||||
NodeMeta: n.Meta,
|
||||
RaftIndex: n.RaftIndex,
|
||||
EnterpriseMeta: *nodeEntMeta,
|
||||
PeerName: n.PeerName,
|
||||
}
|
||||
|
||||
// Register the node itself
|
||||
@ -123,7 +133,7 @@ func (s *snapshot) persistNodes(sink raft.SnapshotSink,
|
||||
}
|
||||
|
||||
// Register each service this node has
|
||||
services, err := s.state.Services(n.Node, nodeEntMeta)
|
||||
services, err := s.state.Services(n.Node, nodeEntMeta, n.PeerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -139,7 +149,7 @@ func (s *snapshot) persistNodes(sink raft.SnapshotSink,
|
||||
|
||||
// Register each check this node has
|
||||
req.Service = nil
|
||||
checks, err := s.state.Checks(n.Node, nodeEntMeta)
|
||||
checks, err := s.state.Checks(n.Node, nodeEntMeta, n.PeerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -161,7 +171,6 @@ func (s *snapshot) persistNodes(sink raft.SnapshotSink,
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO(partitions)
|
||||
for coord := coords.Next(); coord != nil; coord = coords.Next() {
|
||||
if _, err := sink.Write([]byte{byte(structs.CoordinateBatchUpdateType)}); err != nil {
|
||||
return err
|
||||
@ -547,6 +556,42 @@ func (s *snapshot) persistVirtualIPs(sink raft.SnapshotSink, encoder *codec.Enco
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *snapshot) persistPeerings(sink raft.SnapshotSink, encoder *codec.Encoder) error {
peerings, err := s.state.Peerings()
if err != nil {
return err
}

for entry := peerings.Next(); entry != nil; entry = peerings.Next() {
if _, err := sink.Write([]byte{byte(structs.PeeringWriteType)}); err != nil {
return err
}
if err := encoder.Encode(entry.(*pbpeering.Peering)); err != nil {
return err
}
}

return nil
}

func (s *snapshot) persistPeeringTrustBundles(sink raft.SnapshotSink, encoder *codec.Encoder) error {
ptbs, err := s.state.PeeringTrustBundles()
if err != nil {
return err
}

for entry := ptbs.Next(); entry != nil; entry = ptbs.Next() {
if _, err := sink.Write([]byte{byte(structs.PeeringTrustBundleWriteType)}); err != nil {
return err
}
if err := encoder.Encode(entry.(*pbpeering.PeeringTrustBundle)); err != nil {
return err
}
}

return nil
}

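
Each persisted record above is written as a one-byte message-type tag followed by the encoded payload, and the restorers registered via registerRestorer decode the matching type back out of the snapshot. A minimal sketch of that framing, using simplified types and JSON as a stand-in for the msgpack codec the real snapshot code uses:

```go
package snapsketch

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type record struct {
	Name string `json:"name"`
}

// persist writes a one-byte type tag followed by the encoded record,
// mirroring the sink.Write(type) + encoder.Encode(entry) framing above.
func persist(buf *bytes.Buffer, typ byte, rec record) error {
	buf.WriteByte(typ)
	b, err := json.Marshal(rec)
	if err != nil {
		return err
	}
	_, err = buf.Write(b)
	return err
}

// restore reads the tag and dispatches the remaining bytes to the restorer
// registered for it, the counterpart of the registerRestorer(...) calls.
func restore(buf *bytes.Buffer, restorers map[byte]func([]byte) error) error {
	typ, err := buf.ReadByte()
	if err != nil {
		return err
	}
	fn, ok := restorers[typ]
	if !ok {
		return fmt.Errorf("unknown snapshot record type %d", typ)
	}
	return fn(buf.Bytes())
}
```
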
func restoreRegistration(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error {
|
||||
var req structs.RegisterRequest
|
||||
if err := decoder.Decode(&req); err != nil {
|
||||
@ -849,3 +894,25 @@ func restoreFreeVirtualIP(header *SnapshotHeader, restore *state.Restore, decode
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func restorePeering(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error {
|
||||
var req pbpeering.Peering
|
||||
if err := decoder.Decode(&req); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := restore.Peering(&req); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func restorePeeringTrustBundle(header *SnapshotHeader, restore *state.Restore, decoder *codec.Decoder) error {
|
||||
var req pbpeering.PeeringTrustBundle
|
||||
if err := decoder.Decode(&req); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := restore.PeeringTrustBundle(&req); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -17,6 +17,7 @@ import (
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
|
||||
@ -473,6 +474,18 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
|
||||
require.Equal(t, expect[i], sn.Service.Name)
|
||||
}
|
||||
|
||||
// Peerings
|
||||
require.NoError(t, fsm.state.PeeringWrite(31, &pbpeering.Peering{
|
||||
Name: "baz",
|
||||
}))
|
||||
|
||||
// Peering Trust Bundles
|
||||
require.NoError(t, fsm.state.PeeringTrustBundleWrite(32, &pbpeering.PeeringTrustBundle{
|
||||
TrustDomain: "qux.com",
|
||||
PeerName: "qux",
|
||||
RootPEMs: []string{"qux certificate bundle"},
|
||||
}))
|
||||
|
||||
// Snapshot
|
||||
snap, err := fsm.Snapshot()
|
||||
require.NoError(t, err)
|
||||
@ -528,7 +541,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
|
||||
require.NoError(t, fsm2.Restore(sink))
|
||||
|
||||
// Verify the contents
|
||||
_, nodes, err := fsm2.state.Nodes(nil, nil)
|
||||
_, nodes, err := fsm2.state.Nodes(nil, nil, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, nodes, 2, "incorect number of nodes: %v", nodes)
|
||||
|
||||
@ -556,7 +569,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
|
||||
require.Equal(t, uint64(1), nodes[1].CreateIndex)
|
||||
require.Equal(t, uint64(23), nodes[1].ModifyIndex)
|
||||
|
||||
_, fooSrv, err := fsm2.state.NodeServices(nil, "foo", nil)
|
||||
_, fooSrv, err := fsm2.state.NodeServices(nil, "foo", nil, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, fooSrv.Services, 4)
|
||||
require.Contains(t, fooSrv.Services["db"].Tags, "primary")
|
||||
@ -569,7 +582,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
|
||||
require.Equal(t, uint64(3), fooSrv.Services["web"].CreateIndex)
|
||||
require.Equal(t, uint64(3), fooSrv.Services["web"].ModifyIndex)
|
||||
|
||||
_, checks, err := fsm2.state.NodeChecks(nil, "foo", nil)
|
||||
_, checks, err := fsm2.state.NodeChecks(nil, "foo", nil, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, checks, 1)
|
||||
require.Equal(t, "foo", checks[0].Node)
|
||||
@ -768,6 +781,27 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
|
||||
require.Equal(t, expect[i], sn.Service.Name)
|
||||
}
|
||||
|
||||
// Verify peering is restored
|
||||
idx, prngRestored, err := fsm2.state.PeeringRead(nil, state.Query{
|
||||
Value: "baz",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(31), idx)
|
||||
require.NotNil(t, prngRestored)
|
||||
require.Equal(t, "baz", prngRestored.Name)
|
||||
|
||||
// Verify peering trust bundle is restored
|
||||
idx, ptbRestored, err := fsm2.state.PeeringTrustBundleRead(nil, state.Query{
|
||||
Value: "qux",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(32), idx)
|
||||
require.NotNil(t, ptbRestored)
|
||||
require.Equal(t, "qux.com", ptbRestored.TrustDomain)
|
||||
require.Equal(t, "qux", ptbRestored.PeerName)
|
||||
require.Len(t, ptbRestored.RootPEMs, 1)
|
||||
require.Equal(t, "qux certificate bundle", ptbRestored.RootPEMs[0])
|
||||
|
||||
// Snapshot
|
||||
snap, err = fsm2.Snapshot()
|
||||
require.NoError(t, err)
|
||||
@ -821,7 +855,7 @@ func TestFSM_BadRestore_OSS(t *testing.T) {
|
||||
require.Error(t, fsm.Restore(sink))
|
||||
|
||||
// Verify the contents didn't get corrupted.
|
||||
_, nodes, err := fsm.state.Nodes(nil, nil)
|
||||
_, nodes, err := fsm.state.Nodes(nil, nil, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, nodes, 1)
|
||||
require.Equal(t, "foo", nodes[0].Node)
|
||||
|
@ -2,16 +2,15 @@ package consul
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/grpc/public"
|
||||
"github.com/hashicorp/consul/proto-public/pbconnectca"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/consul/proto-public/pbserverdiscovery"
|
||||
)
|
||||
|
||||
func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) {
|
||||
@ -19,8 +18,6 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
// The gRPC endpoint itself well-tested with mocks. This test checks we're
|
||||
// correctly wiring everything up in the server by:
|
||||
//
|
||||
@ -28,42 +25,24 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) {
|
||||
// * Making a request to a follower's public gRPC port.
|
||||
// * Ensuring that the request is correctly forwarded to the leader.
|
||||
// * Ensuring we get a valid certificate back (so it went through the CAManager).
|
||||
dir1, server1 := testServerWithConfig(t, func(c *Config) {
|
||||
server1, conn1 := testGRPCIntegrationServer(t, func(c *Config) {
|
||||
c.Bootstrap = false
|
||||
c.BootstrapExpect = 2
|
||||
})
|
||||
defer os.RemoveAll(dir1)
|
||||
defer server1.Shutdown()
|
||||
|
||||
dir2, server2 := testServerWithConfig(t, func(c *Config) {
|
||||
server2, conn2 := testGRPCIntegrationServer(t, func(c *Config) {
|
||||
c.Bootstrap = false
|
||||
})
|
||||
defer os.RemoveAll(dir2)
|
||||
defer server2.Shutdown()
|
||||
|
||||
joinLAN(t, server2, server1)
|
||||
|
||||
testrpc.WaitForLeader(t, server1.RPC, "dc1")
|
||||
waitForLeaderEstablishment(t, server1, server2)
|
||||
|
||||
var follower *Server
|
||||
if server1.IsLeader() {
|
||||
follower = server2
|
||||
} else {
|
||||
follower = server1
|
||||
conn := conn2
|
||||
if server2.IsLeader() {
|
||||
conn = conn1
|
||||
}
|
||||
|
||||
// publicGRPCServer is bound to a listener by the wrapping agent code, so we
|
||||
// need to do it ourselves here.
|
||||
lis, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
go func() {
|
||||
require.NoError(t, follower.publicGRPCServer.Serve(lis))
|
||||
}()
|
||||
t.Cleanup(follower.publicGRPCServer.Stop)
|
||||
|
||||
conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
|
||||
require.NoError(t, err)
|
||||
|
||||
client := pbconnectca.NewConnectCAServiceClient(conn)
|
||||
|
||||
csr, _ := connect.TestCSR(t, &connect.SpiffeIDService{
|
||||
@ -73,8 +52,13 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) {
|
||||
Service: "foo",
|
||||
})
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
ctx = public.ContextWithToken(ctx, TestDefaultInitialManagementToken)
|
||||
|
||||
// This would fail if it wasn't forwarded to the leader.
|
||||
rsp, err := client.Sign(context.Background(), &pbconnectca.SignRequest{
|
||||
rsp, err := client.Sign(ctx, &pbconnectca.SignRequest{
|
||||
Csr: csr,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@ -82,3 +66,52 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) {
|
||||
_, err = connect.ParseCert(rsp.CertPem)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGRPCIntegration_ServerDiscovery_WatchServers(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}

// The gRPC endpoint itself well-tested with mocks. This test checks we're
// correctly wiring everything up in the server by:
//
// * Starting a server
// * Initiating the gRPC stream
// * Validating the snapshot
// * Adding another server
// * Validating another message is sent.

server1, conn := testGRPCIntegrationServer(t, func(c *Config) {
c.Bootstrap = true
c.BootstrapExpect = 1
})
waitForLeaderEstablishment(t, server1)

client := pbserverdiscovery.NewServerDiscoveryServiceClient(conn)

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
t.Cleanup(cancel)

ctx = public.ContextWithToken(ctx, TestDefaultInitialManagementToken)

serverStream, err := client.WatchServers(ctx, &pbserverdiscovery.WatchServersRequest{Wan: false})
require.NoError(t, err)

rsp, err := serverStream.Recv()
require.NoError(t, err)
require.NotNil(t, rsp)
require.Len(t, rsp.Servers, 1)

_, server2, _ := testACLServerWithConfig(t, func(c *Config) {
c.Bootstrap = false
}, false)

// join the new server to the leader
joinLAN(t, server2, server1)

// now receive the event containing 2 servers
rsp, err = serverStream.Recv()
require.NoError(t, err)
require.NotNil(t, rsp)
require.Len(t, rsp.Servers, 2)
}
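
The test above exercises the client side of the new WatchServers stream: an initial snapshot, then another message when the ready-server set changes. A minimal sketch of the server-side shape such a watch endpoint might take; the channel-based change notification and all names here are illustrative assumptions, not the real implementation:

```go
package discoverysketch

import "context"

type Server struct {
	ID      string
	Address string
}

// Stream abstracts the Send method of a gRPC server stream.
type Stream interface {
	Send(servers []Server) error
}

// watchServers sends the current snapshot immediately and then again each
// time a change notification arrives, until the client or server goes away.
func watchServers(ctx context.Context, current func() []Server, changes <-chan struct{}, stream Stream) error {
	if err := stream.Send(current()); err != nil {
		return err
	}
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-changes:
			if err := stream.Send(current()); err != nil {
				return err
			}
		}
	}
}
```
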
@ -47,9 +47,9 @@ func (h *Health) ChecksInState(args *structs.ChecksInStateRequest,
|
||||
var checks structs.HealthChecks
|
||||
var err error
|
||||
if len(args.NodeMetaFilters) > 0 {
|
||||
index, checks, err = state.ChecksInStateByNodeMeta(ws, args.State, args.NodeMetaFilters, &args.EnterpriseMeta)
|
||||
index, checks, err = state.ChecksInStateByNodeMeta(ws, args.State, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
|
||||
} else {
|
||||
index, checks, err = state.ChecksInState(ws, args.State, &args.EnterpriseMeta)
|
||||
index, checks, err = state.ChecksInState(ws, args.State, &args.EnterpriseMeta, args.PeerName)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
@ -98,7 +98,7 @@ func (h *Health) NodeChecks(args *structs.NodeSpecificRequest,
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
index, checks, err := state.NodeChecks(ws, args.Node, &args.EnterpriseMeta)
|
||||
index, checks, err := state.NodeChecks(ws, args.Node, &args.EnterpriseMeta, args.PeerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -157,9 +157,9 @@ func (h *Health) ServiceChecks(args *structs.ServiceSpecificRequest,
|
||||
var checks structs.HealthChecks
|
||||
var err error
|
||||
if len(args.NodeMetaFilters) > 0 {
|
||||
index, checks, err = state.ServiceChecksByNodeMeta(ws, args.ServiceName, args.NodeMetaFilters, &args.EnterpriseMeta)
|
||||
index, checks, err = state.ServiceChecksByNodeMeta(ws, args.ServiceName, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
|
||||
} else {
|
||||
index, checks, err = state.ServiceChecks(ws, args.ServiceName, &args.EnterpriseMeta)
|
||||
index, checks, err = state.ServiceChecks(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
@ -304,7 +304,7 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc
|
||||
// can be used by the ServiceNodes endpoint.
|
||||
|
||||
func (h *Health) serviceNodesConnect(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) {
|
||||
return s.CheckConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta)
|
||||
return s.CheckConnectServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName)
|
||||
}
|
||||
|
||||
func (h *Health) serviceNodesIngress(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) {
|
||||
@ -317,11 +317,11 @@ func (h *Health) serviceNodesTagFilter(ws memdb.WatchSet, s *state.Store, args *
|
||||
// Agents < v1.3.0 populate the ServiceTag field. In this case,
|
||||
// use ServiceTag instead of the ServiceTags field.
|
||||
if args.ServiceTag != "" {
|
||||
return s.CheckServiceTagNodes(ws, args.ServiceName, []string{args.ServiceTag}, &args.EnterpriseMeta)
|
||||
return s.CheckServiceTagNodes(ws, args.ServiceName, []string{args.ServiceTag}, &args.EnterpriseMeta, args.PeerName)
|
||||
}
|
||||
return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTags, &args.EnterpriseMeta)
|
||||
return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTags, &args.EnterpriseMeta, args.PeerName)
|
||||
}
|
||||
|
||||
func (h *Health) serviceNodesDefault(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) {
|
||||
return s.CheckServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta)
|
||||
return s.CheckServiceNodes(ws, args.ServiceName, &args.EnterpriseMeta, args.PeerName)
|
||||
}
|
||||
|
@ -13,7 +13,6 @@ import (
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/hashicorp/consul/types"
|
||||
@ -558,124 +557,109 @@ func TestHealth_ServiceNodes(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
dir1, s1 := testServer(t)
|
||||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
_, s1 := testServer(t)
|
||||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
waitForLeaderEstablishment(t, s1)
|
||||
|
||||
arg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"primary"},
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
var out struct{}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
testingPeerNames := []string{"", "my-peer"}
|
||||
|
||||
arg = structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "bar",
|
||||
Address: "127.0.0.2",
|
||||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"replica"},
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
Status: api.HealthWarning,
|
||||
ServiceID: "db",
|
||||
},
|
||||
}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
var out2 structs.IndexedCheckServiceNodes
|
||||
req := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "db",
|
||||
ServiceTags: []string{"primary"},
|
||||
TagFilter: false,
|
||||
}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
nodes := out2.Nodes
|
||||
if len(nodes) != 2 {
|
||||
t.Fatalf("Bad: %v", nodes)
|
||||
}
|
||||
if nodes[0].Node.Node != "bar" {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if nodes[1].Node.Node != "foo" {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
if !stringslice.Contains(nodes[0].Service.Tags, "replica") {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if !stringslice.Contains(nodes[1].Service.Tags, "primary") {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
if nodes[0].Checks[0].Status != api.HealthWarning {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if nodes[1].Checks[0].Status != api.HealthPassing {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
|
||||
// Same should still work for <1.3 RPCs with singular tags
|
||||
// DEPRECATED (singular-service-tag) - remove this when backwards RPC compat
|
||||
// with 1.2.x is not required.
|
||||
{
|
||||
var out2 structs.IndexedCheckServiceNodes
|
||||
req := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "db",
|
||||
ServiceTag: "primary",
|
||||
TagFilter: false,
|
||||
}
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
// TODO(peering): will have to seed this data differently in the future
|
||||
for _, peerName := range testingPeerNames {
|
||||
arg := structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
Address: "127.0.0.1",
|
||||
PeerName: peerName,
|
||||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"primary"},
|
||||
PeerName: peerName,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
Status: api.HealthPassing,
|
||||
ServiceID: "db",
|
||||
PeerName: peerName,
|
||||
},
|
||||
}
|
||||
var out struct{}
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
|
||||
|
||||
arg = structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "bar",
|
||||
Address: "127.0.0.2",
|
||||
PeerName: peerName,
|
||||
Service: &structs.NodeService{
|
||||
ID: "db",
|
||||
Service: "db",
|
||||
Tags: []string{"replica"},
|
||||
PeerName: peerName,
|
||||
},
|
||||
Check: &structs.HealthCheck{
|
||||
Name: "db connect",
|
||||
Status: api.HealthWarning,
|
||||
ServiceID: "db",
|
||||
PeerName: peerName,
|
||||
},
|
||||
}
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
|
||||
}
|
||||
|
||||
verify := func(t *testing.T, out2 structs.IndexedCheckServiceNodes, peerName string) {
|
||||
nodes := out2.Nodes
|
||||
if len(nodes) != 2 {
|
||||
t.Fatalf("Bad: %v", nodes)
|
||||
}
|
||||
if nodes[0].Node.Node != "bar" {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if nodes[1].Node.Node != "foo" {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
if !stringslice.Contains(nodes[0].Service.Tags, "replica") {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if !stringslice.Contains(nodes[1].Service.Tags, "primary") {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
}
|
||||
if nodes[0].Checks[0].Status != api.HealthWarning {
|
||||
t.Fatalf("Bad: %v", nodes[0])
|
||||
}
|
||||
if nodes[1].Checks[0].Status != api.HealthPassing {
|
||||
t.Fatalf("Bad: %v", nodes[1])
|
||||
require.Len(t, nodes, 2)
|
||||
require.Equal(t, peerName, nodes[0].Node.PeerName)
|
||||
require.Equal(t, peerName, nodes[1].Node.PeerName)
|
||||
require.Equal(t, "bar", nodes[0].Node.Node)
|
||||
require.Equal(t, "foo", nodes[1].Node.Node)
|
||||
require.Equal(t, peerName, nodes[0].Service.PeerName)
|
||||
require.Equal(t, peerName, nodes[1].Service.PeerName)
|
||||
require.Contains(t, nodes[0].Service.Tags, "replica")
|
||||
require.Contains(t, nodes[1].Service.Tags, "primary")
|
||||
require.Equal(t, peerName, nodes[0].Checks[0].PeerName)
|
||||
require.Equal(t, peerName, nodes[1].Checks[0].PeerName)
|
||||
require.Equal(t, api.HealthWarning, nodes[0].Checks[0].Status)
|
||||
require.Equal(t, api.HealthPassing, nodes[1].Checks[0].Status)
|
||||
}
|
||||
|
||||
for _, peerName := range testingPeerNames {
|
||||
testName := "peer named " + peerName
|
||||
if peerName == "" {
|
||||
testName = "local peer"
|
||||
}
|
||||
t.Run(testName, func(t *testing.T) {
|
||||
t.Run("with service tags", func(t *testing.T) {
|
||||
var out2 structs.IndexedCheckServiceNodes
|
||||
req := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "db",
|
||||
ServiceTags: []string{"primary"},
|
||||
TagFilter: false,
|
||||
PeerName: peerName,
|
||||
}
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2))
|
||||
verify(t, out2, peerName)
|
||||
})
|
||||
|
||||
// Same should still work for <1.3 RPCs with singular tags
|
||||
// DEPRECATED (singular-service-tag) - remove this when backwards RPC compat
|
||||
// with 1.2.x is not required.
|
||||
t.Run("with legacy service tag", func(t *testing.T) {
|
||||
var out2 structs.IndexedCheckServiceNodes
|
||||
req := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "db",
|
||||
ServiceTag: "primary",
|
||||
TagFilter: false,
|
||||
PeerName: peerName,
|
||||
}
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &out2))
|
||||
verify(t, out2, peerName)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -38,7 +38,7 @@ func (m *Internal) NodeInfo(args *structs.NodeSpecificRequest,
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
index, dump, err := state.NodeInfo(ws, args.Node, &args.EnterpriseMeta)
|
||||
index, dump, err := state.NodeInfo(ws, args.Node, &args.EnterpriseMeta, args.PeerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -69,7 +69,7 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest,
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta)
|
||||
index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta, args.PeerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -112,7 +112,7 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
// Get, store, and filter nodes
|
||||
maxIdx, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta)
|
||||
maxIdx, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, args.PeerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -314,7 +314,7 @@ func (m *Internal) GatewayServiceDump(args *structs.ServiceSpecificRequest, repl
|
||||
// Loop over the gateway <-> serviceName mappings and fetch all service instances for each
|
||||
var result structs.ServiceDump
|
||||
for _, gs := range gatewayServices {
|
||||
idx, instances, err := state.CheckServiceNodes(ws, gs.Service.Name, &gs.Service.EnterpriseMeta)
|
||||
idx, instances, err := state.CheckServiceNodes(ws, gs.Service.Name, &gs.Service.EnterpriseMeta, args.PeerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -62,7 +62,7 @@ func TestHealthCheckRace(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify the index
|
||||
idx, out1, err := state.CheckServiceNodes(nil, "db", nil)
|
||||
idx, out1, err := state.CheckServiceNodes(nil, "db", nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -85,7 +85,7 @@ func TestHealthCheckRace(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify the index changed
|
||||
idx, out2, err := state.CheckServiceNodes(nil, "db", nil)
|
||||
idx, out2, err := state.CheckServiceNodes(nil, "db", nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
@ -305,6 +305,8 @@ func (s *Server) establishLeadership(ctx context.Context) error {
|
||||
|
||||
s.startFederationStateAntiEntropy(ctx)
|
||||
|
||||
s.startPeeringStreamSync(ctx)
|
||||
|
||||
if err := s.startConnectLeader(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -342,6 +344,8 @@ func (s *Server) revokeLeadership() {
|
||||
|
||||
s.stopACLReplication()
|
||||
|
||||
s.stopPeeringStreamSync()
|
||||
|
||||
s.stopConnectLeader()
|
||||
|
||||
s.stopACLTokenReaping()
|
||||
@ -887,7 +891,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *acl.Ent
|
||||
}
|
||||
|
||||
state := s.fsm.State()
|
||||
_, checks, err := state.ChecksInState(nil, api.HealthAny, nodeEntMeta)
|
||||
_, checks, err := state.ChecksInState(nil, api.HealthAny, nodeEntMeta, structs.DefaultPeerKeyword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -903,7 +907,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *acl.Ent
|
||||
}
|
||||
|
||||
// Get the node services, look for ConsulServiceID
|
||||
_, services, err := state.NodeServices(nil, check.Node, nodeEntMeta)
|
||||
_, services, err := state.NodeServices(nil, check.Node, nodeEntMeta, structs.DefaultPeerKeyword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -914,7 +918,7 @@ func (s *Server) reconcileReaped(known map[string]struct{}, nodeEntMeta *acl.Ent
|
||||
CHECKS:
|
||||
for _, service := range services.Services {
|
||||
if service.ID == structs.ConsulServiceID {
|
||||
_, node, err := state.GetNode(check.Node, nodeEntMeta)
|
||||
_, node, err := state.GetNode(check.Node, nodeEntMeta, check.PeerName)
|
||||
if err != nil {
|
||||
s.logger.Error("Unable to look up node with name", "name", check.Node, "error", err)
|
||||
continue CHECKS
|
||||
@ -1051,7 +1055,7 @@ func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.Enterpri
|
||||
|
||||
// Check if the node exists
|
||||
state := s.fsm.State()
|
||||
_, node, err := state.GetNode(member.Name, nodeEntMeta)
|
||||
_, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1059,7 +1063,7 @@ func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.Enterpri
|
||||
// Check if the associated service is available
|
||||
if service != nil {
|
||||
match := false
|
||||
_, services, err := state.NodeServices(nil, member.Name, nodeEntMeta)
|
||||
_, services, err := state.NodeServices(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1077,7 +1081,7 @@ func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.Enterpri
|
||||
}
|
||||
|
||||
// Check if the serfCheck is in the passing state
|
||||
_, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta)
|
||||
_, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1127,7 +1131,7 @@ func (s *Server) handleFailedMember(member serf.Member, nodeEntMeta *acl.Enterpr
|
||||
|
||||
// Check if the node exists
|
||||
state := s.fsm.State()
|
||||
_, node, err := state.GetNode(member.Name, nodeEntMeta)
|
||||
_, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1142,7 +1146,7 @@ func (s *Server) handleFailedMember(member serf.Member, nodeEntMeta *acl.Enterpr
|
||||
|
||||
if node.Address == member.Addr.String() {
|
||||
// Check if the serfCheck is in the critical state
|
||||
_, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta)
|
||||
_, checks, err := state.NodeChecks(nil, member.Name, nodeEntMeta, structs.DefaultPeerKeyword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1220,7 +1224,7 @@ func (s *Server) handleDeregisterMember(reason string, member serf.Member, nodeE
|
||||
|
||||
// Check if the node does not exist
|
||||
state := s.fsm.State()
|
||||
_, node, err := state.GetNode(member.Name, nodeEntMeta)
|
||||
_, node, err := state.GetNode(member.Name, nodeEntMeta, structs.DefaultPeerKeyword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -266,7 +266,7 @@ func newCARoot(pemValue, provider, clusterID string) (*structs.CARoot, error) {
|
||||
}
|
||||
return &structs.CARoot{
|
||||
ID: connect.CalculateCertFingerprint(primaryCert.Raw),
|
||||
Name: fmt.Sprintf("%s CA Primary Cert", strings.Title(provider)),
|
||||
Name: fmt.Sprintf("%s CA Primary Cert", providerPrettyName(provider)),
|
||||
SerialNumber: primaryCert.SerialNumber.Uint64(),
|
||||
SigningKeyID: connect.EncodeSigningKeyID(primaryCert.SubjectKeyId),
|
||||
ExternalTrustDomain: clusterID,
|
||||
@ -1581,3 +1581,18 @@ func (c *CAManager) isIntermediateUsedToSignLeaf() bool {
|
||||
provider, _ := c.getCAProvider()
|
||||
return primaryUsesIntermediate(provider)
|
||||
}
|
||||
|
||||
func providerPrettyName(provider string) string {
|
||||
switch provider {
|
||||
case "consul":
|
||||
return "Consul"
|
||||
case "vault":
|
||||
return "Vault"
|
||||
case "aws-pca":
|
||||
return "Aws-Pca"
|
||||
case "provider-name":
|
||||
return "Provider-Name"
|
||||
default:
|
||||
return provider
|
||||
}
|
||||
}
|
||||
|
@ -157,7 +157,7 @@ func (s *Server) fetchFederationStateAntiEntropyDetails(
|
||||
|
||||
// Fetch our current list of all mesh gateways.
|
||||
entMeta := structs.WildcardEnterpriseMetaInDefaultPartition()
|
||||
idx2, raw, err := state.ServiceDump(ws, structs.ServiceKindMeshGateway, true, entMeta)
|
||||
idx2, raw, err := state.ServiceDump(ws, structs.ServiceKindMeshGateway, true, entMeta, structs.DefaultPeerKeyword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
244
agent/consul/leader_peering.go
Normal file
@ -0,0 +1,244 @@
|
||||
package consul
|
||||
|
||||
import (
|
||||
"container/ring"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
|
||||
"github.com/hashicorp/consul/agent/pool"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
)
|
||||
|
||||
func (s *Server) startPeeringStreamSync(ctx context.Context) {
s.leaderRoutineManager.Start(ctx, peeringStreamsRoutineName, s.runPeeringSync)
}

func (s *Server) runPeeringSync(ctx context.Context) error {
logger := s.logger.Named("peering-syncer")
cancelFns := make(map[string]context.CancelFunc)

retryLoopBackoff(ctx, func() error {
if err := s.syncPeeringsAndBlock(ctx, logger, cancelFns); err != nil {
return err
}
return nil

}, func(err error) {
s.logger.Error("error syncing peering streams from state store", "error", err)
})

return nil
}

func (s *Server) stopPeeringStreamSync() {
// will be a no-op when not started
s.leaderRoutineManager.Stop(peeringStreamsRoutineName)
}

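
runPeeringSync above leans on retryLoopBackoff, which is not part of this diff. A minimal sketch of the behavior the call appears to rely on (assumed semantics, not the real helper): keep invoking the function, report failures through the error callback, back off between attempts, and stop once the context is cancelled.

```go
package peeringsketch

import (
	"context"
	"time"
)

// retryLoopBackoff is a stand-in for the helper used by runPeeringSync.
// The backoff values are hypothetical; the real sync function also blocks
// internally on a state-store watch, so successful passes loop right back in.
func retryLoopBackoff(ctx context.Context, fn func() error, onErr func(error)) {
	wait := 250 * time.Millisecond
	const maxWait = 30 * time.Second
	for {
		if err := fn(); err != nil {
			onErr(err)
			if wait *= 2; wait > maxWait {
				wait = maxWait
			}
		} else {
			wait = 250 * time.Millisecond
		}
		select {
		case <-ctx.Done():
			return
		case <-time.After(wait):
		}
	}
}
```
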
// syncPeeringsAndBlock is a long-running goroutine that is responsible for watching
|
||||
// changes to peerings in the state store and managing streams to those peers.
|
||||
func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger, cancelFns map[string]context.CancelFunc) error {
|
||||
state := s.fsm.State()
|
||||
|
||||
// Pull the state store contents and set up to block for changes.
|
||||
ws := memdb.NewWatchSet()
|
||||
ws.Add(state.AbandonCh())
|
||||
ws.Add(ctx.Done())
|
||||
|
||||
_, peers, err := state.PeeringList(ws, *structs.NodeEnterpriseMetaInPartition(structs.WildcardSpecifier))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(peering) Adjust this debug info.
|
||||
// Generate a UUID to trace different passes through this function.
|
||||
seq, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
s.logger.Debug("failed to generate sequence uuid while syncing peerings")
|
||||
}
|
||||
|
||||
logger.Trace("syncing new list of peers", "num_peers", len(peers), "sequence_id", seq)
|
||||
|
||||
// Stored tracks the unique set of peers that should be dialed.
|
||||
// It is used to reconcile the list of active streams.
|
||||
stored := make(map[string]struct{})
|
||||
|
||||
var merr *multierror.Error
|
||||
|
||||
// Create connections and streams to peers in the state store that do not have an active stream.
|
||||
for _, peer := range peers {
|
||||
logger.Trace("evaluating stored peer", "peer", peer.Name, "should_dial", peer.ShouldDial(), "sequence_id", seq)
|
||||
|
||||
if !peer.ShouldDial() {
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO(peering) Account for deleted peers that are still in the state store
|
||||
stored[peer.ID] = struct{}{}
|
||||
|
||||
status, found := s.peeringService.StreamStatus(peer.ID)
|
||||
|
||||
// TODO(peering): If there is new peering data and a connected stream, should we tear down the stream?
|
||||
// If the data in the updated token is bad, the user wouldn't know until the old servers/certs become invalid.
|
||||
// Alternatively we could do a basic Ping from the initiate peering endpoint to avoid dealing with that here.
|
||||
if found && status.Connected {
|
||||
// Nothing to do when we already have an active stream to the peer.
|
||||
continue
|
||||
}
|
||||
logger.Trace("ensuring stream to peer", "peer_id", peer.ID, "sequence_id", seq)
|
||||
|
||||
if cancel, ok := cancelFns[peer.ID]; ok {
|
||||
// If the peer is known but we're not connected, clean up the retry-er and start over.
|
||||
// There may be new data in the state store that would enable us to get out of an error state.
|
||||
logger.Trace("cancelling context to re-establish stream", "peer_id", peer.ID, "sequence_id", seq)
|
||||
cancel()
|
||||
}
|
||||
|
||||
if err := s.establishStream(ctx, logger, peer, cancelFns); err != nil {
|
||||
// TODO(peering): These errors should be reported in the peer status, otherwise they're only in the logs.
|
||||
// Lockable status isn't available here though. Could report it via the peering.Service?
|
||||
logger.Error("error establishing peering stream", "peer_id", peer.ID, "error", err)
|
||||
merr = multierror.Append(merr, err)
|
||||
|
||||
// Continue on errors to avoid one bad peering from blocking the establishment and cleanup of others.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
logger.Trace("checking connected streams", "streams", s.peeringService.ConnectedStreams(), "sequence_id", seq)
|
||||
|
||||
// Clean up active streams of peerings that were deleted from the state store.
|
||||
// TODO(peering): This is going to trigger shutting down peerings we generated a token for. Is that OK?
|
||||
for stream, doneCh := range s.peeringService.ConnectedStreams() {
|
||||
if _, ok := stored[stream]; ok {
|
||||
// Active stream is in the state store, nothing to do.
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case <-doneCh:
|
||||
// channel is closed, do nothing to avoid a panic
|
||||
default:
|
||||
logger.Trace("tearing down stream for deleted peer", "peer_id", stream, "sequence_id", seq)
|
||||
close(doneCh)
|
||||
}
|
||||
}
|
||||
|
||||
logger.Trace("blocking for changes", "sequence_id", seq)
|
||||
|
||||
// Block for any changes to the state store.
|
||||
ws.WatchCtx(ctx)
|
||||
|
||||
logger.Trace("unblocked", "sequence_id", seq)
|
||||
return merr.ErrorOrNil()
|
||||
}
|
||||
|
||||
func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer *pbpeering.Peering, cancelFns map[string]context.CancelFunc) error {
|
||||
tlsOption := grpc.WithInsecure()
|
||||
if len(peer.PeerCAPems) > 0 {
|
||||
var haveCerts bool
|
||||
pool := x509.NewCertPool()
|
||||
for _, pem := range peer.PeerCAPems {
|
||||
if !pool.AppendCertsFromPEM([]byte(pem)) {
|
||||
return fmt.Errorf("failed to parse PEM %s", pem)
|
||||
}
|
||||
if len(pem) > 0 {
|
||||
haveCerts = true
|
||||
}
|
||||
}
|
||||
if !haveCerts {
|
||||
return fmt.Errorf("failed to build cert pool from peer CA pems")
|
||||
}
|
||||
cfg := tls.Config{
|
||||
ServerName: peer.PeerServerName,
|
||||
RootCAs: pool,
|
||||
}
|
||||
tlsOption = grpc.WithTransportCredentials(credentials.NewTLS(&cfg))
|
||||
}
|
||||
|
||||
// Create a ring buffer to cycle through peer addresses in the retry loop below.
|
||||
buffer := ring.New(len(peer.PeerServerAddresses))
|
||||
for _, addr := range peer.PeerServerAddresses {
|
||||
buffer.Value = addr
|
||||
buffer = buffer.Next()
|
||||
}
|
||||
|
||||
logger.Trace("establishing stream to peer", "peer_id", peer.ID)
|
||||
|
||||
retryCtx, cancel := context.WithCancel(ctx)
|
||||
cancelFns[peer.ID] = cancel
|
||||
|
||||
// Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes.
|
||||
go retryLoopBackoff(retryCtx, func() error {
|
||||
// Try a new address on each iteration by advancing the ring buffer on errors.
|
||||
defer func() {
|
||||
buffer = buffer.Next()
|
||||
}()
|
||||
addr, ok := buffer.Value.(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("peer server address type %T is not a string", buffer.Value)
|
||||
}
|
||||
|
||||
logger.Trace("dialing peer", "peer_id", peer.ID, "addr", addr)
|
||||
conn, err := grpc.DialContext(retryCtx, addr,
|
||||
grpc.WithContextDialer(newPeerDialer(addr)),
|
||||
grpc.WithBlock(),
|
||||
tlsOption,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to dial: %w", err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
client := pbpeering.NewPeeringServiceClient(conn)
|
||||
stream, err := client.StreamResources(retryCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.peeringService.HandleStream(peer.ID, peer.PeerID, stream)
|
||||
if err == nil {
|
||||
// This will cancel the retry-er context, letting us break out of this loop when we want to shut down the stream.
|
||||
cancel()
|
||||
}
|
||||
return err
|
||||
|
||||
}, func(err error) {
|
||||
// TODO(peering): These errors should be reported in the peer status, otherwise they're only in the logs.
|
||||
// Lockable status isn't available here though. Could report it via the peering.Service?
|
||||
logger.Error("error managing peering stream", "peer_id", peer.ID, "error", err)
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func newPeerDialer(peerAddr string) func(context.Context, string) (net.Conn, error) {
|
||||
return func(ctx context.Context, addr string) (net.Conn, error) {
|
||||
d := net.Dialer{}
|
||||
conn, err := d.DialContext(ctx, "tcp", peerAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(peering): This is going to need to be revisited. This type uses the TLS settings configured on the agent, but
|
||||
// for peering we never want mutual TLS because the client peer doesn't share its CA cert.
|
||||
_, err = conn.Write([]byte{byte(pool.RPCGRPC)})
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
}
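The establishStream retry loop above cycles through peer.PeerServerAddresses with a container/ring buffer, advancing one slot per failed attempt. A minimal standalone sketch of that rotation pattern, using hypothetical addresses rather than anything from this change, looks like this:

```go
package main

import (
	"container/ring"
	"fmt"
)

func main() {
	// Hypothetical peer addresses; in establishStream these come from peer.PeerServerAddresses.
	addrs := []string{"10.0.0.1:8502", "10.0.0.2:8502", "10.0.0.3:8502"}

	// Fill one ring slot per address, exactly like the loop above.
	buf := ring.New(len(addrs))
	for _, a := range addrs {
		buf.Value = a
		buf = buf.Next()
	}

	// Each retry attempt reads the current slot and then advances, so repeated
	// failures walk through every address before wrapping around.
	for attempt := 0; attempt < 5; attempt++ {
		fmt.Printf("attempt %d -> %s\n", attempt, buf.Value.(string))
		buf = buf.Next()
	}
}
```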
|
197
agent/consul/leader_peering_test.go
Normal file
@ -0,0 +1,197 @@
|
||||
package consul
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/hashicorp/consul/agent/consul/state"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
)
|
||||
|
||||
func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
// TODO(peering): Configure with TLS
|
||||
_, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.NodeName = "s1.dc1"
|
||||
c.Datacenter = "dc1"
|
||||
c.TLSConfig.Domain = "consul"
|
||||
})
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create a peering by generating a token
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(),
|
||||
grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())),
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithBlock())
|
||||
require.NoError(t, err)
|
||||
defer conn.Close()
|
||||
|
||||
peeringClient := pbpeering.NewPeeringServiceClient(conn)
|
||||
|
||||
req := pbpeering.GenerateTokenRequest{
|
||||
PeerName: "my-peer-s2",
|
||||
}
|
||||
resp, err := peeringClient.GenerateToken(ctx, &req)
|
||||
require.NoError(t, err)
|
||||
|
||||
tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
var token structs.PeeringToken
|
||||
require.NoError(t, json.Unmarshal(tokenJSON, &token))
|
||||
|
||||
// S1 should not have a stream tracked for dc2 because s1 generated a token for my-peer-s2, and therefore needs to wait to be dialed.
|
||||
time.Sleep(1 * time.Second)
|
||||
_, found := s1.peeringService.StreamStatus(token.PeerID)
|
||||
require.False(t, found)
|
||||
|
||||
// Bring up s2 and store s1's token so that it attempts to dial.
|
||||
_, s2 := testServerWithConfig(t, func(c *Config) {
|
||||
c.NodeName = "s2.dc2"
|
||||
c.Datacenter = "dc2"
|
||||
c.PrimaryDatacenter = "dc2"
|
||||
})
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc2")
|
||||
|
||||
// Simulate a peering initiation event by writing a peering with data from a peering token.
|
||||
// Eventually the leader in dc2 should dial and connect to the leader in dc1.
|
||||
p := &pbpeering.Peering{
|
||||
Name: "my-peer-s1",
|
||||
PeerID: token.PeerID,
|
||||
PeerCAPems: token.CA,
|
||||
PeerServerName: token.ServerName,
|
||||
PeerServerAddresses: token.ServerAddresses,
|
||||
}
|
||||
require.True(t, p.ShouldDial())
|
||||
|
||||
// We maintain a pointer to the peering on the write so that we can get the ID without needing to re-query the state store.
|
||||
require.NoError(t, s2.fsm.State().PeeringWrite(1000, p))
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
status, found := s2.peeringService.StreamStatus(p.ID)
|
||||
require.True(r, found)
|
||||
require.True(r, status.Connected)
|
||||
})
|
||||
|
||||
// Delete the peering to trigger the termination sequence
|
||||
require.NoError(t, s2.fsm.State().PeeringDelete(2000, state.Query{
|
||||
Value: "my-peer-s1",
|
||||
}))
|
||||
s2.logger.Trace("deleted peering for my-peer-s1")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, found := s2.peeringService.StreamStatus(p.ID)
|
||||
require.False(r, found)
|
||||
})
|
||||
|
||||
// s1 should have also marked the peering as terminated.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, peering, err := s1.fsm.State().PeeringRead(nil, state.Query{
|
||||
Value: "my-peer-s2",
|
||||
})
|
||||
require.NoError(r, err)
|
||||
require.Equal(r, pbpeering.PeeringState_TERMINATED, peering.State)
|
||||
})
|
||||
}
|
||||
|
||||
func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
// TODO(peering): Configure with TLS
|
||||
_, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.NodeName = "s1.dc1"
|
||||
c.Datacenter = "dc1"
|
||||
c.TLSConfig.Domain = "consul"
|
||||
})
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// Create a peering by generating a token
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
conn, err := grpc.DialContext(ctx, s1.config.RPCAddr.String(),
|
||||
grpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())),
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithBlock())
|
||||
require.NoError(t, err)
|
||||
defer conn.Close()
|
||||
|
||||
peeringClient := pbpeering.NewPeeringServiceClient(conn)
|
||||
|
||||
req := pbpeering.GenerateTokenRequest{
|
||||
PeerName: "my-peer-s2",
|
||||
}
|
||||
resp, err := peeringClient.GenerateToken(ctx, &req)
|
||||
require.NoError(t, err)
|
||||
|
||||
tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken)
|
||||
require.NoError(t, err)
|
||||
|
||||
var token structs.PeeringToken
|
||||
require.NoError(t, json.Unmarshal(tokenJSON, &token))
|
||||
|
||||
// Bring up s2 and store s1's token so that it attempts to dial.
|
||||
_, s2 := testServerWithConfig(t, func(c *Config) {
|
||||
c.NodeName = "s2.dc2"
|
||||
c.Datacenter = "dc2"
|
||||
c.PrimaryDatacenter = "dc2"
|
||||
})
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc2")
|
||||
|
||||
// Simulate a peering initiation event by writing a peering with data from a peering token.
|
||||
// Eventually the leader in dc2 should dial and connect to the leader in dc1.
|
||||
p := &pbpeering.Peering{
|
||||
Name: "my-peer-s1",
|
||||
PeerID: token.PeerID,
|
||||
PeerCAPems: token.CA,
|
||||
PeerServerName: token.ServerName,
|
||||
PeerServerAddresses: token.ServerAddresses,
|
||||
}
|
||||
require.True(t, p.ShouldDial())
|
||||
|
||||
// We maintain a pointer to the peering on the write so that we can get the ID without needing to re-query the state store.
|
||||
require.NoError(t, s2.fsm.State().PeeringWrite(1000, p))
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
status, found := s2.peeringService.StreamStatus(p.ID)
|
||||
require.True(r, found)
|
||||
require.True(r, status.Connected)
|
||||
})
|
||||
|
||||
// Delete the peering from the server peer to trigger the termination sequence
|
||||
require.NoError(t, s1.fsm.State().PeeringDelete(2000, state.Query{
|
||||
Value: "my-peer-s2",
|
||||
}))
|
||||
s1.logger.Trace("deleted peering for my-peer-s2")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, found := s1.peeringService.StreamStatus(p.PeerID)
|
||||
require.False(r, found)
|
||||
})
|
||||
|
||||
// s2 should have received the termination message and updated the peering state
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, peering, err := s2.fsm.State().PeeringRead(nil, state.Query{
|
||||
Value: "my-peer-s1",
|
||||
})
|
||||
require.NoError(r, err)
|
||||
require.Equal(r, pbpeering.PeeringState_TERMINATED, peering.State)
|
||||
})
|
||||
}
|
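The two lifecycle tests above rely on sdk/testutil/retry to poll for eventually-consistent stream and peering state. A small illustration of that polling idiom, with a hypothetical condition standing in for the real stream-status checks:

```go
package example

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/stretchr/testify/require"
)

// TestRetryIdiom shows the retry.Run pattern used by the peering tests: the
// callback is re-run with backoff until it stops failing, and assertions go
// against *retry.R instead of *testing.T.
func TestRetryIdiom(t *testing.T) {
	ready := make(chan struct{})
	go func() { close(ready) }() // hypothetical asynchronous work completing

	retry.Run(t, func(r *retry.R) {
		select {
		case <-ready:
		default:
			r.Fatal("not ready yet")
		}
		require.True(r, true)
	})
}
```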
@ -51,7 +51,7 @@ func TestLeader_RegisterMember(t *testing.T) {
|
||||
// Client should be registered
|
||||
state := s1.fsm.State()
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -61,7 +61,7 @@ func TestLeader_RegisterMember(t *testing.T) {
|
||||
})
|
||||
|
||||
// Should have a check
|
||||
_, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil)
|
||||
_, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -80,7 +80,7 @@ func TestLeader_RegisterMember(t *testing.T) {
|
||||
|
||||
// Server should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(s1.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(s1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -90,7 +90,7 @@ func TestLeader_RegisterMember(t *testing.T) {
|
||||
})
|
||||
|
||||
// Service should be registered
|
||||
_, services, err := state.NodeServices(nil, s1.config.NodeName, nil)
|
||||
_, services, err := state.NodeServices(nil, s1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -129,7 +129,7 @@ func TestLeader_FailedMember(t *testing.T) {
|
||||
// Should be registered
|
||||
state := s1.fsm.State()
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -139,7 +139,7 @@ func TestLeader_FailedMember(t *testing.T) {
|
||||
})
|
||||
|
||||
// Should have a check
|
||||
_, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil)
|
||||
_, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -154,7 +154,7 @@ func TestLeader_FailedMember(t *testing.T) {
|
||||
}
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, checks, err = state.NodeChecks(nil, c1.config.NodeName, nil)
|
||||
_, checks, err = state.NodeChecks(nil, c1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -193,7 +193,7 @@ func TestLeader_LeftMember(t *testing.T) {
|
||||
|
||||
// Should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil, "")
|
||||
require.NoError(r, err)
|
||||
require.NotNil(r, node, "client not registered")
|
||||
})
|
||||
@ -204,7 +204,7 @@ func TestLeader_LeftMember(t *testing.T) {
|
||||
|
||||
// Should be deregistered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil, "")
|
||||
require.NoError(r, err)
|
||||
require.Nil(r, node, "client still registered")
|
||||
})
|
||||
@ -236,7 +236,7 @@ func TestLeader_ReapMember(t *testing.T) {
|
||||
|
||||
// Should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil, "")
|
||||
require.NoError(r, err)
|
||||
require.NotNil(r, node, "client not registered")
|
||||
})
|
||||
@ -257,7 +257,7 @@ func TestLeader_ReapMember(t *testing.T) {
|
||||
// anti-entropy will put it back.
|
||||
reaped := false
|
||||
for start := time.Now(); time.Since(start) < 5*time.Second; {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil, "")
|
||||
require.NoError(t, err)
|
||||
if node == nil {
|
||||
reaped = true
|
||||
@ -296,7 +296,7 @@ func TestLeader_ReapOrLeftMember_IgnoreSelf(t *testing.T) {
|
||||
|
||||
// Should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(nodeName, nil)
|
||||
_, node, err := state.GetNode(nodeName, nil, "")
|
||||
require.NoError(r, err)
|
||||
require.NotNil(r, node, "server not registered")
|
||||
})
|
||||
@ -318,7 +318,7 @@ func TestLeader_ReapOrLeftMember_IgnoreSelf(t *testing.T) {
|
||||
// anti-entropy will put it back if it did get deleted.
|
||||
reaped := false
|
||||
for start := time.Now(); time.Since(start) < 5*time.Second; {
|
||||
_, node, err := state.GetNode(nodeName, nil)
|
||||
_, node, err := state.GetNode(nodeName, nil, "")
|
||||
require.NoError(t, err)
|
||||
if node == nil {
|
||||
reaped = true
|
||||
@ -402,7 +402,7 @@ func TestLeader_CheckServersMeta(t *testing.T) {
|
||||
}
|
||||
// s3 should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, service, err := state.NodeService(s3.config.NodeName, "consul", &consulService.EnterpriseMeta)
|
||||
_, service, err := state.NodeService(s3.config.NodeName, "consul", &consulService.EnterpriseMeta, "")
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -438,7 +438,7 @@ func TestLeader_CheckServersMeta(t *testing.T) {
|
||||
if err != nil {
|
||||
r.Fatalf("Unexpected error :%v", err)
|
||||
}
|
||||
_, service, err := state.NodeService(s3.config.NodeName, "consul", &consulService.EnterpriseMeta)
|
||||
_, service, err := state.NodeService(s3.config.NodeName, "consul", &consulService.EnterpriseMeta, "")
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -506,7 +506,7 @@ func TestLeader_ReapServer(t *testing.T) {
|
||||
|
||||
// s3 should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(s3.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(s3.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -527,7 +527,7 @@ func TestLeader_ReapServer(t *testing.T) {
|
||||
}
|
||||
// s3 should be deregistered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(s3.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(s3.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -582,7 +582,7 @@ func TestLeader_Reconcile_ReapMember(t *testing.T) {
|
||||
|
||||
// Node should be gone
|
||||
state := s1.fsm.State()
|
||||
_, node, err := state.GetNode("no-longer-around", nil)
|
||||
_, node, err := state.GetNode("no-longer-around", nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -615,7 +615,7 @@ func TestLeader_Reconcile(t *testing.T) {
|
||||
|
||||
// Should not be registered
|
||||
state := s1.fsm.State()
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -625,7 +625,7 @@ func TestLeader_Reconcile(t *testing.T) {
|
||||
|
||||
// Should be registered
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -657,7 +657,7 @@ func TestLeader_Reconcile_Races(t *testing.T) {
|
||||
state := s1.fsm.State()
|
||||
var nodeAddr string
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -693,7 +693,7 @@ func TestLeader_Reconcile_Races(t *testing.T) {
|
||||
if err := s1.reconcile(); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(c1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -707,7 +707,7 @@ func TestLeader_Reconcile_Races(t *testing.T) {
|
||||
// Fail the member and wait for the health to go critical.
|
||||
c1.Shutdown()
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil)
|
||||
_, checks, err := state.NodeChecks(nil, c1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -720,7 +720,7 @@ func TestLeader_Reconcile_Races(t *testing.T) {
|
||||
})
|
||||
|
||||
// Make sure the metadata didn't get clobbered.
|
||||
_, node, err = state.GetNode(c1.config.NodeName, nil)
|
||||
_, node, err = state.GetNode(c1.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -835,7 +835,7 @@ func TestLeader_LeftLeader(t *testing.T) {
|
||||
// Verify the old leader is deregistered
|
||||
state := remain.fsm.State()
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
_, node, err := state.GetNode(leader.config.NodeName, nil)
|
||||
_, node, err := state.GetNode(leader.config.NodeName, nil, "")
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
@ -2336,7 +2336,7 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, node, err := state.NodeService("bar", "tgate1", nil)
|
||||
_, node, err := state.NodeService("bar", "tgate1", nil, "")
|
||||
require.NoError(t, err)
|
||||
sn := structs.ServiceName{Name: "api"}
|
||||
key := structs.ServiceGatewayVirtualIPTag(sn)
|
||||
|
126
agent/consul/peering_backend.go
Normal file
@ -0,0 +1,126 @@
|
||||
package consul
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/hashicorp/consul/agent/consul/stream"
|
||||
"github.com/hashicorp/consul/agent/rpc/peering"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
)
|
||||
|
||||
type peeringBackend struct {
|
||||
srv *Server
|
||||
connPool GRPCClientConner
|
||||
apply *peeringApply
|
||||
}
|
||||
|
||||
var _ peering.Backend = (*peeringBackend)(nil)
|
||||
|
||||
// NewPeeringBackend returns a peering.Backend implementation that is bound to the given server.
|
||||
func NewPeeringBackend(srv *Server, connPool GRPCClientConner) peering.Backend {
|
||||
return &peeringBackend{
|
||||
srv: srv,
|
||||
connPool: connPool,
|
||||
apply: &peeringApply{srv: srv},
|
||||
}
|
||||
}
|
||||
|
||||
func (b *peeringBackend) Forward(info structs.RPCInfo, f func(*grpc.ClientConn) error) (handled bool, err error) {
|
||||
// Only forward the request if the dc in the request matches the server's datacenter.
|
||||
if info.RequestDatacenter() != "" && info.RequestDatacenter() != b.srv.config.Datacenter {
|
||||
return false, fmt.Errorf("requests to generate peering tokens cannot be forwarded to remote datacenters")
|
||||
}
|
||||
return b.srv.ForwardGRPC(b.connPool, info, f)
|
||||
}
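The Forward implementation above only lets a request proceed (and be forwarded to the local leader) when the request's datacenter is empty or matches the server's own. A compact sketch of just that guard, under the assumption that this is the only cross-datacenter rule involved:

```go
package example

import "fmt"

// guardDatacenter mirrors the check in peeringBackend.Forward: a request that
// names a different datacenter is rejected outright instead of being forwarded.
func guardDatacenter(requestDC, localDC string) error {
	if requestDC != "" && requestDC != localDC {
		return fmt.Errorf("requests to generate peering tokens cannot be forwarded to remote datacenters")
	}
	return nil
}
```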
|
||||
|
||||
// GetAgentCACertificates gets the server's raw CA data from its TLS Configurator.
|
||||
func (b *peeringBackend) GetAgentCACertificates() ([]string, error) {
|
||||
// TODO(peering): handle empty CA pems
|
||||
return b.srv.tlsConfigurator.ManualCAPems(), nil
|
||||
}
|
||||
|
||||
// GetServerAddresses looks up server node addresses from the state store.
|
||||
func (b *peeringBackend) GetServerAddresses() ([]string, error) {
|
||||
state := b.srv.fsm.State()
|
||||
_, nodes, err := state.ServiceNodes(nil, "consul", structs.DefaultEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var addrs []string
|
||||
for _, node := range nodes {
|
||||
addrs = append(addrs, node.Address+":"+strconv.Itoa(node.ServicePort))
|
||||
}
|
||||
return addrs, nil
|
||||
}
|
||||
|
||||
// GetServerName returns the SNI to be returned in the peering token data which
|
||||
// will be used by peers when establishing peering connections over TLS.
|
||||
func (b *peeringBackend) GetServerName() string {
|
||||
return b.srv.tlsConfigurator.ServerSNI(b.srv.config.Datacenter, "")
|
||||
}
|
||||
|
||||
// EncodeToken encodes a peering token as a base64-encoded representation of JSON (for now).
|
||||
func (b *peeringBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error) {
|
||||
jsonToken, err := json.Marshal(tok)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal token: %w", err)
|
||||
}
|
||||
return []byte(base64.StdEncoding.EncodeToString(jsonToken)), nil
|
||||
}
|
||||
|
||||
// DecodeToken decodes a peering token from a base64-encoded JSON byte array (for now).
|
||||
func (b *peeringBackend) DecodeToken(tokRaw []byte) (*structs.PeeringToken, error) {
|
||||
tokJSONRaw, err := base64.StdEncoding.DecodeString(string(tokRaw))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode token: %w", err)
|
||||
}
|
||||
var tok structs.PeeringToken
|
||||
if err := json.Unmarshal(tokJSONRaw, &tok); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tok, nil
|
||||
}
|
||||
|
||||
func (s peeringBackend) Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) {
|
||||
return s.srv.publisher.Subscribe(req)
|
||||
}
|
||||
|
||||
func (b *peeringBackend) Store() peering.Store {
|
||||
return b.srv.fsm.State()
|
||||
}
|
||||
|
||||
func (b *peeringBackend) Apply() peering.Apply {
|
||||
return b.apply
|
||||
}
|
||||
|
||||
func (b *peeringBackend) EnterpriseCheckPartitions(partition string) error {
|
||||
return b.enterpriseCheckPartitions(partition)
|
||||
}
|
||||
|
||||
type peeringApply struct {
|
||||
srv *Server
|
||||
}
|
||||
|
||||
func (a *peeringApply) PeeringWrite(req *pbpeering.PeeringWriteRequest) error {
|
||||
_, err := a.srv.raftApplyProtobuf(structs.PeeringWriteType, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *peeringApply) PeeringDelete(req *pbpeering.PeeringDeleteRequest) error {
|
||||
_, err := a.srv.raftApplyProtobuf(structs.PeeringDeleteType, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(peering): This needs RPC metrics interceptor since it's not triggered by an RPC.
|
||||
func (a *peeringApply) PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error {
|
||||
_, err := a.srv.raftApplyProtobuf(structs.PeeringTerminateByIDType, req)
|
||||
return err
|
||||
}
|
||||
|
||||
var _ peering.Apply = (*peeringApply)(nil)
|
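EncodeToken and DecodeToken above are just base64-wrapped JSON for now, so a round trip should be lossless. A hedged sketch of that symmetry with a stand-in struct and made-up values (the real structs.PeeringToken may carry more fields):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// peeringToken is a reduced stand-in for structs.PeeringToken, for illustration only.
type peeringToken struct {
	CA              []string
	ServerAddresses []string
	ServerName      string
	PeerID          string
}

func main() {
	tok := peeringToken{
		ServerAddresses: []string{"10.0.0.1:8502"}, // hypothetical address
		ServerName:      "server.dc1.consul",       // hypothetical SNI
		PeerID:          "3a3cb690-0d9f-42a6-9b26-7e0d6a8d79c1",
	}

	// Encode: marshal to JSON, then base64 — matching peeringBackend.EncodeToken.
	j, err := json.Marshal(tok)
	if err != nil {
		panic(err)
	}
	encoded := base64.StdEncoding.EncodeToString(j)

	// Decode: base64 back to JSON, then unmarshal — matching DecodeToken.
	raw, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	var out peeringToken
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}

	fmt.Println(out.ServerName == tok.ServerName) // prints true
}
```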
15
agent/consul/peering_backend_oss.go
Normal file
@ -0,0 +1,15 @@
|
||||
//go:build !consulent
|
||||
// +build !consulent
|
||||
|
||||
package consul
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func (b *peeringBackend) enterpriseCheckPartitions(partition string) error {
|
||||
if partition != "" {
|
||||
return fmt.Errorf("Partitions are a Consul Enterprise feature")
|
||||
}
|
||||
return nil
|
||||
}
|
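The //go:build !consulent constraint above keeps this partition check out of enterprise builds. As a rough sketch only, an enterprise-side counterpart would carry the inverse constraint and relax the check; the file contents below are an assumption for illustration and are not part of this diff:

```go
//go:build consulent
// +build consulent

package consul

// enterpriseCheckPartitions in an enterprise build would accept a non-empty
// partition; the real enterprise implementation is not included in this diff.
func (b *peeringBackend) enterpriseCheckPartitions(partition string) error {
	return nil
}
```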
51
agent/consul/peering_backend_oss_test.go
Normal file
@ -0,0 +1,51 @@
|
||||
//go:build !consulent
|
||||
// +build !consulent
|
||||
|
||||
package consul
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
gogrpc "google.golang.org/grpc"
|
||||
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
)
|
||||
|
||||
func TestPeeringBackend_RejectsPartition(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
_, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.Datacenter = "dc1"
|
||||
c.Bootstrap = true
|
||||
})
|
||||
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
// make a grpc client to dial s1 directly
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
conn, err := gogrpc.DialContext(ctx, s1.config.RPCAddr.String(),
|
||||
gogrpc.WithContextDialer(newServerDialer(s1.config.RPCAddr.String())),
|
||||
gogrpc.WithInsecure(),
|
||||
gogrpc.WithBlock())
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { conn.Close() })
|
||||
|
||||
peeringClient := pbpeering.NewPeeringServiceClient(conn)
|
||||
|
||||
req := pbpeering.GenerateTokenRequest{
|
||||
Datacenter: "dc1",
|
||||
Partition: "test",
|
||||
}
|
||||
_, err = peeringClient.GenerateToken(ctx, &req)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "Partitions are a Consul Enterprise feature")
|
||||
}
|
115
agent/consul/peering_backend_test.go
Normal file
@ -0,0 +1,115 @@
|
||||
package consul
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
gogrpc "google.golang.org/grpc"
|
||||
|
||||
"github.com/hashicorp/consul/agent/pool"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
)
|
||||
|
||||
func TestPeeringBackend_DoesNotForwardToDifferentDC(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
_, s1 := testServerDC(t, "dc1")
|
||||
_, s2 := testServerDC(t, "dc2")
|
||||
|
||||
joinWAN(t, s2, s1)
|
||||
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
testrpc.WaitForLeader(t, s2.RPC, "dc2")
|
||||
|
||||
// make a grpc client to dial s2 directly
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
conn, err := gogrpc.DialContext(ctx, s2.config.RPCAddr.String(),
|
||||
gogrpc.WithContextDialer(newServerDialer(s2.config.RPCAddr.String())),
|
||||
gogrpc.WithInsecure(),
|
||||
gogrpc.WithBlock())
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { conn.Close() })
|
||||
|
||||
peeringClient := pbpeering.NewPeeringServiceClient(conn)
|
||||
|
||||
// GenerateToken request should fail against dc1, because we are dialing dc2. The GenerateToken request should never be forwarded across datacenters.
|
||||
req := pbpeering.GenerateTokenRequest{
|
||||
PeerName: "peer1-usw1",
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
_, err = peeringClient.GenerateToken(ctx, &req)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "requests to generate peering tokens cannot be forwarded to remote datacenters")
|
||||
}
|
||||
|
||||
func TestPeeringBackend_ForwardToLeader(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
_, conf1 := testServerConfig(t)
|
||||
server1, err := newServer(t, conf1)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, conf2 := testServerConfig(t)
|
||||
conf2.Bootstrap = false
|
||||
server2, err := newServer(t, conf2)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Join a 2nd server (not the leader)
|
||||
testrpc.WaitForLeader(t, server1.RPC, "dc1")
|
||||
joinLAN(t, server2, server1)
|
||||
testrpc.WaitForLeader(t, server2.RPC, "dc1")
|
||||
|
||||
// Make a write call to server2 and make sure it gets forwarded to server1
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
t.Cleanup(cancel)
|
||||
|
||||
// Dial server2 directly
|
||||
conn, err := gogrpc.DialContext(ctx, server2.config.RPCAddr.String(),
|
||||
gogrpc.WithContextDialer(newServerDialer(server2.config.RPCAddr.String())),
|
||||
gogrpc.WithInsecure(),
|
||||
gogrpc.WithBlock())
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { conn.Close() })
|
||||
|
||||
peeringClient := pbpeering.NewPeeringServiceClient(conn)
|
||||
|
||||
runStep(t, "forward a write", func(t *testing.T) {
|
||||
// Do the grpc Write call to server2
|
||||
req := pbpeering.GenerateTokenRequest{
|
||||
Datacenter: "dc1",
|
||||
PeerName: "foo",
|
||||
}
|
||||
_, err := peeringClient.GenerateToken(ctx, &req)
|
||||
require.NoError(t, err)
|
||||
|
||||
// TODO(peering) check that state store is updated on leader, indicating a forwarded request after state store
|
||||
// is implemented.
|
||||
})
|
||||
}
|
||||
|
||||
func newServerDialer(serverAddr string) func(context.Context, string) (net.Conn, error) {
|
||||
return func(ctx context.Context, addr string) (net.Conn, error) {
|
||||
d := net.Dialer{}
|
||||
conn, err := d.DialContext(ctx, "tcp", serverAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = conn.Write([]byte{byte(pool.RPCGRPC)})
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return conn, nil
|
||||
}
|
||||
}
|
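newServerDialer here and newPeerDialer in leader_peering.go both write a single RPC-type byte (pool.RPCGRPC) before any gRPC traffic, so that Consul's multiplexed RPC port can route the raw TCP connection to the gRPC handler. A self-contained sketch of that framing, with the byte value assumed rather than taken from agent/pool:

```go
package example

import (
	"context"
	"net"
)

// rpcGRPC stands in for pool.RPCGRPC; the concrete value lives in agent/pool
// and is assumed here for illustration.
const rpcGRPC byte = 0x08

// dialWithTypeByte opens a TCP connection and writes the RPC-type byte first,
// which is how the multiplexed listener decides which protocol handler gets
// the connection.
func dialWithTypeByte(ctx context.Context, addr string) (net.Conn, error) {
	var d net.Dialer
	conn, err := d.DialContext(ctx, "tcp", addr)
	if err != nil {
		return nil, err
	}
	if _, err := conn.Write([]byte{rpcGRPC}); err != nil {
		conn.Close()
		return nil, err
	}
	return conn, nil
}
```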
@ -3,12 +3,12 @@ package prepared_query
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"sort"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestWalk_ServiceQuery(t *testing.T) {
|
||||
@ -42,6 +42,7 @@ func TestWalk_ServiceQuery(t *testing.T) {
|
||||
".Tags[0]:tag1",
|
||||
".Tags[1]:tag2",
|
||||
".Tags[2]:tag3",
|
||||
".PeerName:",
|
||||
}
|
||||
expected = append(expected, entMetaWalkFields...)
|
||||
sort.Strings(expected)
|
||||
|
@ -404,7 +404,7 @@ func (p *PreparedQuery) Execute(args *structs.PreparedQueryExecuteRequest,
|
||||
qs.Node = args.Agent.Node
|
||||
} else if qs.Node == "_ip" {
|
||||
if args.Source.Ip != "" {
|
||||
_, nodes, err := state.Nodes(nil, structs.NodeEnterpriseMetaInDefaultPartition())
|
||||
_, nodes, err := state.Nodes(nil, structs.NodeEnterpriseMetaInDefaultPartition(), structs.TODOPeerKeyword)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -534,7 +534,7 @@ func (p *PreparedQuery) execute(query *structs.PreparedQuery,
|
||||
f = state.CheckConnectServiceNodes
|
||||
}
|
||||
|
||||
_, nodes, err := f(nil, query.Service.Service, &query.Service.EnterpriseMeta)
|
||||
_, nodes, err := f(nil, query.Service.Service, &query.Service.EnterpriseMeta, query.Service.PeerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1374,6 +1374,10 @@ func (r isReadRequest) HasTimedOut(since time.Time, rpcHoldTimeout, maxQueryTime
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (r isReadRequest) Timeout(rpcHoldTimeout, maxQueryTime, defaultQueryTime time.Duration) time.Duration {
|
||||
return time.Duration(-1)
|
||||
}
|
||||
|
||||
func TestRPC_AuthorizeRaftRPC(t *testing.T) {
|
||||
caPEM, caPK, err := tlsutil.GenerateCA(tlsutil.CAOpts{Days: 5, Domain: "consul"})
|
||||
require.NoError(t, err)
|
||||
|
@ -16,24 +16,20 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/agent/rpc/middleware"
|
||||
|
||||
"github.com/hashicorp/go-version"
|
||||
"go.etcd.io/bbolt"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/consul-net-rpc/net/rpc"
|
||||
connlimit "github.com/hashicorp/go-connlimit"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/go-version"
|
||||
"github.com/hashicorp/raft"
|
||||
autopilot "github.com/hashicorp/raft-autopilot"
|
||||
raftboltdb "github.com/hashicorp/raft-boltdb/v2"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
"go.etcd.io/bbolt"
|
||||
"golang.org/x/time/rate"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/hashicorp/consul-net-rpc/net/rpc"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/authmethod"
|
||||
"github.com/hashicorp/consul/agent/consul/authmethod/ssoauth"
|
||||
@ -46,14 +42,18 @@ import (
|
||||
"github.com/hashicorp/consul/agent/grpc/private/services/subscribe"
|
||||
"github.com/hashicorp/consul/agent/grpc/public/services/connectca"
|
||||
"github.com/hashicorp/consul/agent/grpc/public/services/dataplane"
|
||||
"github.com/hashicorp/consul/agent/grpc/public/services/serverdiscovery"
|
||||
"github.com/hashicorp/consul/agent/metadata"
|
||||
"github.com/hashicorp/consul/agent/pool"
|
||||
"github.com/hashicorp/consul/agent/router"
|
||||
"github.com/hashicorp/consul/agent/rpc/middleware"
|
||||
"github.com/hashicorp/consul/agent/rpc/peering"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/agent/token"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/lib/routine"
|
||||
"github.com/hashicorp/consul/logging"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
"github.com/hashicorp/consul/proto/pbsubscribe"
|
||||
"github.com/hashicorp/consul/tlsutil"
|
||||
"github.com/hashicorp/consul/types"
|
||||
@ -123,6 +123,7 @@ const (
|
||||
intermediateCertRenewWatchRoutineName = "intermediate cert renew watch"
|
||||
backgroundCAInitializationRoutineName = "CA initialization"
|
||||
virtualIPCheckRoutineName = "virtual IP version check"
|
||||
peeringStreamsRoutineName = "streaming peering resources"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -355,6 +356,9 @@ type Server struct {
|
||||
// this into the Deps struct and created it much earlier on.
|
||||
publisher *stream.EventPublisher
|
||||
|
||||
// peering is a service used to handle peering streams.
|
||||
peeringService *peering.Service
|
||||
|
||||
// embedded struct to hold all the enterprise specific data
|
||||
EnterpriseServer
|
||||
}
|
||||
@ -677,8 +681,16 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve
|
||||
s.publicConnectCAServer.Register(s.publicGRPCServer)
|
||||
|
||||
dataplane.NewServer(dataplane.Config{
|
||||
GetStore: func() dataplane.StateStore { return s.FSM().State() },
|
||||
Logger: logger.Named("grpc-api.dataplane"),
|
||||
ACLResolver: plainACLResolver{s.ACLResolver},
|
||||
Datacenter: s.config.Datacenter,
|
||||
}).Register(s.publicGRPCServer)
|
||||
|
||||
serverdiscovery.NewServer(serverdiscovery.Config{
|
||||
Publisher: s.publisher,
|
||||
ACLResolver: plainACLResolver{s.ACLResolver},
|
||||
Logger: logger.Named("grpc-api.server-discovery"),
|
||||
}).Register(s.publicGRPCServer)
|
||||
|
||||
// Initialize private gRPC server.
|
||||
@ -721,12 +733,19 @@ func NewServer(config *Config, flat Deps, publicGRPCServer *grpc.Server) (*Serve
|
||||
}
|
||||
|
||||
func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler {
|
||||
p := peering.NewService(
|
||||
deps.Logger.Named("grpc-api.peering"),
|
||||
NewPeeringBackend(s, deps.GRPCConnPool),
|
||||
)
|
||||
s.peeringService = p
|
||||
|
||||
register := func(srv *grpc.Server) {
|
||||
if config.RPCConfig.EnableStreaming {
|
||||
pbsubscribe.RegisterStateChangeSubscriptionServer(srv, subscribe.NewServer(
|
||||
&subscribeBackend{srv: s, connPool: deps.GRPCConnPool},
|
||||
deps.Logger.Named("grpc-api.subscription")))
|
||||
}
|
||||
pbpeering.RegisterPeeringServiceServer(srv, s.peeringService)
|
||||
s.registerEnterpriseGRPCServices(deps, srv)
|
||||
|
||||
// Note: this public gRPC service is also exposed on the private server to
|
||||
@ -774,7 +793,7 @@ func (s *Server) setupRaft() error {
|
||||
}()
|
||||
|
||||
var serverAddressProvider raft.ServerAddressProvider = nil
|
||||
if s.config.RaftConfig.ProtocolVersion >= 3 { //ServerAddressProvider needs server ids to work correctly, which is only supported in protocol version 3 or higher
|
||||
if s.config.RaftConfig.ProtocolVersion >= 3 { // ServerAddressProvider needs server ids to work correctly, which is only supported in protocol version 3 or higher
|
||||
serverAddressProvider = s.serverLookup
|
||||
}
|
||||
|
||||
@ -1554,6 +1573,8 @@ func computeRaftReloadableConfig(config ReloadableConfig) raft.ReloadableConfig
|
||||
TrailingLogs: defaultConf.RaftConfig.TrailingLogs,
|
||||
SnapshotInterval: defaultConf.RaftConfig.SnapshotInterval,
|
||||
SnapshotThreshold: defaultConf.RaftConfig.SnapshotThreshold,
|
||||
ElectionTimeout: defaultConf.RaftConfig.ElectionTimeout,
|
||||
HeartbeatTimeout: defaultConf.RaftConfig.HeartbeatTimeout,
|
||||
}
|
||||
if config.RaftSnapshotThreshold != 0 {
|
||||
raftCfg.SnapshotThreshold = uint64(config.RaftSnapshotThreshold)
|
||||
@ -1564,6 +1585,12 @@ func computeRaftReloadableConfig(config ReloadableConfig) raft.ReloadableConfig
|
||||
if config.RaftTrailingLogs != 0 {
|
||||
raftCfg.TrailingLogs = uint64(config.RaftTrailingLogs)
|
||||
}
|
||||
if config.HeartbeatTimeout >= 5*time.Millisecond {
|
||||
raftCfg.HeartbeatTimeout = config.HeartbeatTimeout
|
||||
}
|
||||
if config.ElectionTimeout >= 5*time.Millisecond {
|
||||
raftCfg.ElectionTimeout = config.ElectionTimeout
|
||||
}
|
||||
return raftCfg
|
||||
}
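The new ElectionTimeout and HeartbeatTimeout clauses above only take effect when the reloaded value clears a 5ms floor; otherwise the Raft defaults are kept. A tiny sketch of that clamping rule in isolation:

```go
package example

import "time"

// reloadableTimeout returns the candidate value when it meets the 5ms floor
// used by computeRaftReloadableConfig, and otherwise keeps the default.
func reloadableTimeout(def, candidate time.Duration) time.Duration {
	if candidate >= 5*time.Millisecond {
		return candidate
	}
	return def
}

// reloadableTimeout(time.Second, 300*time.Millisecond) == 300ms
// reloadableTimeout(time.Second, 0)                    == 1s (default kept)
```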
|
||||
|
||||
@ -1601,7 +1628,7 @@ func (s *Server) trackLeaderChanges() {
|
||||
continue
|
||||
}
|
||||
|
||||
s.grpcLeaderForwarder.UpdateLeaderAddr(s.config.Datacenter, string(leaderObs.Leader))
|
||||
s.grpcLeaderForwarder.UpdateLeaderAddr(s.config.Datacenter, string(leaderObs.LeaderAddr))
|
||||
case <-s.shutdownCh:
|
||||
s.raft.DeregisterObserver(observer)
|
||||
return
|
||||
|
@ -165,7 +165,7 @@ func testServerConfig(t *testing.T) (string, *Config) {
|
||||
|
||||
// TODO (slackpad) - We should be able to run all tests w/o this, but it
|
||||
// looks like several depend on it.
|
||||
config.RPCHoldTimeout = 5 * time.Second
|
||||
config.RPCHoldTimeout = 10 * time.Second
|
||||
|
||||
config.ConnectEnabled = true
|
||||
config.CAConfig = &structs.CAConfiguration{
|
||||
@ -237,6 +237,8 @@ func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *S
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() { srv.Shutdown() })
|
||||
|
||||
return dir, srv
|
||||
}
|
||||
|
||||
@ -257,6 +259,26 @@ func testACLServerWithConfig(t *testing.T, cb func(*Config), initReplicationToke
|
||||
return dir, srv, codec
|
||||
}
|
||||
|
||||
func testGRPCIntegrationServer(t *testing.T, cb func(*Config)) (*Server, *grpc.ClientConn) {
|
||||
_, srv, _ := testACLServerWithConfig(t, cb, false)
|
||||
|
||||
// Normally the gRPC server listener is created at the agent level and passed down into
|
||||
// the Server creation. For our tests, we need to create a listener here and serve the public gRPC server on it ourselves.
|
||||
ln, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
go func() {
|
||||
_ = srv.publicGRPCServer.Serve(ln)
|
||||
}()
|
||||
t.Cleanup(srv.publicGRPCServer.Stop)
|
||||
|
||||
conn, err := grpc.Dial(ln.Addr().String(), grpc.WithInsecure())
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() { _ = conn.Close() })
|
||||
|
||||
return srv, conn
|
||||
}
|
||||
|
||||
func newServer(t *testing.T, c *Config) (*Server, error) {
|
||||
return newServerWithDeps(t, c, newDefaultDeps(t, c))
|
||||
}
|
||||
@ -1836,6 +1858,8 @@ func TestServer_computeRaftReloadableConfig(t *testing.T) {
|
||||
SnapshotThreshold: defaults.SnapshotThreshold,
|
||||
SnapshotInterval: defaults.SnapshotInterval,
|
||||
TrailingLogs: defaults.TrailingLogs,
|
||||
ElectionTimeout: defaults.ElectionTimeout,
|
||||
HeartbeatTimeout: defaults.HeartbeatTimeout,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -1847,6 +1871,8 @@ func TestServer_computeRaftReloadableConfig(t *testing.T) {
|
||||
SnapshotThreshold: 123456,
|
||||
SnapshotInterval: defaults.SnapshotInterval,
|
||||
TrailingLogs: defaults.TrailingLogs,
|
||||
ElectionTimeout: defaults.ElectionTimeout,
|
||||
HeartbeatTimeout: defaults.HeartbeatTimeout,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -1858,6 +1884,8 @@ func TestServer_computeRaftReloadableConfig(t *testing.T) {
|
||||
SnapshotThreshold: defaults.SnapshotThreshold,
|
||||
SnapshotInterval: 13 * time.Minute,
|
||||
TrailingLogs: defaults.TrailingLogs,
|
||||
ElectionTimeout: defaults.ElectionTimeout,
|
||||
HeartbeatTimeout: defaults.HeartbeatTimeout,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -1869,6 +1897,8 @@ func TestServer_computeRaftReloadableConfig(t *testing.T) {
|
||||
SnapshotThreshold: defaults.SnapshotThreshold,
|
||||
SnapshotInterval: defaults.SnapshotInterval,
|
||||
TrailingLogs: 78910,
|
||||
ElectionTimeout: defaults.ElectionTimeout,
|
||||
HeartbeatTimeout: defaults.HeartbeatTimeout,
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -1877,11 +1907,15 @@ func TestServer_computeRaftReloadableConfig(t *testing.T) {
|
||||
RaftSnapshotThreshold: 123456,
|
||||
RaftSnapshotInterval: 13 * time.Minute,
|
||||
RaftTrailingLogs: 78910,
|
||||
ElectionTimeout: 300 * time.Millisecond,
|
||||
HeartbeatTimeout: 400 * time.Millisecond,
|
||||
},
|
||||
want: raft.ReloadableConfig{
|
||||
SnapshotThreshold: 123456,
|
||||
SnapshotInterval: 13 * time.Minute,
|
||||
TrailingLogs: 78910,
|
||||
ElectionTimeout: 300 * time.Millisecond,
|
||||
HeartbeatTimeout: 400 * time.Millisecond,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -239,6 +239,26 @@ func prefixIndexFromUUIDQuery(arg interface{}) ([]byte, error) {
|
||||
return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg)
|
||||
}
|
||||
|
||||
func prefixIndexFromUUIDWithPeerQuery(arg interface{}) ([]byte, error) {
|
||||
switch v := arg.(type) {
|
||||
case Query:
|
||||
var b indexBuilder
|
||||
peername := v.PeerOrEmpty()
|
||||
if peername == "" {
|
||||
b.String(structs.LocalPeerKeyword)
|
||||
} else {
|
||||
b.String(strings.ToLower(peername))
|
||||
}
|
||||
uuidBytes, err := variableLengthUUIDStringToBytes(v.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return append(b.Bytes(), uuidBytes...), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg)
|
||||
}
|
||||
|
||||
func multiIndexPolicyFromACLRole(raw interface{}) ([][]byte, error) {
|
||||
role, ok := raw.(*structs.ACLRole)
|
||||
if !ok {
|
||||
|
File diff suppressed because it is too large
@ -9,6 +9,7 @@ import (
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/stream"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/proto/pbservice"
|
||||
"github.com/hashicorp/consul/proto/pbsubscribe"
|
||||
)
|
||||
|
||||
@ -17,33 +18,13 @@ import (
|
||||
type EventSubjectService struct {
|
||||
Key string
|
||||
EnterpriseMeta acl.EnterpriseMeta
|
||||
PeerName string
|
||||
|
||||
overrideKey string
|
||||
overrideNamespace string
|
||||
overridePartition string
|
||||
}
|
||||
|
||||
// String satisfies the stream.Subject interface.
|
||||
func (s EventSubjectService) String() string {
|
||||
partition := s.EnterpriseMeta.PartitionOrDefault()
|
||||
if v := s.overridePartition; v != "" {
|
||||
partition = strings.ToLower(v)
|
||||
}
|
||||
|
||||
namespace := s.EnterpriseMeta.NamespaceOrDefault()
|
||||
if v := s.overrideNamespace; v != "" {
|
||||
namespace = strings.ToLower(v)
|
||||
}
|
||||
|
||||
key := s.Key
|
||||
if v := s.overrideKey; v != "" {
|
||||
key = v
|
||||
}
|
||||
key = strings.ToLower(key)
|
||||
|
||||
return partition + "/" + namespace + "/" + key
|
||||
}
|
||||
|
||||
// EventPayloadCheckServiceNode is used as the Payload for a stream.Event to
|
||||
// indicates changes to a CheckServiceNode for service health.
|
||||
//
|
||||
@ -62,6 +43,7 @@ type EventPayloadCheckServiceNode struct {
|
||||
}
|
||||
|
||||
func (e EventPayloadCheckServiceNode) HasReadPermission(authz acl.Authorizer) bool {
|
||||
// TODO(peering): figure out how authz works for peered data
|
||||
return e.Value.CanRead(authz) == acl.Allow
|
||||
}
|
||||
|
||||
@ -76,6 +58,31 @@ func (e EventPayloadCheckServiceNode) Subject() stream.Subject {
|
||||
}
|
||||
}
|
||||
|
||||
func (e EventPayloadCheckServiceNode) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event {
|
||||
return &pbsubscribe.Event{
|
||||
Index: idx,
|
||||
Payload: &pbsubscribe.Event_ServiceHealth{
|
||||
ServiceHealth: &pbsubscribe.ServiceHealthUpdate{
|
||||
Op: e.Op,
|
||||
CheckServiceNode: pbservice.NewCheckServiceNodeFromStructs(e.Value),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func PBToStreamSubscribeRequest(req *pbsubscribe.SubscribeRequest, entMeta acl.EnterpriseMeta) *stream.SubscribeRequest {
|
||||
return &stream.SubscribeRequest{
|
||||
Topic: req.Topic,
|
||||
Subject: EventSubjectService{
|
||||
Key: req.Key,
|
||||
EnterpriseMeta: entMeta,
|
||||
PeerName: req.PeerName,
|
||||
},
|
||||
Token: req.Token,
|
||||
Index: req.Index,
|
||||
}
|
||||
}
|
||||
|
||||
// serviceHealthSnapshot returns a stream.SnapshotFunc that provides a snapshot
|
||||
// of stream.Events that describe the current state of a service health query.
|
||||
func (s *Store) ServiceHealthSnapshot(req stream.SubscribeRequest, buf stream.SnapshotAppender) (index uint64, err error) {
|
||||
@ -89,7 +96,7 @@ func (s *Store) ServiceHealthSnapshot(req stream.SubscribeRequest, buf stream.Sn
|
||||
return 0, fmt.Errorf("expected SubscribeRequest.Subject to be a: state.EventSubjectService, was a: %T", req.Subject)
|
||||
}
|
||||
|
||||
idx, nodes, err := checkServiceNodesTxn(tx, nil, subject.Key, connect, &subject.EnterpriseMeta)
|
||||
idx, nodes, err := checkServiceNodesTxn(tx, nil, subject.Key, connect, &subject.EnterpriseMeta, subject.PeerName)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -127,6 +134,7 @@ type nodeServiceTuple struct {
|
||||
Node string
|
||||
ServiceID string
|
||||
EntMeta acl.EnterpriseMeta
|
||||
PeerName string
|
||||
}
|
||||
|
||||
func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTuple {
|
||||
@ -134,6 +142,7 @@ func newNodeServiceTupleFromServiceNode(sn *structs.ServiceNode) nodeServiceTupl
|
||||
Node: strings.ToLower(sn.Node),
|
||||
ServiceID: sn.ServiceID,
|
||||
EntMeta: sn.EnterpriseMeta,
|
||||
PeerName: sn.PeerName,
|
||||
}
|
||||
}
|
||||
|
||||
@ -142,6 +151,7 @@ func newNodeServiceTupleFromServiceHealthCheck(hc *structs.HealthCheck) nodeServ
|
||||
Node: strings.ToLower(hc.Node),
|
||||
ServiceID: hc.ServiceID,
|
||||
EntMeta: hc.EnterpriseMeta,
|
||||
PeerName: hc.PeerName,
|
||||
}
|
||||
}
|
||||
|
||||
@ -153,6 +163,7 @@ type serviceChange struct {
|
||||
type nodeTuple struct {
|
||||
Node string
|
||||
Partition string
|
||||
PeerName string
|
||||
}
|
||||
|
||||
var serviceChangeIndirect = serviceChange{changeType: changeIndirect}
|
||||
@ -286,7 +297,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
|
||||
}
|
||||
// Rebuild events for all services on this node
|
||||
es, err := newServiceHealthEventsForNode(tx, changes.Index, node.Node,
|
||||
structs.WildcardEnterpriseMetaInPartition(node.Partition))
|
||||
structs.WildcardEnterpriseMetaInPartition(node.Partition), node.PeerName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -342,6 +353,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
|
||||
q := Query{
|
||||
Value: gs.Gateway.Name,
|
||||
EnterpriseMeta: gatewayName.EnterpriseMeta,
|
||||
PeerName: structs.TODOPeerKeyword,
|
||||
}
|
||||
_, nodes, err := serviceNodesTxn(tx, nil, indexService, q)
|
||||
if err != nil {
|
||||
@ -504,6 +516,8 @@ func connectEventsByServiceKind(tx ReadTxn, origEvent stream.Event) ([]stream.Ev
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
var result []stream.Event
|
||||
|
||||
// TODO(peering): handle terminating gateways somehow
|
||||
|
||||
sn := structs.ServiceName{
|
||||
Name: node.Service.Service,
|
||||
EnterpriseMeta: node.Service.EnterpriseMeta,
|
||||
@ -551,16 +565,17 @@ func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNod
|
||||
// given node. This mirrors some of the logic in the oddly-named
|
||||
// parseCheckServiceNodes but is more efficient since we know they are all on
|
||||
// the same node.
|
||||
func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta *acl.EnterpriseMeta) ([]stream.Event, error) {
|
||||
func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta *acl.EnterpriseMeta, peerName string) ([]stream.Event, error) {
|
||||
services, err := tx.Get(tableServices, indexNode, Query{
|
||||
Value: node,
|
||||
EnterpriseMeta: *entMeta,
|
||||
PeerName: peerName,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n, checksFunc, err := getNodeAndChecks(tx, node, entMeta)
|
||||
n, checksFunc, err := getNodeAndChecks(tx, node, entMeta, peerName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -578,11 +593,12 @@ func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string, entMeta
|
||||
|
||||
// getNodeAndChecks returns the node structure and a function that returns
|
||||
// the full list of checks for a specific service on that node.
|
||||
func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta) (*structs.Node, serviceChecksFunc, error) {
|
||||
func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta, peerName string) (*structs.Node, serviceChecksFunc, error) {
|
||||
// Fetch the node
|
||||
nodeRaw, err := tx.First(tableNodes, indexID, Query{
|
||||
Value: node,
|
||||
EnterpriseMeta: *entMeta,
|
||||
PeerName: peerName,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@ -595,6 +611,7 @@ func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta) (*st
|
||||
iter, err := tx.Get(tableChecks, indexNode, Query{
|
||||
Value: node,
|
||||
EnterpriseMeta: *entMeta,
|
||||
PeerName: peerName,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@ -629,7 +646,7 @@ func getNodeAndChecks(tx ReadTxn, node string, entMeta *acl.EnterpriseMeta) (*st
|
||||
type serviceChecksFunc func(serviceID string) structs.HealthChecks
|
||||
|
||||
func newServiceHealthEventForService(tx ReadTxn, idx uint64, tuple nodeServiceTuple) (stream.Event, error) {
|
||||
n, checksFunc, err := getNodeAndChecks(tx, tuple.Node, &tuple.EntMeta)
|
||||
n, checksFunc, err := getNodeAndChecks(tx, tuple.Node, &tuple.EntMeta, tuple.PeerName)
|
||||
if err != nil {
|
||||
return stream.Event{}, err
|
||||
}
|
||||
@ -638,6 +655,7 @@ func newServiceHealthEventForService(tx ReadTxn, idx uint64, tuple nodeServiceTu
|
||||
EnterpriseMeta: tuple.EntMeta,
|
||||
Node: tuple.Node,
|
||||
Service: tuple.ServiceID,
|
||||
PeerName: tuple.PeerName,
|
||||
})
|
||||
if err != nil {
|
||||
return stream.Event{}, err
|
||||
@ -690,6 +708,7 @@ func newServiceHealthEventDeregister(idx uint64, sn *structs.ServiceNode) stream
|
||||
Node: &structs.Node{
|
||||
Node: sn.Node,
|
||||
Partition: entMeta.PartitionOrEmpty(),
|
||||
PeerName: sn.PeerName,
|
||||
},
|
||||
Service: sn.ToNodeService(),
|
||||
}
|
||||
|
@ -13,6 +13,7 @@ func (nst nodeServiceTuple) nodeTuple() nodeTuple {
|
||||
return nodeTuple{
|
||||
Node: strings.ToLower(nst.Node),
|
||||
Partition: "",
|
||||
PeerName: nst.PeerName,
|
||||
}
|
||||
}
|
||||
|
||||
@ -20,6 +21,7 @@ func newNodeTupleFromNode(node *structs.Node) nodeTuple {
|
||||
return nodeTuple{
|
||||
Node: strings.ToLower(node.Node),
|
||||
Partition: "",
|
||||
PeerName: node.PeerName,
|
||||
}
|
||||
}
|
||||
|
||||
@ -27,5 +29,20 @@ func newNodeTupleFromHealthCheck(hc *structs.HealthCheck) nodeTuple {
|
||||
return nodeTuple{
|
||||
Node: strings.ToLower(hc.Node),
|
||||
Partition: "",
|
||||
PeerName: hc.PeerName,
|
||||
}
|
||||
}
|
||||
|
||||
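The tuples above lowercase the node name and now carry PeerName so events can be de-duplicated per (node, partition, peer). A minimal sketch of that idea; the nodeTuple struct below is a simplified stand-in, not Consul's type:

```go
package main

import (
	"fmt"
	"strings"
)

// nodeTuple is a simplified stand-in for the tuple type used above.
type nodeTuple struct {
	Node      string
	Partition string
	PeerName  string
}

func main() {
	seen := make(map[nodeTuple]struct{})
	for _, n := range []nodeTuple{
		{Node: "NoDe1"},
		{Node: "node1"},
		{Node: "node1", PeerName: "peer1"},
	} {
		n.Node = strings.ToLower(n.Node)
		seen[n] = struct{}{}
	}
	fmt.Println(len(seen)) // 2: the two local spellings collapse, the peered entry stays distinct
}
```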
// String satisfies the stream.Subject interface.
|
||||
func (s EventSubjectService) String() string {
|
||||
key := s.Key
|
||||
if v := s.overrideKey; v != "" {
|
||||
key = v
|
||||
}
|
||||
key = strings.ToLower(key)
|
||||
|
||||
if s.PeerName == "" {
|
||||
return key
|
||||
}
|
||||
return s.PeerName + "/" + key
|
||||
}
|
||||
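The String method above renders a peer-qualified subject: the lowercased key for local data, or `<peer>/<key>` for data imported from a peer. A small illustrative sketch; the subject type below is hypothetical, not the Consul type:

```go
package main

import (
	"fmt"
	"strings"
)

// subject mimics the key/peer fields used by EventSubjectService above.
type subject struct {
	Key      string
	PeerName string
}

func (s subject) String() string {
	key := strings.ToLower(s.Key)
	if s.PeerName == "" {
		return key
	}
	return s.PeerName + "/" + key
}

func main() {
	fmt.Println(subject{Key: "FoO"}.String())                    // foo
	fmt.Println(subject{Key: "web", PeerName: "peer1"}.String()) // peer1/web
}
```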
|
45
agent/consul/state/catalog_events_oss_test.go
Normal file
@ -0,0 +1,45 @@
|
||||
//go:build !consulent
|
||||
// +build !consulent
|
||||
|
||||
package state
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
func TestEventPayloadCheckServiceNode_Subject_OSS(t *testing.T) {
|
||||
for desc, tc := range map[string]struct {
|
||||
evt EventPayloadCheckServiceNode
|
||||
sub string
|
||||
}{
|
||||
"mixed casing": {
|
||||
EventPayloadCheckServiceNode{
|
||||
Value: &structs.CheckServiceNode{
|
||||
Service: &structs.NodeService{
|
||||
Service: "FoO",
|
||||
},
|
||||
},
|
||||
},
|
||||
"foo",
|
||||
},
|
||||
"override key": {
|
||||
EventPayloadCheckServiceNode{
|
||||
Value: &structs.CheckServiceNode{
|
||||
Service: &structs.NodeService{
|
||||
Service: "foo",
|
||||
},
|
||||
},
|
||||
overrideKey: "bar",
|
||||
},
|
||||
"bar",
|
||||
},
|
||||
} {
|
||||
t.Run(desc, func(t *testing.T) {
|
||||
require.Equal(t, tc.sub, tc.evt.Subject().String())
|
||||
})
|
||||
}
|
||||
}
|
@ -16,49 +16,6 @@ import (
|
||||
"github.com/hashicorp/consul/types"
|
||||
)
|
||||
|
||||
func TestEventPayloadCheckServiceNode_Subject(t *testing.T) {
|
||||
for desc, tc := range map[string]struct {
|
||||
evt EventPayloadCheckServiceNode
|
||||
sub string
|
||||
}{
|
||||
"default partition and namespace": {
|
||||
EventPayloadCheckServiceNode{
|
||||
Value: &structs.CheckServiceNode{
|
||||
Service: &structs.NodeService{
|
||||
Service: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
"default/default/foo",
|
||||
},
|
||||
"mixed casing": {
|
||||
EventPayloadCheckServiceNode{
|
||||
Value: &structs.CheckServiceNode{
|
||||
Service: &structs.NodeService{
|
||||
Service: "FoO",
|
||||
},
|
||||
},
|
||||
},
|
||||
"default/default/foo",
|
||||
},
|
||||
"override key": {
|
||||
EventPayloadCheckServiceNode{
|
||||
Value: &structs.CheckServiceNode{
|
||||
Service: &structs.NodeService{
|
||||
Service: "foo",
|
||||
},
|
||||
},
|
||||
overrideKey: "bar",
|
||||
},
|
||||
"default/default/bar",
|
||||
},
|
||||
} {
|
||||
t.Run(desc, func(t *testing.T) {
|
||||
require.Equal(t, tc.sub, tc.evt.Subject().String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestServiceHealthSnapshot(t *testing.T) {
|
||||
store := NewStateStore(nil)
|
||||
|
||||
@ -307,7 +264,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
|
||||
return nil
|
||||
},
|
||||
Mutate: func(s *Store, tx *txn) error {
|
||||
return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil)
|
||||
return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil, "")
|
||||
},
|
||||
WantEvents: []stream.Event{
|
||||
// Should only publish deregistration for that service
|
||||
@ -327,7 +284,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
|
||||
return nil
|
||||
},
|
||||
Mutate: func(s *Store, tx *txn) error {
|
||||
return s.deleteNodeTxn(tx, tx.Index, "node1", nil)
|
||||
return s.deleteNodeTxn(tx, tx.Index, "node1", nil, "")
|
||||
},
|
||||
WantEvents: []stream.Event{
|
||||
// Should publish deregistration events for all services
|
||||
@ -380,7 +337,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
|
||||
return s.ensureRegistrationTxn(tx, tx.Index, false, testServiceRegistration(t, "web", regConnectNative), false)
|
||||
},
|
||||
Mutate: func(s *Store, tx *txn) error {
|
||||
return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil)
|
||||
return s.deleteServiceTxn(tx, tx.Index, "node1", "web", nil, "")
|
||||
},
|
||||
WantEvents: []stream.Event{
|
||||
// We should see both a regular service dereg event and a connect one
|
||||
@ -444,7 +401,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
|
||||
},
|
||||
Mutate: func(s *Store, tx *txn) error {
|
||||
// Delete only the sidecar
|
||||
return s.deleteServiceTxn(tx, tx.Index, "node1", "web_sidecar_proxy", nil)
|
||||
return s.deleteServiceTxn(tx, tx.Index, "node1", "web_sidecar_proxy", nil, "")
|
||||
},
|
||||
WantEvents: []stream.Event{
|
||||
// We should see both a regular service dereg event and a connect one
|
||||
@ -910,7 +867,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
|
||||
},
|
||||
Mutate: func(s *Store, tx *txn) error {
|
||||
// Delete only the node-level check
|
||||
if err := s.deleteCheckTxn(tx, tx.Index, "node1", "serf-health", nil); err != nil {
|
||||
if err := s.deleteCheckTxn(tx, tx.Index, "node1", "serf-health", nil, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@ -964,11 +921,11 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
|
||||
},
|
||||
Mutate: func(s *Store, tx *txn) error {
|
||||
// Delete the service-level check for the main service
|
||||
if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web", nil); err != nil {
|
||||
if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web", nil, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
// Also delete for a proxy
|
||||
if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web_sidecar_proxy", nil); err != nil {
|
||||
if err := s.deleteCheckTxn(tx, tx.Index, "node1", "service:web_sidecar_proxy", nil, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@ -1029,10 +986,10 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
|
||||
// In one transaction the operator moves the web service and its
|
||||
// sidecar from node2 back to node1 and deletes them from node2
|
||||
|
||||
if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web", nil); err != nil {
|
||||
if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web", nil, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web_sidecar_proxy", nil); err != nil {
|
||||
if err := s.deleteServiceTxn(tx, tx.Index, "node2", "web_sidecar_proxy", nil, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -1544,7 +1501,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
|
||||
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
|
||||
},
|
||||
Mutate: func(s *Store, tx *txn) error {
|
||||
return s.deleteServiceTxn(tx, tx.Index, "node1", "srv1", nil)
|
||||
return s.deleteServiceTxn(tx, tx.Index, "node1", "srv1", nil, "")
|
||||
},
|
||||
WantEvents: []stream.Event{
|
||||
testServiceHealthDeregistrationEvent(t, "srv1"),
|
||||
@ -1649,7 +1606,7 @@ func TestServiceHealthEventsFromChanges(t *testing.T) {
|
||||
testServiceRegistration(t, "tgate1", regTerminatingGateway), false)
|
||||
},
|
||||
Mutate: func(s *Store, tx *txn) error {
|
||||
return s.deleteServiceTxn(tx, tx.Index, "node1", "tgate1", structs.DefaultEnterpriseMetaInDefaultPartition())
|
||||
return s.deleteServiceTxn(tx, tx.Index, "node1", "tgate1", structs.DefaultEnterpriseMetaInDefaultPartition(), "")
|
||||
},
|
||||
WantEvents: []stream.Event{
|
||||
testServiceHealthDeregistrationEvent(t,
|
||||
|
@ -15,54 +15,83 @@ import (
|
||||
|
||||
func withEnterpriseSchema(_ *memdb.DBSchema) {}
|
||||
|
||||
func serviceIndexName(name string, _ *acl.EnterpriseMeta) string {
|
||||
return fmt.Sprintf("service.%s", name)
|
||||
func serviceIndexName(name string, _ *acl.EnterpriseMeta, peerName string) string {
|
||||
return peeredIndexEntryName(fmt.Sprintf("service.%s", name), peerName)
|
||||
}
|
||||
|
||||
func serviceKindIndexName(kind structs.ServiceKind, _ *acl.EnterpriseMeta) string {
|
||||
return "service_kind." + kind.Normalized()
|
||||
func serviceKindIndexName(kind structs.ServiceKind, _ *acl.EnterpriseMeta, peerName string) string {
|
||||
base := "service_kind." + kind.Normalized()
|
||||
return peeredIndexEntryName(base, peerName)
|
||||
}
|
||||
|
||||
func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, entMeta *acl.EnterpriseMeta) error {
|
||||
func catalogUpdateNodesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error {
|
||||
// overall nodes index
|
||||
if err := indexUpdateMaxTxn(tx, idx, tableNodes); err != nil {
|
||||
return fmt.Errorf("failed updating index: %s", err)
|
||||
}
|
||||
|
||||
// peered index
|
||||
if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(tableNodes, peerName)); err != nil {
|
||||
return fmt.Errorf("failed updating partitioned+peered index for nodes table: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta) error {
|
||||
// catalogUpdateServicesIndexes upserts the max index for the entire services table with varying levels
|
||||
// of granularity (no-op if `idx` is lower than what exists for that index key):
|
||||
// - all services
|
||||
// - all services in a specified peer (including internal)
|
||||
func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error {
|
||||
// overall services index
|
||||
if err := indexUpdateMaxTxn(tx, idx, tableServices); err != nil {
|
||||
return fmt.Errorf("failed updating index: %s", err)
|
||||
return fmt.Errorf("failed updating index for services table: %w", err)
|
||||
}
|
||||
|
||||
// peered services index
|
||||
if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(tableServices, peerName)); err != nil {
|
||||
return fmt.Errorf("failed updating peered index for services table: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
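These index helpers lean on peeredIndexEntryName, which is not shown in this hunk. A plausible sketch of its shape, assuming an empty peer name falls back to a local-peer keyword ("internal" here stands in for structs.LocalPeerKeyword; the exact key format Consul uses may differ):

```go
package main

import "fmt"

// localPeerKeyword is an assumption standing in for structs.LocalPeerKeyword.
const localPeerKeyword = "internal"

// peeredIndexEntryName derives a per-peer index key from a base index name.
func peeredIndexEntryName(base, peerName string) string {
	if peerName == "" {
		peerName = localPeerKeyword
	}
	return fmt.Sprintf("peer.%s:%s", peerName, base)
}

func main() {
	fmt.Println(peeredIndexEntryName("services", ""))      // peer.internal:services
	fmt.Println(peeredIndexEntryName("services", "peer1")) // peer.peer1:services
}
```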
|
||||
func catalogUpdateServiceKindIndexes(tx WriteTxn, kind structs.ServiceKind, idx uint64, _ *acl.EnterpriseMeta) error {
|
||||
// catalogUpdateServiceKindIndexes upserts the max index for the ServiceKind with varying levels
|
||||
// of granularity (no-op if `idx` is lower than what exists for that index key):
|
||||
// - all services of ServiceKind
|
||||
// - all services of ServiceKind in a specified peer (including internal)
|
||||
func catalogUpdateServiceKindIndexes(tx WriteTxn, idx uint64, kind structs.ServiceKind, _ *acl.EnterpriseMeta, peerName string) error {
|
||||
base := "service_kind." + kind.Normalized()
|
||||
// service-kind index
|
||||
if err := indexUpdateMaxTxn(tx, idx, serviceKindIndexName(kind, nil)); err != nil {
|
||||
return fmt.Errorf("failed updating index: %s", err)
|
||||
if err := indexUpdateMaxTxn(tx, idx, base); err != nil {
|
||||
return fmt.Errorf("failed updating index for service kind: %w", err)
|
||||
}
|
||||
|
||||
// peered index
|
||||
if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(base, peerName)); err != nil {
|
||||
return fmt.Errorf("failed updating peered index for service kind: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func catalogUpdateServiceIndexes(tx WriteTxn, serviceName string, idx uint64, _ *acl.EnterpriseMeta) error {
|
||||
func catalogUpdateServiceIndexes(tx WriteTxn, idx uint64, serviceName string, _ *acl.EnterpriseMeta, peerName string) error {
|
||||
// per-service index
|
||||
if err := indexUpdateMaxTxn(tx, idx, serviceIndexName(serviceName, nil)); err != nil {
|
||||
if err := indexUpdateMaxTxn(tx, idx, serviceIndexName(serviceName, nil, peerName)); err != nil {
|
||||
return fmt.Errorf("failed updating index: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func catalogUpdateServiceExtinctionIndex(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta) error {
|
||||
if err := tx.Insert(tableIndex, &IndexEntry{indexServiceExtinction, idx}); err != nil {
|
||||
return fmt.Errorf("failed updating missing service extinction index: %s", err)
|
||||
func catalogUpdateServiceExtinctionIndex(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error {
|
||||
if err := indexUpdateMaxTxn(tx, idx, indexServiceExtinction); err != nil {
|
||||
return fmt.Errorf("failed updating missing service extinction index: %w", err)
|
||||
}
|
||||
// update the peer index
|
||||
if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(indexServiceExtinction, peerName)); err != nil {
|
||||
return fmt.Errorf("failed updating missing service extinction peered index: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -75,14 +104,14 @@ func catalogInsertNode(tx WriteTxn, node *structs.Node) error {
|
||||
return fmt.Errorf("failed inserting node: %s", err)
|
||||
}
|
||||
|
||||
if err := catalogUpdateNodesIndexes(tx, node.ModifyIndex, node.GetEnterpriseMeta()); err != nil {
|
||||
if err := catalogUpdateNodesIndexes(tx, node.ModifyIndex, node.GetEnterpriseMeta(), node.PeerName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update the node's service indexes as the node information is included
|
||||
// in health queries and we would otherwise miss node updates in some cases
|
||||
// for those queries.
|
||||
if err := updateAllServiceIndexesOfNode(tx, node.ModifyIndex, node.Node, node.GetEnterpriseMeta()); err != nil {
|
||||
if err := updateAllServiceIndexesOfNode(tx, node.ModifyIndex, node.Node, node.GetEnterpriseMeta(), node.PeerName); err != nil {
|
||||
return fmt.Errorf("failed updating index: %s", err)
|
||||
}
|
||||
|
||||
@ -95,73 +124,95 @@ func catalogInsertService(tx WriteTxn, svc *structs.ServiceNode) error {
|
||||
return fmt.Errorf("failed inserting service: %s", err)
|
||||
}
|
||||
|
||||
if err := catalogUpdateServicesIndexes(tx, svc.ModifyIndex, &svc.EnterpriseMeta); err != nil {
|
||||
if err := catalogUpdateServicesIndexes(tx, svc.ModifyIndex, &svc.EnterpriseMeta, svc.PeerName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := catalogUpdateServiceIndexes(tx, svc.ServiceName, svc.ModifyIndex, &svc.EnterpriseMeta); err != nil {
|
||||
if err := catalogUpdateServiceIndexes(tx, svc.ModifyIndex, svc.ServiceName, &svc.EnterpriseMeta, svc.PeerName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := catalogUpdateServiceKindIndexes(tx, svc.ServiceKind, svc.ModifyIndex, &svc.EnterpriseMeta); err != nil {
|
||||
if err := catalogUpdateServiceKindIndexes(tx, svc.ModifyIndex, svc.ServiceKind, &svc.EnterpriseMeta, svc.PeerName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func catalogNodesMaxIndex(tx ReadTxn, entMeta *acl.EnterpriseMeta) uint64 {
|
||||
return maxIndexTxn(tx, tableNodes)
|
||||
func catalogNodesMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) uint64 {
|
||||
return maxIndexTxn(tx, peeredIndexEntryName(tableNodes, peerName))
|
||||
}
|
||||
|
||||
func catalogServicesMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta) uint64 {
|
||||
return maxIndexTxn(tx, tableServices)
|
||||
func catalogServicesMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) uint64 {
|
||||
return maxIndexTxn(tx, peeredIndexEntryName(tableServices, peerName))
|
||||
}
|
||||
|
||||
func catalogServiceMaxIndex(tx ReadTxn, serviceName string, _ *acl.EnterpriseMeta) (<-chan struct{}, interface{}, error) {
|
||||
return tx.FirstWatch(tableIndex, "id", serviceIndexName(serviceName, nil))
|
||||
func catalogServiceMaxIndex(tx ReadTxn, serviceName string, _ *acl.EnterpriseMeta, peerName string) (<-chan struct{}, interface{}, error) {
|
||||
return tx.FirstWatch(tableIndex, indexID, serviceIndexName(serviceName, nil, peerName))
|
||||
}
|
||||
|
||||
func catalogServiceKindMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *acl.EnterpriseMeta) uint64 {
|
||||
return maxIndexWatchTxn(tx, ws, serviceKindIndexName(kind, nil))
|
||||
func catalogServiceKindMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind, _ *acl.EnterpriseMeta, peerName string) uint64 {
|
||||
return maxIndexWatchTxn(tx, ws, serviceKindIndexName(kind, nil, peerName))
|
||||
}
|
||||
|
||||
func catalogServiceListNoWildcard(tx ReadTxn, _ *acl.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get(tableServices, indexID)
|
||||
}
|
||||
|
||||
func catalogServiceListByNode(tx ReadTxn, node string, _ *acl.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) {
|
||||
return tx.Get(tableServices, indexNode, Query{Value: node})
|
||||
}
|
||||
|
||||
func catalogServiceLastExtinctionIndex(tx ReadTxn, _ *acl.EnterpriseMeta) (interface{}, error) {
|
||||
return tx.First(tableIndex, "id", indexServiceExtinction)
|
||||
}
|
||||
|
||||
func catalogMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, checks bool) uint64 {
|
||||
if checks {
|
||||
return maxIndexTxn(tx, tableNodes, tableServices, tableChecks)
|
||||
func catalogServiceListNoWildcard(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) (memdb.ResultIterator, error) {
|
||||
q := Query{
|
||||
PeerName: peerName,
|
||||
}
|
||||
return maxIndexTxn(tx, tableNodes, tableServices)
|
||||
return tx.Get(tableServices, indexID+"_prefix", q)
|
||||
}
|
||||
|
||||
func catalogMaxIndexWatch(tx ReadTxn, ws memdb.WatchSet, _ *acl.EnterpriseMeta, checks bool) uint64 {
|
||||
func catalogServiceListByNode(tx ReadTxn, node string, _ *acl.EnterpriseMeta, peerName string, _ bool) (memdb.ResultIterator, error) {
|
||||
return tx.Get(tableServices, indexNode, Query{Value: node, PeerName: peerName})
|
||||
}
|
||||
|
||||
func catalogServiceLastExtinctionIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) (interface{}, error) {
|
||||
return tx.First(tableIndex, indexID, peeredIndexEntryName(indexServiceExtinction, peerName))
|
||||
}
|
||||
|
||||
func catalogMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string, checks bool) uint64 {
|
||||
if checks {
|
||||
return maxIndexWatchTxn(tx, ws, tableNodes, tableServices, tableChecks)
|
||||
return maxIndexTxn(tx,
|
||||
peeredIndexEntryName(tableChecks, peerName),
|
||||
peeredIndexEntryName(tableServices, peerName),
|
||||
peeredIndexEntryName(tableNodes, peerName),
|
||||
)
|
||||
}
|
||||
return maxIndexWatchTxn(tx, ws, tableNodes, tableServices)
|
||||
return maxIndexTxn(tx,
|
||||
peeredIndexEntryName(tableServices, peerName),
|
||||
peeredIndexEntryName(tableNodes, peerName),
|
||||
)
|
||||
}
|
||||
|
||||
func catalogUpdateCheckIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta) error {
|
||||
func catalogMaxIndexWatch(tx ReadTxn, ws memdb.WatchSet, _ *acl.EnterpriseMeta, peerName string, checks bool) uint64 {
|
||||
// TODO(peering_indexes): pipe peerName here
|
||||
if checks {
|
||||
return maxIndexWatchTxn(tx, ws,
|
||||
peeredIndexEntryName(tableChecks, peerName),
|
||||
peeredIndexEntryName(tableServices, peerName),
|
||||
peeredIndexEntryName(tableNodes, peerName),
|
||||
)
|
||||
}
|
||||
return maxIndexWatchTxn(tx, ws,
|
||||
peeredIndexEntryName(tableServices, peerName),
|
||||
peeredIndexEntryName(tableNodes, peerName),
|
||||
)
|
||||
}
|
||||
|
||||
func catalogUpdateCheckIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error {
|
||||
// update the universal index entry
|
||||
if err := tx.Insert(tableIndex, &IndexEntry{tableChecks, idx}); err != nil {
|
||||
if err := indexUpdateMaxTxn(tx, idx, tableChecks); err != nil {
|
||||
return fmt.Errorf("failed updating index: %s", err)
|
||||
}
|
||||
|
||||
if err := indexUpdateMaxTxn(tx, idx, peeredIndexEntryName(tableChecks, peerName)); err != nil {
|
||||
return fmt.Errorf("failed updating index: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func catalogChecksMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta) uint64 {
|
||||
return maxIndexTxn(tx, tableChecks)
|
||||
func catalogChecksMaxIndex(tx ReadTxn, _ *acl.EnterpriseMeta, peerName string) uint64 {
|
||||
return maxIndexTxn(tx, peeredIndexEntryName(tableChecks, peerName))
|
||||
}
|
||||
|
||||
func catalogListChecksByNode(tx ReadTxn, q Query) (memdb.ResultIterator, error) {
|
||||
@ -174,7 +225,7 @@ func catalogInsertCheck(tx WriteTxn, chk *structs.HealthCheck, idx uint64) error
|
||||
return fmt.Errorf("failed inserting check: %s", err)
|
||||
}
|
||||
|
||||
if err := catalogUpdateCheckIndexes(tx, idx, &chk.EnterpriseMeta); err != nil {
|
||||
if err := catalogUpdateCheckIndexes(tx, idx, &chk.EnterpriseMeta, chk.PeerName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -207,3 +258,10 @@ func indexFromKindServiceName(arg interface{}) ([]byte, error) {
|
||||
return nil, fmt.Errorf("type must be KindServiceNameQuery or *KindServiceName: %T", arg)
|
||||
}
|
||||
}
|
||||
|
||||
func updateKindServiceNamesIndex(tx WriteTxn, idx uint64, kind structs.ServiceKind, entMeta acl.EnterpriseMeta) error {
|
||||
if err := indexUpdateMaxTxn(tx, idx, kindServiceNameIndexName(kind.Normalized())); err != nil {
|
||||
return fmt.Errorf("failed updating %s table index: %v", tableKindServiceNames, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
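updateKindServiceNamesIndex uses the same max-index bookkeeping as the helpers above: an index entry only ever moves forward, and a lower index is a no-op. A generic sketch of that behavior against a plain map rather than memdb:

```go
package main

import "fmt"

// indexUpdateMax mimics the documented behavior of indexUpdateMaxTxn:
// upsert the entry, but never move it backwards.
func indexUpdateMax(entries map[string]uint64, idx uint64, name string) {
	if idx > entries[name] {
		entries[name] = idx
	}
}

func main() {
	entries := map[string]uint64{}
	indexUpdateMax(entries, 7, "kind_service_names.connect-proxy")
	indexUpdateMax(entries, 5, "kind_service_names.connect-proxy") // no-op: lower index
	fmt.Println(entries["kind_service_names.connect-proxy"])       // 7
}
```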
|
@ -19,6 +19,14 @@ func testIndexerTableChecks() map[string]indexerTestCase {
|
||||
CheckID: "CheckID",
|
||||
Status: "PASSING",
|
||||
}
|
||||
objWPeer := &structs.HealthCheck{
|
||||
Node: "NoDe",
|
||||
ServiceID: "SeRvIcE",
|
||||
ServiceName: "ServiceName",
|
||||
CheckID: "CheckID",
|
||||
Status: "PASSING",
|
||||
PeerName: "Peer1",
|
||||
}
|
||||
return map[string]indexerTestCase{
|
||||
indexID: {
|
||||
read: indexValue{
|
||||
@ -26,11 +34,11 @@ func testIndexerTableChecks() map[string]indexerTestCase {
|
||||
Node: "NoDe",
|
||||
CheckID: "CheckId",
|
||||
},
|
||||
expected: []byte("node\x00checkid\x00"),
|
||||
expected: []byte("internal\x00node\x00checkid\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("node\x00checkid\x00"),
|
||||
expected: []byte("internal\x00node\x00checkid\x00"),
|
||||
},
|
||||
prefix: []indexValue{
|
||||
{
|
||||
@ -39,28 +47,75 @@ func testIndexerTableChecks() map[string]indexerTestCase {
|
||||
},
|
||||
{
|
||||
source: Query{Value: "nOdE"},
|
||||
expected: []byte("node\x00"),
|
||||
expected: []byte("internal\x00node\x00"),
|
||||
},
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
read: indexValue{
|
||||
source: NodeCheckQuery{
|
||||
Node: "NoDe",
|
||||
CheckID: "CheckId",
|
||||
PeerName: "Peer1",
|
||||
},
|
||||
expected: []byte("peer1\x00node\x00checkid\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: objWPeer,
|
||||
expected: []byte("peer1\x00node\x00checkid\x00"),
|
||||
},
|
||||
prefix: []indexValue{
|
||||
{
|
||||
source: Query{Value: "nOdE",
|
||||
PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00node\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexStatus: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "PASSING"},
|
||||
expected: []byte("passing\x00"),
|
||||
expected: []byte("internal\x00passing\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("passing\x00"),
|
||||
expected: []byte("internal\x00passing\x00"),
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
read: indexValue{
|
||||
source: Query{Value: "PASSING", PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00passing\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: objWPeer,
|
||||
expected: []byte("peer1\x00passing\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexService: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "ServiceName"},
|
||||
expected: []byte("servicename\x00"),
|
||||
expected: []byte("internal\x00servicename\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("servicename\x00"),
|
||||
expected: []byte("internal\x00servicename\x00"),
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
read: indexValue{
|
||||
source: Query{Value: "ServiceName", PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00servicename\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: objWPeer,
|
||||
expected: []byte("peer1\x00servicename\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexNodeService: {
|
||||
@ -69,11 +124,27 @@ func testIndexerTableChecks() map[string]indexerTestCase {
|
||||
Node: "NoDe",
|
||||
Service: "SeRvIcE",
|
||||
},
|
||||
expected: []byte("node\x00service\x00"),
|
||||
expected: []byte("internal\x00node\x00service\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("node\x00service\x00"),
|
||||
expected: []byte("internal\x00node\x00service\x00"),
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
read: indexValue{
|
||||
source: NodeServiceQuery{
|
||||
Node: "NoDe",
|
||||
PeerName: "Peer1",
|
||||
Service: "SeRvIcE",
|
||||
},
|
||||
expected: []byte("peer1\x00node\x00service\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: objWPeer,
|
||||
expected: []byte("peer1\x00node\x00service\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexNode: {
|
||||
@ -81,11 +152,26 @@ func testIndexerTableChecks() map[string]indexerTestCase {
|
||||
source: Query{
|
||||
Value: "NoDe",
|
||||
},
|
||||
expected: []byte("node\x00"),
|
||||
expected: []byte("internal\x00node\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("node\x00"),
|
||||
expected: []byte("internal\x00node\x00"),
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
read: indexValue{
|
||||
source: Query{
|
||||
Value: "NoDe",
|
||||
PeerName: "Peer1",
|
||||
},
|
||||
expected: []byte("peer1\x00node\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: objWPeer,
|
||||
expected: []byte("peer1\x00node\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@ -186,11 +272,11 @@ func testIndexerTableNodes() map[string]indexerTestCase {
|
||||
indexID: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "NoDeId"},
|
||||
expected: []byte("nodeid\x00"),
|
||||
expected: []byte("internal\x00nodeid\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.Node{Node: "NoDeId"},
|
||||
expected: []byte("nodeid\x00"),
|
||||
expected: []byte("internal\x00nodeid\x00"),
|
||||
},
|
||||
prefix: []indexValue{
|
||||
{
|
||||
@ -203,38 +289,90 @@ func testIndexerTableNodes() map[string]indexerTestCase {
|
||||
},
|
||||
{
|
||||
source: Query{Value: "NoDeId"},
|
||||
expected: []byte("nodeid\x00"),
|
||||
expected: []byte("internal\x00nodeid\x00"),
|
||||
},
|
||||
{
|
||||
source: Query{},
|
||||
expected: []byte("internal\x00"),
|
||||
},
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
read: indexValue{
|
||||
source: Query{Value: "NoDeId", PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00nodeid\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.Node{Node: "NoDeId", PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00nodeid\x00"),
|
||||
},
|
||||
prefix: []indexValue{
|
||||
{
|
||||
source: Query{PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00"),
|
||||
},
|
||||
{
|
||||
source: Query{Value: "NoDeId", PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00nodeid\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexUUID: {
|
||||
read: indexValue{
|
||||
source: Query{Value: uuid},
|
||||
expected: uuidBuf,
|
||||
expected: append([]byte("internal\x00"), uuidBuf...),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.Node{
|
||||
ID: types.NodeID(uuid),
|
||||
Node: "NoDeId",
|
||||
},
|
||||
expected: uuidBuf,
|
||||
expected: append([]byte("internal\x00"), uuidBuf...),
|
||||
},
|
||||
prefix: []indexValue{
|
||||
{
|
||||
source: (*acl.EnterpriseMeta)(nil),
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
source: acl.EnterpriseMeta{},
|
||||
expected: nil,
|
||||
},
|
||||
{ // partial length
|
||||
source: Query{Value: uuid[:6]},
|
||||
expected: uuidBuf[:3],
|
||||
expected: append([]byte("internal\x00"), uuidBuf[:3]...),
|
||||
},
|
||||
{ // full length
|
||||
source: Query{Value: uuid},
|
||||
expected: uuidBuf,
|
||||
expected: append([]byte("internal\x00"), uuidBuf...),
|
||||
},
|
||||
{
|
||||
source: Query{},
|
||||
expected: []byte("internal\x00"),
|
||||
},
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
read: indexValue{
|
||||
source: Query{Value: uuid, PeerName: "Peer1"},
|
||||
expected: append([]byte("peer1\x00"), uuidBuf...),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.Node{
|
||||
ID: types.NodeID(uuid),
|
||||
PeerName: "Peer1",
|
||||
Node: "NoDeId",
|
||||
},
|
||||
expected: append([]byte("peer1\x00"), uuidBuf...),
|
||||
},
|
||||
prefix: []indexValue{
|
||||
{ // partial length
|
||||
source: Query{Value: uuid[:6], PeerName: "Peer1"},
|
||||
expected: append([]byte("peer1\x00"), uuidBuf[:3]...),
|
||||
},
|
||||
{ // full length
|
||||
source: Query{Value: uuid, PeerName: "Peer1"},
|
||||
expected: append([]byte("peer1\x00"), uuidBuf...),
|
||||
},
|
||||
{
|
||||
source: Query{PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -244,7 +382,7 @@ func testIndexerTableNodes() map[string]indexerTestCase {
|
||||
Key: "KeY",
|
||||
Value: "VaLuE",
|
||||
},
|
||||
expected: []byte("KeY\x00VaLuE\x00"),
|
||||
expected: []byte("internal\x00KeY\x00VaLuE\x00"),
|
||||
},
|
||||
writeMulti: indexValueMulti{
|
||||
source: &structs.Node{
|
||||
@ -255,8 +393,34 @@ func testIndexerTableNodes() map[string]indexerTestCase {
|
||||
},
|
||||
},
|
||||
expected: [][]byte{
|
||||
[]byte("MaP-kEy-1\x00mAp-VaL-1\x00"),
|
||||
[]byte("mAp-KeY-2\x00MaP-vAl-2\x00"),
|
||||
[]byte("internal\x00MaP-kEy-1\x00mAp-VaL-1\x00"),
|
||||
[]byte("internal\x00mAp-KeY-2\x00MaP-vAl-2\x00"),
|
||||
},
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
read: indexValue{
|
||||
source: KeyValueQuery{
|
||||
Key: "KeY",
|
||||
Value: "VaLuE",
|
||||
PeerName: "Peer1",
|
||||
},
|
||||
expected: []byte("peer1\x00KeY\x00VaLuE\x00"),
|
||||
},
|
||||
writeMulti: indexValueMulti{
|
||||
source: &structs.Node{
|
||||
Node: "NoDeId",
|
||||
Meta: map[string]string{
|
||||
"MaP-kEy-1": "mAp-VaL-1",
|
||||
"mAp-KeY-2": "MaP-vAl-2",
|
||||
},
|
||||
PeerName: "Peer1",
|
||||
},
|
||||
expected: [][]byte{
|
||||
[]byte("peer1\x00MaP-kEy-1\x00mAp-VaL-1\x00"),
|
||||
[]byte("peer1\x00mAp-KeY-2\x00MaP-vAl-2\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -271,6 +435,12 @@ func testIndexerTableServices() map[string]indexerTestCase {
|
||||
ServiceID: "SeRviCe",
|
||||
ServiceName: "ServiceName",
|
||||
}
|
||||
objWPeer := &structs.ServiceNode{
|
||||
Node: "NoDeId",
|
||||
ServiceID: "SeRviCe",
|
||||
ServiceName: "ServiceName",
|
||||
PeerName: "Peer1",
|
||||
}
|
||||
|
||||
return map[string]indexerTestCase{
|
||||
indexID: {
|
||||
@ -279,11 +449,11 @@ func testIndexerTableServices() map[string]indexerTestCase {
|
||||
Node: "NoDeId",
|
||||
Service: "SeRvIcE",
|
||||
},
|
||||
expected: []byte("nodeid\x00service\x00"),
|
||||
expected: []byte("internal\x00nodeid\x00service\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("nodeid\x00service\x00"),
|
||||
expected: []byte("internal\x00nodeid\x00service\x00"),
|
||||
},
|
||||
prefix: []indexValue{
|
||||
{
|
||||
@ -294,9 +464,39 @@ func testIndexerTableServices() map[string]indexerTestCase {
|
||||
source: acl.EnterpriseMeta{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
source: Query{},
|
||||
expected: []byte("internal\x00"),
|
||||
},
|
||||
{
|
||||
source: Query{Value: "NoDeId"},
|
||||
expected: []byte("nodeid\x00"),
|
||||
expected: []byte("internal\x00nodeid\x00"),
|
||||
},
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
read: indexValue{
|
||||
source: NodeServiceQuery{
|
||||
Node: "NoDeId",
|
||||
PeerName: "Peer1",
|
||||
Service: "SeRvIcE",
|
||||
},
|
||||
expected: []byte("peer1\x00nodeid\x00service\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: objWPeer,
|
||||
expected: []byte("peer1\x00nodeid\x00service\x00"),
|
||||
},
|
||||
prefix: []indexValue{
|
||||
{
|
||||
source: Query{Value: "NoDeId", PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00nodeid\x00"),
|
||||
},
|
||||
{
|
||||
source: Query{PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -305,34 +505,61 @@ func testIndexerTableServices() map[string]indexerTestCase {
|
||||
source: Query{
|
||||
Value: "NoDeId",
|
||||
},
|
||||
expected: []byte("nodeid\x00"),
|
||||
expected: []byte("internal\x00nodeid\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("nodeid\x00"),
|
||||
expected: []byte("internal\x00nodeid\x00"),
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
read: indexValue{
|
||||
source: Query{
|
||||
Value: "NoDeId",
|
||||
PeerName: "Peer1",
|
||||
},
|
||||
expected: []byte("peer1\x00nodeid\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: objWPeer,
|
||||
expected: []byte("peer1\x00nodeid\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexService: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "ServiceName"},
|
||||
expected: []byte("servicename\x00"),
|
||||
expected: []byte("internal\x00servicename\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("servicename\x00"),
|
||||
expected: []byte("internal\x00servicename\x00"),
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
read: indexValue{
|
||||
source: Query{Value: "ServiceName", PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00servicename\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: objWPeer,
|
||||
expected: []byte("peer1\x00servicename\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexConnect: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "ConnectName"},
|
||||
expected: []byte("connectname\x00"),
|
||||
expected: []byte("internal\x00connectname\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.ServiceNode{
|
||||
ServiceName: "ConnectName",
|
||||
ServiceConnect: structs.ServiceConnect{Native: true},
|
||||
},
|
||||
expected: []byte("connectname\x00"),
|
||||
expected: []byte("internal\x00connectname\x00"),
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
@ -344,7 +571,20 @@ func testIndexerTableServices() map[string]indexerTestCase {
|
||||
DestinationServiceName: "ConnectName",
|
||||
},
|
||||
},
|
||||
expected: []byte("connectname\x00"),
|
||||
expected: []byte("internal\x00connectname\x00"),
|
||||
},
|
||||
},
|
||||
{
|
||||
write: indexValue{
|
||||
source: &structs.ServiceNode{
|
||||
ServiceName: "ServiceName",
|
||||
ServiceKind: structs.ServiceKindConnectProxy,
|
||||
ServiceProxy: structs.ConnectProxyConfig{
|
||||
DestinationServiceName: "ConnectName",
|
||||
},
|
||||
PeerName: "Peer1",
|
||||
},
|
||||
expected: []byte("peer1\x00connectname\x00"),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -362,18 +602,32 @@ func testIndexerTableServices() map[string]indexerTestCase {
|
||||
expectedIndexMissing: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
read: indexValue{
|
||||
source: Query{Value: "ConnectName", PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00connectname\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.ServiceNode{
|
||||
ServiceName: "ConnectName",
|
||||
ServiceConnect: structs.ServiceConnect{Native: true},
|
||||
PeerName: "Peer1",
|
||||
},
|
||||
expected: []byte("peer1\x00connectname\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
indexKind: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "connect-proxy"},
|
||||
expected: []byte("connect-proxy\x00"),
|
||||
expected: []byte("internal\x00connect-proxy\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.ServiceNode{
|
||||
ServiceKind: structs.ServiceKindConnectProxy,
|
||||
},
|
||||
expected: []byte("connect-proxy\x00"),
|
||||
expected: []byte("internal\x00connect-proxy\x00"),
|
||||
},
|
||||
extra: []indexerTestCase{
|
||||
{
|
||||
@ -382,7 +636,30 @@ func testIndexerTableServices() map[string]indexerTestCase {
|
||||
ServiceName: "ServiceName",
|
||||
ServiceKind: structs.ServiceKindTypical,
|
||||
},
|
||||
expected: []byte("\x00"),
|
||||
expected: []byte("internal\x00\x00"),
|
||||
},
|
||||
},
|
||||
{
|
||||
write: indexValue{
|
||||
source: &structs.ServiceNode{
|
||||
ServiceName: "ServiceName",
|
||||
ServiceKind: structs.ServiceKindTypical,
|
||||
PeerName: "Peer1",
|
||||
},
|
||||
expected: []byte("peer1\x00\x00"),
|
||||
},
|
||||
},
|
||||
{
|
||||
read: indexValue{
|
||||
source: Query{Value: "connect-proxy", PeerName: "Peer1"},
|
||||
expected: []byte("peer1\x00connect-proxy\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.ServiceNode{
|
||||
ServiceKind: structs.ServiceKindConnectProxy,
|
||||
PeerName: "Peer1",
|
||||
},
|
||||
expected: []byte("peer1\x00connect-proxy\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -440,7 +717,7 @@ func testIndexerTableKindServiceNames() map[string]indexerTestCase {
|
||||
},
|
||||
indexKind: {
|
||||
read: indexValue{
|
||||
source: structs.ServiceKindConnectProxy,
|
||||
source: Query{Value: string(structs.ServiceKindConnectProxy)},
|
||||
expected: []byte("connect-proxy\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
|
@ -48,9 +48,9 @@ func nodesTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: indexerSingleWithPrefix{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexFromNode,
|
||||
prefixIndex: prefixIndexFromQueryNoNamespace,
|
||||
readIndex: indexWithPeerName(indexFromQuery),
|
||||
writeIndex: indexWithPeerName(indexFromNode),
|
||||
prefixIndex: prefixIndexFromQueryWithPeer,
|
||||
},
|
||||
},
|
||||
indexUUID: {
|
||||
@ -58,9 +58,9 @@ func nodesTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: true,
|
||||
Unique: true,
|
||||
Indexer: indexerSingleWithPrefix{
|
||||
readIndex: indexFromUUIDQuery,
|
||||
writeIndex: indexIDFromNode,
|
||||
prefixIndex: prefixIndexFromUUIDQuery,
|
||||
readIndex: indexWithPeerName(indexFromUUIDQuery),
|
||||
writeIndex: indexWithPeerName(indexIDFromNode),
|
||||
prefixIndex: prefixIndexFromUUIDWithPeerQuery,
|
||||
},
|
||||
},
|
||||
indexMeta: {
|
||||
@ -68,8 +68,8 @@ func nodesTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: indexerMulti{
|
||||
readIndex: indexFromKeyValueQuery,
|
||||
writeIndexMulti: indexMetaFromNode,
|
||||
readIndex: indexWithPeerName(indexFromKeyValueQuery),
|
||||
writeIndexMulti: multiIndexWithPeerName(indexMetaFromNode),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -146,9 +146,9 @@ func servicesTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: indexerSingleWithPrefix{
|
||||
readIndex: indexFromNodeServiceQuery,
|
||||
writeIndex: indexFromServiceNode,
|
||||
prefixIndex: prefixIndexFromQuery,
|
||||
readIndex: indexWithPeerName(indexFromNodeServiceQuery),
|
||||
writeIndex: indexWithPeerName(indexFromServiceNode),
|
||||
prefixIndex: prefixIndexFromQueryWithPeer,
|
||||
},
|
||||
},
|
||||
indexNode: {
|
||||
@ -156,8 +156,8 @@ func servicesTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: false,
|
||||
Unique: false,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexFromNodeIdentity,
|
||||
readIndex: indexWithPeerName(indexFromQuery),
|
||||
writeIndex: indexWithPeerName(indexFromNodeIdentity),
|
||||
},
|
||||
},
|
||||
indexService: {
|
||||
@ -165,8 +165,8 @@ func servicesTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexServiceNameFromServiceNode,
|
||||
readIndex: indexWithPeerName(indexFromQuery),
|
||||
writeIndex: indexWithPeerName(indexServiceNameFromServiceNode),
|
||||
},
|
||||
},
|
||||
indexConnect: {
|
||||
@ -174,8 +174,8 @@ func servicesTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexConnectNameFromServiceNode,
|
||||
readIndex: indexWithPeerName(indexFromQuery),
|
||||
writeIndex: indexWithPeerName(indexConnectNameFromServiceNode),
|
||||
},
|
||||
},
|
||||
indexKind: {
|
||||
@ -183,8 +183,8 @@ func servicesTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: false,
|
||||
Unique: false,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexKindFromServiceNode,
|
||||
readIndex: indexWithPeerName(indexFromQuery),
|
||||
writeIndex: indexWithPeerName(indexKindFromServiceNode),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -295,6 +295,61 @@ func indexKindFromServiceNode(raw interface{}) ([]byte, error) {
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// indexWithPeerName wraps an index function so its value is prefixed with the peer name.
|
||||
func indexWithPeerName(
|
||||
fn func(interface{}) ([]byte, error),
|
||||
) func(interface{}) ([]byte, error) {
|
||||
return func(raw interface{}) ([]byte, error) {
|
||||
v, err := fn(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n, ok := raw.(peerIndexable)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("type must be peerIndexable: %T", raw)
|
||||
}
|
||||
|
||||
peername := n.PeerOrEmpty()
|
||||
if peername == "" {
|
||||
peername = structs.LocalPeerKeyword
|
||||
}
|
||||
b := newIndexBuilder(len(v) + len(peername) + 1)
|
||||
b.String(strings.ToLower(peername))
|
||||
b.Raw(v)
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
}
|
||||
|
||||
// multiIndexWithPeerName does the same for multi-value index functions, prefixing each returned value with the peer name.
|
||||
func multiIndexWithPeerName(
|
||||
fn func(interface{}) ([][]byte, error),
|
||||
) func(interface{}) ([][]byte, error) {
|
||||
return func(raw interface{}) ([][]byte, error) {
|
||||
results, err := fn(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n, ok := raw.(peerIndexable)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("type must be peerIndexable: %T", raw)
|
||||
}
|
||||
|
||||
peername := n.PeerOrEmpty()
|
||||
if peername == "" {
|
||||
peername = structs.LocalPeerKeyword
|
||||
}
|
||||
for i, v := range results {
|
||||
b := newIndexBuilder(len(v) + len(peername) + 1)
|
||||
b.String(strings.ToLower(peername))
|
||||
b.Raw(v)
|
||||
results[i] = b.Bytes()
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
}
|
||||
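Both wrappers follow the same decorator pattern: run the base indexer, then prepend the lowercased peer name (or the local-peer keyword) to each resulting key. A self-contained illustration with simplified stand-in types, not Consul's:

```go
package main

import (
	"fmt"
	"strings"
)

// peerIndexable mirrors the interface used above.
type peerIndexable interface{ PeerOrEmpty() string }

// node is a simplified stand-in for an indexable record.
type node struct {
	Name string
	Peer string
}

func (n node) PeerOrEmpty() string { return n.Peer }

// indexFromNode is a toy base indexer: lowercased name, NUL-terminated.
func indexFromNode(raw interface{}) ([]byte, error) {
	n, ok := raw.(node)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T", raw)
	}
	return []byte(strings.ToLower(n.Name) + "\x00"), nil
}

// indexWithPeerName wraps a base indexer and prefixes its key with the peer name.
func indexWithPeerName(fn func(interface{}) ([]byte, error)) func(interface{}) ([]byte, error) {
	return func(raw interface{}) ([]byte, error) {
		v, err := fn(raw)
		if err != nil {
			return nil, err
		}
		p, ok := raw.(peerIndexable)
		if !ok {
			return nil, fmt.Errorf("type must be peerIndexable: %T", raw)
		}
		peer := p.PeerOrEmpty()
		if peer == "" {
			peer = "internal" // stand-in for structs.LocalPeerKeyword
		}
		return append([]byte(strings.ToLower(peer)+"\x00"), v...), nil
	}
}

func main() {
	idx := indexWithPeerName(indexFromNode)

	b, _ := idx(node{Name: "NoDe"})
	fmt.Printf("%q\n", b) // "internal\x00node\x00"

	b, _ = idx(node{Name: "NoDe", Peer: "Peer1"})
	fmt.Printf("%q\n", b) // "peer1\x00node\x00"
}
```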
|
||||
// checksTableSchema returns a new table schema used for storing and indexing
|
||||
// health check information. Health checks have a number of different attributes
|
||||
// we want to filter by, so this table is a bit more complex.
|
||||
@ -307,9 +362,9 @@ func checksTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: indexerSingleWithPrefix{
|
||||
readIndex: indexFromNodeCheckQuery,
|
||||
writeIndex: indexFromHealthCheck,
|
||||
prefixIndex: prefixIndexFromQuery,
|
||||
readIndex: indexWithPeerName(indexFromNodeCheckQuery),
|
||||
writeIndex: indexWithPeerName(indexFromHealthCheck),
|
||||
prefixIndex: prefixIndexFromQueryWithPeer,
|
||||
},
|
||||
},
|
||||
indexStatus: {
|
||||
@ -317,8 +372,8 @@ func checksTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: false,
|
||||
Unique: false,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexStatusFromHealthCheck,
|
||||
readIndex: indexWithPeerName(indexFromQuery),
|
||||
writeIndex: indexWithPeerName(indexStatusFromHealthCheck),
|
||||
},
|
||||
},
|
||||
indexService: {
|
||||
@ -326,8 +381,8 @@ func checksTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexServiceNameFromHealthCheck,
|
||||
readIndex: indexWithPeerName(indexFromQuery),
|
||||
writeIndex: indexWithPeerName(indexServiceNameFromHealthCheck),
|
||||
},
|
||||
},
|
||||
indexNode: {
|
||||
@ -335,8 +390,8 @@ func checksTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexFromNodeIdentity,
|
||||
readIndex: indexWithPeerName(indexFromQuery),
|
||||
writeIndex: indexWithPeerName(indexFromNodeIdentity),
|
||||
},
|
||||
},
|
||||
indexNodeService: {
|
||||
@ -344,8 +399,8 @@ func checksTableSchema() *memdb.TableSchema {
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromNodeServiceQuery,
|
||||
writeIndex: indexNodeServiceFromHealthCheck,
|
||||
readIndex: indexWithPeerName(indexFromNodeServiceQuery),
|
||||
writeIndex: indexWithPeerName(indexNodeServiceFromHealthCheck),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -588,11 +643,20 @@ type upstreamDownstream struct {
|
||||
|
||||
// NodeCheckQuery is used to query the ID index of the checks table.
|
||||
type NodeCheckQuery struct {
|
||||
Node string
|
||||
CheckID string
|
||||
Node string
|
||||
CheckID string
|
||||
PeerName string
|
||||
acl.EnterpriseMeta
|
||||
}
|
||||
|
||||
type peerIndexable interface {
|
||||
PeerOrEmpty() string
|
||||
}
|
||||
|
||||
func (q NodeCheckQuery) PeerOrEmpty() string {
|
||||
return q.PeerName
|
||||
}
|
||||
|
||||
// NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer
|
||||
// receiver for this method. Remove once that is fixed.
|
||||
func (q NodeCheckQuery) NamespaceOrDefault() string {
|
||||
@ -680,7 +744,16 @@ type KindServiceName struct {
|
||||
structs.RaftIndex
|
||||
}
|
||||
|
||||
func (n *KindServiceName) PartitionOrDefault() string {
|
||||
return n.Service.PartitionOrDefault()
|
||||
}
|
||||
|
||||
func (n *KindServiceName) NamespaceOrDefault() string {
|
||||
return n.Service.NamespaceOrDefault()
|
||||
}
|
||||
|
||||
func kindServiceNameTableSchema() *memdb.TableSchema {
|
||||
// TODO(peering): make this peer-aware
|
||||
return &memdb.TableSchema{
|
||||
Name: tableKindServiceNames,
|
||||
Indexes: map[string]*memdb.IndexSchema{
|
||||
@ -693,8 +766,8 @@ func kindServiceNameTableSchema() *memdb.TableSchema {
|
||||
writeIndex: indexFromKindServiceName,
|
||||
},
|
||||
},
|
||||
indexKindOnly: {
|
||||
Name: indexKindOnly,
|
||||
indexKind: {
|
||||
Name: indexKind,
|
||||
AllowMissing: false,
|
||||
Unique: false,
|
||||
Indexer: indexerSingle{
|
||||
@ -732,20 +805,20 @@ func indexFromKindServiceNameKindOnly(raw interface{}) ([]byte, error) {
|
||||
b.String(strings.ToLower(string(x.Kind)))
|
||||
return b.Bytes(), nil
|
||||
|
||||
case structs.ServiceKind:
|
||||
case Query:
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(string(x)))
|
||||
b.String(strings.ToLower(x.Value))
|
||||
return b.Bytes(), nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("type must be *KindServiceName or structs.ServiceKind: %T", raw)
|
||||
return nil, fmt.Errorf("type must be *KindServiceName or Query: %T", raw)
|
||||
}
|
||||
}
|
||||
|
||||
func kindServiceNamesMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind) uint64 {
|
||||
func kindServiceNamesMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind string) uint64 {
|
||||
return maxIndexWatchTxn(tx, ws, kindServiceNameIndexName(kind))
|
||||
}
|
||||
|
||||
func kindServiceNameIndexName(kind structs.ServiceKind) string {
|
||||
return "kind_service_names." + kind.Normalized()
|
||||
func kindServiceNameIndexName(kind string) string {
|
||||
return "kind_service_names." + kind
|
||||
}
|
||||
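With the signature change above, callers pass the kind as a plain string (a normalized ServiceKind or a Query value). A trivial illustration of the index names this produces:

```go
package main

import "fmt"

func kindServiceNameIndexName(kind string) string {
	return "kind_service_names." + kind
}

func main() {
	fmt.Println(kindServiceNameIndexName("connect-proxy")) // kind_service_names.connect-proxy
	fmt.Println(kindServiceNameIndexName("typical"))       // kind_service_names.typical
}
```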
|
File diff suppressed because it is too large
@ -4,6 +4,7 @@ import (
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/stream"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/proto/pbsubscribe"
|
||||
)
|
||||
|
||||
// EventTopicCARoots is the streaming topic to which events will be published
|
||||
@ -12,13 +13,7 @@ import (
|
||||
//
|
||||
// Note: topics are ordinarily defined in subscribe.proto, but this one isn't
|
||||
// currently available via the Subscribe endpoint.
|
||||
const EventTopicCARoots stringer = "CARoots"
|
||||
|
||||
// stringer is a convenience type to turn a regular string into a fmt.Stringer
|
||||
// so that it can be used as a stream.Topic or stream.Subject.
|
||||
type stringer string
|
||||
|
||||
func (s stringer) String() string { return string(s) }
|
||||
const EventTopicCARoots stream.StringTopic = "CARoots"
|
||||
|
||||
type EventPayloadCARoots struct {
|
||||
CARoots structs.CARoots
|
||||
@ -35,6 +30,10 @@ func (e EventPayloadCARoots) HasReadPermission(authz acl.Authorizer) bool {
|
||||
return authz.ServiceWriteAny(&authzContext) == acl.Allow
|
||||
}
|
||||
|
||||
func (e EventPayloadCARoots) ToSubscriptionEvent(idx uint64) *pbsubscribe.Event {
|
||||
panic("EventPayloadCARoots does not implement ToSubscriptionEvent")
|
||||
}
|
||||
|
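Earlier in this hunk the package-local stringer type is replaced by stream.StringTopic. A minimal sketch of the underlying idea, a named string type satisfying fmt.Stringer; the StringTopic shown here is illustrative, not the stream package's definition:

```go
package main

import "fmt"

// StringTopic is a string-backed type that satisfies fmt.Stringer, so it can
// be used anywhere a topic or subject interface only needs String().
type StringTopic string

func (s StringTopic) String() string { return string(s) }

const EventTopicCARoots StringTopic = "CARoots"

func main() {
	var topic fmt.Stringer = EventTopicCARoots
	fmt.Println(topic.String()) // CARoots
}
```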
||||
// caRootsChangeEvents returns an event on EventTopicCARoots whenever the list
|
||||
// of active CA Roots changes.
|
||||
func caRootsChangeEvents(tx ReadTxn, changes Changes) ([]stream.Event, error) {
|
||||
|
@ -181,7 +181,7 @@ func TestStateStore_Coordinate_Cleanup(t *testing.T) {
|
||||
require.Equal(t, expected, coords)
|
||||
|
||||
// Now delete the node.
|
||||
require.NoError(t, s.DeleteNode(3, "node1", nil))
|
||||
require.NoError(t, s.DeleteNode(3, "node1", nil, ""))
|
||||
|
||||
// Make sure the coordinate is gone.
|
||||
_, coords, err = s.Coordinate(nil, "node1", nil)
|
||||
|
@ -997,8 +997,9 @@ func (s *Store) intentionTopologyTxn(tx ReadTxn, ws memdb.WatchSet,
|
||||
|
||||
// TODO(tproxy): One remaining improvement is that this includes non-Connect services (typical services without a proxy)
|
||||
// Ideally those should be excluded as well, since they can't be upstreams/downstreams without a proxy.
|
||||
// Maybe narrow serviceNamesOfKindTxn to services represented by proxies? (ingress, sidecar-proxy, terminating)
|
||||
index, services, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindTypical)
|
||||
wildcardMeta := structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier)
|
||||
index, services, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindTypical, *wildcardMeta)
|
||||
if err != nil {
|
||||
return index, nil, fmt.Errorf("failed to list ingress service names: %v", err)
|
||||
}
|
||||
@ -1008,7 +1009,7 @@ func (s *Store) intentionTopologyTxn(tx ReadTxn, ws memdb.WatchSet,
|
||||
|
||||
if downstreams {
|
||||
// Ingress gateways can only ever be downstreams, since mesh services don't dial them.
|
||||
index, ingress, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindIngressGateway)
|
||||
index, ingress, err := serviceNamesOfKindTxn(tx, ws, structs.ServiceKindIngressGateway, *wildcardMeta)
|
||||
if err != nil {
|
||||
return index, nil, fmt.Errorf("failed to list ingress service names: %v", err)
|
||||
}
|
||||
|
486
agent/consul/state/peering.go
Normal file
@ -0,0 +1,486 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
)
|
||||
|
||||
const (
|
||||
tablePeering = "peering"
|
||||
tablePeeringTrustBundles = "peering-trust-bundles"
|
||||
)
|
||||
|
||||
func peeringTableSchema() *memdb.TableSchema {
|
||||
return &memdb.TableSchema{
|
||||
Name: tablePeering,
|
||||
Indexes: map[string]*memdb.IndexSchema{
|
||||
indexID: {
|
||||
Name: indexID,
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: readIndex(indexFromUUIDString),
|
||||
writeIndex: writeIndex(indexIDFromPeering),
|
||||
},
|
||||
},
|
||||
indexName: {
|
||||
Name: indexName,
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: indexerSingleWithPrefix{
|
||||
readIndex: indexPeeringFromQuery,
|
||||
writeIndex: indexFromPeering,
|
||||
prefixIndex: prefixIndexFromQueryNoNamespace,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func peeringTrustBundlesTableSchema() *memdb.TableSchema {
|
||||
return &memdb.TableSchema{
|
||||
Name: tablePeeringTrustBundles,
|
||||
Indexes: map[string]*memdb.IndexSchema{
|
||||
indexID: {
|
||||
Name: indexID,
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexPeeringFromQuery, // same as peering table since we'll use the query.Value
|
||||
writeIndex: indexFromPeeringTrustBundle,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
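The two schema constructors above are registered into go-memdb in the usual way. An illustrative, simplified registration; the StringFieldIndex below is a placeholder for the custom indexers used above:

```go
package main

import "github.com/hashicorp/go-memdb"

func main() {
	// A minimal table schema with the required unique "id" index.
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"peering": {
				Name: "peering",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:         "id",
						AllowMissing: false,
						Unique:       true,
						Indexer:      &memdb.StringFieldIndex{Field: "ID"},
					},
				},
			},
		},
	}
	// NewMemDB validates the schema and builds the database.
	if _, err := memdb.NewMemDB(schema); err != nil {
		panic(err)
	}
}
```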
|
||||
func indexIDFromPeering(raw interface{}) ([]byte, error) {
|
||||
p, ok := raw.(*pbpeering.Peering)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for pbpeering.Peering index", raw)
|
||||
}
|
||||
|
||||
if p.ID == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
uuid, err := uuidStringToBytes(p.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var b indexBuilder
|
||||
b.Raw(uuid)
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
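indexIDFromPeering depends on uuidStringToBytes, which is defined elsewhere in the package. A plausible sketch of what it likely does, stripping dashes and hex-decoding so IDs index as fixed-length byte keys (an assumption, not the verified implementation):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// uuidStringToBytes converts a dashed UUID string into its 16 raw bytes.
func uuidStringToBytes(uuid string) ([]byte, error) {
	return hex.DecodeString(strings.ReplaceAll(uuid, "-", ""))
}

func main() {
	b, err := uuidStringToBytes("11111111-2222-3333-4444-555555555555")
	fmt.Println(len(b), err) // 16 <nil>
}
```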
|
||||
func (s *Store) PeeringReadByID(ws memdb.WatchSet, id string) (uint64, *pbpeering.Peering, error) {
|
||||
tx := s.db.ReadTxn()
|
||||
defer tx.Abort()
|
||||
|
||||
peering, err := peeringReadByIDTxn(ws, tx, id)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to read peering by id: %w", err)
|
||||
}
|
||||
if peering == nil {
|
||||
// Return the table's index so the caller can watch it for changes if the peering doesn't exist
|
||||
return maxIndexWatchTxn(tx, ws, tablePeering), nil, nil
|
||||
}
|
||||
|
||||
return peering.ModifyIndex, peering, nil
|
||||
}
|
||||
|
||||
func (s *Store) PeeringRead(ws memdb.WatchSet, q Query) (uint64, *pbpeering.Peering, error) {
|
||||
tx := s.db.ReadTxn()
|
||||
defer tx.Abort()
|
||||
|
||||
watchCh, peeringRaw, err := tx.FirstWatch(tablePeering, indexName, q)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed peering lookup: %w", err)
|
||||
}
|
||||
|
||||
peering, ok := peeringRaw.(*pbpeering.Peering)
|
||||
if peering != nil && !ok {
|
||||
return 0, nil, fmt.Errorf("invalid type %T", peering)
|
||||
}
|
||||
ws.Add(watchCh)
|
||||
|
||||
if peering == nil {
|
||||
// Return the table's index so the caller can watch it for changes if the peering doesn't exist
|
||||
return maxIndexWatchTxn(tx, ws, partitionedIndexEntryName(tablePeering, q.PartitionOrDefault())), nil, nil
|
||||
}
|
||||
return peering.ModifyIndex, peering, nil
|
||||
}
|
||||
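PeeringRead adds the first-watch channel to the caller's watch set so blocking queries can wait for changes. A generic sketch of that wait step using go-memdb's WatchSet; the timeout value is arbitrary:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/go-memdb"
)

// waitForChange blocks until the watch set fires or the timeout elapses.
// WatchCtx returns nil when a watched channel fires, or the context's error.
func waitForChange(ws memdb.WatchSet, timeout time.Duration) bool {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return ws.WatchCtx(ctx) == nil
}

func main() {
	ws := memdb.NewWatchSet()
	changed := waitForChange(ws, 50*time.Millisecond)
	fmt.Println(changed) // false: nothing in the watch set fired before the timeout
}
```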
|
||||
func peeringReadByIDTxn(ws memdb.WatchSet, tx ReadTxn, id string) (*pbpeering.Peering, error) {
|
||||
watchCh, peeringRaw, err := tx.FirstWatch(tablePeering, indexID, id)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed peering lookup: %w", err)
|
||||
}
|
||||
ws.Add(watchCh)
|
||||
|
||||
peering, ok := peeringRaw.(*pbpeering.Peering)
|
||||
if peering != nil && !ok {
|
||||
return nil, fmt.Errorf("invalid type %T", peering)
|
||||
}
|
||||
return peering, nil
|
||||
}
|
||||
|
||||
func (s *Store) PeeringList(ws memdb.WatchSet, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.Peering, error) {
|
||||
tx := s.db.ReadTxn()
|
||||
defer tx.Abort()
|
||||
|
||||
var (
|
||||
iter memdb.ResultIterator
|
||||
err error
|
||||
idx uint64
|
||||
)
|
||||
if entMeta.PartitionOrDefault() == structs.WildcardSpecifier {
|
||||
iter, err = tx.Get(tablePeering, indexID)
|
||||
idx = maxIndexWatchTxn(tx, ws, tablePeering)
|
||||
} else {
|
||||
iter, err = tx.Get(tablePeering, indexName+"_prefix", entMeta)
|
||||
idx = maxIndexWatchTxn(tx, ws, partitionedIndexEntryName(tablePeering, entMeta.PartitionOrDefault()))
|
||||
}
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed peering lookup: %v", err)
|
||||
}
|
||||
|
||||
var result []*pbpeering.Peering
|
||||
for entry := iter.Next(); entry != nil; entry = iter.Next() {
|
||||
result = append(result, entry.(*pbpeering.Peering))
|
||||
}
|
||||
|
||||
return idx, result, nil
|
||||
}
|
||||
|
||||
func generatePeeringUUID(tx ReadTxn) (string, error) {
|
||||
for {
|
||||
uuid, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate UUID: %w", err)
|
||||
}
|
||||
existing, err := peeringReadByIDTxn(nil, tx, uuid)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read peering: %w", err)
|
||||
}
|
||||
if existing == nil {
|
||||
return uuid, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Store) PeeringWrite(idx uint64, p *pbpeering.Peering) error {
|
||||
tx := s.db.WriteTxn(idx)
|
||||
defer tx.Abort()
|
||||
|
||||
q := Query{
|
||||
Value: p.Name,
|
||||
EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(p.Partition),
|
||||
}
|
||||
existingRaw, err := tx.First(tablePeering, indexName, q)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed peering lookup: %w", err)
|
||||
}
|
||||
|
||||
existing, ok := existingRaw.(*pbpeering.Peering)
|
||||
if existingRaw != nil && !ok {
|
||||
return fmt.Errorf("invalid type %T", existingRaw)
|
||||
}
|
||||
|
||||
if existing != nil {
|
||||
p.CreateIndex = existing.CreateIndex
|
||||
p.ID = existing.ID
|
||||
|
||||
} else {
|
||||
// TODO(peering): consider keeping PeeringState enum elsewhere?
|
||||
p.State = pbpeering.PeeringState_INITIAL
|
||||
p.CreateIndex = idx
|
||||
|
||||
p.ID, err = generatePeeringUUID(tx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate peering id: %w", err)
|
||||
}
|
||||
}
|
||||
p.ModifyIndex = idx
|
||||
|
||||
if err := tx.Insert(tablePeering, p); err != nil {
|
||||
return fmt.Errorf("failed inserting peering: %w", err)
|
||||
}
|
||||
|
||||
if err := updatePeeringTableIndexes(tx, idx, p.PartitionOrDefault()); err != nil {
|
||||
return err
|
||||
}
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
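PeeringWrite behaves as an upsert keyed on the peering name: the first write for a name generates a UUID and sets the INITIAL state, while later writes for the same name keep the original ID and CreateIndex. The test-style sketch below is illustrative only and is not part of this commit; it assumes it lives in package state alongside the tests added further down, and the "cluster-02" name is made up.

```go
package state

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/proto/pbpeering"
)

func TestPeeringWriteThenRead_Sketch(t *testing.T) {
	s := NewStateStore(nil)

	// First write for a name: PeeringWrite fills in the ID and the INITIAL state.
	require.NoError(t, s.PeeringWrite(1, &pbpeering.Peering{Name: "cluster-02"}))

	// Read back by name; the returned index is the peering's ModifyIndex.
	idx, p, err := s.PeeringRead(nil, Query{Value: "cluster-02"})
	require.NoError(t, err)
	require.Equal(t, uint64(1), idx)
	require.NotEmpty(t, p.ID)
	require.Equal(t, pbpeering.PeeringState_INITIAL, p.State)
}
```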
// TODO(peering): replace with deferred deletion since this operation
// should involve cleanup of data associated with the peering.
func (s *Store) PeeringDelete(idx uint64, q Query) error {
	tx := s.db.WriteTxn(idx)
	defer tx.Abort()

	existing, err := tx.First(tablePeering, indexName, q)
	if err != nil {
		return fmt.Errorf("failed peering lookup: %v", err)
	}

	if existing == nil {
		return nil
	}

	if err := tx.Delete(tablePeering, existing); err != nil {
		return fmt.Errorf("failed deleting peering: %v", err)
	}

	if err := updatePeeringTableIndexes(tx, idx, q.PartitionOrDefault()); err != nil {
		return err
	}
	return tx.Commit()
}

func (s *Store) PeeringTerminateByID(idx uint64, id string) error {
	tx := s.db.WriteTxn(idx)
	defer tx.Abort()

	existing, err := peeringReadByIDTxn(nil, tx, id)
	if err != nil {
		return fmt.Errorf("failed to read peering %q: %w", id, err)
	}
	if existing == nil {
		return nil
	}

	c := proto.Clone(existing)
	clone, ok := c.(*pbpeering.Peering)
	if !ok {
		return fmt.Errorf("invalid type %T, expected *pbpeering.Peering", existing)
	}

	clone.State = pbpeering.PeeringState_TERMINATED
	clone.ModifyIndex = idx

	if err := tx.Insert(tablePeering, clone); err != nil {
		return fmt.Errorf("failed inserting peering: %w", err)
	}

	if err := updatePeeringTableIndexes(tx, idx, clone.PartitionOrDefault()); err != nil {
		return err
	}
	return tx.Commit()
}

// ExportedServicesForPeer returns the list of typical and proxy services exported to a peer.
// TODO(peering): What to do about terminating gateways? Sometimes terminating gateways are the appropriate destination
// to dial for an upstream mesh service. However, that information is handled by observing the terminating gateway's
// config entry, which we wouldn't want to replicate. How would client peers know to route through terminating gateways
// when they're not dialing through a remote mesh gateway?
func (s *Store) ExportedServicesForPeer(ws memdb.WatchSet, peerID string) (uint64, []structs.ServiceName, error) {
	tx := s.db.ReadTxn()
	defer tx.Abort()

	peering, err := peeringReadByIDTxn(ws, tx, peerID)
	if err != nil {
		return 0, nil, fmt.Errorf("failed to read peering: %w", err)
	}
	if peering == nil {
		return 0, nil, nil
	}

	maxIdx := peering.ModifyIndex

	entMeta := structs.NodeEnterpriseMetaInPartition(peering.Partition)
	idx, raw, err := configEntryTxn(tx, ws, structs.ExportedServices, entMeta.PartitionOrDefault(), entMeta)
	if err != nil {
		return 0, nil, fmt.Errorf("failed to fetch exported-services config entry: %w", err)
	}
	if idx > maxIdx {
		maxIdx = idx
	}
	if raw == nil {
		return maxIdx, nil, nil
	}
	conf, ok := raw.(*structs.ExportedServicesConfigEntry)
	if !ok {
		return 0, nil, fmt.Errorf("expected type *structs.ExportedServicesConfigEntry, got %T", raw)
	}

	set := make(map[structs.ServiceName]struct{})

	for _, svc := range conf.Services {
		svcMeta := acl.NewEnterpriseMetaWithPartition(entMeta.PartitionOrDefault(), svc.Namespace)

		sawPeer := false
		for _, consumer := range svc.Consumers {
			name := structs.NewServiceName(svc.Name, &svcMeta)

			if _, ok := set[name]; ok {
				// Service was covered by a wildcard that was already accounted for
				continue
			}
			if consumer.PeerName != peering.Name {
				continue
			}
			sawPeer = true

			if svc.Name != structs.WildcardSpecifier {
				set[name] = struct{}{}
			}
		}

		// If the target peer is a consumer, and all services in the namespace are exported, query those service names.
		if sawPeer && svc.Name == structs.WildcardSpecifier {
			var typicalServices []*KindServiceName
			idx, typicalServices, err = serviceNamesOfKindTxn(tx, ws, structs.ServiceKindTypical, svcMeta)
			if err != nil {
				return 0, nil, fmt.Errorf("failed to get service names: %w", err)
			}
			if idx > maxIdx {
				maxIdx = idx
			}
			for _, s := range typicalServices {
				set[s.Service] = struct{}{}
			}

			var proxyServices []*KindServiceName
			idx, proxyServices, err = serviceNamesOfKindTxn(tx, ws, structs.ServiceKindConnectProxy, svcMeta)
			if err != nil {
				return 0, nil, fmt.Errorf("failed to get service names: %w", err)
			}
			if idx > maxIdx {
				maxIdx = idx
			}
			for _, s := range proxyServices {
				set[s.Service] = struct{}{}
			}
		}
	}

	var resp []structs.ServiceName
	for svc := range set {
		resp = append(resp, svc)
	}
	return maxIdx, resp, nil
}
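ExportedServicesForPeer resolves the exported-services config entry for the peering's partition: exact service names are returned directly, while a wildcard export pulls in the registered typical and connect-proxy service names. A minimal illustrative sketch, assumed to sit in the same hypothetical test file as the previous sketch (with "github.com/hashicorp/consul/agent/structs" also imported); the "web" service and peer name are made up:

```go
func TestExportedServicesForPeer_Sketch(t *testing.T) {
	s := NewStateStore(nil)

	// Create the peering and look up the ID that PeeringWrite generated for it.
	require.NoError(t, s.PeeringWrite(1, &pbpeering.Peering{Name: "cluster-02"}))
	_, p, err := s.PeeringRead(nil, Query{Value: "cluster-02"})
	require.NoError(t, err)

	// Export one service to that peer via the exported-services config entry.
	entry := &structs.ExportedServicesConfigEntry{
		Name: "default",
		Services: []structs.ExportedService{
			{
				Name:      "web",
				Consumers: []structs.ServiceConsumer{{PeerName: "cluster-02"}},
			},
		},
	}
	require.NoError(t, s.EnsureConfigEntry(2, entry))

	// The peer now sees exactly that service name.
	_, exported, err := s.ExportedServicesForPeer(nil, p.ID)
	require.NoError(t, err)
	require.Len(t, exported, 1)
	require.Equal(t, "web", exported[0].Name)
}
```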
func (s *Store) PeeringTrustBundleRead(ws memdb.WatchSet, q Query) (uint64, *pbpeering.PeeringTrustBundle, error) {
	tx := s.db.ReadTxn()
	defer tx.Abort()

	watchCh, ptbRaw, err := tx.FirstWatch(tablePeeringTrustBundles, indexID, q)
	if err != nil {
		return 0, nil, fmt.Errorf("failed peering trust bundle lookup: %w", err)
	}

	ptb, ok := ptbRaw.(*pbpeering.PeeringTrustBundle)
	if ptb != nil && !ok {
		return 0, nil, fmt.Errorf("invalid type %T", ptb)
	}
	ws.Add(watchCh)

	if ptb == nil {
		// Return the tables index so caller can watch it for changes if the trust bundle doesn't exist
		return maxIndexWatchTxn(tx, ws, partitionedIndexEntryName(tablePeeringTrustBundles, q.PartitionOrDefault())), nil, nil
	}
	return ptb.ModifyIndex, ptb, nil
}

// PeeringTrustBundleWrite writes ptb to the state store. If there is an existing trust bundle with the given peer name,
// it will be overwritten.
func (s *Store) PeeringTrustBundleWrite(idx uint64, ptb *pbpeering.PeeringTrustBundle) error {
	tx := s.db.WriteTxn(idx)
	defer tx.Abort()

	q := Query{
		Value:          ptb.PeerName,
		EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(ptb.Partition),
	}
	existingRaw, err := tx.First(tablePeeringTrustBundles, indexID, q)
	if err != nil {
		return fmt.Errorf("failed peering trust bundle lookup: %w", err)
	}

	existing, ok := existingRaw.(*pbpeering.PeeringTrustBundle)
	if existingRaw != nil && !ok {
		return fmt.Errorf("invalid type %T", existingRaw)
	}

	if existing != nil {
		ptb.CreateIndex = existing.CreateIndex

	} else {
		ptb.CreateIndex = idx
	}

	ptb.ModifyIndex = idx

	if err := tx.Insert(tablePeeringTrustBundles, ptb); err != nil {
		return fmt.Errorf("failed inserting peering trust bundle: %w", err)
	}

	if err := updatePeeringTrustBundlesTableIndexes(tx, idx, ptb.PartitionOrDefault()); err != nil {
		return err
	}
	return tx.Commit()
}
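PeeringTrustBundleWrite is likewise an upsert, keyed on the trust bundle's PeerName. A small illustrative sketch, assumed to live in the same hypothetical test file as the sketches above; the trust domain and the PEM placeholder are made up:

```go
func TestPeeringTrustBundleWriteThenRead_Sketch(t *testing.T) {
	s := NewStateStore(nil)

	// Upsert keyed on PeerName; a later write for "cluster-02" would overwrite this bundle.
	require.NoError(t, s.PeeringTrustBundleWrite(1, &pbpeering.PeeringTrustBundle{
		TrustDomain: "cluster-02.example.com",
		PeerName:    "cluster-02",
		RootPEMs:    []string{"<placeholder PEM>"},
	}))

	idx, ptb, err := s.PeeringTrustBundleRead(nil, Query{Value: "cluster-02"})
	require.NoError(t, err)
	require.Equal(t, uint64(1), idx)
	require.Equal(t, "cluster-02.example.com", ptb.TrustDomain)
}
```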
func (s *Store) PeeringTrustBundleDelete(idx uint64, q Query) error {
	tx := s.db.WriteTxn(idx)
	defer tx.Abort()

	existing, err := tx.First(tablePeeringTrustBundles, indexID, q)
	if err != nil {
		return fmt.Errorf("failed peering trust bundle lookup: %v", err)
	}

	if existing == nil {
		return nil
	}

	if err := tx.Delete(tablePeeringTrustBundles, existing); err != nil {
		return fmt.Errorf("failed deleting peering trust bundle: %v", err)
	}

	if err := updatePeeringTrustBundlesTableIndexes(tx, idx, q.PartitionOrDefault()); err != nil {
		return err
	}
	return tx.Commit()
}

func (s *Snapshot) Peerings() (memdb.ResultIterator, error) {
	return s.tx.Get(tablePeering, indexName)
}

func (s *Snapshot) PeeringTrustBundles() (memdb.ResultIterator, error) {
	return s.tx.Get(tablePeeringTrustBundles, indexID)
}

func (r *Restore) Peering(p *pbpeering.Peering) error {
	if err := r.tx.Insert(tablePeering, p); err != nil {
		return fmt.Errorf("failed restoring peering: %w", err)
	}

	if err := updatePeeringTableIndexes(r.tx, p.ModifyIndex, p.PartitionOrDefault()); err != nil {
		return err
	}

	return nil
}

func (r *Restore) PeeringTrustBundle(ptb *pbpeering.PeeringTrustBundle) error {
	if err := r.tx.Insert(tablePeeringTrustBundles, ptb); err != nil {
		return fmt.Errorf("failed restoring peering trust bundle: %w", err)
	}

	if err := updatePeeringTrustBundlesTableIndexes(r.tx, ptb.ModifyIndex, ptb.PartitionOrDefault()); err != nil {
		return err
	}

	return nil
}
66
agent/consul/state/peering_oss.go
Normal file
@ -0,0 +1,66 @@
//go:build !consulent
// +build !consulent

package state

import (
	"fmt"
	"strings"

	"github.com/hashicorp/consul/proto/pbpeering"
)

func indexPeeringFromQuery(raw interface{}) ([]byte, error) {
	q, ok := raw.(Query)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for Query index", raw)
	}

	var b indexBuilder
	b.String(strings.ToLower(q.Value))
	return b.Bytes(), nil
}

func indexFromPeering(raw interface{}) ([]byte, error) {
	p, ok := raw.(*pbpeering.Peering)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for structs.Peering index", raw)
	}

	if p.Name == "" {
		return nil, errMissingValueForIndex
	}

	var b indexBuilder
	b.String(strings.ToLower(p.Name))
	return b.Bytes(), nil
}

func indexFromPeeringTrustBundle(raw interface{}) ([]byte, error) {
	ptb, ok := raw.(*pbpeering.PeeringTrustBundle)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T for pbpeering.PeeringTrustBundle index", raw)
	}

	if ptb.PeerName == "" {
		return nil, errMissingValueForIndex
	}

	var b indexBuilder
	b.String(strings.ToLower(ptb.PeerName))
	return b.Bytes(), nil
}

func updatePeeringTableIndexes(tx WriteTxn, idx uint64, _ string) error {
	if err := tx.Insert(tableIndex, &IndexEntry{Key: tablePeering, Value: idx}); err != nil {
		return fmt.Errorf("failed updating table index: %w", err)
	}
	return nil
}

func updatePeeringTrustBundlesTableIndexes(tx WriteTxn, idx uint64, _ string) error {
	if err := tx.Insert(tableIndex, &IndexEntry{Key: tablePeeringTrustBundles, Value: idx}); err != nil {
		return fmt.Errorf("failed updating table index: %w", err)
	}
	return nil
}
811
agent/consul/state/peering_test.go
Normal file
@ -0,0 +1,811 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/proto/pbpeering"
|
||||
)
|
||||
|
||||
func insertTestPeerings(t *testing.T, s *Store) {
|
||||
t.Helper()
|
||||
|
||||
tx := s.db.WriteTxn(0)
|
||||
defer tx.Abort()
|
||||
|
||||
err := tx.Insert(tablePeering, &pbpeering.Peering{
|
||||
Name: "foo",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
|
||||
State: pbpeering.PeeringState_INITIAL,
|
||||
CreateIndex: 1,
|
||||
ModifyIndex: 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = tx.Insert(tablePeering, &pbpeering.Peering{
|
||||
Name: "bar",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
ID: "5ebcff30-5509-4858-8142-a8e580f1863f",
|
||||
State: pbpeering.PeeringState_FAILING,
|
||||
CreateIndex: 2,
|
||||
ModifyIndex: 2,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = tx.Insert(tableIndex, &IndexEntry{
|
||||
Key: tablePeering,
|
||||
Value: 2,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, tx.Commit())
|
||||
}
|
||||
|
||||
func insertTestPeeringTrustBundles(t *testing.T, s *Store) {
|
||||
t.Helper()
|
||||
|
||||
tx := s.db.WriteTxn(0)
|
||||
defer tx.Abort()
|
||||
|
||||
err := tx.Insert(tablePeeringTrustBundles, &pbpeering.PeeringTrustBundle{
|
||||
TrustDomain: "foo.com",
|
||||
PeerName: "foo",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
RootPEMs: []string{"foo certificate bundle"},
|
||||
CreateIndex: 1,
|
||||
ModifyIndex: 1,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = tx.Insert(tablePeeringTrustBundles, &pbpeering.PeeringTrustBundle{
|
||||
TrustDomain: "bar.com",
|
||||
PeerName: "bar",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
RootPEMs: []string{"bar certificate bundle"},
|
||||
CreateIndex: 2,
|
||||
ModifyIndex: 2,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = tx.Insert(tableIndex, &IndexEntry{
|
||||
Key: tablePeeringTrustBundles,
|
||||
Value: 2,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, tx.Commit())
|
||||
}
|
||||
|
||||
func TestStateStore_PeeringReadByID(t *testing.T) {
|
||||
s := NewStateStore(nil)
|
||||
insertTestPeerings(t, s)
|
||||
|
||||
type testcase struct {
|
||||
name string
|
||||
id string
|
||||
expect *pbpeering.Peering
|
||||
}
|
||||
run := func(t *testing.T, tc testcase) {
|
||||
_, peering, err := s.PeeringReadByID(nil, tc.id)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expect, peering)
|
||||
}
|
||||
tcs := []testcase{
|
||||
{
|
||||
name: "get foo",
|
||||
id: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
|
||||
expect: &pbpeering.Peering{
|
||||
Name: "foo",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
|
||||
State: pbpeering.PeeringState_INITIAL,
|
||||
CreateIndex: 1,
|
||||
ModifyIndex: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "get bar",
|
||||
id: "5ebcff30-5509-4858-8142-a8e580f1863f",
|
||||
expect: &pbpeering.Peering{
|
||||
Name: "bar",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
ID: "5ebcff30-5509-4858-8142-a8e580f1863f",
|
||||
State: pbpeering.PeeringState_FAILING,
|
||||
CreateIndex: 2,
|
||||
ModifyIndex: 2,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "get non-existent",
|
||||
id: "05f54e2f-7813-4d4d-ba03-534554c88a18",
|
||||
expect: nil,
|
||||
},
|
||||
}
|
||||
for _, tc := range tcs {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateStore_PeeringRead(t *testing.T) {
|
||||
s := NewStateStore(nil)
|
||||
insertTestPeerings(t, s)
|
||||
|
||||
type testcase struct {
|
||||
name string
|
||||
query Query
|
||||
expect *pbpeering.Peering
|
||||
}
|
||||
run := func(t *testing.T, tc testcase) {
|
||||
_, peering, err := s.PeeringRead(nil, tc.query)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expect, peering)
|
||||
}
|
||||
tcs := []testcase{
|
||||
{
|
||||
name: "get foo",
|
||||
query: Query{
|
||||
Value: "foo",
|
||||
},
|
||||
expect: &pbpeering.Peering{
|
||||
Name: "foo",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
|
||||
State: pbpeering.PeeringState_INITIAL,
|
||||
CreateIndex: 1,
|
||||
ModifyIndex: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "get non-existent baz",
|
||||
query: Query{
|
||||
Value: "baz",
|
||||
},
|
||||
expect: nil,
|
||||
},
|
||||
}
|
||||
for _, tc := range tcs {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_Peering_Watch(t *testing.T) {
|
||||
s := NewStateStore(nil)
|
||||
|
||||
var lastIdx uint64
|
||||
lastIdx++
|
||||
|
||||
// set up initial write
|
||||
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
|
||||
Name: "foo",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
newWatch := func(t *testing.T, q Query) memdb.WatchSet {
|
||||
t.Helper()
|
||||
// set up a watch
|
||||
ws := memdb.NewWatchSet()
|
||||
|
||||
_, _, err := s.PeeringRead(ws, q)
|
||||
require.NoError(t, err)
|
||||
|
||||
return ws
|
||||
}
|
||||
|
||||
t.Run("insert fires watch", func(t *testing.T) {
|
||||
// watch on non-existent bar
|
||||
ws := newWatch(t, Query{Value: "bar"})
|
||||
|
||||
lastIdx++
|
||||
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
|
||||
Name: "bar",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, watchFired(ws))
|
||||
|
||||
// should find bar peering
|
||||
idx, p, err := s.PeeringRead(ws, Query{Value: "bar"})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lastIdx, idx)
|
||||
require.NotNil(t, p)
|
||||
})
|
||||
|
||||
t.Run("update fires watch", func(t *testing.T) {
|
||||
// watch on existing foo
|
||||
ws := newWatch(t, Query{Value: "foo"})
|
||||
|
||||
// unrelated write shouldn't fire watch
|
||||
lastIdx++
|
||||
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
|
||||
Name: "bar",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.False(t, watchFired(ws))
|
||||
|
||||
// foo write should fire watch
|
||||
lastIdx++
|
||||
err = s.PeeringWrite(lastIdx, &pbpeering.Peering{
|
||||
Name: "foo",
|
||||
State: pbpeering.PeeringState_FAILING,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.True(t, watchFired(ws))
|
||||
|
||||
// check foo is updated
|
||||
idx, p, err := s.PeeringRead(ws, Query{Value: "foo"})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lastIdx, idx)
|
||||
require.Equal(t, pbpeering.PeeringState_FAILING, p.State)
|
||||
})
|
||||
|
||||
t.Run("delete fires watch", func(t *testing.T) {
|
||||
// watch on existing foo
|
||||
ws := newWatch(t, Query{Value: "foo"})
|
||||
|
||||
// delete on bar shouldn't fire watch
|
||||
lastIdx++
|
||||
require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.Peering{Name: "bar"}))
|
||||
lastIdx++
|
||||
require.NoError(t, s.PeeringDelete(lastIdx, Query{Value: "bar"}))
|
||||
require.False(t, watchFired(ws))
|
||||
|
||||
// delete on foo should fire watch
|
||||
lastIdx++
|
||||
err := s.PeeringDelete(lastIdx, Query{Value: "foo"})
|
||||
require.NoError(t, err)
|
||||
require.True(t, watchFired(ws))
|
||||
|
||||
// check foo is gone
|
||||
idx, p, err := s.PeeringRead(ws, Query{Value: "foo"})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lastIdx, idx)
|
||||
require.Nil(t, p)
|
||||
})
|
||||
}
|
||||
|
||||
func TestStore_PeeringList(t *testing.T) {
|
||||
s := NewStateStore(nil)
|
||||
insertTestPeerings(t, s)
|
||||
|
||||
_, pps, err := s.PeeringList(nil, acl.EnterpriseMeta{})
|
||||
require.NoError(t, err)
|
||||
expect := []*pbpeering.Peering{
|
||||
{
|
||||
Name: "foo",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
|
||||
State: pbpeering.PeeringState_INITIAL,
|
||||
CreateIndex: 1,
|
||||
ModifyIndex: 1,
|
||||
},
|
||||
{
|
||||
Name: "bar",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
ID: "5ebcff30-5509-4858-8142-a8e580f1863f",
|
||||
State: pbpeering.PeeringState_FAILING,
|
||||
CreateIndex: 2,
|
||||
ModifyIndex: 2,
|
||||
},
|
||||
}
|
||||
require.ElementsMatch(t, expect, pps)
|
||||
}
|
||||
|
||||
func TestStore_PeeringList_Watch(t *testing.T) {
|
||||
s := NewStateStore(nil)
|
||||
|
||||
var lastIdx uint64
|
||||
lastIdx++ // start at 1
|
||||
|
||||
// track number of expected peerings in state store
|
||||
var count int
|
||||
|
||||
newWatch := func(t *testing.T, entMeta acl.EnterpriseMeta) memdb.WatchSet {
|
||||
t.Helper()
|
||||
// set up a watch
|
||||
ws := memdb.NewWatchSet()
|
||||
|
||||
_, _, err := s.PeeringList(ws, entMeta)
|
||||
require.NoError(t, err)
|
||||
|
||||
return ws
|
||||
}
|
||||
|
||||
t.Run("insert fires watch", func(t *testing.T) {
|
||||
ws := newWatch(t, acl.EnterpriseMeta{})
|
||||
|
||||
lastIdx++
|
||||
// insert a peering
|
||||
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
|
||||
Name: "bar",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
count++
|
||||
|
||||
require.True(t, watchFired(ws))
|
||||
|
||||
// should find bar peering
|
||||
idx, pp, err := s.PeeringList(ws, acl.EnterpriseMeta{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lastIdx, idx)
|
||||
require.Len(t, pp, count)
|
||||
})
|
||||
|
||||
t.Run("update fires watch", func(t *testing.T) {
|
||||
// set up initial write
|
||||
lastIdx++
|
||||
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
|
||||
Name: "foo",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
count++
|
||||
|
||||
ws := newWatch(t, acl.EnterpriseMeta{})
|
||||
|
||||
// update peering
|
||||
lastIdx++
|
||||
err = s.PeeringWrite(lastIdx, &pbpeering.Peering{
|
||||
Name: "foo",
|
||||
State: pbpeering.PeeringState_FAILING,
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.True(t, watchFired(ws))
|
||||
|
||||
idx, pp, err := s.PeeringList(ws, acl.EnterpriseMeta{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lastIdx, idx)
|
||||
require.Len(t, pp, count)
|
||||
})
|
||||
|
||||
t.Run("delete fires watch", func(t *testing.T) {
|
||||
// set up initial write
|
||||
lastIdx++
|
||||
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
|
||||
Name: "baz",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
count++
|
||||
|
||||
ws := newWatch(t, acl.EnterpriseMeta{})
|
||||
|
||||
// delete peering
|
||||
lastIdx++
|
||||
err = s.PeeringDelete(lastIdx, Query{Value: "baz"})
|
||||
require.NoError(t, err)
|
||||
count--
|
||||
|
||||
require.True(t, watchFired(ws))
|
||||
|
||||
idx, pp, err := s.PeeringList(ws, acl.EnterpriseMeta{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lastIdx, idx)
|
||||
require.Len(t, pp, count)
|
||||
})
|
||||
}
|
||||
|
||||
func TestStore_PeeringWrite(t *testing.T) {
|
||||
s := NewStateStore(nil)
|
||||
insertTestPeerings(t, s)
|
||||
type testcase struct {
|
||||
name string
|
||||
input *pbpeering.Peering
|
||||
}
|
||||
run := func(t *testing.T, tc testcase) {
|
||||
require.NoError(t, s.PeeringWrite(10, tc.input))
|
||||
|
||||
q := Query{
|
||||
Value: tc.input.Name,
|
||||
EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(tc.input.Partition),
|
||||
}
|
||||
_, p, err := s.PeeringRead(nil, q)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, p)
|
||||
if tc.input.State == 0 {
|
||||
require.Equal(t, pbpeering.PeeringState_INITIAL, p.State)
|
||||
}
|
||||
require.Equal(t, tc.input.Name, p.Name)
|
||||
}
|
||||
tcs := []testcase{
|
||||
{
|
||||
name: "create baz",
|
||||
input: &pbpeering.Peering{
|
||||
Name: "baz",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "update foo",
|
||||
input: &pbpeering.Peering{
|
||||
Name: "foo",
|
||||
State: pbpeering.PeeringState_FAILING,
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range tcs {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_PeeringWrite_GenerateUUID(t *testing.T) {
|
||||
rand.Seed(1)
|
||||
|
||||
s := NewStateStore(nil)
|
||||
|
||||
entMeta := structs.NodeEnterpriseMetaInDefaultPartition()
|
||||
partition := entMeta.PartitionOrDefault()
|
||||
|
||||
for i := 1; i < 11; i++ {
|
||||
require.NoError(t, s.PeeringWrite(uint64(i), &pbpeering.Peering{
|
||||
Name: fmt.Sprintf("peering-%d", i),
|
||||
Partition: partition,
|
||||
}))
|
||||
}
|
||||
|
||||
idx, peerings, err := s.PeeringList(nil, *entMeta)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(10), idx)
|
||||
require.Len(t, peerings, 10)
|
||||
|
||||
// Ensure that all assigned UUIDs are unique.
|
||||
uniq := make(map[string]struct{})
|
||||
for _, p := range peerings {
|
||||
uniq[p.ID] = struct{}{}
|
||||
}
|
||||
require.Len(t, uniq, 10)
|
||||
|
||||
// Ensure that the ID of an existing peering cannot be overwritten.
|
||||
updated := &pbpeering.Peering{
|
||||
Name: peerings[0].Name,
|
||||
Partition: peerings[0].Partition,
|
||||
}
|
||||
|
||||
// Attempt to overwrite ID.
|
||||
updated.ID, err = uuid.GenerateUUID()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.PeeringWrite(11, updated))
|
||||
|
||||
q := Query{
|
||||
Value: updated.Name,
|
||||
EnterpriseMeta: *entMeta,
|
||||
}
|
||||
idx, got, err := s.PeeringRead(nil, q)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(11), idx)
|
||||
require.Equal(t, peerings[0].ID, got.ID)
|
||||
}
|
||||
|
||||
func TestStore_PeeringDelete(t *testing.T) {
|
||||
s := NewStateStore(nil)
|
||||
insertTestPeerings(t, s)
|
||||
|
||||
q := Query{Value: "foo"}
|
||||
|
||||
require.NoError(t, s.PeeringDelete(10, q))
|
||||
|
||||
_, p, err := s.PeeringRead(nil, q)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, p)
|
||||
}
|
||||
|
||||
func TestStore_PeeringTerminateByID(t *testing.T) {
|
||||
s := NewStateStore(nil)
|
||||
insertTestPeerings(t, s)
|
||||
|
||||
// id corresponding to default/foo
|
||||
id := "9e650110-ac74-4c5a-a6a8-9348b2bed4e9"
|
||||
|
||||
require.NoError(t, s.PeeringTerminateByID(10, id))
|
||||
|
||||
_, p, err := s.PeeringReadByID(nil, id)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, pbpeering.PeeringState_TERMINATED, p.State)
|
||||
}
|
||||
|
||||
func TestStateStore_PeeringTrustBundleRead(t *testing.T) {
|
||||
s := NewStateStore(nil)
|
||||
insertTestPeeringTrustBundles(t, s)
|
||||
|
||||
type testcase struct {
|
||||
name string
|
||||
query Query
|
||||
expect *pbpeering.PeeringTrustBundle
|
||||
}
|
||||
run := func(t *testing.T, tc testcase) {
|
||||
_, ptb, err := s.PeeringTrustBundleRead(nil, tc.query)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expect, ptb)
|
||||
}
|
||||
|
||||
entMeta := structs.NodeEnterpriseMetaInDefaultPartition()
|
||||
|
||||
tcs := []testcase{
|
||||
{
|
||||
name: "get foo",
|
||||
query: Query{
|
||||
Value: "foo",
|
||||
EnterpriseMeta: *entMeta,
|
||||
},
|
||||
expect: &pbpeering.PeeringTrustBundle{
|
||||
TrustDomain: "foo.com",
|
||||
PeerName: "foo",
|
||||
Partition: entMeta.PartitionOrEmpty(),
|
||||
RootPEMs: []string{"foo certificate bundle"},
|
||||
CreateIndex: 1,
|
||||
ModifyIndex: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "get non-existent baz",
|
||||
query: Query{
|
||||
Value: "baz",
|
||||
},
|
||||
expect: nil,
|
||||
},
|
||||
}
|
||||
for _, tc := range tcs {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_PeeringTrustBundleWrite(t *testing.T) {
|
||||
s := NewStateStore(nil)
|
||||
insertTestPeeringTrustBundles(t, s)
|
||||
type testcase struct {
|
||||
name string
|
||||
input *pbpeering.PeeringTrustBundle
|
||||
}
|
||||
run := func(t *testing.T, tc testcase) {
|
||||
require.NoError(t, s.PeeringTrustBundleWrite(10, tc.input))
|
||||
|
||||
q := Query{
|
||||
Value: tc.input.PeerName,
|
||||
EnterpriseMeta: *structs.NodeEnterpriseMetaInPartition(tc.input.Partition),
|
||||
}
|
||||
_, ptb, err := s.PeeringTrustBundleRead(nil, q)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, ptb)
|
||||
require.Equal(t, tc.input.TrustDomain, ptb.TrustDomain)
|
||||
require.Equal(t, tc.input.PeerName, ptb.PeerName)
|
||||
}
|
||||
tcs := []testcase{
|
||||
{
|
||||
name: "create baz",
|
||||
input: &pbpeering.PeeringTrustBundle{
|
||||
TrustDomain: "baz.com",
|
||||
PeerName: "baz",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "update foo",
|
||||
input: &pbpeering.PeeringTrustBundle{
|
||||
TrustDomain: "foo-updated.com",
|
||||
PeerName: "foo",
|
||||
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tc := range tcs {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_PeeringTrustBundleDelete(t *testing.T) {
|
||||
s := NewStateStore(nil)
|
||||
insertTestPeeringTrustBundles(t, s)
|
||||
|
||||
q := Query{Value: "foo"}
|
||||
|
||||
require.NoError(t, s.PeeringTrustBundleDelete(10, q))
|
||||
|
||||
_, ptb, err := s.PeeringRead(nil, q)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, ptb)
|
||||
}
|
||||
|
||||
func TestStateStore_ExportedServicesForPeer(t *testing.T) {
|
||||
s := NewStateStore(nil)
|
||||
|
||||
var lastIdx uint64
|
||||
|
||||
lastIdx++
|
||||
err := s.PeeringWrite(lastIdx, &pbpeering.Peering{
|
||||
Name: "my-peering",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
q := Query{Value: "my-peering"}
|
||||
_, p, err := s.PeeringRead(nil, q)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, p)
|
||||
|
||||
id := p.ID
|
||||
|
||||
ws := memdb.NewWatchSet()
|
||||
|
||||
runStep(t, "no exported services", func(t *testing.T) {
|
||||
idx, exported, err := s.ExportedServicesForPeer(ws, id)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lastIdx, idx)
|
||||
require.Empty(t, exported)
|
||||
})
|
||||
|
||||
runStep(t, "config entry with exact service names", func(t *testing.T) {
|
||||
entry := &structs.ExportedServicesConfigEntry{
|
||||
Name: "default",
|
||||
Services: []structs.ExportedService{
|
||||
{
|
||||
Name: "mysql",
|
||||
Consumers: []structs.ServiceConsumer{
|
||||
{
|
||||
PeerName: "my-peering",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "redis",
|
||||
Consumers: []structs.ServiceConsumer{
|
||||
{
|
||||
PeerName: "my-peering",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "mongo",
|
||||
Consumers: []structs.ServiceConsumer{
|
||||
{
|
||||
PeerName: "my-other-peering",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
lastIdx++
|
||||
err = s.EnsureConfigEntry(lastIdx, entry)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.True(t, watchFired(ws))
|
||||
ws = memdb.NewWatchSet()
|
||||
|
||||
expect := []structs.ServiceName{
|
||||
{
|
||||
Name: "mysql",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
||||
},
|
||||
{
|
||||
Name: "redis",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
||||
},
|
||||
}
|
||||
idx, got, err := s.ExportedServicesForPeer(ws, id)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lastIdx, idx)
|
||||
require.ElementsMatch(t, expect, got)
|
||||
})
|
||||
|
||||
runStep(t, "config entry with wildcard service name picks up existing service", func(t *testing.T) {
|
||||
lastIdx++
|
||||
require.NoError(t, s.EnsureNode(lastIdx, &structs.Node{Node: "foo", Address: "127.0.0.1"}))
|
||||
|
||||
lastIdx++
|
||||
require.NoError(t, s.EnsureService(lastIdx, "foo", &structs.NodeService{ID: "billing", Service: "billing", Port: 5000}))
|
||||
|
||||
entry := &structs.ExportedServicesConfigEntry{
|
||||
Name: "default",
|
||||
Services: []structs.ExportedService{
|
||||
{
|
||||
Name: "*",
|
||||
Consumers: []structs.ServiceConsumer{
|
||||
{
|
||||
PeerName: "my-peering",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
lastIdx++
|
||||
err = s.EnsureConfigEntry(lastIdx, entry)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.True(t, watchFired(ws))
|
||||
ws = memdb.NewWatchSet()
|
||||
|
||||
expect := []structs.ServiceName{
|
||||
{
|
||||
Name: "billing",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
||||
},
|
||||
}
|
||||
idx, got, err := s.ExportedServicesForPeer(ws, id)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lastIdx, idx)
|
||||
require.Equal(t, expect, got)
|
||||
})
|
||||
|
||||
runStep(t, "config entry with wildcard service names picks up new registrations", func(t *testing.T) {
|
||||
lastIdx++
|
||||
require.NoError(t, s.EnsureService(lastIdx, "foo", &structs.NodeService{ID: "payments", Service: "payments", Port: 5000}))
|
||||
|
||||
lastIdx++
|
||||
proxy := structs.NodeService{
|
||||
Kind: structs.ServiceKindConnectProxy,
|
||||
ID: "payments-proxy",
|
||||
Service: "payments-proxy",
|
||||
Port: 5000,
|
||||
}
|
||||
require.NoError(t, s.EnsureService(lastIdx, "foo", &proxy))
|
||||
|
||||
require.True(t, watchFired(ws))
|
||||
ws = memdb.NewWatchSet()
|
||||
|
||||
expect := []structs.ServiceName{
|
||||
{
|
||||
Name: "billing",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
||||
},
|
||||
{
|
||||
Name: "payments",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
||||
},
|
||||
{
|
||||
Name: "payments-proxy",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
||||
},
|
||||
}
|
||||
idx, got, err := s.ExportedServicesForPeer(ws, id)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lastIdx, idx)
|
||||
require.ElementsMatch(t, expect, got)
|
||||
})
|
||||
|
||||
runStep(t, "config entry with wildcard service names picks up service deletions", func(t *testing.T) {
|
||||
lastIdx++
|
||||
require.NoError(t, s.DeleteService(lastIdx, "foo", "billing", nil, ""))
|
||||
|
||||
require.True(t, watchFired(ws))
|
||||
ws = memdb.NewWatchSet()
|
||||
|
||||
expect := []structs.ServiceName{
|
||||
{
|
||||
Name: "payments",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
||||
},
|
||||
{
|
||||
Name: "payments-proxy",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
|
||||
},
|
||||
}
|
||||
idx, got, err := s.ExportedServicesForPeer(ws, id)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lastIdx, idx)
|
||||
require.ElementsMatch(t, expect, got)
|
||||
})
|
||||
|
||||
runStep(t, "deleting the config entry clears exported services", func(t *testing.T) {
|
||||
require.NoError(t, s.DeleteConfigEntry(lastIdx, structs.ExportedServices, "default", structs.DefaultEnterpriseMetaInDefaultPartition()))
|
||||
idx, exported, err := s.ExportedServicesForPeer(ws, id)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lastIdx, idx)
|
||||
require.Empty(t, exported)
|
||||
})
|
||||
}
|
@ -12,10 +12,15 @@ import (
 // Query is a type used to query any single value index that may include an
 // enterprise identifier.
 type Query struct {
-	Value string
+	Value    string
+	PeerName string
 	acl.EnterpriseMeta
 }
 
+func (q Query) PeerOrEmpty() string {
+	return q.PeerName
+}
+
 func (q Query) IDValue() string {
 	return q.Value
 }
@ -137,11 +142,16 @@ func (q BoolQuery) PartitionOrDefault() string {
 // KeyValueQuery is a type used to query for both a key and a value that may
 // include an enterprise identifier.
 type KeyValueQuery struct {
-	Key   string
-	Value string
+	Key      string
+	Value    string
+	PeerName string
 	acl.EnterpriseMeta
 }
 
+func (q KeyValueQuery) PeerOrEmpty() string {
+	return q.PeerName
+}
+
 // NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer
 // receiver for this method. Remove once that is fixed.
 func (q KeyValueQuery) NamespaceOrDefault() string {
@ -8,6 +8,7 @@ import (
 	"strings"
 
 	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/agent/structs"
 )
 
 func prefixIndexFromQuery(arg interface{}) ([]byte, error) {
@ -28,6 +29,29 @@ func prefixIndexFromQuery(arg interface{}) ([]byte, error) {
 	return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg)
 }
 
+func prefixIndexFromQueryWithPeer(arg interface{}) ([]byte, error) {
+	var b indexBuilder
+	switch v := arg.(type) {
+	case *acl.EnterpriseMeta:
+		return nil, nil
+	case acl.EnterpriseMeta:
+		return nil, nil
+	case Query:
+		if v.PeerOrEmpty() == "" {
+			b.String(structs.LocalPeerKeyword)
+		} else {
+			b.String(strings.ToLower(v.PeerOrEmpty()))
+		}
+		if v.Value == "" {
+			return b.Bytes(), nil
+		}
+		b.String(strings.ToLower(v.Value))
+		return b.Bytes(), nil
+	}
+
+	return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg)
+}
+
 func prefixIndexFromQueryNoNamespace(arg interface{}) ([]byte, error) {
 	return prefixIndexFromQuery(arg)
 }
@ -22,12 +22,16 @@ func newDBSchema() *memdb.DBSchema {
 		configTableSchema,
 		coordinatesTableSchema,
 		federationStateTableSchema,
+		freeVirtualIPTableSchema,
 		gatewayServicesTableSchema,
 		indexTableSchema,
 		intentionsTableSchema,
+		kindServiceNameTableSchema,
 		kvsTableSchema,
 		meshTopologyTableSchema,
 		nodesTableSchema,
+		peeringTableSchema,
+		peeringTrustBundlesTableSchema,
 		policiesTableSchema,
 		preparedQueriesTableSchema,
 		rolesTableSchema,
@ -39,8 +43,6 @@ func newDBSchema() *memdb.DBSchema {
 		tokensTableSchema,
 		tombstonesTableSchema,
 		usageTableSchema,
-		freeVirtualIPTableSchema,
-		kindServiceNameTableSchema,
 	)
 	withEnterpriseSchema(db)
 	return db
@ -3,7 +3,12 @@
 
 package state
 
-import "github.com/hashicorp/consul/acl"
+import (
+	"fmt"
+
+	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/agent/structs"
+)
 
 func partitionedIndexEntryName(entry string, _ string) string {
 	return entry
@ -12,3 +17,11 @@ func partitionedIndexEntryName(entry string, _ string) string {
 func partitionedAndNamespacedIndexEntryName(entry string, _ *acl.EnterpriseMeta) string {
 	return entry
 }
+
+// peeredIndexEntryName returns the peered index key for an importable entity (e.g. checks, services, or nodes).
+func peeredIndexEntryName(entry, peerName string) string {
+	if peerName == "" {
+		peerName = structs.LocalPeerKeyword
+	}
+	return fmt.Sprintf("peer.%s:%s", peerName, entry)
+}
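peeredIndexEntryName groups local entries under structs.LocalPeerKeyword and gives each named peer its own key space of the form "peer.<peer-name>:<table>". The sketch below is illustrative only, assumed to sit in package state next to the earlier sketches; the peer name is made up, and the exact LocalPeerKeyword value is defined in the structs package.

```go
func TestPeeredIndexEntryName_Sketch(t *testing.T) {
	// Local (non-imported) entries are grouped under structs.LocalPeerKeyword.
	require.Equal(t,
		"peer."+structs.LocalPeerKeyword+":services",
		peeredIndexEntryName("services", ""))

	// Entries imported from a named peer get their own index key space.
	require.Equal(t, "peer.cluster-02:services", peeredIndexEntryName("services", "cluster-02"))
}
```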
@ -553,7 +553,7 @@ func TestStateStore_Session_Invalidate_DeleteNode(t *testing.T) {
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
-	if err := s.DeleteNode(15, "foo", nil); err != nil {
+	if err := s.DeleteNode(15, "foo", nil, ""); err != nil {
 		t.Fatalf("err: %v", err)
 	}
 	if !watchFired(ws) {
@ -608,7 +608,7 @@ func TestStateStore_Session_Invalidate_DeleteService(t *testing.T) {
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
-	if err := s.DeleteService(15, "foo", "api", nil); err != nil {
+	if err := s.DeleteService(15, "foo", "api", nil, ""); err != nil {
 		t.Fatalf("err: %v", err)
 	}
 	if !watchFired(ws) {
@ -709,7 +709,7 @@ func TestStateStore_Session_Invalidate_DeleteCheck(t *testing.T) {
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
-	if err := s.DeleteCheck(15, "foo", "bar", nil); err != nil {
+	if err := s.DeleteCheck(15, "foo", "bar", nil, ""); err != nil {
 		t.Fatalf("err: %v", err)
 	}
 	if !watchFired(ws) {
@ -777,7 +777,7 @@ func TestStateStore_Session_Invalidate_Key_Unlock_Behavior(t *testing.T) {
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
-	if err := s.DeleteNode(6, "foo", nil); err != nil {
+	if err := s.DeleteNode(6, "foo", nil, ""); err != nil {
 		t.Fatalf("err: %v", err)
 	}
 	if !watchFired(ws) {
@ -859,7 +859,7 @@ func TestStateStore_Session_Invalidate_Key_Delete_Behavior(t *testing.T) {
 	if err != nil {
 		t.Fatalf("err: %v", err)
 	}
-	if err := s.DeleteNode(6, "foo", nil); err != nil {
+	if err := s.DeleteNode(6, "foo", nil, ""); err != nil {
 		t.Fatalf("err: %v", err)
 	}
 	if !watchFired(ws) {
Some files were not shown because too many files have changed in this diff.