mirror of https://github.com/status-im/consul.git

commit a178e87e14 (merge)
@@ -0,0 +1,3 @@
+```release-note:enhancement
+catalog: Add per-node indexes to reduce watchset firing for unrelated nodes and services.
+```

@@ -0,0 +1,3 @@
+```release-note:bug
+deps: Update go-grpc/grpc, resolving connection memory leak
+```

@@ -0,0 +1,4 @@
+```release-note:feature
+agent: Added information about build date alongside other version information for Consul. Extended /agent/self endpoint and `consul version` commands
+to report this. Agent also reports build date in log on startup.
+```

@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: upgrade ember-composable-helpers to v5.x
+```

@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fix incorrect text on certain page empty states
+```

@@ -0,0 +1,3 @@
+```release-note:improvement
+dns: Added support for specifying admin partition in node lookups.
+```

@@ -0,0 +1,3 @@
+```release-note:improvement
+connect: Update Envoy support matrix to latest patch releases (1.22.2, 1.21.3, 1.20.4, 1.19.5)
+```
@@ -0,0 +1,3 @@
+```release-note:enhancement
+api: `merge-central-config` query parameter support added to `/catalog/node-services/:node-name` API, to view a fully resolved service definition (especially when not written into the catalog that way).
+```
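A minimal sketch (not part of this commit) of exercising the new query parameter against a local agent over plain HTTP; the agent address and the node name `my-node` are placeholder assumptions:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// merge-central-config asks the catalog endpoint to return service
	// definitions with central config entries already merged in.
	url := "http://127.0.0.1:8500/v1/catalog/node-services/my-node?merge-central-config"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```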
@@ -0,0 +1,4 @@
+```release-note:improvement
+command: Add support for enabling TLS in the Envoy Prometheus endpoint via the `consul connect envoy` command.
+Adds the `-prometheus-ca-file`, `-prometheus-ca-path`, `-prometheus-cert-file` and `-prometheus-key-file` flags.
+```

@@ -0,0 +1,3 @@
+```release-note:breaking-change
+telemetry: config flag `telemetry { disable_compat_1.9 = (true|false) }` has been removed. Before upgrading you should remove this flag from your config if the flag is being used.
+```

@@ -0,0 +1,3 @@
+```release-note:bug
+xds: Fix a bug that resulted in Lambda services not using the payload-passthrough option as expected.
+```

@@ -0,0 +1,3 @@
+```release-note:feature
+streaming: Added topics for `ingress-gateway`, `mesh`, `service-intentions` and `service-resolver` config entry events.
+```
@@ -4,4 +4,7 @@ export GIT_COMMIT=$(git rev-parse --short HEAD)
 export GIT_COMMIT_YEAR=$(git show -s --format=%cd --date=format:%Y HEAD)
 export GIT_DIRTY=$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)
 export GIT_IMPORT=github.com/hashicorp/consul/version
-export GOLDFLAGS="-X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}${GIT_DIRTY}"
+# we're using this for build date because it's stable across platform builds
+# the env -i and -noprofile are used to ensure we don't try to recursively call this profile when starting bash
+export GIT_DATE=$(env -i /bin/bash --noprofile -norc ${CIRCLE_WORKING_DIRECTORY}/build-support/scripts/build-date.sh)
+export GOLDFLAGS="-X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}${GIT_DIRTY} -X ${GIT_IMPORT}.BuildDate=${GIT_DATE}"
@@ -23,6 +23,10 @@ references:
     BASH_ENV: .circleci/bash_env.sh
     VAULT_BINARY_VERSION: 1.9.4
     GO_VERSION: 1.18.1
+  envoy-versions: &supported_envoy_versions
+    - &default_envoy_version "1.19.5"
+    - "1.20.4"
+    - "1.21.3"
   images:
     # When updating the Go version, remember to also update the versions in the
     # workflows section for go-test-lib jobs.
@@ -30,7 +34,7 @@ references:
     ember: &EMBER_IMAGE docker.mirror.hashicorp.services/circleci/node:14-browsers
     ubuntu: &UBUNTU_CI_IMAGE ubuntu-2004:202201-02
   cache:
-    yarn: &YARN_CACHE_KEY consul-ui-v7-{{ checksum "ui/yarn.lock" }}
+    yarn: &YARN_CACHE_KEY consul-ui-v8-{{ checksum "ui/yarn.lock" }}

 steps:
   install-gotestsum: &install-gotestsum
@@ -852,13 +856,23 @@ jobs:
           path: *TEST_RESULTS_DIR
       - run: *notify-slack-failure

-  envoy-integration-test-1_19_3: &ENVOY_TESTS
+  envoy-integration-test: &ENVOY_TESTS
    machine:
      image: *UBUNTU_CI_IMAGE
    parallelism: 4
    resource_class: medium
+    parameters:
+      envoy-version:
+        type: enum
+        enum: *supported_envoy_versions
+        default: *default_envoy_version
+      xds-target:
+        type: enum
+        enum: ["server", "client"]
+        default: "server"
    environment:
-      ENVOY_VERSION: "1.19.3"
+      ENVOY_VERSION: << parameters.envoy-version >>
+      XDS_TARGET: << parameters.xds-target >>
    steps: &ENVOY_INTEGRATION_TEST_STEPS
      - checkout
      # Get go binary from workspace
@@ -891,21 +905,6 @@ jobs:
           path: *TEST_RESULTS_DIR
       - run: *notify-slack-failure

-  envoy-integration-test-1_20_2:
-    <<: *ENVOY_TESTS
-    environment:
-      ENVOY_VERSION: "1.20.2"
-
-  envoy-integration-test-1_21_1:
-    <<: *ENVOY_TESTS
-    environment:
-      ENVOY_VERSION: "1.21.1"
-
-  envoy-integration-test-1_22_0:
-    <<: *ENVOY_TESTS
-    environment:
-      ENVOY_VERSION: "1.22.0"
-
   # run integration tests for the connect ca providers
   test-connect-ca-providers:
     docker:
@@ -930,21 +929,6 @@ jobs:
           path: *TEST_RESULTS_DIR
       - run: *notify-slack-failure

-  trigger-oss-merge:
-    docker:
-      - image: docker.mirror.hashicorp.services/alpine:3.12
-    steps:
-      - run: apk add --no-cache --no-progress curl jq
-      - run:
-          name: trigger oss merge
-          command: |
-            curl -s -X POST \
-              --header "Circle-Token: ${CIRCLECI_API_TOKEN}" \
-              --header "Content-Type: application/json" \
-              -d '{"build_parameters": {"CIRCLE_JOB": "oss-merge"}}' \
-              "https://circleci.com/api/v1.1/project/github/hashicorp/consul-enterprise/tree/${CIRCLE_BRANCH}" | jq -r '.build_url'
-      - run: *notify-slack-failure
-
   # Run load tests against a commit
   load-test:
     docker:
@@ -1131,18 +1115,13 @@ workflows:
      - nomad-integration-0_8:
          requires:
            - dev-build
-      - envoy-integration-test-1_19_3:
-          requires:
-            - dev-build
-      - envoy-integration-test-1_20_2:
-          requires:
-            - dev-build
-      - envoy-integration-test-1_21_1:
-          requires:
-            - dev-build
-      - envoy-integration-test-1_22_0:
+      - envoy-integration-test:
          requires:
            - dev-build
+          matrix:
+            parameters:
+              envoy-version: *supported_envoy_versions
+              xds-target: ["server", "client"]
      - compatibility-integration-test:
          requires:
            - dev-build
@@ -1180,16 +1159,6 @@ workflows:
          requires:
            - ember-build-ent
      - noop
-  workflow-automation:
-    unless: << pipeline.parameters.trigger-load-test >>
-    jobs:
-      - trigger-oss-merge:
-          context: team-consul
-          filters:
-            branches:
-              only:
-                - main
-                - /release\/\d+\.\d+\.x$/

   load-test:
     when: << pipeline.parameters.trigger-load-test >>
@@ -7,12 +7,6 @@ provider "aws" {
   assume_role {
     role_arn = var.role_arn
   }
-
-  default_tags {
-    tags = {
-      Environment = "ConsulLoadTest"
-    }
-  }
 }

 module "load-test" {
@@ -21,6 +15,7 @@ module "load-test" {
   vpc_az               = ["us-east-2a", "us-east-2b"]
   vpc_name             = var.vpc_name
   vpc_cidr             = "10.0.0.0/16"
+  vpc_allwed_ssh_cidr  = "0.0.0.0/0"
   public_subnet_cidrs  = ["10.0.1.0/24", "10.0.2.0/24"]
   private_subnet_cidrs = ["10.0.3.0/24"]
   test_public_ip       = true
@@ -6,7 +6,7 @@ set -uo pipefail
 ### It is still up to the reviewer to make sure that any tests added are needed and meaningful.

 # search for any "new" or modified metric emissions
-metrics_modified=$(git --no-pager diff HEAD origin/main | grep -i "SetGauge\|EmitKey\|IncrCounter\|AddSample\|MeasureSince\|UpdateFilter")
+metrics_modified=$(git --no-pager diff origin/main...HEAD | grep -i "SetGauge\|EmitKey\|IncrCounter\|AddSample\|MeasureSince\|UpdateFilter")
 # search for PR body or title metric references
 metrics_in_pr_body=$(echo "${PR_BODY-""}" | grep -i "metric")
 metrics_in_pr_title=$(echo "${PR_TITLE-""}" | grep -i "metric")
@@ -73,7 +73,7 @@ function verify_rpm {
      docker_platform="linux/amd64"
      docker_image="amd64/centos:7"
      ;;
-    *.arm.rpm)
+    *.armv7hl.rpm)
      docker_platform="linux/arm/v7"
      docker_image="arm32v7/fedora:36"
      ;;
@@ -120,7 +120,7 @@ function verify_deb {
      docker_platform="linux/amd64"
      docker_image="amd64/debian:bullseye"
      ;;
-    *_arm.deb)
+    *_armhf.deb)
      docker_platform="linux/arm/v7"
      docker_image="arm32v7/debian:bullseye"
      ;;
@@ -15,6 +15,7 @@ jobs:
    runs-on: ubuntu-latest
    outputs:
      product-version: ${{ steps.get-product-version.outputs.product-version }}
+      product-date: ${{ steps.get-product-version.outputs.product-date }}
      pre-version: ${{ steps.get-product-version.outputs.pre-version }}
      pkg-version: ${{ steps.get-product-version.outputs.pkg-version }}
      shared-ldflags: ${{ steps.shared-ldflags.outputs.shared-ldflags }}
@@ -24,6 +25,7 @@ jobs:
        id: get-product-version
        run: |
          CONSUL_VERSION=$(build-support/scripts/version.sh -r)
+          CONSUL_DATE=$(build-support/scripts/build-date.sh)
          ## TODO: This assumes `make version` outputs 1.1.1+ent-prerel
          IFS="+" read VERSION _other <<< "$CONSUL_VERSION"
          IFS="-" read _other PREREL_VERSION <<< "$CONSUL_VERSION"
@@ -32,12 +34,15 @@ jobs:
          ## [version]{-prerelease}+ent before then, we'll need to add
          ## logic to handle presense/absence of the prerelease
          echo "::set-output name=product-version::${CONSUL_VERSION}"
+          echo "::set-output name=product-date::${CONSUL_DATE}"
          echo "::set-output name=pre-version::${PREREL_VERSION}"
          echo "::set-output name=pkg-version::${VERSION}"

      - name: Set shared -ldflags
        id: shared-ldflags
-        run: echo "::set-output name=shared-ldflags::-X github.com/hashicorp/consul/version.GitCommit=${GITHUB_SHA::8} -X github.com/hashicorp/consul/version.GitDescribe=${{ steps.get-product-version.outputs.product-version }}"
+        run: |
+          T="github.com/hashicorp/consul/version"
+          echo "::set-output name=shared-ldflags::-X ${T}.GitCommit=${GITHUB_SHA::8} -X ${T}.GitDescribe=${{ steps.get-product-version.outputs.product-version }} -X ${T}.BuildDate=${{ steps.get-product-version.outputs.product-date }}"

  generate-metadata-file:
    needs: get-product-version
@@ -95,9 +100,11 @@ jobs:
      - name: Build UI
        run: |
          CONSUL_VERSION=${{ needs.get-product-version.outputs.product-version }}
+          CONSUL_DATE=${{ needs.get-product-version.outputs.product-date }}
          CONSUL_BINARY_TYPE=${CONSUL_BINARY_TYPE}
          CONSUL_COPYRIGHT_YEAR=$(git show -s --format=%cd --date=format:%Y HEAD)
          echo "consul_version is ${CONSUL_VERSION}"
+          echo "consul_date is ${CONSUL_DATE}"
          echo "consul binary type is ${CONSUL_BINARY_TYPE}"
          echo "consul copyright year is ${CONSUL_COPYRIGHT_YEAR}"
          cd ui && make && cd ..
@@ -225,6 +232,14 @@ jobs:

    steps:
      - uses: actions/checkout@v2

+      # Strip everything but MAJOR.MINOR from the version string and add a `-dev` suffix
+      # This naming convention will be used ONLY for per-commit dev images
+      - name: Set docker dev tag
+        run: |
+          version="${{ env.version }}"
+          echo "dev_tag=${version%.*}-dev" >> $GITHUB_ENV
+
      - name: Docker Build (Action)
        uses: hashicorp/actions-docker-build@v1
        with:
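The `${version%.*}-dev` expansion above strips the trailing patch component and appends `-dev`. A standalone Go sketch of the same derivation (illustrative only, not part of the workflow):

```go
package main

import (
	"fmt"
	"strings"
)

// devTag mirrors the shell expansion "${version%.*}-dev": drop everything
// after the last "." (the patch number) and append "-dev".
func devTag(version string) string {
	if i := strings.LastIndex(version, "."); i >= 0 {
		version = version[:i]
	}
	return version + "-dev"
}

func main() {
	fmt.Println(devTag("1.13.0")) // prints "1.13-dev"
}
```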
@@ -235,8 +250,8 @@ jobs:
            docker.io/hashicorp/${{env.repo}}:${{env.version}}
            public.ecr.aws/hashicorp/${{env.repo}}:${{env.version}}
          dev_tags: |
-            docker.io/hashicorppreview/${{ env.repo }}:${{ env.version }}
-            docker.io/hashicorppreview/${{ env.repo }}:${{ env.version }}-${{ github.sha }}
+            docker.io/hashicorppreview/${{ env.repo }}:${{ env.dev_tag }}
+            docker.io/hashicorppreview/${{ env.repo }}:${{ env.dev_tag }}-${{ github.sha }}
          smoke_test: .github/scripts/verify_docker.sh v${{ env.version }}

  build-docker-redhat:
@@ -256,7 +271,7 @@ jobs:
          version: ${{env.version}}
          target: ubi
          arch: amd64
-          redhat_tag: scan.connect.redhat.com/ospid-612d01d49f14588c41ebf67c/${{env.repo}}:${{env.version}}-ubi
+          redhat_tag: scan.connect.redhat.com/ospid-60f9fdbec3a80eac643abedf/${{env.repo}}:${{env.version}}-ubi
          smoke_test: .github/scripts/verify_docker.sh v${{ env.version }}

  verify-linux:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
arch: ["i386", "amd64", "arm", "arm64"]
|
arch: ["i386", "amd64", "armhf", "arm64"]
|
||||||
# fail-fast: true
|
# fail-fast: true
|
||||||
env:
|
env:
|
||||||
version: ${{ needs.get-product-version.outputs.product-version }}
|
version: ${{ needs.get-product-version.outputs.product-version }}
|
||||||
|
@@ -361,7 +376,7 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
-        arch: ["i386", "x86_64", "arm", "aarch64"]
+        arch: ["i386", "x86_64", "armv7hl", "aarch64"]
      # fail-fast: true
    env:
      version: ${{ needs.get-product-version.outputs.product-version }}
@@ -5,7 +5,7 @@ container {
 }

 binary {
-  secrets    = true
+  secrets    = false
   go_modules = false
   osv        = true
   oss_index  = true
CHANGELOG.md (+55)
@@ -1,3 +1,58 @@
+## 1.13.0-alpha2 (June 21, 2022)
+
+IMPROVEMENTS:
+
+* api: `merge-central-config` query parameter support added to `/catalog/node-services/:node-name` API, to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-13450](https://github.com/hashicorp/consul/issues/13450)]
+* connect: Update Envoy support matrix to latest patch releases (1.22.2, 1.21.3, 1.20.4, 1.19.5) [[GH-13431](https://github.com/hashicorp/consul/issues/13431)]
+
+BUG FIXES:
+
+* ui: Fix incorrect text on certain page empty states [[GH-13409](https://github.com/hashicorp/consul/issues/13409)]
+
+## 1.13.0-alpha1 (June 15, 2022)
+
+BREAKING CHANGES:
+
+* config-entry: Exporting a specific service name across all namespace is invalid.
+
+FEATURES:
+
+* acl: It is now possible to login and logout using the gRPC API [[GH-12935](https://github.com/hashicorp/consul/issues/12935)]
+* agent: Added information about build date alongside other version information for Consul. Extended /agent/self endpoint and `consul version` commands
+to report this. Agent also reports build date in log on startup. [[GH-13357](https://github.com/hashicorp/consul/issues/13357)]
+* ca: Leaf certificates can now be obtained via the gRPC API: `Sign` [[GH-12787](https://github.com/hashicorp/consul/issues/12787)]
+* checks: add UDP health checks.. [[GH-12722](https://github.com/hashicorp/consul/issues/12722)]
+* grpc: New gRPC endpoint to return envoy bootstrap parameters. [[GH-12825](https://github.com/hashicorp/consul/issues/12825)]
+* grpc: New gRPC endpoint to return envoy bootstrap parameters. [[GH-1717](https://github.com/hashicorp/consul/issues/1717)]
+* grpc: New gRPC service and endpoint to return the list of supported consul dataplane features [[GH-12695](https://github.com/hashicorp/consul/issues/12695)]
+
+IMPROVEMENTS:
+
+* api: `merge-central-config` query parameter support added to some catalog and health endpoints to view a fully resolved service definition (especially when not written into the catalog that way). [[GH-13001](https://github.com/hashicorp/consul/issues/13001)]
+* api: add the ability to specify a path prefix for when consul is behind a reverse proxy or API gateway [[GH-12914](https://github.com/hashicorp/consul/issues/12914)]
+* connect: add validation to ensure connect native services have a port or socketpath specified on catalog registration.
+This was the only missing piece to ensure all mesh services are validated for a port (or socketpath) specification on catalog registration. [[GH-12881](https://github.com/hashicorp/consul/issues/12881)]
+* Support Vault namespaces in Connect CA by adding RootPKINamespace and
+IntermediatePKINamespace fields to the config. [[GH-12904](https://github.com/hashicorp/consul/issues/12904)]
+* acl: Clarify node/service identities must be lowercase [[GH-12807](https://github.com/hashicorp/consul/issues/12807)]
+* connect: Added a `max_inbound_connections` setting to service-defaults for limiting the number of concurrent inbound connections to each service instance. [[GH-13143](https://github.com/hashicorp/consul/issues/13143)]
+* dns: Added support for specifying admin partition in node lookups. [[GH-13421](https://github.com/hashicorp/consul/issues/13421)]
+* grpc: Add a new ServerDiscovery.WatchServers gRPC endpoint for being notified when the set of ready servers has changed. [[GH-12819](https://github.com/hashicorp/consul/issues/12819)]
+* telemetry: Added `consul.raft.thread.main.saturation` and `consul.raft.thread.fsm.saturation` metrics to measure approximate saturation of the Raft goroutines [[GH-12865](https://github.com/hashicorp/consul/issues/12865)]
+* telemetry: Added a `consul.server.isLeader` metric to track if a server is a leader or not. [[GH-13304](https://github.com/hashicorp/consul/issues/13304)]
+* ui: removed external dependencies for serving UI assets in favor of Go's native embed capabilities [[GH-10996](https://github.com/hashicorp/consul/issues/10996)]
+* ui: upgrade ember-composable-helpers to v5.x [[GH-13394](https://github.com/hashicorp/consul/issues/13394)]
+
+BUG FIXES:
+
+* acl: Fixed a bug where the ACL down policy wasn't being applied on remote errors from the primary datacenter. [[GH-12885](https://github.com/hashicorp/consul/issues/12885)]
+* agent: Fixed a bug in HTTP handlers where URLs were being decoded twice [[GH-13256](https://github.com/hashicorp/consul/issues/13256)]
+* deps: Update go-grpc/grpc, resolving connection memory leak [[GH-13051](https://github.com/hashicorp/consul/issues/13051)]
+* fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services [[GH-13127](https://github.com/hashicorp/consul/issues/13127)]
+* proxycfg: Fixed a minor bug that would cause configuring a terminating gateway to watch too many service resolvers and waste resources doing filtering. [[GH-13012](https://github.com/hashicorp/consul/issues/13012)]
+* raft: upgrade to v1.3.8 which fixes a bug where non cluster member can still be able to participate in an election. [[GH-12844](https://github.com/hashicorp/consul/issues/12844)]
+* serf: upgrade serf to v0.9.8 which fixes a bug that crashes Consul when serf keyrings are listed [[GH-13062](https://github.com/hashicorp/consul/issues/13062)]
+
 ## 1.12.2 (June 3, 2022)

 BUG FIXES:
@@ -25,7 +25,9 @@ GIT_COMMIT?=$(shell git rev-parse --short HEAD)
 GIT_COMMIT_YEAR?=$(shell git show -s --format=%cd --date=format:%Y HEAD)
 GIT_DIRTY?=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true)
 GIT_IMPORT=github.com/hashicorp/consul/version
-GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY)
+DATE_FORMAT="%Y-%m-%dT%H:%M:%SZ" # it's tricky to do an RFC3339 format in a cross platform way, so we hardcode UTC
+GIT_DATE=$(shell $(CURDIR)/build-support/scripts/build-date.sh) # we're using this for build date because it's stable across platform builds
+GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY) -X $(GIT_IMPORT).BuildDate=$(GIT_DATE)

 ifeq ($(FORCE_REBUILD),1)
 NOCACHE=--no-cache
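For reference, the `-X importpath.Var=value` link flag used above only works on package-level string variables. A standalone sketch of the pattern, using `main.BuildDate` rather than Consul's actual version package:

```go
package main

import "fmt"

// BuildDate is meant to be overridden at link time, e.g.:
//
//	go build -ldflags "-X main.BuildDate=2022-06-21T00:00:00Z"
var BuildDate = "unknown"

func main() {
	fmt.Println("build date:", BuildDate)
}
```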
@@ -331,12 +333,12 @@ ifeq ("$(GOTAGS)","")
	@docker tag consul-dev:latest consul:local
	@docker run --rm -t consul:local consul version
	@cd ./test/integration/consul-container && \
-		go test -v -timeout=30m ./upgrade --target-version local --latest-version latest
+		go test -v -timeout=30m ./... --target-version local --latest-version latest
 else
	@docker tag consul-dev:latest hashicorp/consul-enterprise:local
	@docker run --rm -t hashicorp/consul-enterprise:local consul version
	@cd ./test/integration/consul-container && \
-		go test -v -timeout=30m ./upgrade --tags $(GOTAGS) --target-version local --latest-version latest
+		go test -v -timeout=30m ./... --tags $(GOTAGS) --target-version local --latest-version latest
 endif

 .PHONY: test-metrics-integ
@@ -49,6 +49,7 @@ const (
	ResourceQuery   Resource = "query"
	ResourceService Resource = "service"
	ResourceSession Resource = "session"
+	ResourcePeering Resource = "peering"
 )

 // Authorizer is the interface for policy enforcement.
@@ -540,6 +541,14 @@ func Enforce(authz Authorizer, rsc Resource, segment string, access string, ctx
		case "write":
			return authz.SessionWrite(segment, ctx), nil
		}
+	case ResourcePeering:
+		// TODO (peering) switch this over to using PeeringRead & PeeringWrite methods once implemented
+		switch lowerAccess {
+		case "read":
+			return authz.OperatorRead(ctx), nil
+		case "write":
+			return authz.OperatorWrite(ctx), nil
+		}
	default:
		if processed, decision, err := enforceEnterprise(authz, rsc, segment, lowerAccess, ctx); processed {
			return decision, err
@@ -462,6 +462,34 @@ func TestACL_Enforce(t *testing.T) {
			ret:      Deny,
			err:      "Invalid access level",
		},
+		{
+			// TODO (peering) Update to use PeeringRead
+			method:   "OperatorRead",
+			resource: ResourcePeering,
+			access:   "read",
+			ret:      Allow,
+		},
+		{
+			// TODO (peering) Update to use PeeringRead
+			method:   "OperatorRead",
+			resource: ResourcePeering,
+			access:   "read",
+			ret:      Deny,
+		},
+		{
+			// TODO (peering) Update to use PeeringWrite
+			method:   "OperatorWrite",
+			resource: ResourcePeering,
+			access:   "write",
+			ret:      Allow,
+		},
+		{
+			// TODO (peering) Update to use PeeringWrite
+			method:   "OperatorWrite",
+			resource: ResourcePeering,
+			access:   "write",
+			ret:      Deny,
+		},
		{
			method:   "PreparedQueryRead",
			resource: ResourceQuery,
@@ -82,7 +82,9 @@ func (m *EnterpriseMeta) MergeNoWildcard(_ *EnterpriseMeta) {
	// do nothing
 }

 func (_ *EnterpriseMeta) Normalize() {}
+func (_ *EnterpriseMeta) NormalizePartition() {}
+func (_ *EnterpriseMeta) NormalizeNamespace() {}

 func (m *EnterpriseMeta) Matches(_ *EnterpriseMeta) bool {
	return true
@@ -0,0 +1,27 @@
+package resolver
+
+import (
+	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/agent/structs"
+)
+
+type Result struct {
+	acl.Authorizer
+	// TODO: likely we can reduce this interface
+	ACLIdentity structs.ACLIdentity
+}
+
+func (a Result) AccessorID() string {
+	if a.ACLIdentity == nil {
+		return ""
+	}
+	return a.ACLIdentity.ID()
+}
+
+func (a Result) Identity() structs.ACLIdentity {
+	return a.ACLIdentity
+}
+
+func (a Result) ToAllowAuthorizer() acl.AllowAuthorizer {
+	return acl.AllowAuthorizer{Authorizer: a, AccessorID: a.AccessorID()}
+}
@@ -11,6 +11,7 @@ import (
	"github.com/hashicorp/serf/serf"

	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/acl/resolver"
	"github.com/hashicorp/consul/agent/config"
	"github.com/hashicorp/consul/agent/consul"
	"github.com/hashicorp/consul/agent/local"
@@ -94,15 +95,15 @@ func (a *TestACLAgent) ResolveToken(secretID string) (acl.Authorizer, error) {
	return authz, err
 }

-func (a *TestACLAgent) ResolveTokenAndDefaultMeta(secretID string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error) {
+func (a *TestACLAgent) ResolveTokenAndDefaultMeta(secretID string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (resolver.Result, error) {
	authz, err := a.ResolveToken(secretID)
	if err != nil {
-		return consul.ACLResolveResult{}, err
+		return resolver.Result{}, err
	}

	identity, err := a.resolveIdentFn(secretID)
	if err != nil {
-		return consul.ACLResolveResult{}, err
+		return resolver.Result{}, err
	}

	// Default the EnterpriseMeta based on the Tokens meta or actual defaults
@@ -116,7 +117,7 @@ func (a *TestACLAgent) ResolveTokenAndDefaultMeta(secretID string, entMeta *acl.
	// Use the meta to fill in the ACL authorization context
	entMeta.FillAuthzContext(authzContext)

-	return consul.ACLResolveResult{Authorizer: authz, ACLIdentity: identity}, err
+	return resolver.Result{Authorizer: authz, ACLIdentity: identity}, err
 }

 // All of these are stubs to satisfy the interface
@@ -30,6 +30,7 @@ import (
	"google.golang.org/grpc"

	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/acl/resolver"
	"github.com/hashicorp/consul/agent/ae"
	"github.com/hashicorp/consul/agent/cache"
	cachetype "github.com/hashicorp/consul/agent/cache-types"
@@ -177,7 +178,7 @@ type delegate interface {
	// actions based on the permissions granted to the token.
	// If either entMeta or authzContext are non-nil they will be populated with the
	// default partition and namespace from the token.
-	ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error)
+	ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (resolver.Result, error)

	RPC(method string, args interface{}, reply interface{}) error
	SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer, replyFn structs.SnapshotReplyFn) error
@@ -632,30 +633,8 @@ func (a *Agent) Start(ctx context.Context) error {
	go a.baseDeps.ViewStore.Run(&lib.StopChannelContext{StopCh: a.shutdownCh})

	// Start the proxy config manager.
-	proxyDataSources := proxycfg.DataSources{
-		CARoots:                         proxycfgglue.CacheCARoots(a.cache),
-		CompiledDiscoveryChain:          proxycfgglue.CacheCompiledDiscoveryChain(a.cache),
-		ConfigEntry:                     proxycfgglue.CacheConfigEntry(a.cache),
-		ConfigEntryList:                 proxycfgglue.CacheConfigEntryList(a.cache),
-		Datacenters:                     proxycfgglue.CacheDatacenters(a.cache),
-		FederationStateListMeshGateways: proxycfgglue.CacheFederationStateListMeshGateways(a.cache),
-		GatewayServices:                 proxycfgglue.CacheGatewayServices(a.cache),
-		Health:                          proxycfgglue.Health(a.rpcClientHealth),
-		HTTPChecks:                      proxycfgglue.CacheHTTPChecks(a.cache),
-		Intentions:                      proxycfgglue.CacheIntentions(a.cache),
-		IntentionUpstreams:              proxycfgglue.CacheIntentionUpstreams(a.cache),
-		InternalServiceDump:             proxycfgglue.CacheInternalServiceDump(a.cache),
-		LeafCertificate:                 proxycfgglue.CacheLeafCertificate(a.cache),
-		PreparedQuery:                   proxycfgglue.CachePrepraredQuery(a.cache),
-		ResolvedServiceConfig:           proxycfgglue.CacheResolvedServiceConfig(a.cache),
-		ServiceList:                     proxycfgglue.CacheServiceList(a.cache),
-		TrustBundle:                     proxycfgglue.CacheTrustBundle(a.cache),
-		TrustBundleList:                 proxycfgglue.CacheTrustBundleList(a.cache),
-		ExportedPeeredServices:          proxycfgglue.CacheExportedPeeredServices(a.cache),
-	}
-	a.fillEnterpriseProxyDataSources(&proxyDataSources)
	a.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{
-		DataSources: proxyDataSources,
+		DataSources: a.proxyDataSources(),
		Logger:      a.logger.Named(logging.ProxyConfig),
		Source: &structs.QuerySource{
			Datacenter: a.config.Datacenter,
@@ -739,12 +718,6 @@ func (a *Agent) Start(ctx context.Context) error {
		go a.retryJoinWAN()
	}

-	// DEPRECATED: Warn users if they're emitting deprecated metrics. Remove this warning and the flagged metrics in a
-	// future release of Consul.
-	if !a.config.Telemetry.DisableCompatOneNine {
-		a.logger.Warn("DEPRECATED Backwards compatibility with pre-1.9 metrics enabled. These metrics will be removed in Consul 1.13. Consider not using this flag and rework instrumentation for 1.10 style http metrics.")
-	}
-
	if a.tlsConfigurator.Cert() != nil {
		m := tlsCertExpirationMonitor(a.tlsConfigurator, a.logger)
		go m.Monitor(&lib.StopChannelContext{StopCh: a.shutdownCh})
@@ -3890,12 +3863,6 @@ func (a *Agent) reloadConfig(autoReload bool) error {
		}
	}

-	// DEPRECATED: Warn users on reload if they're emitting deprecated metrics. Remove this warning and the flagged
-	// metrics in a future release of Consul.
-	if !a.config.Telemetry.DisableCompatOneNine {
-		a.logger.Warn("DEPRECATED Backwards compatibility with pre-1.9 metrics enabled. These metrics will be removed in Consul 1.13. Consider not using this flag and rework instrumentation for 1.10 style http metrics.")
-	}
-
	return a.reloadConfigInternal(newCfg)
 }
@@ -4240,6 +4207,49 @@ func (a *Agent) listenerPortLocked(svcID structs.ServiceID, checkID structs.Chec
	return port, nil
 }

+func (a *Agent) proxyDataSources() proxycfg.DataSources {
+	sources := proxycfg.DataSources{
+		CARoots:                         proxycfgglue.CacheCARoots(a.cache),
+		CompiledDiscoveryChain:          proxycfgglue.CacheCompiledDiscoveryChain(a.cache),
+		ConfigEntry:                     proxycfgglue.CacheConfigEntry(a.cache),
+		ConfigEntryList:                 proxycfgglue.CacheConfigEntryList(a.cache),
+		Datacenters:                     proxycfgglue.CacheDatacenters(a.cache),
+		FederationStateListMeshGateways: proxycfgglue.CacheFederationStateListMeshGateways(a.cache),
+		GatewayServices:                 proxycfgglue.CacheGatewayServices(a.cache),
+		Health:                          proxycfgglue.Health(a.rpcClientHealth),
+		HTTPChecks:                      proxycfgglue.CacheHTTPChecks(a.cache),
+		Intentions:                      proxycfgglue.CacheIntentions(a.cache),
+		IntentionUpstreams:              proxycfgglue.CacheIntentionUpstreams(a.cache),
+		InternalServiceDump:             proxycfgglue.CacheInternalServiceDump(a.cache),
+		LeafCertificate:                 proxycfgglue.CacheLeafCertificate(a.cache),
+		PeeredUpstreams:                 proxycfgglue.CachePeeredUpstreams(a.cache),
+		PreparedQuery:                   proxycfgglue.CachePrepraredQuery(a.cache),
+		ResolvedServiceConfig:           proxycfgglue.CacheResolvedServiceConfig(a.cache),
+		ServiceList:                     proxycfgglue.CacheServiceList(a.cache),
+		TrustBundle:                     proxycfgglue.CacheTrustBundle(a.cache),
+		TrustBundleList:                 proxycfgglue.CacheTrustBundleList(a.cache),
+		ExportedPeeredServices:          proxycfgglue.CacheExportedPeeredServices(a.cache),
+	}
+
+	if server, ok := a.delegate.(*consul.Server); ok {
+		deps := proxycfgglue.ServerDataSourceDeps{
+			EventPublisher: a.baseDeps.EventPublisher,
+			ViewStore:      a.baseDeps.ViewStore,
+			Logger:         a.logger.Named("proxycfg.server-data-sources"),
+			ACLResolver:    a.delegate,
+			GetStore:       func() proxycfgglue.Store { return server.FSM().State() },
+		}
+		sources.ConfigEntry = proxycfgglue.ServerConfigEntry(deps)
+		sources.ConfigEntryList = proxycfgglue.ServerConfigEntryList(deps)
+		sources.Intentions = proxycfgglue.ServerIntentions(deps)
+		sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps)
+	}
+
+	a.fillEnterpriseProxyDataSources(&sources)
+	return sources
+}
+
 func listenerPortKey(svcID structs.ServiceID, checkID structs.CheckID) string {
	return fmt.Sprintf("%s:%s", svcID, checkID)
 }
@@ -91,6 +91,7 @@ func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (i
		Revision          string
		Server            bool
		Version           string
+		BuildDate         string
	}{
		Datacenter:        s.agent.config.Datacenter,
		PrimaryDatacenter: s.agent.config.PrimaryDatacenter,
@@ -100,8 +101,10 @@ func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (i
		Revision: s.agent.config.Revision,
		Server:   s.agent.config.ServerMode,
		// We expect the ent version to be part of the reported version string, and that's now part of the metadata, not the actual version.
		Version: s.agent.config.VersionWithMetadata(),
+		BuildDate: s.agent.config.BuildDate.Format(time.RFC3339),
	}

	return Self{
		Config:      config,
		DebugConfig: s.agent.config.Sanitized(),
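A minimal sketch (assuming a local agent on the default HTTP port) of reading the new field back from `/v1/agent/self`; the struct below models only the fields this example needs:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// self models just the Config fields the example reads from /v1/agent/self.
type self struct {
	Config struct {
		Version   string
		BuildDate string
	}
}

func main() {
	resp, err := http.Get("http://127.0.0.1:8500/v1/agent/self")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var s self
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		panic(err)
	}
	fmt.Printf("consul %s built %s\n", s.Config.Version, s.Config.BuildDate)
}
```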
@@ -1524,6 +1527,8 @@ func (s *HTTPHandlers) AgentConnectCALeafCert(resp http.ResponseWriter, req *htt
	// not the ID of the service instance.
	serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/")

+	// TODO(peering): expose way to get kind=mesh-gateway type cert with appropriate ACLs
+
	args := cachetype.ConnectCALeafRequest{
		Service: serviceName, // Need name not ID
	}
@@ -28,6 +28,7 @@ import (
	"golang.org/x/time/rate"

	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/acl/resolver"
	"github.com/hashicorp/consul/agent/config"
	"github.com/hashicorp/consul/agent/connect"
	"github.com/hashicorp/consul/agent/connect/ca"
@@ -1645,8 +1646,8 @@ type fakeResolveTokenDelegate struct {
	authorizer acl.Authorizer
 }

-func (f fakeResolveTokenDelegate) ResolveTokenAndDefaultMeta(_ string, _ *acl.EnterpriseMeta, _ *acl.AuthorizerContext) (consul.ACLResolveResult, error) {
-	return consul.ACLResolveResult{Authorizer: f.authorizer}, nil
+func (f fakeResolveTokenDelegate) ResolveTokenAndDefaultMeta(_ string, _ *acl.EnterpriseMeta, _ *acl.AuthorizerContext) (resolver.Result, error) {
+	return resolver.Result{Authorizer: f.authorizer}, nil
 }

 func TestAgent_Reload(t *testing.T) {
@@ -558,8 +558,19 @@ func (c *ConnectCALeaf) generateNewLeaf(req *ConnectCALeafRequest,
		}
		dnsNames = append([]string{"localhost"}, req.DNSSAN...)
		ipAddresses = append([]net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, req.IPSAN...)
+	} else if req.Kind != "" {
+		if req.Kind != structs.ServiceKindMeshGateway {
+			return result, fmt.Errorf("unsupported kind: %s", req.Kind)
+		}
+
+		id = &connect.SpiffeIDMeshGateway{
+			Host:       roots.TrustDomain,
+			Datacenter: req.Datacenter,
+			Partition:  req.TargetPartition(),
+		}
+		dnsNames = append(dnsNames, req.DNSSAN...)
	} else {
-		return result, errors.New("URI must be either service or agent")
+		return result, errors.New("URI must be either service, agent, or kind")
	}

	// Create a new private key
@@ -665,8 +676,9 @@ func (c *ConnectCALeaf) generateNewLeaf(req *ConnectCALeafRequest,
 type ConnectCALeafRequest struct {
	Token         string
	Datacenter    string
	Service       string              // Service name, not ID
	Agent         string              // Agent name, not ID
+	Kind          structs.ServiceKind // only mesh-gateway for now
	DNSSAN        []string
	IPSAN         []net.IP
	MinQueryIndex uint64
@@ -677,20 +689,38 @@ type ConnectCALeafRequest struct {
 }

 func (r *ConnectCALeafRequest) Key() string {
-	if len(r.Agent) > 0 {
-		return fmt.Sprintf("agent:%s", r.Agent)
-	}
-
	r.EnterpriseMeta.Normalize()

-	v, err := hashstructure.Hash([]interface{}{
-		r.Service,
-		r.EnterpriseMeta,
-		r.DNSSAN,
-		r.IPSAN,
-	}, nil)
-	if err == nil {
-		return fmt.Sprintf("service:%d", v)
+	switch {
+	case r.Agent != "":
+		v, err := hashstructure.Hash([]interface{}{
+			r.Agent,
+			r.PartitionOrDefault(),
+		}, nil)
+		if err == nil {
+			return fmt.Sprintf("agent:%d", v)
+		}
+	case r.Kind == structs.ServiceKindMeshGateway:
+		v, err := hashstructure.Hash([]interface{}{
+			r.PartitionOrDefault(),
+			r.DNSSAN,
+			r.IPSAN,
+		}, nil)
+		if err == nil {
+			return fmt.Sprintf("kind:%d", v)
+		}
+	case r.Kind != "":
+		// this is not valid
+	default:
+		v, err := hashstructure.Hash([]interface{}{
+			r.Service,
+			r.EnterpriseMeta,
+			r.DNSSAN,
+			r.IPSAN,
+		}, nil)
+		if err == nil {
+			return fmt.Sprintf("service:%d", v)
+		}
	}

	// If there is an error, we don't set the key. A blank key forces
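A standalone sketch of the cache-key pattern used above: `hashstructure.Hash` turns a request's identifying fields into a stable integer, and a string prefix separates the `service:`/`agent:`/`kind:` key spaces. The helper below is illustrative only, not Consul's actual code:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

// cacheKey hashes the identifying parts of a request and namespaces the
// result with a prefix so different request types can never collide.
func cacheKey(prefix string, parts ...interface{}) string {
	v, err := hashstructure.Hash(parts, nil)
	if err != nil {
		// Mirroring the comment in the diff: a blank key forces the
		// cache to treat the entry as uncacheable rather than collide.
		return ""
	}
	return fmt.Sprintf("%s:%d", prefix, v)
}

func main() {
	a := cacheKey("service", "web", []string{"a.com"})
	b := cacheKey("service", "api", []string{"a.com"})
	fmt.Println(a, b, a == b) // different inputs => different keys
}
```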
@@ -1104,29 +1104,64 @@ func (r *testGatedRootsRPC) RPC(method string, args interface{}, reply interface
 }

 func TestConnectCALeaf_Key(t *testing.T) {
-	r1 := ConnectCALeafRequest{Service: "web"}
-	r2 := ConnectCALeafRequest{Service: "api"}
-	r3 := ConnectCALeafRequest{DNSSAN: []string{"a.com"}}
-	r4 := ConnectCALeafRequest{DNSSAN: []string{"b.com"}}
-	r5 := ConnectCALeafRequest{IPSAN: []net.IP{net.ParseIP("192.168.4.139")}}
-	r6 := ConnectCALeafRequest{IPSAN: []net.IP{net.ParseIP("192.168.4.140")}}
-	// hashstructure will hash the service name + ent meta to produce this key
-	r1Key := r1.Key()
-	r2Key := r2.Key()
-	r3Key := r3.Key()
-	r4Key := r4.Key()
-	r5Key := r5.Key()
-	r6Key := r6.Key()
-	require.True(t, strings.HasPrefix(r1Key, "service:"), "Key %s does not start with service:", r1Key)
-	require.True(t, strings.HasPrefix(r2Key, "service:"), "Key %s does not start with service:", r2Key)
-	require.NotEqual(t, r1Key, r2Key, "Cache keys for different services are not equal")
-	require.NotEqual(t, r3Key, r4Key, "Cache keys for different DNSSAN are not equal")
-	require.NotEqual(t, r5Key, r6Key, "Cache keys for different IPSAN are not equal")
-	r := ConnectCALeafRequest{Agent: "abc"}
-	require.Equal(t, "agent:abc", r.Key())
+	key := func(r ConnectCALeafRequest) string {
+		return r.Key()
+	}
+	t.Run("service", func(t *testing.T) {
+		t.Run("name", func(t *testing.T) {
+			r1 := key(ConnectCALeafRequest{Service: "web"})
+			r2 := key(ConnectCALeafRequest{Service: "api"})
+			require.True(t, strings.HasPrefix(r1, "service:"), "Key %s does not start with service:", r1)
+			require.True(t, strings.HasPrefix(r2, "service:"), "Key %s does not start with service:", r2)
+			require.NotEqual(t, r1, r2, "Cache keys for different services should not be equal")
+		})
+		t.Run("dns-san", func(t *testing.T) {
+			r3 := key(ConnectCALeafRequest{Service: "foo", DNSSAN: []string{"a.com"}})
+			r4 := key(ConnectCALeafRequest{Service: "foo", DNSSAN: []string{"b.com"}})
+			require.NotEqual(t, r3, r4, "Cache keys for different DNSSAN should not be equal")
+		})
+		t.Run("ip-san", func(t *testing.T) {
+			r5 := key(ConnectCALeafRequest{Service: "foo", IPSAN: []net.IP{net.ParseIP("192.168.4.139")}})
+			r6 := key(ConnectCALeafRequest{Service: "foo", IPSAN: []net.IP{net.ParseIP("192.168.4.140")}})
+			require.NotEqual(t, r5, r6, "Cache keys for different IPSAN should not be equal")
+		})
+	})
+	t.Run("agent", func(t *testing.T) {
+		t.Run("name", func(t *testing.T) {
+			r1 := key(ConnectCALeafRequest{Agent: "abc"})
+			require.True(t, strings.HasPrefix(r1, "agent:"), "Key %s does not start with agent:", r1)
+		})
+		t.Run("dns-san ignored", func(t *testing.T) {
+			r3 := key(ConnectCALeafRequest{Agent: "foo", DNSSAN: []string{"a.com"}})
+			r4 := key(ConnectCALeafRequest{Agent: "foo", DNSSAN: []string{"b.com"}})
+			require.Equal(t, r3, r4, "DNSSAN is ignored for agent type")
+		})
+		t.Run("ip-san ignored", func(t *testing.T) {
+			r5 := key(ConnectCALeafRequest{Agent: "foo", IPSAN: []net.IP{net.ParseIP("192.168.4.139")}})
+			r6 := key(ConnectCALeafRequest{Agent: "foo", IPSAN: []net.IP{net.ParseIP("192.168.4.140")}})
+			require.Equal(t, r5, r6, "IPSAN is ignored for agent type")
+		})
+	})
+	t.Run("kind", func(t *testing.T) {
+		t.Run("invalid", func(t *testing.T) {
+			r1 := key(ConnectCALeafRequest{Kind: "terminating-gateway"})
+			require.Empty(t, r1)
+		})
+		t.Run("mesh-gateway", func(t *testing.T) {
+			t.Run("normal", func(t *testing.T) {
+				r1 := key(ConnectCALeafRequest{Kind: "mesh-gateway"})
+				require.True(t, strings.HasPrefix(r1, "kind:"), "Key %s does not start with kind:", r1)
+			})
+			t.Run("dns-san", func(t *testing.T) {
+				r3 := key(ConnectCALeafRequest{Kind: "mesh-gateway", DNSSAN: []string{"a.com"}})
+				r4 := key(ConnectCALeafRequest{Kind: "mesh-gateway", DNSSAN: []string{"b.com"}})
+				require.NotEqual(t, r3, r4, "Cache keys for different DNSSAN should not be equal")
+			})
+			t.Run("ip-san", func(t *testing.T) {
+				r5 := key(ConnectCALeafRequest{Kind: "mesh-gateway", IPSAN: []net.IP{net.ParseIP("192.168.4.139")}})
+				r6 := key(ConnectCALeafRequest{Kind: "mesh-gateway", IPSAN: []net.IP{net.ParseIP("192.168.4.140")}})
+				require.NotEqual(t, r5, r6, "Cache keys for different IPSAN should not be equal")
+			})
+		})
+	})
 }
@@ -10,7 +10,7 @@ import (
 // Recommended name for registration.
 const IntentionUpstreamsName = "intention-upstreams"

-// GatewayUpstreams supports fetching upstreams for a given gateway name.
+// IntentionUpstreams supports fetching upstreams for a given service name.
 type IntentionUpstreams struct {
	RegisterOptionsBlockingRefresh
	RPC RPC
@@ -0,0 +1,52 @@
+package cachetype
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/consul/agent/cache"
+	"github.com/hashicorp/consul/agent/structs"
+)
+
+// IntentionUpstreamsDestinationName Recommended name for registration.
+const IntentionUpstreamsDestinationName = "intention-upstreams-destination"
+
+// IntentionUpstreamsDestination supports fetching upstreams for a given gateway name.
+type IntentionUpstreamsDestination struct {
+	RegisterOptionsBlockingRefresh
+	RPC RPC
+}
+
+func (i *IntentionUpstreamsDestination) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
+	var result cache.FetchResult
+
+	// The request should be a ServiceSpecificRequest.
+	reqReal, ok := req.(*structs.ServiceSpecificRequest)
+	if !ok {
+		return result, fmt.Errorf(
+			"Internal cache failure: request wrong type: %T", req)
+	}
+
+	// Lightweight copy this object so that manipulating QueryOptions doesn't race.
+	dup := *reqReal
+	reqReal = &dup
+
+	// Set the minimum query index to our current index so we block
+	reqReal.QueryOptions.MinQueryIndex = opts.MinIndex
+	reqReal.QueryOptions.MaxQueryTime = opts.Timeout
+
+	// Always allow stale - there's no point in hitting leader if the request is
+	// going to be served from cache and end up arbitrarily stale anyway. This
+	// allows cached service-discover to automatically read scale across all
+	// servers too.
+	reqReal.AllowStale = true
+
+	// Fetch
+	var reply structs.IndexedServiceList
+	if err := i.RPC.RPC("Internal.IntentionUpstreamsDestination", reqReal, &reply); err != nil {
+		return result, err
+	}
+
+	result.Value = &reply
+	result.Index = reply.QueryMeta.Index
+	return result, nil
+}
@@ -0,0 +1,52 @@
+package cachetype
+
+import (
+	"testing"
+	"time"
+
+	"github.com/hashicorp/consul/agent/cache"
+	"github.com/hashicorp/consul/agent/structs"
+	"github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestIntentionUpstreamsDestination(t *testing.T) {
+	rpc := TestRPC(t)
+	typ := &IntentionUpstreamsDestination{RPC: rpc}
+
+	// Expect the proper RPC call. This also sets the expected value
+	// since that is return-by-pointer in the arguments.
+	var resp *structs.IndexedServiceList
+	rpc.On("RPC", "Internal.IntentionUpstreamsDestination", mock.Anything, mock.Anything).Return(nil).
+		Run(func(args mock.Arguments) {
+			req := args.Get(1).(*structs.ServiceSpecificRequest)
+			require.Equal(t, uint64(24), req.QueryOptions.MinQueryIndex)
+			require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime)
+			require.True(t, req.AllowStale)
+			require.Equal(t, "foo", req.ServiceName)
+
+			services := structs.ServiceList{
+				{Name: "foo"},
+			}
+			reply := args.Get(2).(*structs.IndexedServiceList)
+			reply.Services = services
+			reply.QueryMeta.Index = 48
+			resp = reply
+		})
+
+	// Fetch
+	resultA, err := typ.Fetch(cache.FetchOptions{
+		MinIndex: 24,
+		Timeout:  1 * time.Second,
+	}, &structs.ServiceSpecificRequest{
+		Datacenter:  "dc1",
+		ServiceName: "foo",
+	})
+	require.NoError(t, err)
+	require.Equal(t, cache.FetchResult{
+		Value: resp,
+		Index: 48,
+	}, resultA)
+
+	rpc.AssertExpectations(t)
+}
@ -1,92 +0,0 @@
// Code generated by mockery v2.11.0. DO NOT EDIT.

package cachetype

import (
	local "github.com/hashicorp/consul/agent/local"
	memdb "github.com/hashicorp/go-memdb"

	mock "github.com/stretchr/testify/mock"

	structs "github.com/hashicorp/consul/agent/structs"

	testing "testing"

	time "time"
)

// MockAgent is an autogenerated mock type for the Agent type
type MockAgent struct {
	mock.Mock
}

// LocalBlockingQuery provides a mock function with given fields: alwaysBlock, hash, wait, fn
func (_m *MockAgent) LocalBlockingQuery(alwaysBlock bool, hash string, wait time.Duration, fn func(memdb.WatchSet) (string, interface{}, error)) (string, interface{}, error) {
	ret := _m.Called(alwaysBlock, hash, wait, fn)

	var r0 string
	if rf, ok := ret.Get(0).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) string); ok {
		r0 = rf(alwaysBlock, hash, wait, fn)
	} else {
		r0 = ret.Get(0).(string)
	}

	var r1 interface{}
	if rf, ok := ret.Get(1).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) interface{}); ok {
		r1 = rf(alwaysBlock, hash, wait, fn)
	} else {
		if ret.Get(1) != nil {
			r1 = ret.Get(1).(interface{})
		}
	}

	var r2 error
	if rf, ok := ret.Get(2).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) error); ok {
		r2 = rf(alwaysBlock, hash, wait, fn)
	} else {
		r2 = ret.Error(2)
	}

	return r0, r1, r2
}

// LocalState provides a mock function with given fields:
func (_m *MockAgent) LocalState() *local.State {
	ret := _m.Called()

	var r0 *local.State
	if rf, ok := ret.Get(0).(func() *local.State); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*local.State)
		}
	}

	return r0
}

// ServiceHTTPBasedChecks provides a mock function with given fields: id
func (_m *MockAgent) ServiceHTTPBasedChecks(id structs.ServiceID) []structs.CheckType {
	ret := _m.Called(id)

	var r0 []structs.CheckType
	if rf, ok := ret.Get(0).(func(structs.ServiceID) []structs.CheckType); ok {
		r0 = rf(id)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]structs.CheckType)
		}
	}

	return r0
}

// NewMockAgent creates a new instance of MockAgent. It also registers a cleanup function to assert the mocks expectations.
func NewMockAgent(t testing.TB) *MockAgent {
	mock := &MockAgent{}

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
@ -1,4 +1,4 @@
// Code generated by mockery v2.11.0. DO NOT EDIT.
// Code generated by mockery v2.12.2. DO NOT EDIT.

package cachetype

@ -27,9 +27,10 @@ func (_m *MockRPC) RPC(method string, args interface{}, reply interface{}) error
	return r0
}

// NewMockRPC creates a new instance of MockRPC. It also registers a cleanup function to assert the mocks expectations.
// NewMockRPC creates a new instance of MockRPC. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockRPC(t testing.TB) *MockRPC {
	mock := &MockRPC{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })
@ -0,0 +1,60 @@
// Code generated by mockery v2.12.2. DO NOT EDIT.

package cachetype

import (
	context "context"

	grpc "google.golang.org/grpc"

	mock "github.com/stretchr/testify/mock"

	pbpeering "github.com/hashicorp/consul/proto/pbpeering"

	testing "testing"
)

// MockTrustBundleLister is an autogenerated mock type for the TrustBundleLister type
type MockTrustBundleLister struct {
	mock.Mock
}

// TrustBundleListByService provides a mock function with given fields: ctx, in, opts
func (_m *MockTrustBundleLister) TrustBundleListByService(ctx context.Context, in *pbpeering.TrustBundleListByServiceRequest, opts ...grpc.CallOption) (*pbpeering.TrustBundleListByServiceResponse, error) {
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, in)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	var r0 *pbpeering.TrustBundleListByServiceResponse
	if rf, ok := ret.Get(0).(func(context.Context, *pbpeering.TrustBundleListByServiceRequest, ...grpc.CallOption) *pbpeering.TrustBundleListByServiceResponse); ok {
		r0 = rf(ctx, in, opts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*pbpeering.TrustBundleListByServiceResponse)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *pbpeering.TrustBundleListByServiceRequest, ...grpc.CallOption) error); ok {
		r1 = rf(ctx, in, opts...)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// NewMockTrustBundleLister creates a new instance of MockTrustBundleLister. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockTrustBundleLister(t testing.TB) *MockTrustBundleLister {
	mock := &MockTrustBundleLister{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
@ -0,0 +1,51 @@
package cachetype

import (
	"fmt"

	"github.com/hashicorp/consul/agent/cache"
	"github.com/hashicorp/consul/agent/structs"
)

// Recommended name for registration.
const PeeredUpstreamsName = "peered-upstreams"

// PeeredUpstreams supports fetching imported upstream candidates of a given partition.
type PeeredUpstreams struct {
	RegisterOptionsBlockingRefresh
	RPC RPC
}

func (i *PeeredUpstreams) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
	var result cache.FetchResult

	reqReal, ok := req.(*structs.PartitionSpecificRequest)
	if !ok {
		return result, fmt.Errorf(
			"Internal cache failure: request wrong type: %T", req)
	}

	// Lightweight copy this object so that manipulating QueryOptions doesn't race.
	dup := *reqReal
	reqReal = &dup

	// Set the minimum query index to our current index so we block
	reqReal.QueryOptions.MinQueryIndex = opts.MinIndex
	reqReal.QueryOptions.MaxQueryTime = opts.Timeout

	// Always allow stale - there's no point in hitting leader if the request is
	// going to be served from cache and end up arbitrarily stale anyway. This
	// allows cached service discovery to automatically read scale across all
	// servers too.
	reqReal.AllowStale = true

	// Fetch
	var reply structs.IndexedPeeredServiceList
	if err := i.RPC.RPC("Internal.PeeredUpstreams", reqReal, &reply); err != nil {
		return result, err
	}

	result.Value = &reply
	result.Index = reply.QueryMeta.Index
	return result, nil
}
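Not part of the diff: a sketch of how a consumer might drive this Fetch as a blocking watch loop, feeding each result's Index back in as the next MinIndex. The ten-minute timeout is an arbitrary placeholder, and the Services field on the reply struct is assumed here.

```go
package example

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/agent/cache"
	cachetype "github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/structs"
)

// watchPeeredUpstreams fetches repeatedly; each call blocks server-side until
// the raft index moves past MinIndex or the timeout elapses.
func watchPeeredUpstreams(typ *cachetype.PeeredUpstreams, req *structs.PartitionSpecificRequest) error {
	var minIndex uint64
	for {
		res, err := typ.Fetch(cache.FetchOptions{MinIndex: minIndex, Timeout: 10 * time.Minute}, req)
		if err != nil {
			return err
		}
		reply := res.Value.(*structs.IndexedPeeredServiceList) // set by Fetch above
		fmt.Printf("index %d: %d peered upstreams\n", res.Index, len(reply.Services))
		minIndex = res.Index // resume the watch from the last observed index
	}
}
```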
@ -0,0 +1,60 @@
package cachetype

import (
	"testing"
	"time"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/cache"
	"github.com/hashicorp/consul/agent/structs"
)

func TestPeeredUpstreams(t *testing.T) {
	rpc := TestRPC(t)
	defer rpc.AssertExpectations(t)
	typ := &PeeredUpstreams{RPC: rpc}

	// Expect the proper RPC call. This also sets the expected value
	// since that is return-by-pointer in the arguments.
	var resp *structs.IndexedPeeredServiceList
	rpc.On("RPC", "Internal.PeeredUpstreams", mock.Anything, mock.Anything).Return(nil).
		Run(func(args mock.Arguments) {
			req := args.Get(1).(*structs.PartitionSpecificRequest)
			require.Equal(t, uint64(24), req.MinQueryIndex)
			require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime)
			require.True(t, req.AllowStale)

			reply := args.Get(2).(*structs.IndexedPeeredServiceList)
			reply.Index = 48
			resp = reply
		})

	// Fetch
	result, err := typ.Fetch(cache.FetchOptions{
		MinIndex: 24,
		Timeout:  1 * time.Second,
	}, &structs.PartitionSpecificRequest{
		Datacenter:     "dc1",
		EnterpriseMeta: *acl.DefaultEnterpriseMeta(),
	})
	require.NoError(t, err)
	require.Equal(t, cache.FetchResult{
		Value: resp,
		Index: 48,
	}, result)
}

func TestPeeredUpstreams_badReqType(t *testing.T) {
	rpc := TestRPC(t)
	defer rpc.AssertExpectations(t)
	typ := &PeeredUpstreams{RPC: rpc}

	// Fetch
	_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
		t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
	require.Error(t, err)
	require.Contains(t, err.Error(), "wrong type")
}
@ -1,10 +1,9 @@
package cachetype

//go:generate mockery --all --inpackage

// RPC is an interface that an RPC client must implement. This is a helper
// interface that is implemented by the agent delegate so that Type
// implementations can request RPC access.
//go:generate mockery --name RPC --inpackage
type RPC interface {
	RPC(method string, args interface{}, reply interface{}) error
}
@ -4,13 +4,13 @@ import (
	"reflect"
	"time"

	"github.com/mitchellh/go-testing-interface"
	testinf "github.com/mitchellh/go-testing-interface"

	"github.com/hashicorp/consul/agent/cache"
)

// TestRPC returns a mock implementation of the RPC interface.
func TestRPC(t testing.T) *MockRPC {
func TestRPC(t testinf.T) *MockRPC {
	// This function is relatively useless but this allows us to perhaps
	// perform some initialization later.
	return &MockRPC{}

@ -21,7 +21,7 @@ func TestRPC(t testing.T) *MockRPC {
// Errors will show up as an error type on the resulting channel so a
// type switch should be used.
func TestFetchCh(
	t testing.T,
	t testinf.T,
	typ cache.Type,
	opts cache.FetchOptions,
	req cache.Request,

@ -43,7 +43,7 @@ func TestFetchCh(
// TestFetchChResult tests that the result from TestFetchCh matches
// within a reasonable period of time (it expects it to be "immediate" but
// waits some milliseconds).
func TestFetchChResult(t testing.T, ch <-chan interface{}, expected interface{}) {
func TestFetchChResult(t testinf.T, ch <-chan interface{}, expected interface{}) {
	t.Helper()

	select {
@ -4,9 +4,10 @@ import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	"github.com/hashicorp/consul/agent/cache"
	"github.com/hashicorp/consul/proto/pbpeering"
	"google.golang.org/grpc"
)

// Recommended name for registration.

@ -19,7 +20,7 @@ type TrustBundle struct {
	Client TrustBundleReader
}

//go:generate mockery --name TrustBundleReader --inpackage --testonly
//go:generate mockery --name TrustBundleReader --inpackage --filename mock_TrustBundleReader_test.go
type TrustBundleReader interface {
	TrustBundleRead(
		ctx context.Context, in *pbpeering.TrustBundleReadRequest, opts ...grpc.CallOption,
@ -4,9 +4,10 @@ import (
	"context"
	"fmt"

	"google.golang.org/grpc"

	"github.com/hashicorp/consul/agent/cache"
	"github.com/hashicorp/consul/proto/pbpeering"
	"google.golang.org/grpc"
)

// Recommended name for registration.

@ -19,6 +20,7 @@ type TrustBundles struct {
	Client TrustBundleLister
}

//go:generate mockery --name TrustBundleLister --inpackage --filename mock_TrustBundleLister_test.go
type TrustBundleLister interface {
	TrustBundleListByService(
		ctx context.Context, in *pbpeering.TrustBundleListByServiceRequest, opts ...grpc.CallOption,
@ -5,11 +5,11 @@ import (
	"testing"
	"time"

	"github.com/hashicorp/consul/agent/cache"
	"github.com/hashicorp/consul/proto/pbpeering"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"

	"github.com/hashicorp/consul/agent/cache"
	"github.com/hashicorp/consul/proto/pbpeering"
)

func TestTrustBundles(t *testing.T) {
@ -105,48 +105,3 @@ func TestTrustBundles_MultipleUpdates(t *testing.T) {
		}
	}
}

// MockTrustBundleLister is an autogenerated mock type for the TrustBundleLister type
type MockTrustBundleLister struct {
	mock.Mock
}

// TrustBundleListByService provides a mock function with given fields: ctx, in, opts
func (_m *MockTrustBundleLister) TrustBundleListByService(ctx context.Context, in *pbpeering.TrustBundleListByServiceRequest, opts ...grpc.CallOption) (*pbpeering.TrustBundleListByServiceResponse, error) {
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, in)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	var r0 *pbpeering.TrustBundleListByServiceResponse
	if rf, ok := ret.Get(0).(func(context.Context, *pbpeering.TrustBundleListByServiceRequest, ...grpc.CallOption) *pbpeering.TrustBundleListByServiceResponse); ok {
		r0 = rf(ctx, in, opts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*pbpeering.TrustBundleListByServiceResponse)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *pbpeering.TrustBundleListByServiceRequest, ...grpc.CallOption) error); ok {
		r1 = rf(ctx, in, opts...)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// NewMockTrustBundleLister creates a new instance of MockTrustBundleLister. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockTrustBundleLister(t testing.TB) *MockTrustBundleLister {
	mock := &MockTrustBundleLister{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}
@ -33,8 +33,6 @@ import (
	"github.com/hashicorp/consul/lib/ttlcache"
)

//go:generate mockery --all --inpackage

// TODO(kit): remove the namespace from these once the metrics themselves change
var Gauges = []prometheus.GaugeDefinition{
	{
@ -1,4 +1,4 @@
// Code generated by mockery v2.11.0. DO NOT EDIT.
// Code generated by mockery v2.12.2. DO NOT EDIT.

package cache

@ -27,9 +27,10 @@ func (_m *MockRequest) CacheInfo() RequestInfo {
	return r0
}

// NewMockRequest creates a new instance of MockRequest. It also registers a cleanup function to assert the mocks expectations.
// NewMockRequest creates a new instance of MockRequest. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockRequest(t testing.TB) *MockRequest {
	mock := &MockRequest{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })
@ -1,4 +1,4 @@
// Code generated by mockery v2.11.0. DO NOT EDIT.
// Code generated by mockery v2.12.2. DO NOT EDIT.

package cache

@ -48,9 +48,10 @@ func (_m *MockType) RegisterOptions() RegisterOptions {
	return r0
}

// NewMockType creates a new instance of MockType. It also registers a cleanup function to assert the mocks expectations.
// NewMockType creates a new instance of MockType. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockType(t testing.TB) *MockType {
	mock := &MockType{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })
@ -8,6 +8,7 @@ import (
//
// This interface is typically implemented by request structures in
// the agent/structs package.
//go:generate mockery --name Request --inpackage
type Request interface {
	// CacheInfo returns information used for caching this request.
	CacheInfo() RequestInfo
@ -5,7 +5,7 @@ import (
	"reflect"
	"time"

	"github.com/mitchellh/go-testing-interface"
	testinf "github.com/mitchellh/go-testing-interface"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

@ -13,7 +13,7 @@ import (
// TestCacheGetCh returns a channel that returns the result of the Get call.
// This is useful for testing timing and concurrency with Get calls. Any
// error will be logged, so the result value should always be asserted.
func TestCacheGetCh(t testing.T, c *Cache, typ string, r Request) <-chan interface{} {
func TestCacheGetCh(t testinf.T, c *Cache, typ string, r Request) <-chan interface{} {
	resultCh := make(chan interface{})
	go func() {
		result, _, err := c.Get(context.Background(), typ, r)

@ -32,7 +32,7 @@ func TestCacheGetCh(t testing.T, c *Cache, typ string, r Request) <-chan interfa
// TestCacheGetChResult tests that the result from TestCacheGetCh matches
// within a reasonable period of time (it expects it to be "immediate" but
// waits some milliseconds).
func TestCacheGetChResult(t testing.T, ch <-chan interface{}, expected interface{}) {
func TestCacheGetChResult(t testinf.T, ch <-chan interface{}, expected interface{}) {
	t.Helper()

	select {

@ -51,7 +51,7 @@ func TestCacheGetChResult(t testing.T, ch <-chan interface{}, expected interface
// "immediate" but waits some milliseconds). Expected may be given multiple
// times and if so these are all waited for and asserted to match but IN ANY
// ORDER to ensure we aren't timing dependent.
func TestCacheNotifyChResult(t testing.T, ch <-chan UpdateEvent, expected ...UpdateEvent) {
func TestCacheNotifyChResult(t testinf.T, ch <-chan UpdateEvent, expected ...UpdateEvent) {
	t.Helper()

	expectLen := len(expected)

@ -85,14 +85,14 @@ OUT:

// TestRequest returns a Request that returns the given cache key and index.
// The Reset method can be called to reset it for custom usage.
func TestRequest(t testing.T, info RequestInfo) *MockRequest {
func TestRequest(t testinf.T, info RequestInfo) *MockRequest {
	req := &MockRequest{}
	req.On("CacheInfo").Return(info)
	return req
}

// TestType returns a MockType that sets default RegisterOptions.
func TestType(t testing.T) *MockType {
func TestType(t testinf.T) *MockType {
	typ := &MockType{}
	typ.On("RegisterOptions").Return(RegisterOptions{
		SupportsBlocking: true,

@ -101,7 +101,7 @@ func TestType(t testing.T) *MockType {
}

// TestTypeNonBlocking returns a MockType that returns false to SupportsBlocking.
func TestTypeNonBlocking(t testing.T) *MockType {
func TestTypeNonBlocking(t testinf.T) *MockType {
	typ := &MockType{}
	typ.On("RegisterOptions").Return(RegisterOptions{
		SupportsBlocking: false,
@ -5,6 +5,7 @@ import (
)

// Type implements the logic to fetch certain types of data.
//go:generate mockery --name Type --inpackage
type Type interface {
	// Fetch fetches a single unique item.
	//
@ -499,6 +499,10 @@ func (s *HTTPHandlers) CatalogNodeServiceList(resp http.ResponseWriter, req *htt
		return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing node name"}
	}

	if _, ok := req.URL.Query()["merge-central-config"]; ok {
		args.MergeCentralConfig = true
	}

	// Make the RPC request
	var out structs.IndexedNodeServiceList
	defer setMeta(resp, &out.QueryMeta)
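For context: the handler only checks that the query key is present, so a bare `?merge-central-config` with no value is enough. A hedged client-side sketch using only the standard library; the agent address and node name are placeholders.

```go
package example

import (
	"fmt"
	"io"
	"net/http"
)

// nodeServicesMerged fetches a node's services with proxy-defaults and
// service-defaults already folded into each service definition.
func nodeServicesMerged(node string) (string, error) {
	url := fmt.Sprintf("http://127.0.0.1:8500/v1/catalog/node-services/%s?merge-central-config", node)
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
```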
@ -1529,6 +1529,111 @@ func TestCatalogNodeServiceList(t *testing.T) {
	require.Equal(t, args.Service.Proxy, proxySvc.Proxy)
}

func TestCatalogNodeServiceList_MergeCentralConfig(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register the service
	registerServiceReq := registerService(t, a)
	// Register proxy-defaults
	proxyGlobalEntry := registerProxyDefaults(t, a)
	// Register service-defaults
	serviceDefaultsConfigEntry := registerServiceDefaults(t, a, registerServiceReq.Service.Proxy.DestinationServiceName)

	url := fmt.Sprintf("/v1/catalog/node-services/%s?merge-central-config", registerServiceReq.Node)
	req, _ := http.NewRequest("GET", url, nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.CatalogNodeServiceList(resp, req)
	require.NoError(t, err)
	assertIndex(t, resp)

	nodeServices := obj.(*structs.NodeServiceList)
	// validate response
	require.Len(t, nodeServices.Services, 1)
	validateMergeCentralConfigResponse(t, nodeServices.Services[0].ToServiceNode(nodeServices.Node.Node), registerServiceReq, proxyGlobalEntry, serviceDefaultsConfigEntry)
}

func TestCatalogNodeServiceList_MergeCentralConfigBlocking(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()

	a := NewTestAgent(t, "")
	defer a.Shutdown()
	testrpc.WaitForLeader(t, a.RPC, "dc1")

	// Register the service
	registerServiceReq := registerService(t, a)
	// Register proxy-defaults
	proxyGlobalEntry := registerProxyDefaults(t, a)

	// Run the query
	rpcReq := structs.NodeSpecificRequest{
		Datacenter:         "dc1",
		Node:               registerServiceReq.Node,
		MergeCentralConfig: true,
	}
	var rpcResp structs.IndexedNodeServiceList
	require.NoError(t, a.RPC("Catalog.NodeServiceList", &rpcReq, &rpcResp))
	require.Len(t, rpcResp.NodeServices.Services, 1)
	nodeService := rpcResp.NodeServices.Services[0]
	require.Equal(t, registerServiceReq.Service.Service, nodeService.Service)
	// validate proxy global defaults are resolved in the merged service config
	require.Equal(t, proxyGlobalEntry.Config, nodeService.Proxy.Config)
	require.Equal(t, proxyGlobalEntry.Mode, nodeService.Proxy.Mode)

	// Async cause a change - register service defaults
	waitIndex := rpcResp.Index
	start := time.Now()
	var serviceDefaultsConfigEntry structs.ServiceConfigEntry
	go func() {
		time.Sleep(100 * time.Millisecond)
		// Register service-defaults
		serviceDefaultsConfigEntry = registerServiceDefaults(t, a, registerServiceReq.Service.Proxy.DestinationServiceName)
	}()

	const waitDuration = 3 * time.Second
RUN_BLOCKING_QUERY:

	url := fmt.Sprintf("/v1/catalog/node-services/%s?merge-central-config&wait=%s&index=%d",
		registerServiceReq.Node, waitDuration.String(), waitIndex)
	req, _ := http.NewRequest("GET", url, nil)
	resp := httptest.NewRecorder()
	obj, err := a.srv.CatalogNodeServiceList(resp, req)
	require.NoError(t, err)
	assertIndex(t, resp)

	elapsed := time.Since(start)
	idx := getIndex(t, resp)
	if idx < waitIndex {
		t.Fatalf("bad index returned: %v", idx)
	} else if idx == waitIndex {
		if elapsed > waitDuration {
			// This should prevent the loop from running longer than the waitDuration
			t.Fatalf("too slow: %v", elapsed)
		}
		goto RUN_BLOCKING_QUERY
	}
	// Should block at least 100ms before getting the changed results
	if elapsed < 100*time.Millisecond {
		t.Fatalf("too fast: %v", elapsed)
	}

	nodeServices := obj.(*structs.NodeServiceList)
	// validate response
	require.Len(t, nodeServices.Services, 1)
	validateMergeCentralConfigResponse(t, nodeServices.Services[0].ToServiceNode(nodeServices.Node.Node), registerServiceReq, proxyGlobalEntry, serviceDefaultsConfigEntry)
}

func TestCatalogNodeServices_Filter(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
@ -804,6 +804,8 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
	Version:           stringVal(c.Version),
	VersionPrerelease: stringVal(c.VersionPrerelease),
	VersionMetadata:   stringVal(c.VersionMetadata),
	// What is a sensible default for BuildDate?
	BuildDate: timeValWithDefault(c.BuildDate, time.Date(1970, 1, 00, 00, 00, 01, 0, time.UTC)),

	// consul configuration
	ConsulCoordinateUpdateBatchSize: intVal(c.Consul.Coordinate.UpdateBatchSize),

@ -913,7 +915,6 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
	CirconusCheckTags:          stringVal(c.Telemetry.CirconusCheckTags),
	CirconusSubmissionInterval: stringVal(c.Telemetry.CirconusSubmissionInterval),
	CirconusSubmissionURL:      stringVal(c.Telemetry.CirconusSubmissionURL),
	DisableCompatOneNine:       boolValWithDefault(c.Telemetry.DisableCompatOneNine, true),
	DisableHostname:            boolVal(c.Telemetry.DisableHostname),
	DogstatsdAddr:              stringVal(c.Telemetry.DogstatsdAddr),
	DogstatsdTags:              c.Telemetry.DogstatsdTags,

@ -1946,6 +1947,13 @@ func stringVal(v *string) string {
	return *v
}

func timeValWithDefault(v *time.Time, defaultVal time.Time) time.Time {
	if v == nil {
		return defaultVal
	}
	return *v
}

func float64ValWithDefault(v *float64, defaultVal float64) float64 {
	if v == nil {
		return defaultVal
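The new helper mirrors the other *ValWithDefault functions in this file: a nil pointer means "unset" and yields the default, anything else wins. A standalone demonstration (the function body is copied from the hunk above; the dates are illustrative):

```go
package example

import (
	"fmt"
	"time"
)

func timeValWithDefault(v *time.Time, defaultVal time.Time) time.Time {
	if v == nil {
		return defaultVal
	}
	return *v
}

func demo() {
	fallback := time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC)
	set := time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC)

	var unset *time.Time
	fmt.Println(timeValWithDefault(unset, fallback)) // nil pointer: falls back to the default
	fmt.Println(timeValWithDefault(&set, fallback))  // 2019-11-20 05:00:00 +0000 UTC
}
```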
@ -3,6 +3,7 @@ package config
import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/hashicorp/consul/agent/consul"

@ -261,18 +262,19 @@ type Config struct {
	SnapshotAgent map[string]interface{} `mapstructure:"snapshot_agent"`

	// non-user configurable values
	AEInterval                 *string    `mapstructure:"ae_interval"`
	CheckDeregisterIntervalMin *string    `mapstructure:"check_deregister_interval_min"`
	CheckReapInterval          *string    `mapstructure:"check_reap_interval"`
	Consul                     Consul     `mapstructure:"consul"`
	Revision                   *string    `mapstructure:"revision"`
	SegmentLimit               *int       `mapstructure:"segment_limit"`
	SegmentNameLimit           *int       `mapstructure:"segment_name_limit"`
	SyncCoordinateIntervalMin  *string    `mapstructure:"sync_coordinate_interval_min"`
	SyncCoordinateRateTarget   *float64   `mapstructure:"sync_coordinate_rate_target"`
	Version                    *string    `mapstructure:"version"`
	VersionPrerelease          *string    `mapstructure:"version_prerelease"`
	VersionMetadata            *string    `mapstructure:"version_metadata"`
	BuildDate                  *time.Time `mapstructure:"build_date"`

	// Enterprise Only
	Audit Audit `mapstructure:"audit"`

@ -671,7 +673,6 @@ type Telemetry struct {
	CirconusCheckTags          *string  `mapstructure:"circonus_check_tags"`
	CirconusSubmissionInterval *string  `mapstructure:"circonus_submission_interval"`
	CirconusSubmissionURL      *string  `mapstructure:"circonus_submission_url"`
	DisableCompatOneNine       *bool    `mapstructure:"disable_compat_1.9"`
	DisableHostname            *bool    `mapstructure:"disable_hostname"`
	DogstatsdAddr              *string  `mapstructure:"dogstatsd_addr"`
	DogstatsdTags              []string `mapstructure:"dogstatsd_tags"`
@ -2,6 +2,7 @@ package config

import (
	"strconv"
	"time"

	"github.com/hashicorp/raft"

@ -210,7 +211,7 @@ func NonUserSource() Source {
// versionSource creates a config source for the version parameters.
// This should be merged in the tail since these values are not
// user configurable.
func versionSource(rev, ver, verPre, meta string) Source {
func versionSource(rev, ver, verPre, meta string, buildDate time.Time) Source {
	return LiteralSource{
		Name: "version",
		Config: Config{

@ -218,6 +219,7 @@ func versionSource(rev, ver, verPre, meta string) Source {
			Version:           &ver,
			VersionPrerelease: &verPre,
			VersionMetadata:   &meta,
			BuildDate:         &buildDate,
		},
	}
}

@ -225,7 +227,8 @@ func versionSource(rev, ver, verPre, meta string) Source {
// defaultVersionSource returns the version config source for the embedded
// version numbers.
func defaultVersionSource() Source {
	return versionSource(version.GitCommit, version.Version, version.VersionPrerelease, version.VersionMetadata)
	buildDate, _ := time.Parse(time.RFC3339, version.BuildDate) // This has been checked elsewhere
	return versionSource(version.GitCommit, version.Version, version.VersionPrerelease, version.VersionMetadata, buildDate)
}

// DefaultConsulSource returns the default configuration for the consul agent.
@ -62,6 +62,7 @@ type RuntimeConfig struct {
	Version           string
	VersionPrerelease string
	VersionMetadata   string
	BuildDate         time.Time

	// consul config
	ConsulCoordinateUpdateMaxBatches int

@ -1700,6 +1701,10 @@ func sanitize(name string, v reflect.Value) reflect.Value {
		x := v.Interface().(time.Duration)
		return reflect.ValueOf(x.String())

	case isTime(typ):
		x := v.Interface().(time.Time)
		return reflect.ValueOf(x.String())

	case isString(typ):
		if strings.HasPrefix(name, "RetryJoinLAN[") || strings.HasPrefix(name, "RetryJoinWAN[") {
			x := v.Interface().(string)

@ -1771,6 +1776,7 @@ func sanitize(name string, v reflect.Value) reflect.Value {
}

func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) }
func isTime(t reflect.Type) bool     { return t == reflect.TypeOf(time.Time{}) }
func isMap(t reflect.Type) bool      { return t.Kind() == reflect.Map }
func isNetAddr(t reflect.Type) bool  { return t.Implements(reflect.TypeOf((*net.Addr)(nil)).Elem()) }
func isPtr(t reflect.Type) bool      { return t.Kind() == reflect.Ptr }
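The new sanitize branch means a time.Time field is rendered through its String() method instead of being reflected field by field; that is where the "2019-11-20 05:00:00 +0000 UTC" literal in the golden file further down comes from. A quick standalone check:

```go
package example

import (
	"fmt"
	"time"
)

func demo() {
	bd := time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC)
	// time.Time.String() produces the exact form the sanitized config emits.
	fmt.Println(bd.String()) // 2019-11-20 05:00:00 +0000 UTC
}
```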
@ -5661,6 +5661,7 @@ func TestLoad_FullConfig(t *testing.T) {
	Version:           "R909Hblt",
	VersionPrerelease: "ZT1JOQLn",
	VersionMetadata:   "GtTCa13",
	BuildDate:         time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC),

	// consul configuration
	ConsulCoordinateUpdateBatchSize: 128,

@ -6302,7 +6303,6 @@ func TestLoad_FullConfig(t *testing.T) {
	CirconusCheckTags:          "prvO4uBl",
	CirconusSubmissionInterval: "DolzaflP",
	CirconusSubmissionURL:      "gTcbS93G",
	DisableCompatOneNine:       true,
	DisableHostname:            true,
	DogstatsdAddr:              "0wSndumK",
	DogstatsdTags:              []string{"3N81zSUB", "Xtj8AnXZ"},

@ -6447,7 +6447,8 @@ func TestLoad_FullConfig(t *testing.T) {
		ConfigFiles: []string{"testdata/full-config." + format},
		HCL:         []string{fmt.Sprintf(`data_dir = "%s"`, dataDir)},
	}
	opts.Overrides = append(opts.Overrides, versionSource("JNtPSav3", "R909Hblt", "ZT1JOQLn", "GtTCa13"))
	opts.Overrides = append(opts.Overrides, versionSource("JNtPSav3", "R909Hblt", "ZT1JOQLn", "GtTCa13",
		time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC)))
	r, err := Load(opts)
	require.NoError(t, err)
	prototest.AssertDeepEqual(t, expected, r.RuntimeConfig)

@ -6641,6 +6642,7 @@ func parseCIDR(t *testing.T, cidr string) *net.IPNet {
func TestRuntimeConfig_Sanitize(t *testing.T) {
	rt := RuntimeConfig{
		BindAddr:             &net.IPAddr{IP: net.ParseIP("127.0.0.1")},
		BuildDate:            time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC),
		CheckOutputMaxSize:   checks.DefaultBufSize,
		SerfAdvertiseAddrLAN: &net.TCPAddr{IP: net.ParseIP("1.2.3.4"), Port: 5678},
		DNSAddrs: []net.Addr{
@ -76,6 +76,7 @@
	"BindAddr": "127.0.0.1",
	"Bootstrap": false,
	"BootstrapExpect": 0,
	"BuildDate": "2019-11-20 05:00:00 +0000 UTC",
	"Cache": {
		"EntryFetchMaxBurst": 42,
		"EntryFetchRate": 0.334,

@ -416,7 +417,6 @@
	"CirconusSubmissionInterval": "",
	"CirconusSubmissionURL": "",
	"Disable": false,
	"DisableCompatOneNine": false,
	"DisableHostname": false,
	"DogstatsdAddr": "",
	"DogstatsdTags": [],
@ -654,7 +654,6 @@ telemetry {
	prometheus_retention_time = "15s"
	statsd_address = "drce87cy"
	statsite_address = "HpFwKB8R"
	disable_compat_1.9 = true
}
tls {
	defaults {
@ -650,8 +650,7 @@
	"metrics_prefix": "ftO6DySn",
	"prometheus_retention_time": "15s",
	"statsd_address": "drce87cy",
	"statsite_address": "HpFwKB8R",
	"statsite_address": "HpFwKB8R"
	"disable_compat_1.9": true
},
"tls": {
	"defaults": {
@ -16,6 +16,7 @@ import (
	"github.com/hashicorp/go-uuid"
	"github.com/mitchellh/go-testing-interface"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/structs"
)

@ -296,6 +297,21 @@ func TestLeafWithNamespace(t testing.T, service, namespace string, root *structs
	return certPEM, keyPEM
}

func TestMeshGatewayLeaf(t testing.T, partition string, root *structs.CARoot) (string, string) {
	// Build the SPIFFE ID
	spiffeId := &SpiffeIDMeshGateway{
		Host:       fmt.Sprintf("%s.consul", TestClusterID),
		Partition:  acl.PartitionOrDefault(partition),
		Datacenter: "dc1",
	}

	certPEM, keyPEM, err := testLeafWithID(t, spiffeId, root, DefaultPrivateKeyType, DefaultPrivateKeyBits, 0)
	if err != nil {
		t.Fatalf(err.Error())
	}
	return certPEM, keyPEM
}

// TestCSR returns a CSR to sign the given service along with the PEM-encoded
// private key for this certificate.
func TestCSR(t testing.T, uri CertURI) (string, string) {
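Not part of the commit: a sketch of how a test would typically consume the new helper, pairing it with the package's existing TestCA constructor; error handling is left to the testing.T plumbing.

```go
package example

import (
	"github.com/mitchellh/go-testing-interface"

	"github.com/hashicorp/consul/agent/connect"
)

// newMeshGatewayTestCert issues a leaf certificate for a mesh gateway in the
// default partition, signed by a freshly generated test CA root.
func newMeshGatewayTestCert(t testing.T) (certPEM, keyPEM string) {
	root := connect.TestCA(t, nil)
	return connect.TestMeshGatewayLeaf(t, "default", root)
}
```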
@ -24,6 +24,8 @@ var (
		`^(?:/ap/([^/]+))?/ns/([^/]+)/dc/([^/]+)/svc/([^/]+)$`)
	spiffeIDAgentRegexp = regexp.MustCompile(
		`^(?:/ap/([^/]+))?/agent/client/dc/([^/]+)/id/([^/]+)$`)
	spiffeIDMeshGatewayRegexp = regexp.MustCompile(
		`^(?:/ap/([^/]+))?/gateway/mesh/dc/([^/]+)$`)
)

// ParseCertURIFromString attempts to parse a string representation of a

@ -117,6 +119,31 @@ func ParseCertURI(input *url.URL) (CertURI, error) {
			Datacenter: dc,
			Agent:      agent,
		}, nil
	} else if v := spiffeIDMeshGatewayRegexp.FindStringSubmatch(path); v != nil {
		// Determine the values. We assume they're reasonable to save cycles,
		// but if the raw path is not empty that means that something is
		// URL encoded so we go to the slow path.
		ap := v[1]
		dc := v[2]
		if input.RawPath != "" {
			var err error
			if ap, err = url.PathUnescape(v[1]); err != nil {
				return nil, fmt.Errorf("Invalid admin partition: %s", err)
			}
			if dc, err = url.PathUnescape(v[2]); err != nil {
				return nil, fmt.Errorf("Invalid datacenter: %s", err)
			}
		}

		if ap == "" {
			ap = "default"
		}

		return &SpiffeIDMeshGateway{
			Host:       input.Host,
			Partition:  ap,
			Datacenter: dc,
		}, nil
	}

	// Test for signing ID
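Not part of the commit: a round trip of the new regexp through the package's string parser. A sketch assuming ParseCertURIFromString's existing behavior; the cluster ID is a placeholder.

```go
package example

import (
	"fmt"

	"github.com/hashicorp/consul/agent/connect"
)

func demo() error {
	uri, err := connect.ParseCertURIFromString(
		"spiffe://11111111-2222-3333-4444-555555555555.consul/gateway/mesh/dc/dc1")
	if err != nil {
		return err
	}
	// With no /ap/ segment in the path, the partition defaults to "default".
	mgw := uri.(*connect.SpiffeIDMeshGateway)
	fmt.Println(mgw.Datacenter, mgw.PartitionOrDefault()) // dc1 default
	return nil
}
```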
@ -0,0 +1,30 @@
package connect

import (
	"net/url"

	"github.com/hashicorp/consul/acl"
)

type SpiffeIDMeshGateway struct {
	Host       string
	Partition  string
	Datacenter string
}

func (id SpiffeIDMeshGateway) MatchesPartition(partition string) bool {
	return id.PartitionOrDefault() == acl.PartitionOrDefault(partition)
}

func (id SpiffeIDMeshGateway) PartitionOrDefault() string {
	return acl.PartitionOrDefault(id.Partition)
}

// URI returns the *url.URL for this SPIFFE ID.
func (id SpiffeIDMeshGateway) URI() *url.URL {
	var result url.URL
	result.Scheme = "spiffe"
	result.Host = id.Host
	result.Path = id.uriPath()
	return &result
}
@ -0,0 +1,20 @@
//go:build !consulent
// +build !consulent

package connect

import (
	"fmt"

	"github.com/hashicorp/consul/acl"
)

// GetEnterpriseMeta will synthesize an EnterpriseMeta struct from the SpiffeIDMeshGateway.
// in OSS this just returns an empty (but never nil) struct pointer
func (id SpiffeIDMeshGateway) GetEnterpriseMeta() *acl.EnterpriseMeta {
	return &acl.EnterpriseMeta{}
}

func (id SpiffeIDMeshGateway) uriPath() string {
	return fmt.Sprintf("/gateway/mesh/dc/%s", id.Datacenter)
}
@ -0,0 +1,31 @@
//go:build !consulent
// +build !consulent

package connect

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestSpiffeIDMeshGatewayURI(t *testing.T) {
	t.Run("default partition", func(t *testing.T) {
		mgw := &SpiffeIDMeshGateway{
			Host:       "1234.consul",
			Datacenter: "dc1",
		}

		require.Equal(t, "spiffe://1234.consul/gateway/mesh/dc/dc1", mgw.URI().String())
	})

	t.Run("partitions are ignored", func(t *testing.T) {
		mgw := &SpiffeIDMeshGateway{
			Host:       "1234.consul",
			Partition:  "foobar",
			Datacenter: "dc1",
		}

		require.Equal(t, "spiffe://1234.consul/gateway/mesh/dc/dc1", mgw.URI().String())
	})
}
@ -1,6 +1,7 @@
package connect

import (
	"fmt"
	"net/url"

	"github.com/hashicorp/consul/acl"

@ -23,10 +24,6 @@ func (id SpiffeIDService) MatchesPartition(partition string) bool {
	return id.PartitionOrDefault() == acl.PartitionOrDefault(partition)
}

func (id SpiffeIDService) PartitionOrDefault() string {
	return acl.PartitionOrDefault(id.Partition)
}

// URI returns the *url.URL for this SPIFFE ID.
func (id SpiffeIDService) URI() *url.URL {
	var result url.URL

@ -35,3 +32,20 @@ func (id SpiffeIDService) URI() *url.URL {
	result.Path = id.uriPath()
	return &result
}

func (id SpiffeIDService) uriPath() string {
	path := fmt.Sprintf("/ns/%s/dc/%s/svc/%s",
		id.NamespaceOrDefault(),
		id.Datacenter,
		id.Service,
	)

	// Although OSS has no support for partitions, it still needs to be able to
	// handle exportedPartition from peered Consul Enterprise clusters in order
	// to generate the correct SpiffeID.
	// We intentionally avoid using pbpartition.DefaultName here to be OSS friendly.
	if ap := id.PartitionOrDefault(); ap != "" && ap != "default" {
		return "/ap/" + ap + path
	}
	return path
}
@ -4,7 +4,7 @@
|
||||||
package connect
|
package connect
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"strings"
|
||||||
|
|
||||||
"github.com/hashicorp/consul/acl"
|
"github.com/hashicorp/consul/acl"
|
||||||
)
|
)
|
||||||
|
@@ -15,10 +15,14 @@ func (id SpiffeIDService) GetEnterpriseMeta() *acl.EnterpriseMeta {
 	return &acl.EnterpriseMeta{}
 }

-func (id SpiffeIDService) uriPath() string {
-	return fmt.Sprintf("/ns/%s/dc/%s/svc/%s",
-		id.NamespaceOrDefault(),
-		id.Datacenter,
-		id.Service,
-	)
+// PartitionOrDefault breaks from OSS's pattern of returning empty strings.
+// Although OSS has no support for partitions, it still needs to be able to
+// handle exportedPartition from peered Consul Enterprise clusters in order
+// to generate the correct SpiffeID.
+func (id SpiffeIDService) PartitionOrDefault() string {
+	if id.Partition == "" {
+		return "default"
+	}
+
+	return strings.ToLower(id.Partition)
 }
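A quick sketch of the normalization this buys; both results follow directly from the branches above:

```go
fmt.Println(SpiffeIDService{}.PartitionOrDefault())                    // "default"
fmt.Println(SpiffeIDService{Partition: "BizDev"}.PartitionOrDefault()) // "bizdev"
```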
@@ -19,16 +19,6 @@ func TestSpiffeIDServiceURI(t *testing.T) {
 		require.Equal(t, "spiffe://1234.consul/ns/default/dc/dc1/svc/web", svc.URI().String())
 	})

-	t.Run("partitions are ignored", func(t *testing.T) {
-		svc := &SpiffeIDService{
-			Host:       "1234.consul",
-			Partition:  "other",
-			Datacenter: "dc1",
-			Service:    "web",
-		}
-		require.Equal(t, "spiffe://1234.consul/ns/default/dc/dc1/svc/web", svc.URI().String())
-	})
-
 	t.Run("namespaces are ignored", func(t *testing.T) {
 		svc := &SpiffeIDService{
 			Host: "1234.consul",
@@ -48,6 +48,12 @@ func (id SpiffeIDSigning) CanSign(cu CertURI) bool {
 		// worry about Unicode domains if we start allowing customisation beyond the
 		// built-in cluster ids.
 		return strings.ToLower(other.Host) == id.Host()
+	case *SpiffeIDMeshGateway:
+		// The host component of the service must be an exact match for now under
+		// ascii case folding (since hostnames are case-insensitive). Later we might
+		// worry about Unicode domains if we start allowing customisation beyond the
+		// built-in cluster ids.
+		return strings.ToLower(other.Host) == id.Host()
 	default:
 		return false
 	}
@@ -95,6 +95,30 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) {
 			input: &SpiffeIDService{Host: TestClusterID + ".fake", Namespace: "default", Datacenter: "dc1", Service: "web"},
 			want:  false,
 		},
+		{
+			name:  "mesh gateway - good",
+			id:    testSigning,
+			input: &SpiffeIDMeshGateway{Host: TestClusterID + ".consul", Datacenter: "dc1"},
+			want:  true,
+		},
+		{
+			name:  "mesh gateway - good mixed case",
+			id:    testSigning,
+			input: &SpiffeIDMeshGateway{Host: strings.ToUpper(TestClusterID) + ".CONsuL", Datacenter: "dc1"},
+			want:  true,
+		},
+		{
+			name:  "mesh gateway - different cluster",
+			id:    testSigning,
+			input: &SpiffeIDMeshGateway{Host: "55555555-4444-3333-2222-111111111111.consul", Datacenter: "dc1"},
+			want:  false,
+		},
+		{
+			name:  "mesh gateway - different TLD",
+			id:    testSigning,
+			input: &SpiffeIDMeshGateway{Host: TestClusterID + ".fake", Datacenter: "dc1"},
+			want:  false,
+		},
 	}

 	for _, tt := range tests {
@@ -70,6 +70,26 @@ func TestParseCertURIFromString(t *testing.T) {
 			},
 			"",
 		},
+		{
+			"mesh-gateway with no partition",
+			"spiffe://1234.consul/gateway/mesh/dc/dc1",
+			&SpiffeIDMeshGateway{
+				Host:       "1234.consul",
+				Partition:  "default",
+				Datacenter: "dc1",
+			},
+			"",
+		},
+		{
+			"mesh-gateway with partition",
+			"spiffe://1234.consul/ap/bizdev/gateway/mesh/dc/dc1",
+			&SpiffeIDMeshGateway{
+				Host:       "1234.consul",
+				Partition:  "bizdev",
+				Datacenter: "dc1",
+			},
+			"",
+		},
 		{
 			"service with URL-encoded values",
 			"spiffe://1234.consul/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux",
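As a usage sketch of the parser these cases exercise (the ParseCertURIFromString name comes from the test above; the exact return types are assumed):

```go
uri, err := ParseCertURIFromString("spiffe://1234.consul/ap/bizdev/gateway/mesh/dc/dc1")
if err != nil {
	return err
}
// A mesh-gateway URI parses back into the typed SPIFFE ID struct.
if mgw, ok := uri.(*SpiffeIDMeshGateway); ok {
	fmt.Println(mgw.Partition, mgw.Datacenter) // bizdev dc1
}
```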
@@ -13,7 +13,9 @@ import (
 	"golang.org/x/time/rate"

 	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/acl/resolver"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/agent/structs/aclfilter"
 	"github.com/hashicorp/consul/agent/token"
 	"github.com/hashicorp/consul/logging"
 )
@@ -42,10 +44,6 @@ const (
 	// provided.
 	anonymousToken = "anonymous"

-	// redactedToken is shown in structures with embedded tokens when they
-	// are not allowed to be displayed.
-	redactedToken = "<hidden>"
-
 	// aclTokenReapingRateLimit is the number of batch token reaping requests per second allowed.
 	aclTokenReapingRateLimit rate.Limit = 1.0

@@ -662,26 +660,6 @@ func (r *ACLResolver) synthesizePoliciesForNodeIdentities(nodeIdentities []*stru
 	return syntheticPolicies
 }

-// plainACLResolver wraps ACLResolver so that it can be used in other packages
-// that cannot import agent/consul wholesale (e.g. because of import cycles).
-//
-// TODO(agentless): this pattern was copied from subscribeBackend for expediency
-// but we should really refactor ACLResolver so it can be passed as a dependency
-// to other packages.
-type plainACLResolver struct {
-	resolver *ACLResolver
-}
-
-func (r plainACLResolver) ResolveTokenAndDefaultMeta(
-	token string,
-	entMeta *acl.EnterpriseMeta,
-	authzContext *acl.AuthorizerContext,
-) (acl.Authorizer, error) {
-	// ACLResolver.ResolveTokenAndDefaultMeta returns a ACLResolveResult which
-	// can't be used in other packages, but it embeds acl.Authorizer which can.
-	return r.resolver.ResolveTokenAndDefaultMeta(token, entMeta, authzContext)
-}
-
 func mergeStringSlice(a, b []string) []string {
 	out := make([]string, 0, len(a)+len(b))
 	out = append(out, a...)
@@ -1008,13 +986,13 @@ func (r *ACLResolver) resolveLocallyManagedToken(token string) (structs.ACLIdent
 // ResolveToken to an acl.Authorizer and structs.ACLIdentity. The acl.Authorizer
 // can be used to check permissions granted to the token, and the ACLIdentity
 // describes the token and any defaults applied to it.
-func (r *ACLResolver) ResolveToken(token string) (ACLResolveResult, error) {
+func (r *ACLResolver) ResolveToken(token string) (resolver.Result, error) {
 	if !r.ACLsEnabled() {
-		return ACLResolveResult{Authorizer: acl.ManageAll()}, nil
+		return resolver.Result{Authorizer: acl.ManageAll()}, nil
 	}

 	if acl.RootAuthorizer(token) != nil {
-		return ACLResolveResult{}, acl.ErrRootDenied
+		return resolver.Result{}, acl.ErrRootDenied
 	}

 	// handle the anonymous token
@@ -1023,7 +1001,7 @@ func (r *ACLResolver) ResolveToken(token string) (ACLResolveResult, error) {
 	}

 	if ident, authz, ok := r.resolveLocallyManagedToken(token); ok {
-		return ACLResolveResult{Authorizer: authz, ACLIdentity: ident}, nil
+		return resolver.Result{Authorizer: authz, ACLIdentity: ident}, nil
 	}

 	defer metrics.MeasureSince([]string{"acl", "ResolveToken"}, time.Now())
@@ -1034,10 +1012,10 @@ func (r *ACLResolver) ResolveToken(token string) (ACLResolveResult, error) {
 		if IsACLRemoteError(err) {
 			r.logger.Error("Error resolving token", "error", err)
 			ident := &missingIdentity{reason: "primary-dc-down", token: token}
-			return ACLResolveResult{Authorizer: r.down, ACLIdentity: ident}, nil
+			return resolver.Result{Authorizer: r.down, ACLIdentity: ident}, nil
 		}

-		return ACLResolveResult{}, err
+		return resolver.Result{}, err
 	}

 	// Build the Authorizer
@@ -1050,7 +1028,7 @@ func (r *ACLResolver) ResolveToken(token string) (ACLResolveResult, error) {

 	authz, err := policies.Compile(r.cache, &conf)
 	if err != nil {
-		return ACLResolveResult{}, err
+		return resolver.Result{}, err
 	}
 	chain = append(chain, authz)
@@ -1058,36 +1036,15 @@ func (r *ACLResolver) ResolveToken(token string) (ACLResolveResult, error) {
 	if err != nil {
 		if IsACLRemoteError(err) {
 			r.logger.Error("Error resolving identity defaults", "error", err)
-			return ACLResolveResult{Authorizer: r.down, ACLIdentity: identity}, nil
+			return resolver.Result{Authorizer: r.down, ACLIdentity: identity}, nil
 		}
-		return ACLResolveResult{}, err
+		return resolver.Result{}, err
 	} else if authz != nil {
 		chain = append(chain, authz)
 	}

 	chain = append(chain, acl.RootAuthorizer(r.config.ACLDefaultPolicy))
-	return ACLResolveResult{Authorizer: acl.NewChainedAuthorizer(chain), ACLIdentity: identity}, nil
-}
-
-type ACLResolveResult struct {
-	acl.Authorizer
-	// TODO: likely we can reduce this interface
-	ACLIdentity structs.ACLIdentity
-}
-
-func (a ACLResolveResult) AccessorID() string {
-	if a.ACLIdentity == nil {
-		return ""
-	}
-	return a.ACLIdentity.ID()
-}
-
-func (a ACLResolveResult) Identity() structs.ACLIdentity {
-	return a.ACLIdentity
-}
-
-func (a ACLResolveResult) ToAllowAuthorizer() acl.AllowAuthorizer {
-	return acl.AllowAuthorizer{Authorizer: a, AccessorID: a.AccessorID()}
-}
+	return resolver.Result{Authorizer: acl.NewChainedAuthorizer(chain), ACLIdentity: identity}, nil
 }

 func (r *ACLResolver) ACLsEnabled() bool {
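The deleted ACLResolveResult evidently moves into the new acl/resolver package more or less intact. A hedged sketch of what resolver.Result presumably looks like there, reconstructed from the type removed above; the package layout is an assumption:

```go
package resolver

import (
	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/structs"
)

// Result pairs the compiled acl.Authorizer with the identity it was
// resolved from, mirroring the ACLResolveResult type deleted above.
type Result struct {
	acl.Authorizer
	ACLIdentity structs.ACLIdentity
}

func (a Result) AccessorID() string {
	if a.ACLIdentity == nil {
		return ""
	}
	return a.ACLIdentity.ID()
}
```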
@@ -1111,7 +1068,7 @@ func (r *ACLResolver) ResolveTokenAndDefaultMeta(
 	token string,
 	entMeta *acl.EnterpriseMeta,
 	authzContext *acl.AuthorizerContext,
-) (ACLResolveResult, error) {
+) (resolver.Result, error) {
 	return r.ResolveTokenAndDefaultMetaWithPeerName(token, entMeta, structs.DefaultPeerKeyword, authzContext)
 }
@@ -1120,10 +1077,10 @@ func (r *ACLResolver) ResolveTokenAndDefaultMetaWithPeerName(
 	entMeta *acl.EnterpriseMeta,
 	peerName string,
 	authzContext *acl.AuthorizerContext,
-) (ACLResolveResult, error) {
+) (resolver.Result, error) {
 	result, err := r.ResolveToken(token)
 	if err != nil {
-		return ACLResolveResult{}, err
+		return resolver.Result{}, err
 	}

 	if entMeta == nil {
@@ -1154,816 +1111,8 @@ func (r *ACLResolver) ResolveTokenAndDefaultMetaWithPeerName(
 	return result, err
 }

-// aclFilter is used to filter results from our state store based on ACL rules
-// configured for the provided token.
-type aclFilter struct {
-	authorizer acl.Authorizer
-	logger     hclog.Logger
-}
-
-// newACLFilter constructs a new aclFilter.
-func newACLFilter(authorizer acl.Authorizer, logger hclog.Logger) *aclFilter {
-	if logger == nil {
-		logger = hclog.New(&hclog.LoggerOptions{})
-	}
-	return &aclFilter{
-		authorizer: authorizer,
-		logger:     logger,
-	}
-}
-
-// allowNode is used to determine if a node is accessible for an ACL.
-func (f *aclFilter) allowNode(node string, ent *acl.AuthorizerContext) bool {
-	return f.authorizer.NodeRead(node, ent) == acl.Allow
-}
-
-// allowNode is used to determine if the gateway and service are accessible for an ACL
-func (f *aclFilter) allowGateway(gs *structs.GatewayService) bool {
-	var authzContext acl.AuthorizerContext
-
-	// Need read on service and gateway. Gateway may have different EnterpriseMeta so we fill authzContext twice
-	gs.Gateway.FillAuthzContext(&authzContext)
-	if !f.allowService(gs.Gateway.Name, &authzContext) {
-		return false
-	}
-
-	gs.Service.FillAuthzContext(&authzContext)
-	if !f.allowService(gs.Service.Name, &authzContext) {
-		return false
-	}
-	return true
-}
-
-// allowService is used to determine if a service is accessible for an ACL.
-func (f *aclFilter) allowService(service string, ent *acl.AuthorizerContext) bool {
-	if service == "" {
-		return true
-	}
-
-	return f.authorizer.ServiceRead(service, ent) == acl.Allow
-}
-
-// allowSession is used to determine if a session for a node is accessible for
-// an ACL.
-func (f *aclFilter) allowSession(node string, ent *acl.AuthorizerContext) bool {
-	return f.authorizer.SessionRead(node, ent) == acl.Allow
-}
-
-// filterHealthChecks is used to filter a set of health checks down based on
-// the configured ACL rules for a token. Returns true if any elements were
-// removed.
-func (f *aclFilter) filterHealthChecks(checks *structs.HealthChecks) bool {
-	hc := *checks
-	var authzContext acl.AuthorizerContext
-	var removed bool
-
-	for i := 0; i < len(hc); i++ {
-		check := hc[i]
-		check.FillAuthzContext(&authzContext)
-		if f.allowNode(check.Node, &authzContext) && f.allowService(check.ServiceName, &authzContext) {
-			continue
-		}
-
-		f.logger.Debug("dropping check from result due to ACLs", "check", check.CheckID)
-		removed = true
-		hc = append(hc[:i], hc[i+1:]...)
-		i--
-	}
-	*checks = hc
-	return removed
-}
-
-// filterServices is used to filter a set of services based on ACLs. Returns
-// true if any elements were removed.
-func (f *aclFilter) filterServices(services structs.Services, entMeta *acl.EnterpriseMeta) bool {
-	var authzContext acl.AuthorizerContext
-	entMeta.FillAuthzContext(&authzContext)
-
-	var removed bool
-
-	for svc := range services {
-		if f.allowService(svc, &authzContext) {
-			continue
-		}
-		f.logger.Debug("dropping service from result due to ACLs", "service", svc)
-		removed = true
-		delete(services, svc)
-	}
-
-	return removed
-}
-
-// filterServiceNodes is used to filter a set of nodes for a given service
-// based on the configured ACL rules. Returns true if any elements were removed.
-func (f *aclFilter) filterServiceNodes(nodes *structs.ServiceNodes) bool {
-	sn := *nodes
-	var authzContext acl.AuthorizerContext
-	var removed bool
-
-	for i := 0; i < len(sn); i++ {
-		node := sn[i]
-
-		node.FillAuthzContext(&authzContext)
-		if f.allowNode(node.Node, &authzContext) && f.allowService(node.ServiceName, &authzContext) {
-			continue
-		}
-		removed = true
-		f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node.Node, &node.EnterpriseMeta))
-		sn = append(sn[:i], sn[i+1:]...)
-		i--
-	}
-	*nodes = sn
-	return removed
-}
-
-// filterNodeServices is used to filter services on a given node base on ACLs.
-// Returns true if any elements were removed
-func (f *aclFilter) filterNodeServices(services **structs.NodeServices) bool {
-	if *services == nil {
-		return false
-	}
-
-	var authzContext acl.AuthorizerContext
-	(*services).Node.FillAuthzContext(&authzContext)
-	if !f.allowNode((*services).Node.Node, &authzContext) {
-		*services = nil
-		return true
-	}
-
-	var removed bool
-	for svcName, svc := range (*services).Services {
-		svc.FillAuthzContext(&authzContext)
-
-		if f.allowNode((*services).Node.Node, &authzContext) && f.allowService(svcName, &authzContext) {
-			continue
-		}
-		f.logger.Debug("dropping service from result due to ACLs", "service", svc.CompoundServiceID())
-		removed = true
-		delete((*services).Services, svcName)
-	}
-
-	return removed
-}
-
-// filterNodeServices is used to filter services on a given node base on ACLs.
-// Returns true if any elements were removed.
-func (f *aclFilter) filterNodeServiceList(services *structs.NodeServiceList) bool {
-	if services.Node == nil {
-		return false
-	}
-
-	var authzContext acl.AuthorizerContext
-	services.Node.FillAuthzContext(&authzContext)
-	if !f.allowNode(services.Node.Node, &authzContext) {
-		*services = structs.NodeServiceList{}
-		return true
-	}
-
-	var removed bool
-	svcs := services.Services
-	for i := 0; i < len(svcs); i++ {
-		svc := svcs[i]
-		svc.FillAuthzContext(&authzContext)
-
-		if f.allowService(svc.Service, &authzContext) {
-			continue
-		}
-
-		f.logger.Debug("dropping service from result due to ACLs", "service", svc.CompoundServiceID())
-		svcs = append(svcs[:i], svcs[i+1:]...)
-		i--
-		removed = true
-	}
-	services.Services = svcs
-
-	return removed
-}
-
-// filterCheckServiceNodes is used to filter nodes based on ACL rules. Returns
-// true if any elements were removed.
-func (f *aclFilter) filterCheckServiceNodes(nodes *structs.CheckServiceNodes) bool {
-	csn := *nodes
-	var authzContext acl.AuthorizerContext
-	var removed bool
-
-	for i := 0; i < len(csn); i++ {
-		node := csn[i]
-		node.Service.FillAuthzContext(&authzContext)
-		if f.allowNode(node.Node.Node, &authzContext) && f.allowService(node.Service.Service, &authzContext) {
-			continue
-		}
-		f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node.Node.Node, node.Node.GetEnterpriseMeta()))
-		removed = true
-		csn = append(csn[:i], csn[i+1:]...)
-		i--
-	}
-	*nodes = csn
-	return removed
-}
-
-// filterServiceTopology is used to filter upstreams/downstreams based on ACL rules.
-// this filter is unlike others in that it also returns whether the result was filtered by ACLs
-func (f *aclFilter) filterServiceTopology(topology *structs.ServiceTopology) bool {
-	filteredUpstreams := f.filterCheckServiceNodes(&topology.Upstreams)
-	filteredDownstreams := f.filterCheckServiceNodes(&topology.Downstreams)
-	return filteredUpstreams || filteredDownstreams
-}
-
-// filterDatacenterCheckServiceNodes is used to filter nodes based on ACL rules.
-// Returns true if any elements are removed.
-func (f *aclFilter) filterDatacenterCheckServiceNodes(datacenterNodes *map[string]structs.CheckServiceNodes) bool {
-	dn := *datacenterNodes
-	out := make(map[string]structs.CheckServiceNodes)
-	var removed bool
-	for dc := range dn {
-		nodes := dn[dc]
-		if f.filterCheckServiceNodes(&nodes) {
-			removed = true
-		}
-		if len(nodes) > 0 {
-			out[dc] = nodes
-		}
-	}
-	*datacenterNodes = out
-	return removed
-}
-
-// filterSessions is used to filter a set of sessions based on ACLs. Returns
-// true if any elements were removed.
-func (f *aclFilter) filterSessions(sessions *structs.Sessions) bool {
-	s := *sessions
-
-	var removed bool
-	for i := 0; i < len(s); i++ {
-		session := s[i]
-
-		var entCtx acl.AuthorizerContext
-		session.FillAuthzContext(&entCtx)
-
-		if f.allowSession(session.Node, &entCtx) {
-			continue
-		}
-		removed = true
-		f.logger.Debug("dropping session from result due to ACLs", "session", session.ID)
-		s = append(s[:i], s[i+1:]...)
-		i--
-	}
-	*sessions = s
-	return removed
-}
-
-// filterCoordinates is used to filter nodes in a coordinate dump based on ACL
-// rules. Returns true if any elements were removed.
-func (f *aclFilter) filterCoordinates(coords *structs.Coordinates) bool {
-	c := *coords
-	var authzContext acl.AuthorizerContext
-	var removed bool
-
-	for i := 0; i < len(c); i++ {
-		c[i].FillAuthzContext(&authzContext)
-		node := c[i].Node
-		if f.allowNode(node, &authzContext) {
-			continue
-		}
-		f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node, c[i].GetEnterpriseMeta()))
-		removed = true
-		c = append(c[:i], c[i+1:]...)
-		i--
-	}
-	*coords = c
-	return removed
-}
-
-// filterIntentions is used to filter intentions based on ACL rules.
-// We prune entries the user doesn't have access to, and we redact any tokens
-// if the user doesn't have a management token. Returns true if any elements
-// were removed.
-func (f *aclFilter) filterIntentions(ixns *structs.Intentions) bool {
-	ret := make(structs.Intentions, 0, len(*ixns))
-	var removed bool
-	for _, ixn := range *ixns {
-		if !ixn.CanRead(f.authorizer) {
-			removed = true
-			f.logger.Debug("dropping intention from result due to ACLs", "intention", ixn.ID)
-			continue
-		}
-
-		ret = append(ret, ixn)
-	}
-
-	*ixns = ret
-	return removed
-}
-
-// filterNodeDump is used to filter through all parts of a node dump and
-// remove elements the provided ACL token cannot access. Returns true if
-// any elements were removed.
-func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) bool {
-	nd := *dump
-
-	var authzContext acl.AuthorizerContext
-	var removed bool
-	for i := 0; i < len(nd); i++ {
-		info := nd[i]
-
-		// Filter nodes
-		info.FillAuthzContext(&authzContext)
-		if node := info.Node; !f.allowNode(node, &authzContext) {
-			f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node, info.GetEnterpriseMeta()))
-			removed = true
-			nd = append(nd[:i], nd[i+1:]...)
-			i--
-			continue
-		}
-
-		// Filter services
-		for j := 0; j < len(info.Services); j++ {
-			svc := info.Services[j].Service
-			info.Services[j].FillAuthzContext(&authzContext)
-			if f.allowNode(info.Node, &authzContext) && f.allowService(svc, &authzContext) {
-				continue
-			}
-			f.logger.Debug("dropping service from result due to ACLs", "service", svc)
-			removed = true
-			info.Services = append(info.Services[:j], info.Services[j+1:]...)
-			j--
-		}
-
-		// Filter checks
-		for j := 0; j < len(info.Checks); j++ {
-			chk := info.Checks[j]
-			chk.FillAuthzContext(&authzContext)
-			if f.allowNode(info.Node, &authzContext) && f.allowService(chk.ServiceName, &authzContext) {
-				continue
-			}
-			f.logger.Debug("dropping check from result due to ACLs", "check", chk.CheckID)
-			removed = true
-			info.Checks = append(info.Checks[:j], info.Checks[j+1:]...)
-			j--
-		}
-	}
-	*dump = nd
-	return removed
-}
-
-// filterServiceDump is used to filter nodes based on ACL rules. Returns true
-// if any elements were removed.
-func (f *aclFilter) filterServiceDump(services *structs.ServiceDump) bool {
-	svcs := *services
-	var authzContext acl.AuthorizerContext
-	var removed bool
-
-	for i := 0; i < len(svcs); i++ {
-		service := svcs[i]
-
-		if f.allowGateway(service.GatewayService) {
-			// ServiceDump might only have gateway config and no node information
-			if service.Node == nil {
-				continue
-			}
-
-			service.Service.FillAuthzContext(&authzContext)
-			if f.allowNode(service.Node.Node, &authzContext) {
-				continue
-			}
-		}
-
-		f.logger.Debug("dropping service from result due to ACLs", "service", service.GatewayService.Service)
-		removed = true
-		svcs = append(svcs[:i], svcs[i+1:]...)
-		i--
-	}
-	*services = svcs
-	return removed
-}
-
-// filterNodes is used to filter through all parts of a node list and remove
-// elements the provided ACL token cannot access. Returns true if any elements
-// were removed.
-func (f *aclFilter) filterNodes(nodes *structs.Nodes) bool {
-	n := *nodes
-
-	var authzContext acl.AuthorizerContext
-	var removed bool
-
-	for i := 0; i < len(n); i++ {
-		n[i].FillAuthzContext(&authzContext)
-		node := n[i].Node
-		if f.allowNode(node, &authzContext) {
-			continue
-		}
-		f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node, n[i].GetEnterpriseMeta()))
-		removed = true
-		n = append(n[:i], n[i+1:]...)
-		i--
-	}
-	*nodes = n
-	return removed
-}
-
-// redactPreparedQueryTokens will redact any tokens unless the client has a
-// management token. This eases the transition to delegated authority over
-// prepared queries, since it was easy to capture management tokens in Consul
-// 0.6.3 and earlier, and we don't want to willy-nilly show those. This does
-// have the limitation of preventing delegated non-management users from seeing
-// captured tokens, but they can at least see whether or not a token is set.
-func (f *aclFilter) redactPreparedQueryTokens(query **structs.PreparedQuery) {
-	// Management tokens can see everything with no filtering.
-	var authzContext acl.AuthorizerContext
-	structs.DefaultEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
-	if f.authorizer.ACLWrite(&authzContext) == acl.Allow {
-		return
-	}
-
-	// Let the user see if there's a blank token, otherwise we need
-	// to redact it, since we know they don't have a management
-	// token.
-	if (*query).Token != "" {
-		// Redact the token, using a copy of the query structure
-		// since we could be pointed at a live instance from the
-		// state store so it's not safe to modify it. Note that
-		// this clone will still point to things like underlying
-		// arrays in the original, but for modifying just the
-		// token it will be safe to use.
-		clone := *(*query)
-		clone.Token = redactedToken
-		*query = &clone
-	}
-}
-
-// filterPreparedQueries is used to filter prepared queries based on ACL rules.
-// We prune entries the user doesn't have access to, and we redact any tokens
-// if the user doesn't have a management token. Returns true if any (named)
-// queries were removed - un-named queries are meant to be ephemeral and can
-// only be enumerated by a management token
-func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) bool {
-	var authzContext acl.AuthorizerContext
-	structs.DefaultEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
-	// Management tokens can see everything with no filtering.
-	// TODO is this check even necessary - this looks like a search replace from
-	// the 1.4 ACL rewrite. The global-management token will provide unrestricted query privileges
-	// so asking for ACLWrite should be unnecessary.
-	if f.authorizer.ACLWrite(&authzContext) == acl.Allow {
-		return false
-	}
-
-	// Otherwise, we need to see what the token has access to.
-	var namedQueriesRemoved bool
-	ret := make(structs.PreparedQueries, 0, len(*queries))
-	for _, query := range *queries {
-		// If no prefix ACL applies to this query then filter it, since
-		// we know at this point the user doesn't have a management
-		// token, otherwise see what the policy says.
-		prefix, hasName := query.GetACLPrefix()
-		switch {
-		case hasName && f.authorizer.PreparedQueryRead(prefix, &authzContext) != acl.Allow:
-			namedQueriesRemoved = true
-			fallthrough
-		case !hasName:
-			f.logger.Debug("dropping prepared query from result due to ACLs", "query", query.ID)
-			continue
-		}
-
-		// Redact any tokens if necessary. We make a copy of just the
-		// pointer so we don't mess with the caller's slice.
-		final := query
-		f.redactPreparedQueryTokens(&final)
-		ret = append(ret, final)
-	}
-	*queries = ret
-	return namedQueriesRemoved
-}
-
-func (f *aclFilter) filterToken(token **structs.ACLToken) {
-	var entCtx acl.AuthorizerContext
-	if token == nil || *token == nil || f == nil {
-		return
-	}
-
-	(*token).FillAuthzContext(&entCtx)
-
-	if f.authorizer.ACLRead(&entCtx) != acl.Allow {
-		// no permissions to read
-		*token = nil
-	} else if f.authorizer.ACLWrite(&entCtx) != acl.Allow {
-		// no write permissions - redact secret
-		clone := *(*token)
-		clone.SecretID = redactedToken
-		*token = &clone
-	}
-}
-
-func (f *aclFilter) filterTokens(tokens *structs.ACLTokens) {
-	ret := make(structs.ACLTokens, 0, len(*tokens))
-	for _, token := range *tokens {
-		final := token
-		f.filterToken(&final)
-		if final != nil {
-			ret = append(ret, final)
-		}
-	}
-	*tokens = ret
-}
-
-func (f *aclFilter) filterTokenStub(token **structs.ACLTokenListStub) {
-	var entCtx acl.AuthorizerContext
-	if token == nil || *token == nil || f == nil {
-		return
-	}
-
-	(*token).FillAuthzContext(&entCtx)
-
-	if f.authorizer.ACLRead(&entCtx) != acl.Allow {
-		*token = nil
-	} else if f.authorizer.ACLWrite(&entCtx) != acl.Allow {
-		// no write permissions - redact secret
-		clone := *(*token)
-		clone.SecretID = redactedToken
-		*token = &clone
-	}
-}
-
-func (f *aclFilter) filterTokenStubs(tokens *[]*structs.ACLTokenListStub) {
-	ret := make(structs.ACLTokenListStubs, 0, len(*tokens))
-	for _, token := range *tokens {
-		final := token
-		f.filterTokenStub(&final)
-		if final != nil {
-			ret = append(ret, final)
-		}
-	}
-	*tokens = ret
-}
-
-func (f *aclFilter) filterPolicy(policy **structs.ACLPolicy) {
-	var entCtx acl.AuthorizerContext
-	if policy == nil || *policy == nil || f == nil {
-		return
-	}
-
-	(*policy).FillAuthzContext(&entCtx)
-
-	if f.authorizer.ACLRead(&entCtx) != acl.Allow {
-		// no permissions to read
-		*policy = nil
-	}
-}
-
-func (f *aclFilter) filterPolicies(policies *structs.ACLPolicies) {
-	ret := make(structs.ACLPolicies, 0, len(*policies))
-	for _, policy := range *policies {
-		final := policy
-		f.filterPolicy(&final)
-		if final != nil {
-			ret = append(ret, final)
-		}
-	}
-	*policies = ret
-}
-
-func (f *aclFilter) filterRole(role **structs.ACLRole) {
-	var entCtx acl.AuthorizerContext
-	if role == nil || *role == nil || f == nil {
-		return
-	}
-
-	(*role).FillAuthzContext(&entCtx)
-
-	if f.authorizer.ACLRead(&entCtx) != acl.Allow {
-		// no permissions to read
-		*role = nil
-	}
-}
-
-func (f *aclFilter) filterRoles(roles *structs.ACLRoles) {
-	ret := make(structs.ACLRoles, 0, len(*roles))
-	for _, role := range *roles {
-		final := role
-		f.filterRole(&final)
-		if final != nil {
-			ret = append(ret, final)
-		}
-	}
-	*roles = ret
-}
-
-func (f *aclFilter) filterBindingRule(rule **structs.ACLBindingRule) {
-	var entCtx acl.AuthorizerContext
-	if rule == nil || *rule == nil || f == nil {
-		return
-	}
-
-	(*rule).FillAuthzContext(&entCtx)
-
-	if f.authorizer.ACLRead(&entCtx) != acl.Allow {
-		// no permissions to read
-		*rule = nil
-	}
-}
-
-func (f *aclFilter) filterBindingRules(rules *structs.ACLBindingRules) {
-	ret := make(structs.ACLBindingRules, 0, len(*rules))
-	for _, rule := range *rules {
-		final := rule
-		f.filterBindingRule(&final)
-		if final != nil {
-			ret = append(ret, final)
-		}
-	}
-	*rules = ret
-}
-
-func (f *aclFilter) filterAuthMethod(method **structs.ACLAuthMethod) {
-	var entCtx acl.AuthorizerContext
-	if method == nil || *method == nil || f == nil {
-		return
-	}
-
-	(*method).FillAuthzContext(&entCtx)
-
-	if f.authorizer.ACLRead(&entCtx) != acl.Allow {
-		// no permissions to read
-		*method = nil
-	}
-}
-
-func (f *aclFilter) filterAuthMethods(methods *structs.ACLAuthMethods) {
-	ret := make(structs.ACLAuthMethods, 0, len(*methods))
-	for _, method := range *methods {
-		final := method
-		f.filterAuthMethod(&final)
-		if final != nil {
-			ret = append(ret, final)
-		}
-	}
-	*methods = ret
-}
-
-func (f *aclFilter) filterServiceList(services *structs.ServiceList) bool {
-	ret := make(structs.ServiceList, 0, len(*services))
-	var removed bool
-	for _, svc := range *services {
-		var authzContext acl.AuthorizerContext
-
-		svc.FillAuthzContext(&authzContext)
-
-		if f.authorizer.ServiceRead(svc.Name, &authzContext) != acl.Allow {
-			removed = true
-			sid := structs.NewServiceID(svc.Name, &svc.EnterpriseMeta)
-			f.logger.Debug("dropping service from result due to ACLs", "service", sid.String())
-			continue
-		}
-
-		ret = append(ret, svc)
-	}
-
-	*services = ret
-	return removed
-}
-
-// filterGatewayServices is used to filter gateway to service mappings based on ACL rules.
-// Returns true if any elements were removed.
-func (f *aclFilter) filterGatewayServices(mappings *structs.GatewayServices) bool {
-	ret := make(structs.GatewayServices, 0, len(*mappings))
-	var removed bool
-	for _, s := range *mappings {
-		// This filter only checks ServiceRead on the linked service.
-		// ServiceRead on the gateway is checked in the GatewayServices endpoint before filtering.
-		var authzContext acl.AuthorizerContext
-		s.Service.FillAuthzContext(&authzContext)
-
-		if f.authorizer.ServiceRead(s.Service.Name, &authzContext) != acl.Allow {
-			f.logger.Debug("dropping service from result due to ACLs", "service", s.Service.String())
-			removed = true
-			continue
-		}
-		ret = append(ret, s)
-	}
-	*mappings = ret
-	return removed
-}
-
 func filterACLWithAuthorizer(logger hclog.Logger, authorizer acl.Authorizer, subj interface{}) {
-	if authorizer == nil {
-		return
-	}
-	filt := newACLFilter(authorizer, logger)
-
-	switch v := subj.(type) {
-	case *structs.CheckServiceNodes:
-		filt.filterCheckServiceNodes(v)
-
-	case *structs.IndexedCheckServiceNodes:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterCheckServiceNodes(&v.Nodes)
-
-	case *structs.PreparedQueryExecuteResponse:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterCheckServiceNodes(&v.Nodes)
-
-	case *structs.IndexedServiceTopology:
-		filtered := filt.filterServiceTopology(v.ServiceTopology)
-		if filtered {
-			v.FilteredByACLs = true
-			v.QueryMeta.ResultsFilteredByACLs = true
-		}
-
-	case *structs.DatacenterIndexedCheckServiceNodes:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterDatacenterCheckServiceNodes(&v.DatacenterNodes)
-
-	case *structs.IndexedCoordinates:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterCoordinates(&v.Coordinates)
-
-	case *structs.IndexedHealthChecks:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterHealthChecks(&v.HealthChecks)
-
-	case *structs.IndexedIntentions:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterIntentions(&v.Intentions)
-
-	case *structs.IndexedNodeDump:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterNodeDump(&v.Dump)
-
-	case *structs.IndexedServiceDump:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterServiceDump(&v.Dump)
-
-	case *structs.IndexedNodes:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterNodes(&v.Nodes)
-
-	case *structs.IndexedNodeServices:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterNodeServices(&v.NodeServices)
-
-	case *structs.IndexedNodeServiceList:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterNodeServiceList(&v.NodeServices)
-
-	case *structs.IndexedServiceNodes:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterServiceNodes(&v.ServiceNodes)
-
-	case *structs.IndexedServices:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterServices(v.Services, &v.EnterpriseMeta)
-
-	case *structs.IndexedSessions:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterSessions(&v.Sessions)
-
-	case *structs.IndexedPreparedQueries:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterPreparedQueries(&v.Queries)
-
-	case **structs.PreparedQuery:
-		filt.redactPreparedQueryTokens(v)
-
-	case *structs.ACLTokens:
-		filt.filterTokens(v)
-	case **structs.ACLToken:
-		filt.filterToken(v)
-	case *[]*structs.ACLTokenListStub:
-		filt.filterTokenStubs(v)
-	case **structs.ACLTokenListStub:
-		filt.filterTokenStub(v)
-
-	case *structs.ACLPolicies:
-		filt.filterPolicies(v)
-	case **structs.ACLPolicy:
-		filt.filterPolicy(v)
-
-	case *structs.ACLRoles:
-		filt.filterRoles(v)
-	case **structs.ACLRole:
-		filt.filterRole(v)
-
-	case *structs.ACLBindingRules:
-		filt.filterBindingRules(v)
-	case **structs.ACLBindingRule:
-		filt.filterBindingRule(v)
-
-	case *structs.ACLAuthMethods:
-		filt.filterAuthMethods(v)
-	case **structs.ACLAuthMethod:
-		filt.filterAuthMethod(v)
-
-	case *structs.IndexedServiceList:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterServiceList(&v.Services)
-
-	case *structs.IndexedExportedServiceList:
-		for peer, peerServices := range v.Services {
-			v.QueryMeta.ResultsFilteredByACLs = filt.filterServiceList(&peerServices)
-			if len(peerServices) == 0 {
-				delete(v.Services, peer)
-			} else {
-				v.Services[peer] = peerServices
-			}
-		}
-
-	case *structs.IndexedGatewayServices:
-		v.QueryMeta.ResultsFilteredByACLs = filt.filterGatewayServices(&v.Services)
-
-	case *structs.IndexedNodesWithGateways:
-		if filt.filterCheckServiceNodes(&v.Nodes) {
-			v.QueryMeta.ResultsFilteredByACLs = true
-		}
-		if filt.filterGatewayServices(&v.Gateways) {
-			v.QueryMeta.ResultsFilteredByACLs = true
-		}
-
-	default:
-		panic(fmt.Errorf("Unhandled type passed to ACL filter: %T %#v", subj, subj))
-	}
+	aclfilter.New(authorizer, logger).Filter(subj)
 }

 // filterACL uses the ACLResolver to resolve the token in an acl.Authorizer,
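The several hundred lines deleted above now live in agent/structs/aclfilter, and the one-line replacement is the entire surface this file still needs. A hedged usage sketch; New and Filter are confirmed by the call above, and ResultsFilteredByACLs behavior is carried over from the deleted switch, but everything else is assumption:

```go
// Filter mutates the reply in place, dropping entries the token cannot
// read and setting QueryMeta.ResultsFilteredByACLs when it removed any.
var reply structs.IndexedHealthChecks
aclfilter.New(authz, logger).Filter(&reply)
if reply.QueryMeta.ResultsFilteredByACLs {
	logger.Debug("health checks were filtered from the reply")
}
```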
@@ -17,10 +17,12 @@ import (
 	uuid "github.com/hashicorp/go-uuid"

 	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/acl/resolver"
 	"github.com/hashicorp/consul/agent/consul/auth"
 	"github.com/hashicorp/consul/agent/consul/authmethod"
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/agent/structs/aclfilter"
 	"github.com/hashicorp/consul/lib"
 )
@@ -263,7 +265,7 @@ func (a *ACL) TokenRead(args *structs.ACLTokenGetRequest, reply *structs.ACLToke
 		return err
 	}

-	var authz ACLResolveResult
+	var authz resolver.Result

 	if args.TokenIDType == structs.ACLTokenAccessor {
 		var err error
@@ -290,7 +292,7 @@ func (a *ACL) TokenRead(args *structs.ACLTokenGetRequest, reply *structs.ACLToke
 		a.srv.filterACLWithAuthorizer(authz, &token)

 		// token secret was redacted
-		if token.SecretID == redactedToken {
+		if token.SecretID == aclfilter.RedactedToken {
 			reply.Redacted = true
 		}
 	}
@@ -718,7 +720,7 @@ func (a *ACL) TokenBatchRead(args *structs.ACLTokenBatchGetRequest, reply *struc
 		a.srv.filterACLWithAuthorizer(authz, &final)
 		if final != nil {
 			ret = append(ret, final)
-			if final.SecretID == redactedToken {
+			if final.SecretID == aclfilter.RedactedToken {
 				reply.Redacted = true
 			}
 		} else {
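Callers that previously compared against the package-private redactedToken constant now use the exported sentinel (still "<hidden>", per the constant moved out earlier in this diff). A short sketch of the check; the surrounding handler is illustrative:

```go
if token.SecretID == aclfilter.RedactedToken {
	// The requester lacked acl:write, so the real secret was withheld.
	reply.Redacted = true
}
```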
@@ -20,6 +20,7 @@ import (
 	"github.com/hashicorp/consul/agent/consul/authmethod/kubeauth"
 	"github.com/hashicorp/consul/agent/consul/authmethod/testauth"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/agent/structs/aclfilter"
 	"github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest"
 	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
@@ -1854,7 +1855,7 @@ func TestACLEndpoint_TokenList(t *testing.T) {
 		}
 		require.ElementsMatch(t, gatherIDs(t, resp.Tokens), tokens)
 		for _, token := range resp.Tokens {
-			require.Equal(t, redactedToken, token.SecretID)
+			require.Equal(t, aclfilter.RedactedToken, token.SecretID)
 		}
 	})
 }
@@ -12,6 +12,7 @@ import (
 	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/consul/authmethod/testauth"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/agent/structs/aclfilter"
 	tokenStore "github.com/hashicorp/consul/agent/token"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/consul/testrpc"
@@ -752,9 +753,9 @@ func TestACLReplication_TokensRedacted(t *testing.T) {
 		var tokenResp structs.ACLTokenResponse
 		req := structs.ACLTokenGetRequest{
 			Datacenter:   "dc2",
-			TokenID:      redactedToken,
+			TokenID:      aclfilter.RedactedToken,
 			TokenIDType:  structs.ACLTokenSecret,
-			QueryOptions: structs.QueryOptions{Token: redactedToken},
+			QueryOptions: structs.QueryOptions{Token: aclfilter.RedactedToken},
 		}
 		err := s2.RPC("ACL.TokenRead", &req, &tokenResp)
 		// its not an error for the secret to not be found.
@@ -5,6 +5,7 @@ import (
 	"fmt"

 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/agent/structs/aclfilter"
 )

 type aclTokenReplicator struct {
@@ -99,7 +100,7 @@ func (r *aclTokenReplicator) PendingUpdateEstimatedSize(i int) int {
 }

 func (r *aclTokenReplicator) PendingUpdateIsRedacted(i int) bool {
-	return r.updated[i].SecretID == redactedToken
+	return r.updated[i].SecretID == aclfilter.RedactedToken
 }

 func (r *aclTokenReplicator) UpdateLocalBatch(ctx context.Context, srv *Server, start, end int) error {
File diff suppressed because it is too large
@@ -86,7 +86,7 @@ func (s *Server) initAutopilot(config *Config) {
 	)

 	// registers a snapshot handler for the event publisher to send as the first event for a new stream
-	s.publisher.RegisterHandler(autopilotevents.EventTopicReadyServers, apDelegate.readyServersPublisher.HandleSnapshot)
+	s.publisher.RegisterHandler(autopilotevents.EventTopicReadyServers, apDelegate.readyServersPublisher.HandleSnapshot, false)
 }

 func (s *Server) autopilotServers() map[raft.ServerID]*autopilot.Server {
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.11.0. DO NOT EDIT.
+// Code generated by mockery v2.12.2. DO NOT EDIT.

 package autopilotevents

@@ -19,9 +19,10 @@ func (_m *MockPublisher) Publish(_a0 []stream.Event) {
 	_m.Called(_a0)
 }

-// NewMockPublisher creates a new instance of MockPublisher. It also registers a cleanup function to assert the mocks expectations.
+// NewMockPublisher creates a new instance of MockPublisher. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
 func NewMockPublisher(t testing.TB) *MockPublisher {
 	mock := &MockPublisher{}
+	mock.Mock.Test(t)

 	t.Cleanup(func() { mock.AssertExpectations(t) })
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.11.0. DO NOT EDIT.
+// Code generated by mockery v2.12.2. DO NOT EDIT.

 package autopilotevents

@@ -48,9 +48,10 @@ func (_m *MockStateStore) GetNodeID(_a0 types.NodeID, _a1 *acl.EnterpriseMeta, _
 	return r0, r1, r2
 }

-// NewMockStateStore creates a new instance of MockStateStore. It also registers a cleanup function to assert the mocks expectations.
+// NewMockStateStore creates a new instance of MockStateStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
 func NewMockStateStore(t testing.TB) *MockStateStore {
 	mock := &MockStateStore{}
+	mock.Mock.Test(t)

 	t.Cleanup(func() { mock.AssertExpectations(t) })
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.11.0. DO NOT EDIT.
+// Code generated by mockery v2.12.2. DO NOT EDIT.

 package autopilotevents

@@ -29,9 +29,10 @@ func (_m *mockTimeProvider) Now() time.Time {
 	return r0
 }

-// newMockTimeProvider creates a new instance of mockTimeProvider. It also registers a cleanup function to assert the mocks expectations.
+// newMockTimeProvider creates a new instance of mockTimeProvider. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
 func newMockTimeProvider(t testing.TB) *mockTimeProvider {
 	mock := &mockTimeProvider{}
+	mock.Mock.Test(t)

 	t.Cleanup(func() { mock.AssertExpectations(t) })
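mockery v2.12 wires the generated mock to the test via mock.Mock.Test(t), so an unexpected call fails the test through t instead of panicking. A hedged sketch of a caller; the expectation API is standard testify/mock and not shown in this diff:

```go
import "github.com/stretchr/testify/mock"

func TestPublishesReadyServers(t *testing.T) {
	pub := NewMockPublisher(t) // expectations assert automatically via t.Cleanup
	pub.On("Publish", mock.Anything).Return()

	// ... exercise code that should call pub.Publish ...
}
```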
@ -119,17 +119,17 @@ func NewReadyServersEventPublisher(config Config) *ReadyServersEventPublisher {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//go:generate mockery --name StateStore --inpackage --testonly
|
//go:generate mockery --name StateStore --inpackage --filename mock_StateStore_test.go
|
||||||
type StateStore interface {
|
type StateStore interface {
|
||||||
GetNodeID(types.NodeID, *acl.EnterpriseMeta, string) (uint64, *structs.Node, error)
|
GetNodeID(types.NodeID, *acl.EnterpriseMeta, string) (uint64, *structs.Node, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
//go:generate mockery --name Publisher --inpackage --testonly
|
//go:generate mockery --name Publisher --inpackage --filename mock_Publisher_test.go
|
||||||
type Publisher interface {
|
type Publisher interface {
|
||||||
Publish([]stream.Event)
|
Publish([]stream.Event)
|
||||||
}
|
}
|
||||||
|
|
||||||
//go:generate mockery --name timeProvider --inpackage --testonly
|
//go:generate mockery --name timeProvider --inpackage --filename mock_timeProvider_test.go
|
||||||
type timeProvider interface {
|
type timeProvider interface {
|
||||||
Now() time.Time
|
Now() time.Time
|
||||||
}
|
}
|
||||||
|
|
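The regenerated constructors now call `mock.Mock.Test(t)` so mock failures are attributed to the owning test, and `t.Cleanup` asserts expectations automatically. A minimal sketch of how a constructor in this shape is used with testify; the `fakePublisher` type and string-slice event type below are simplifications for illustration, not the generated Consul mocks:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

// fakePublisher mirrors the shape mockery generates: it embeds mock.Mock
// and records calls made through the interface method.
type fakePublisher struct {
	mock.Mock
}

func (m *fakePublisher) Publish(events []string) {
	m.Called(events)
}

// newFakePublisher follows the same pattern as the generated constructors
// above: wire the testing.TB into the mock so failures are reported against
// the right test, and register a cleanup that asserts expectations.
func newFakePublisher(t testing.TB) *fakePublisher {
	m := &fakePublisher{}
	m.Mock.Test(t)
	t.Cleanup(func() { m.AssertExpectations(t) })
	return m
}

func TestPublisherIsCalled(t *testing.T) {
	pub := newFakePublisher(t)
	pub.On("Publish", []string{"event-a"}).Once()

	// The expectation is asserted automatically when the test cleans up.
	pub.Publish([]string{"event-a"})
}
```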
@@ -8,13 +8,14 @@ import (
 	"github.com/armon/go-metrics"
 	"github.com/armon/go-metrics/prometheus"
-	bexpr "github.com/hashicorp/go-bexpr"
+	"github.com/hashicorp/go-bexpr"
 	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-uuid"
 	hashstructure_v2 "github.com/mitchellh/hashstructure/v2"

 	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/acl/resolver"
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/ipaddr"

@@ -160,7 +161,7 @@ func nodePreApply(nodeName, nodeID string) error {
 	return nil
 }

-func servicePreApply(service *structs.NodeService, authz ACLResolveResult, authzCtxFill func(*acl.AuthorizerContext)) error {
+func servicePreApply(service *structs.NodeService, authz resolver.Result, authzCtxFill func(*acl.AuthorizerContext)) error {
 	// Validate the service. This is in addition to the below since
 	// the above just hasn't been moved over yet. We should move it over
 	// in time.

@@ -230,7 +231,7 @@ func checkPreApply(check *structs.HealthCheck) {
 // worst let a service update revert a recent node update, so it doesn't open up
 // too much abuse).
 func vetRegisterWithACL(
-	authz ACLResolveResult,
+	authz resolver.Result,
 	subj *structs.RegisterRequest,
 	ns *structs.NodeServices,
 ) error {

@@ -396,7 +397,7 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e
 // endpoint. The NodeService for the referenced service must be supplied, and can
 // be nil; similar for the HealthCheck for the referenced health check.
 func vetDeregisterWithACL(
-	authz ACLResolveResult,
+	authz resolver.Result,
 	subj *structs.DeregisterRequest,
 	ns *structs.NodeService,
 	nc *structs.HealthCheck,

@@ -869,6 +870,11 @@ func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *stru
 		return err
 	}

+	var (
+		priorMergeHash uint64
+		ranMergeOnce   bool
+	)
+
 	return c.srv.blockingQuery(
 		&args.QueryOptions,
 		&reply.QueryMeta,
@@ -878,10 +884,55 @@ func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *stru
 				return err
 			}

+			mergedServices := services
+			var cfgIndex uint64
+			if services != nil && args.MergeCentralConfig {
+				var mergedNodeServices []*structs.NodeService
+				for _, ns := range services.Services {
+					mergedns := ns
+					if ns.IsSidecarProxy() || ns.IsGateway() {
+						serviceSpecificReq := structs.ServiceSpecificRequest{
+							Datacenter:   args.Datacenter,
+							QueryOptions: args.QueryOptions,
+						}
+						cfgIndex, mergedns, err = mergeNodeServiceWithCentralConfig(ws, state, &serviceSpecificReq, ns, c.logger)
+						if err != nil {
+							return err
+						}
+						if cfgIndex > index {
+							index = cfgIndex
+						}
+					}
+					mergedNodeServices = append(mergedNodeServices, mergedns)
+				}
+				if len(mergedNodeServices) > 0 {
+					mergedServices.Services = mergedNodeServices
+				}

+				// Generate a hash of the mergedServices driving this response.
+				// Use it to determine if the response is identical to a prior wakeup.
+				newMergeHash, err := hashstructure_v2.Hash(mergedServices, hashstructure_v2.FormatV2, nil)
+				if err != nil {
+					return fmt.Errorf("error hashing reply for spurious wakeup suppression: %w", err)
+				}
+				if ranMergeOnce && priorMergeHash == newMergeHash {
+					// the below assignment is not required as the if condition already validates equality,
+					// but makes it more clear that prior value is being reset to the new hash on each run.
+					priorMergeHash = newMergeHash
+					reply.Index = index
+					// NOTE: the prior response is still alive inside of *reply, which is desirable
+					return errNotChanged
+				} else {
+					priorMergeHash = newMergeHash
+					ranMergeOnce = true
+				}

+			}

 			reply.Index = index

-			if services != nil {
-				reply.NodeServices = *services
+			if mergedServices != nil {
+				reply.NodeServices = *mergedServices

 				raw, err := filter.Execute(reply.NodeServices.Services)
 				if err != nil {
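The added block suppresses spurious blocking-query wakeups by hashing the merged reply and short-circuiting when it matches the previous run. A minimal, self-contained sketch of the same technique outside Consul; the `fetch` callback and the string-slice result type are hypothetical stand-ins:

```go
package main

import (
	"errors"
	"fmt"

	hashstructure_v2 "github.com/mitchellh/hashstructure/v2"
)

var errNotChanged = errors.New("result unchanged since prior wakeup")

// suppressUnchanged wraps a fetch function so repeated wakeups that produce
// an identical result short-circuit with errNotChanged instead of re-sending
// the same payload. The closure keeps the prior hash between invocations,
// mirroring the priorMergeHash/ranMergeOnce variables in the diff above.
func suppressUnchanged[T any](fetch func() (T, error)) func() (T, error) {
	var (
		priorHash uint64
		ranOnce   bool
	)
	return func() (T, error) {
		result, err := fetch()
		if err != nil {
			return result, err
		}
		newHash, err := hashstructure_v2.Hash(result, hashstructure_v2.FormatV2, nil)
		if err != nil {
			return result, fmt.Errorf("error hashing reply for spurious wakeup suppression: %w", err)
		}
		if ranOnce && priorHash == newHash {
			return result, errNotChanged
		}
		priorHash = newHash
		ranOnce = true
		return result, nil
	}
}

func main() {
	next := suppressUnchanged(func() ([]string, error) {
		return []string{"web", "api"}, nil // same data on every wakeup
	})

	_, err := next()
	fmt.Println(err) // <nil>
	_, err = next()
	fmt.Println(errors.Is(err, errNotChanged)) // true
}
```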
@@ -985,6 +1036,7 @@ func (c *Catalog) VirtualIPForService(args *structs.ServiceSpecificRequest, repl
 	}

 	state := c.srv.fsm.State()
-	*reply, err = state.VirtualIPForService(structs.NewServiceName(args.ServiceName, &args.EnterpriseMeta))
+	psn := structs.PeeredServiceName{Peer: args.PeerName, ServiceName: structs.NewServiceName(args.ServiceName, &args.EnterpriseMeta)}
+	*reply, err = state.VirtualIPForService(psn)
 	return err
 }
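Keying virtual IPs by a peer-qualified name means a locally registered `web` and a `web` imported from a cluster peer get distinct entries. A small sketch of why the composite key matters, with a simplified struct standing in for Consul's `structs.PeeredServiceName` (nothing below is the real state-store API):

```go
package main

import "fmt"

// peeredServiceName is a simplified stand-in for the composite key the diff
// introduces: an empty Peer means the service is registered locally.
type peeredServiceName struct {
	Peer    string
	Service string
}

func main() {
	vips := map[peeredServiceName]string{}

	// A local "web" and a "web" imported from peer-a no longer collide,
	// because the peer name participates in the map key.
	vips[peeredServiceName{Service: "web"}] = "240.0.0.1"
	vips[peeredServiceName{Peer: "peer-a", Service: "web"}] = "240.0.0.2"

	fmt.Println(vips[peeredServiceName{Service: "web"}])                 // 240.0.0.1
	fmt.Println(vips[peeredServiceName{Peer: "peer-a", Service: "web"}]) // 240.0.0.2
}
```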
@@ -16,6 +16,7 @@ import (
 	"github.com/hashicorp/consul-net-rpc/net/rpc"

 	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/acl/resolver"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/lib"

@@ -3467,11 +3468,11 @@ func TestVetRegisterWithACL(t *testing.T) {
 		}

 		// With an "allow all" authorizer the update should be allowed.
-		require.NoError(t, vetRegisterWithACL(ACLResolveResult{Authorizer: acl.ManageAll()}, args, nil))
+		require.NoError(t, vetRegisterWithACL(resolver.Result{Authorizer: acl.ManageAll()}, args, nil))
 	})

 	var perms acl.Authorizer = acl.DenyAll()
-	var resolvedPerms ACLResolveResult
+	var resolvedPerms resolver.Result

 	args := &structs.RegisterRequest{
 		Node: "nope",

@@ -3483,7 +3484,7 @@ func TestVetRegisterWithACL(t *testing.T) {
 	node "node" {
 		policy = "write"
 	} `)
-	resolvedPerms = ACLResolveResult{Authorizer: perms}
+	resolvedPerms = resolver.Result{Authorizer: perms}

 	// With that policy, the update should now be blocked for node reasons.
 	err := vetRegisterWithACL(resolvedPerms, args, nil)

@@ -3514,7 +3515,7 @@ func TestVetRegisterWithACL(t *testing.T) {
 			ID: "my-id",
 		},
 	}
-	err = vetRegisterWithACL(ACLResolveResult{Authorizer: perms}, args, ns)
+	err = vetRegisterWithACL(resolver.Result{Authorizer: perms}, args, ns)
 	require.True(t, acl.IsErrPermissionDenied(err))

 	// Chain on a basic service policy.

@@ -3522,7 +3523,7 @@ func TestVetRegisterWithACL(t *testing.T) {
 	service "service" {
 		policy = "write"
 	} `)
-	resolvedPerms = ACLResolveResult{Authorizer: perms}
+	resolvedPerms = resolver.Result{Authorizer: perms}

 	// With the service ACL, the update should go through.
 	require.NoError(t, vetRegisterWithACL(resolvedPerms, args, ns))

@@ -3549,7 +3550,7 @@ func TestVetRegisterWithACL(t *testing.T) {
 	service "other" {
 		policy = "write"
 	} `)
-	resolvedPerms = ACLResolveResult{Authorizer: perms}
+	resolvedPerms = resolver.Result{Authorizer: perms}

 	// Now it should go through.
 	require.NoError(t, vetRegisterWithACL(resolvedPerms, args, ns))

@@ -3655,7 +3656,7 @@ func TestVetRegisterWithACL(t *testing.T) {
 	service "other" {
 		policy = "deny"
 	} `)
-	resolvedPerms = ACLResolveResult{Authorizer: perms}
+	resolvedPerms = resolver.Result{Authorizer: perms}

 	// This should get rejected.
 	err = vetRegisterWithACL(resolvedPerms, args, ns)

@@ -3682,7 +3683,7 @@ func TestVetRegisterWithACL(t *testing.T) {
 	node "node" {
 		policy = "deny"
 	} `)
-	resolvedPerms = ACLResolveResult{Authorizer: perms}
+	resolvedPerms = resolver.Result{Authorizer: perms}

 	// This should get rejected because there's a node-level check in here.
 	err = vetRegisterWithACL(resolvedPerms, args, ns)

@@ -3733,7 +3734,7 @@ func TestVetDeregisterWithACL(t *testing.T) {
 	}

 	// With an "allow all" authorizer the update should be allowed.
-	if err := vetDeregisterWithACL(ACLResolveResult{Authorizer: acl.ManageAll()}, args, nil, nil); err != nil {
+	if err := vetDeregisterWithACL(resolver.Result{Authorizer: acl.ManageAll()}, args, nil, nil); err != nil {
 		t.Fatalf("err: %v", err)
 	}

@@ -3966,7 +3967,7 @@ node "node" {
 		},
 	} {
 		t.Run(args.Name, func(t *testing.T) {
-			err = vetDeregisterWithACL(ACLResolveResult{Authorizer: args.Perms}, &args.DeregisterRequest, args.Service, args.Check)
+			err = vetDeregisterWithACL(resolver.Result{Authorizer: args.Perms}, &args.DeregisterRequest, args.Service, args.Check)
 			if !args.Expected {
 				if err == nil {
 					t.Errorf("expected error with %+v", args.DeregisterRequest)
@@ -17,6 +17,7 @@ import (
 	msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"

+	"github.com/hashicorp/consul/agent/consul/stream"
 	grpc "github.com/hashicorp/consul/agent/grpc/private"
 	"github.com/hashicorp/consul/agent/grpc/private/resolver"
 	"github.com/hashicorp/consul/agent/pool"

@@ -510,7 +511,7 @@ func newDefaultDeps(t *testing.T, c *Config) Deps {

 	logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
 		Name:   c.NodeName,
-		Level:  hclog.Trace,
+		Level:  testutil.TestLogLevel,
 		Output: testutil.NewLogBuffer(t),
 	})

@@ -535,6 +536,7 @@ func newDefaultDeps(t *testing.T, c *Config) Deps {
 	}

 	return Deps{
+		EventPublisher:  stream.NewEventPublisher(10 * time.Second),
 		Logger:          logger,
 		TLSConfigurator: tls,
 		Tokens:          new(token.Store),
@@ -1133,6 +1133,31 @@ func TestConfigEntry_ResolveServiceConfig_TransparentProxy(t *testing.T) {
 				TransparentProxy: structs.TransparentProxyConfig{OutboundListenerPort: 808},
 			},
 		},
+		{
+			name: "from service-defaults with endpoint",
+			entries: []structs.ConfigEntry{
+				&structs.ServiceConfigEntry{
+					Kind: structs.ServiceDefaults,
+					Name: "foo",
+					Mode: structs.ProxyModeTransparent,
+					Destination: &structs.DestinationConfig{
+						Address: "hello.world.com",
+						Port:    443,
+					},
+				},
+			},
+			request: structs.ServiceConfigRequest{
+				Name:       "foo",
+				Datacenter: "dc1",
+			},
+			expect: structs.ServiceConfigResponse{
+				Mode: structs.ProxyModeTransparent,
+				Destination: structs.DestinationConfig{
+					Address: "hello.world.com",
+					Port:    443,
+				},
+			},
+		},
 		{
 			name: "service-defaults overrides proxy-defaults",
 			entries: []structs.ConfigEntry{

@@ -1207,11 +1232,10 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
 	wildcard := structs.NewServiceID(structs.WildcardSpecifier, structs.WildcardEnterpriseMetaInDefaultPartition())

 	tt := []struct {
-		name     string
-		entries  []structs.ConfigEntry
-		request  structs.ServiceConfigRequest
-		proxyCfg structs.ConnectProxyConfig
-		expect   structs.ServiceConfigResponse
+		name    string
+		entries []structs.ConfigEntry
+		request structs.ServiceConfigRequest
+		expect  structs.ServiceConfigResponse
 	}{
 		{
 			name: "upstream config entries from Upstreams and service-defaults",
@@ -57,6 +57,12 @@ func (s *Server) revokeEnterpriseLeadership() error {
 	return nil
 }

+func (s *Server) startTenancyDeferredDeletion(ctx context.Context) {
+}
+
+func (s *Server) stopTenancyDeferredDeletion() {
+}
+
 func (s *Server) validateEnterpriseRequest(entMeta *acl.EnterpriseMeta, write bool) error {
 	return nil
 }
@@ -6,11 +6,12 @@ import (
 	"sync"
 	"time"

-	"github.com/hashicorp/consul-net-rpc/go-msgpack/codec"
 	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-raftchunking"
 	"github.com/hashicorp/raft"

+	"github.com/hashicorp/consul-net-rpc/go-msgpack/codec"
+
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/consul/stream"
 	"github.com/hashicorp/consul/agent/structs"
@@ -277,21 +278,49 @@ func (c *FSM) registerStreamSnapshotHandlers() {

 	err := c.deps.Publisher.RegisterHandler(state.EventTopicServiceHealth, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
 		return c.State().ServiceHealthSnapshot(req, buf)
-	})
+	}, false)
 	if err != nil {
 		panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
 	}

 	err = c.deps.Publisher.RegisterHandler(state.EventTopicServiceHealthConnect, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
 		return c.State().ServiceHealthSnapshot(req, buf)
-	})
+	}, false)
 	if err != nil {
 		panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
 	}

 	err = c.deps.Publisher.RegisterHandler(state.EventTopicCARoots, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
 		return c.State().CARootsSnapshot(req, buf)
-	})
+	}, false)
+	if err != nil {
+		panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
+	}
+
+	err = c.deps.Publisher.RegisterHandler(state.EventTopicMeshConfig, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
+		return c.State().MeshConfigSnapshot(req, buf)
+	}, true)
+	if err != nil {
+		panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
+	}
+
+	err = c.deps.Publisher.RegisterHandler(state.EventTopicServiceResolver, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
+		return c.State().ServiceResolverSnapshot(req, buf)
+	}, true)
+	if err != nil {
+		panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
+	}
+
+	err = c.deps.Publisher.RegisterHandler(state.EventTopicIngressGateway, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
+		return c.State().IngressGatewaySnapshot(req, buf)
+	}, true)
+	if err != nil {
+		panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
+	}
+
+	err = c.deps.Publisher.RegisterHandler(state.EventTopicServiceIntentions, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
+		return c.State().ServiceIntentionsSnapshot(req, buf)
+	}, true)
 	if err != nil {
 		panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
 	}
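`RegisterHandler` now takes a boolean recording whether the topic supports wildcard subscriptions (true for the new config-entry topics, false for the existing ones). A minimal sketch of a registry in that shape, with simplified types that stand in for the real stream package API:

```go
package main

import (
	"errors"
	"fmt"
)

type topic string

// snapshotFunc is a simplified stand-in for the snapshot handlers above.
type snapshotFunc func(subject string) (index uint64, err error)

type registry struct {
	handlers         map[topic]snapshotFunc
	supportsWildcard map[topic]bool
}

// registerHandler mirrors the two-argument registration in the diff: the
// boolean records whether subscribers may use a wildcard subject for the topic.
func (r *registry) registerHandler(t topic, fn snapshotFunc, wildcard bool) error {
	if _, ok := r.handlers[t]; ok {
		return fmt.Errorf("topic %q already registered", t)
	}
	r.handlers[t] = fn
	r.supportsWildcard[t] = wildcard
	return nil
}

func (r *registry) subscribe(t topic, subject string) error {
	if _, ok := r.handlers[t]; !ok {
		return fmt.Errorf("no handler for topic %q", t)
	}
	if subject == "*" && !r.supportsWildcard[t] {
		return errors.New("wildcard subscriptions not supported for this topic")
	}
	return nil
}

func main() {
	r := &registry{handlers: map[topic]snapshotFunc{}, supportsWildcard: map[topic]bool{}}
	_ = r.registerHandler("service-health", func(string) (uint64, error) { return 1, nil }, false)
	_ = r.registerHandler("service-resolver", func(string) (uint64, error) { return 1, nil }, true)

	fmt.Println(r.subscribe("service-resolver", "*")) // <nil>
	fmt.Println(r.subscribe("service-health", "*"))   // wildcard subscriptions not supported for this topic
}
```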
@@ -451,7 +451,8 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
 		Port:    8000,
 		Connect: connectConf,
 	})
-	vip, err := fsm.state.VirtualIPForService(structs.NewServiceName("frontend", nil))
+	psn := structs.PeeredServiceName{ServiceName: structs.NewServiceName("frontend", nil)}
+	vip, err := fsm.state.VirtualIPForService(psn)
 	require.NoError(t, err)
 	require.Equal(t, vip, "240.0.0.1")

@@ -462,7 +463,8 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
 		Port:    9000,
 		Connect: connectConf,
 	})
-	vip, err = fsm.state.VirtualIPForService(structs.NewServiceName("backend", nil))
+	psn = structs.PeeredServiceName{ServiceName: structs.NewServiceName("backend", nil)}
+	vip, err = fsm.state.VirtualIPForService(psn)
 	require.NoError(t, err)
 	require.Equal(t, vip, "240.0.0.2")

@@ -476,6 +478,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {

 	// Peerings
 	require.NoError(t, fsm.state.PeeringWrite(31, &pbpeering.Peering{
+		ID:   "1fabcd52-1d46-49b0-b1d8-71559aee47f5",
 		Name: "baz",
 	}))

@@ -591,10 +594,12 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
 	require.Equal(t, uint64(25), checks[0].ModifyIndex)

 	// Verify virtual IPs are consistent.
-	vip, err = fsm2.state.VirtualIPForService(structs.NewServiceName("frontend", nil))
+	psn = structs.PeeredServiceName{ServiceName: structs.NewServiceName("frontend", nil)}
+	vip, err = fsm2.state.VirtualIPForService(psn)
 	require.NoError(t, err)
 	require.Equal(t, vip, "240.0.0.1")
-	vip, err = fsm2.state.VirtualIPForService(structs.NewServiceName("backend", nil))
+	psn = structs.PeeredServiceName{ServiceName: structs.NewServiceName("backend", nil)}
+	vip, err = fsm2.state.VirtualIPForService(psn)
 	require.NoError(t, err)
 	require.Equal(t, vip, "240.0.0.2")
@@ -14,6 +14,7 @@ import (
 	"github.com/hashicorp/consul-net-rpc/net/rpc"

 	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/sdk/testutil/retry"

@@ -1209,6 +1210,102 @@ func registerTestRoutingConfigTopologyEntries(t *testing.T, codec rpc.ClientCode
 	}
 }

+func registerLocalAndRemoteServicesVIPEnabled(t *testing.T, state *state.Store) {
+	t.Helper()
+
+	_, entry, err := state.SystemMetadataGet(nil, structs.SystemMetadataVirtualIPsEnabled)
+	require.NoError(t, err)
+	require.NotNil(t, entry)
+	require.Equal(t, "true", entry.Value)
+
+	// Register a local connect-native service
+	require.NoError(t, state.EnsureRegistration(10, &structs.RegisterRequest{
+		Node:    "foo",
+		Address: "127.0.0.1",
+		Service: &structs.NodeService{
+			Service: "api",
+			Connect: structs.ServiceConnect{
+				Native: true,
+			},
+		},
+	}))
+	// Should be assigned VIP
+	psn := structs.PeeredServiceName{ServiceName: structs.NewServiceName("api", nil)}
+	vip, err := state.VirtualIPForService(psn)
+	require.NoError(t, err)
+	require.Equal(t, "240.0.0.1", vip)
+
+	// Register an imported service and its proxy
+	require.NoError(t, state.EnsureRegistration(11, &structs.RegisterRequest{
+		Node:           "bar",
+		SkipNodeUpdate: true,
+		Service: &structs.NodeService{
+			Kind:    structs.ServiceKindTypical,
+			Service: "web",
+			ID:      "web-1",
+		},
+		PeerName: "peer-a",
+	}))
+	require.NoError(t, state.EnsureRegistration(12, &structs.RegisterRequest{
+		Node:    "bar",
+		Address: "127.0.0.2",
+		Service: &structs.NodeService{
+			Kind:    structs.ServiceKindConnectProxy,
+			ID:      "web-proxy",
+			Service: "web-proxy",
+			Proxy: structs.ConnectProxyConfig{
+				DestinationServiceName: "web",
+			},
+		},
+		PeerName: "peer-a",
+	}))
+	// Should be assigned one VIP for the real service name
+	psn = structs.PeeredServiceName{Peer: "peer-a", ServiceName: structs.NewServiceName("web", nil)}
+	vip, err = state.VirtualIPForService(psn)
+	require.NoError(t, err)
+	require.Equal(t, "240.0.0.2", vip)
+	// web-proxy should not have a VIP
+	psn = structs.PeeredServiceName{Peer: "peer-a", ServiceName: structs.NewServiceName("web-proxy", nil)}
+	vip, err = state.VirtualIPForService(psn)
+	require.NoError(t, err)
+	require.Empty(t, vip)
+
+	// Register an imported service and its proxy from another peer
+	require.NoError(t, state.EnsureRegistration(11, &structs.RegisterRequest{
+		Node:           "gir",
+		SkipNodeUpdate: true,
+		Service: &structs.NodeService{
+			Kind:    structs.ServiceKindTypical,
+			Service: "web",
+			ID:      "web-1",
+		},
+		PeerName: "peer-b",
+	}))
+	require.NoError(t, state.EnsureRegistration(12, &structs.RegisterRequest{
+		Node:    "gir",
+		Address: "127.0.0.3",
+		Service: &structs.NodeService{
+			Kind:    structs.ServiceKindConnectProxy,
+			ID:      "web-proxy",
+			Service: "web-proxy",
+			Proxy: structs.ConnectProxyConfig{
+				DestinationServiceName: "web",
+			},
+		},
+		PeerName: "peer-b",
+	}))
+	// Should be assigned one VIP for the real service name
+	psn = structs.PeeredServiceName{Peer: "peer-b", ServiceName: structs.NewServiceName("web", nil)}
+	vip, err = state.VirtualIPForService(psn)
+	require.NoError(t, err)
+	require.Equal(t, "240.0.0.3", vip)
+	// web-proxy should not have a VIP
+	psn = structs.PeeredServiceName{Peer: "peer-b", ServiceName: structs.NewServiceName("web-proxy", nil)}
+	vip, err = state.VirtualIPForService(psn)
+	require.NoError(t, err)
+	require.Empty(t, vip)
+}
+
 func registerIntentionUpstreamEntries(t *testing.T, codec rpc.ClientCodec, token string) {
 	t.Helper()

@@ -1307,7 +1404,7 @@ func registerIntentionUpstreamEntries(t *testing.T, codec rpc.ClientCodec, token
 	}
 	registerTestCatalogEntriesMap(t, codec, registrations)

-	// Add intentions: deny all and web -> api
+	// Add intentions: deny all and web -> api and web -> api.example.com
 	entries := []structs.ConfigEntryRequest{
 		{
 			Datacenter: "dc1",

@@ -1323,6 +1420,20 @@ func registerIntentionUpstreamEntries(t *testing.T, codec rpc.ClientCodec, token
 			},
 			WriteRequest: structs.WriteRequest{Token: token},
 		},
+		{
+			Datacenter: "dc1",
+			Entry: &structs.ServiceIntentionsConfigEntry{
+				Kind: structs.ServiceIntentions,
+				Name: "api.example.com",
+				Sources: []*structs.SourceIntention{
+					{
+						Name:   "web",
+						Action: structs.IntentionActionAllow,
+					},
+				},
+			},
+			WriteRequest: structs.WriteRequest{Token: token},
+		},
 		{
 			Datacenter: "dc1",
 			Entry: &structs.ServiceIntentionsConfigEntry{

@@ -1342,4 +1453,36 @@ func registerIntentionUpstreamEntries(t *testing.T, codec rpc.ClientCodec, token
 		var out bool
 		require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out))
 	}
+
+	// Add destinations
+	dests := []structs.ConfigEntryRequest{
+		{
+			Datacenter: "dc1",
+			Entry: &structs.ServiceConfigEntry{
+				Kind: structs.ServiceDefaults,
+				Name: "api.example.com",
+				Destination: &structs.DestinationConfig{
+					Address: "api.example.com",
+					Port:    443,
+				},
+			},
+			WriteRequest: structs.WriteRequest{Token: token},
+		},
+		{
+			Datacenter: "dc1",
+			Entry: &structs.ServiceConfigEntry{
+				Kind: structs.ServiceDefaults,
+				Name: "kafka.store.com",
+				Destination: &structs.DestinationConfig{
+					Address: "172.168.2.1",
+					Port:    9003,
+				},
+			},
+			WriteRequest: structs.WriteRequest{Token: token},
+		},
+	}
+	for _, req := range dests {
+		var out bool
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out))
+	}
 }
@@ -77,6 +77,10 @@ func (s *Intention) Apply(args *structs.IntentionRequest, reply *string) error {
 		return ErrConnectNotEnabled
 	}

+	if args.Intention != nil && args.Intention.SourcePeer != "" {
+		return fmt.Errorf("SourcePeer field is not supported on this endpoint. Use config entries instead")
+	}
+
 	// Ensure that all service-intentions config entry writes go to the primary
 	// datacenter. These will then be replicated to all the other datacenters.
 	args.Datacenter = s.srv.config.PrimaryDatacenter

@@ -432,7 +436,7 @@ func (s *Intention) Get(args *structs.IntentionQueryRequest, reply *structs.Inde
 	}

 	if args.Exact != nil {
-		// // Finish defaulting the namespace fields.
+		// Finish defaulting the namespace fields.
 		if args.Exact.SourceNS == "" {
 			args.Exact.SourceNS = entMeta.NamespaceOrDefault()
 		}
@@ -273,6 +273,41 @@ func TestIntentionApply_updateGood(t *testing.T) {
 	}
 }

+// TestIntentionApply_NoSourcePeer makes sure that no intention is created with a SourcePeer since this is not supported
+func TestIntentionApply_NoSourcePeer(t *testing.T) {
+	if testing.Short() {
+		t.Skip("too slow for testing.Short")
+	}
+
+	t.Parallel()
+
+	_, s1 := testServer(t)
+	codec := rpcClient(t, s1)
+
+	waitForLeaderEstablishment(t, s1)
+
+	// Setup a basic record to create
+	ixn := structs.IntentionRequest{
+		Datacenter: "dc1",
+		Op:         structs.IntentionOpCreate,
+		Intention: &structs.Intention{
+			SourceNS:        structs.IntentionDefaultNamespace,
+			SourceName:      "test",
+			SourcePeer:      "peer1",
+			DestinationNS:   structs.IntentionDefaultNamespace,
+			DestinationName: "test",
+			Action:          structs.IntentionActionAllow,
+			SourceType:      structs.IntentionSourceConsul,
+			Meta:            map[string]string{},
+		},
+	}
+	var reply string
+	err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)
+	require.Error(t, err)
+	require.Contains(t, err, "SourcePeer field is not supported on this endpoint. Use config entries instead")
+	require.Empty(t, reply)
+}
+
 // Shouldn't be able to update a non-existent intention
 func TestIntentionApply_updateNonExist(t *testing.T) {
 	if testing.Short() {
@@ -69,18 +69,60 @@ func (m *Internal) NodeDump(args *structs.DCSpecificRequest,
 		&args.QueryOptions,
 		&reply.QueryMeta,
 		func(ws memdb.WatchSet, state *state.Store) error {
-			index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta, args.PeerName)
-			if err != nil {
-				return err
-			}
-			reply.Index, reply.Dump = index, dump
+			// we don't support calling this endpoint for a specific peer
+			if args.PeerName != "" {
+				return fmt.Errorf("this endpoint does not support specifying a peer: %q", args.PeerName)
+			}
+
+			// this maxIndex will be the max of the NodeDump calls and the PeeringList call
+			var maxIndex uint64
+			// Get data for local nodes
+			index, dump, err := state.NodeDump(ws, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
+			if err != nil {
+				return fmt.Errorf("could not get a node dump for local nodes: %w", err)
+			}
+
+			if index > maxIndex {
+				maxIndex = index
+			}
+			reply.Dump = dump
+
+			// get a list of all peerings
+			index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta)
+			if err != nil {
+				return fmt.Errorf("could not list peers for node dump %w", err)
+			}
+
+			if index > maxIndex {
+				maxIndex = index
+			}
+
+			// get node dumps for all peerings
+			for _, p := range listedPeerings {
+				index, importedDump, err := state.NodeDump(ws, &args.EnterpriseMeta, p.Name)
+				if err != nil {
+					return fmt.Errorf("could not get a node dump for peer %q: %w", p.Name, err)
+				}
+				reply.ImportedDump = append(reply.ImportedDump, importedDump...)
+
+				if index > maxIndex {
+					maxIndex = index
+				}
+			}
+			reply.Index = maxIndex

 			raw, err := filter.Execute(reply.Dump)
 			if err != nil {
-				return err
+				return fmt.Errorf("could not filter local node dump: %w", err)
 			}
 			reply.Dump = raw.(structs.NodeDump)

+			importedRaw, err := filter.Execute(reply.ImportedDump)
+			if err != nil {
+				return fmt.Errorf("could not filter peer node dump: %w", err)
+			}
+			reply.ImportedDump = importedRaw.(structs.NodeDump)
+
 			// Note: we filter the results with ACLs *after* applying the user-supplied
 			// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
 			// results that would be filtered out even if the user did have permission.
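Both dump endpoints now take the maximum index across several reads (the local dump, the peering list, and one dump per peer) so the blocking query resumes its watch from the newest point seen. A minimal sketch of that accumulation pattern, with hypothetical read functions standing in for NodeDump and PeeringList:

```go
package main

import "fmt"

// read simulates a state-store read that returns data plus the raft index
// it was served at; in the diff these are the NodeDump and PeeringList calls.
type read func() (index uint64, items []string)

// dumpAll gathers results from every read and reports the highest index seen,
// which is what the blocking query uses as the resume point for the watch.
func dumpAll(reads ...read) (maxIndex uint64, all []string) {
	for _, r := range reads {
		index, items := r()
		if index > maxIndex {
			maxIndex = index
		}
		all = append(all, items...)
	}
	return maxIndex, all
}

func main() {
	local := func() (uint64, []string) { return 40, []string{"node-foo"} }
	peerA := func() (uint64, []string) { return 55, []string{"node-bar (peer-a)"} }
	peerB := func() (uint64, []string) { return 47, []string{"node-gir (peer-b)"} }

	idx, nodes := dumpAll(local, peerA, peerB)
	fmt.Println(idx, nodes) // 55 [node-foo node-bar (peer-a) node-gir (peer-b)]
}
```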
@@ -111,13 +153,47 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
 		&args.QueryOptions,
 		&reply.QueryMeta,
 		func(ws memdb.WatchSet, state *state.Store) error {
-			// Get, store, and filter nodes
-			maxIdx, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, args.PeerName)
+			// we don't support calling this endpoint for a specific peer
+			if args.PeerName != "" {
+				return fmt.Errorf("this endpoint does not support specifying a peer: %q", args.PeerName)
+			}
+
+			// this maxIndex will be the max of the ServiceDump calls and the PeeringList call
+			var maxIndex uint64
+
+			// get a local dump for services
+			index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
 			if err != nil {
-				return err
+				return fmt.Errorf("could not get a service dump for local nodes: %w", err)
+			}
+
+			if index > maxIndex {
+				maxIndex = index
 			}
 			reply.Nodes = nodes
+
+			// get a list of all peerings
+			index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta)
+			if err != nil {
+				return fmt.Errorf("could not list peers for service dump %w", err)
+			}
+
+			if index > maxIndex {
+				maxIndex = index
+			}
+
+			for _, p := range listedPeerings {
+				index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, p.Name)
+				if err != nil {
+					return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err)
+				}
+
+				if index > maxIndex {
+					maxIndex = index
+				}
+				reply.ImportedNodes = append(reply.ImportedNodes, importedNodes...)
+			}

 			// Get, store, and filter gateway services
 			idx, gatewayServices, err := state.DumpGatewayServices(ws)
 			if err != nil {

@@ -125,17 +201,23 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
 			}
 			reply.Gateways = gatewayServices

-			if idx > maxIdx {
-				maxIdx = idx
+			if idx > maxIndex {
+				maxIndex = idx
 			}
-			reply.Index = maxIdx
+			reply.Index = maxIndex

 			raw, err := filter.Execute(reply.Nodes)
 			if err != nil {
-				return err
+				return fmt.Errorf("could not filter local service dump: %w", err)
 			}
 			reply.Nodes = raw.(structs.CheckServiceNodes)

+			importedRaw, err := filter.Execute(reply.ImportedNodes)
+			if err != nil {
+				return fmt.Errorf("could not filter peer service dump: %w", err)
+			}
+			reply.ImportedNodes = importedRaw.(structs.CheckServiceNodes)
+
 			// Note: we filter the results with ACLs *after* applying the user-supplied
 			// bexpr filter, to ensure QueryMeta.ResultsFilteredByACLs does not include
 			// results that would be filtered out even if the user did have permission.
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// IntentionUpstreams returns the upstreams of a service. Upstreams are inferred from intentions.
|
// IntentionUpstreams returns a service's upstreams which are inferred from intentions.
|
||||||
// If intentions allow a connection from the target to some candidate service, the candidate service is considered
|
// If intentions allow a connection from the target to some candidate service, the candidate service is considered
|
||||||
// an upstream of the target.
|
// an upstream of the target.
|
||||||
func (m *Internal) IntentionUpstreams(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceList) error {
|
func (m *Internal) IntentionUpstreams(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceList) error {
|
||||||
|
@ -224,6 +306,27 @@ func (m *Internal) IntentionUpstreams(args *structs.ServiceSpecificRequest, repl
|
||||||
if done, err := m.srv.ForwardRPC("Internal.IntentionUpstreams", args, reply); done {
|
if done, err := m.srv.ForwardRPC("Internal.IntentionUpstreams", args, reply); done {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
return m.internalUpstreams(args, reply, structs.IntentionTargetService)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntentionUpstreamsDestination returns a service's upstreams which are inferred from intentions.
|
||||||
|
// If intentions allow a connection from the target to some candidate destination, the candidate destination is considered
|
||||||
|
// an upstream of the target. This performs the same logic as IntentionUpstreams endpoint but for destination upstreams only.
|
||||||
|
func (m *Internal) IntentionUpstreamsDestination(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceList) error {
|
||||||
|
// Exit early if Connect hasn't been enabled.
|
||||||
|
if !m.srv.config.ConnectEnabled {
|
||||||
|
return ErrConnectNotEnabled
|
||||||
|
}
|
||||||
|
if args.ServiceName == "" {
|
||||||
|
return fmt.Errorf("Must provide a service name")
|
||||||
|
}
|
||||||
|
if done, err := m.srv.ForwardRPC("Internal.IntentionUpstreamsDestination", args, reply); done {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return m.internalUpstreams(args, reply, structs.IntentionTargetDestination)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Internal) internalUpstreams(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceList, intentionTarget structs.IntentionTargetType) error {
|
||||||
|
|
||||||
authz, err := m.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, nil)
|
authz, err := m.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
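The refactor keeps both public RPC endpoints thin and routes them through one helper that differs only in the intention-target parameter. A minimal, self-contained sketch of that shape; the types and lookups below are illustrative stand-ins, not Consul's:

```go
package main

import "fmt"

type targetType int

const (
	targetService targetType = iota
	targetDestination
)

type server struct{}

// upstreams is the shared implementation; the public entry points below
// differ only in the target type they pass, mirroring internalUpstreams.
func (s *server) upstreams(service string, target targetType) ([]string, error) {
	if service == "" {
		return nil, fmt.Errorf("must provide a service name")
	}
	switch target {
	case targetService:
		return []string{"api"}, nil // placeholder lookup keyed by service intentions
	case targetDestination:
		return []string{"api.example.com"}, nil // placeholder lookup keyed by destinations
	default:
		return nil, fmt.Errorf("unknown target type")
	}
}

func (s *server) IntentionUpstreams(service string) ([]string, error) {
	return s.upstreams(service, targetService)
}

func (s *server) IntentionUpstreamsDestination(service string) ([]string, error) {
	return s.upstreams(service, targetDestination)
}

func main() {
	s := &server{}
	fmt.Println(s.IntentionUpstreams("web"))            // [api] <nil>
	fmt.Println(s.IntentionUpstreamsDestination("web")) // [api.example.com] <nil>
}
```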
@@ -244,7 +347,7 @@ func (m *Internal) IntentionUpstreams(args *structs.ServiceSpecificRequest, repl
 			defaultDecision := authz.IntentionDefaultAllow(nil)

 			sn := structs.NewServiceName(args.ServiceName, &args.EnterpriseMeta)
-			index, services, err := state.IntentionTopology(ws, sn, false, defaultDecision)
+			index, services, err := state.IntentionTopology(ws, sn, false, defaultDecision, intentionTarget)
 			if err != nil {
 				return err
 			}

@@ -272,7 +375,7 @@ func (m *Internal) IntentionUpstreams(args *structs.ServiceSpecificRequest, repl
 	})
 }

-// GatewayServiceNodes returns all the nodes for services associated with a gateway along with their gateway config
+// GatewayServiceDump returns all the nodes for services associated with a gateway along with their gateway config
 func (m *Internal) GatewayServiceDump(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceDump) error {
 	if done, err := m.srv.ForwardRPC("Internal.GatewayServiceDump", args, reply); done {
 		return err

@@ -350,7 +453,7 @@ func (m *Internal) GatewayServiceDump(args *structs.ServiceSpecificRequest, repl
 		return err
 	}

-// Match returns the set of intentions that match the given source/destination.
+// GatewayIntentions Match returns the set of intentions that match the given source/destination.
 func (m *Internal) GatewayIntentions(args *structs.IntentionQueryRequest, reply *structs.IndexedIntentions) error {
 	// Forward if necessary
 	if done, err := m.srv.ForwardRPC("Internal.GatewayIntentions", args, reply); done {
@@ -468,6 +571,49 @@ func (m *Internal) ExportedPeeredServices(args *structs.DCSpecificRequest, reply
 	})
 }

+// PeeredUpstreams returns all imported services as upstreams for any service in a given partition.
+// Cluster peering does not replicate intentions so all imported services are considered potential upstreams.
+func (m *Internal) PeeredUpstreams(args *structs.PartitionSpecificRequest, reply *structs.IndexedPeeredServiceList) error {
+	// Exit early if Connect hasn't been enabled.
+	if !m.srv.config.ConnectEnabled {
+		return ErrConnectNotEnabled
+	}
+	if done, err := m.srv.ForwardRPC("Internal.PeeredUpstreams", args, reply); done {
+		return err
+	}
+
+	// TODO(peering): ACL for filtering
+	// authz, err := m.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, nil)
+	// if err != nil {
+	// 	return err
+	// }
+
+	if err := m.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
+		return err
+	}
+
+	return m.srv.blockingQuery(
+		&args.QueryOptions,
+		&reply.QueryMeta,
+		func(ws memdb.WatchSet, state *state.Store) error {
+			index, vips, err := state.VirtualIPsForAllImportedServices(ws, args.EnterpriseMeta)
+			if err != nil {
+				return err
+			}
+
+			result := make([]structs.PeeredServiceName, 0, len(vips))
+			for _, vip := range vips {
+				result = append(result, vip.Service)
+			}
+
+			reply.Index, reply.Services = index, result
+
+			// TODO(peering): low priority: consider ACL filtering
+			// m.srv.filterACLWithAuthorizer(authz, reply)
+			return nil
+		})
+}
+
 // EventFire is a bit of an odd endpoint, but it allows for a cross-DC RPC
 // call to fire an event. The primary use case is to enable user events being
 // triggered in a remote DC.
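Inside its blocking query, PeeredUpstreams projects the virtual-IP table into a flat list of peer-qualified service names. A small sketch of that projection step with stand-in types (not the real structs package):

```go
package main

import "fmt"

// Simplified stand-ins for structs.PeeredServiceName and the VIP entries
// returned by VirtualIPsForAllImportedServices.
type peeredServiceName struct {
	Peer, Service string
}

type serviceVirtualIP struct {
	Service peeredServiceName
	IP      string
}

// peeredUpstreams keeps only the service identity from each VIP entry,
// preallocating the result slice the same way the endpoint does.
func peeredUpstreams(vips []serviceVirtualIP) []peeredServiceName {
	result := make([]peeredServiceName, 0, len(vips))
	for _, vip := range vips {
		result = append(result, vip.Service)
	}
	return result
}

func main() {
	vips := []serviceVirtualIP{
		{Service: peeredServiceName{Peer: "peer-a", Service: "web"}, IP: "240.0.0.2"},
		{Service: peeredServiceName{Peer: "peer-b", Service: "web"}, IP: "240.0.0.3"},
	}
	fmt.Println(peeredUpstreams(vips)) // [{peer-a web} {peer-b web}]
}
```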
@@ -8,6 +8,7 @@ import (
 	"testing"
 	"time"

+	"github.com/hashicorp/consul-net-rpc/net/rpc"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

@@ -17,6 +18,7 @@ import (
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/lib/stringslice"
+	"github.com/hashicorp/consul/proto/pbpeering"
 	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/consul/testrpc"

@@ -29,56 +31,79 @@ func TestInternal_NodeInfo(t *testing.T) {
 	}

 	t.Parallel()
-	dir1, s1 := testServer(t)
-	defer os.RemoveAll(dir1)
-	defer s1.Shutdown()
-	codec := rpcClient(t, s1)
-	defer codec.Close()
+	_, s1 := testServer(t)
+	codec := rpcClient(t, s1)

 	testrpc.WaitForLeader(t, s1.RPC, "dc1")

-	arg := structs.RegisterRequest{
-		Datacenter: "dc1",
-		Node:       "foo",
-		Address:    "127.0.0.1",
-		Service: &structs.NodeService{
-			ID:      "db",
-			Service: "db",
-			Tags:    []string{"primary"},
-		},
-		Check: &structs.HealthCheck{
-			Name:      "db connect",
-			Status:    api.HealthPassing,
-			ServiceID: "db",
-		},
-	}
-	var out struct{}
-	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
-		t.Fatalf("err: %v", err)
-	}
+	args := []*structs.RegisterRequest{
+		{
+			Datacenter: "dc1",
+			Node:       "foo",
+			Address:    "127.0.0.1",
+			Service: &structs.NodeService{
+				ID:      "db",
+				Service: "db",
+				Tags:    []string{"primary"},
+			},
+			Check: &structs.HealthCheck{
+				Name:      "db connect",
+				Status:    api.HealthPassing,
+				ServiceID: "db",
+			},
+		},
+		{
+			Datacenter: "dc1",
+			Node:       "foo",
+			Address:    "127.0.0.3",
+			PeerName:   "peer1",
+		},
+	}
+
+	for _, reg := range args {
+		err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", reg, nil)
+		require.NoError(t, err)
+	}

-	var out2 structs.IndexedNodeDump
-	req := structs.NodeSpecificRequest{
-		Datacenter: "dc1",
-		Node:       "foo",
-	}
-	if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeInfo", &req, &out2); err != nil {
-		t.Fatalf("err: %v", err)
-	}
+	t.Run("get local node", func(t *testing.T) {
+		var out structs.IndexedNodeDump
+		req := structs.NodeSpecificRequest{
+			Datacenter: "dc1",
+			Node:       "foo",
+		}
+		if err := msgpackrpc.CallWithCodec(codec, "Internal.NodeInfo", &req, &out); err != nil {
+			t.Fatalf("err: %v", err)
+		}

-	nodes := out2.Dump
-	if len(nodes) != 1 {
-		t.Fatalf("Bad: %v", nodes)
-	}
-	if nodes[0].Node != "foo" {
-		t.Fatalf("Bad: %v", nodes[0])
-	}
-	if !stringslice.Contains(nodes[0].Services[0].Tags, "primary") {
-		t.Fatalf("Bad: %v", nodes[0])
-	}
-	if nodes[0].Checks[0].Status != api.HealthPassing {
-		t.Fatalf("Bad: %v", nodes[0])
-	}
+		nodes := out.Dump
+		if len(nodes) != 1 {
+			t.Fatalf("Bad: %v", nodes)
+		}
+		if nodes[0].Node != "foo" {
+			t.Fatalf("Bad: %v", nodes[0])
+		}
+		if !stringslice.Contains(nodes[0].Services[0].Tags, "primary") {
+			t.Fatalf("Bad: %v", nodes[0])
+		}
+		if nodes[0].Checks[0].Status != api.HealthPassing {
+			t.Fatalf("Bad: %v", nodes[0])
+		}
+	})
+
+	t.Run("get peered node", func(t *testing.T) {
+		var out structs.IndexedNodeDump
+		req := structs.NodeSpecificRequest{
+			Datacenter: "dc1",
+			Node:       "foo",
+			PeerName:   "peer1",
+		}
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeInfo", &req, &out))
+
+		nodes := out.Dump
+		require.Equal(t, 1, len(nodes))
+		require.Equal(t, "foo", nodes[0].Node)
+		require.Equal(t, "peer1", nodes[0].PeerName)
+	})
 }

 func TestInternal_NodeDump(t *testing.T) {
@@ -87,53 +112,61 @@ func TestInternal_NodeDump(t *testing.T) {
 	}

 	t.Parallel()
-	dir1, s1 := testServer(t)
-	defer os.RemoveAll(dir1)
-	defer s1.Shutdown()
-	codec := rpcClient(t, s1)
-	defer codec.Close()
+	_, s1 := testServer(t)
+	codec := rpcClient(t, s1)

 	testrpc.WaitForLeader(t, s1.RPC, "dc1")

-	arg := structs.RegisterRequest{
-		Datacenter: "dc1",
-		Node:       "foo",
-		Address:    "127.0.0.1",
-		Service: &structs.NodeService{
-			ID:      "db",
-			Service: "db",
-			Tags:    []string{"primary"},
-		},
-		Check: &structs.HealthCheck{
-			Name:      "db connect",
-			Status:    api.HealthPassing,
-			ServiceID: "db",
-		},
-	}
-	var out struct{}
-	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-
-	arg = structs.RegisterRequest{
-		Datacenter: "dc1",
-		Node:       "bar",
-		Address:    "127.0.0.2",
-		Service: &structs.NodeService{
-			ID:      "db",
-			Service: "db",
-			Tags:    []string{"replica"},
-		},
-		Check: &structs.HealthCheck{
-			Name:      "db connect",
-			Status:    api.HealthWarning,
-			ServiceID: "db",
-		},
-	}
-	if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out); err != nil {
-		t.Fatalf("err: %v", err)
-	}
+	args := []*structs.RegisterRequest{
+		{
+			Datacenter: "dc1",
+			Node:       "foo",
+			Address:    "127.0.0.1",
+			Service: &structs.NodeService{
+				ID:      "db",
+				Service: "db",
+				Tags:    []string{"primary"},
+			},
+			Check: &structs.HealthCheck{
+				Name:      "db connect",
+				Status:    api.HealthPassing,
+				ServiceID: "db",
+			},
+		},
+		{
+			Datacenter: "dc1",
+			Node:       "bar",
+			Address:    "127.0.0.2",
+			Service: &structs.NodeService{
+				ID:      "db",
+				Service: "db",
+				Tags:    []string{"replica"},
+			},
+			Check: &structs.HealthCheck{
+				Name:      "db connect",
+				Status:    api.HealthWarning,
+				ServiceID: "db",
+			},
+		},
+		{
+			Datacenter: "dc1",
+			Node:       "foo-peer",
+			Address:    "127.0.0.3",
+			PeerName:   "peer1",
+		},
+	}
+
+	for _, reg := range args {
+		err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", reg, nil)
+		require.NoError(t, err)
+	}
+
+	err := s1.fsm.State().PeeringWrite(1, &pbpeering.Peering{
+		ID:   "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
+		Name: "peer1",
+	})
+	require.NoError(t, err)

 	var out2 structs.IndexedNodeDump
 	req := structs.DCSpecificRequest{
 		Datacenter: "dc1",
@ -175,6 +208,10 @@ func TestInternal_NodeDump(t *testing.T) {
|
||||||
if !foundFoo || !foundBar {
|
if !foundFoo || !foundBar {
|
||||||
t.Fatalf("missing foo or bar")
|
t.Fatalf("missing foo or bar")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
require.Len(t, out2.ImportedDump, 1)
|
||||||
|
require.Equal(t, "peer1", out2.ImportedDump[0].PeerName)
|
||||||
|
require.Equal(t, "foo-peer", out2.ImportedDump[0].Node)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInternal_NodeDump_Filter(t *testing.T) {
|
func TestInternal_NodeDump_Filter(t *testing.T) {
|
||||||
|
@ -183,60 +220,107 @@ func TestInternal_NodeDump_Filter(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
dir1, s1 := testServer(t)
|
_, s1 := testServer(t)
|
||||||
defer os.RemoveAll(dir1)
|
|
||||||
defer s1.Shutdown()
|
|
||||||
codec := rpcClient(t, s1)
|
codec := rpcClient(t, s1)
|
||||||
defer codec.Close()
|
|
||||||
|
|
||||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||||
|
|
||||||
arg := structs.RegisterRequest{
|
args := []*structs.RegisterRequest{
|
||||||
Datacenter: "dc1",
|
{
|
||||||
Node: "foo",
|
Datacenter: "dc1",
|
||||||
Address: "127.0.0.1",
|
Node: "foo",
|
||||||
Service: &structs.NodeService{
|
Address: "127.0.0.1",
|
||||||
ID: "db",
|
Service: &structs.NodeService{
|
||||||
Service: "db",
|
ID: "db",
|
||||||
Tags: []string{"primary"},
|
Service: "db",
|
||||||
|
Tags: []string{"primary"},
|
||||||
|
},
|
||||||
|
Check: &structs.HealthCheck{
|
||||||
|
Name: "db connect",
|
||||||
|
Status: api.HealthPassing,
|
||||||
|
ServiceID: "db",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Check: &structs.HealthCheck{
|
{
|
||||||
Name: "db connect",
|
Datacenter: "dc1",
|
||||||
Status: api.HealthPassing,
|
Node: "bar",
|
||||||
ServiceID: "db",
|
Address: "127.0.0.2",
|
||||||
|
Service: &structs.NodeService{
|
||||||
|
ID: "db",
|
||||||
|
Service: "db",
|
||||||
|
Tags: []string{"replica"},
|
||||||
|
},
|
||||||
|
Check: &structs.HealthCheck{
|
||||||
|
Name: "db connect",
|
||||||
|
Status: api.HealthWarning,
|
||||||
|
ServiceID: "db",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
}
|
{
|
||||||
var out struct{}
|
Datacenter: "dc1",
|
||||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
|
Node: "foo-peer",
|
||||||
|
Address: "127.0.0.3",
|
||||||
arg = structs.RegisterRequest{
|
PeerName: "peer1",
|
||||||
Datacenter: "dc1",
|
|
||||||
Node: "bar",
|
|
||||||
Address: "127.0.0.2",
|
|
||||||
Service: &structs.NodeService{
|
|
||||||
ID: "db",
|
|
||||||
Service: "db",
|
|
||||||
Tags: []string{"replica"},
|
|
||||||
},
|
|
||||||
Check: &structs.HealthCheck{
|
|
||||||
Name: "db connect",
|
|
||||||
Status: api.HealthWarning,
|
|
||||||
ServiceID: "db",
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
|
for _, reg := range args {
|
||||||
|
err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", reg, nil)
|
||||||
var out2 structs.IndexedNodeDump
|
require.NoError(t, err)
|
||||||
req := structs.DCSpecificRequest{
|
|
||||||
Datacenter: "dc1",
|
|
||||||
QueryOptions: structs.QueryOptions{Filter: "primary in Services.Tags"},
|
|
||||||
}
|
}
|
||||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &out2))
|
|
||||||
|
|
||||||
nodes := out2.Dump
|
err := s1.fsm.State().PeeringWrite(1, &pbpeering.Peering{
|
||||||
require.Len(t, nodes, 1)
|
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
|
||||||
require.Equal(t, "foo", nodes[0].Node)
|
Name: "peer1",
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
t.Run("filter on the local node", func(t *testing.T) {
|
||||||
|
var out2 structs.IndexedNodeDump
|
||||||
|
req := structs.DCSpecificRequest{
|
||||||
|
Datacenter: "dc1",
|
||||||
|
QueryOptions: structs.QueryOptions{Filter: "primary in Services.Tags"},
|
||||||
|
}
|
||||||
|
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req, &out2))
|
||||||
|
|
||||||
|
nodes := out2.Dump
|
||||||
|
require.Len(t, nodes, 1)
|
||||||
|
require.Equal(t, "foo", nodes[0].Node)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("filter on imported dump", func(t *testing.T) {
|
||||||
|
var out3 structs.IndexedNodeDump
|
||||||
|
req2 := structs.DCSpecificRequest{
|
||||||
|
Datacenter: "dc1",
|
||||||
|
QueryOptions: structs.QueryOptions{Filter: "friend in PeerName"},
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req2, &out3))
|
||||||
|
require.Len(t, out3.Dump, 0)
|
||||||
|
require.Len(t, out3.ImportedDump, 0)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("filter look for peer nodes (non local nodes)", func(t *testing.T) {
|
||||||
|
var out3 structs.IndexedNodeDump
|
||||||
|
req2 := structs.DCSpecificRequest{
|
||||||
|
QueryOptions: structs.QueryOptions{Filter: "PeerName != \"\""},
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req2, &out3))
|
||||||
|
require.Len(t, out3.Dump, 0)
|
||||||
|
require.Len(t, out3.ImportedDump, 1)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("filter look for a specific peer", func(t *testing.T) {
|
||||||
|
var out3 structs.IndexedNodeDump
|
||||||
|
req2 := structs.DCSpecificRequest{
|
||||||
|
QueryOptions: structs.QueryOptions{Filter: "PeerName == peer1"},
|
||||||
|
}
|
||||||
|
|
||||||
|
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.NodeDump", &req2, &out3))
|
||||||
|
require.Len(t, out3.Dump, 0)
|
||||||
|
require.Len(t, out3.ImportedDump, 1)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInternal_KeyringOperation(t *testing.T) {
|
func TestInternal_KeyringOperation(t *testing.T) {
|
||||||
|
@ -1665,6 +1749,89 @@ func TestInternal_GatewayServiceDump_Ingress_ACL(t *testing.T) {
|
||||||
require.Equal(t, nodes[0].Checks[0].Status, api.HealthWarning)
|
require.Equal(t, nodes[0].Checks[0].Status, api.HealthWarning)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestInternal_ServiceDump_Peering(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("too slow for testing.Short")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Parallel()
|
||||||
|
_, s1 := testServer(t)
|
||||||
|
codec := rpcClient(t, s1)
|
||||||
|
|
||||||
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||||
|
|
||||||
|
// prep the cluster with some data we can use in our filters
|
||||||
|
registerTestCatalogEntries(t, codec)
|
||||||
|
|
||||||
|
doRequest := func(t *testing.T, filter string) structs.IndexedNodesWithGateways {
|
||||||
|
t.Helper()
|
||||||
|
args := structs.DCSpecificRequest{
|
||||||
|
QueryOptions: structs.QueryOptions{Filter: filter},
|
||||||
|
}
|
||||||
|
|
||||||
|
var out structs.IndexedNodesWithGateways
|
||||||
|
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceDump", &args, &out))
|
||||||
|
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("No peerings", func(t *testing.T) {
|
||||||
|
nodes := doRequest(t, "")
|
||||||
|
// redis (3), web (3), critical (1), warning (1) and consul (1)
|
||||||
|
require.Len(t, nodes.Nodes, 9)
|
||||||
|
require.Len(t, nodes.ImportedNodes, 0)
|
||||||
|
})
|
||||||
|
|
||||||
|
addPeerService(t, codec)
|
||||||
|
|
||||||
|
err := s1.fsm.State().PeeringWrite(1, &pbpeering.Peering{
|
||||||
|
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
|
||||||
|
Name: "peer1",
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
t.Run("peerings", func(t *testing.T) {
|
||||||
|
nodes := doRequest(t, "")
|
||||||
|
// redis (3), web (3), critical (1), warning (1) and consul (1)
|
||||||
|
require.Len(t, nodes.Nodes, 9)
|
||||||
|
// service (1)
|
||||||
|
require.Len(t, nodes.ImportedNodes, 1)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("peerings w filter", func(t *testing.T) {
|
||||||
|
nodes := doRequest(t, "Node.PeerName == foo")
|
||||||
|
require.Len(t, nodes.Nodes, 0)
|
||||||
|
require.Len(t, nodes.ImportedNodes, 0)
|
||||||
|
|
||||||
|
nodes2 := doRequest(t, "Node.PeerName == peer1")
|
||||||
|
require.Len(t, nodes2.Nodes, 0)
|
||||||
|
require.Len(t, nodes2.ImportedNodes, 1)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func addPeerService(t *testing.T, codec rpc.ClientCodec) {
|
||||||
|
// prep the cluster with some data we can use in our filters
|
||||||
|
registrations := map[string]*structs.RegisterRequest{
|
||||||
|
"Peer node foo with peer service": {
|
||||||
|
Datacenter: "dc1",
|
||||||
|
Node: "foo",
|
||||||
|
ID: types.NodeID("e0155642-135d-4739-9853-a1ee6c9f945b"),
|
||||||
|
Address: "127.0.0.2",
|
||||||
|
PeerName: "peer1",
|
||||||
|
Service: &structs.NodeService{
|
||||||
|
Kind: structs.ServiceKindTypical,
|
||||||
|
ID: "serviceID",
|
||||||
|
Service: "service",
|
||||||
|
Port: 1235,
|
||||||
|
Address: "198.18.1.2",
|
||||||
|
PeerName: "peer1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
registerTestCatalogEntriesMap(t, codec, registrations)
|
||||||
|
}
|
||||||
|
|
||||||
func TestInternal_GatewayIntentions(t *testing.T) {
|
func TestInternal_GatewayIntentions(t *testing.T) {
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("too slow for testing.Short")
|
t.Skip("too slow for testing.Short")
|
||||||
|
@ -2323,6 +2490,50 @@ func TestInternal_IntentionUpstreams(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestInternal_IntentionUpstreamsDestination(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("too slow for testing.Short")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Parallel()
|
||||||
|
dir1, s1 := testServer(t)
|
||||||
|
defer os.RemoveAll(dir1)
|
||||||
|
defer s1.Shutdown()
|
||||||
|
|
||||||
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||||
|
|
||||||
|
codec := rpcClient(t, s1)
|
||||||
|
defer codec.Close()
|
||||||
|
|
||||||
|
// Services:
|
||||||
|
// api and api-proxy on node foo
|
||||||
|
// web and web-proxy on node foo
|
||||||
|
//
|
||||||
|
// Intentions
|
||||||
|
// * -> * (deny) intention
|
||||||
|
// web -> api (allow)
|
||||||
|
registerIntentionUpstreamEntries(t, codec, "")
|
||||||
|
|
||||||
|
t.Run("api.example.com", func(t *testing.T) {
|
||||||
|
retry.Run(t, func(r *retry.R) {
|
||||||
|
args := structs.ServiceSpecificRequest{
|
||||||
|
Datacenter: "dc1",
|
||||||
|
ServiceName: "web",
|
||||||
|
}
|
||||||
|
var out structs.IndexedServiceList
|
||||||
|
require.NoError(r, msgpackrpc.CallWithCodec(codec, "Internal.IntentionUpstreamsDestination", &args, &out))
|
||||||
|
|
||||||
|
// foo/api
|
||||||
|
require.Len(r, out.Services, 1)
|
||||||
|
|
||||||
|
expectUp := structs.ServiceList{
|
||||||
|
structs.NewServiceName("api.example.com", structs.DefaultEnterpriseMetaInDefaultPartition()),
|
||||||
|
}
|
||||||
|
require.Equal(r, expectUp, out.Services)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestInternal_IntentionUpstreams_BlockOnNoChange(t *testing.T) {
|
func TestInternal_IntentionUpstreams_BlockOnNoChange(t *testing.T) {
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("too slow for testing.Short")
|
t.Skip("too slow for testing.Short")
|
||||||
|
@ -2565,3 +2776,38 @@ func TestInternal_CatalogOverview_ACLDeny(t *testing.T) {
|
||||||
arg.Token = opReadToken.SecretID
|
arg.Token = opReadToken.SecretID
|
||||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.CatalogOverview", &arg, &out))
|
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.CatalogOverview", &arg, &out))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestInternal_PeeredUpstreams(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("too slow for testing.Short")
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Parallel()
|
||||||
|
_, s1 := testServerWithConfig(t)
|
||||||
|
|
||||||
|
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||||
|
|
||||||
|
// Services
|
||||||
|
// api local
|
||||||
|
// web peer: peer-a
|
||||||
|
// web-proxy peer: peer-a
|
||||||
|
// web peer: peer-b
|
||||||
|
// web-proxy peer: peer-b
|
||||||
|
registerLocalAndRemoteServicesVIPEnabled(t, s1.fsm.State())
|
||||||
|
|
||||||
|
codec := rpcClient(t, s1)
|
||||||
|
|
||||||
|
args := structs.PartitionSpecificRequest{
|
||||||
|
Datacenter: "dc1",
|
||||||
|
EnterpriseMeta: *acl.DefaultEnterpriseMeta(),
|
||||||
|
}
|
||||||
|
var out structs.IndexedPeeredServiceList
|
||||||
|
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.PeeredUpstreams", &args, &out))
|
||||||
|
|
||||||
|
require.Len(t, out.Services, 2)
|
||||||
|
expect := []structs.PeeredServiceName{
|
||||||
|
{Peer: "peer-a", ServiceName: structs.NewServiceName("web", structs.DefaultEnterpriseMetaInDefaultPartition())},
|
||||||
|
{Peer: "peer-b", ServiceName: structs.NewServiceName("web", structs.DefaultEnterpriseMetaInDefaultPartition())},
|
||||||
|
}
|
||||||
|
require.Equal(t, expect, out.Services)
|
||||||
|
}
|
||||||
|
|
|
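The filter strings exercised by these tests (`primary in Services.Tags`, `PeerName != ""`, `PeerName == peer1`) use the same bexpr grammar external callers can pass on catalog queries. A minimal sketch using the official Go API client against a local agent follows; the specific filter value is an illustrative assumption, not taken from this commit:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Assumes a Consul agent reachable at the default local address.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// QueryOptions.Filter carries a bexpr expression, the same grammar the
	// Internal.NodeDump tests above exercise via structs.QueryOptions.
	nodes, _, err := client.Catalog().Nodes(&api.QueryOptions{
		Filter: `Node != ""`, // illustrative filter; matches every named node
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nodes {
		fmt.Println(n.Node, n.Address)
	}
}
```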
@@ -11,6 +11,7 @@ import (
 	"github.com/hashicorp/go-memdb"
 
 	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/acl/resolver"
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/api"
@@ -32,7 +33,7 @@ type KVS struct {
 // preApply does all the verification of a KVS update that is performed BEFORE
 // we submit as a Raft log entry. This includes enforcing the lock delay which
 // must only be done on the leader.
-func kvsPreApply(logger hclog.Logger, srv *Server, authz ACLResolveResult, op api.KVOp, dirEnt *structs.DirEntry) (bool, error) {
+func kvsPreApply(logger hclog.Logger, srv *Server, authz resolver.Result, op api.KVOp, dirEnt *structs.DirEntry) (bool, error) {
 	// Verify the entry.
 	if dirEnt.Key == "" && op != api.KVDeleteTree {
 		return false, fmt.Errorf("Must provide key")
@@ -23,6 +23,7 @@ import (
 	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/metadata"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/agent/structs/aclfilter"
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/lib"
 	"github.com/hashicorp/consul/logging"
@@ -47,6 +48,9 @@ var LeaderSummaries = []prometheus.SummaryDefinition{
 const (
 	newLeaderEvent      = "consul:new-leader"
 	barrierWriteTimeout = 2 * time.Minute
+
+	defaultDeletionRoundBurst int        = 5  // number of replication round bursts
+	defaultDeletionApplyRate  rate.Limit = 10 // raft applies per second
 )
 
 var (
@@ -313,6 +317,8 @@ func (s *Server) establishLeadership(ctx context.Context) error {
 
 	s.startPeeringStreamSync(ctx)
 
+	s.startDeferredDeletion(ctx)
+
 	if err := s.startConnectLeader(ctx); err != nil {
 		return err
 	}
@@ -380,7 +386,7 @@ func (s *Server) initializeACLs(ctx context.Context) error {
 
 	// Remove any token affected by CVE-2019-8336
 	if !s.InPrimaryDatacenter() {
-		_, token, err := s.fsm.State().ACLTokenGetBySecret(nil, redactedToken, nil)
+		_, token, err := s.fsm.State().ACLTokenGetBySecret(nil, aclfilter.RedactedToken, nil)
 		if err == nil && token != nil {
 			req := structs.ACLTokenBatchDeleteRequest{
 				TokenIDs: []string{token.AccessorID},
@@ -751,6 +757,16 @@ func (s *Server) stopACLReplication() {
 	s.leaderRoutineManager.Stop(aclTokenReplicationRoutineName)
 }
 
+func (s *Server) startDeferredDeletion(ctx context.Context) {
+	s.startPeeringDeferredDeletion(ctx)
+	s.startTenancyDeferredDeletion(ctx)
+}
+
+func (s *Server) stopDeferredDeletion() {
+	s.leaderRoutineManager.Stop(peeringDeletionRoutineName)
+	s.stopTenancyDeferredDeletion()
+}
+
 func (s *Server) startConfigReplication(ctx context.Context) {
 	if s.config.PrimaryDatacenter == "" || s.config.PrimaryDatacenter == s.config.Datacenter {
 		// replication shouldn't run in the primary DC
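The two deletion constants added above feed a token-bucket limiter from `golang.org/x/time/rate`. A minimal standalone sketch of the pattern; the work inside the loop is a stand-in, not code from this commit:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// Allow 10 operations per second with a burst of 5, mirroring the shape of
	// defaultDeletionApplyRate / defaultDeletionRoundBurst above.
	limiter := rate.NewLimiter(rate.Limit(10), 5)

	ctx := context.Background()
	for i := 0; i < 20; i++ {
		// Wait blocks until a token is available or ctx is cancelled,
		// pacing raft-apply-like work without unbounded bursts.
		if err := limiter.Wait(ctx); err != nil {
			return
		}
		fmt.Println("apply", i)
	}
}
```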
@@ -1412,6 +1412,20 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au
 		if err := allow.NodeWriteAllowed(v.Agent, &authzContext); err != nil {
 			return nil, err
 		}
+	case *connect.SpiffeIDMeshGateway:
+		// TODO(peering): figure out what is appropriate here for ACLs
+		v.GetEnterpriseMeta().FillAuthzContext(&authzContext)
+		if err := allow.MeshWriteAllowed(&authzContext); err != nil {
+			return nil, err
+		}
+
+		// Verify that the DC in the gateway URI matches us. We might relax this
+		// requirement later but being restrictive for now is safer.
+		dc := c.serverConf.Datacenter
+		if v.Datacenter != dc {
+			return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different datacenter: %s, "+
+				"we are %s", v.Datacenter, dc)
+		}
 	default:
 		return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service or agent ID")
 	}
@@ -1436,18 +1450,25 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
 	signingID := connect.SpiffeIDSigningForCluster(config.ClusterID)
 	serviceID, isService := spiffeID.(*connect.SpiffeIDService)
 	agentID, isAgent := spiffeID.(*connect.SpiffeIDAgent)
-	if !isService && !isAgent {
-		return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service or agent ID")
-	}
+	mgwID, isMeshGateway := spiffeID.(*connect.SpiffeIDMeshGateway)
 
 	var entMeta acl.EnterpriseMeta
-	if isService {
+	switch {
+	case isService:
 		if !signingID.CanSign(spiffeID) {
 			return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+
 				"we are %s", serviceID.Host, signingID.Host())
 		}
 		entMeta.Merge(serviceID.GetEnterpriseMeta())
-	} else {
+
+	case isMeshGateway:
+		if !signingID.CanSign(spiffeID) {
+			return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+
+				"we are %s", mgwID.Host, signingID.Host())
+		}
+		entMeta.Merge(mgwID.GetEnterpriseMeta())
+
+	case isAgent:
 		// isAgent - if we support more ID types then this would need to be an else if
 		// here we are just automatically fixing the trust domain. For auto-encrypt and
 		// auto-config they make certificate requests before learning about the roots
@@ -1471,6 +1492,9 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
 			csr.URIs = uris
 		}
 		entMeta.Merge(agentID.GetEnterpriseMeta())
+
+	default:
+		return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service, agent, or mesh gateway ID")
 	}
 
 	commonCfg, err := config.GetCommonConfig()
@@ -1548,12 +1572,19 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
 			CreateIndex: modIdx,
 		},
 	}
-	if isService {
+
+	switch {
+	case isService:
 		reply.Service = serviceID.Service
 		reply.ServiceURI = cert.URIs[0].String()
-	} else if isAgent {
+	case isMeshGateway:
+		reply.Kind = structs.ServiceKindMeshGateway
+		reply.KindURI = cert.URIs[0].String()
+	case isAgent:
 		reply.Agent = agentID.Agent
 		reply.AgentURI = cert.URIs[0].String()
+	default:
+		return nil, errors.New("not possible")
 	}
 
 	return &reply, nil
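The change above replaces an if/else over two type assertions with a switch over three, so each SPIFFE ID kind gets its own trust-domain check and an explicit default that fails loudly. A generic sketch of that dispatch shape; the ID types here are stand-ins, not Consul's:

```go
package main

import (
	"errors"
	"fmt"
)

type serviceID struct{ name string }
type agentID struct{ name string }
type meshGatewayID struct{ dc string }

// sign dispatches on the concrete ID type. The explicit default means a newly
// added kind returns an error instead of silently taking the wrong branch.
func sign(id interface{}) (string, error) {
	svc, isService := id.(*serviceID)
	agent, isAgent := id.(*agentID)
	mgw, isMeshGateway := id.(*meshGatewayID)

	switch {
	case isService:
		return "service:" + svc.name, nil
	case isMeshGateway:
		return "mesh-gateway:" + mgw.dc, nil
	case isAgent:
		return "agent:" + agent.name, nil
	default:
		return "", errors.New("unsupported ID type")
	}
}

func main() {
	out, err := sign(&meshGatewayID{dc: "dc1"})
	fmt.Println(out, err)
}
```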
@@ -8,16 +8,21 @@ import (
 	"fmt"
 	"net"
 
-	"github.com/hashicorp/consul/agent/rpc/peering"
 	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/go-uuid"
+	"golang.org/x/time/rate"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
 
+	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/pool"
+	"github.com/hashicorp/consul/agent/rpc/peering"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/logging"
 	"github.com/hashicorp/consul/proto/pbpeering"
 )
 
@@ -50,6 +55,39 @@ func (s *Server) stopPeeringStreamSync() {
 // syncPeeringsAndBlock is a long-running goroutine that is responsible for watching
 // changes to peerings in the state store and managing streams to those peers.
 func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger, cancelFns map[string]context.CancelFunc) error {
+	// We have to be careful not to introduce a data race here. We want to
+	// compare the current known peerings in the state store with known
+	// connected streams to know when we should TERMINATE stray peerings.
+	//
+	// If you read the current peerings from the state store, then read the
+	// current established streams you could lose the data race and have the
+	// sequence of events be:
+	//
+	//   1. list peerings [A,B,C]
+	//   2. persist new peering [D]
+	//   3. accept new stream for [D]
+	//   4. list streams [A,B,C,D]
+	//   5. terminate [D]
+	//
+	// Which is wrong. If we instead ensure that (4) happens before (1), given
+	// that you can't get an established stream without first passing a "does
+	// this peering exist in the state store?" inquiry then this happens:
+	//
+	//   1. list streams [A,B,C]
+	//   2. list peerings [A,B,C]
+	//   3. persist new peering [D]
+	//   4. accept new stream for [D]
+	//   5. terminate []
+	//
+	// Or even this is fine:
+	//
+	//   1. list streams [A,B,C]
+	//   2. persist new peering [D]
+	//   3. accept new stream for [D]
+	//   4. list peerings [A,B,C,D]
+	//   5. terminate []
+	connectedStreams := s.peeringService.ConnectedStreams()
+
 	state := s.fsm.State()
 
 	// Pull the state store contents and set up to block for changes.
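The new comment block pins down why the stream snapshot must be taken before the state-store read. A reduced sketch of that ordering rule under assumed types, with two callbacks standing in for the stream registry and the state store:

```go
package main

import "fmt"

// reconcile terminates streams that have no backing peering. Snapshotting the
// connected streams *before* listing peerings guarantees that any stream
// accepted after the snapshot can never be flagged by this pass, because a
// stream is only accepted once its peering is already persisted.
func reconcile(snapshotStreams, listPeerings func() map[string]struct{}) []string {
	connected := snapshotStreams() // step 1: streams first
	stored := listPeerings()       // step 2: peerings second

	var terminate []string
	for id := range connected {
		if _, ok := stored[id]; !ok {
			terminate = append(terminate, id)
		}
	}
	return terminate
}

func main() {
	streams := map[string]struct{}{"A": {}, "B": {}}
	// "D" was persisted (and its stream accepted) after the snapshot.
	peerings := map[string]struct{}{"A": {}, "B": {}, "D": {}}
	fmt.Println(reconcile(
		func() map[string]struct{} { return streams },
		func() map[string]struct{} { return peerings },
	)) // [] — D is never terminated by a stale pass
}
```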
@@ -81,18 +119,24 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
 	for _, peer := range peers {
 		logger.Trace("evaluating stored peer", "peer", peer.Name, "should_dial", peer.ShouldDial(), "sequence_id", seq)
 
-		if !peer.ShouldDial() {
+		if !peer.IsActive() {
+			// The peering was marked for deletion by ourselves or our peer, no need to dial or track them.
 			continue
 		}
 
-		// TODO(peering) Account for deleted peers that are still in the state store
+		// Track all active peerings, since the reconciliation loop below applies to the token generator as well.
 		stored[peer.ID] = struct{}{}
 
+		if !peer.ShouldDial() {
+			// We do not need to dial peerings where we generated the peering token.
+			continue
+		}
+
 		status, found := s.peeringService.StreamStatus(peer.ID)
 
 		// TODO(peering): If there is new peering data and a connected stream, should we tear down the stream?
 		// If the data in the updated token is bad, the user wouldn't know until the old servers/certs become invalid.
-		// Alternatively we could do a basic Ping from the initiate peering endpoint to avoid dealing with that here.
+		// Alternatively we could do a basic Ping from the establish peering endpoint to avoid dealing with that here.
 		if found && status.Connected {
 			// Nothing to do when we already have an active stream to the peer.
 			continue
@@ -121,7 +165,7 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
 
 	// Clean up active streams of peerings that were deleted from the state store.
 	// TODO(peering): This is going to trigger shutting down peerings we generated a token for. Is that OK?
-	for stream, doneCh := range s.peeringService.ConnectedStreams() {
+	for stream, doneCh := range connectedStreams {
 		if _, ok := stored[stream]; ok {
 			// Active stream is in the state store, nothing to do.
 			continue
@@ -146,6 +190,8 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
 }
 
 func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer *pbpeering.Peering, cancelFns map[string]context.CancelFunc) error {
+	logger = logger.With("peer_name", peer.Name, "peer_id", peer.ID)
+
 	tlsOption := grpc.WithInsecure()
 	if len(peer.PeerCAPems) > 0 {
 		var haveCerts bool
@@ -175,7 +221,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer
 		buffer = buffer.Next()
 	}
 
-	logger.Trace("establishing stream to peer", "peer_id", peer.ID)
+	logger.Trace("establishing stream to peer")
 
 	retryCtx, cancel := context.WithCancel(ctx)
 	cancelFns[peer.ID] = cancel
@@ -191,7 +237,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer
 			return fmt.Errorf("peer server address type %T is not a string", buffer.Value)
 		}
 
-		logger.Trace("dialing peer", "peer_id", peer.ID, "addr", addr)
+		logger.Trace("dialing peer", "addr", addr)
 		conn, err := grpc.DialContext(retryCtx, addr,
 			grpc.WithContextDialer(newPeerDialer(addr)),
 			grpc.WithBlock(),
@@ -208,16 +254,23 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer
 			return err
 		}
 
-		err = s.peeringService.HandleStream(peering.HandleStreamRequest{
+		streamReq := peering.HandleStreamRequest{
 			LocalID:   peer.ID,
 			RemoteID:  peer.PeerID,
 			PeerName:  peer.Name,
 			Partition: peer.Partition,
 			Stream:    stream,
-		})
+		}
+		err = s.peeringService.HandleStream(streamReq)
+		// A nil error indicates that the peering was deleted and the stream needs to be gracefully shut down.
 		if err == nil {
+			stream.CloseSend()
+			s.peeringService.DrainStream(streamReq)
+
 			// This will cancel the retry-er context, letting us break out of this loop when we want to shut down the stream.
 			cancel()
+
+			logger.Info("closed outbound stream")
 		}
 		return err
@@ -249,3 +302,156 @@ func newPeerDialer(peerAddr string) func(context.Context, string) (net.Conn, error) {
 		return conn, nil
 	}
 }
+
+func (s *Server) startPeeringDeferredDeletion(ctx context.Context) {
+	s.leaderRoutineManager.Start(ctx, peeringDeletionRoutineName, s.runPeeringDeletions)
+}
+
+// runPeeringDeletions watches for peerings marked for deletion and then cleans up data for them.
+func (s *Server) runPeeringDeletions(ctx context.Context) error {
+	logger := s.loggers.Named(logging.Peering)
+
+	// This limiter's purpose is to control the rate of raft applies caused by the deferred deletion
+	// process. This includes deletion of the peerings themselves in addition to any peering data.
+	raftLimiter := rate.NewLimiter(defaultDeletionApplyRate, int(defaultDeletionApplyRate))
+	for {
+		ws := memdb.NewWatchSet()
+		state := s.fsm.State()
+		_, peerings, err := s.fsm.State().PeeringListDeleted(ws)
+		if err != nil {
+			logger.Warn("encountered an error while searching for deleted peerings", "error", err)
+			continue
+		}
+
+		if len(peerings) == 0 {
+			ws.Add(state.AbandonCh())
+
+			// wait for a peering to be deleted or the routine to be cancelled
+			if err := ws.WatchCtx(ctx); err != nil {
+				return err
+			}
+			continue
+		}
+
+		for _, p := range peerings {
+			s.removePeeringAndData(ctx, logger, raftLimiter, p)
+		}
+	}
+}
+
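`runPeeringDeletions` above is a standard go-memdb watch loop: collect watch channels during the read, then block on the set until the watched data changes or the context ends. A minimal sketch of that loop shape, with the peering query replaced by a stand-in callback:

```go
package main

import (
	"context"
	"time"

	"github.com/hashicorp/go-memdb"
)

// watchLoop blocks on a WatchSet populated during a read, waking only when
// the watched data changes or ctx is cancelled — the same shape as
// runPeeringDeletions, minus the deletion work.
func watchLoop(ctx context.Context, read func(ws memdb.WatchSet) (done bool)) error {
	for {
		ws := memdb.NewWatchSet()
		if read(ws) {
			return nil
		}
		// WatchCtx returns nil when a watched channel fires, or the context
		// error on cancellation/timeout.
		if err := ws.WatchCtx(ctx); err != nil {
			return err
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	// With no channels added, the loop simply waits out the context.
	_ = watchLoop(ctx, func(ws memdb.WatchSet) bool { return false })
}
```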
+// removePeeringAndData removes data imported for a peering and the peering itself.
+func (s *Server) removePeeringAndData(ctx context.Context, logger hclog.Logger, limiter *rate.Limiter, peer *pbpeering.Peering) {
+	logger = logger.With("peer_name", peer.Name, "peer_id", peer.ID)
+	entMeta := *structs.NodeEnterpriseMetaInPartition(peer.Partition)
+
+	// First delete all imported data.
+	// By deleting all imported nodes we also delete all services and checks registered on them.
+	if err := s.deleteAllNodes(ctx, limiter, entMeta, peer.Name); err != nil {
+		logger.Error("Failed to remove Nodes for peer", "error", err)
+		return
+	}
+	if err := s.deleteTrustBundleFromPeer(ctx, limiter, entMeta, peer.Name); err != nil {
+		logger.Error("Failed to remove trust bundle for peer", "error", err)
+		return
+	}
+
+	if err := limiter.Wait(ctx); err != nil {
+		return
+	}
+
+	if peer.State == pbpeering.PeeringState_TERMINATED {
+		// For peerings terminated by our peer we only clean up the local data, we do not delete the peering itself.
+		// This is to avoid a situation where the peering disappears without the local operator's knowledge.
+		return
+	}
+
+	// Once all imported data is deleted, the peering itself is also deleted.
+	req := &pbpeering.PeeringDeleteRequest{
+		Name:      peer.Name,
+		Partition: acl.PartitionOrDefault(peer.Partition),
+	}
+	_, err := s.raftApplyProtobuf(structs.PeeringDeleteType, req)
+	if err != nil {
+		logger.Error("failed to apply full peering deletion", "error", err)
+		return
+	}
+}
+
+// deleteAllNodes will delete all nodes in a partition or all nodes imported from a given peer name.
+func (s *Server) deleteAllNodes(ctx context.Context, limiter *rate.Limiter, entMeta acl.EnterpriseMeta, peerName string) error {
+	// Same as ACL batch upsert size
+	nodeBatchSizeBytes := 256 * 1024
+
+	_, nodes, err := s.fsm.State().NodeDump(nil, &entMeta, peerName)
+	if err != nil {
+		return err
+	}
+	if len(nodes) == 0 {
+		return nil
+	}
+
+	i := 0
+	for {
+		var ops structs.TxnOps
+		for batchSize := 0; batchSize < nodeBatchSizeBytes && i < len(nodes); i++ {
+			entry := nodes[i]
+
+			op := structs.TxnOp{
+				Node: &structs.TxnNodeOp{
+					Verb: api.NodeDelete,
+					Node: structs.Node{
+						Node:      entry.Node,
+						Partition: entry.Partition,
+						PeerName:  entry.PeerName,
+					},
+				},
+			}
+			ops = append(ops, &op)
+
+			// Add entries to the transaction until it reaches the max batch size
+			batchSize += len(entry.Node) + len(entry.Partition) + len(entry.PeerName)
+		}
+
+		// Send each batch as a TXN Req to avoid sending one at a time
+		req := structs.TxnRequest{
+			Datacenter: s.config.Datacenter,
+			Ops:        ops,
+		}
+		if len(req.Ops) > 0 {
+			if err := limiter.Wait(ctx); err != nil {
+				return err
+			}
+
+			_, err := s.raftApplyMsgpack(structs.TxnRequestType, &req)
+			if err != nil {
+				return err
+			}
+		} else {
+			break
+		}
+	}
+
+	return nil
+}
+
+// deleteTrustBundleFromPeer deletes the trust bundle imported from a peer, if present.
+func (s *Server) deleteTrustBundleFromPeer(ctx context.Context, limiter *rate.Limiter, entMeta acl.EnterpriseMeta, peerName string) error {
+	_, bundle, err := s.fsm.State().PeeringTrustBundleRead(nil, state.Query{Value: peerName, EnterpriseMeta: entMeta})
+	if err != nil {
+		return err
+	}
+	if bundle == nil {
+		return nil
+	}
+
+	if err := limiter.Wait(ctx); err != nil {
+		return err
+	}
+
+	req := &pbpeering.PeeringTrustBundleDeleteRequest{
+		Name:      peerName,
+		Partition: entMeta.PartitionOrDefault(),
+	}
+	_, err = s.raftApplyProtobuf(structs.PeeringTrustBundleDeleteType, req)
+	return err
+}
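`deleteAllNodes` caps each raft transaction by an approximate byte budget rather than a fixed op count. A standalone sketch of that batching pattern; the size proxy and the apply function are stand-ins, not this commit's types:

```go
package main

import "fmt"

// batchByBytes groups items into batches whose approximate encoded size stays
// under budget, mirroring how deleteAllNodes packs TxnOps before each apply.
func batchByBytes(items []string, budget int, apply func(batch []string)) {
	i := 0
	for i < len(items) {
		var batch []string
		for size := 0; size < budget && i < len(items); i++ {
			batch = append(batch, items[i])
			size += len(items[i]) // rough size proxy, like summing field lengths
		}
		apply(batch)
	}
}

func main() {
	items := []string{"node-a", "node-b", "node-c", "node-d"}
	batchByBytes(items, 12, func(batch []string) {
		fmt.Println("apply batch:", batch)
	})
}
```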
@@ -10,8 +10,10 @@ import (
 	"github.com/stretchr/testify/require"
 	"google.golang.org/grpc"
 
+	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/consul/state"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/proto/pbpeering"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/consul/testrpc"
@@ -60,6 +62,10 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
 	_, found := s1.peeringService.StreamStatus(token.PeerID)
 	require.False(t, found)
 
+	var (
+		s2PeerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
+	)
+
 	// Bring up s2 and store s1's token so that it attempts to dial.
 	_, s2 := testServerWithConfig(t, func(c *Config) {
 		c.NodeName = "s2.dc2"
@@ -71,6 +77,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
 	// Simulate a peering initiation event by writing a peering with data from a peering token.
 	// Eventually the leader in dc2 should dial and connect to the leader in dc1.
 	p := &pbpeering.Peering{
+		ID:          s2PeerID,
 		Name:        "my-peer-s1",
 		PeerID:      token.PeerID,
 		PeerCAPems:  token.CA,
@@ -88,10 +95,13 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
 		require.True(r, status.Connected)
 	})
 
-	// Delete the peering to trigger the termination sequence
-	require.NoError(t, s2.fsm.State().PeeringDelete(2000, state.Query{
-		Value: "my-peer-s1",
-	}))
+	// Delete the peering to trigger the termination sequence.
+	deleted := &pbpeering.Peering{
+		ID:        s2PeerID,
+		Name:      "my-peer-s1",
+		DeletedAt: structs.TimeToProto(time.Now()),
+	}
+	require.NoError(t, s2.fsm.State().PeeringWrite(2000, deleted))
 	s2.logger.Trace("deleted peering for my-peer-s1")
 
 	retry.Run(t, func(r *retry.R) {
@@ -147,6 +157,11 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
 	var token structs.PeeringToken
 	require.NoError(t, json.Unmarshal(tokenJSON, &token))
 
+	var (
+		s1PeerID = token.PeerID
+		s2PeerID = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
+	)
+
 	// Bring up s2 and store s1's token so that it attempts to dial.
 	_, s2 := testServerWithConfig(t, func(c *Config) {
 		c.NodeName = "s2.dc2"
@@ -158,6 +173,7 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
 	// Simulate a peering initiation event by writing a peering with data from a peering token.
 	// Eventually the leader in dc2 should dial and connect to the leader in dc1.
 	p := &pbpeering.Peering{
+		ID:          s2PeerID,
 		Name:        "my-peer-s1",
 		PeerID:      token.PeerID,
 		PeerCAPems:  token.CA,
@@ -175,10 +191,13 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
 		require.True(r, status.Connected)
 	})
 
-	// Delete the peering from the server peer to trigger the termination sequence
-	require.NoError(t, s1.fsm.State().PeeringDelete(2000, state.Query{
-		Value: "my-peer-s2",
-	}))
+	// Delete the peering from the server peer to trigger the termination sequence.
+	deleted := &pbpeering.Peering{
+		ID:        s1PeerID,
+		Name:      "my-peer-s2",
+		DeletedAt: structs.TimeToProto(time.Now()),
+	}
+	require.NoError(t, s1.fsm.State().PeeringWrite(2000, deleted))
 	s2.logger.Trace("deleted peering for my-peer-s1")
 
 	retry.Run(t, func(r *retry.R) {
@@ -186,7 +205,7 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
 		require.False(r, found)
 	})
 
-	// s2 should have received the termination message and updated the peering state
+	// s2 should have received the termination message and updated the peering state.
 	retry.Run(t, func(r *retry.R) {
 		_, peering, err := s2.fsm.State().PeeringRead(nil, state.Query{
 			Value: "my-peer-s1",
@@ -195,3 +214,162 @@ func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
 		require.Equal(r, pbpeering.PeeringState_TERMINATED, peering.State)
 	})
 }
+
+func TestLeader_Peering_DeferredDeletion(t *testing.T) {
+	if testing.Short() {
+		t.Skip("too slow for testing.Short")
+	}
+
+	// TODO(peering): Configure with TLS
+	_, s1 := testServerWithConfig(t, func(c *Config) {
+		c.NodeName = "s1.dc1"
+		c.Datacenter = "dc1"
+		c.TLSConfig.Domain = "consul"
+	})
+	testrpc.WaitForLeader(t, s1.RPC, "dc1")
+
+	var (
+		peerID      = "cc56f0b8-3885-4e78-8d7b-614a0c45712d"
+		peerName    = "my-peer-s2"
+		defaultMeta = acl.DefaultEnterpriseMeta()
+		lastIdx     = uint64(0)
+	)
+
+	// Simulate a peering initiation event by writing a peering to the state store.
+	lastIdx++
+	require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{
+		ID:   peerID,
+		Name: peerName,
+	}))
+
+	// Insert imported data: nodes, services, checks, trust bundle
+	lastIdx = insertTestPeeringData(t, s1.fsm.State(), peerName, lastIdx)
+
+	// Mark the peering for deletion to trigger the termination sequence.
+	lastIdx++
+	require.NoError(t, s1.fsm.State().PeeringWrite(lastIdx, &pbpeering.Peering{
+		ID:        peerID,
+		Name:      peerName,
+		DeletedAt: structs.TimeToProto(time.Now()),
+	}))
+
+	// Ensure imported data is gone:
+	retry.Run(t, func(r *retry.R) {
+		_, csn, err := s1.fsm.State().ServiceDump(nil, "", false, defaultMeta, peerName)
+		require.NoError(r, err)
+		require.Len(r, csn, 0)
+
+		_, checks, err := s1.fsm.State().ChecksInState(nil, api.HealthAny, defaultMeta, peerName)
+		require.NoError(r, err)
+		require.Len(r, checks, 0)
+
+		_, nodes, err := s1.fsm.State().NodeDump(nil, defaultMeta, peerName)
+		require.NoError(r, err)
+		require.Len(r, nodes, 0)
+
+		_, tb, err := s1.fsm.State().PeeringTrustBundleRead(nil, state.Query{Value: peerName})
+		require.NoError(r, err)
+		require.Nil(r, tb)
+	})
+
+	// The leader routine should pick up the deletion and finish deleting the peering.
+	retry.Run(t, func(r *retry.R) {
+		_, peering, err := s1.fsm.State().PeeringRead(nil, state.Query{
+			Value: peerName,
+		})
+		require.NoError(r, err)
+		require.Nil(r, peering)
+	})
+}
+
+func insertTestPeeringData(t *testing.T, store *state.Store, peer string, lastIdx uint64) uint64 {
+	lastIdx++
+	require.NoError(t, store.PeeringTrustBundleWrite(lastIdx, &pbpeering.PeeringTrustBundle{
+		TrustDomain: "952e6bd1-f4d6-47f7-83ff-84b31babaa17",
+		PeerName:    peer,
+		RootPEMs:    []string{"certificate bundle"},
+	}))
+
+	lastIdx++
+	require.NoError(t, store.EnsureRegistration(lastIdx, &structs.RegisterRequest{
+		Node:     "aaa",
+		Address:  "10.0.0.1",
+		PeerName: peer,
+		Service: &structs.NodeService{
+			Service:  "a-service",
+			ID:       "a-service-1",
+			Port:     8080,
+			PeerName: peer,
+		},
+		Checks: structs.HealthChecks{
+			{
+				CheckID:     "a-service-1-check",
+				ServiceName: "a-service",
+				ServiceID:   "a-service-1",
+				Node:        "aaa",
+				PeerName:    peer,
+			},
+			{
+				CheckID:  structs.SerfCheckID,
+				Node:     "aaa",
+				PeerName: peer,
+			},
+		},
+	}))
+
+	lastIdx++
+	require.NoError(t, store.EnsureRegistration(lastIdx, &structs.RegisterRequest{
+		Node:     "bbb",
+		Address:  "10.0.0.2",
+		PeerName: peer,
+		Service: &structs.NodeService{
+			Service:  "b-service",
+			ID:       "b-service-1",
+			Port:     8080,
+			PeerName: peer,
+		},
+		Checks: structs.HealthChecks{
+			{
+				CheckID:     "b-service-1-check",
+				ServiceName: "b-service",
+				ServiceID:   "b-service-1",
+				Node:        "bbb",
+				PeerName:    peer,
+			},
+			{
+				CheckID:  structs.SerfCheckID,
+				Node:     "bbb",
+				PeerName: peer,
+			},
+		},
+	}))
+
+	lastIdx++
+	require.NoError(t, store.EnsureRegistration(lastIdx, &structs.RegisterRequest{
+		Node:     "ccc",
+		Address:  "10.0.0.3",
+		PeerName: peer,
+		Service: &structs.NodeService{
+			Service:  "c-service",
+			ID:       "c-service-1",
+			Port:     8080,
+			PeerName: peer,
+		},
+		Checks: structs.HealthChecks{
+			{
+				CheckID:     "c-service-1-check",
+				ServiceName: "c-service",
+				ServiceID:   "c-service-1",
+				Node:        "ccc",
+				PeerName:    peer,
+			},
+			{
+				CheckID:  structs.SerfCheckID,
+				Node:     "ccc",
+				PeerName: peer,
+			},
+		},
+	}))
+
+	return lastIdx
+}
@@ -2258,7 +2258,8 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
 	})
 	require.NoError(t, err)
 
-	vip, err := state.VirtualIPForService(structs.NewServiceName("api", nil))
+	psn := structs.PeeredServiceName{ServiceName: structs.NewServiceName("api", nil)}
+	vip, err := state.VirtualIPForService(psn)
 	require.NoError(t, err)
 	require.Equal(t, "", vip)
 
@@ -2287,7 +2288,8 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
 
 	// Make sure the service referenced in the terminating gateway config doesn't have
 	// a virtual IP yet.
-	vip, err = state.VirtualIPForService(structs.NewServiceName("bar", nil))
+	psn = structs.PeeredServiceName{ServiceName: structs.NewServiceName("bar", nil)}
+	vip, err = state.VirtualIPForService(psn)
 	require.NoError(t, err)
 	require.Equal(t, "", vip)
 
@@ -2316,8 +2318,8 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
 		},
 	})
 	require.NoError(t, err)
-
+	psn = structs.PeeredServiceName{ServiceName: structs.NewServiceName("api", nil)}
-	vip, err = state.VirtualIPForService(structs.NewServiceName("api", nil))
+	vip, err = state.VirtualIPForService(psn)
 	require.NoError(t, err)
 	require.Equal(t, "240.0.0.1", vip)
 
@@ -2345,7 +2347,8 @@ func TestLeader_EnableVirtualIPs(t *testing.T) {
 
 	// Make sure the baz service (only referenced in the config entry so far)
 	// has a virtual IP.
-	vip, err = state.VirtualIPForService(structs.NewServiceName("baz", nil))
+	psn = structs.PeeredServiceName{ServiceName: structs.NewServiceName("baz", nil)}
+	vip, err = state.VirtualIPForService(psn)
 	require.NoError(t, err)
 	require.Equal(t, "240.0.0.2", vip)
 }
Some files were not shown because too many files have changed in this diff.