Merge branch 'main' into fix-kv_entries-metric

Freddy 2022-08-01 13:19:27 -06:00 committed by GitHub
commit dacf703d20
GPG Key ID: 4AEE18F83AFDEB23
892 changed files with 48461 additions and 20030 deletions

.changelog/13532.txt Normal file

@ -0,0 +1,3 @@
```release-note:breaking-change
telemetry: config flag `telemetry { disable_compat_1.9 = (true|false) }` has been removed. Remove this flag from your config before upgrading if it is in use.
```
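A minimal sketch of the agent configuration block this entry refers to; an agent that still sets the removed flag should delete it before upgrading:

```hcl
telemetry {
  # Removed flag: delete this line from the agent config before upgrading.
  disable_compat_1.9 = true
}
```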

.changelog/13607.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
xds: Fix a bug that resulted in Lambda services not using the payload-passthrough option as expected.
```

.changelog/13658.txt Normal file

@ -0,0 +1,3 @@
```release-note:feature
streaming: Added topics for `ingress-gateway`, `mesh`, `service-intentions` and `service-resolver` config entry events.
```

.changelog/13677.txt Normal file

@ -0,0 +1,4 @@
```release-note:feature
cli: Added a new flag to `config delete` for deleting a config entry specified in a valid config file, e.g., `config delete -filename intention-allow.hcl`
```
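As a hedged illustration, the file passed to `-filename` can be any valid config entry definition; the contents below are hypothetical and only show the shape of such a file:

```hcl
# intention-allow.hcl (hypothetical contents)
Kind = "service-intentions"
Name = "web"

Sources = [
  {
    Name   = "api"
    Action = "allow"
  }
]
```

Deleting it would then be a matter of `config delete -filename intention-allow.hcl`, which identifies the entry by its `Kind` and `Name`.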

.changelog/13686.txt Normal file

@ -0,0 +1,3 @@
```release-note:enhancement
ui: Add new CopyableCode component and use it in certain pre-existing areas
```

.changelog/13687.txt Normal file

@ -0,0 +1,3 @@
```release-note:feature
server: Broadcast the public gRPC port using LAN Serf and update the consul service in the catalog with the same data
```
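A minimal sketch (hypothetical values) of the agent setting that exposes the gRPC port being advertised here:

```hcl
# Agent config: the gRPC port set here is what gets broadcast via LAN Serf
# and reflected on the consul service in the catalog.
ports {
  grpc = 8502
}
```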

.changelog/13699.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
xds: Fix a bug where terminating gateway upstream clusters weren't configured properly when the service protocol was `http2`.
```

.changelog/13722.txt Normal file

@ -0,0 +1,3 @@
```release-note:feature
streaming: Added a topic that can be used to consume updates about the list of services in a datacenter
```

.changelog/13787.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
cli: when `acl token read` is used with the `-self` and `-expanded` flags, return an error instead of panicking
```

.changelog/13807.txt Normal file

@ -0,0 +1,6 @@
```release-note:improvement
connect: Add Envoy 1.23.0 to support matrix
```
```release-note:breaking-change
connect: Removes support for Envoy 1.19
```

.changelog/13847.txt Normal file

@ -0,0 +1,3 @@
```release-note:bug
connect: Fixed a goroutine/memory leak that would occur when using the ingress gateway.
```


@ -23,6 +23,11 @@ references:
BASH_ENV: .circleci/bash_env.sh
VAULT_BINARY_VERSION: 1.9.4
GO_VERSION: 1.18.1
envoy-versions: &supported_envoy_versions
- &default_envoy_version "1.20.6"
- "1.21.4"
- "1.22.2"
- "1.23.0"
images:
# When updating the Go version, remember to also update the versions in the
# workflows section for go-test-lib jobs.
@ -30,7 +35,7 @@ references:
ember: &EMBER_IMAGE docker.mirror.hashicorp.services/circleci/node:14-browsers
ubuntu: &UBUNTU_CI_IMAGE ubuntu-2004:202201-02
cache:
yarn: &YARN_CACHE_KEY consul-ui-v8-{{ checksum "ui/yarn.lock" }}
yarn: &YARN_CACHE_KEY consul-ui-v9-{{ checksum "ui/yarn.lock" }}
steps:
install-gotestsum: &install-gotestsum
@ -237,7 +242,9 @@ jobs:
- run:
name: Install protobuf
command: make proto-tools
- run:
name: "Protobuf Format"
command: make proto-format
- run:
command: make --always-make proto
- run: |
@ -852,15 +859,30 @@ jobs:
path: *TEST_RESULTS_DIR
- run: *notify-slack-failure
envoy-integration-test-1_19_5: &ENVOY_TESTS
envoy-integration-test: &ENVOY_TESTS
machine:
image: *UBUNTU_CI_IMAGE
parallelism: 4
resource_class: medium
parameters:
envoy-version:
type: enum
enum: *supported_envoy_versions
default: *default_envoy_version
xds-target:
type: enum
enum: ["server", "client"]
default: "server"
environment:
ENVOY_VERSION: "1.19.5"
ENVOY_VERSION: << parameters.envoy-version >>
XDS_TARGET: << parameters.xds-target >>
AWS_LAMBDA_REGION: us-west-2
steps: &ENVOY_INTEGRATION_TEST_STEPS
- checkout
- assume-role:
access-key: AWS_ACCESS_KEY_ID_LAMBDA
secret-key: AWS_SECRET_ACCESS_KEY_LAMBDA
role-arn: ROLE_ARN_LAMBDA
# Get go binary from workspace
- attach_workspace:
at: .
@ -891,21 +913,6 @@ jobs:
path: *TEST_RESULTS_DIR
- run: *notify-slack-failure
envoy-integration-test-1_20_4:
<<: *ENVOY_TESTS
environment:
ENVOY_VERSION: "1.20.4"
envoy-integration-test-1_21_3:
<<: *ENVOY_TESTS
environment:
ENVOY_VERSION: "1.21.3"
envoy-integration-test-1_22_2:
<<: *ENVOY_TESTS
environment:
ENVOY_VERSION: "1.22.2"
# run integration tests for the connect ca providers
test-connect-ca-providers:
docker:
@ -1116,18 +1123,13 @@ workflows:
- nomad-integration-0_8:
requires:
- dev-build
- envoy-integration-test-1_19_5:
requires:
- dev-build
- envoy-integration-test-1_20_4:
requires:
- dev-build
- envoy-integration-test-1_21_3:
requires:
- dev-build
- envoy-integration-test-1_22_2:
- envoy-integration-test:
requires:
- dev-build
matrix:
parameters:
envoy-version: *supported_envoy_versions
xds-target: ["server", "client"]
- compatibility-integration-test:
requires:
- dev-build


@ -73,7 +73,7 @@ function verify_rpm {
docker_platform="linux/amd64"
docker_image="amd64/centos:7"
;;
*.arm.rpm)
*.armv7hl.rpm)
docker_platform="linux/arm/v7"
docker_image="arm32v7/fedora:36"
;;
@ -120,7 +120,7 @@ function verify_deb {
docker_platform="linux/amd64"
docker_image="amd64/debian:bullseye"
;;
*_arm.deb)
*_armhf.deb)
docker_platform="linux/arm/v7"
docker_image="arm32v7/debian:bullseye"
;;


@ -254,8 +254,8 @@ jobs:
docker.io/hashicorppreview/${{ env.repo }}:${{ env.dev_tag }}-${{ github.sha }}
smoke_test: .github/scripts/verify_docker.sh v${{ env.version }}
build-docker-redhat:
name: Docker Build UBI Image for RedHat
build-docker-ubi-redhat:
name: Docker Build UBI Image for RedHat Registry
needs:
- get-product-version
- build
@ -274,6 +274,39 @@ jobs:
redhat_tag: scan.connect.redhat.com/ospid-60f9fdbec3a80eac643abedf/${{env.repo}}:${{env.version}}-ubi
smoke_test: .github/scripts/verify_docker.sh v${{ env.version }}
build-docker-ubi-dockerhub:
name: Docker Build UBI Image for DockerHub
needs:
- get-product-version
- build
runs-on: ubuntu-latest
env:
repo: ${{github.event.repository.name}}
version: ${{needs.get-product-version.outputs.product-version}}
steps:
- uses: actions/checkout@v2
# Strip everything but MAJOR.MINOR from the version string and add a `-dev` suffix
# This naming convention will be used ONLY for per-commit dev images
- name: Set docker dev tag
run: |
version="${{ env.version }}"
echo "dev_tag=${version%.*}-dev" >> $GITHUB_ENV
- uses: hashicorp/actions-docker-build@v1
with:
version: ${{env.version}}
target: ubi
arch: amd64
tags: |
docker.io/hashicorp/${{env.repo}}:${{env.version}}-ubi
public.ecr.aws/hashicorp/${{env.repo}}:${{env.version}}-ubi
dev_tags: |
docker.io/hashicorppreview/${{ env.repo }}:${{ env.dev_tag }}-ubi
docker.io/hashicorppreview/${{ env.repo }}:${{ env.dev_tag }}-ubi-${{ github.sha }}
smoke_test: .github/scripts/verify_docker.sh v${{ env.version }}
verify-linux:
needs:
- get-product-version
@ -339,7 +372,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
arch: ["i386", "amd64", "arm", "arm64"]
arch: ["i386", "amd64", "armhf", "arm64"]
# fail-fast: true
env:
version: ${{ needs.get-product-version.outputs.product-version }}
@ -376,7 +409,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
arch: ["i386", "x86_64", "arm", "aarch64"]
arch: ["i386", "x86_64", "armv7hl", "aarch64"]
# fail-fast: true
env:
version: ${{ needs.get-product-version.outputs.product-version }}


@ -1,4 +1,4 @@
name: Nightly 1.10.x Test
name: Nightly Test 1.13.x
on:
schedule:
- cron: '0 4 * * *'
@ -6,8 +6,8 @@ on:
env:
EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition
BRANCH: "release/1.10.x"
BRANCH_NAME: "release-1.10.x" # Used for naming artifacts
BRANCH: "release/1.13.x"
BRANCH_NAME: "release-1.13.x" # Used for naming artifacts
jobs:
frontend-test-workspace-node:
@ -27,7 +27,7 @@ jobs:
- name: Install
id: install
working-directory: ./ui
run: yarn install
run: make deps
- name: Workspace Tests
id: workspace-test
@ -59,7 +59,7 @@ jobs:
- name: Install
id: install
working-directory: ./ui
run: yarn install
run: make deps
- name: Ember Build OSS
id: build-oss
@ -98,7 +98,7 @@ jobs:
- name: Install
id: install
working-directory: ./ui
run: yarn install
run: make deps
- name: Download OSS Frontend
uses: actions/download-artifact@v3
@ -131,7 +131,7 @@ jobs:
- name: Install
id: install
working-directory: ./ui
run: yarn install
run: make deps
- name: Ember Build ENT
id: build-oss
@ -170,7 +170,7 @@ jobs:
- name: Install
id: install
working-directory: ./ui
run: yarn install
run: make deps
- name: Download ENT Frontend
uses: actions/download-artifact@v3
@ -201,7 +201,7 @@ jobs:
- name: Install
id: install
working-directory: ./ui
run: yarn install
run: make deps
- name: Download ENT Frontend
uses: actions/download-artifact@v3

.gitignore vendored

@ -14,6 +14,7 @@ changelog.tmp
exit-code
Thumbs.db
.idea
.vscode
# MacOS
.DS_Store


@ -178,6 +178,15 @@ event "promote-dev-docker" {
}
}
event "fossa-scan" {
depends = ["promote-dev-docker"]
action "fossa-scan" {
organization = "hashicorp"
repository = "crt-workflows-common"
workflow = "fossa-scan"
}
}
## These are promotion and post-publish events
## they should be added to the end of the file after the verify event stanza.


@ -5,7 +5,7 @@ container {
}
binary {
secrets = true
secrets = false
go_modules = false
osv = true
oss_index = true


@ -1,3 +1,41 @@
## 1.12.3 (July 13, 2022)
IMPROVEMENTS:
* Support Vault namespaces in Connect CA by adding RootPKINamespace and
IntermediatePKINamespace fields to the config. [[GH-12904](https://github.com/hashicorp/consul/issues/12904)]
* connect: Update Envoy support matrix to latest patch releases (1.22.2, 1.21.3, 1.20.4, 1.19.5) [[GH-13431](https://github.com/hashicorp/consul/issues/13431)]
* dns: Added support for specifying admin partition in node lookups. [[GH-13421](https://github.com/hashicorp/consul/issues/13421)]
* telemetry: Added a `consul.server.isLeader` metric to track if a server is a leader or not. [[GH-13304](https://github.com/hashicorp/consul/issues/13304)]
BUG FIXES:
* agent: Fixed a bug in HTTP handlers where URLs were being decoded twice [[GH-13256](https://github.com/hashicorp/consul/issues/13256)]
* deps: Update go-grpc/grpc, resolving connection memory leak [[GH-13051](https://github.com/hashicorp/consul/issues/13051)]
* fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services [[GH-13127](https://github.com/hashicorp/consul/issues/13127)]
* ui: Fix incorrect text on certain page empty states [[GH-13409](https://github.com/hashicorp/consul/issues/13409)]
* xds: Fix a bug that resulted in Lambda services not using the payload-passthrough option as expected. [[GH-13607](https://github.com/hashicorp/consul/issues/13607)]
* xds: Fix a bug where terminating gateway upstream clusters weren't configured properly when the service protocol was `http2`. [[GH-13699](https://github.com/hashicorp/consul/issues/13699)]
## 1.11.7 (July 13, 2022)
IMPROVEMENTS:
* connect: Update supported Envoy versions to 1.20.4, 1.19.5, 1.18.6, 1.17.4 [[GH-13434](https://github.com/hashicorp/consul/issues/13434)]
BUG FIXES:
* agent: Fixed a bug in HTTP handlers where URLs were being decoded twice [[GH-13265](https://github.com/hashicorp/consul/issues/13265)]
* fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services [[GH-13127](https://github.com/hashicorp/consul/issues/13127)]
* xds: Fix a bug where terminating gateway upstream clusters weren't configured properly when the service protocol was `http2`. [[GH-13699](https://github.com/hashicorp/consul/issues/13699)]
## 1.10.12 (July 13, 2022)
BUG FIXES:
* agent: Fixed a bug in HTTP handlers where URLs were being decoded twice [[GH-13264](https://github.com/hashicorp/consul/issues/13264)]
* fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services [[GH-13127](https://github.com/hashicorp/consul/issues/13127)]
## 1.13.0-alpha2 (June 21, 2022)
IMPROVEMENTS:


@ -376,6 +376,18 @@ proto-format: proto-tools
proto-lint: proto-tools
@buf lint --config proto/buf.yaml --path proto
@buf lint --config proto-public/buf.yaml --path proto-public
@for fn in $$(find proto -name '*.proto'); do \
if [[ "$$fn" = "proto/pbsubscribe/subscribe.proto" ]]; then \
continue ; \
elif [[ "$$fn" = "proto/pbpartition/partition.proto" ]]; then \
continue ; \
fi ; \
pkg=$$(grep "^package " "$$fn" | sed 's/^package \(.*\);/\1/'); \
if [[ "$$pkg" != hashicorp.consul.internal.* ]]; then \
echo "ERROR: $$fn: is missing 'hashicorp.consul.internal' package prefix: $$pkg" >&2; \
exit 1; \
fi \
done
# utility to echo a makefile variable (i.e. 'make print-PROTOC_VERSION')
print-% : ; @echo $($*)


@ -27,6 +27,7 @@ func legacyPolicy(policy *Policy) *Policy {
Keyring: policy.Keyring,
Operator: policy.Operator,
Mesh: policy.Mesh,
Peering: policy.Peering,
},
}
}
@ -117,6 +118,14 @@ func checkAllowMeshWrite(t *testing.T, authz Authorizer, prefix string, entCtx *
require.Equal(t, Allow, authz.MeshWrite(entCtx))
}
func checkAllowPeeringRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Allow, authz.PeeringRead(entCtx))
}
func checkAllowPeeringWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Allow, authz.PeeringWrite(entCtx))
}
func checkAllowOperatorRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Allow, authz.OperatorRead(entCtx))
}
@ -241,6 +250,14 @@ func checkDenyMeshWrite(t *testing.T, authz Authorizer, prefix string, entCtx *A
require.Equal(t, Deny, authz.MeshWrite(entCtx))
}
func checkDenyPeeringRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Deny, authz.PeeringRead(entCtx))
}
func checkDenyPeeringWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Deny, authz.PeeringWrite(entCtx))
}
func checkDenyOperatorRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Deny, authz.OperatorRead(entCtx))
}
@ -365,6 +382,14 @@ func checkDefaultMeshWrite(t *testing.T, authz Authorizer, prefix string, entCtx
require.Equal(t, Default, authz.MeshWrite(entCtx))
}
func checkDefaultPeeringRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Default, authz.PeeringRead(entCtx))
}
func checkDefaultPeeringWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Default, authz.PeeringWrite(entCtx))
}
func checkDefaultOperatorRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
require.Equal(t, Default, authz.OperatorRead(entCtx))
}
@ -446,6 +471,8 @@ func TestACL(t *testing.T) {
{name: "DenyNodeWrite", check: checkDenyNodeWrite},
{name: "DenyMeshRead", check: checkDenyMeshRead},
{name: "DenyMeshWrite", check: checkDenyMeshWrite},
{name: "DenyPeeringRead", check: checkDenyPeeringRead},
{name: "DenyPeeringWrite", check: checkDenyPeeringWrite},
{name: "DenyOperatorRead", check: checkDenyOperatorRead},
{name: "DenyOperatorWrite", check: checkDenyOperatorWrite},
{name: "DenyPreparedQueryRead", check: checkDenyPreparedQueryRead},
@ -480,6 +507,8 @@ func TestACL(t *testing.T) {
{name: "AllowNodeWrite", check: checkAllowNodeWrite},
{name: "AllowMeshRead", check: checkAllowMeshRead},
{name: "AllowMeshWrite", check: checkAllowMeshWrite},
{name: "AllowPeeringRead", check: checkAllowPeeringRead},
{name: "AllowPeeringWrite", check: checkAllowPeeringWrite},
{name: "AllowOperatorRead", check: checkAllowOperatorRead},
{name: "AllowOperatorWrite", check: checkAllowOperatorWrite},
{name: "AllowPreparedQueryRead", check: checkAllowPreparedQueryRead},
@ -514,6 +543,8 @@ func TestACL(t *testing.T) {
{name: "AllowNodeWrite", check: checkAllowNodeWrite},
{name: "AllowMeshRead", check: checkAllowMeshRead},
{name: "AllowMeshWrite", check: checkAllowMeshWrite},
{name: "AllowPeeringRead", check: checkAllowPeeringRead},
{name: "AllowPeeringWrite", check: checkAllowPeeringWrite},
{name: "AllowOperatorRead", check: checkAllowOperatorRead},
{name: "AllowOperatorWrite", check: checkAllowOperatorWrite},
{name: "AllowPreparedQueryRead", check: checkAllowPreparedQueryRead},
@ -1217,6 +1248,319 @@ func TestACL(t *testing.T) {
{name: "WriteAllowed", check: checkAllowMeshWrite},
},
},
{
name: "PeeringDefaultAllowPolicyDeny",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Peering: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
name: "PeeringDefaultAllowPolicyRead",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Peering: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
name: "PeeringDefaultAllowPolicyWrite",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Peering: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
name: "PeeringDefaultAllowPolicyNone",
defaultPolicy: AllowAll(),
policyStack: []*Policy{
{},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
name: "PeeringDefaultDenyPolicyDeny",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Peering: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
name: "PeeringDefaultDenyPolicyRead",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Peering: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
name: "PeeringDefaultDenyPolicyWrite",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Peering: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
name: "PeeringDefaultDenyPolicyNone",
defaultPolicy: DenyAll(),
policyStack: []*Policy{
{},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:deny, p:deny = deny
name: "PeeringOperatorDenyPolicyDeny",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
Peering: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:read, p:deny = deny
name: "PeeringOperatorReadPolicyDeny",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
Peering: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:write, p:deny = deny
name: "PeeringOperatorWritePolicyDeny",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
Peering: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:deny, p:read = read
name: "PeeringOperatorDenyPolicyRead",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
Peering: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:read, p:read = read
name: "PeeringOperatorReadPolicyRead",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
Peering: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:write, p:read = read
name: "PeeringOperatorWritePolicyRead",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
Peering: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:deny, p:write = write
name: "PeeringOperatorDenyPolicyWrite",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
Peering: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
// o:read, p:write = write
name: "PeeringOperatorReadPolicyWrite",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
Peering: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
// o:write, p:write = write
name: "PeeringOperatorWritePolicyWrite",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
Peering: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
// o:deny, p:<none> = deny
name: "PeeringOperatorDenyPolicyNone",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:read, p:<none> = read
name: "PeeringOperatorReadPolicyNone",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteDenied", check: checkDenyPeeringWrite},
},
},
{
// o:write, p:<none> = write
name: "PeeringOperatorWritePolicyNone",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowPeeringRead},
{name: "WriteAllowed", check: checkAllowPeeringWrite},
},
},
{
name: "OperatorDefaultAllowPolicyDeny",
defaultPolicy: AllowAll(),


@ -49,6 +49,7 @@ const (
ResourceQuery Resource = "query"
ResourceService Resource = "service"
ResourceSession Resource = "session"
ResourcePeering Resource = "peering"
)
// Authorizer is the interface for policy enforcement.
@ -113,6 +114,14 @@ type Authorizer interface {
// functions can be used.
MeshWrite(*AuthorizerContext) EnforcementDecision
// PeeringRead determines if the read-only Consul peering functions
// can be used.
PeeringRead(*AuthorizerContext) EnforcementDecision
// PeeringWrite determines if the state-changing Consul peering
// functions can be used.
PeeringWrite(*AuthorizerContext) EnforcementDecision
// NodeRead checks for permission to read (discover) a given node.
NodeRead(string, *AuthorizerContext) EnforcementDecision
@ -326,6 +335,24 @@ func (a AllowAuthorizer) MeshWriteAllowed(ctx *AuthorizerContext) error {
return nil
}
// PeeringReadAllowed determines if the read-only Consul peering functions
// can be used.
func (a AllowAuthorizer) PeeringReadAllowed(ctx *AuthorizerContext) error {
if a.Authorizer.PeeringRead(ctx) != Allow {
return PermissionDeniedByACLUnnamed(a, ctx, ResourcePeering, AccessRead)
}
return nil
}
// PeeringWriteAllowed determines if the state-changing Consul peering
// functions can be used.
func (a AllowAuthorizer) PeeringWriteAllowed(ctx *AuthorizerContext) error {
if a.Authorizer.PeeringWrite(ctx) != Allow {
return PermissionDeniedByACLUnnamed(a, ctx, ResourcePeering, AccessWrite)
}
return nil
}
// NodeReadAllowed checks for permission to read (discover) a given node.
func (a AllowAuthorizer) NodeReadAllowed(name string, ctx *AuthorizerContext) error {
if a.Authorizer.NodeRead(name, ctx) != Allow {
@ -540,6 +567,13 @@ func Enforce(authz Authorizer, rsc Resource, segment string, access string, ctx
case "write":
return authz.SessionWrite(segment, ctx), nil
}
case ResourcePeering:
switch lowerAccess {
case "read":
return authz.PeeringRead(ctx), nil
case "write":
return authz.PeeringWrite(ctx), nil
}
default:
if processed, decision, err := enforceEnterprise(authz, rsc, segment, lowerAccess, ctx); processed {
return decision, err
@ -552,6 +586,7 @@ func Enforce(authz Authorizer, rsc Resource, segment string, access string, ctx
// NewAuthorizerFromRules is a convenience function to invoke NewPolicyFromSource followed by NewPolicyAuthorizer with
// the parse policy.
// TODO(ACL-Legacy-Compat): remove syntax arg after removing SyntaxLegacy
func NewAuthorizerFromRules(rules string, syntax SyntaxVersion, conf *Config, meta *EnterprisePolicyMeta) (Authorizer, error) {
policy, err := NewPolicyFromSource(rules, syntax, conf, meta)
if err != nil {


@ -139,6 +139,20 @@ func (m *mockAuthorizer) MeshWrite(ctx *AuthorizerContext) EnforcementDecision {
return ret.Get(0).(EnforcementDecision)
}
// PeeringRead determines if the read-only Consul peering functions
// can be used.
func (m *mockAuthorizer) PeeringRead(ctx *AuthorizerContext) EnforcementDecision {
ret := m.Called(ctx)
return ret.Get(0).(EnforcementDecision)
}
// PeeringWrite determines if the state-changing Consul peering
// functions can be used.
func (m *mockAuthorizer) PeeringWrite(ctx *AuthorizerContext) EnforcementDecision {
ret := m.Called(ctx)
return ret.Get(0).(EnforcementDecision)
}
// OperatorRead determines if the read-only Consul operator functions
// can be used.
func (m *mockAuthorizer) OperatorRead(ctx *AuthorizerContext) EnforcementDecision {
@ -462,6 +476,30 @@ func TestACL_Enforce(t *testing.T) {
ret: Deny,
err: "Invalid access level",
},
{
method: "PeeringRead",
resource: ResourcePeering,
access: "read",
ret: Allow,
},
{
method: "PeeringRead",
resource: ResourcePeering,
access: "read",
ret: Deny,
},
{
method: "PeeringWrite",
resource: ResourcePeering,
access: "write",
ret: Allow,
},
{
method: "PeeringWrite",
resource: ResourcePeering,
access: "write",
ret: Deny,
},
{
method: "PreparedQueryRead",
resource: ResourceQuery,


@ -161,6 +161,22 @@ func (c *ChainedAuthorizer) MeshWrite(entCtx *AuthorizerContext) EnforcementDeci
})
}
// PeeringRead determines if the read-only Consul peering functions
// can be used.
func (c *ChainedAuthorizer) PeeringRead(entCtx *AuthorizerContext) EnforcementDecision {
return c.executeChain(func(authz Authorizer) EnforcementDecision {
return authz.PeeringRead(entCtx)
})
}
// PeeringWrite determines if the state-changing Consul peering
// functions can be used.
func (c *ChainedAuthorizer) PeeringWrite(entCtx *AuthorizerContext) EnforcementDecision {
return c.executeChain(func(authz Authorizer) EnforcementDecision {
return authz.PeeringWrite(entCtx)
})
}
// NodeRead checks for permission to read (discover) a given node.
func (c *ChainedAuthorizer) NodeRead(node string, entCtx *AuthorizerContext) EnforcementDecision {
return c.executeChain(func(authz Authorizer) EnforcementDecision {


@ -68,6 +68,12 @@ func (authz testAuthorizer) MeshRead(*AuthorizerContext) EnforcementDecision {
func (authz testAuthorizer) MeshWrite(*AuthorizerContext) EnforcementDecision {
return EnforcementDecision(authz)
}
func (authz testAuthorizer) PeeringRead(*AuthorizerContext) EnforcementDecision {
return EnforcementDecision(authz)
}
func (authz testAuthorizer) PeeringWrite(*AuthorizerContext) EnforcementDecision {
return EnforcementDecision(authz)
}
func (authz testAuthorizer) OperatorRead(*AuthorizerContext) EnforcementDecision {
return EnforcementDecision(authz)
}
@ -128,6 +134,8 @@ func TestChainedAuthorizer(t *testing.T) {
checkDenyNodeWrite(t, authz, "foo", nil)
checkDenyMeshRead(t, authz, "foo", nil)
checkDenyMeshWrite(t, authz, "foo", nil)
checkDenyPeeringRead(t, authz, "foo", nil)
checkDenyPeeringWrite(t, authz, "foo", nil)
checkDenyOperatorRead(t, authz, "foo", nil)
checkDenyOperatorWrite(t, authz, "foo", nil)
checkDenyPreparedQueryRead(t, authz, "foo", nil)
@ -160,6 +168,8 @@ func TestChainedAuthorizer(t *testing.T) {
checkDenyNodeWrite(t, authz, "foo", nil)
checkDenyMeshRead(t, authz, "foo", nil)
checkDenyMeshWrite(t, authz, "foo", nil)
checkDenyPeeringRead(t, authz, "foo", nil)
checkDenyPeeringWrite(t, authz, "foo", nil)
checkDenyOperatorRead(t, authz, "foo", nil)
checkDenyOperatorWrite(t, authz, "foo", nil)
checkDenyPreparedQueryRead(t, authz, "foo", nil)
@ -192,6 +202,8 @@ func TestChainedAuthorizer(t *testing.T) {
checkAllowNodeWrite(t, authz, "foo", nil)
checkAllowMeshRead(t, authz, "foo", nil)
checkAllowMeshWrite(t, authz, "foo", nil)
checkAllowPeeringRead(t, authz, "foo", nil)
checkAllowPeeringWrite(t, authz, "foo", nil)
checkAllowOperatorRead(t, authz, "foo", nil)
checkAllowOperatorWrite(t, authz, "foo", nil)
checkAllowPreparedQueryRead(t, authz, "foo", nil)
@ -224,6 +236,8 @@ func TestChainedAuthorizer(t *testing.T) {
checkDenyNodeWrite(t, authz, "foo", nil)
checkDenyMeshRead(t, authz, "foo", nil)
checkDenyMeshWrite(t, authz, "foo", nil)
checkDenyPeeringRead(t, authz, "foo", nil)
checkDenyPeeringWrite(t, authz, "foo", nil)
checkDenyOperatorRead(t, authz, "foo", nil)
checkDenyOperatorWrite(t, authz, "foo", nil)
checkDenyPreparedQueryRead(t, authz, "foo", nil)
@ -254,6 +268,8 @@ func TestChainedAuthorizer(t *testing.T) {
checkAllowNodeWrite(t, authz, "foo", nil)
checkAllowMeshRead(t, authz, "foo", nil)
checkAllowMeshWrite(t, authz, "foo", nil)
checkAllowPeeringRead(t, authz, "foo", nil)
checkAllowPeeringWrite(t, authz, "foo", nil)
checkAllowOperatorRead(t, authz, "foo", nil)
checkAllowOperatorWrite(t, authz, "foo", nil)
checkAllowPreparedQueryRead(t, authz, "foo", nil)


@ -85,6 +85,7 @@ type PolicyRules struct {
Keyring string `hcl:"keyring"`
Operator string `hcl:"operator"`
Mesh string `hcl:"mesh"`
Peering string `hcl:"peering"`
}
// Policy is used to represent the policy specified by an ACL configuration.
@ -289,6 +290,10 @@ func (pr *PolicyRules) Validate(conf *Config) error {
return fmt.Errorf("Invalid mesh policy: %#v", pr.Mesh)
}
// Validate the peering policy - this one is allowed to be empty
if pr.Peering != "" && !isPolicyValid(pr.Peering, false) {
return fmt.Errorf("Invalid peering policy: %#v", pr.Peering)
}
return nil
}
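A hedged sketch of an ACL policy exercising the new rule (hypothetical policy, not part of this diff). When the `peering` rule is omitted entirely, access falls back to the `operator` rule, as the PeeringOperator* test cases earlier in this diff and the PeeringRead/PeeringWrite implementations below both show:

```hcl
# Hypothetical policy: read-only access to peering APIs.
peering  = "read"

# Only consulted for peering decisions when no peering rule is present.
operator = "deny"
```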
@ -309,6 +314,7 @@ func parseCurrent(rules string, conf *Config, meta *EnterprisePolicyMeta) (*Poli
return p, nil
}
// TODO(ACL-Legacy-Compat): remove in phase 2
func parseLegacy(rules string, conf *Config) (*Policy, error) {
p := &Policy{}
@ -436,6 +442,7 @@ func NewPolicyFromSource(rules string, syntax SyntaxVersion, conf *Config, meta
var policy *Policy
var err error
switch syntax {
// TODO(ACL-Legacy-Compat): remove and remove as argument from function
case SyntaxLegacy:
policy, err = parseLegacy(rules, conf)
case SyntaxCurrent:


@ -43,6 +43,9 @@ type policyAuthorizer struct {
// meshRule contains the mesh policies.
meshRule *policyAuthorizerRule
// peeringRule contains the peering policies.
peeringRule *policyAuthorizerRule
// embedded enterprise policy authorizer
enterprisePolicyAuthorizer
}
@ -322,6 +325,15 @@ func (p *policyAuthorizer) loadRules(policy *PolicyRules) error {
p.meshRule = &policyAuthorizerRule{access: access}
}
// Load the peering policy
if policy.Peering != "" {
access, err := AccessLevelFromString(policy.Peering)
if err != nil {
return err
}
p.peeringRule = &policyAuthorizerRule{access: access}
}
return nil
}
@ -692,6 +704,25 @@ func (p *policyAuthorizer) MeshWrite(ctx *AuthorizerContext) EnforcementDecision
return p.OperatorWrite(ctx)
}
// PeeringRead determines if the read-only peering functions are allowed.
func (p *policyAuthorizer) PeeringRead(ctx *AuthorizerContext) EnforcementDecision {
if p.peeringRule != nil {
return enforce(p.peeringRule.access, AccessRead)
}
// default to OperatorRead access
return p.OperatorRead(ctx)
}
// PeeringWrite determines if the state-changing peering functions are
// allowed.
func (p *policyAuthorizer) PeeringWrite(ctx *AuthorizerContext) EnforcementDecision {
if p.peeringRule != nil {
return enforce(p.peeringRule.access, AccessWrite)
}
// default to OperatorWrite access
return p.OperatorWrite(ctx)
}
// OperatorRead determines if the read-only operator functions are allowed.
func (p *policyAuthorizer) OperatorRead(*AuthorizerContext) EnforcementDecision {
if p.operatorRule != nil {


@ -50,6 +50,8 @@ func TestPolicyAuthorizer(t *testing.T) {
{name: "DefaultNodeWrite", prefix: "foo", check: checkDefaultNodeWrite},
{name: "DefaultMeshRead", prefix: "foo", check: checkDefaultMeshRead},
{name: "DefaultMeshWrite", prefix: "foo", check: checkDefaultMeshWrite},
{name: "DefaultPeeringRead", prefix: "foo", check: checkDefaultPeeringRead},
{name: "DefaultPeeringWrite", prefix: "foo", check: checkDefaultPeeringWrite},
{name: "DefaultOperatorRead", prefix: "foo", check: checkDefaultOperatorRead},
{name: "DefaultOperatorWrite", prefix: "foo", check: checkDefaultOperatorWrite},
{name: "DefaultPreparedQueryRead", prefix: "foo", check: checkDefaultPreparedQueryRead},


@ -10,6 +10,7 @@ type policyRulesMergeContext struct {
keyRules map[string]*KeyRule
keyPrefixRules map[string]*KeyRule
meshRule string
peeringRule string
nodeRules map[string]*NodeRule
nodePrefixRules map[string]*NodeRule
operatorRule string
@ -33,6 +34,7 @@ func (p *policyRulesMergeContext) init() {
p.keyRules = make(map[string]*KeyRule)
p.keyPrefixRules = make(map[string]*KeyRule)
p.meshRule = ""
p.peeringRule = ""
p.nodeRules = make(map[string]*NodeRule)
p.nodePrefixRules = make(map[string]*NodeRule)
p.operatorRule = ""
@ -119,10 +121,6 @@ func (p *policyRulesMergeContext) merge(policy *PolicyRules) {
}
}
if takesPrecedenceOver(policy.Mesh, p.meshRule) {
p.meshRule = policy.Mesh
}
for _, np := range policy.Nodes {
update := true
if permission, found := p.nodeRules[np.Name]; found {
@ -145,6 +143,14 @@ func (p *policyRulesMergeContext) merge(policy *PolicyRules) {
}
}
if takesPrecedenceOver(policy.Mesh, p.meshRule) {
p.meshRule = policy.Mesh
}
if takesPrecedenceOver(policy.Peering, p.peeringRule) {
p.peeringRule = policy.Peering
}
if takesPrecedenceOver(policy.Operator, p.operatorRule) {
p.operatorRule = policy.Operator
}
@ -235,6 +241,7 @@ func (p *policyRulesMergeContext) fill(merged *PolicyRules) {
merged.Keyring = p.keyringRule
merged.Operator = p.operatorRule
merged.Mesh = p.meshRule
merged.Peering = p.peeringRule
// All the for loop appends are ugly but Go doesn't have a way to get
// a slice of all values within a map so this is necessary


@ -65,6 +65,7 @@ func TestPolicySourceParse(t *testing.T) {
}
operator = "deny"
mesh = "deny"
peering = "deny"
service_prefix "" {
policy = "write"
}
@ -147,6 +148,7 @@ func TestPolicySourceParse(t *testing.T) {
},
"operator": "deny",
"mesh": "deny",
"peering": "deny",
"service_prefix": {
"": {
"policy": "write"
@ -253,6 +255,7 @@ func TestPolicySourceParse(t *testing.T) {
},
Operator: PolicyDeny,
Mesh: PolicyDeny,
Peering: PolicyDeny,
PreparedQueryPrefixes: []*PreparedQueryRule{
{
Prefix: "",
@ -743,6 +746,13 @@ func TestPolicySourceParse(t *testing.T) {
RulesJSON: `{ "mesh": "nope" }`,
Err: "Invalid mesh policy",
},
{
Name: "Bad Policy - Peering",
Syntax: SyntaxCurrent,
Rules: `peering = "nope"`,
RulesJSON: `{ "peering": "nope" }`,
Err: "Invalid peering policy",
},
{
Name: "Keyring Empty",
Syntax: SyntaxCurrent,
@ -764,6 +774,13 @@ func TestPolicySourceParse(t *testing.T) {
RulesJSON: `{ "mesh": "" }`,
Expected: &Policy{PolicyRules: PolicyRules{Mesh: ""}},
},
{
Name: "Peering Empty",
Syntax: SyntaxCurrent,
Rules: `peering = ""`,
RulesJSON: `{ "peering": "" }`,
Expected: &Policy{PolicyRules: PolicyRules{Peering: ""}},
},
}
for _, tc := range cases {
@ -1453,66 +1470,90 @@ func TestMergePolicies(t *testing.T) {
{
name: "Write Precedence",
input: []*Policy{
{PolicyRules: PolicyRules{
{
PolicyRules: PolicyRules{
ACL: PolicyRead,
Keyring: PolicyRead,
Operator: PolicyRead,
Mesh: PolicyRead,
}},
{PolicyRules: PolicyRules{
ACL: PolicyWrite,
Keyring: PolicyWrite,
Operator: PolicyWrite,
Mesh: PolicyWrite,
}},
Peering: PolicyRead,
},
expected: &Policy{PolicyRules: PolicyRules{
},
{
PolicyRules: PolicyRules{
ACL: PolicyWrite,
Keyring: PolicyWrite,
Operator: PolicyWrite,
Mesh: PolicyWrite,
}},
Peering: PolicyWrite,
},
},
},
expected: &Policy{
PolicyRules: PolicyRules{
ACL: PolicyWrite,
Keyring: PolicyWrite,
Operator: PolicyWrite,
Mesh: PolicyWrite,
Peering: PolicyWrite,
},
},
},
{
name: "Deny Precedence",
input: []*Policy{
{PolicyRules: PolicyRules{
{
PolicyRules: PolicyRules{
ACL: PolicyWrite,
Keyring: PolicyWrite,
Operator: PolicyWrite,
Mesh: PolicyWrite,
}},
{PolicyRules: PolicyRules{
ACL: PolicyDeny,
Keyring: PolicyDeny,
Operator: PolicyDeny,
Mesh: PolicyDeny,
}},
Peering: PolicyWrite,
},
expected: &Policy{PolicyRules: PolicyRules{
},
{
PolicyRules: PolicyRules{
ACL: PolicyDeny,
Keyring: PolicyDeny,
Operator: PolicyDeny,
Mesh: PolicyDeny,
}},
Peering: PolicyDeny,
},
},
},
expected: &Policy{
PolicyRules: PolicyRules{
ACL: PolicyDeny,
Keyring: PolicyDeny,
Operator: PolicyDeny,
Mesh: PolicyDeny,
Peering: PolicyDeny,
},
},
},
{
name: "Read Precedence",
input: []*Policy{
{PolicyRules: PolicyRules{
{
PolicyRules: PolicyRules{
ACL: PolicyRead,
Keyring: PolicyRead,
Operator: PolicyRead,
Mesh: PolicyRead,
}},
Peering: PolicyRead,
},
},
{},
},
expected: &Policy{PolicyRules: PolicyRules{
expected: &Policy{
PolicyRules: PolicyRules{
ACL: PolicyRead,
Keyring: PolicyRead,
Operator: PolicyRead,
Mesh: PolicyRead,
}},
Peering: PolicyRead,
},
},
},
}
@ -1524,6 +1565,7 @@ func TestMergePolicies(t *testing.T) {
require.Equal(t, exp.Keyring, act.Keyring)
require.Equal(t, exp.Operator, act.Operator)
require.Equal(t, exp.Mesh, act.Mesh)
require.Equal(t, exp.Peering, act.Peering)
require.ElementsMatch(t, exp.Agents, act.Agents)
require.ElementsMatch(t, exp.AgentPrefixes, act.AgentPrefixes)
require.ElementsMatch(t, exp.Events, act.Events)
@ -1597,6 +1639,9 @@ operator = "write"
# comment
mesh = "write"
# comment
peering = "write"
`
expected := `
@ -1652,6 +1697,9 @@ operator = "write"
# comment
mesh = "write"
# comment
peering = "write"
`
output, err := TranslateLegacyRules([]byte(input))

View File

@ -170,6 +170,20 @@ func (s *staticAuthorizer) MeshWrite(*AuthorizerContext) EnforcementDecision {
return Deny
}
func (s *staticAuthorizer) PeeringRead(*AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) PeeringWrite(*AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow
}
return Deny
}
func (s *staticAuthorizer) OperatorRead(*AuthorizerContext) EnforcementDecision {
if s.defaultAllow {
return Allow


@ -2044,6 +2044,14 @@ func TestACL_Authorize(t *testing.T) {
Resource: "mesh",
Access: "write",
},
{
Resource: "peering",
Access: "read",
},
{
Resource: "peering",
Access: "write",
},
{
Resource: "query",
Segment: "foo",
@ -2186,6 +2194,14 @@ func TestACL_Authorize(t *testing.T) {
Resource: "mesh",
Access: "write",
},
{
Resource: "peering",
Access: "read",
},
{
Resource: "peering",
Access: "write",
},
{
Resource: "query",
Segment: "foo",
@ -2238,6 +2254,8 @@ func TestACL_Authorize(t *testing.T) {
true, // operator:write
true, // mesh:read
true, // mesh:write
true, // peering:read
true, // peering:write
false, // query:read
false, // query:write
true, // service:read


@ -274,10 +274,10 @@ func TestACL_vetServiceRegister(t *testing.T) {
// Try to register over a service without write privs to the existing
// service.
a.State.AddService(&structs.NodeService{
a.State.AddServiceWithChecks(&structs.NodeService{
ID: "my-service",
Service: "other",
}, "")
}, nil, "")
err = a.vetServiceRegister(serviceRWSecret, &structs.NodeService{
ID: "my-service",
Service: "service",
@ -304,10 +304,10 @@ func TestACL_vetServiceUpdateWithAuthorizer(t *testing.T) {
require.Contains(t, err.Error(), "Unknown service")
// Update with write privs.
a.State.AddService(&structs.NodeService{
a.State.AddServiceWithChecks(&structs.NodeService{
ID: "my-service",
Service: "service",
}, "")
}, nil, "")
err = vetServiceUpdate(serviceRWSecret, structs.NewServiceID("my-service", nil))
require.NoError(t, err)
@ -361,10 +361,10 @@ func TestACL_vetCheckRegisterWithAuthorizer(t *testing.T) {
// Try to register over a service check without write privs to the
// existing service.
a.State.AddService(&structs.NodeService{
a.State.AddServiceWithChecks(&structs.NodeService{
ID: "my-service",
Service: "service",
}, "")
}, nil, "")
a.State.AddCheck(&structs.HealthCheck{
CheckID: types.CheckID("my-check"),
ServiceID: "my-service",
@ -410,10 +410,10 @@ func TestACL_vetCheckUpdateWithAuthorizer(t *testing.T) {
require.Contains(t, err.Error(), "Unknown check")
// Update service check with write privs.
a.State.AddService(&structs.NodeService{
a.State.AddServiceWithChecks(&structs.NodeService{
ID: "my-service",
Service: "service",
}, "")
}, nil, "")
a.State.AddCheck(&structs.HealthCheck{
CheckID: types.CheckID("my-service-check"),
ServiceID: "my-service",


@ -38,7 +38,7 @@ import (
"github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/dns"
publicgrpc "github.com/hashicorp/consul/agent/grpc/public"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/proxycfg"
proxycfgglue "github.com/hashicorp/consul/agent/proxycfg-glue"
@ -213,9 +213,9 @@ type Agent struct {
// depending on the configuration
delegate delegate
// publicGRPCServer is the gRPC server exposed on the dedicated gRPC port (as
// externalGRPCServer is the gRPC server exposed on the dedicated gRPC port (as
// opposed to the multiplexed "server" port).
publicGRPCServer *grpc.Server
externalGRPCServer *grpc.Server
// state stores a local representation of the node,
// services and checks. Used for anti-entropy.
@ -539,7 +539,7 @@ func (a *Agent) Start(ctx context.Context) error {
// This needs to happen after the initial auto-config is loaded, because TLS
// can only be configured on the gRPC server at the point of creation.
a.buildPublicGRPCServer()
a.buildExternalGRPCServer()
if err := a.startLicenseManager(ctx); err != nil {
return err
@ -578,7 +578,7 @@ func (a *Agent) Start(ctx context.Context) error {
// Setup either the client or the server.
if c.ServerMode {
server, err := consul.NewServer(consulCfg, a.baseDeps.Deps, a.publicGRPCServer)
server, err := consul.NewServer(consulCfg, a.baseDeps.Deps, a.externalGRPCServer)
if err != nil {
return fmt.Errorf("Failed to start Consul server: %v", err)
}
@ -633,30 +633,8 @@ func (a *Agent) Start(ctx context.Context) error {
go a.baseDeps.ViewStore.Run(&lib.StopChannelContext{StopCh: a.shutdownCh})
// Start the proxy config manager.
proxyDataSources := proxycfg.DataSources{
CARoots: proxycfgglue.CacheCARoots(a.cache),
CompiledDiscoveryChain: proxycfgglue.CacheCompiledDiscoveryChain(a.cache),
ConfigEntry: proxycfgglue.CacheConfigEntry(a.cache),
ConfigEntryList: proxycfgglue.CacheConfigEntryList(a.cache),
Datacenters: proxycfgglue.CacheDatacenters(a.cache),
FederationStateListMeshGateways: proxycfgglue.CacheFederationStateListMeshGateways(a.cache),
GatewayServices: proxycfgglue.CacheGatewayServices(a.cache),
Health: proxycfgglue.Health(a.rpcClientHealth),
HTTPChecks: proxycfgglue.CacheHTTPChecks(a.cache),
Intentions: proxycfgglue.CacheIntentions(a.cache),
IntentionUpstreams: proxycfgglue.CacheIntentionUpstreams(a.cache),
InternalServiceDump: proxycfgglue.CacheInternalServiceDump(a.cache),
LeafCertificate: proxycfgglue.CacheLeafCertificate(a.cache),
PreparedQuery: proxycfgglue.CachePrepraredQuery(a.cache),
ResolvedServiceConfig: proxycfgglue.CacheResolvedServiceConfig(a.cache),
ServiceList: proxycfgglue.CacheServiceList(a.cache),
TrustBundle: proxycfgglue.CacheTrustBundle(a.cache),
TrustBundleList: proxycfgglue.CacheTrustBundleList(a.cache),
ExportedPeeredServices: proxycfgglue.CacheExportedPeeredServices(a.cache),
}
a.fillEnterpriseProxyDataSources(&proxyDataSources)
a.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{
DataSources: proxyDataSources,
DataSources: a.proxyDataSources(),
Logger: a.logger.Named(logging.ProxyConfig),
Source: &structs.QuerySource{
Datacenter: a.config.Datacenter,
@ -740,12 +718,6 @@ func (a *Agent) Start(ctx context.Context) error {
go a.retryJoinWAN()
}
// DEPRECATED: Warn users if they're emitting deprecated metrics. Remove this warning and the flagged metrics in a
// future release of Consul.
if !a.config.Telemetry.DisableCompatOneNine {
a.logger.Warn("DEPRECATED Backwards compatibility with pre-1.9 metrics enabled. These metrics will be removed in Consul 1.13. Consider not using this flag and rework instrumentation for 1.10 style http metrics.")
}
if a.tlsConfigurator.Cert() != nil {
m := tlsCertExpirationMonitor(a.tlsConfigurator, a.logger)
go m.Monitor(&lib.StopChannelContext{StopCh: a.shutdownCh})
@ -788,13 +760,8 @@ func (a *Agent) Failed() <-chan struct{} {
return a.apiServers.failed
}
func (a *Agent) buildPublicGRPCServer() {
// TLS is only enabled on the gRPC server if there's an HTTPS port configured.
var tls *tlsutil.Configurator
if a.config.HTTPSPort > 0 {
tls = a.tlsConfigurator
}
a.publicGRPCServer = publicgrpc.NewServer(a.logger.Named("grpc.public"), tls)
func (a *Agent) buildExternalGRPCServer() {
a.externalGRPCServer = external.NewServer(a.logger.Named("grpc.external"), a.tlsConfigurator)
}
func (a *Agent) listenAndServeGRPC() error {
@ -831,7 +798,7 @@ func (a *Agent) listenAndServeGRPC() error {
},
a,
)
a.xdsServer.Register(a.publicGRPCServer)
a.xdsServer.Register(a.externalGRPCServer)
ln, err := a.startListeners(a.config.GRPCAddrs)
if err != nil {
@ -844,7 +811,7 @@ func (a *Agent) listenAndServeGRPC() error {
"address", innerL.Addr().String(),
"network", innerL.Addr().Network(),
)
err := a.publicGRPCServer.Serve(innerL)
err := a.externalGRPCServer.Serve(innerL)
if err != nil {
a.logger.Error("gRPC server failed", "error", err)
}
@ -1221,6 +1188,8 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co
cfg.RPCAddr = runtimeCfg.RPCBindAddr
cfg.RPCAdvertise = runtimeCfg.RPCAdvertiseAddr
cfg.GRPCPort = runtimeCfg.GRPCPort
cfg.Segment = runtimeCfg.SegmentName
if len(runtimeCfg.Segments) > 0 {
segments, err := segmentConfig(runtimeCfg)
@ -1372,6 +1341,9 @@ func newConsulConfig(runtimeCfg *config.RuntimeConfig, logger hclog.Logger) (*co
// function does not drift.
cfg.SerfLANConfig = consul.CloneSerfLANConfig(cfg.SerfLANConfig)
cfg.PeeringEnabled = runtimeCfg.PeeringEnabled
cfg.PeeringTestAllowPeerRegistrations = runtimeCfg.PeeringTestAllowPeerRegistrations
enterpriseConsulConfig(cfg, runtimeCfg)
return cfg, nil
}
@ -1520,7 +1492,7 @@ func (a *Agent) ShutdownAgent() error {
}
// Stop gRPC
a.publicGRPCServer.Stop()
a.externalGRPCServer.Stop()
// Stop the proxy config manager
if a.proxyConfig != nil {
@ -3891,12 +3863,6 @@ func (a *Agent) reloadConfig(autoReload bool) error {
}
}
// DEPRECATED: Warn users on reload if they're emitting deprecated metrics. Remove this warning and the flagged
// metrics in a future release of Consul.
if !a.config.Telemetry.DisableCompatOneNine {
a.logger.Warn("DEPRECATED Backwards compatibility with pre-1.9 metrics enabled. These metrics will be removed in Consul 1.13. Consider not using this flag and rework instrumentation for 1.10 style http metrics.")
}
return a.reloadConfigInternal(newCfg)
}
@ -4107,6 +4073,7 @@ func (a *Agent) registerCache() {
a.cache.RegisterType(cachetype.IntentionMatchName, &cachetype.IntentionMatch{RPC: a})
a.cache.RegisterType(cachetype.IntentionUpstreamsName, &cachetype.IntentionUpstreams{RPC: a})
a.cache.RegisterType(cachetype.IntentionUpstreamsDestinationName, &cachetype.IntentionUpstreamsDestination{RPC: a})
a.cache.RegisterType(cachetype.CatalogServicesName, &cachetype.CatalogServices{RPC: a})
@ -4129,6 +4096,7 @@ func (a *Agent) registerCache() {
a.cache.RegisterType(cachetype.CompiledDiscoveryChainName, &cachetype.CompiledDiscoveryChain{RPC: a})
a.cache.RegisterType(cachetype.GatewayServicesName, &cachetype.GatewayServices{RPC: a})
a.cache.RegisterType(cachetype.ServiceGatewaysName, &cachetype.ServiceGateways{RPC: a})
a.cache.RegisterType(cachetype.ConfigEntryListName, &cachetype.ConfigEntryList{RPC: a})
@ -4145,6 +4113,8 @@ func (a *Agent) registerCache() {
a.cache.RegisterType(cachetype.TrustBundleListName, &cachetype.TrustBundles{Client: a.rpcClientPeering})
a.cache.RegisterType(cachetype.PeeredUpstreamsName, &cachetype.PeeredUpstreams{RPC: a})
a.registerEntCache()
}
@ -4241,6 +4211,61 @@ func (a *Agent) listenerPortLocked(svcID structs.ServiceID, checkID structs.Chec
return port, nil
}
func (a *Agent) proxyDataSources() proxycfg.DataSources {
sources := proxycfg.DataSources{
CARoots: proxycfgglue.CacheCARoots(a.cache),
CompiledDiscoveryChain: proxycfgglue.CacheCompiledDiscoveryChain(a.cache),
ConfigEntry: proxycfgglue.CacheConfigEntry(a.cache),
ConfigEntryList: proxycfgglue.CacheConfigEntryList(a.cache),
Datacenters: proxycfgglue.CacheDatacenters(a.cache),
FederationStateListMeshGateways: proxycfgglue.CacheFederationStateListMeshGateways(a.cache),
GatewayServices: proxycfgglue.CacheGatewayServices(a.cache),
ServiceGateways: proxycfgglue.CacheServiceGateways(a.cache),
Health: proxycfgglue.ClientHealth(a.rpcClientHealth),
HTTPChecks: proxycfgglue.CacheHTTPChecks(a.cache),
Intentions: proxycfgglue.CacheIntentions(a.cache),
IntentionUpstreams: proxycfgglue.CacheIntentionUpstreams(a.cache),
IntentionUpstreamsDestination: proxycfgglue.CacheIntentionUpstreamsDestination(a.cache),
InternalServiceDump: proxycfgglue.CacheInternalServiceDump(a.cache),
LeafCertificate: proxycfgglue.CacheLeafCertificate(a.cache),
PeeredUpstreams: proxycfgglue.CachePeeredUpstreams(a.cache),
PreparedQuery: proxycfgglue.CachePrepraredQuery(a.cache),
ResolvedServiceConfig: proxycfgglue.CacheResolvedServiceConfig(a.cache),
ServiceList: proxycfgglue.CacheServiceList(a.cache),
TrustBundle: proxycfgglue.CacheTrustBundle(a.cache),
TrustBundleList: proxycfgglue.CacheTrustBundleList(a.cache),
ExportedPeeredServices: proxycfgglue.CacheExportedPeeredServices(a.cache),
}
if server, ok := a.delegate.(*consul.Server); ok {
deps := proxycfgglue.ServerDataSourceDeps{
Datacenter: a.config.Datacenter,
EventPublisher: a.baseDeps.EventPublisher,
ViewStore: a.baseDeps.ViewStore,
Logger: a.logger.Named("proxycfg.server-data-sources"),
ACLResolver: a.delegate,
GetStore: func() proxycfgglue.Store { return server.FSM().State() },
}
sources.ConfigEntry = proxycfgglue.ServerConfigEntry(deps)
sources.ConfigEntryList = proxycfgglue.ServerConfigEntryList(deps)
sources.CompiledDiscoveryChain = proxycfgglue.ServerCompiledDiscoveryChain(deps, proxycfgglue.CacheCompiledDiscoveryChain(a.cache))
sources.ExportedPeeredServices = proxycfgglue.ServerExportedPeeredServices(deps)
sources.FederationStateListMeshGateways = proxycfgglue.ServerFederationStateListMeshGateways(deps)
sources.GatewayServices = proxycfgglue.ServerGatewayServices(deps)
sources.Health = proxycfgglue.ServerHealth(deps, proxycfgglue.ClientHealth(a.rpcClientHealth))
sources.Intentions = proxycfgglue.ServerIntentions(deps)
sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps)
sources.PeeredUpstreams = proxycfgglue.ServerPeeredUpstreams(deps)
sources.ServiceList = proxycfgglue.ServerServiceList(deps, proxycfgglue.CacheServiceList(a.cache))
sources.TrustBundle = proxycfgglue.ServerTrustBundle(deps)
sources.TrustBundleList = proxycfgglue.ServerTrustBundleList(deps)
}
a.fillEnterpriseProxyDataSources(&sources)
return sources
}
func listenerPortKey(svcID structs.ServiceID, checkID structs.CheckID) string {
return fmt.Sprintf("%s:%s", svcID, checkID)
}


@ -93,7 +93,7 @@ func TestAgent_Services(t *testing.T) {
},
Port: 5000,
}
require.NoError(t, a.State.AddService(srv1, ""))
require.NoError(t, a.State.AddServiceWithChecks(srv1, nil, ""))
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
resp := httptest.NewRecorder()
@ -128,7 +128,7 @@ func TestAgent_ServicesFiltered(t *testing.T) {
},
Port: 5000,
}
require.NoError(t, a.State.AddService(srv1, ""))
require.NoError(t, a.State.AddServiceWithChecks(srv1, nil, ""))
// Add another service
srv2 := &structs.NodeService{
@ -140,7 +140,7 @@ func TestAgent_ServicesFiltered(t *testing.T) {
},
Port: 1234,
}
require.NoError(t, a.State.AddService(srv2, ""))
require.NoError(t, a.State.AddServiceWithChecks(srv2, nil, ""))
req, _ := http.NewRequest("GET", "/v1/agent/services?filter="+url.QueryEscape("foo in Meta"), nil)
resp := httptest.NewRecorder()
@ -188,7 +188,7 @@ func TestAgent_Services_ExternalConnectProxy(t *testing.T) {
Upstreams: structs.TestUpstreams(t),
},
}
a.State.AddService(srv1, "")
a.State.AddServiceWithChecks(srv1, nil, "")
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
resp := httptest.NewRecorder()
@ -232,7 +232,7 @@ func TestAgent_Services_Sidecar(t *testing.T) {
},
},
}
a.State.AddService(srv1, "")
a.State.AddServiceWithChecks(srv1, nil, "")
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
resp := httptest.NewRecorder()
@ -281,7 +281,7 @@ func TestAgent_Services_MeshGateway(t *testing.T) {
},
},
}
a.State.AddService(srv1, "")
a.State.AddServiceWithChecks(srv1, nil, "")
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
resp := httptest.NewRecorder()
@ -325,7 +325,7 @@ func TestAgent_Services_TerminatingGateway(t *testing.T) {
},
},
}
require.NoError(t, a.State.AddService(srv1, ""))
require.NoError(t, a.State.AddServiceWithChecks(srv1, nil, ""))
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
resp := httptest.NewRecorder()
@ -370,7 +370,7 @@ func TestAgent_Services_ACLFilter(t *testing.T) {
},
}
for _, s := range services {
a.State.AddService(s, "")
a.State.AddServiceWithChecks(s, nil, "")
}
t.Run("no token", func(t *testing.T) {
@ -7994,7 +7994,7 @@ func TestAgent_Services_ExposeConfig(t *testing.T) {
},
},
}
a.State.AddService(srv1, "")
a.State.AddServiceWithChecks(srv1, nil, "")
req, _ := http.NewRequest("GET", "/v1/agent/services", nil)
resp := httptest.NewRecorder()


@ -10,7 +10,7 @@ import (
// Recommended name for registration.
const IntentionUpstreamsName = "intention-upstreams"
// GatewayUpstreams supports fetching upstreams for a given gateway name.
// IntentionUpstreams supports fetching upstreams for a given service name.
type IntentionUpstreams struct {
RegisterOptionsBlockingRefresh
RPC RPC


@ -1,92 +0,0 @@
// Code generated by mockery v2.11.0. DO NOT EDIT.
package cachetype
import (
local "github.com/hashicorp/consul/agent/local"
memdb "github.com/hashicorp/go-memdb"
mock "github.com/stretchr/testify/mock"
structs "github.com/hashicorp/consul/agent/structs"
testing "testing"
time "time"
)
// MockAgent is an autogenerated mock type for the Agent type
type MockAgent struct {
mock.Mock
}
// LocalBlockingQuery provides a mock function with given fields: alwaysBlock, hash, wait, fn
func (_m *MockAgent) LocalBlockingQuery(alwaysBlock bool, hash string, wait time.Duration, fn func(memdb.WatchSet) (string, interface{}, error)) (string, interface{}, error) {
ret := _m.Called(alwaysBlock, hash, wait, fn)
var r0 string
if rf, ok := ret.Get(0).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) string); ok {
r0 = rf(alwaysBlock, hash, wait, fn)
} else {
r0 = ret.Get(0).(string)
}
var r1 interface{}
if rf, ok := ret.Get(1).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) interface{}); ok {
r1 = rf(alwaysBlock, hash, wait, fn)
} else {
if ret.Get(1) != nil {
r1 = ret.Get(1).(interface{})
}
}
var r2 error
if rf, ok := ret.Get(2).(func(bool, string, time.Duration, func(memdb.WatchSet) (string, interface{}, error)) error); ok {
r2 = rf(alwaysBlock, hash, wait, fn)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// LocalState provides a mock function with given fields:
func (_m *MockAgent) LocalState() *local.State {
ret := _m.Called()
var r0 *local.State
if rf, ok := ret.Get(0).(func() *local.State); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*local.State)
}
}
return r0
}
// ServiceHTTPBasedChecks provides a mock function with given fields: id
func (_m *MockAgent) ServiceHTTPBasedChecks(id structs.ServiceID) []structs.CheckType {
ret := _m.Called(id)
var r0 []structs.CheckType
if rf, ok := ret.Get(0).(func(structs.ServiceID) []structs.CheckType); ok {
r0 = rf(id)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).([]structs.CheckType)
}
}
return r0
}
// NewMockAgent creates a new instance of MockAgent. It also registers a cleanup function to assert the mocks expectations.
func NewMockAgent(t testing.TB) *MockAgent {
mock := &MockAgent{}
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -1,4 +1,4 @@
// Code generated by mockery v2.11.0. DO NOT EDIT.
// Code generated by mockery v2.12.2. DO NOT EDIT.
package cachetype
@ -27,9 +27,10 @@ func (_m *MockRPC) RPC(method string, args interface{}, reply interface{}) error
return r0
}
// NewMockRPC creates a new instance of MockRPC. It also registers a cleanup function to assert the mocks expectations.
// NewMockRPC creates a new instance of MockRPC. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockRPC(t testing.TB) *MockRPC {
mock := &MockRPC{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })

View File

@ -0,0 +1,60 @@
// Code generated by mockery v2.12.2. DO NOT EDIT.
package cachetype
import (
context "context"
grpc "google.golang.org/grpc"
mock "github.com/stretchr/testify/mock"
pbpeering "github.com/hashicorp/consul/proto/pbpeering"
testing "testing"
)
// MockTrustBundleLister is an autogenerated mock type for the TrustBundleLister type
type MockTrustBundleLister struct {
mock.Mock
}
// TrustBundleListByService provides a mock function with given fields: ctx, in, opts
func (_m *MockTrustBundleLister) TrustBundleListByService(ctx context.Context, in *pbpeering.TrustBundleListByServiceRequest, opts ...grpc.CallOption) (*pbpeering.TrustBundleListByServiceResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 *pbpeering.TrustBundleListByServiceResponse
if rf, ok := ret.Get(0).(func(context.Context, *pbpeering.TrustBundleListByServiceRequest, ...grpc.CallOption) *pbpeering.TrustBundleListByServiceResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*pbpeering.TrustBundleListByServiceResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *pbpeering.TrustBundleListByServiceRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewMockTrustBundleLister creates a new instance of MockTrustBundleLister. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockTrustBundleLister(t testing.TB) *MockTrustBundleLister {
mock := &MockTrustBundleLister{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -0,0 +1,51 @@
package cachetype
import (
"fmt"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
)
// Recommended name for registration.
const PeeredUpstreamsName = "peered-upstreams"
// PeeredUpstreams supports fetching imported upstream candidates of a given partition.
type PeeredUpstreams struct {
RegisterOptionsBlockingRefresh
RPC RPC
}
func (i *PeeredUpstreams) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
var result cache.FetchResult
reqReal, ok := req.(*structs.PartitionSpecificRequest)
if !ok {
return result, fmt.Errorf(
"Internal cache failure: request wrong type: %T", req)
}
// Lightweight copy this object so that manipulating QueryOptions doesn't race.
dup := *reqReal
reqReal = &dup
// Set the minimum query index to our current index so we block
reqReal.QueryOptions.MinQueryIndex = opts.MinIndex
reqReal.QueryOptions.MaxQueryTime = opts.Timeout
// Always allow stale - there's no point in hitting leader if the request is
// going to be served from cache and end up arbitrarily stale anyway. This
allows cached service-discovery to automatically read-scale across all
// servers too.
reqReal.AllowStale = true
// Fetch
var reply structs.IndexedPeeredServiceList
if err := i.RPC.RPC("Internal.PeeredUpstreams", reqReal, &reply); err != nil {
return result, err
}
result.Value = &reply
result.Index = reply.QueryMeta.Index
return result, nil
}
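For orientation (this wiring is not shown in the hunk above): a blocking-refresh cache type such as `PeeredUpstreams` is only reachable once it has been registered with the agent cache under its recommended name. A minimal sketch, assuming the agent cache's `RegisterType`/`Get` signatures and that `structs.PartitionSpecificRequest` satisfies `cache.Request`:

```go
package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/consul/agent/cache"
	cachetype "github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/structs"
)

// registerAndQuery is illustrative only; in the agent the RPC value is the
// agent delegate that forwards requests to the Consul servers.
func registerAndQuery(c *cache.Cache, rpc cachetype.RPC) error {
	// Register the type under its recommended name so Get/Notify can find it.
	c.RegisterType(cachetype.PeeredUpstreamsName, &cachetype.PeeredUpstreams{RPC: rpc})

	// Fetch the imported upstream candidates for the default partition in dc1.
	raw, _, err := c.Get(context.Background(), cachetype.PeeredUpstreamsName,
		&structs.PartitionSpecificRequest{Datacenter: "dc1"})
	if err != nil {
		return err
	}
	reply := raw.(*structs.IndexedPeeredServiceList)
	fmt.Println("raft index of the result:", reply.QueryMeta.Index)
	return nil
}
```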

View File

@ -0,0 +1,60 @@
package cachetype
import (
"testing"
"time"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
)
func TestPeeredUpstreams(t *testing.T) {
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &PeeredUpstreams{RPC: rpc}
// Expect the proper RPC call. This also sets the expected value
// since that is return-by-pointer in the arguments.
var resp *structs.IndexedPeeredServiceList
rpc.On("RPC", "Internal.PeeredUpstreams", mock.Anything, mock.Anything).Return(nil).
Run(func(args mock.Arguments) {
req := args.Get(1).(*structs.PartitionSpecificRequest)
require.Equal(t, uint64(24), req.MinQueryIndex)
require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime)
require.True(t, req.AllowStale)
reply := args.Get(2).(*structs.IndexedPeeredServiceList)
reply.Index = 48
resp = reply
})
// Fetch
result, err := typ.Fetch(cache.FetchOptions{
MinIndex: 24,
Timeout: 1 * time.Second,
}, &structs.PartitionSpecificRequest{
Datacenter: "dc1",
EnterpriseMeta: *acl.DefaultEnterpriseMeta(),
})
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, result)
}
func TestPeeredUpstreams_badReqType(t *testing.T) {
rpc := TestRPC(t)
defer rpc.AssertExpectations(t)
typ := &PeeredUpstreams{RPC: rpc}
// Fetch
_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
require.Error(t, err)
require.Contains(t, err.Error(), "wrong type")
}

View File

@ -1,10 +1,9 @@
package cachetype
//go:generate mockery --all --inpackage
// RPC is an interface that an RPC client must implement. This is a helper
// interface that is implemented by the agent delegate so that Type
// implementations can request RPC access.
//go:generate mockery --name RPC --inpackage
type RPC interface {
RPC(method string, args interface{}, reply interface{}) error
}
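For reference, the generated `MockRPC` is just one implementation of this interface; anything with a matching `RPC` method satisfies it. A tiny hand-written sketch (the type and error text are hypothetical):

```go
package cachetype

import "fmt"

// staticRPC is a hypothetical stand-in for the agent delegate, which is what
// normally implements RPC and forwards calls to the Consul servers.
type staticRPC struct{}

func (staticRPC) RPC(method string, args interface{}, reply interface{}) error {
	return fmt.Errorf("staticRPC has no handler for %q", method)
}
```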

View File

@ -0,0 +1,52 @@
package cachetype
import (
"fmt"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
)
// Recommended name for registration.
const ServiceGatewaysName = "service-gateways"
// ServiceGateways supports fetching the gateways associated with a given service name.
type ServiceGateways struct {
RegisterOptionsBlockingRefresh
RPC RPC
}
func (g *ServiceGateways) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
var result cache.FetchResult
// The request should be a ServiceSpecificRequest.
reqReal, ok := req.(*structs.ServiceSpecificRequest)
if !ok {
return result, fmt.Errorf(
"Internal cache failure: request wrong type: %T", req)
}
// Lightweight copy this object so that manipulating QueryOptions doesn't race.
dup := *reqReal
reqReal = &dup
// Set the minimum query index to our current index so we block
reqReal.QueryOptions.MinQueryIndex = opts.MinIndex
reqReal.QueryOptions.MaxQueryTime = opts.Timeout
// Always allow stale - there's no point in hitting leader if the request is
// going to be served from cache and end up arbitrarily stale anyway. This
allows cached service-discovery to automatically read-scale across all
// servers too.
reqReal.AllowStale = true
// Fetch
var reply structs.IndexedCheckServiceNodes
if err := g.RPC.RPC("Internal.ServiceGateways", reqReal, &reply); err != nil {
return result, err
}
result.Value = &reply
result.Index = reply.QueryMeta.Index
return result, nil
}

View File

@ -0,0 +1,57 @@
package cachetype
import (
"testing"
"time"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestServiceGateways(t *testing.T) {
rpc := TestRPC(t)
typ := &ServiceGateways{RPC: rpc}
// Expect the proper RPC call. This also sets the expected value
// since that is return-by-pointer in the arguments.
var resp *structs.IndexedCheckServiceNodes
rpc.On("RPC", "Internal.ServiceGateways", mock.Anything, mock.Anything).Return(nil).
Run(func(args mock.Arguments) {
req := args.Get(1).(*structs.ServiceSpecificRequest)
require.Equal(t, uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime)
require.True(t, req.AllowStale)
require.Equal(t, "foo", req.ServiceName)
nodes := []structs.CheckServiceNode{
{
Service: &structs.NodeService{
Tags: req.ServiceTags,
},
},
}
reply := args.Get(2).(*structs.IndexedCheckServiceNodes)
reply.Nodes = nodes
reply.QueryMeta.Index = 48
resp = reply
})
// Fetch
resultA, err := typ.Fetch(cache.FetchOptions{
MinIndex: 24,
Timeout: 1 * time.Second,
}, &structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "foo",
})
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, resultA)
rpc.AssertExpectations(t)
}

View File

@ -4,13 +4,13 @@ import (
"reflect"
"time"
"github.com/mitchellh/go-testing-interface"
testinf "github.com/mitchellh/go-testing-interface"
"github.com/hashicorp/consul/agent/cache"
)
// TestRPC returns a mock implementation of the RPC interface.
func TestRPC(t testing.T) *MockRPC {
func TestRPC(t testinf.T) *MockRPC {
// This function is relatively useless but this allows us to perhaps
// perform some initialization later.
return &MockRPC{}
@ -21,7 +21,7 @@ func TestRPC(t testing.T) *MockRPC {
// Errors will show up as an error type on the resulting channel so a
// type switch should be used.
func TestFetchCh(
t testing.T,
t testinf.T,
typ cache.Type,
opts cache.FetchOptions,
req cache.Request,
@ -43,7 +43,7 @@ func TestFetchCh(
// TestFetchChResult tests that the result from TestFetchCh matches
// within a reasonable period of time (it expects it to be "immediate" but
// waits some milliseconds).
func TestFetchChResult(t testing.T, ch <-chan interface{}, expected interface{}) {
func TestFetchChResult(t testinf.T, ch <-chan interface{}, expected interface{}) {
t.Helper()
select {

View File

@ -3,15 +3,53 @@ package cachetype
import (
"context"
"fmt"
"strconv"
"time"
"github.com/mitchellh/hashstructure"
"google.golang.org/grpc"
"github.com/hashicorp/consul/agent/cache"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
"google.golang.org/grpc"
)
// Recommended name for registration.
const TrustBundleReadName = "peer-trust-bundle"
type TrustBundleReadRequest struct {
Request *pbpeering.TrustBundleReadRequest
structs.QueryOptions
}
func (r *TrustBundleReadRequest) CacheInfo() cache.RequestInfo {
info := cache.RequestInfo{
Token: r.Token,
Datacenter: "",
MinIndex: 0,
Timeout: 0,
MustRevalidate: false,
// OPTIMIZE(peering): Cache.notifyPollingQuery polls at this interval. We need to revisit how that polling works.
// Using an exponential backoff when the result hasn't changed may be preferable.
MaxAge: 1 * time.Second,
}
v, err := hashstructure.Hash([]interface{}{
r.Request.Partition,
r.Request.Name,
}, nil)
if err == nil {
// If there is an error, we don't set the key. A blank key forces
// no cache for this request so the request is forwarded directly
// to the server.
info.Key = strconv.FormatUint(v, 10)
}
return info
}
// TrustBundle supports fetching the trust bundle of a single peer.
type TrustBundle struct {
@ -19,7 +57,7 @@ type TrustBundle struct {
Client TrustBundleReader
}
//go:generate mockery --name TrustBundleReader --inpackage --testonly
//go:generate mockery --name TrustBundleReader --inpackage --filename mock_TrustBundleReader_test.go
type TrustBundleReader interface {
TrustBundleRead(
ctx context.Context, in *pbpeering.TrustBundleReadRequest, opts ...grpc.CallOption,
@ -32,14 +70,20 @@ func (t *TrustBundle) Fetch(_ cache.FetchOptions, req cache.Request) (cache.Fetc
// The request should be a TrustBundleReadRequest.
// We do not need to make a copy of this request type like in other cache types
// because the RequestInfo is synthetic.
reqReal, ok := req.(*pbpeering.TrustBundleReadRequest)
reqReal, ok := req.(*TrustBundleReadRequest)
if !ok {
return result, fmt.Errorf(
"Internal cache failure: request wrong type: %T", req)
}
// Always allow stale - there's no point in hitting leader if the request is
// going to be served from cache and end up arbitrarily stale anyway. This
allows cached service-discovery to automatically read-scale across all
// servers too.
reqReal.QueryOptions.SetAllowStale(true)
// Fetch
reply, err := t.Client.TrustBundleRead(context.Background(), reqReal)
reply, err := t.Client.TrustBundleRead(external.ContextWithToken(context.Background(), reqReal.Token), reqReal.Request)
if err != nil {
return result, err
}
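The `CacheInfo` method added above is what makes this synthetic request cacheable: the identifying fields are hashed into a stable key, and a hash failure deliberately leaves the key blank so the request bypasses the cache and goes straight to the server. A standalone sketch of that pattern (the helper and its inputs are illustrative):

```go
package example

import (
	"strconv"

	"github.com/mitchellh/hashstructure"
)

// cacheKey mirrors the key derivation used by TrustBundleReadRequest.CacheInfo.
func cacheKey(partition, name string) string {
	// Hash the identifying fields; the same fields must always be hashed in the same order.
	v, err := hashstructure.Hash([]interface{}{partition, name}, nil)
	if err != nil {
		// A blank key disables caching for this request, forwarding it directly to the server.
		return ""
	}
	return strconv.FormatUint(v, 10)
}
```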

View File

@ -33,8 +33,10 @@ func TestTrustBundle(t *testing.T) {
Return(resp, nil)
// Fetch and assert against the result.
result, err := typ.Fetch(cache.FetchOptions{}, &pbpeering.TrustBundleReadRequest{
result, err := typ.Fetch(cache.FetchOptions{}, &TrustBundleReadRequest{
Request: &pbpeering.TrustBundleReadRequest{
Name: "foo",
},
})
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
@ -82,7 +84,9 @@ func TestTrustBundle_MultipleUpdates(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
t.Cleanup(cancel)
err := c.Notify(ctx, TrustBundleReadName, &pbpeering.TrustBundleReadRequest{Name: "foo"}, "updates", ch)
err := c.Notify(ctx, TrustBundleReadName, &TrustBundleReadRequest{
Request: &pbpeering.TrustBundleReadRequest{Name: "foo"},
}, "updates", ch)
require.NoError(t, err)
i := uint64(1)

View File

@ -3,15 +3,55 @@ package cachetype
import (
"context"
"fmt"
"strconv"
"time"
"github.com/mitchellh/hashstructure"
"google.golang.org/grpc"
"github.com/hashicorp/consul/agent/cache"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
"google.golang.org/grpc"
)
// Recommended name for registration.
const TrustBundleListName = "trust-bundles"
type TrustBundleListRequest struct {
Request *pbpeering.TrustBundleListByServiceRequest
structs.QueryOptions
}
func (r *TrustBundleListRequest) CacheInfo() cache.RequestInfo {
info := cache.RequestInfo{
Token: r.Token,
Datacenter: "",
MinIndex: 0,
Timeout: 0,
MustRevalidate: false,
// OPTIMIZE(peering): Cache.notifyPollingQuery polls at this interval. We need to revisit how that polling works.
// Using an exponential backoff when the result hasn't changed may be preferable.
MaxAge: 1 * time.Second,
}
v, err := hashstructure.Hash([]interface{}{
r.Request.Partition,
r.Request.Namespace,
r.Request.ServiceName,
r.Request.Kind,
}, nil)
if err == nil {
// If there is an error, we don't set the key. A blank key forces
// no cache for this request so the request is forwarded directly
// to the server.
info.Key = strconv.FormatUint(v, 10)
}
return info
}
// TrustBundles supports fetching the trust bundles of peers that a given service is exported to.
type TrustBundles struct {
@ -19,6 +59,7 @@ type TrustBundles struct {
Client TrustBundleLister
}
//go:generate mockery --name TrustBundleLister --inpackage --filename mock_TrustBundleLister_test.go
type TrustBundleLister interface {
TrustBundleListByService(
ctx context.Context, in *pbpeering.TrustBundleListByServiceRequest, opts ...grpc.CallOption,
@ -28,17 +69,23 @@ type TrustBundleLister interface {
func (t *TrustBundles) Fetch(_ cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
var result cache.FetchResult
// The request should be a TrustBundleListByServiceRequest.
// The request should be a TrustBundleListRequest.
// We do not need to make a copy of this request type like in other cache types
// because the RequestInfo is synthetic.
reqReal, ok := req.(*pbpeering.TrustBundleListByServiceRequest)
reqReal, ok := req.(*TrustBundleListRequest)
if !ok {
return result, fmt.Errorf(
"Internal cache failure: request wrong type: %T", req)
}
// Always allow stale - there's no point in hitting leader if the request is
// going to be served from cache and end up arbitrarily stale anyway. This
allows cached service-discovery to automatically read-scale across all
// servers too.
reqReal.QueryOptions.SetAllowStale(true)
// Fetch
reply, err := t.Client.TrustBundleListByService(context.Background(), reqReal)
reply, err := t.Client.TrustBundleListByService(external.ContextWithToken(context.Background(), reqReal.Token), reqReal.Request)
if err != nil {
return result, err
}

View File

@ -5,11 +5,11 @@ import (
"testing"
"time"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/proto/pbpeering"
)
func TestTrustBundles(t *testing.T) {
@ -36,8 +36,10 @@ func TestTrustBundles(t *testing.T) {
Return(resp, nil)
// Fetch and assert against the result.
result, err := typ.Fetch(cache.FetchOptions{}, &pbpeering.TrustBundleListByServiceRequest{
result, err := typ.Fetch(cache.FetchOptions{}, &TrustBundleListRequest{
Request: &pbpeering.TrustBundleListByServiceRequest{
ServiceName: "foo",
},
})
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
@ -85,7 +87,9 @@ func TestTrustBundles_MultipleUpdates(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
t.Cleanup(cancel)
err := c.Notify(ctx, TrustBundleListName, &pbpeering.TrustBundleListByServiceRequest{ServiceName: "foo"}, "updates", ch)
err := c.Notify(ctx, TrustBundleListName, &TrustBundleListRequest{
Request: &pbpeering.TrustBundleListByServiceRequest{ServiceName: "foo"},
}, "updates", ch)
require.NoError(t, err)
i := uint64(1)
@ -105,48 +109,3 @@ func TestTrustBundles_MultipleUpdates(t *testing.T) {
}
}
}
// MockTrustBundleLister is an autogenerated mock type for the TrustBundleLister type
type MockTrustBundleLister struct {
mock.Mock
}
// TrustBundleListByService provides a mock function with given fields: ctx, in, opts
func (_m *MockTrustBundleLister) TrustBundleListByService(ctx context.Context, in *pbpeering.TrustBundleListByServiceRequest, opts ...grpc.CallOption) (*pbpeering.TrustBundleListByServiceResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 *pbpeering.TrustBundleListByServiceResponse
if rf, ok := ret.Get(0).(func(context.Context, *pbpeering.TrustBundleListByServiceRequest, ...grpc.CallOption) *pbpeering.TrustBundleListByServiceResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*pbpeering.TrustBundleListByServiceResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *pbpeering.TrustBundleListByServiceRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewMockTrustBundleLister creates a new instance of MockTrustBundleLister. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockTrustBundleLister(t testing.TB) *MockTrustBundleLister {
mock := &MockTrustBundleLister{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -33,8 +33,6 @@ import (
"github.com/hashicorp/consul/lib/ttlcache"
)
//go:generate mockery --all --inpackage
// TODO(kit): remove the namespace from these once the metrics themselves change
var Gauges = []prometheus.GaugeDefinition{
{

View File

@ -1,4 +1,4 @@
// Code generated by mockery v2.11.0. DO NOT EDIT.
// Code generated by mockery v2.12.2. DO NOT EDIT.
package cache
@ -27,9 +27,10 @@ func (_m *MockRequest) CacheInfo() RequestInfo {
return r0
}
// NewMockRequest creates a new instance of MockRequest. It also registers a cleanup function to assert the mocks expectations.
// NewMockRequest creates a new instance of MockRequest. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockRequest(t testing.TB) *MockRequest {
mock := &MockRequest{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })

View File

@ -1,4 +1,4 @@
// Code generated by mockery v2.11.0. DO NOT EDIT.
// Code generated by mockery v2.12.2. DO NOT EDIT.
package cache
@ -48,9 +48,10 @@ func (_m *MockType) RegisterOptions() RegisterOptions {
return r0
}
// NewMockType creates a new instance of MockType. It also registers a cleanup function to assert the mocks expectations.
// NewMockType creates a new instance of MockType. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockType(t testing.TB) *MockType {
mock := &MockType{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })

View File

@ -8,6 +8,7 @@ import (
//
// This interface is typically implemented by request structures in
// the agent/structs package.
//go:generate mockery --name Request --inpackage
type Request interface {
// CacheInfo returns information used for caching this request.
CacheInfo() RequestInfo

View File

@ -5,7 +5,7 @@ import (
"reflect"
"time"
"github.com/mitchellh/go-testing-interface"
testinf "github.com/mitchellh/go-testing-interface"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
@ -13,7 +13,7 @@ import (
// TestCacheGetCh returns a channel that returns the result of the Get call.
// This is useful for testing timing and concurrency with Get calls. Any
// error will be logged, so the result value should always be asserted.
func TestCacheGetCh(t testing.T, c *Cache, typ string, r Request) <-chan interface{} {
func TestCacheGetCh(t testinf.T, c *Cache, typ string, r Request) <-chan interface{} {
resultCh := make(chan interface{})
go func() {
result, _, err := c.Get(context.Background(), typ, r)
@ -32,7 +32,7 @@ func TestCacheGetCh(t testing.T, c *Cache, typ string, r Request) <-chan interfa
// TestCacheGetChResult tests that the result from TestCacheGetCh matches
// within a reasonable period of time (it expects it to be "immediate" but
// waits some milliseconds).
func TestCacheGetChResult(t testing.T, ch <-chan interface{}, expected interface{}) {
func TestCacheGetChResult(t testinf.T, ch <-chan interface{}, expected interface{}) {
t.Helper()
select {
@ -51,7 +51,7 @@ func TestCacheGetChResult(t testing.T, ch <-chan interface{}, expected interface
// "immediate" but waits some milliseconds). Expected may be given multiple
// times and if so these are all waited for and asserted to match but IN ANY
// ORDER to ensure we aren't timing dependent.
func TestCacheNotifyChResult(t testing.T, ch <-chan UpdateEvent, expected ...UpdateEvent) {
func TestCacheNotifyChResult(t testinf.T, ch <-chan UpdateEvent, expected ...UpdateEvent) {
t.Helper()
expectLen := len(expected)
@ -85,14 +85,14 @@ OUT:
// TestRequest returns a Request that returns the given cache key and index.
// The Reset method can be called to reset it for custom usage.
func TestRequest(t testing.T, info RequestInfo) *MockRequest {
func TestRequest(t testinf.T, info RequestInfo) *MockRequest {
req := &MockRequest{}
req.On("CacheInfo").Return(info)
return req
}
// TestType returns a MockType that sets default RegisterOptions.
func TestType(t testing.T) *MockType {
func TestType(t testinf.T) *MockType {
typ := &MockType{}
typ.On("RegisterOptions").Return(RegisterOptions{
SupportsBlocking: true,
@ -101,7 +101,7 @@ func TestType(t testing.T) *MockType {
}
// TestTypeNonBlocking returns a MockType that returns false to SupportsBlocking.
func TestTypeNonBlocking(t testing.T) *MockType {
func TestTypeNonBlocking(t testinf.T) *MockType {
typ := &MockType{}
typ.On("RegisterOptions").Return(RegisterOptions{
SupportsBlocking: false,

1
agent/cache/type.go vendored
View File

@ -5,6 +5,7 @@ import (
)
// Type implements the logic to fetch certain types of data.
//go:generate mockery --name Type --inpackage
type Type interface {
// Fetch fetches a single unique item.
//

View File

@ -20,6 +20,60 @@ import (
"github.com/hashicorp/consul/testrpc"
)
func TestCatalogRegister_PeeringRegistration(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
t.Run("deny peer registrations by default", func(t *testing.T) {
a := NewTestAgent(t, "")
defer a.Shutdown()
// Register request with peer
args := &structs.RegisterRequest{Node: "foo", PeerName: "foo", Address: "127.0.0.1"}
req, _ := http.NewRequest("PUT", "/v1/catalog/register", jsonReader(args))
obj, err := a.srv.CatalogRegister(nil, req)
require.Error(t, err)
require.Contains(t, err.Error(), "cannot register requests with PeerName in them")
require.Nil(t, obj)
})
t.Run("cannot hcl set the peer registrations config", func(t *testing.T) {
// this will have no effect, as the value is overridden in the non-user source
a := NewTestAgent(t, "peering = { test_allow_peer_registrations = true }")
defer a.Shutdown()
// Register request with peer
args := &structs.RegisterRequest{Node: "foo", PeerName: "foo", Address: "127.0.0.1"}
req, _ := http.NewRequest("PUT", "/v1/catalog/register", jsonReader(args))
obj, err := a.srv.CatalogRegister(nil, req)
require.Error(t, err)
require.Contains(t, err.Error(), "cannot register requests with PeerName in them")
require.Nil(t, obj)
})
t.Run("allow peer registrations with test overrides", func(t *testing.T) {
// the only way to set the config in the agent is via the overrides
a := StartTestAgent(t, TestAgent{HCL: ``, Overrides: `peering = { test_allow_peer_registrations = true }`})
defer a.Shutdown()
// Register request with peer
args := &structs.RegisterRequest{Node: "foo", PeerName: "foo", Address: "127.0.0.1"}
req, _ := http.NewRequest("PUT", "/v1/catalog/register", jsonReader(args))
obj, err := a.srv.CatalogRegister(nil, req)
require.NoError(t, err)
applied, ok := obj.(bool)
require.True(t, ok)
require.True(t, applied)
})
}
func TestCatalogRegister_Service_InvalidAddress(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")

View File

@ -915,7 +915,6 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
CirconusCheckTags: stringVal(c.Telemetry.CirconusCheckTags),
CirconusSubmissionInterval: stringVal(c.Telemetry.CirconusSubmissionInterval),
CirconusSubmissionURL: stringVal(c.Telemetry.CirconusSubmissionURL),
DisableCompatOneNine: boolValWithDefault(c.Telemetry.DisableCompatOneNine, true),
DisableHostname: boolVal(c.Telemetry.DisableHostname),
DogstatsdAddr: stringVal(c.Telemetry.DogstatsdAddr),
DogstatsdTags: c.Telemetry.DogstatsdTags,
@ -1015,6 +1014,8 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
NodeMeta: c.NodeMeta,
NodeName: b.nodeName(c.NodeName),
ReadReplica: boolVal(c.ReadReplica),
PeeringEnabled: boolVal(c.Peering.Enabled),
PeeringTestAllowPeerRegistrations: boolValWithDefault(c.Peering.TestAllowPeerRegistrations, false),
PidFile: stringVal(c.PidFile),
PrimaryDatacenter: primaryDatacenter,
PrimaryGateways: b.expandAllOptionalAddrs("primary_gateways", c.PrimaryGateways),

View File

@ -197,6 +197,7 @@ type Config struct {
NodeID *string `mapstructure:"node_id"`
NodeMeta map[string]string `mapstructure:"node_meta"`
NodeName *string `mapstructure:"node_name"`
Peering Peering `mapstructure:"peering"`
Performance Performance `mapstructure:"performance"`
PidFile *string `mapstructure:"pid_file"`
Ports Ports `mapstructure:"ports"`
@ -673,7 +674,6 @@ type Telemetry struct {
CirconusCheckTags *string `mapstructure:"circonus_check_tags"`
CirconusSubmissionInterval *string `mapstructure:"circonus_submission_interval"`
CirconusSubmissionURL *string `mapstructure:"circonus_submission_url"`
DisableCompatOneNine *bool `mapstructure:"disable_compat_1.9"`
DisableHostname *bool `mapstructure:"disable_hostname"`
DogstatsdAddr *string `mapstructure:"dogstatsd_addr"`
DogstatsdTags []string `mapstructure:"dogstatsd_tags"`
@ -888,3 +888,11 @@ type TLS struct {
// config merging logic.
GRPCModifiedByDeprecatedConfig *struct{} `mapstructure:"-"`
}
type Peering struct {
Enabled *bool `mapstructure:"enabled"`
// TestAllowPeerRegistrations controls whether CatalogRegister endpoints allow registrations for objects with `PeerName`
// This always gets overridden in NonUserSource()
TestAllowPeerRegistrations *bool `mapstructure:"test_allow_peer_registrations"`
}

View File

@ -104,6 +104,9 @@ func DefaultSource() Source {
kv_max_value_size = ` + strconv.FormatInt(raft.SuggestedMaxDataSize, 10) + `
txn_max_req_len = ` + strconv.FormatInt(raft.SuggestedMaxDataSize, 10) + `
}
peering = {
enabled = true
}
performance = {
leave_drain_time = "5s"
raft_multiplier = ` + strconv.Itoa(int(consul.DefaultRaftMultiplier)) + `
@ -204,6 +207,11 @@ func NonUserSource() Source {
# the max time before leaf certs can be generated after a roots change.
test_ca_leaf_root_change_spread = "0s"
}
peering = {
# We use peer registrations in various tests
test_allow_peer_registrations = false
}
`,
}
}

View File

@ -810,6 +810,18 @@ type RuntimeConfig struct {
// flag: -non-voting-server
ReadReplica bool
// PeeringEnabled enables cluster peering. This setting only applies for servers.
// When disabled, all peering RPC endpoints will return errors,
// peering requests from other clusters will receive errors, and any peerings already stored in this server's
// state will be ignored.
//
// hcl: peering { enabled = (true|false) }
PeeringEnabled bool
// TestAllowPeerRegistrations controls whether CatalogRegister endpoints allow
// registrations for objects with `PeerName`
PeeringTestAllowPeerRegistrations bool
// PidFile is the file to store our PID in.
//
// hcl: pid_file = string
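Per the comment above, `PeeringEnabled` is the switch that server-side peering endpoints are expected to consult, turning peering RPCs into errors when it is false. A hedged sketch of that gate (the receiver, field, and error text are hypothetical, not from this diff):

```go
package example

import (
	"fmt"

	"github.com/hashicorp/consul/agent/config"
)

// exampleServer only exists to illustrate how an endpoint would consult the flag.
type exampleServer struct {
	config config.RuntimeConfig
}

func (s *exampleServer) checkPeeringEnabled() error {
	if !s.config.PeeringEnabled {
		// With peering { enabled = false }, every peering RPC endpoint returns an error.
		return fmt.Errorf("peering is disabled on this server")
	}
	return nil
}
```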

View File

@ -5548,6 +5548,16 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
"tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)",
},
})
run(t, testCase{
desc: "peering.enabled defaults to true",
args: []string{
`-data-dir=` + dataDir,
},
expected: func(rt *RuntimeConfig) {
rt.DataDir = dataDir
rt.PeeringEnabled = true
},
})
}
func (tc testCase) run(format string, dataDir string) func(t *testing.T) {
@ -5955,6 +5965,7 @@ func TestLoad_FullConfig(t *testing.T) {
NodeMeta: map[string]string{"5mgGQMBk": "mJLtVMSG", "A7ynFMJB": "0Nx6RGab"},
NodeName: "otlLxGaI",
ReadReplica: true,
PeeringEnabled: true,
PidFile: "43xN80Km",
PrimaryGateways: []string{"aej8eeZo", "roh2KahS"},
PrimaryGatewaysInterval: 18866 * time.Second,
@ -6303,7 +6314,6 @@ func TestLoad_FullConfig(t *testing.T) {
CirconusCheckTags: "prvO4uBl",
CirconusSubmissionInterval: "DolzaflP",
CirconusSubmissionURL: "gTcbS93G",
DisableCompatOneNine: true,
DisableHostname: true,
DogstatsdAddr: "0wSndumK",
DogstatsdTags: []string{"3N81zSUB", "Xtj8AnXZ"},

View File

@ -235,6 +235,8 @@
"NodeID": "",
"NodeMeta": {},
"NodeName": "",
"PeeringEnabled": false,
"PeeringTestAllowPeerRegistrations": false,
"PidFile": "",
"PrimaryDatacenter": "",
"PrimaryGateways": [
@ -417,7 +419,6 @@
"CirconusSubmissionInterval": "",
"CirconusSubmissionURL": "",
"Disable": false,
"DisableCompatOneNine": false,
"DisableHostname": false,
"DogstatsdAddr": "",
"DogstatsdTags": [],

View File

@ -305,6 +305,9 @@ node_meta {
node_name = "otlLxGaI"
non_voting_server = true
partition = ""
peering {
enabled = true
}
performance {
leave_drain_time = "8265s"
raft_multiplier = 5
@ -654,7 +657,6 @@ telemetry {
prometheus_retention_time = "15s"
statsd_address = "drce87cy"
statsite_address = "HpFwKB8R"
disable_compat_1.9 = true
}
tls {
defaults {

View File

@ -305,6 +305,9 @@
"node_name": "otlLxGaI",
"non_voting_server": true,
"partition": "",
"peering": {
"enabled": true
},
"performance": {
"leave_drain_time": "8265s",
"raft_multiplier": 5,
@ -650,8 +653,7 @@
"metrics_prefix": "ftO6DySn",
"prometheus_retention_time": "15s",
"statsd_address": "drce87cy",
"statsite_address": "HpFwKB8R",
"disable_compat_1.9": true
"statsite_address": "HpFwKB8R"
},
"tls": {
"defaults": {

View File

@ -15,6 +15,7 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/structs/aclfilter"
"github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/logging"
)
@ -43,10 +44,6 @@ const (
// provided.
anonymousToken = "anonymous"
// redactedToken is shown in structures with embedded tokens when they
// are not allowed to be displayed.
redactedToken = "<hidden>"
// aclTokenReapingRateLimit is the number of batch token reaping requests per second allowed.
aclTokenReapingRateLimit rate.Limit = 1.0
@ -1114,816 +1111,8 @@ func (r *ACLResolver) ResolveTokenAndDefaultMetaWithPeerName(
return result, err
}
// aclFilter is used to filter results from our state store based on ACL rules
// configured for the provided token.
type aclFilter struct {
authorizer acl.Authorizer
logger hclog.Logger
}
// newACLFilter constructs a new aclFilter.
func newACLFilter(authorizer acl.Authorizer, logger hclog.Logger) *aclFilter {
if logger == nil {
logger = hclog.New(&hclog.LoggerOptions{})
}
return &aclFilter{
authorizer: authorizer,
logger: logger,
}
}
// allowNode is used to determine if a node is accessible for an ACL.
func (f *aclFilter) allowNode(node string, ent *acl.AuthorizerContext) bool {
return f.authorizer.NodeRead(node, ent) == acl.Allow
}
// allowGateway is used to determine if the gateway and its linked service are accessible for an ACL
func (f *aclFilter) allowGateway(gs *structs.GatewayService) bool {
var authzContext acl.AuthorizerContext
// Need read on service and gateway. Gateway may have different EnterpriseMeta so we fill authzContext twice
gs.Gateway.FillAuthzContext(&authzContext)
if !f.allowService(gs.Gateway.Name, &authzContext) {
return false
}
gs.Service.FillAuthzContext(&authzContext)
if !f.allowService(gs.Service.Name, &authzContext) {
return false
}
return true
}
// allowService is used to determine if a service is accessible for an ACL.
func (f *aclFilter) allowService(service string, ent *acl.AuthorizerContext) bool {
if service == "" {
return true
}
return f.authorizer.ServiceRead(service, ent) == acl.Allow
}
// allowSession is used to determine if a session for a node is accessible for
// an ACL.
func (f *aclFilter) allowSession(node string, ent *acl.AuthorizerContext) bool {
return f.authorizer.SessionRead(node, ent) == acl.Allow
}
// filterHealthChecks is used to filter a set of health checks down based on
// the configured ACL rules for a token. Returns true if any elements were
// removed.
func (f *aclFilter) filterHealthChecks(checks *structs.HealthChecks) bool {
hc := *checks
var authzContext acl.AuthorizerContext
var removed bool
for i := 0; i < len(hc); i++ {
check := hc[i]
check.FillAuthzContext(&authzContext)
if f.allowNode(check.Node, &authzContext) && f.allowService(check.ServiceName, &authzContext) {
continue
}
f.logger.Debug("dropping check from result due to ACLs", "check", check.CheckID)
removed = true
hc = append(hc[:i], hc[i+1:]...)
i--
}
*checks = hc
return removed
}
// filterServices is used to filter a set of services based on ACLs. Returns
// true if any elements were removed.
func (f *aclFilter) filterServices(services structs.Services, entMeta *acl.EnterpriseMeta) bool {
var authzContext acl.AuthorizerContext
entMeta.FillAuthzContext(&authzContext)
var removed bool
for svc := range services {
if f.allowService(svc, &authzContext) {
continue
}
f.logger.Debug("dropping service from result due to ACLs", "service", svc)
removed = true
delete(services, svc)
}
return removed
}
// filterServiceNodes is used to filter a set of nodes for a given service
// based on the configured ACL rules. Returns true if any elements were removed.
func (f *aclFilter) filterServiceNodes(nodes *structs.ServiceNodes) bool {
sn := *nodes
var authzContext acl.AuthorizerContext
var removed bool
for i := 0; i < len(sn); i++ {
node := sn[i]
node.FillAuthzContext(&authzContext)
if f.allowNode(node.Node, &authzContext) && f.allowService(node.ServiceName, &authzContext) {
continue
}
removed = true
f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node.Node, &node.EnterpriseMeta))
sn = append(sn[:i], sn[i+1:]...)
i--
}
*nodes = sn
return removed
}
// filterNodeServices is used to filter services on a given node based on ACLs.
// Returns true if any elements were removed.
func (f *aclFilter) filterNodeServices(services **structs.NodeServices) bool {
if *services == nil {
return false
}
var authzContext acl.AuthorizerContext
(*services).Node.FillAuthzContext(&authzContext)
if !f.allowNode((*services).Node.Node, &authzContext) {
*services = nil
return true
}
var removed bool
for svcName, svc := range (*services).Services {
svc.FillAuthzContext(&authzContext)
if f.allowNode((*services).Node.Node, &authzContext) && f.allowService(svcName, &authzContext) {
continue
}
f.logger.Debug("dropping service from result due to ACLs", "service", svc.CompoundServiceID())
removed = true
delete((*services).Services, svcName)
}
return removed
}
// filterNodeServiceList is used to filter services on a given node based on ACLs.
// Returns true if any elements were removed.
func (f *aclFilter) filterNodeServiceList(services *structs.NodeServiceList) bool {
if services.Node == nil {
return false
}
var authzContext acl.AuthorizerContext
services.Node.FillAuthzContext(&authzContext)
if !f.allowNode(services.Node.Node, &authzContext) {
*services = structs.NodeServiceList{}
return true
}
var removed bool
svcs := services.Services
for i := 0; i < len(svcs); i++ {
svc := svcs[i]
svc.FillAuthzContext(&authzContext)
if f.allowService(svc.Service, &authzContext) {
continue
}
f.logger.Debug("dropping service from result due to ACLs", "service", svc.CompoundServiceID())
svcs = append(svcs[:i], svcs[i+1:]...)
i--
removed = true
}
services.Services = svcs
return removed
}
// filterCheckServiceNodes is used to filter nodes based on ACL rules. Returns
// true if any elements were removed.
func (f *aclFilter) filterCheckServiceNodes(nodes *structs.CheckServiceNodes) bool {
csn := *nodes
var authzContext acl.AuthorizerContext
var removed bool
for i := 0; i < len(csn); i++ {
node := csn[i]
node.Service.FillAuthzContext(&authzContext)
if f.allowNode(node.Node.Node, &authzContext) && f.allowService(node.Service.Service, &authzContext) {
continue
}
f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node.Node.Node, node.Node.GetEnterpriseMeta()))
removed = true
csn = append(csn[:i], csn[i+1:]...)
i--
}
*nodes = csn
return removed
}
// filterServiceTopology is used to filter upstreams/downstreams based on ACL rules.
// this filter is unlike others in that it also returns whether the result was filtered by ACLs
func (f *aclFilter) filterServiceTopology(topology *structs.ServiceTopology) bool {
filteredUpstreams := f.filterCheckServiceNodes(&topology.Upstreams)
filteredDownstreams := f.filterCheckServiceNodes(&topology.Downstreams)
return filteredUpstreams || filteredDownstreams
}
// filterDatacenterCheckServiceNodes is used to filter nodes based on ACL rules.
// Returns true if any elements are removed.
func (f *aclFilter) filterDatacenterCheckServiceNodes(datacenterNodes *map[string]structs.CheckServiceNodes) bool {
dn := *datacenterNodes
out := make(map[string]structs.CheckServiceNodes)
var removed bool
for dc := range dn {
nodes := dn[dc]
if f.filterCheckServiceNodes(&nodes) {
removed = true
}
if len(nodes) > 0 {
out[dc] = nodes
}
}
*datacenterNodes = out
return removed
}
// filterSessions is used to filter a set of sessions based on ACLs. Returns
// true if any elements were removed.
func (f *aclFilter) filterSessions(sessions *structs.Sessions) bool {
s := *sessions
var removed bool
for i := 0; i < len(s); i++ {
session := s[i]
var entCtx acl.AuthorizerContext
session.FillAuthzContext(&entCtx)
if f.allowSession(session.Node, &entCtx) {
continue
}
removed = true
f.logger.Debug("dropping session from result due to ACLs", "session", session.ID)
s = append(s[:i], s[i+1:]...)
i--
}
*sessions = s
return removed
}
// filterCoordinates is used to filter nodes in a coordinate dump based on ACL
// rules. Returns true if any elements were removed.
func (f *aclFilter) filterCoordinates(coords *structs.Coordinates) bool {
c := *coords
var authzContext acl.AuthorizerContext
var removed bool
for i := 0; i < len(c); i++ {
c[i].FillAuthzContext(&authzContext)
node := c[i].Node
if f.allowNode(node, &authzContext) {
continue
}
f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node, c[i].GetEnterpriseMeta()))
removed = true
c = append(c[:i], c[i+1:]...)
i--
}
*coords = c
return removed
}
// filterIntentions is used to filter intentions based on ACL rules.
// We prune entries the user doesn't have access to, and we redact any tokens
// if the user doesn't have a management token. Returns true if any elements
// were removed.
func (f *aclFilter) filterIntentions(ixns *structs.Intentions) bool {
ret := make(structs.Intentions, 0, len(*ixns))
var removed bool
for _, ixn := range *ixns {
if !ixn.CanRead(f.authorizer) {
removed = true
f.logger.Debug("dropping intention from result due to ACLs", "intention", ixn.ID)
continue
}
ret = append(ret, ixn)
}
*ixns = ret
return removed
}
// filterNodeDump is used to filter through all parts of a node dump and
// remove elements the provided ACL token cannot access. Returns true if
// any elements were removed.
func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) bool {
nd := *dump
var authzContext acl.AuthorizerContext
var removed bool
for i := 0; i < len(nd); i++ {
info := nd[i]
// Filter nodes
info.FillAuthzContext(&authzContext)
if node := info.Node; !f.allowNode(node, &authzContext) {
f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node, info.GetEnterpriseMeta()))
removed = true
nd = append(nd[:i], nd[i+1:]...)
i--
continue
}
// Filter services
for j := 0; j < len(info.Services); j++ {
svc := info.Services[j].Service
info.Services[j].FillAuthzContext(&authzContext)
if f.allowNode(info.Node, &authzContext) && f.allowService(svc, &authzContext) {
continue
}
f.logger.Debug("dropping service from result due to ACLs", "service", svc)
removed = true
info.Services = append(info.Services[:j], info.Services[j+1:]...)
j--
}
// Filter checks
for j := 0; j < len(info.Checks); j++ {
chk := info.Checks[j]
chk.FillAuthzContext(&authzContext)
if f.allowNode(info.Node, &authzContext) && f.allowService(chk.ServiceName, &authzContext) {
continue
}
f.logger.Debug("dropping check from result due to ACLs", "check", chk.CheckID)
removed = true
info.Checks = append(info.Checks[:j], info.Checks[j+1:]...)
j--
}
}
*dump = nd
return removed
}
// filterServiceDump is used to filter nodes based on ACL rules. Returns true
// if any elements were removed.
func (f *aclFilter) filterServiceDump(services *structs.ServiceDump) bool {
svcs := *services
var authzContext acl.AuthorizerContext
var removed bool
for i := 0; i < len(svcs); i++ {
service := svcs[i]
if f.allowGateway(service.GatewayService) {
// ServiceDump might only have gateway config and no node information
if service.Node == nil {
continue
}
service.Service.FillAuthzContext(&authzContext)
if f.allowNode(service.Node.Node, &authzContext) {
continue
}
}
f.logger.Debug("dropping service from result due to ACLs", "service", service.GatewayService.Service)
removed = true
svcs = append(svcs[:i], svcs[i+1:]...)
i--
}
*services = svcs
return removed
}
// filterNodes is used to filter through all parts of a node list and remove
// elements the provided ACL token cannot access. Returns true if any elements
// were removed.
func (f *aclFilter) filterNodes(nodes *structs.Nodes) bool {
n := *nodes
var authzContext acl.AuthorizerContext
var removed bool
for i := 0; i < len(n); i++ {
n[i].FillAuthzContext(&authzContext)
node := n[i].Node
if f.allowNode(node, &authzContext) {
continue
}
f.logger.Debug("dropping node from result due to ACLs", "node", structs.NodeNameString(node, n[i].GetEnterpriseMeta()))
removed = true
n = append(n[:i], n[i+1:]...)
i--
}
*nodes = n
return removed
}
// redactPreparedQueryTokens will redact any tokens unless the client has a
// management token. This eases the transition to delegated authority over
// prepared queries, since it was easy to capture management tokens in Consul
// 0.6.3 and earlier, and we don't want to willy-nilly show those. This does
// have the limitation of preventing delegated non-management users from seeing
// captured tokens, but they can at least see whether or not a token is set.
func (f *aclFilter) redactPreparedQueryTokens(query **structs.PreparedQuery) {
// Management tokens can see everything with no filtering.
var authzContext acl.AuthorizerContext
structs.DefaultEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
if f.authorizer.ACLWrite(&authzContext) == acl.Allow {
return
}
// Let the user see if there's a blank token, otherwise we need
// to redact it, since we know they don't have a management
// token.
if (*query).Token != "" {
// Redact the token, using a copy of the query structure
// since we could be pointed at a live instance from the
// state store so it's not safe to modify it. Note that
// this clone will still point to things like underlying
// arrays in the original, but for modifying just the
// token it will be safe to use.
clone := *(*query)
clone.Token = redactedToken
*query = &clone
}
}
// filterPreparedQueries is used to filter prepared queries based on ACL rules.
// We prune entries the user doesn't have access to, and we redact any tokens
// if the user doesn't have a management token. Returns true if any (named)
// queries were removed - un-named queries are meant to be ephemeral and can
// only be enumerated by a management token
func (f *aclFilter) filterPreparedQueries(queries *structs.PreparedQueries) bool {
var authzContext acl.AuthorizerContext
structs.DefaultEnterpriseMetaInDefaultPartition().FillAuthzContext(&authzContext)
// Management tokens can see everything with no filtering.
// TODO is this check even necessary - this looks like a search replace from
// the 1.4 ACL rewrite. The global-management token will provide unrestricted query privileges
// so asking for ACLWrite should be unnecessary.
if f.authorizer.ACLWrite(&authzContext) == acl.Allow {
return false
}
// Otherwise, we need to see what the token has access to.
var namedQueriesRemoved bool
ret := make(structs.PreparedQueries, 0, len(*queries))
for _, query := range *queries {
// If no prefix ACL applies to this query then filter it, since
// we know at this point the user doesn't have a management
// token, otherwise see what the policy says.
prefix, hasName := query.GetACLPrefix()
switch {
case hasName && f.authorizer.PreparedQueryRead(prefix, &authzContext) != acl.Allow:
namedQueriesRemoved = true
fallthrough
case !hasName:
f.logger.Debug("dropping prepared query from result due to ACLs", "query", query.ID)
continue
}
// Redact any tokens if necessary. We make a copy of just the
// pointer so we don't mess with the caller's slice.
final := query
f.redactPreparedQueryTokens(&final)
ret = append(ret, final)
}
*queries = ret
return namedQueriesRemoved
}
func (f *aclFilter) filterToken(token **structs.ACLToken) {
var entCtx acl.AuthorizerContext
if token == nil || *token == nil || f == nil {
return
}
(*token).FillAuthzContext(&entCtx)
if f.authorizer.ACLRead(&entCtx) != acl.Allow {
// no permissions to read
*token = nil
} else if f.authorizer.ACLWrite(&entCtx) != acl.Allow {
// no write permissions - redact secret
clone := *(*token)
clone.SecretID = redactedToken
*token = &clone
}
}
func (f *aclFilter) filterTokens(tokens *structs.ACLTokens) {
ret := make(structs.ACLTokens, 0, len(*tokens))
for _, token := range *tokens {
final := token
f.filterToken(&final)
if final != nil {
ret = append(ret, final)
}
}
*tokens = ret
}
func (f *aclFilter) filterTokenStub(token **structs.ACLTokenListStub) {
var entCtx acl.AuthorizerContext
if token == nil || *token == nil || f == nil {
return
}
(*token).FillAuthzContext(&entCtx)
if f.authorizer.ACLRead(&entCtx) != acl.Allow {
*token = nil
} else if f.authorizer.ACLWrite(&entCtx) != acl.Allow {
// no write permissions - redact secret
clone := *(*token)
clone.SecretID = redactedToken
*token = &clone
}
}
func (f *aclFilter) filterTokenStubs(tokens *[]*structs.ACLTokenListStub) {
ret := make(structs.ACLTokenListStubs, 0, len(*tokens))
for _, token := range *tokens {
final := token
f.filterTokenStub(&final)
if final != nil {
ret = append(ret, final)
}
}
*tokens = ret
}
func (f *aclFilter) filterPolicy(policy **structs.ACLPolicy) {
var entCtx acl.AuthorizerContext
if policy == nil || *policy == nil || f == nil {
return
}
(*policy).FillAuthzContext(&entCtx)
if f.authorizer.ACLRead(&entCtx) != acl.Allow {
// no permissions to read
*policy = nil
}
}
func (f *aclFilter) filterPolicies(policies *structs.ACLPolicies) {
ret := make(structs.ACLPolicies, 0, len(*policies))
for _, policy := range *policies {
final := policy
f.filterPolicy(&final)
if final != nil {
ret = append(ret, final)
}
}
*policies = ret
}
func (f *aclFilter) filterRole(role **structs.ACLRole) {
var entCtx acl.AuthorizerContext
if role == nil || *role == nil || f == nil {
return
}
(*role).FillAuthzContext(&entCtx)
if f.authorizer.ACLRead(&entCtx) != acl.Allow {
// no permissions to read
*role = nil
}
}
func (f *aclFilter) filterRoles(roles *structs.ACLRoles) {
ret := make(structs.ACLRoles, 0, len(*roles))
for _, role := range *roles {
final := role
f.filterRole(&final)
if final != nil {
ret = append(ret, final)
}
}
*roles = ret
}
func (f *aclFilter) filterBindingRule(rule **structs.ACLBindingRule) {
var entCtx acl.AuthorizerContext
if rule == nil || *rule == nil || f == nil {
return
}
(*rule).FillAuthzContext(&entCtx)
if f.authorizer.ACLRead(&entCtx) != acl.Allow {
// no permissions to read
*rule = nil
}
}
func (f *aclFilter) filterBindingRules(rules *structs.ACLBindingRules) {
ret := make(structs.ACLBindingRules, 0, len(*rules))
for _, rule := range *rules {
final := rule
f.filterBindingRule(&final)
if final != nil {
ret = append(ret, final)
}
}
*rules = ret
}
func (f *aclFilter) filterAuthMethod(method **structs.ACLAuthMethod) {
var entCtx acl.AuthorizerContext
if method == nil || *method == nil || f == nil {
return
}
(*method).FillAuthzContext(&entCtx)
if f.authorizer.ACLRead(&entCtx) != acl.Allow {
// no permissions to read
*method = nil
}
}
func (f *aclFilter) filterAuthMethods(methods *structs.ACLAuthMethods) {
ret := make(structs.ACLAuthMethods, 0, len(*methods))
for _, method := range *methods {
final := method
f.filterAuthMethod(&final)
if final != nil {
ret = append(ret, final)
}
}
*methods = ret
}
func (f *aclFilter) filterServiceList(services *structs.ServiceList) bool {
ret := make(structs.ServiceList, 0, len(*services))
var removed bool
for _, svc := range *services {
var authzContext acl.AuthorizerContext
svc.FillAuthzContext(&authzContext)
if f.authorizer.ServiceRead(svc.Name, &authzContext) != acl.Allow {
removed = true
sid := structs.NewServiceID(svc.Name, &svc.EnterpriseMeta)
f.logger.Debug("dropping service from result due to ACLs", "service", sid.String())
continue
}
ret = append(ret, svc)
}
*services = ret
return removed
}
// filterGatewayServices is used to filter gateway to service mappings based on ACL rules.
// Returns true if any elements were removed.
func (f *aclFilter) filterGatewayServices(mappings *structs.GatewayServices) bool {
ret := make(structs.GatewayServices, 0, len(*mappings))
var removed bool
for _, s := range *mappings {
// This filter only checks ServiceRead on the linked service.
// ServiceRead on the gateway is checked in the GatewayServices endpoint before filtering.
var authzContext acl.AuthorizerContext
s.Service.FillAuthzContext(&authzContext)
if f.authorizer.ServiceRead(s.Service.Name, &authzContext) != acl.Allow {
f.logger.Debug("dropping service from result due to ACLs", "service", s.Service.String())
removed = true
continue
}
ret = append(ret, s)
}
*mappings = ret
return removed
}
func filterACLWithAuthorizer(logger hclog.Logger, authorizer acl.Authorizer, subj interface{}) {
if authorizer == nil {
return
}
filt := newACLFilter(authorizer, logger)
switch v := subj.(type) {
case *structs.CheckServiceNodes:
filt.filterCheckServiceNodes(v)
case *structs.IndexedCheckServiceNodes:
v.QueryMeta.ResultsFilteredByACLs = filt.filterCheckServiceNodes(&v.Nodes)
case *structs.PreparedQueryExecuteResponse:
v.QueryMeta.ResultsFilteredByACLs = filt.filterCheckServiceNodes(&v.Nodes)
case *structs.IndexedServiceTopology:
filtered := filt.filterServiceTopology(v.ServiceTopology)
if filtered {
v.FilteredByACLs = true
v.QueryMeta.ResultsFilteredByACLs = true
}
case *structs.DatacenterIndexedCheckServiceNodes:
v.QueryMeta.ResultsFilteredByACLs = filt.filterDatacenterCheckServiceNodes(&v.DatacenterNodes)
case *structs.IndexedCoordinates:
v.QueryMeta.ResultsFilteredByACLs = filt.filterCoordinates(&v.Coordinates)
case *structs.IndexedHealthChecks:
v.QueryMeta.ResultsFilteredByACLs = filt.filterHealthChecks(&v.HealthChecks)
case *structs.IndexedIntentions:
v.QueryMeta.ResultsFilteredByACLs = filt.filterIntentions(&v.Intentions)
case *structs.IndexedNodeDump:
v.QueryMeta.ResultsFilteredByACLs = filt.filterNodeDump(&v.Dump)
case *structs.IndexedServiceDump:
v.QueryMeta.ResultsFilteredByACLs = filt.filterServiceDump(&v.Dump)
case *structs.IndexedNodes:
v.QueryMeta.ResultsFilteredByACLs = filt.filterNodes(&v.Nodes)
case *structs.IndexedNodeServices:
v.QueryMeta.ResultsFilteredByACLs = filt.filterNodeServices(&v.NodeServices)
case *structs.IndexedNodeServiceList:
v.QueryMeta.ResultsFilteredByACLs = filt.filterNodeServiceList(&v.NodeServices)
case *structs.IndexedServiceNodes:
v.QueryMeta.ResultsFilteredByACLs = filt.filterServiceNodes(&v.ServiceNodes)
case *structs.IndexedServices:
v.QueryMeta.ResultsFilteredByACLs = filt.filterServices(v.Services, &v.EnterpriseMeta)
case *structs.IndexedSessions:
v.QueryMeta.ResultsFilteredByACLs = filt.filterSessions(&v.Sessions)
case *structs.IndexedPreparedQueries:
v.QueryMeta.ResultsFilteredByACLs = filt.filterPreparedQueries(&v.Queries)
case **structs.PreparedQuery:
filt.redactPreparedQueryTokens(v)
case *structs.ACLTokens:
filt.filterTokens(v)
case **structs.ACLToken:
filt.filterToken(v)
case *[]*structs.ACLTokenListStub:
filt.filterTokenStubs(v)
case **structs.ACLTokenListStub:
filt.filterTokenStub(v)
case *structs.ACLPolicies:
filt.filterPolicies(v)
case **structs.ACLPolicy:
filt.filterPolicy(v)
case *structs.ACLRoles:
filt.filterRoles(v)
case **structs.ACLRole:
filt.filterRole(v)
case *structs.ACLBindingRules:
filt.filterBindingRules(v)
case **structs.ACLBindingRule:
filt.filterBindingRule(v)
case *structs.ACLAuthMethods:
filt.filterAuthMethods(v)
case **structs.ACLAuthMethod:
filt.filterAuthMethod(v)
case *structs.IndexedServiceList:
v.QueryMeta.ResultsFilteredByACLs = filt.filterServiceList(&v.Services)
case *structs.IndexedExportedServiceList:
for peer, peerServices := range v.Services {
v.QueryMeta.ResultsFilteredByACLs = filt.filterServiceList(&peerServices)
if len(peerServices) == 0 {
delete(v.Services, peer)
} else {
v.Services[peer] = peerServices
}
}
case *structs.IndexedGatewayServices:
v.QueryMeta.ResultsFilteredByACLs = filt.filterGatewayServices(&v.Services)
case *structs.IndexedNodesWithGateways:
if filt.filterCheckServiceNodes(&v.Nodes) {
v.QueryMeta.ResultsFilteredByACLs = true
}
if filt.filterGatewayServices(&v.Gateways) {
v.QueryMeta.ResultsFilteredByACLs = true
}
default:
panic(fmt.Errorf("Unhandled type passed to ACL filter: %T %#v", subj, subj))
}
aclfilter.New(authorizer, logger).Filter(subj)
}
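The net effect of this hunk is that the long per-type switch above moves wholesale into the new `agent/structs/aclfilter` package and this function simply delegates to it. A short sketch of calling the extracted filter directly, as an endpoint would after this change (the wrapper function itself is illustrative):

```go
package example

import (
	"github.com/hashicorp/go-hclog"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/structs/aclfilter"
)

// filterHealthChecks is an illustrative wrapper, not code from this commit.
// Filter mutates the reply in place, dropping entries the authorizer cannot
// read, just as the removed filterHealthChecks helper did.
func filterHealthChecks(authz acl.Authorizer, logger hclog.Logger, reply *structs.IndexedHealthChecks) {
	aclfilter.New(authz, logger).Filter(reply)
}
```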
// filterACL uses the ACLResolver to resolve the token in an acl.Authorizer,

View File

@ -22,6 +22,7 @@ import (
"github.com/hashicorp/consul/agent/consul/authmethod"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/structs/aclfilter"
"github.com/hashicorp/consul/lib"
)
@ -291,7 +292,7 @@ func (a *ACL) TokenRead(args *structs.ACLTokenGetRequest, reply *structs.ACLToke
a.srv.filterACLWithAuthorizer(authz, &token)
// token secret was redacted
if token.SecretID == redactedToken {
if token.SecretID == aclfilter.RedactedToken {
reply.Redacted = true
}
}
@ -719,7 +720,7 @@ func (a *ACL) TokenBatchRead(args *structs.ACLTokenBatchGetRequest, reply *struc
a.srv.filterACLWithAuthorizer(authz, &final)
if final != nil {
ret = append(ret, final)
if final.SecretID == redactedToken {
if final.SecretID == aclfilter.RedactedToken {
reply.Redacted = true
}
} else {

View File

@ -20,6 +20,7 @@ import (
"github.com/hashicorp/consul/agent/consul/authmethod/kubeauth"
"github.com/hashicorp/consul/agent/consul/authmethod/testauth"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/structs/aclfilter"
"github.com/hashicorp/consul/internal/go-sso/oidcauth/oidcauthtest"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
@ -1854,7 +1855,7 @@ func TestACLEndpoint_TokenList(t *testing.T) {
}
require.ElementsMatch(t, gatherIDs(t, resp.Tokens), tokens)
for _, token := range resp.Tokens {
require.Equal(t, redactedToken, token.SecretID)
require.Equal(t, aclfilter.RedactedToken, token.SecretID)
}
})
}

View File

@ -12,6 +12,7 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/authmethod/testauth"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/structs/aclfilter"
tokenStore "github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
@ -752,9 +753,9 @@ func TestACLReplication_TokensRedacted(t *testing.T) {
var tokenResp structs.ACLTokenResponse
req := structs.ACLTokenGetRequest{
Datacenter: "dc2",
TokenID: redactedToken,
TokenID: aclfilter.RedactedToken,
TokenIDType: structs.ACLTokenSecret,
QueryOptions: structs.QueryOptions{Token: redactedToken},
QueryOptions: structs.QueryOptions{Token: aclfilter.RedactedToken},
}
err := s2.RPC("ACL.TokenRead", &req, &tokenResp)
// its not an error for the secret to not be found.

View File

@ -5,6 +5,7 @@ import (
"fmt"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/structs/aclfilter"
)
type aclTokenReplicator struct {
@ -99,7 +100,7 @@ func (r *aclTokenReplicator) PendingUpdateEstimatedSize(i int) int {
}
func (r *aclTokenReplicator) PendingUpdateIsRedacted(i int) bool {
return r.updated[i].SecretID == redactedToken
return r.updated[i].SecretID == aclfilter.RedactedToken
}
func (r *aclTokenReplicator) UpdateLocalBatch(ctx context.Context, srv *Server, start, end int) error {

File diff suppressed because it is too large

View File

@ -4,9 +4,9 @@ import (
"context"
"fmt"
iamauth "github.com/hashicorp/consul-awsauth"
"github.com/hashicorp/consul/agent/consul/authmethod"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/internal/iamauth"
"github.com/hashicorp/go-hclog"
)

View File

@ -8,10 +8,10 @@ import (
"testing"
"github.com/aws/aws-sdk-go/aws/credentials"
iamauth "github.com/hashicorp/consul-awsauth"
"github.com/hashicorp/consul-awsauth/iamauthtest"
"github.com/hashicorp/consul/agent/consul/authmethod"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/internal/iamauth"
"github.com/hashicorp/consul/internal/iamauth/iamauthtest"
"github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
)

View File

@ -86,7 +86,7 @@ func (s *Server) initAutopilot(config *Config) {
)
// registers a snapshot handler for the event publisher to send as the first event for a new stream
s.publisher.RegisterHandler(autopilotevents.EventTopicReadyServers, apDelegate.readyServersPublisher.HandleSnapshot)
s.publisher.RegisterHandler(autopilotevents.EventTopicReadyServers, apDelegate.readyServersPublisher.HandleSnapshot, false)
}
func (s *Server) autopilotServers() map[raft.ServerID]*autopilot.Server {

View File

@ -1,4 +1,4 @@
// Code generated by mockery v2.11.0. DO NOT EDIT.
// Code generated by mockery v2.12.2. DO NOT EDIT.
package autopilotevents
@ -19,9 +19,10 @@ func (_m *MockPublisher) Publish(_a0 []stream.Event) {
_m.Called(_a0)
}
// NewMockPublisher creates a new instance of MockPublisher. It also registers a cleanup function to assert the mocks expectations.
// NewMockPublisher creates a new instance of MockPublisher. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockPublisher(t testing.TB) *MockPublisher {
mock := &MockPublisher{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })

View File

@ -1,4 +1,4 @@
// Code generated by mockery v2.11.0. DO NOT EDIT.
// Code generated by mockery v2.12.2. DO NOT EDIT.
package autopilotevents
@ -48,9 +48,10 @@ func (_m *MockStateStore) GetNodeID(_a0 types.NodeID, _a1 *acl.EnterpriseMeta, _
return r0, r1, r2
}
// NewMockStateStore creates a new instance of MockStateStore. It also registers a cleanup function to assert the mocks expectations.
// NewMockStateStore creates a new instance of MockStateStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockStateStore(t testing.TB) *MockStateStore {
mock := &MockStateStore{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })

View File

@ -1,4 +1,4 @@
// Code generated by mockery v2.11.0. DO NOT EDIT.
// Code generated by mockery v2.12.2. DO NOT EDIT.
package autopilotevents
@ -29,9 +29,10 @@ func (_m *mockTimeProvider) Now() time.Time {
return r0
}
// newMockTimeProvider creates a new instance of mockTimeProvider. It also registers a cleanup function to assert the mocks expectations.
// newMockTimeProvider creates a new instance of mockTimeProvider. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func newMockTimeProvider(t testing.TB) *mockTimeProvider {
mock := &mockTimeProvider{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })

View File

@ -119,17 +119,17 @@ func NewReadyServersEventPublisher(config Config) *ReadyServersEventPublisher {
}
}
//go:generate mockery --name StateStore --inpackage --testonly
//go:generate mockery --name StateStore --inpackage --filename mock_StateStore_test.go
type StateStore interface {
GetNodeID(types.NodeID, *acl.EnterpriseMeta, string) (uint64, *structs.Node, error)
}
//go:generate mockery --name Publisher --inpackage --testonly
//go:generate mockery --name Publisher --inpackage --filename mock_Publisher_test.go
type Publisher interface {
Publish([]stream.Event)
}
//go:generate mockery --name timeProvider --inpackage --testonly
//go:generate mockery --name timeProvider --inpackage --filename mock_timeProvider_test.go
type timeProvider interface {
Now() time.Time
}

View File

@ -74,9 +74,36 @@ type Catalog struct {
logger hclog.Logger
}
func hasPeerNameInRequest(req *structs.RegisterRequest) bool {
if req == nil {
return false
}
// A peer name may appear on the top-level request, the service, or any check.
if req.PeerName != structs.DefaultPeerKeyword {
return true
}
if req.Service != nil && req.Service.PeerName != structs.DefaultPeerKeyword {
return true
}
if req.Check != nil && req.Check.PeerName != structs.DefaultPeerKeyword {
return true
}
for _, check := range req.Checks {
if check.PeerName != structs.DefaultPeerKeyword {
return true
}
}
return false
}
// Register a service and/or check(s) in a node, creating the node if it doesn't exist.
// It is valid to pass no service or checks to simply create the node itself.
func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error {
if !c.srv.config.PeeringTestAllowPeerRegistrations && hasPeerNameInRequest(args) {
return fmt.Errorf("cannot register requests with PeerName in them")
}
if done, err := c.srv.ForwardRPC("Catalog.Register", args, reply); done {
return err
}
@ -176,7 +203,7 @@ func servicePreApply(service *structs.NodeService, authz resolver.Result, authzC
// Verify ServiceName provided if ID.
if service.ID != "" && service.Service == "" {
return fmt.Errorf("Must provide service name with ID")
return fmt.Errorf("Must provide service name (Service.Service) when service ID is provided")
}
// Check the service address here and in the agent endpoint
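The Register comment above notes that a request with no service or checks simply creates the node. A minimal sketch in the style of the registration tests elsewhere in this diff, assuming `t` and a msgpack RPC `codec` from a test server are already in scope:

```go
// Node-only registration: no Service and no Check(s); Register accepts this
// and uses it to create (or update) just the node entry.
arg := structs.RegisterRequest{
	Datacenter: "dc1",
	Node:       "node-only",
	Address:    "127.0.0.4",
}
var out struct{}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
```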

View File

@ -2765,6 +2765,104 @@ node_prefix "" {
return
}
// TestCatalog_Register_DenyPeeringRegistration makes sure that users cannot send structs.RegisterRequest
// with a PeerName in any part of the request.
func TestCatalog_Register_DenyPeeringRegistration(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
_, s := testServerWithConfig(t)
codec := rpcClient(t, s)
// we will add PeerName to copies of arg
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "db",
Tags: []string{"primary"},
Port: 8000,
},
Check: &structs.HealthCheck{
CheckID: types.CheckID("db-check"),
ServiceID: "db",
},
Checks: structs.HealthChecks{
&structs.HealthCheck{
CheckID: types.CheckID("db-check"),
ServiceID: "db",
},
},
}
type testcase struct {
name string
reqCopyFn func(arg *structs.RegisterRequest) structs.RegisterRequest
}
testCases := []testcase{
{
name: "peer name on top level",
reqCopyFn: func(arg *structs.RegisterRequest) structs.RegisterRequest {
copyR := *arg
copyR.PeerName = "foo"
return copyR
},
},
{
name: "peer name in service",
reqCopyFn: func(arg *structs.RegisterRequest) structs.RegisterRequest {
copyR := *arg
copyR.Service.PeerName = "foo"
return copyR
},
},
{
name: "peer name in check",
reqCopyFn: func(arg *structs.RegisterRequest) structs.RegisterRequest {
copyR := *arg
copyR.Check.PeerName = "foo"
return copyR
},
},
{
name: "peer name in checks",
reqCopyFn: func(arg *structs.RegisterRequest) structs.RegisterRequest {
copyR := *arg
copyR.Checks[0].PeerName = "foo"
return copyR
},
},
{
name: "peer name everywhere",
reqCopyFn: func(arg *structs.RegisterRequest) structs.RegisterRequest {
copyR := *arg
copyR.PeerName = "foo1"
copyR.Service.PeerName = "foo2"
copyR.Check.PeerName = "foo3"
copyR.Checks[0].PeerName = "foo4"
return copyR
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
req := tc.reqCopyFn(&arg)
var out struct{}
err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &req, &out)
require.Error(t, err)
require.Contains(t, err.Error(), "cannot register requests with PeerName in them")
})
}
}
func TestCatalog_ListServices_FilterACL(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")

View File

@ -17,8 +17,9 @@ import (
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
grpc "github.com/hashicorp/consul/agent/grpc/private"
"github.com/hashicorp/consul/agent/grpc/private/resolver"
"github.com/hashicorp/consul/agent/consul/stream"
grpc "github.com/hashicorp/consul/agent/grpc-internal"
"github.com/hashicorp/consul/agent/grpc-internal/resolver"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/router"
"github.com/hashicorp/consul/agent/rpc/middleware"
@ -535,6 +536,7 @@ func newDefaultDeps(t *testing.T, c *Config) Deps {
}
return Deps{
EventPublisher: stream.NewEventPublisher(10 * time.Second),
Logger: logger,
TLSConfigurator: tls,
Tokens: new(token.Store),

View File

@ -130,6 +130,9 @@ type Config struct {
// RPCSrcAddr is the source address for outgoing RPC connections.
RPCSrcAddr *net.TCPAddr
// GRPCPort is the port the public gRPC server listens on.
GRPCPort int
// (Enterprise-only) The network segment this agent is part of.
Segment string
@ -393,6 +396,11 @@ type Config struct {
RaftBoltDBConfig RaftBoltDBConfig
// PeeringEnabled enables cluster peering.
PeeringEnabled bool
PeeringTestAllowPeerRegistrations bool
// Embedded Consul Enterprise specific configuration
*EnterpriseConfig
}
@ -509,6 +517,9 @@ func DefaultConfig() *Config {
DefaultQueryTime: 300 * time.Second,
MaxQueryTime: 600 * time.Second,
PeeringEnabled: true,
PeeringTestAllowPeerRegistrations: false,
EnterpriseConfig: DefaultEnterpriseConfig(),
}

View File

@ -1141,7 +1141,7 @@ func TestConfigEntry_ResolveServiceConfig_TransparentProxy(t *testing.T) {
Name: "foo",
Mode: structs.ProxyModeTransparent,
Destination: &structs.DestinationConfig{
Address: "hello.world.com",
Addresses: []string{"hello.world.com"},
Port: 443,
},
},
@ -1153,7 +1153,7 @@ func TestConfigEntry_ResolveServiceConfig_TransparentProxy(t *testing.T) {
expect: structs.ServiceConfigResponse{
Mode: structs.ProxyModeTransparent,
Destination: structs.DestinationConfig{
Address: "hello.world.com",
Addresses: []string{"hello.world.com"},
Port: 443,
},
},
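The `Destination` block of a `service-defaults` entry now takes a list of addresses rather than a single `Address`, as this hunk and the others in the diff reflect. A short sketch of constructing such an entry; the second address is added purely for illustration:

```go
entry := &structs.ServiceConfigEntry{
	Kind: structs.ServiceDefaults,
	Name: "hello.world.com",
	Destination: &structs.DestinationConfig{
		// Addresses replaces the former singular Address field; multiple
		// hostnames or IPs may now be listed for one destination.
		Addresses: []string{"hello.world.com", "hello.world.org"},
		Port:      443,
	},
}
```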

View File

@ -141,6 +141,7 @@ func init() {
registerCommand(structs.PeeringTerminateByIDType, (*FSM).applyPeeringTerminate)
registerCommand(structs.PeeringTrustBundleWriteType, (*FSM).applyPeeringTrustBundleWrite)
registerCommand(structs.PeeringTrustBundleDeleteType, (*FSM).applyPeeringTrustBundleDelete)
registerCommand(structs.PeeringSecretsWriteType, (*FSM).applyPeeringSecretsWrite)
}
func (c *FSM) applyRegister(buf []byte, index uint64) interface{} {
@ -699,11 +700,9 @@ func (c *FSM) applyPeeringWrite(buf []byte, index uint64) interface{} {
defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering"}, time.Now(),
[]metrics.Label{{Name: "op", Value: "write"}})
return c.state.PeeringWrite(index, req.Peering)
return c.state.PeeringWrite(index, &req)
}
// TODO(peering): replace with deferred deletion since this operation
// should involve cleanup of data associated with the peering.
func (c *FSM) applyPeeringDelete(buf []byte, index uint64) interface{} {
var req pbpeering.PeeringDeleteRequest
if err := structs.DecodeProto(buf, &req); err != nil {
@ -720,6 +719,18 @@ func (c *FSM) applyPeeringDelete(buf []byte, index uint64) interface{} {
return c.state.PeeringDelete(index, q)
}
func (c *FSM) applyPeeringSecretsWrite(buf []byte, index uint64) interface{} {
var req pbpeering.PeeringSecrets
if err := structs.DecodeProto(buf, &req); err != nil {
panic(fmt.Errorf("failed to decode peering write request: %v", err))
}
defer metrics.MeasureSinceWithLabels([]string{"fsm", "peering_secrets"}, time.Now(),
[]metrics.Label{{Name: "op", Value: "write"}})
return c.state.PeeringSecretsWrite(index, &req)
}
func (c *FSM) applyPeeringTerminate(buf []byte, index uint64) interface{} {
var req pbpeering.PeeringTerminateByIDRequest
if err := structs.DecodeProto(buf, &req); err != nil {

View File

@ -6,11 +6,12 @@ import (
"sync"
"time"
"github.com/hashicorp/consul-net-rpc/go-msgpack/codec"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-raftchunking"
"github.com/hashicorp/raft"
"github.com/hashicorp/consul-net-rpc/go-msgpack/codec"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/structs"
@ -277,21 +278,56 @@ func (c *FSM) registerStreamSnapshotHandlers() {
err := c.deps.Publisher.RegisterHandler(state.EventTopicServiceHealth, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
return c.State().ServiceHealthSnapshot(req, buf)
})
}, false)
if err != nil {
panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
}
err = c.deps.Publisher.RegisterHandler(state.EventTopicServiceHealthConnect, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
return c.State().ServiceHealthSnapshot(req, buf)
})
}, false)
if err != nil {
panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
}
err = c.deps.Publisher.RegisterHandler(state.EventTopicCARoots, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
return c.State().CARootsSnapshot(req, buf)
})
}, false)
if err != nil {
panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
}
err = c.deps.Publisher.RegisterHandler(state.EventTopicMeshConfig, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
return c.State().MeshConfigSnapshot(req, buf)
}, true)
if err != nil {
panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
}
err = c.deps.Publisher.RegisterHandler(state.EventTopicServiceResolver, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
return c.State().ServiceResolverSnapshot(req, buf)
}, true)
if err != nil {
panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
}
err = c.deps.Publisher.RegisterHandler(state.EventTopicIngressGateway, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
return c.State().IngressGatewaySnapshot(req, buf)
}, true)
if err != nil {
panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
}
err = c.deps.Publisher.RegisterHandler(state.EventTopicServiceIntentions, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
return c.State().ServiceIntentionsSnapshot(req, buf)
}, true)
if err != nil {
panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
}
err = c.deps.Publisher.RegisterHandler(state.EventTopicServiceList, func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (uint64, error) {
return c.State().ServiceListSnapshot(req, buf)
}, true)
if err != nil {
panic(fmt.Errorf("fatal error encountered registering streaming snapshot handlers: %w", err))
}

View File

@ -477,9 +477,11 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) {
}
// Peerings
require.NoError(t, fsm.state.PeeringWrite(31, &pbpeering.Peering{
require.NoError(t, fsm.state.PeeringWrite(31, &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: "1fabcd52-1d46-49b0-b1d8-71559aee47f5",
Name: "baz",
},
}))
// Peering Trust Bundles

View File

@ -9,7 +9,7 @@ import (
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul/authmethod/testauth"
"github.com/hashicorp/consul/agent/grpc/public"
external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/structs"
tokenStore "github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/proto-public/pbacl"
@ -26,7 +26,7 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) {
// correctly wiring everything up in the server by:
//
// * Starting a cluster with multiple servers.
// * Making a request to a follower's public gRPC port.
// * Making a request to a follower's external gRPC port.
// * Ensuring that the request is correctly forwarded to the leader.
// * Ensuring we get a valid certificate back (so it went through the CAManager).
server1, conn1, _ := testGRPCIntegrationServer(t, func(c *Config) {
@ -59,7 +59,7 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
t.Cleanup(cancel)
ctx = public.ContextWithToken(ctx, TestDefaultInitialManagementToken)
ctx = external.ContextWithToken(ctx, TestDefaultInitialManagementToken)
// This would fail if it wasn't forwarded to the leader.
rsp, err := client.Sign(ctx, &pbconnectca.SignRequest{
@ -96,7 +96,7 @@ func TestGRPCIntegration_ServerDiscovery_WatchServers(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
t.Cleanup(cancel)
ctx = public.ContextWithToken(ctx, TestDefaultInitialManagementToken)
ctx = external.ContextWithToken(ctx, TestDefaultInitialManagementToken)
serverStream, err := client.WatchServers(ctx, &pbserverdiscovery.WatchServersRequest{Wan: false})
require.NoError(t, err)

View File

@ -558,7 +558,9 @@ func TestHealth_ServiceNodes(t *testing.T) {
}
t.Parallel()
_, s1 := testServer(t)
_, s1 := testServerWithConfig(t, func(config *Config) {
config.PeeringTestAllowPeerRegistrations = true
})
codec := rpcClient(t, s1)
waitForLeaderEstablishment(t, s1)

View File

@ -14,6 +14,7 @@ import (
"github.com/hashicorp/consul-net-rpc/net/rpc"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
@ -1209,6 +1210,104 @@ func registerTestRoutingConfigTopologyEntries(t *testing.T, codec rpc.ClientCode
}
}
func registerLocalAndRemoteServicesVIPEnabled(t *testing.T, state *state.Store) {
t.Helper()
retry.Run(t, func(r *retry.R) {
_, entry, err := state.SystemMetadataGet(nil, structs.SystemMetadataVirtualIPsEnabled)
require.NoError(r, err)
require.NotNil(r, entry)
require.Equal(r, "true", entry.Value)
})
// Register a local connect-native service
require.NoError(t, state.EnsureRegistration(10, &structs.RegisterRequest{
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "api",
Connect: structs.ServiceConnect{
Native: true,
},
},
}))
// Should be assigned VIP
psn := structs.PeeredServiceName{ServiceName: structs.NewServiceName("api", nil)}
vip, err := state.VirtualIPForService(psn)
require.NoError(t, err)
require.Equal(t, "240.0.0.1", vip)
// Register an imported service and its proxy
require.NoError(t, state.EnsureRegistration(11, &structs.RegisterRequest{
Node: "bar",
SkipNodeUpdate: true,
Service: &structs.NodeService{
Kind: structs.ServiceKindTypical,
Service: "web",
ID: "web-1",
},
PeerName: "peer-a",
}))
require.NoError(t, state.EnsureRegistration(12, &structs.RegisterRequest{
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
ID: "web-proxy",
Service: "web-proxy",
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "web",
},
},
PeerName: "peer-a",
}))
// Should be assigned one VIP for the real service name
psn = structs.PeeredServiceName{Peer: "peer-a", ServiceName: structs.NewServiceName("web", nil)}
vip, err = state.VirtualIPForService(psn)
require.NoError(t, err)
require.Equal(t, "240.0.0.2", vip)
// web-proxy should not have a VIP
psn = structs.PeeredServiceName{Peer: "peer-a", ServiceName: structs.NewServiceName("web-proxy", nil)}
vip, err = state.VirtualIPForService(psn)
require.NoError(t, err)
require.Empty(t, vip)
// Register an imported service and its proxy from another peer
require.NoError(t, state.EnsureRegistration(11, &structs.RegisterRequest{
Node: "gir",
SkipNodeUpdate: true,
Service: &structs.NodeService{
Kind: structs.ServiceKindTypical,
Service: "web",
ID: "web-1",
},
PeerName: "peer-b",
}))
require.NoError(t, state.EnsureRegistration(12, &structs.RegisterRequest{
Node: "gir",
Address: "127.0.0.3",
Service: &structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
ID: "web-proxy",
Service: "web-proxy",
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "web",
},
},
PeerName: "peer-b",
}))
// Should be assigned one VIP for the real service name
psn = structs.PeeredServiceName{Peer: "peer-b", ServiceName: structs.NewServiceName("web", nil)}
vip, err = state.VirtualIPForService(psn)
require.NoError(t, err)
require.Equal(t, "240.0.0.3", vip)
// web-proxy should not have a VIP
psn = structs.PeeredServiceName{Peer: "peer-b", ServiceName: structs.NewServiceName("web-proxy", nil)}
vip, err = state.VirtualIPForService(psn)
require.NoError(t, err)
require.Empty(t, vip)
}
func registerIntentionUpstreamEntries(t *testing.T, codec rpc.ClientCodec, token string) {
t.Helper()
@ -1365,7 +1464,7 @@ func registerIntentionUpstreamEntries(t *testing.T, codec rpc.ClientCodec, token
Kind: structs.ServiceDefaults,
Name: "api.example.com",
Destination: &structs.DestinationConfig{
Address: "api.example.com",
Addresses: []string{"api.example.com"},
Port: 443,
},
},
@ -1377,7 +1476,7 @@ func registerIntentionUpstreamEntries(t *testing.T, codec rpc.ClientCodec, token
Kind: structs.ServiceDefaults,
Name: "kafka.store.com",
Destination: &structs.DestinationConfig{
Address: "172.168.2.1",
Addresses: []string{"172.168.2.1"},
Port: 9003,
},
},

View File

@ -292,7 +292,7 @@ func (m *Internal) ServiceTopology(args *structs.ServiceSpecificRequest, reply *
})
}
// IntentionUpstreams returns the upstreams of a service. Upstreams are inferred from intentions.
// IntentionUpstreams returns a service's upstreams which are inferred from intentions.
// If intentions allow a connection from the target to some candidate service, the candidate service is considered
// an upstream of the target.
func (m *Internal) IntentionUpstreams(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceList) error {
@ -309,9 +309,9 @@ func (m *Internal) IntentionUpstreams(args *structs.ServiceSpecificRequest, repl
return m.internalUpstreams(args, reply, structs.IntentionTargetService)
}
// IntentionUpstreamsDestination returns the upstreams of a service. Upstreams are inferred from intentions.
// IntentionUpstreamsDestination returns a service's upstreams which are inferred from intentions.
// If intentions allow a connection from the target to some candidate destination, the candidate destination is considered
// an upstream of the target.this is performs the same logic as IntentionUpstreams endpoint but for destination upstreams only.
// an upstream of the target. This performs the same logic as the IntentionUpstreams endpoint, but for destination upstreams only.
func (m *Internal) IntentionUpstreamsDestination(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceList) error {
// Exit early if Connect hasn't been enabled.
if !m.srv.config.ConnectEnabled {
@ -453,6 +453,56 @@ func (m *Internal) GatewayServiceDump(args *structs.ServiceSpecificRequest, repl
return err
}
// ServiceGateways returns all the nodes for services associated with a gateway along with their gateway config
func (m *Internal) ServiceGateways(args *structs.ServiceSpecificRequest, reply *structs.IndexedCheckServiceNodes) error {
if done, err := m.srv.ForwardRPC("Internal.ServiceGateways", args, reply); done {
return err
}
// Verify the arguments
if args.ServiceName == "" {
return fmt.Errorf("Must provide gateway name")
}
var authzContext acl.AuthorizerContext
authz, err := m.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzContext)
if err != nil {
return err
}
if err := m.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
// We need read access to the service we're trying to find gateways for, so check that first.
if err := authz.ToAllowAuthorizer().ServiceReadAllowed(args.ServiceName, &authzContext); err != nil {
return err
}
err = m.srv.blockingQuery(
&args.QueryOptions,
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
var maxIdx uint64
idx, gateways, err := state.ServiceGateways(ws, args.ServiceName, args.ServiceKind, args.EnterpriseMeta)
if err != nil {
return err
}
if idx > maxIdx {
maxIdx = idx
}
reply.Index, reply.Nodes = maxIdx, gateways
if err := m.srv.filterACL(args.Token, reply); err != nil {
return err
}
return nil
})
return err
}
// GatewayIntentions returns the set of intentions that match the given source/destination.
func (m *Internal) GatewayIntentions(args *structs.IntentionQueryRequest, reply *structs.IndexedIntentions) error {
// Forward if necessary
@ -545,17 +595,15 @@ func (m *Internal) ExportedPeeredServices(args *structs.DCSpecificRequest, reply
return err
}
authz, err := m.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, nil)
var authzCtx acl.AuthorizerContext
authz, err := m.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzCtx)
if err != nil {
return err
}
if err := m.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
// TODO(peering): acls: mesh gateway needs appropriate wildcard service:read
return m.srv.blockingQuery(
&args.QueryOptions,
&reply.QueryMeta,
@ -571,6 +619,49 @@ func (m *Internal) ExportedPeeredServices(args *structs.DCSpecificRequest, reply
})
}
// PeeredUpstreams returns all imported services as upstreams for any service in a given partition.
// Cluster peering does not replicate intentions so all imported services are considered potential upstreams.
func (m *Internal) PeeredUpstreams(args *structs.PartitionSpecificRequest, reply *structs.IndexedPeeredServiceList) error {
// Exit early if Connect hasn't been enabled.
if !m.srv.config.ConnectEnabled {
return ErrConnectNotEnabled
}
if done, err := m.srv.ForwardRPC("Internal.PeeredUpstreams", args, reply); done {
return err
}
var authzCtx acl.AuthorizerContext
authz, err := m.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, &authzCtx)
if err != nil {
return err
}
if err := authz.ToAllowAuthorizer().ServiceWriteAnyAllowed(&authzCtx); err != nil {
return err
}
if err := m.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
return err
}
return m.srv.blockingQuery(
&args.QueryOptions,
&reply.QueryMeta,
func(ws memdb.WatchSet, state *state.Store) error {
index, vips, err := state.VirtualIPsForAllImportedServices(ws, args.EnterpriseMeta)
if err != nil {
return err
}
result := make([]structs.PeeredServiceName, 0, len(vips))
for _, vip := range vips {
result = append(result, vip.Service)
}
reply.Index, reply.Services = index, result
return nil
})
}
// EventFire is a bit of an odd endpoint, but it allows for a cross-DC RPC
// call to fire an event. The primary use case is to enable user events being
// triggered in a remote DC.

View File

@ -3,6 +3,7 @@ package consul
import (
"encoding/base64"
"fmt"
"math/rand"
"os"
"strings"
"testing"
@ -31,7 +32,9 @@ func TestInternal_NodeInfo(t *testing.T) {
}
t.Parallel()
_, s1 := testServer(t)
_, s1 := testServerWithConfig(t, func(config *Config) {
config.PeeringTestAllowPeerRegistrations = true
})
codec := rpcClient(t, s1)
testrpc.WaitForLeader(t, s1.RPC, "dc1")
@ -112,7 +115,9 @@ func TestInternal_NodeDump(t *testing.T) {
}
t.Parallel()
_, s1 := testServer(t)
_, s1 := testServerWithConfig(t, func(config *Config) {
config.PeeringTestAllowPeerRegistrations = true
})
codec := rpcClient(t, s1)
testrpc.WaitForLeader(t, s1.RPC, "dc1")
@ -161,9 +166,11 @@ func TestInternal_NodeDump(t *testing.T) {
require.NoError(t, err)
}
err := s1.fsm.State().PeeringWrite(1, &pbpeering.Peering{
err := s1.fsm.State().PeeringWrite(1, &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
Name: "peer1",
},
})
require.NoError(t, err)
@ -220,7 +227,9 @@ func TestInternal_NodeDump_Filter(t *testing.T) {
}
t.Parallel()
_, s1 := testServer(t)
_, s1 := testServerWithConfig(t, func(config *Config) {
config.PeeringTestAllowPeerRegistrations = true
})
codec := rpcClient(t, s1)
testrpc.WaitForLeader(t, s1.RPC, "dc1")
@ -269,9 +278,11 @@ func TestInternal_NodeDump_Filter(t *testing.T) {
require.NoError(t, err)
}
err := s1.fsm.State().PeeringWrite(1, &pbpeering.Peering{
err := s1.fsm.State().PeeringWrite(1, &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
Name: "peer1",
},
})
require.NoError(t, err)
@ -1755,7 +1766,9 @@ func TestInternal_ServiceDump_Peering(t *testing.T) {
}
t.Parallel()
_, s1 := testServer(t)
_, s1 := testServerWithConfig(t, func(config *Config) {
config.PeeringTestAllowPeerRegistrations = true
})
codec := rpcClient(t, s1)
testrpc.WaitForLeader(t, s1.RPC, "dc1")
@ -1784,9 +1797,11 @@ func TestInternal_ServiceDump_Peering(t *testing.T) {
addPeerService(t, codec)
err := s1.fsm.State().PeeringWrite(1, &pbpeering.Peering{
err := s1.fsm.State().PeeringWrite(1, &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: "9e650110-ac74-4c5a-a6a8-9348b2bed4e9",
Name: "peer1",
},
})
require.NoError(t, err)
@ -2776,3 +2791,712 @@ func TestInternal_CatalogOverview_ACLDeny(t *testing.T) {
arg.Token = opReadToken.SecretID
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.CatalogOverview", &arg, &out))
}
func TestInternal_PeeredUpstreams(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
orig := virtualIPVersionCheckInterval
virtualIPVersionCheckInterval = 50 * time.Millisecond
t.Cleanup(func() { virtualIPVersionCheckInterval = orig })
t.Parallel()
_, s1 := testServerWithConfig(t)
testrpc.WaitForLeader(t, s1.RPC, "dc1")
// Services
// api local
// web peer: peer-a
// web-proxy peer: peer-a
// web peer: peer-b
// web-proxy peer: peer-b
registerLocalAndRemoteServicesVIPEnabled(t, s1.fsm.State())
codec := rpcClient(t, s1)
args := structs.PartitionSpecificRequest{
Datacenter: "dc1",
EnterpriseMeta: *acl.DefaultEnterpriseMeta(),
}
var out structs.IndexedPeeredServiceList
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.PeeredUpstreams", &args, &out))
require.Len(t, out.Services, 2)
expect := []structs.PeeredServiceName{
{Peer: "peer-a", ServiceName: structs.NewServiceName("web", structs.DefaultEnterpriseMetaInDefaultPartition())},
{Peer: "peer-b", ServiceName: structs.NewServiceName("web", structs.DefaultEnterpriseMetaInDefaultPartition())},
}
require.Equal(t, expect, out.Services)
}
func TestInternal_ServiceGatewayService_Terminating(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
db := structs.NodeService{
ID: "db2",
Service: "db",
}
redis := structs.NodeService{
ID: "redis",
Service: "redis",
}
// Register gateway and two service instances that will be associated with it
{
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "10.1.2.2",
Service: &structs.NodeService{
ID: "terminating-gateway-01",
Service: "terminating-gateway",
Kind: structs.ServiceKindTerminatingGateway,
Port: 443,
Address: "198.18.1.3",
},
Check: &structs.HealthCheck{
Name: "terminating connect",
Status: api.HealthPassing,
ServiceID: "terminating-gateway-01",
},
}
var out struct{}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
arg = structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
ID: "db",
Service: "db",
},
Check: &structs.HealthCheck{
Name: "db-warning",
Status: api.HealthWarning,
ServiceID: "db",
},
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
arg = structs.RegisterRequest{
Datacenter: "dc1",
Node: "baz",
Address: "127.0.0.3",
Service: &db,
Check: &structs.HealthCheck{
Name: "db2-passing",
Status: api.HealthPassing,
ServiceID: "db2",
},
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
}
// Register terminating-gateway config entry, linking it to db and redis (dne)
{
args := &structs.TerminatingGatewayConfigEntry{
Name: "terminating-gateway",
Kind: structs.TerminatingGateway,
Services: []structs.LinkedService{
{
Name: "db",
},
{
Name: "redis",
CAFile: "/etc/certs/ca.pem",
CertFile: "/etc/certs/cert.pem",
KeyFile: "/etc/certs/key.pem",
},
},
}
req := structs.ConfigEntryRequest{
Op: structs.ConfigEntryUpsert,
Datacenter: "dc1",
Entry: args,
}
var configOutput bool
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &configOutput))
require.True(t, configOutput)
}
var out structs.IndexedCheckServiceNodes
req := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "db",
ServiceKind: structs.ServiceKindTerminatingGateway,
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceGateways", &req, &out))
for _, n := range out.Nodes {
n.Node.RaftIndex = structs.RaftIndex{}
n.Service.RaftIndex = structs.RaftIndex{}
for _, m := range n.Checks {
m.RaftIndex = structs.RaftIndex{}
}
}
expect := structs.CheckServiceNodes{
structs.CheckServiceNode{
Node: &structs.Node{
Node: "foo",
RaftIndex: structs.RaftIndex{},
Address: "10.1.2.2",
Datacenter: "dc1",
Partition: acl.DefaultPartitionName,
},
Service: &structs.NodeService{
Kind: structs.ServiceKindTerminatingGateway,
ID: "terminating-gateway-01",
Service: "terminating-gateway",
TaggedAddresses: map[string]structs.ServiceAddress{
"consul-virtual:" + db.CompoundServiceName().String(): {Address: "240.0.0.1"},
"consul-virtual:" + redis.CompoundServiceName().String(): {Address: "240.0.0.2"},
},
Weights: &structs.Weights{Passing: 1, Warning: 1},
Port: 443,
Tags: []string{},
Meta: map[string]string{},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
RaftIndex: structs.RaftIndex{},
Address: "198.18.1.3",
},
Checks: structs.HealthChecks{
&structs.HealthCheck{
Name: "terminating connect",
Node: "foo",
CheckID: "terminating connect",
Status: api.HealthPassing,
ServiceID: "terminating-gateway-01",
ServiceName: "terminating-gateway",
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
},
},
},
}
assert.Equal(t, expect, out.Nodes)
}
func TestInternal_ServiceGatewayService_Terminating_ACL(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
dir1, s1 := testServerWithConfig(t, func(c *Config) {
c.PrimaryDatacenter = "dc1"
c.ACLsEnabled = true
c.ACLInitialManagementToken = "root"
c.ACLResolverSettings.ACLDefaultPolicy = "deny"
})
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root"))
// Create the ACL.
token, err := upsertTestTokenWithPolicyRules(codec, "root", "dc1", `
service "db" { policy = "read" }
service "terminating-gateway" { policy = "read" }
node_prefix "" { policy = "read" }`)
require.NoError(t, err)
// Register gateway and two service instances that will be associated with it
{
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "terminating-gateway",
Service: "terminating-gateway",
Kind: structs.ServiceKindTerminatingGateway,
Port: 443,
},
Check: &structs.HealthCheck{
Name: "terminating connect",
Status: api.HealthPassing,
ServiceID: "terminating-gateway",
},
WriteRequest: structs.WriteRequest{Token: "root"},
}
var out struct{}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
{
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "terminating-gateway2",
Service: "terminating-gateway2",
Kind: structs.ServiceKindTerminatingGateway,
Port: 444,
},
Check: &structs.HealthCheck{
Name: "terminating connect",
Status: api.HealthPassing,
ServiceID: "terminating-gateway2",
},
WriteRequest: structs.WriteRequest{Token: "root"},
}
var out struct{}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
}
arg = structs.RegisterRequest{
Datacenter: "dc1",
Node: "bar",
Address: "127.0.0.2",
Service: &structs.NodeService{
ID: "db",
Service: "db",
},
Check: &structs.HealthCheck{
Name: "db-warning",
Status: api.HealthWarning,
ServiceID: "db",
},
WriteRequest: structs.WriteRequest{Token: "root"},
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
arg = structs.RegisterRequest{
Datacenter: "dc1",
Node: "baz",
Address: "127.0.0.3",
Service: &structs.NodeService{
ID: "api",
Service: "api",
},
Check: &structs.HealthCheck{
Name: "api-passing",
Status: api.HealthPassing,
ServiceID: "api",
},
WriteRequest: structs.WriteRequest{Token: "root"},
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
}
// Register terminating-gateway config entry, linking it to db and api
{
args := &structs.TerminatingGatewayConfigEntry{
Name: "terminating-gateway",
Kind: structs.TerminatingGateway,
Services: []structs.LinkedService{
{Name: "db"},
{Name: "api"},
},
}
req := structs.ConfigEntryRequest{
Op: structs.ConfigEntryUpsert,
Datacenter: "dc1",
Entry: args,
WriteRequest: structs.WriteRequest{Token: "root"},
}
var out bool
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out))
require.True(t, out)
}
// Register terminating-gateway2 config entry, linking it to db and api
{
args := &structs.TerminatingGatewayConfigEntry{
Name: "terminating-gateway2",
Kind: structs.TerminatingGateway,
Services: []structs.LinkedService{
{Name: "db"},
{Name: "api"},
},
}
req := structs.ConfigEntryRequest{
Op: structs.ConfigEntryUpsert,
Datacenter: "dc1",
Entry: args,
WriteRequest: structs.WriteRequest{Token: "root"},
}
var out bool
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out))
require.True(t, out)
}
var out structs.IndexedCheckServiceNodes
// Not passing a token with service:read on Gateway leads to PermissionDenied
req := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "db",
ServiceKind: structs.ServiceKindTerminatingGateway,
}
err = msgpackrpc.CallWithCodec(codec, "Internal.ServiceGateways", &req, &out)
require.Error(t, err, acl.ErrPermissionDenied)
// Passing a token without service:read on api leads to it getting filtered out
req = structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "db",
ServiceKind: structs.ServiceKindTerminatingGateway,
QueryOptions: structs.QueryOptions{Token: token.SecretID},
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceGateways", &req, &out))
nodes := out.Nodes
require.Len(t, nodes, 1)
require.Equal(t, "foo", nodes[0].Node.Node)
require.Equal(t, structs.ServiceKindTerminatingGateway, nodes[0].Service.Kind)
require.Equal(t, "terminating-gateway", nodes[0].Service.Service)
require.Equal(t, "terminating-gateway", nodes[0].Service.ID)
require.True(t, out.QueryMeta.ResultsFilteredByACLs, "ResultsFilteredByACLs should be true")
}
func TestInternal_ServiceGatewayService_Terminating_Destination(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
dir1, s1 := testServer(t)
defer os.RemoveAll(dir1)
defer s1.Shutdown()
codec := rpcClient(t, s1)
defer codec.Close()
testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
google := structs.NodeService{
ID: "google",
Service: "google",
}
// Register service-default with conflicting destination address
{
arg := structs.ConfigEntryRequest{
Op: structs.ConfigEntryUpsert,
Datacenter: "dc1",
Entry: &structs.ServiceConfigEntry{
Name: "google",
Destination: &structs.DestinationConfig{Addresses: []string{"www.google.com"}, Port: 443},
EnterpriseMeta: *acl.DefaultEnterpriseMeta(),
},
}
var configOutput bool
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &arg, &configOutput))
require.True(t, configOutput)
}
// Register terminating-gateway config entry, linking it to google.com
{
arg := structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
ID: "terminating-gateway",
Service: "terminating-gateway",
Kind: structs.ServiceKindTerminatingGateway,
Port: 443,
},
Check: &structs.HealthCheck{
Name: "terminating connect",
Status: api.HealthPassing,
ServiceID: "terminating-gateway",
},
}
var out struct{}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &arg, &out))
}
{
args := &structs.TerminatingGatewayConfigEntry{
Name: "terminating-gateway",
Kind: structs.TerminatingGateway,
Services: []structs.LinkedService{
{
Name: "google",
},
},
}
req := structs.ConfigEntryRequest{
Op: structs.ConfigEntryUpsert,
Datacenter: "dc1",
Entry: args,
}
var configOutput bool
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &configOutput))
require.True(t, configOutput)
}
var out structs.IndexedCheckServiceNodes
req := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "google",
ServiceKind: structs.ServiceKindTerminatingGateway,
}
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Internal.ServiceGateways", &req, &out))
nodes := out.Nodes
for _, n := range nodes {
n.Node.RaftIndex = structs.RaftIndex{}
n.Service.RaftIndex = structs.RaftIndex{}
for _, m := range n.Checks {
m.RaftIndex = structs.RaftIndex{}
}
}
expect := structs.CheckServiceNodes{
structs.CheckServiceNode{
Node: &structs.Node{
Node: "foo",
RaftIndex: structs.RaftIndex{},
Address: "127.0.0.1",
Datacenter: "dc1",
Partition: acl.DefaultPartitionName,
},
Service: &structs.NodeService{
Kind: structs.ServiceKindTerminatingGateway,
ID: "terminating-gateway",
Service: "terminating-gateway",
Weights: &structs.Weights{Passing: 1, Warning: 1},
Port: 443,
Tags: []string{},
Meta: map[string]string{},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
TaggedAddresses: map[string]structs.ServiceAddress{
"consul-virtual:" + google.CompoundServiceName().String(): {Address: "240.0.0.1"},
},
RaftIndex: structs.RaftIndex{},
Address: "",
},
Checks: structs.HealthChecks{
&structs.HealthCheck{
Name: "terminating connect",
Node: "foo",
CheckID: "terminating connect",
Status: api.HealthPassing,
ServiceID: "terminating-gateway",
ServiceName: "terminating-gateway",
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
},
},
},
}
assert.Len(t, nodes, 1)
assert.Equal(t, expect, nodes)
}
func TestInternal_ExportedPeeredServices_ACLEnforcement(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
_, s := testServerWithConfig(t, testServerACLConfig)
codec := rpcClient(t, s)
require.NoError(t, s.fsm.State().PeeringWrite(1, &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testUUID(),
Name: "peer-1",
},
}))
require.NoError(t, s.fsm.State().PeeringWrite(1, &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testUUID(),
Name: "peer-2",
},
}))
require.NoError(t, s.fsm.State().EnsureConfigEntry(1, &structs.ExportedServicesConfigEntry{
Name: "default",
Services: []structs.ExportedService{
{
Name: "web",
Consumers: []structs.ServiceConsumer{
{PeerName: "peer-1"},
},
},
{
Name: "db",
Consumers: []structs.ServiceConsumer{
{PeerName: "peer-2"},
},
},
{
Name: "api",
Consumers: []structs.ServiceConsumer{
{PeerName: "peer-1"},
},
},
},
}))
type testcase struct {
name string
token string
expect map[string]structs.ServiceList
expectErr string
}
run := func(t *testing.T, tc testcase) {
var out *structs.IndexedExportedServiceList
req := structs.DCSpecificRequest{
Datacenter: "dc1",
QueryOptions: structs.QueryOptions{Token: tc.token},
}
err := msgpackrpc.CallWithCodec(codec, "Internal.ExportedPeeredServices", &req, &out)
if tc.expectErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectErr)
require.Nil(t, out)
return
}
require.NoError(t, err)
require.Len(t, out.Services, len(tc.expect))
for k, v := range tc.expect {
require.ElementsMatch(t, v, out.Services[k])
}
}
tcs := []testcase{
{
name: "can read all",
token: tokenWithRules(t, codec, TestDefaultInitialManagementToken,
`
service_prefix "" {
policy = "read"
}
`),
expect: map[string]structs.ServiceList{
"peer-1": {
structs.NewServiceName("api", nil),
structs.NewServiceName("web", nil),
},
"peer-2": {
structs.NewServiceName("db", nil),
},
},
},
{
name: "filtered",
token: tokenWithRules(t, codec, TestDefaultInitialManagementToken,
`
service "web" { policy = "read" }
service "api" { policy = "read" }
service "db" { policy = "deny" }
`),
expect: map[string]structs.ServiceList{
"peer-1": {
structs.NewServiceName("api", nil),
structs.NewServiceName("web", nil),
},
},
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
run(t, tc)
})
}
}
func tokenWithRules(t *testing.T, codec rpc.ClientCodec, mgmtToken, rules string) string {
t.Helper()
var tok *structs.ACLToken
var err error
retry.Run(t, func(r *retry.R) {
tok, err = upsertTestTokenWithPolicyRules(codec, mgmtToken, "dc1", rules)
require.NoError(r, err)
})
return tok.SecretID
}
func TestInternal_PeeredUpstreams_ACLEnforcement(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
_, s := testServerWithConfig(t, testServerACLConfig)
codec := rpcClient(t, s)
type testcase struct {
name string
token string
expectErr string
}
run := func(t *testing.T, tc testcase) {
var out *structs.IndexedPeeredServiceList
req := structs.PartitionSpecificRequest{
Datacenter: "dc1",
QueryOptions: structs.QueryOptions{Token: tc.token},
}
err := msgpackrpc.CallWithCodec(codec, "Internal.PeeredUpstreams", &req, &out)
if tc.expectErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectErr)
require.Nil(t, out)
} else {
require.NoError(t, err)
}
}
tcs := []testcase{
{
name: "can write all",
token: tokenWithRules(t, codec, TestDefaultInitialManagementToken, `
service_prefix "" {
policy = "write"
}
`),
},
{
name: "can't write",
token: tokenWithRules(t, codec, TestDefaultInitialManagementToken, ``),
expectErr: "lacks permission 'service:write' on \"any service\"",
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
run(t, tc)
})
}
}
func testUUID() string {
buf := make([]byte, 16)
if _, err := rand.Read(buf); err != nil {
panic(fmt.Errorf("failed to read random bytes: %v", err))
}
return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
buf[0:4],
buf[4:6],
buf[6:8],
buf[8:10],
buf[10:16])
}

View File

@ -23,6 +23,7 @@ import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/metadata"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/structs/aclfilter"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/logging"
@ -314,7 +315,9 @@ func (s *Server) establishLeadership(ctx context.Context) error {
s.startFederationStateAntiEntropy(ctx)
if s.config.PeeringEnabled {
s.startPeeringStreamSync(ctx)
}
s.startDeferredDeletion(ctx)
@ -385,7 +388,7 @@ func (s *Server) initializeACLs(ctx context.Context) error {
// Remove any token affected by CVE-2019-8336
if !s.InPrimaryDatacenter() {
_, token, err := s.fsm.State().ACLTokenGetBySecret(nil, redactedToken, nil)
_, token, err := s.fsm.State().ACLTokenGetBySecret(nil, aclfilter.RedactedToken, nil)
if err == nil && token != nil {
req := structs.ACLTokenBatchDeleteRequest{
TokenIDs: []string{token.AccessorID},
@ -757,7 +760,9 @@ func (s *Server) stopACLReplication() {
}
func (s *Server) startDeferredDeletion(ctx context.Context) {
if s.config.PeeringEnabled {
s.startPeeringDeferredDeletion(ctx)
}
s.startTenancyDeferredDeletion(ctx)
}
@ -1068,6 +1073,11 @@ func (s *Server) handleAliveMember(member serf.Member, nodeEntMeta *acl.Enterpri
},
}
grpcPortStr := member.Tags["grpc_port"]
if v, err := strconv.Atoi(grpcPortStr); err == nil && v > 0 {
service.Meta["grpc_port"] = grpcPortStr
}
// Attempt to join the consul server
if err := s.joinConsulServer(member, parts); err != nil {
return err
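The hunk above stores a valid advertised gRPC port in the consul service's `Meta` under `grpc_port`. A hedged sketch of how another component might read it back from the catalog entry; `svc` is a hypothetical `*structs.NodeService` for the `consul` service:

```go
// Read the advertised gRPC port back out of the catalog service metadata.
// The value is only present when the serf tag parsed to a positive integer above.
if portStr, ok := svc.Meta["grpc_port"]; ok {
	if port, err := strconv.Atoi(portStr); err == nil && port > 0 {
		// e.g. dial the server's public gRPC endpoint on this address.
		grpcAddr := net.JoinHostPort(svc.Address, portStr)
		_ = grpcAddr
	}
}
```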

View File

@ -36,7 +36,7 @@ func TestConnectCA_ConfigurationSet_ChangeKeyConfig_Primary(t *testing.T) {
keyBits int
}{
{connect.DefaultPrivateKeyType, connect.DefaultPrivateKeyBits},
{"ec", 256},
// {"ec", 256}, skip since values are same as Defaults
{"ec", 384},
{"rsa", 2048},
{"rsa", 4096},
@ -55,7 +55,7 @@ func TestConnectCA_ConfigurationSet_ChangeKeyConfig_Primary(t *testing.T) {
providerState := map[string]string{"foo": "dc1-value"}
// Initialize primary as the primary DC
dir1, srv := testServerWithConfig(t, func(c *Config) {
_, srv := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc1"
c.PrimaryDatacenter = "dc1"
c.Build = "1.6.0"
@ -63,12 +63,9 @@ func TestConnectCA_ConfigurationSet_ChangeKeyConfig_Primary(t *testing.T) {
c.CAConfig.Config["PrivateKeyBits"] = src.keyBits
c.CAConfig.Config["test_state"] = providerState
})
defer os.RemoveAll(dir1)
defer srv.Shutdown()
codec := rpcClient(t, srv)
defer codec.Close()
testrpc.WaitForLeader(t, srv.RPC, "dc1")
waitForLeaderEstablishment(t, srv)
testrpc.WaitForActiveCARoot(t, srv.RPC, "dc1", nil)
var (

View File

@ -3,31 +3,108 @@ package consul
import (
"container/ring"
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"math"
"time"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-uuid"
"golang.org/x/time/rate"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/keepalive"
grpcstatus "google.golang.org/grpc/status"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/rpc/peering"
"github.com/hashicorp/consul/agent/grpc-external/services/peerstream"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/proto/pbpeerstream"
)
var leaderExportedServicesCountKey = []string{"consul", "peering", "exported_services"}
var LeaderPeeringMetrics = []prometheus.GaugeDefinition{
{
Name: leaderExportedServicesCountKey,
Help: "A gauge that tracks how many services are exported for the peering. " +
"The labels are \"peering\" and, for enterprise, \"partition\". " +
"We emit this metric every 9 seconds",
},
}
var (
// fastConnRetryTimeout is how long we wait between retrying connections following the "fast" path
// which is triggered on specific connection errors.
fastConnRetryTimeout = 8 * time.Millisecond
// maxFastConnRetries is the maximum number of fast connection retries before we follow exponential backoff.
maxFastConnRetries = uint(5)
// maxFastRetryBackoff is the maximum amount of time we'll wait between retries following the fast path.
maxFastRetryBackoff = 8192 * time.Millisecond
)
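Taken together, these constants describe a fast path of a few 8ms retries followed by an exponential backoff that tops out at 8192ms (8ms doubled ten times). A sketch of one plausible schedule implied by those values; the actual retryLoopBackoffPeering introduced in this change may compute it differently, so this is illustrative only:

```go
// fastRetryWait is a hypothetical helper: the first maxFastConnRetries
// attempts retry almost immediately, then the wait doubles per attempt
// until it is capped at maxFastRetryBackoff.
func fastRetryWait(failures uint) time.Duration {
	if failures <= maxFastConnRetries {
		return fastConnRetryTimeout
	}
	wait := fastConnRetryTimeout
	for i := uint(0); i < failures-maxFastConnRetries && wait < maxFastRetryBackoff; i++ {
		wait *= 2
	}
	if wait > maxFastRetryBackoff {
		wait = maxFastRetryBackoff
	}
	return wait
}
```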
func (s *Server) startPeeringStreamSync(ctx context.Context) {
s.leaderRoutineManager.Start(ctx, peeringStreamsRoutineName, s.runPeeringSync)
s.leaderRoutineManager.Start(ctx, peeringStreamsMetricsRoutineName, s.runPeeringMetrics)
}
func (s *Server) runPeeringMetrics(ctx context.Context) error {
ticker := time.NewTicker(s.config.MetricsReportingInterval)
defer ticker.Stop()
logger := s.logger.Named(logging.PeeringMetrics)
defaultMetrics := metrics.Default
for {
select {
case <-ctx.Done():
logger.Info("stopping peering metrics")
// "Zero-out" the metric on exit so that when prometheus scrapes this
// metric from a non-leader, it does not get a stale value.
metrics.SetGauge(leaderExportedServicesCountKey, float32(0))
return nil
case <-ticker.C:
if err := s.emitPeeringMetricsOnce(logger, defaultMetrics()); err != nil {
s.logger.Error("error emitting peering stream metrics", "error", err)
}
}
}
}
func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metrics.Metrics) error {
_, peers, err := s.fsm.State().PeeringList(nil, *structs.NodeEnterpriseMetaInPartition(structs.WildcardSpecifier))
if err != nil {
return err
}
for _, peer := range peers {
status, found := s.peerStreamServer.StreamStatus(peer.ID)
if !found {
logger.Trace("did not find status for", "peer_name", peer.Name)
continue
}
esc := status.GetExportedServicesCount()
part := peer.Partition
labels := []metrics.Label{
{Name: "peer_name", Value: peer.Name},
{Name: "peer_id", Value: peer.ID},
}
if part != "" {
labels = append(labels, metrics.Label{Name: "partition", Value: part})
}
metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels)
}
return nil
}
func (s *Server) runPeeringSync(ctx context.Context) error {
@ -50,6 +127,7 @@ func (s *Server) runPeeringSync(ctx context.Context) error {
func (s *Server) stopPeeringStreamSync() {
// will be a no-op when not started
s.leaderRoutineManager.Stop(peeringStreamsRoutineName)
s.leaderRoutineManager.Stop(peeringStreamsMetricsRoutineName)
}
// syncPeeringsAndBlock is a long-running goroutine that is responsible for watching
@ -86,7 +164,7 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
// 3. accept new stream for [D]
// 4. list peerings [A,B,C,D]
// 5. terminate []
connectedStreams := s.peeringService.ConnectedStreams()
connectedStreams := s.peerStreamServer.ConnectedStreams()
state := s.fsm.State()
@ -132,11 +210,9 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
continue
}
status, found := s.peeringService.StreamStatus(peer.ID)
status, found := s.peerStreamServer.StreamStatus(peer.ID)
// TODO(peering): If there is new peering data and a connected stream, should we tear down the stream?
// If the data in the updated token is bad, the user wouldn't know until the old servers/certs become invalid.
// Alternatively we could do a basic Ping from the establish peering endpoint to avoid dealing with that here.
if found && status.Connected {
// Nothing to do when we already have an active stream to the peer.
continue
@ -150,7 +226,7 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
cancel()
}
if err := s.establishStream(ctx, logger, peer, cancelFns); err != nil {
if err := s.establishStream(ctx, logger, ws, peer, cancelFns); err != nil {
// TODO(peering): These errors should be reported in the peer status, otherwise they're only in the logs.
// Lockable status isn't available here though. Could report it via the peering.Service?
logger.Error("error establishing peering stream", "peer_id", peer.ID, "error", err)
@ -161,7 +237,7 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
}
}
logger.Trace("checking connected streams", "streams", s.peeringService.ConnectedStreams(), "sequence_id", seq)
logger.Trace("checking connected streams", "streams", s.peerStreamServer.ConnectedStreams(), "sequence_id", seq)
// Clean up active streams of peerings that were deleted from the state store.
// TODO(peering): This is going to trigger shutting down peerings we generated a token for. Is that OK?
@ -189,29 +265,16 @@ func (s *Server) syncPeeringsAndBlock(ctx context.Context, logger hclog.Logger,
return merr.ErrorOrNil()
}
func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer *pbpeering.Peering, cancelFns map[string]context.CancelFunc) error {
func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws memdb.WatchSet, peer *pbpeering.Peering, cancelFns map[string]context.CancelFunc) error {
logger = logger.With("peer_name", peer.Name, "peer_id", peer.ID)
tlsOption := grpc.WithInsecure()
if len(peer.PeerCAPems) > 0 {
var haveCerts bool
pool := x509.NewCertPool()
for _, pem := range peer.PeerCAPems {
if !pool.AppendCertsFromPEM([]byte(pem)) {
return fmt.Errorf("failed to parse PEM %s", pem)
}
if len(pem) > 0 {
haveCerts = true
}
}
if !haveCerts {
return fmt.Errorf("failed to build cert pool from peer CA pems")
}
cfg := tls.Config{
ServerName: peer.PeerServerName,
RootCAs: pool,
}
tlsOption = grpc.WithTransportCredentials(credentials.NewTLS(&cfg))
}
if peer.PeerID == "" {
return fmt.Errorf("expected PeerID to be non empty; the wrong end of peering is being dialed")
}
tlsOption, err := peer.TLSDialOption()
if err != nil {
return fmt.Errorf("failed to build TLS dial option from peering: %w", err)
}
// Create a ring buffer to cycle through peer addresses in the retry loop below.
@ -221,13 +284,26 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer
buffer = buffer.Next()
}
secret, err := s.fsm.State().PeeringSecretsRead(ws, peer.ID)
if err != nil {
return fmt.Errorf("failed to read secret for peering: %w", err)
}
if secret.GetStream().GetActiveSecretID() == "" {
return errors.New("missing stream secret for peering stream authorization, peering must be re-established")
}
logger.Trace("establishing stream to peer")
retryCtx, cancel := context.WithCancel(ctx)
cancelFns[peer.ID] = cancel
streamStatus, err := s.peerStreamTracker.Register(peer.ID)
if err != nil {
return fmt.Errorf("failed to register stream: %v", err)
}
// Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes.
go retryLoopBackoff(retryCtx, func() error {
go retryLoopBackoffPeering(retryCtx, logger, func() error {
// Try a new address on each iteration by advancing the ring buffer on errors.
defer func() {
buffer = buffer.Next()
@ -239,70 +315,70 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, peer
logger.Trace("dialing peer", "addr", addr)
conn, err := grpc.DialContext(retryCtx, addr,
grpc.WithContextDialer(newPeerDialer(addr)),
grpc.WithBlock(),
// TODO(peering): use a grpc.WithStatsHandler here?)
tlsOption,
// For keep alive parameters there is a larger comment in ClientConnPool.dial about that.
grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: 30 * time.Second,
Timeout: 10 * time.Second,
// send keepalive pings even if there are no active streams
PermitWithoutStream: true,
}),
)
if err != nil {
return fmt.Errorf("failed to dial: %w", err)
}
defer conn.Close()
client := pbpeering.NewPeeringServiceClient(conn)
client := pbpeerstream.NewPeerStreamServiceClient(conn)
stream, err := client.StreamResources(retryCtx)
if err != nil {
return err
}
streamReq := peering.HandleStreamRequest{
initialReq := &pbpeerstream.ReplicationMessage{
Payload: &pbpeerstream.ReplicationMessage_Open_{
Open: &pbpeerstream.ReplicationMessage_Open{
PeerID: peer.PeerID,
StreamSecretID: secret.GetStream().GetActiveSecretID(),
},
},
}
if err := stream.Send(initialReq); err != nil {
return fmt.Errorf("failed to send initial stream request: %w", err)
}
streamReq := peerstream.HandleStreamRequest{
LocalID: peer.ID,
RemoteID: peer.PeerID,
PeerName: peer.Name,
Partition: peer.Partition,
Stream: stream,
}
err = s.peeringService.HandleStream(streamReq)
err = s.peerStreamServer.HandleStream(streamReq)
// A nil error indicates that the peering was deleted and the stream needs to be gracefully shutdown.
if err == nil {
stream.CloseSend()
s.peeringService.DrainStream(streamReq)
// This will cancel the retry-er context, letting us break out of this loop when we want to shut down the stream.
s.peerStreamServer.DrainStream(streamReq)
cancel()
logger.Info("closed outbound stream")
}
return err
}, func(err error) {
// TODO(peering): These errors should be reported in the peer status, otherwise they're only in the logs.
// Lockable status isn't available here though. Could report it via the peering.Service?
logger.Error("error managing peering stream", "peer_id", peer.ID, "error", err)
})
// TODO(peering): why are we using TrackSendError here? This could also be a receive error.
streamStatus.TrackSendError(err.Error())
if isFailedPreconditionErr(err) {
logger.Debug("stream disconnected due to 'failed precondition' error; reconnecting",
"error", err)
return
}
logger.Error("error managing peering stream", "error", err)
}, peeringRetryTimeout)
return nil
}
func newPeerDialer(peerAddr string) func(context.Context, string) (net.Conn, error) {
return func(ctx context.Context, addr string) (net.Conn, error) {
d := net.Dialer{}
conn, err := d.DialContext(ctx, "tcp", peerAddr)
if err != nil {
return nil, err
}
// TODO(peering): This is going to need to be revisited. This type uses the TLS settings configured on the agent, but
// for peering we never want mutual TLS because the client peer doesn't share its CA cert.
_, err = conn.Write([]byte{byte(pool.RPCGRPC)})
if err != nil {
conn.Close()
return nil, err
}
return conn, nil
}
}
func (s *Server) startPeeringDeferredDeletion(ctx context.Context) {
s.leaderRoutineManager.Start(ctx, peeringDeletionRoutineName, s.runPeeringDeletions)
}
@ -455,3 +531,84 @@ func (s *Server) deleteTrustBundleFromPeer(ctx context.Context, limiter *rate.Li
_, err = s.raftApplyProtobuf(structs.PeeringTrustBundleDeleteType, req)
return err
}
// retryLoopBackoffPeering re-runs loopFn with a backoff on error. errFn is run whenever
// loopFn returns an error. retryTimeFn is used to calculate the time between retries on error.
// It is passed the number of errors in a row that loopFn has returned and the latest error
// from loopFn.
//
// This function is modelled off of retryLoopBackoffHandleSuccess but is specific to peering
// because peering needs to use different retry times depending on which error is returned.
// This function doesn't use a rate limiter, unlike retryLoopBackoffHandleSuccess, because
// the rate limiter is only needed in the success case when loopFn returns nil and we want to
// loop again. In the peering case, we exit on a successful loop so we don't need the limiter.
func retryLoopBackoffPeering(ctx context.Context, logger hclog.Logger, loopFn func() error, errFn func(error),
retryTimeFn func(failedAttempts uint, loopErr error) time.Duration) {
var failedAttempts uint
var err error
for {
if err = loopFn(); err != nil {
errFn(err)
if failedAttempts < math.MaxUint {
failedAttempts++
}
retryTime := retryTimeFn(failedAttempts, err)
logger.Trace("in connection retry backoff", "delay", retryTime)
timer := time.NewTimer(retryTime)
select {
case <-ctx.Done():
timer.Stop()
return
case <-timer.C:
}
continue
}
return
}
}
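
A minimal sketch of how this helper can be exercised from a test in the same package (all names and timings below are illustrative; imports assumed: context, errors, testing, time, github.com/hashicorp/go-hclog):

```go
func TestRetryLoopBackoffPeering_sketch(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	attempts := 0
	loopFn := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // triggers errFn and a retry
		}
		return nil // a nil error ends the loop
	}
	errFn := func(err error) { t.Logf("loop error: %v", err) }
	// Constant short delay so the sketch finishes quickly.
	retryTimeFn := func(failed uint, _ error) time.Duration { return 10 * time.Millisecond }

	retryLoopBackoffPeering(ctx, hclog.NewNullLogger(), loopFn, errFn, retryTimeFn)

	if attempts != 3 {
		t.Fatalf("expected 3 attempts, got %d", attempts)
	}
}
```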
// peeringRetryTimeout returns the time that should be waited between re-establishing a peering
// connection after an error. We follow the default backoff from retryLoopBackoff
// unless the error is a "failed precondition" error in which case we retry much more quickly.
// Retrying quickly is important in the case of a failed precondition error because we expect it to resolve
// quickly. For example in the case of connecting with a follower through a load balancer, we just need to retry
// until our request lands on a leader.
func peeringRetryTimeout(failedAttempts uint, loopErr error) time.Duration {
if loopErr != nil && isFailedPreconditionErr(loopErr) {
// Wait a constant time for the first number of retries.
if failedAttempts <= maxFastConnRetries {
return fastConnRetryTimeout
}
// From here, follow an exponential backoff maxing out at maxFastRetryBackoff.
// The below equation multiples the constantRetryTimeout by 2^n where n is the number of failed attempts
// we're on, starting at 1 now that we're past our maxFastConnRetries.
// For example if fastConnRetryTimeout == 8ms and maxFastConnRetries == 5, then at 6 failed retries
// we'll do 8ms * 2^1 = 16ms, then 8ms * 2^2 = 32ms, etc.
ms := fastConnRetryTimeout * (1 << (failedAttempts - maxFastConnRetries))
if ms > maxFastRetryBackoff {
return maxFastRetryBackoff
}
return ms
}
// Else we go with the default backoff from retryLoopBackoff.
if (1 << failedAttempts) < maxRetryBackoff {
return (1 << failedAttempts) * time.Second
}
return time.Duration(maxRetryBackoff) * time.Second
}
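
To make the schedule concrete, the sketch below reimplements only the failed-precondition branch with assumed constant values (fastConnRetryTimeout, maxFastConnRetries, and maxFastRetryBackoff are defined elsewhere in the package; the numbers here are illustrative, taken from the comment above where possible) and prints the delay per attempt:

```go
package main

import (
	"fmt"
	"time"
)

const (
	fastConnRetryTimeout = 8 * time.Millisecond    // assumed value
	maxFastConnRetries   = uint(5)                 // assumed value
	maxFastRetryBackoff  = 8192 * time.Millisecond // assumed value
)

// fastRetry mirrors the failed-precondition branch of peeringRetryTimeout.
func fastRetry(failedAttempts uint) time.Duration {
	if failedAttempts <= maxFastConnRetries {
		return fastConnRetryTimeout
	}
	d := fastConnRetryTimeout * (1 << (failedAttempts - maxFastConnRetries))
	if d > maxFastRetryBackoff {
		return maxFastRetryBackoff
	}
	return d
}

func main() {
	for n := uint(1); n <= 10; n++ {
		fmt.Printf("attempt %2d -> wait %v\n", n, fastRetry(n))
	}
	// attempts 1-5 wait 8ms; attempt 6 waits 16ms, attempt 7 waits 32ms,
	// and so on, capped at maxFastRetryBackoff.
}
```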
// isFailedPreconditionErr returns true if err is a gRPC error with code FailedPrecondition.
func isFailedPreconditionErr(err error) bool {
if err == nil {
return false
}
grpcErr, ok := grpcstatus.FromError(err)
if !ok {
return false
}
return grpcErr.Code() == codes.FailedPrecondition
}
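
For illustration, this is the shape of error the helper matches. A server that has lost leadership might reply with a FailedPrecondition status (the message text below is made up), which the retry logic above treats as a signal to reconnect quickly:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	grpcstatus "google.golang.org/grpc/status"
)

func main() {
	// A FailedPrecondition error like one a non-leader server might return.
	err := grpcstatus.Error(codes.FailedPrecondition, "node is no longer the leader")

	// The same check isFailedPreconditionErr performs.
	st, ok := grpcstatus.FromError(err)
	fmt.Println(ok && st.Code() == codes.FailedPrecondition) // true
}
```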

File diff suppressed because it is too large


@ -1,10 +1,12 @@
package consul
import (
"github.com/hashicorp/consul-net-rpc/net/rpc"
"github.com/hashicorp/go-hclog"
"google.golang.org/grpc"
"github.com/hashicorp/consul-net-rpc/net/rpc"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/router"
"github.com/hashicorp/consul/agent/rpc/middleware"
@ -13,6 +15,7 @@ import (
)
type Deps struct {
EventPublisher *stream.EventPublisher
Logger hclog.InterceptLogger
TLSConfigurator *tlsutil.Configurator
Tokens *token.Store


@ -7,51 +7,58 @@ import (
"strconv"
"sync"
"google.golang.org/grpc"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/grpc-external/services/peerstream"
"github.com/hashicorp/consul/agent/rpc/peering"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
)
type peeringBackend struct {
type PeeringBackend struct {
// TODO(peering): accept a smaller interface; maybe just funcs from the server that we actually need: DC, IsLeader, etc
srv *Server
connPool GRPCClientConner
apply *peeringApply
addr *leaderAddr
leaderAddrLock sync.RWMutex
leaderAddr string
}
var _ peering.Backend = (*peeringBackend)(nil)
var _ peering.Backend = (*PeeringBackend)(nil)
var _ peerstream.Backend = (*PeeringBackend)(nil)
// NewPeeringBackend returns a peering.Backend implementation that is bound to the given server.
func NewPeeringBackend(srv *Server, connPool GRPCClientConner) peering.Backend {
return &peeringBackend{
func NewPeeringBackend(srv *Server) *PeeringBackend {
return &PeeringBackend{
srv: srv,
connPool: connPool,
apply: &peeringApply{srv: srv},
addr: &leaderAddr{},
}
}
// Forward should not be used to initiate forwarding over bidirectional streams
func (b *peeringBackend) Forward(info structs.RPCInfo, f func(*grpc.ClientConn) error) (handled bool, err error) {
// Only forward the request if the dc in the request matches the server's datacenter.
if info.RequestDatacenter() != "" && info.RequestDatacenter() != b.srv.config.Datacenter {
return false, fmt.Errorf("requests to generate peering tokens cannot be forwarded to remote datacenters")
// SetLeaderAddress is called on a raft.LeaderObservation in a goroutine
// in the consul server; see trackLeaderChanges()
func (b *PeeringBackend) SetLeaderAddress(addr string) {
b.leaderAddrLock.Lock()
b.leaderAddr = addr
b.leaderAddrLock.Unlock()
}
return b.srv.ForwardGRPC(b.connPool, info, f)
// GetLeaderAddress provides the best hint for the current address of the
// leader. There is no guarantee that this is the actual address of the
// leader.
func (b *PeeringBackend) GetLeaderAddress() string {
b.leaderAddrLock.RLock()
defer b.leaderAddrLock.RUnlock()
return b.leaderAddr
}
// GetAgentCACertificates gets the server's raw CA data from its TLS Configurator.
func (b *peeringBackend) GetAgentCACertificates() ([]string, error) {
func (b *PeeringBackend) GetAgentCACertificates() ([]string, error) {
// TODO(peering): handle empty CA pems
return b.srv.tlsConfigurator.ManualCAPems(), nil
return b.srv.tlsConfigurator.GRPCManualCAPems(), nil
}
// GetServerAddresses looks up server node addresses from the state store.
func (b *peeringBackend) GetServerAddresses() ([]string, error) {
func (b *PeeringBackend) GetServerAddresses() ([]string, error) {
state := b.srv.fsm.State()
_, nodes, err := state.ServiceNodes(nil, "consul", structs.DefaultEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
if err != nil {
@ -59,19 +66,26 @@ func (b *peeringBackend) GetServerAddresses() ([]string, error) {
}
var addrs []string
for _, node := range nodes {
addrs = append(addrs, node.Address+":"+strconv.Itoa(node.ServicePort))
grpcPortStr := node.ServiceMeta["grpc_port"]
if v, err := strconv.Atoi(grpcPortStr); err != nil || v < 1 {
continue // skip server that isn't exporting public gRPC properly
}
addrs = append(addrs, node.Address+":"+grpcPortStr)
}
if len(addrs) == 0 {
return nil, fmt.Errorf("a grpc bind port must be specified in the configuration for all servers")
}
return addrs, nil
}
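
A self-contained sketch of the filtering above, with made-up catalog entries, showing that servers without a usable grpc_port service-meta value are skipped:

```go
package main

import (
	"fmt"
	"strconv"
)

type serverNode struct {
	Address     string
	ServiceMeta map[string]string
}

func main() {
	nodes := []serverNode{
		{Address: "10.0.0.1", ServiceMeta: map[string]string{"grpc_port": "8502"}},
		{Address: "10.0.0.2", ServiceMeta: map[string]string{}}, // no public gRPC port: skipped
	}

	var addrs []string
	for _, node := range nodes {
		grpcPortStr := node.ServiceMeta["grpc_port"]
		if v, err := strconv.Atoi(grpcPortStr); err != nil || v < 1 {
			continue
		}
		addrs = append(addrs, node.Address+":"+grpcPortStr)
	}
	fmt.Println(addrs) // [10.0.0.1:8502]
}
```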
// GetServerName returns the SNI to be returned in the peering token data which
// will be used by peers when establishing peering connections over TLS.
func (b *peeringBackend) GetServerName() string {
func (b *PeeringBackend) GetServerName() string {
return b.srv.tlsConfigurator.ServerSNI(b.srv.config.Datacenter, "")
}
// EncodeToken encodes a peering token as a base64-encoded representation of JSON (for now).
func (b *peeringBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error) {
func (b *PeeringBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error) {
jsonToken, err := json.Marshal(tok)
if err != nil {
return nil, fmt.Errorf("failed to marshal token: %w", err)
@ -80,7 +94,7 @@ func (b *peeringBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error)
}
// DecodeToken decodes a peering token from a base64-encoded JSON byte array (for now).
func (b *peeringBackend) DecodeToken(tokRaw []byte) (*structs.PeeringToken, error) {
func (b *PeeringBackend) DecodeToken(tokRaw []byte) (*structs.PeeringToken, error) {
tokJSONRaw, err := base64.StdEncoding.DecodeString(string(tokRaw))
if err != nil {
return nil, fmt.Errorf("failed to decode token: %w", err)
@ -92,59 +106,28 @@ func (b *peeringBackend) DecodeToken(tokRaw []byte) (*structs.PeeringToken, erro
return &tok, nil
}
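
The encode/decode pair above is a plain JSON-then-base64 round trip. The sketch below shows the same round trip with a trimmed-down stand-in struct (the field names are assumptions for illustration, not the real structs.PeeringToken definition):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// peeringTokenSketch is a hypothetical stand-in for structs.PeeringToken.
type peeringTokenSketch struct {
	ServerName      string   `json:"ServerName"`
	ServerAddresses []string `json:"ServerAddresses"`
	PeerID          string   `json:"PeerID"`
}

func main() {
	tok := peeringTokenSketch{
		ServerName:      "server.dc1.peering.example",
		ServerAddresses: []string{"10.0.0.1:8502"},
		PeerID:          "hypothetical-peer-id",
	}

	// Encode: JSON, then base64 (mirrors EncodeToken).
	jsonTok, err := json.Marshal(tok)
	if err != nil {
		panic(err)
	}
	encoded := base64.StdEncoding.EncodeToString(jsonTok)

	// Decode: base64, then JSON (mirrors DecodeToken).
	raw, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}
	var out peeringTokenSketch
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}
```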
func (s peeringBackend) Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) {
func (s *PeeringBackend) Subscribe(req *stream.SubscribeRequest) (*stream.Subscription, error) {
return s.srv.publisher.Subscribe(req)
}
func (b *peeringBackend) Store() peering.Store {
func (b *PeeringBackend) Store() peering.Store {
return b.srv.fsm.State()
}
func (b *peeringBackend) Apply() peering.Apply {
return b.apply
}
func (b *peeringBackend) LeaderAddress() peering.LeaderAddress {
return b.addr
}
func (b *peeringBackend) EnterpriseCheckPartitions(partition string) error {
func (b *PeeringBackend) EnterpriseCheckPartitions(partition string) error {
return b.enterpriseCheckPartitions(partition)
}
func (b *peeringBackend) EnterpriseCheckNamespaces(namespace string) error {
func (b *PeeringBackend) EnterpriseCheckNamespaces(namespace string) error {
return b.enterpriseCheckNamespaces(namespace)
}
func (b *peeringBackend) IsLeader() bool {
func (b *PeeringBackend) IsLeader() bool {
return b.srv.IsLeader()
}
type leaderAddr struct {
lock sync.RWMutex
leaderAddr string
}
func (m *leaderAddr) Set(addr string) {
m.lock.Lock()
defer m.lock.Unlock()
m.leaderAddr = addr
}
func (m *leaderAddr) Get() string {
m.lock.RLock()
defer m.lock.RUnlock()
return m.leaderAddr
}
type peeringApply struct {
srv *Server
}
func (a *peeringApply) CheckPeeringUUID(id string) (bool, error) {
state := a.srv.fsm.State()
func (b *PeeringBackend) CheckPeeringUUID(id string) (bool, error) {
state := b.srv.fsm.State()
if _, existing, err := state.PeeringReadByID(nil, id); err != nil {
return false, err
} else if existing != nil {
@ -154,31 +137,41 @@ func (a *peeringApply) CheckPeeringUUID(id string) (bool, error) {
return true, nil
}
func (a *peeringApply) PeeringWrite(req *pbpeering.PeeringWriteRequest) error {
_, err := a.srv.raftApplyProtobuf(structs.PeeringWriteType, req)
func (b *PeeringBackend) ValidateProposedPeeringSecret(id string) (bool, error) {
return b.srv.fsm.State().ValidateProposedPeeringSecretUUID(id)
}
func (b *PeeringBackend) PeeringSecretsWrite(req *pbpeering.PeeringSecrets) error {
_, err := b.srv.raftApplyProtobuf(structs.PeeringSecretsWriteType, req)
return err
}
func (b *PeeringBackend) PeeringWrite(req *pbpeering.PeeringWriteRequest) error {
_, err := b.srv.raftApplyProtobuf(structs.PeeringWriteType, req)
return err
}
// TODO(peering): This needs RPC metrics interceptor since it's not triggered by an RPC.
func (a *peeringApply) PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error {
_, err := a.srv.raftApplyProtobuf(structs.PeeringTerminateByIDType, req)
func (b *PeeringBackend) PeeringTerminateByID(req *pbpeering.PeeringTerminateByIDRequest) error {
_, err := b.srv.raftApplyProtobuf(structs.PeeringTerminateByIDType, req)
return err
}
func (a *peeringApply) PeeringTrustBundleWrite(req *pbpeering.PeeringTrustBundleWriteRequest) error {
_, err := a.srv.raftApplyProtobuf(structs.PeeringTrustBundleWriteType, req)
func (b *PeeringBackend) PeeringTrustBundleWrite(req *pbpeering.PeeringTrustBundleWriteRequest) error {
_, err := b.srv.raftApplyProtobuf(structs.PeeringTrustBundleWriteType, req)
return err
}
func (a *peeringApply) CatalogRegister(req *structs.RegisterRequest) error {
_, err := a.srv.leaderRaftApply("Catalog.Register", structs.RegisterRequestType, req)
func (b *PeeringBackend) CatalogRegister(req *structs.RegisterRequest) error {
_, err := b.srv.leaderRaftApply("Catalog.Register", structs.RegisterRequestType, req)
return err
}
func (a *peeringApply) CatalogDeregister(req *structs.DeregisterRequest) error {
_, err := a.srv.leaderRaftApply("Catalog.Deregister", structs.DeregisterRequestType, req)
func (b *PeeringBackend) CatalogDeregister(req *structs.DeregisterRequest) error {
_, err := b.srv.leaderRaftApply("Catalog.Deregister", structs.DeregisterRequestType, req)
return err
}
var _ peering.Apply = (*peeringApply)(nil)
var _ peering.LeaderAddress = (*leaderAddr)(nil)
func (b *PeeringBackend) ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzCtx *acl.AuthorizerContext) (resolver.Result, error) {
return b.srv.ResolveTokenAndDefaultMeta(token, entMeta, authzCtx)
}


@ -8,14 +8,14 @@ import (
"strings"
)
func (b *peeringBackend) enterpriseCheckPartitions(partition string) error {
func (b *PeeringBackend) enterpriseCheckPartitions(partition string) error {
if partition == "" || strings.EqualFold(partition, "default") {
return nil
}
return fmt.Errorf("Partitions are a Consul Enterprise feature")
}
func (b *peeringBackend) enterpriseCheckNamespaces(namespace string) error {
func (b *PeeringBackend) enterpriseCheckNamespaces(namespace string) error {
if namespace == "" || strings.EqualFold(namespace, "default") {
return nil
}


@ -42,7 +42,6 @@ func TestPeeringBackend_RejectsPartition(t *testing.T) {
peeringClient := pbpeering.NewPeeringServiceClient(conn)
req := pbpeering.GenerateTokenRequest{
Datacenter: "dc1",
Partition: "test",
}
_, err = peeringClient.GenerateToken(ctx, &req)
@ -77,7 +76,6 @@ func TestPeeringBackend_IgnoresDefaultPartition(t *testing.T) {
peeringClient := pbpeering.NewPeeringServiceClient(conn)
req := pbpeering.GenerateTokenRequest{
Datacenter: "dc1",
PeerName: "my-peer",
Partition: "DeFaUlT",
}


@ -15,43 +15,6 @@ import (
"github.com/hashicorp/consul/testrpc"
)
func TestPeeringBackend_DoesNotForwardToDifferentDC(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
_, s1 := testServerDC(t, "dc1")
_, s2 := testServerDC(t, "dc2")
joinWAN(t, s2, s1)
testrpc.WaitForLeader(t, s1.RPC, "dc1")
testrpc.WaitForLeader(t, s2.RPC, "dc2")
// make a grpc client to dial s2 directly
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel)
conn, err := gogrpc.DialContext(ctx, s2.config.RPCAddr.String(),
gogrpc.WithContextDialer(newServerDialer(s2.config.RPCAddr.String())),
gogrpc.WithInsecure(),
gogrpc.WithBlock())
require.NoError(t, err)
t.Cleanup(func() { conn.Close() })
peeringClient := pbpeering.NewPeeringServiceClient(conn)
// GenerateToken request should fail against dc1, because we are dialing dc2. The GenerateToken request should never be forwarded across datacenters.
req := pbpeering.GenerateTokenRequest{
PeerName: "peer1-usw1",
Datacenter: "dc1",
}
_, err = peeringClient.GenerateToken(ctx, &req)
require.Error(t, err)
require.Contains(t, err.Error(), "requests to generate peering tokens cannot be forwarded to remote datacenters")
}
func TestPeeringBackend_ForwardToLeader(t *testing.T) {
t.Parallel()
@ -86,7 +49,6 @@ func TestPeeringBackend_ForwardToLeader(t *testing.T) {
testutil.RunStep(t, "forward a write", func(t *testing.T) {
// Do the grpc Write call to server2
req := pbpeering.GenerateTokenRequest{
Datacenter: "dc1",
PeerName: "foo",
}
_, err := peeringClient.GenerateToken(ctx, &req)


@ -22,7 +22,7 @@ var (
},
Service: structs.ServiceQuery{
Service: "${name.full}",
Failover: structs.QueryDatacenterOptions{
Failover: structs.QueryFailoverOptions{
Datacenters: []string{
"${name.full}",
"${name.prefix}",
@ -69,7 +69,7 @@ var (
},
Service: structs.ServiceQuery{
Service: "${name.full}",
Failover: structs.QueryDatacenterOptions{
Failover: structs.QueryFailoverOptions{
Datacenters: []string{
"dc1",
"dc2",

Some files were not shown because too many files have changed in this diff