Merge branch 'main'

Chris Thain 2022-06-20 09:13:51 -07:00
commit c258930569
1319 changed files with 51666 additions and 17615 deletions

.changelog/10996.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
ui: removed external dependencies for serving UI assets in favor of Go's native embed capabilities
```

.changelog/12722.txt (new file)

@@ -0,0 +1,3 @@
```release-note:feature
checks: add UDP health checks.
```
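A minimal sketch of registering one such check against a local agent; the `UDP` field name is assumed to mirror the existing TCP checks:

```bash
# Hypothetical example: register a UDP health check via the agent HTTP API.
curl -X PUT http://127.0.0.1:8500/v1/agent/check/register \
  -d '{"Name": "dns-udp", "UDP": "127.0.0.1:53", "Interval": "10s", "Timeout": "2s"}'
```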

.changelog/12914.txt (new file)

@@ -0,0 +1,3 @@
```release-note:enhancement
api: add the ability to specify a path prefix for when consul is behind a reverse proxy or API gateway
```

.changelog/13001.txt (new file)

@@ -0,0 +1,3 @@
```release-note:enhancement
api: `merge-central-config` query parameter support added to some catalog and health endpoints to view a fully resolved service definition (especially when not written into the catalog that way).
```

.changelog/13012.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
proxycfg: Fixed a minor bug where configuring a terminating gateway would watch too many service resolvers and waste resources on filtering.
```

.changelog/13051.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
deps: Update go-grpc/grpc, resolving connection memory leak
```

.changelog/13062.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
serf: upgrade serf to v0.9.8 which fixes a bug that crashes Consul when serf keyrings are listed
```

.changelog/13071.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
Fixed a bug where configuring an `add_headers` directive named `Host` did not set the header for the `v1/internal/ui/metrics-proxy/` endpoint.
```

.changelog/13091.txt (new file)

@@ -0,0 +1,5 @@
```release-note:improvement
config: introduce `telemetry.retry_failed_connection` in agent configuration to
retry on failed connection to any telemetry backend. This prevents the agent from
exiting if the given DogStatsD DNS name is unresolvable, for example.
```
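A sketch of what enabling this could look like in an agent configuration, assuming the new flag sits alongside the existing telemetry options such as `dogstatsd_addr`:

```bash
# Hypothetical agent config (HCL written from bash for illustration).
cat > telemetry.hcl <<'EOF'
telemetry {
  dogstatsd_addr          = "dogstatsd.internal:8125"
  retry_failed_connection = true
}
EOF
consul agent -dev -config-file=telemetry.hcl
```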

.changelog/13118.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
config: fix backwards compatibility bug where setting the (deprecated) top-level `verify_incoming` option would enable TLS client authentication on the gRPC port
```

.changelog/13127.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
fix a bug that caused an error when creating `grpc` or `http2` ingress gateway listeners with multiple services
```

.changelog/13143.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
connect: Added a `max_inbound_connections` setting to service-defaults for limiting the number of concurrent inbound connections to each service instance.
```
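A sketch of setting this through a service-defaults config entry; the exact field casing is an assumption:

```bash
# Hypothetical example: cap concurrent inbound connections per "web" instance.
cat > web-defaults.hcl <<'EOF'
Kind                  = "service-defaults"
Name                  = "web"
MaxInboundConnections = 100
EOF
consul config write web-defaults.hcl
```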

.changelog/13183.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
ui: Re-instate '...' icon for row actions
```

.changelog/13256.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
agent: Fixed a bug in HTTP handlers where URLs were being decoded twice
```

.changelog/13304.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
telemetry: Added a `consul.server.isLeader` metric to track if a server is a leader or not.
```
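A quick way to spot-check the new gauge on a running server, assuming it is exported like the other agent gauges:

```bash
# The gauge should report 1 on the current leader and 0 on followers.
curl -s http://127.0.0.1:8500/v1/agent/metrics | \
  jq '.Gauges[] | select(.Name == "consul.server.isLeader")'
```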

.changelog/13344.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
kvs: Fixed a bug where query options were not being applied to KVS.Get RPC operations.
```

.changelog/13357.txt (new file)

@@ -0,0 +1,4 @@
```release-note:feature
agent: Added information about the build date alongside other version information for Consul. The `/agent/self` endpoint and the `consul version` command were extended
to report this. The agent also reports the build date in its log on startup.
```
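A sketch of where the new information should surface; the exact `/agent/self` field name is an assumption:

```bash
consul version                                  # now includes the build date
curl -s http://127.0.0.1:8500/v1/agent/self | jq '.Config.BuildDate'
```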

.changelog/13394.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
ui: upgrade ember-composable-helpers to v5.x
```

.changelog/13409.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
ui: Fix incorrect text on certain page empty states
```

.changelog/13421.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
dns: Added support for specifying admin partition in node lookups.
```
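A sketch of the lookup shape, assuming partitions are addressed with the `.ap` DNS label used elsewhere:

```bash
# Hypothetical example: resolve node "node-1" in the "team-1" admin partition
# through the agent's DNS interface.
dig @127.0.0.1 -p 8600 node-1.node.team-1.ap.consul
```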

.changelog/13431.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
connect: Update Envoy support matrix to latest patch releases (1.22.2, 1.21.3, 1.20.4, 1.19.5)
```

.changelog/13450.txt (new file)

@@ -0,0 +1,3 @@
```release-note:enhancement
api: `merge-central-config` query parameter support added to `/catalog/node-services/:node-name` API, to view a fully resolved service definition (especially when not written into the catalog that way).
```
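For illustration, a hedged example against the endpoint named above (node name is a placeholder):

```bash
# Returns the node's services with centrally configured defaults merged in.
curl -s 'http://127.0.0.1:8500/v1/catalog/node-services/my-node?merge-central-config' | jq .
```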

@@ -4,4 +4,7 @@ export GIT_COMMIT=$(git rev-parse --short HEAD)
 export GIT_COMMIT_YEAR=$(git show -s --format=%cd --date=format:%Y HEAD)
 export GIT_DIRTY=$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)
 export GIT_IMPORT=github.com/hashicorp/consul/version
-export GOLDFLAGS="-X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}${GIT_DIRTY}"
+# we're using this for build date because it's stable across platform builds
+# the env -i and -noprofile are used to ensure we don't try to recursively call this profile when starting bash
+export GIT_DATE=$(env -i /bin/bash --noprofile -norc ${CIRCLE_WORKING_DIRECTORY}/build-support/scripts/build-date.sh)
+export GOLDFLAGS="-X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}${GIT_DIRTY} -X ${GIT_IMPORT}.BuildDate=${GIT_DATE}"

@@ -28,8 +28,9 @@ references:
 # workflows section for go-test-lib jobs.
 go: &GOLANG_IMAGE docker.mirror.hashicorp.services/cimg/go:1.18.1
 ember: &EMBER_IMAGE docker.mirror.hashicorp.services/circleci/node:14-browsers
+ubuntu: &UBUNTU_CI_IMAGE ubuntu-2004:202201-02
 cache:
-yarn: &YARN_CACHE_KEY consul-ui-v7-{{ checksum "ui/yarn.lock" }}
+yarn: &YARN_CACHE_KEY consul-ui-v8-{{ checksum "ui/yarn.lock" }}
 steps:
 install-gotestsum: &install-gotestsum
@@ -167,6 +168,14 @@ jobs:
 - run: go install github.com/hashicorp/lint-consul-retry@master && lint-consul-retry
 - run: *notify-slack-failure
+lint-enums:
+docker:
+- image: *GOLANG_IMAGE
+steps:
+- checkout
+- run: go install github.com/reillywatson/enumcover/cmd/enumcover@master && enumcover ./...
+- run: *notify-slack-failure
 lint:
 description: "Run golangci-lint"
 parameters:
@@ -184,9 +193,7 @@ jobs:
 - run: go env
 - run:
 name: Install golangci-lint
-command: |
-download=https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh
-wget -O- -q $download | sh -x -s -- -d -b /home/circleci/go/bin v1.45.2
+command: make lint-tools
 - run: go mod download
 - run:
 name: lint
@@ -238,10 +245,13 @@ jobs:
 echo "Generated code was not updated correctly"
 exit 1
 fi
+- run:
+name: "Protobuf Lint"
+command: make proto-lint
 go-test-arm64:
 machine:
-image: ubuntu-2004:202101-01
+image: *UBUNTU_CI_IMAGE
 resource_class: arm.large
 parallelism: 4
 environment:
@@ -253,7 +263,7 @@ jobs:
 steps:
 - checkout
 - run:
 command: |
 sudo rm -rf /usr/local/go
 wget https://dl.google.com/go/go${GO_VERSION}.linux-arm64.tar.gz
 sudo tar -C /usr/local -xzvf go${GO_VERSION}.linux-arm64.tar.gz
@@ -547,17 +557,17 @@ jobs:
 # Run integration tests on nomad/v0.8.7
 nomad-integration-0_8:
 docker:
-- image: docker.mirror.hashicorp.services/circleci/golang:1.10
+- image: docker.mirror.hashicorp.services/cimg/go:1.10
 environment:
 <<: *ENVIRONMENT
-NOMAD_WORKING_DIR: &NOMAD_WORKING_DIR /go/src/github.com/hashicorp/nomad
+NOMAD_WORKING_DIR: &NOMAD_WORKING_DIR /home/circleci/go/src/github.com/hashicorp/nomad
 NOMAD_VERSION: v0.8.7
 steps: &NOMAD_INTEGRATION_TEST_STEPS
 - run: git clone https://github.com/hashicorp/nomad.git --branch ${NOMAD_VERSION} ${NOMAD_WORKING_DIR}
 # get consul binary
 - attach_workspace:
-at: /go/bin
+at: /home/circleci/go/bin
 # make dev build of nomad
 - run:
@@ -587,10 +597,10 @@ jobs:
 # run integration tests on nomad/main
 nomad-integration-main:
 docker:
-- image: docker.mirror.hashicorp.services/circleci/golang:1.17 # TODO: replace with cimg/go (requires steps update)
+- image: docker.mirror.hashicorp.services/cimg/go:1.18
 environment:
 <<: *ENVIRONMENT
-NOMAD_WORKING_DIR: /go/src/github.com/hashicorp/nomad
+NOMAD_WORKING_DIR: /home/circleci/go/src/github.com/hashicorp/nomad
 NOMAD_VERSION: main
 steps: *NOMAD_INTEGRATION_TEST_STEPS
@@ -662,23 +672,6 @@ jobs:
 - packages/consul-ui/dist
 - run: *notify-slack-failure
-# build static-assets file
-build-static-assets:
-docker:
-- image: *GOLANG_IMAGE
-steps:
-- checkout
-- attach_workspace:
-at: ./pkg
-- run: mv pkg/packages/consul-ui/dist pkg/web_ui # 'make static-assets' looks for the 'pkg/web_ui' path
-- run: make tools
-- run: make static-assets
-- persist_to_workspace:
-root: .
-paths:
-- ./agent/uiserver/bindata_assetfs.go
-- run: *notify-slack-failure
 # commits static assets to git
 publish-static-assets:
 docker:
@@ -691,7 +684,12 @@ jobs:
 - attach_workspace:
 at: .
 - run:
-name: commit agent/uiserver/bindata_assetfs.go if there are UI changes
+name: move compiled ui files to agent/uiserver
+command: |
+rm -rf agent/uiserver/dist
+mv packages/consul-ui/dist agent/uiserver
+- run:
+name: commit agent/uiserver/dist/ if there are UI changes
 command: |
 # check if there are any changes in ui/
 # if there are, we commit the ui static asset file
@@ -705,8 +703,8 @@ jobs:
 git checkout -B ci/main-assetfs-build main
 short_sha=$(git rev-parse --short HEAD)
-git add agent/uiserver/bindata_assetfs.go
-git commit -m "auto-updated agent/uiserver/bindata_assetfs.go from commit ${short_sha}"
+git add agent/uiserver/dist/
+git commit -m "auto-updated agent/uiserver/dist/ from commit ${short_sha}"
 git push --force origin ci/main-assetfs-build
 else
 echo "no UI changes so no static assets to publish"
@@ -800,10 +798,10 @@ jobs:
 working_directory: ui/packages/consul-ui
 command: make test-coverage-ci
 - run: *notify-slack-failure
 compatibility-integration-test:
 machine:
-image: ubuntu-2004:202101-01
+image: *UBUNTU_CI_IMAGE
 docker_layer_caching: true
 parallelism: 1
 steps:
@@ -824,14 +822,19 @@ jobs:
 - run:
 name: Compatibility Integration Tests
 command: |
-subtests=$(ls -d test/integration/consul-container/*/ | grep -v libs | xargs -n 1 basename | circleci tests split)
-echo "Running $(echo $subtests | wc -w) subtests"
-echo "$subtests"
-subtests_pipe_sepr=$(echo "$subtests" | xargs | sed 's/ /|/g')
 mkdir -p /tmp/test-results/
-docker run consul:local consul version
 cd ./test/integration/consul-container
-gotestsum -- -timeout=30m ./$subtests_pipe_sepr --target-version local --latest-version latest
+docker run --rm consul:local consul version
+gotestsum \
+--format=short-verbose \
+--debug \
+--rerun-fails=3 \
+--packages="./..." \
+-- \
+-timeout=30m \
+./... \
+--target-version local \
+--latest-version latest
 ls -lrt
 environment:
 # this is needed because of incompatibility between RYUK container and circleci
@@ -848,14 +851,14 @@ jobs:
 - store_artifacts:
 path: *TEST_RESULTS_DIR
 - run: *notify-slack-failure
-envoy-integration-test-1_19_3: &ENVOY_TESTS
+envoy-integration-test-1_19_5: &ENVOY_TESTS
 machine:
-image: ubuntu-2004:202201-02
+image: *UBUNTU_CI_IMAGE
 parallelism: 4
 resource_class: medium
 environment:
-ENVOY_VERSION: "1.19.3"
+ENVOY_VERSION: "1.19.5"
 steps: &ENVOY_INTEGRATION_TEST_STEPS
 - checkout
 # Get go binary from workspace
@@ -888,20 +891,20 @@ jobs:
 path: *TEST_RESULTS_DIR
 - run: *notify-slack-failure
-envoy-integration-test-1_20_2:
+envoy-integration-test-1_20_4:
 <<: *ENVOY_TESTS
 environment:
-ENVOY_VERSION: "1.20.2"
+ENVOY_VERSION: "1.20.4"
-envoy-integration-test-1_21_1:
+envoy-integration-test-1_21_3:
 <<: *ENVOY_TESTS
 environment:
-ENVOY_VERSION: "1.21.1"
+ENVOY_VERSION: "1.21.3"
-envoy-integration-test-1_22_0:
+envoy-integration-test-1_22_2:
 <<: *ENVOY_TESTS
 environment:
-ENVOY_VERSION: "1.22.0"
+ENVOY_VERSION: "1.22.2"
 # run integration tests for the connect ca providers
 test-connect-ca-providers:
@@ -927,19 +930,6 @@ jobs:
 path: *TEST_RESULTS_DIR
 - run: *notify-slack-failure
-# only runs on main: checks latest commit to see if the PR associated has a backport/* or docs* label to cherry-pick
-cherry-picker:
-docker:
-- image: docker.mirror.hashicorp.services/alpine:3.12
-steps:
-- run: apk add --no-cache --no-progress git bash curl ncurses jq openssh-client
-- checkout
-- add_ssh_keys: # needs a key to push cherry-picked commits back to github
-fingerprints:
-- "fc:55:84:15:0a:1d:c8:e9:06:d0:e8:9c:7b:a9:b7:31"
-- run: .circleci/scripts/cherry-picker.sh
-- run: *notify-slack-failure
 trigger-oss-merge:
 docker:
 - image: docker.mirror.hashicorp.services/alpine:3.12
@@ -987,6 +977,7 @@ jobs:
 LOCAL_COMMIT_SHA="<< pipeline.parameters.commit >>"
 fi
 echo "export LOCAL_COMMIT_SHA=${LOCAL_COMMIT_SHA}" >> $BASH_ENV
+git checkout ${LOCAL_COMMIT_SHA}
 short_ref=$(git rev-parse --short ${LOCAL_COMMIT_SHA})
 echo "export TF_VAR_ami_owners=$LOAD_TEST_AMI_OWNERS" >> $BASH_ENV
@@ -1006,7 +997,8 @@ jobs:
 working_directory: .circleci/terraform/load-test
 name: terraform init
 command: |
-echo "commit is ${LOCAL_COMMIT_SHA}"
+short_ref=$(git rev-parse --short HEAD)
+echo "Testing commit id: $short_ref"
 terraform init \
 -backend-config="bucket=${BUCKET}" \
 -backend-config="key=${LOCAL_COMMIT_SHA}" \
@@ -1022,7 +1014,7 @@ jobs:
 when: always
 name: terraform destroy
 command: |
-terraform destroy -auto-approve
+for i in $(seq 1 5); do terraform destroy -auto-approve && s=0 && break || s=$? && sleep 20; done; (exit $s)
 - run: *notify-slack-failure
 # The noop job is a used as a very fast job in the verify-ci workflow because every workflow
@@ -1038,7 +1030,7 @@ workflows:
 # verify-ci is a no-op workflow that must run on every PR. It is used in a
 # branch protection rule to detect when CI workflows are not running.
 verify-ci:
-jobs: [ noop ]
+jobs: [noop]
 go-tests:
 unless: << pipeline.parameters.trigger-load-test >>
@@ -1051,6 +1043,7 @@ workflows:
 - /^docs\/.*/
 - /^ui\/.*/
 - check-generated-protobuf: *filter-ignore-non-go-branches
+- lint-enums: *filter-ignore-non-go-branches
 - lint-consul-retry: *filter-ignore-non-go-branches
 - lint: *filter-ignore-non-go-branches
 - lint:
@@ -1061,17 +1054,17 @@ workflows:
 - go-test-arm64: *filter-ignore-non-go-branches
 - dev-build: *filter-ignore-non-go-branches
 - go-test:
-requires: [ dev-build ]
+requires: [dev-build]
 - go-test-lib:
 name: "go-test-api go1.17"
 path: api
 go-version: "1.17"
-requires: [ dev-build ]
+requires: [dev-build]
 - go-test-lib:
 name: "go-test-api go1.18"
 path: api
 go-version: "1.18"
-requires: [ dev-build ]
+requires: [dev-build]
 - go-test-lib:
 name: "go-test-sdk go1.17"
 path: sdk
@@ -1084,6 +1077,7 @@ workflows:
 <<: *filter-ignore-non-go-branches
 - go-test-race: *filter-ignore-non-go-branches
 - go-test-32bit: *filter-ignore-non-go-branches
+- noop
 build-distros:
 unless: << pipeline.parameters.trigger-load-test >>
 jobs:
@@ -1102,20 +1096,12 @@ workflows:
 - ember-build-prod:
 requires:
 - frontend-cache
-- build-static-assets:
+- publish-static-assets:
 requires:
 - ember-build-prod
-- publish-static-assets:
-filters:
-branches:
-only:
-- main
-- /release\/\d+\.\d+\.x$/
-requires:
-- build-static-assets
 - dev-build:
 requires:
-- build-static-assets
+- ember-build-prod
 - dev-upload-s3:
 requires:
 - dev-build
@@ -1123,6 +1109,7 @@ workflows:
 requires:
 - dev-build
 context: consul-ci
+- noop
 test-integrations:
 unless: << pipeline.parameters.trigger-load-test >>
 jobs:
@@ -1144,22 +1131,22 @@ workflows:
 - nomad-integration-0_8:
 requires:
 - dev-build
-- envoy-integration-test-1_19_3:
+- envoy-integration-test-1_19_5:
 requires:
 - dev-build
-- envoy-integration-test-1_20_2:
+- envoy-integration-test-1_20_4:
 requires:
 - dev-build
-- envoy-integration-test-1_21_1:
+- envoy-integration-test-1_21_3:
 requires:
 - dev-build
-- envoy-integration-test-1_22_0:
+- envoy-integration-test-1_22_2:
 requires:
 - dev-build
 - compatibility-integration-test:
 requires:
 - dev-build
+- noop
 frontend:
 unless: << pipeline.parameters.trigger-load-test >>
 jobs:
@@ -1192,6 +1179,7 @@ workflows:
 - ember-coverage:
 requires:
 - ember-build-ent
+- noop
 workflow-automation:
 unless: << pipeline.parameters.trigger-load-test >>
 jobs:
@@ -1202,13 +1190,7 @@ workflows:
 only:
 - main
 - /release\/\d+\.\d+\.x$/
-- cherry-picker:
-context: team-consul
-filters:
-branches:
-only:
-- main
-- /release\/\d+\.\d+\.x$/
 load-test:
 when: << pipeline.parameters.trigger-load-test >>
 jobs:

(deleted file)

@@ -1,195 +0,0 @@
#!/usr/bin/env bash
#
# This script is meant to run on every new commit to main in CircleCI. If the commit comes from a PR, it will
# check the PR associated with the commit for labels. If the label matches `docs*` it will be cherry-picked
# to stable-website. If the label matches `backport/*`, it will be cherry-picked to the appropriate `release/*`
# branch.
# Requires $CIRCLE_PROJECT_USERNAME, $CIRCLE_PROJECT_REPONAME, and $CIRCLE_SHA1 from CircleCI
set -o pipefail
# colorized status prompt
function status {
tput setaf 4
echo "$@"
tput sgr0
}
# Returns the latest GitHub "backport/*" label
function get_latest_backport_label {
local resp
local ret
local latest_backport_label
resp=$(curl -f -s -H "Authorization: token ${GITHUB_TOKEN}" "https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/labels?per_page=100")
ret="$?"
if [[ "$ret" -ne 0 ]]; then
status "The GitHub API returned $ret which means it was probably rate limited."
exit $ret
fi
latest_backport_label=$(echo "$resp" | jq -r '.[] | select(.name | startswith("backport/")) | .name' | sort -rV | head -n1)
echo "$latest_backport_label"
return 0
}
# This function will do the cherry-picking of a commit on a branch
# Exit 1 if cherry-picking fails
function cherry_pick_with_slack_notification {
# Arguments:
# $1 - branch to cherry-pick to
# $2 - commit to cherry-pick
# $3 - url to PR of commit
#
# Return:
# 0 for success
# 1 for error
local branch="$1"
local commit="$2"
local pr_url="$3"
git checkout "$branch" || exit 1
# If git cherry-pick fails or it fails to push, we send a failure notification
if ! (git cherry-pick --mainline 1 "$commit" && git push origin "$branch"); then
status "🍒❌ Cherry pick of commit ${commit:0:7} from $pr_url onto $branch failed!"
# send slack notification
curl -X POST -H 'Content-type: application/json' \
--data \
"{ \
\"attachments\": [ \
{ \
\"fallback\": \"Cherry pick failed!\", \
\"text\": \"🍒❌ Cherry picking of <$pr_url|${commit:0:7}> to \`$branch\` failed!\n\nBuild Log: ${CIRCLE_BUILD_URL}\", \
\"footer\": \"${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}\", \
\"ts\": \"$(date +%s)\", \
\"color\": \"danger\" \
} \
] \
}" "${CONSUL_SLACK_WEBHOOK_URL}"
# post PR comment to GitHub
github_message=":cherries::x: Cherry pick of commit ${commit} onto \`$branch\` failed! [Build Log]($CIRCLE_BUILD_URL)"
pr_id=$(basename ${pr_url})
curl -f -s -H "Authorization: token ${GITHUB_TOKEN}" \
-X POST \
-d "{ \"body\": \"${github_message}\"}" \
"https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/issues/${pr_id}/comments"
# run git status to leave error in CircleCI log
git status
return 1
# Else we send a success notification
else
status "🍒✅ Cherry picking of PR commit ${commit:0:7} from ${pr_url} succeeded!"
curl -X POST -H 'Content-type: application/json' \
--data \
"{ \
\"attachments\": [ \
{ \
\"fallback\": \"Cherry pick succeeded!\", \
\"text\": \"🍒✅ Cherry picking of <$pr_url|${commit:0:7}> to \`$branch\` succeeded!\", \
\"footer\": \"${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}\", \
\"ts\": \"$(date +%s)\", \
\"color\": \"good\" \
} \
] \
}" "${CONSUL_SLACK_WEBHOOK_URL}"
# post PR comment to GitHub
github_message=":cherries::white_check_mark: Cherry pick of commit ${commit} onto \`$branch\` succeeded!"
pr_id=$(basename ${pr_url})
curl -f -s -H "Authorization: token ${GITHUB_TOKEN}" \
-X POST \
-d "{ \"body\": \"${github_message}\"}" \
"https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/issues/${pr_id}/comments"
fi
return 0
}
# search for the PR labels applicable to the specified commit
resp=$(curl -f -s -H "Authorization: token ${GITHUB_TOKEN}" "https://api.github.com/search/issues?q=repo:${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}+sha:${CIRCLE_SHA1}")
ret="$?"
if [[ "$ret" -ne 0 ]]; then
status "The GitHub API returned $ret which means it was probably rate limited."
exit $ret
fi
# get the count from the GitHub API to check if the commit matched a PR
count=$(echo "$resp" | jq '.total_count')
if [[ "$count" -eq 0 ]]; then
status "This commit was not associated with a PR"
exit 0
fi
# save PR number
pr_number=$(echo "$resp" | jq '.items[].number')
# comment on the PR with the build number to make it easy to re-run the job when
# cherry-pick labels are added in the future
github_message=":cherries: If backport labels were added before merging, cherry-picking will start automatically.\n\nTo retroactively trigger a backport after merging, add backport labels and re-run ${CIRCLE_BUILD_URL}."
curl -f -s -H "Authorization: token ${GITHUB_TOKEN}" \
-X POST \
-d "{ \"body\": \"${github_message}\"}" \
"https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/issues/${pr_number}/comments"
# If the API returned a non-zero count, we have found a PR with that commit so we find
# the labels from the PR
# Sorts the labels from a PR via version sort
labels=$(echo "$resp" | jq --raw-output '.items[].labels[] | .name' | sort -rV)
ret="$?"
pr_url=$(echo "$resp" | jq --raw-output '.items[].pull_request.html_url')
if [[ "$ret" -ne 0 ]]; then
status "jq exited with $ret when trying to find label names. Are there labels applied to the PR ($pr_url)?"
# This can be a valid error but usually this means we do not have any labels so it doesn't signal
# cherry-picking is possible. Exit 0 for now unless we run into cases where these failures are important.
exit 0
fi
# Attach label for latest release branch if 'docs-cherrypick' is present. Will noop if already applied.
latest_backport_label=$(get_latest_backport_label)
status "latest backport label is $latest_backport_label"
if echo "$resp" | jq -e '.items[].labels[] | select(.name | contains("docs-cherrypick"))'; then
labels=$(curl -f -s -H "Authorization: token ${GITHUB_TOKEN}" -X POST -d "{\"labels\":[\"$latest_backport_label\"]}" "https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/issues/${pr_number}/labels" | jq --raw-output '.[].name' | sort -rV)
ret="$?"
if [[ "$ret" -ne 0 ]]; then
status "Error applying $latest_backport_label to $pr_url"
exit $ret
fi
fi
git config --local user.email "github-team-consul-core@hashicorp.com"
git config --local user.name "hc-github-team-consul-core"
backport_failures=0
# loop through all labels on the PR
for label in $labels; do
status "checking label: $label"
# if the label matches docs-cherrypick, it will attempt to cherry-pick to stable-website
if [[ $label =~ docs-cherrypick ]]; then
status "backporting to stable-website"
branch="stable-website"
cherry_pick_with_slack_notification "$branch" "$CIRCLE_SHA1" "$pr_url"
backport_failures=$((backport_failures + "$?"))
# else if the label matches backport/*, it will attempt to cherry-pick to the release branch
elif [[ $label =~ backport/* ]]; then
status "backporting to $label"
branch="${label/backport/release}.x"
cherry_pick_with_slack_notification "$branch" "$CIRCLE_SHA1" "$pr_url"
backport_failures=$((backport_failures + "$?"))
fi
# reset the working directory for the next label
git reset --hard
done
if [ "$backport_failures" -ne 0 ]; then
echo "$backport_failures backports failed"
exit 1
fi

@@ -10,15 +10,17 @@ provider "aws" {
 }

 module "load-test" {
-source = "github.com/hashicorp/consul/test/load/terraform"
+source = "../../../test/load/terraform"
 vpc_az = ["us-east-2a", "us-east-2b"]
 vpc_name = var.vpc_name
 vpc_cidr = "10.0.0.0/16"
+vpc_allwed_ssh_cidr = "0.0.0.0/0"
 public_subnet_cidrs = ["10.0.1.0/24", "10.0.2.0/24"]
 private_subnet_cidrs = ["10.0.3.0/24"]
 test_public_ip = true
 ami_owners = var.ami_owners
 consul_download_url = var.consul_download_url
 cluster_name = var.cluster_name
+cluster_tag_key = var.cluster_tag_key
 }

@@ -22,3 +22,9 @@ variable "cluster_name" {
 type = string
 default = "consul-example"
 }
+
+variable "cluster_tag_key" {
+  description = "The tag the EC2 Instances will look for to automatically discover each other and form a cluster."
+  type        = string
+  default     = "consul-ci-load-test"
+}

.github/CODEOWNERS

@@ -5,3 +5,6 @@
 /website/content/api-docs/ @hashicorp/consul-docs
+# release configuration
+/.release/ @hashicorp/release-engineering @hashicorp/github-consul-core
+/.github/workflows/build.yml @hashicorp/release-engineering @hashicorp/github-consul-core

@@ -156,12 +156,25 @@ When you're ready to submit a pull request:
 if your changes aren't finalized but would benefit from in-process feedback.
 5. If there's any reason Consul users might need to know about this change,
 [add a changelog entry](../docs/contributing/add-a-changelog-entry.md).
-6. After you submit, the Consul maintainers team needs time to carefully review your
+6. Add labels to your pull request. A table of commonly used labels is below.
+If you have any questions about which to apply, feel free to call it out in the PR or comments.
+
+| Label | When to Use |
+| --- | --- |
+| `pr/no-changelog` | This PR does not have an intended changelog entry |
+| `pr/no-metrics-test` | This PR does not require any testing for metrics |
+| `backport/stable-website` | This PR contains documentation changes that are ready to be deployed immediately. Changes will also automatically get backported to the latest release branch. |
+| `backport/1.12.x` | Backport the changes in this PR to the targeted release branch. Consult the [Consul Release Notes](https://www.consul.io/docs/release-notes) page to view active releases. |
+
+Other labels may be added automatically by the GitHub Actions CI.
+7. After you submit, the Consul maintainers team needs time to carefully review your
 contribution and ensure it is production-ready, considering factors such as: security,
 backwards-compatibility, potential regressions, etc.
-7. After you address Consul maintainer feedback and the PR is approved, a Consul maintainer
+8. After you address Consul maintainer feedback and the PR is approved, a Consul maintainer
 will merge it. Your contribution will be available from the next major release (e.g., 1.x)
 unless explicitly backported to an existing or previous major release by the maintainer.
+9. Any backport labels will generate an additional PR to the targeted release branch.
+These will be linked in the original PR.
+Assuming the tests pass, the PR will be merged automatically.
+If the tests fail, it is your responsibility to resolve the issues with backports and request another reviewer.

 #### Checklists

@@ -16,4 +16,3 @@ Please be mindful not to leak any customer or confidential information. HashiCor
 * [ ] updated test coverage
 * [ ] external facing docs updated
 * [ ] not a security concern
-* [ ] checklist [folder](./../docs/config) consulted

@@ -6,7 +6,7 @@ set -uo pipefail
 ### It is still up to the reviewer to make sure that any tests added are needed and meaningful.
 # search for any "new" or modified metric emissions
-metrics_modified=$(git --no-pager diff HEAD origin/main | grep -i "SetGauge\|EmitKey\|IncrCounter\|AddSample\|MeasureSince\|UpdateFilter")
+metrics_modified=$(git --no-pager diff origin/main...HEAD | grep -i "SetGauge\|EmitKey\|IncrCounter\|AddSample\|MeasureSince\|UpdateFilter")
 # search for PR body or title metric references
 metrics_in_pr_body=$(echo "${PR_BODY-""}" | grep -i "metric")
 metrics_in_pr_title=$(echo "${PR_TITLE-""}" | grep -i "metric")

.github/scripts/verify_artifact.sh (new executable file)

@@ -0,0 +1,240 @@
#!/bin/bash
set -euo pipefail
# verify_artifact.sh is the top-level script that implements the logic to decide
# which individual verification script to invoke. It decides which verification
# script to use based on artifact name it is given. By putting the logic in here,
# it keeps the workflow file simpler and easier to manage. It also doubles as a means
# to run verifications locally when necessary.
# set this so we can locate and execute the individual verification scripts.
SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
function usage {
echo "verify_artifact.sh <artifact_path> <expect_version>"
}
# Arguments:
# $1 - artifact path (eg. /artifacts/consul-1.13.0~dev-1.i386.rpm)
# $2 - expected version to match against (eg. v1.13.0-dev)
function main {
local artifact_path="${1:-}"
local expect_version="${2:-}"
if [[ -z "${artifact_path}" ]]; then
echo "ERROR: artifact path argument is required"
usage
exit 1
fi
if [[ -z "${expect_version}" ]]; then
echo "ERROR: expected version argument is required"
usage
exit 1
fi
if [[ ! -e "${artifact_path}" ]]; then
echo "ERROR: ${artifact_path} does not exist"
usage
exit 1
fi
# match against the various artifact names:
# deb packages: consul_${version}-1_${arch}.deb
# rpm packages: consul-${version}-1.${arch}.rpm
# zip packages: consul_${version}_${os}_${arch}.zip
case "${artifact_path}" in
*.rpm) verify_rpm "${artifact_path}" "${expect_version}";;
*.deb) verify_deb "${artifact_path}" "${expect_version}";;
*.zip) verify_zip "${artifact_path}" "${expect_version}";;
*)
echo "${artifact_path} did not match known patterns"
exit 1
;;
esac
}
# Arguments:
# $1 - path to rpm (eg. consul-1.13.0~dev-1.aarch64.rpm)
# $2 - expected version to match against (eg. v1.13.0-dev)
function verify_rpm {
local artifact_path="${1:-}"
local expect_version="${2:-}"
local docker_image
local docker_platform
case "${artifact_path}" in
*.i386.rpm)
docker_platform="linux/386"
docker_image="i386/centos:7"
;;
*.x86_64.rpm)
docker_platform="linux/amd64"
docker_image="amd64/centos:7"
;;
*.arm.rpm)
docker_platform="linux/arm/v7"
docker_image="arm32v7/fedora:36"
;;
*.aarch64.rpm)
docker_platform="linux/arm64"
docker_image="arm64v8/fedora:36"
;;
*)
echo "${artifact_path} did not match known patterns for rpms"
exit 1
;;
esac
echo "executing RPM verification in Docker with these parameters:"
echo "PLATFORM=${docker_platform}"
echo "IMAGE=${docker_image}"
docker run \
--platform=${docker_platform} \
-v $(pwd):/workdir \
-v ${SCRIPT_DIR}:/scripts \
-w /workdir \
${docker_image} \
/scripts/verify_rpm.sh \
"/workdir/${artifact_path}" \
"${expect_version}"
}
# Arguments:
# $1 - path to deb (eg. consul_1.13.0~dev-1_arm64.deb)
# $2 - expected version to match against (eg. v1.13.0-dev)
function verify_deb {
local artifact_path="${1:-}"
local expect_version="${2:-}"
local docker_image
local docker_platform
case "${artifact_path}" in
*_i386.deb)
docker_platform="linux/386"
docker_image="i386/debian:bullseye"
;;
*_amd64.deb)
docker_platform="linux/amd64"
docker_image="amd64/debian:bullseye"
;;
*_arm.deb)
docker_platform="linux/arm/v7"
docker_image="arm32v7/debian:bullseye"
;;
*_arm64.deb)
docker_platform="linux/arm64"
docker_image="arm64v8/debian:bullseye"
;;
*)
echo "${artifact_path} did not match known patterns for debs"
exit 1
;;
esac
echo "executing DEB verification in Docker with these parameters:"
echo "PLATFORM=${docker_platform}"
echo "IMAGE=${docker_image}"
docker run \
--platform=${docker_platform} \
-v $(pwd):/workdir \
-v ${SCRIPT_DIR}:/scripts \
-w /workdir \
${docker_image} \
/scripts/verify_deb.sh \
"/workdir/${artifact_path}" \
"${expect_version}"
}
# Arguments:
# $1 - path to zip (eg. consul_1.13.0-dev_linux_amd64.zip)
# $2 - expected version to match against (eg. v1.13.0-dev)
function verify_zip {
local artifact_path="${1:-}"
local expect_version="${2:-}"
local machine_os=$(uname -s)
local machine_arch=$(uname -m)
unzip "${artifact_path}"
if [[ ! -e ./consul ]]; then
echo "ERROR: ${artifact_path} did not contain a consul binary"
exit 1
fi
case "${artifact_path}" in
*_darwin_amd64.zip)
if [[ "${machine_os}" = 'Darwin' ]]; then
# run the darwin binary if the host is Darwin.
${SCRIPT_DIR}/verify_bin.sh ./consul ${expect_version}
else
echo "cannot run darwin binary on a non-darwin host (${machine_os})"
fi
;;
*_linux_386.zip | *_linux_amd64.zip)
if [[ "${machine_os}" = 'Linux' && "${machine_arch}" = "x86_64" ]]; then
# run the binary directly on the host when it's x86_64 Linux
${SCRIPT_DIR}/verify_bin.sh ./consul ${expect_version}
else
# otherwise, use Docker/QEMU
docker run \
--platform=linux/amd64 \
-v $(pwd):/workdir \
-v ${SCRIPT_DIR}:/scripts \
-w /workdir \
amd64/debian \
/scripts/verify_bin.sh \
./consul \
"${expect_version}"
fi
;;
*_linux_arm.zip)
if [[ "${machine_os}" = 'Linux' && "${machine_arch}" = arm* ]]; then
# run the binary directly on the host when it's arm Linux
${SCRIPT_DIR}/verify_bin.sh ./consul ${expect_version}
else
# otherwise, use Docker/QEMU
docker run \
--platform=linux/arm/v7 \
-v $(pwd):/workdir \
-v ${SCRIPT_DIR}:/scripts \
-w /workdir \
arm32v7/debian \
/scripts/verify_bin.sh \
./consul \
"${expect_version}"
fi
;;
*_linux_arm64.zip)
if [[ "${machine_os}" = 'Linux' && "${machine_arch}" = arm* ]]; then
# run the binary directly on the host when it's arm64 Linux
${SCRIPT_DIR}/verify_bin.sh ./consul ${expect_version}
else
# otherwise, use Docker/QEMU
docker run \
--platform=linux/arm64 \
-v $(pwd):/workdir \
-v ${SCRIPT_DIR}:/scripts \
-w /workdir \
arm64v8/debian \
/scripts/verify_bin.sh \
./consul \
"${expect_version}"
fi
;;
*)
echo "${artifact_path} did not match known patterns for zips"
exit 1
;;
esac
}
main "$@"

.github/scripts/verify_bin.sh (new executable file)

@@ -0,0 +1,44 @@
#!/bin/bash
set -euo pipefail
# verify_bin.sh validates the file at the path given and then runs `./consul version` and inspects its output. If its
# output doesn't match the version given, the script will exit 1 and report why it failed.
# This is meant to be run as part of the build workflow to verify the built .zip meets some basic criteria for validity.
function usage {
echo "./verify_bin.sh <path_to_bin> <expect_version>"
}
function main {
local bin_path="${1:-}"
local expect_version="${2:-}"
local got_version
if [[ -z "${bin_path}" ]]; then
echo "ERROR: path to binary argument is required"
usage
exit 1
fi
if [[ -z "${expect_version}" ]]; then
echo "ERROR: expected version argument is required"
usage
exit 1
fi
if [[ ! -e "${bin_path}" ]]; then
echo "ERROR: package at ${bin_path} does not exist."
exit 1
fi
got_version="$( awk '{print $2}' <(head -n1 <(${bin_path} version)) )"
if [ "${got_version}" != "${expect_version}" ]; then
echo "Test FAILED"
echo "Got: ${got_version}, Want: ${expect_version}"
exit 1
fi
echo "Test PASSED"
}
main "$@"

.github/scripts/verify_deb.sh (new executable file)

@@ -0,0 +1,57 @@
#!/bin/bash
set -euo pipefail
# verify_deb.sh tries to install the .deb package at the path given before running `consul version`
# to inspect its output. If its output doesn't match the version given, the script will exit 1 and
# report why it failed. This is meant to be run as part of the build workflow to verify the built
# .deb meets some basic criteria for validity.
# set this so we can locate and execute the verify_bin.sh script for verifying version output
SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
function usage {
echo "./verify_deb.sh <path_to_deb> <expect_version>"
}
function main {
local deb_path="${1:-}"
local expect_version="${2:-}"
local got_version
if [[ -z "${deb_path}" ]]; then
echo "ERROR: package path argument is required"
usage
exit 1
fi
if [[ -z "${expect_version}" ]]; then
echo "ERROR: expected version argument is required"
usage
exit 1
fi
# expand globs for path names, if this fails, the script will exit
deb_path=$(echo ${deb_path})
if [[ ! -e "${deb_path}" ]]; then
echo "ERROR: package at ${deb_path} does not exist."
usage
exit 1
fi
# we have to install the 'arm' architecture in order to install the 'arm'
# package, otherwise we will get a 'package architecture does not match system' error
if [[ ${deb_path} = *_arm.deb ]]; then
dpkg --add-architecture arm
fi
apt -y update
apt -y install openssl
dpkg -i ${deb_path}
# use the script that should be located next to this one for verifying the output
exec "${SCRIPT_DIR}/verify_bin.sh" $(which consul) "${expect_version}"
}
main "$@"

.github/scripts/verify_docker.sh (new executable file)

@@ -0,0 +1,44 @@
#!/bin/bash
set -euo pipefail
# verify_docker.sh invokes the given Docker image to run `consul version` and inspect its output.
# If its output doesn't match the version given, the script will exit 1 and report why it failed.
# This is meant to be run as part of the build workflow to verify the built image meets some basic
# criteria for validity.
#
# Because this is meant to be run as the `smoke_test` for the docker-build workflow, the script expects
# the image name parameter to be provided by the `IMAGE_NAME` environment variable, rather than a
# positional argument.
function usage {
echo "IMAGE_NAME=<image uri> ./verify_docker.sh <expect_version>"
}
function main {
local image_name="${IMAGE_NAME:-}"
local expect_version="${1:-}"
local got_version
if [[ -z "${image_name}" ]]; then
echo "ERROR: IMAGE_NAME is not set"
usage
exit 1
fi
if [[ -z "${expect_version}" ]]; then
echo "ERROR: expected version argument is required"
usage
exit 1
fi
got_version="$( awk '{print $2}' <(head -n1 <(docker run "${image_name}" version)) )"
if [ "${got_version}" != "${expect_version}" ]; then
echo "Test FAILED"
echo "Got: ${got_version}, Want: ${expect_version}"
exit 1
fi
echo "Test PASSED"
}
main "$@"

.github/scripts/verify_rpm.sh (new executable file)

@@ -0,0 +1,52 @@
#!/bin/bash
set -euo pipefail
# verify_rpm.sh tries to install the .rpm package at the path given before running `consul version`
# to inspect its output. If its output doesn't match the version given, the script will exit 1 and
# report why it failed. This is meant to be run as part of the build workflow to verify the built
# .rpm meets some basic criteria for validity.
# set this so we can locate and execute the verify_bin.sh script for verifying version output
SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
function usage {
echo "./verify_rpm.sh <path_to_rpm> <expect_version>"
}
function main {
local rpm_path="${1:-}"
local expect_version="${2:-}"
local got_version
if [[ -z "${rpm_path}" ]]; then
echo "ERROR: package path argument is required"
usage
exit 1
fi
if [[ -z "${expect_version}" ]]; then
echo "ERROR: expected version argument is required"
usage
exit 1
fi
# expand globs for path names, if this fails, the script will exit
rpm_path=$(echo ${rpm_path})
if [[ ! -e "${rpm_path}" ]]; then
echo "ERROR: package at ${rpm_path} does not exist."
usage
exit 1
fi
yum -y clean all
yum -y update
yum -y install which openssl
rpm --ignorearch -i ${rpm_path}
# use the script that should be located next to this one for verifying the output
exec "${SCRIPT_DIR}/verify_bin.sh" $(which consul) "${expect_version}"
}
main "$@"

@@ -8,6 +8,9 @@ on:
 types:
 - closed
 - labeled
+branches:
+- main
+- 'release/*.*.x'

 jobs:
 backport:
@@ -16,17 +19,34 @@ jobs:
 container: hashicorpdev/backport-assistant:0.2.3
 steps:
 - name: Run Backport Assistant for stable-website
-# Update this to auto-merge when we have confidence in the process working and kill Circle
 run: |
-backport-assistant backport -merge-method=rebase
+backport-assistant backport -merge-method=squash -automerge
 env:
 BACKPORT_LABEL_REGEXP: "type/docs-(?P<target>cherrypick)"
 BACKPORT_TARGET_TEMPLATE: "stable-website"
 GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
-- name: Run Backport Assistant for release branches
-# Update this to auto-merge when we have confidence in the process working and kill Circle
-run: |
-backport-assistant backport -merge-method=rebase
+- name: Backport changes to latest release branch
+run: |
+# Use standard token here
+resp=$(curl -f -s -H 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' "https://api.github.com/repos/$GITHUB_REPOSITORY/labels?per_page=100")
+ret="$?"
+if [[ "$ret" -ne 0 ]]; then
+echo "The GitHub API returned $ret"
+exit $ret
+fi
+# get the latest backport label excluding any website labels, ex: `backport/0.3.x` and not `backport/website`
+latest_backport_label=$(echo "$resp" | jq -r '.[] | select(.name | (startswith("backport/") and (contains("website") | not))) | .name' | sort -rV | head -n1)
+echo "Latest backport label: $latest_backport_label"
+# set BACKPORT_TARGET_TEMPLATE for backport-assistant
+# trims backport/ from the beginning with parameter substitution
+export BACKPORT_TARGET_TEMPLATE="release/${latest_backport_label#backport/}.x"
+backport-assistant backport -merge-method=squash -automerge
+env:
+BACKPORT_LABEL_REGEXP: "type/docs-(?P<target>cherrypick)"
+GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
+- name: Run Backport Assistant for release branches
+run: |
+backport-assistant backport -merge-method=squash -automerge
 env:
 BACKPORT_LABEL_REGEXP: "backport/(?P<target>\\d+\\.\\d+)"
 BACKPORT_TARGET_TEMPLATE: "release/{{.target}}.x"

@@ -1,6 +1,8 @@
 # This workflow sends a reminder comment to PRs that have labels starting with
 # `backport/` to check that the backport has run successfully.
+name: Backport Assistant Reminder
+
 on:
 pull_request:
 types: [ labeled ]
@@ -17,7 +19,7 @@ jobs:
 steps:
 - name: Comment on PR
 run: |
-github_message="After merging, confirm that you see messages like: 🍒✅ Cherry pick of commit ... onto ... succeeded!"
+github_message="After merging, confirm that you see linked PRs AND check them for CI errors."
 curl -s -H "Authorization: token ${{ secrets.PR_COMMENT_TOKEN }}" \
 -X POST \
 -d "{ \"body\": \"${github_message}\"}" \

@@ -15,6 +15,7 @@ jobs:
 runs-on: ubuntu-latest
 outputs:
 product-version: ${{ steps.get-product-version.outputs.product-version }}
+product-date: ${{ steps.get-product-version.outputs.product-date }}
 pre-version: ${{ steps.get-product-version.outputs.pre-version }}
 pkg-version: ${{ steps.get-product-version.outputs.pkg-version }}
 shared-ldflags: ${{ steps.shared-ldflags.outputs.shared-ldflags }}
@@ -24,6 +25,7 @@ jobs:
 id: get-product-version
 run: |
 CONSUL_VERSION=$(build-support/scripts/version.sh -r)
+CONSUL_DATE=$(build-support/scripts/build-date.sh)
 ## TODO: This assumes `make version` outputs 1.1.1+ent-prerel
 IFS="+" read VERSION _other <<< "$CONSUL_VERSION"
 IFS="-" read _other PREREL_VERSION <<< "$CONSUL_VERSION"
@@ -32,12 +34,15 @@ jobs:
 ## [version]{-prerelease}+ent before then, we'll need to add
 ## logic to handle presense/absence of the prerelease
 echo "::set-output name=product-version::${CONSUL_VERSION}"
+echo "::set-output name=product-date::${CONSUL_DATE}"
 echo "::set-output name=pre-version::${PREREL_VERSION}"
 echo "::set-output name=pkg-version::${VERSION}"
 - name: Set shared -ldflags
 id: shared-ldflags
-run: echo "::set-output name=shared-ldflags::-X github.com/hashicorp/consul/version.GitCommit=${GITHUB_SHA::8} -X github.com/hashicorp/consul/version.GitDescribe=${{ steps.get-product-version.outputs.product-version }}"
+run: |
+T="github.com/hashicorp/consul/version"
+echo "::set-output name=shared-ldflags::-X ${T}.GitCommit=${GITHUB_SHA::8} -X ${T}.GitDescribe=${{ steps.get-product-version.outputs.product-version }} -X ${T}.BuildDate=${{ steps.get-product-version.outputs.product-date }}"
 generate-metadata-file:
 needs: get-product-version
@@ -85,11 +90,6 @@ jobs:
 with:
 go-version: ${{ matrix.go }}
-- name: Install project dependencies
-run: |
-go install github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@38087fe
-go install github.com/hashicorp/go-bindata/go-bindata@bf7910a
 - name: Setup with node and yarn
 uses: actions/setup-node@v2
 with:
@@ -100,18 +100,16 @@ jobs:
 - name: Build UI
 run: |
 CONSUL_VERSION=${{ needs.get-product-version.outputs.product-version }}
+CONSUL_DATE=${{ needs.get-product-version.outputs.product-date }}
 CONSUL_BINARY_TYPE=${CONSUL_BINARY_TYPE}
 CONSUL_COPYRIGHT_YEAR=$(git show -s --format=%cd --date=format:%Y HEAD)
 echo "consul_version is ${CONSUL_VERSION}"
+echo "consul_date is ${CONSUL_DATE}"
 echo "consul binary type is ${CONSUL_BINARY_TYPE}"
 echo "consul copyright year is ${CONSUL_COPYRIGHT_YEAR}"
 cd ui && make && cd ..
-mkdir pkg
-mv ui/packages/consul-ui/dist pkg/web_ui
-- name: Build static-assets
-run: make static-assets
+rm -rf agent/uiserver/dist
+mv ui/packages/consul-ui/dist agent/uiserver/
 - name: Build
 env:
 GOOS: ${{ matrix.goos }}
@@ -185,11 +183,6 @@ jobs:
 with:
 go-version: ${{ matrix.go }}
-- name: Install project dependencies
-run: |
-go install github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@38087fe
-go install github.com/hashicorp/go-bindata/go-bindata@bf7910a
 - name: Setup with node and yarn
 uses: actions/setup-node@v2
 with:
@@ -206,11 +199,8 @@ jobs:
 echo "consul binary type is ${CONSUL_BINARY_TYPE}"
 echo "consul copyright year is ${CONSUL_COPYRIGHT_YEAR}"
 cd ui && make && cd ..
-mkdir pkg
-mv ui/packages/consul-ui/dist pkg/web_ui
-- name: Build static-assets
-run: make static-assets
+rm -rf agent/uiserver/dist
+mv ui/packages/consul-ui/dist agent/uiserver/
 - name: Build
 env:
@@ -235,7 +225,7 @@ jobs:
 runs-on: ubuntu-latest
 strategy:
 matrix:
-arch: ["arm", "arm64", "386", "amd64"]
+arch: ["386", "amd64", "arm", "arm64"]
 env:
 repo: ${{github.event.repository.name}}
 version: ${{needs.get-product-version.outputs.product-version}}
@@ -251,3 +241,159 @@ jobs:
 tags: |
 docker.io/hashicorp/${{env.repo}}:${{env.version}}
 public.ecr.aws/hashicorp/${{env.repo}}:${{env.version}}
dev_tags: |
docker.io/hashicorppreview/${{ env.repo }}:${{ env.version }}
docker.io/hashicorppreview/${{ env.repo }}:${{ env.version }}-${{ github.sha }}
smoke_test: .github/scripts/verify_docker.sh v${{ env.version }}
build-docker-redhat:
name: Docker Build UBI Image for RedHat
needs:
- get-product-version
- build
runs-on: ubuntu-latest
env:
repo: ${{github.event.repository.name}}
version: ${{needs.get-product-version.outputs.product-version}}
steps:
- uses: actions/checkout@v2
- uses: hashicorp/actions-docker-build@v1
with:
version: ${{env.version}}
target: ubi
arch: amd64
redhat_tag: scan.connect.redhat.com/ospid-60f9fdbec3a80eac643abedf/${{env.repo}}:${{env.version}}-ubi
smoke_test: .github/scripts/verify_docker.sh v${{ env.version }}
verify-linux:
needs:
- get-product-version
- build
runs-on: ubuntu-latest
strategy:
matrix:
arch: ["386", "amd64", "arm", "arm64"]
fail-fast: true
env:
version: ${{ needs.get-product-version.outputs.product-version }}
zip_name: consul_${{ needs.get-product-version.outputs.product-version }}_linux_${{ matrix.arch }}.zip
name: Verify ${{ matrix.arch }} linux binary
steps:
- uses: actions/checkout@v2
- name: Download ${{ matrix.arch }} zip
uses: actions/download-artifact@v3
with:
name: ${{ env.zip_name }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
if: ${{ matrix.arch == 'arm' || matrix.arch == 'arm64' }}
with:
# this should be a comma-separated string as opposed to an array
platforms: arm,arm64
- name: Run verification for ${{ matrix.arch }} binary
run: .github/scripts/verify_artifact.sh ${{ env.zip_name }} v${{ env.version }}
verify-darwin:
needs:
- get-product-version
- build-darwin
runs-on: macos-latest
strategy:
fail-fast: true
env:
version: ${{needs.get-product-version.outputs.product-version}}
zip_name: consul_${{ needs.get-product-version.outputs.product-version }}_darwin_amd64.zip
name: Verify amd64 darwin binary
steps:
- uses: actions/checkout@v2
- name: Download amd64 darwin zip
uses: actions/download-artifact@v3
with:
name: ${{ env.zip_name }}
- name: Unzip amd64 darwin zip
run: unzip ${{ env.zip_name }}
- name: Run verification for amd64 darwin binary
run: .github/scripts/verify_bin.sh ./consul v${{ env.version }}
verify-linux-packages-deb:
needs:
- build
- get-product-version
runs-on: ubuntu-latest
strategy:
matrix:
arch: ["i386", "amd64", "arm", "arm64"]
# fail-fast: true
env:
version: ${{ needs.get-product-version.outputs.product-version }}
name: Verify ${{ matrix.arch }} debian package
steps:
- uses: actions/checkout@v2
- name: Set package version
run: |
echo "pkg_version=$(echo ${{ needs.get-product-version.outputs.product-version }} | sed 's/\-/~/g')" >> $GITHUB_ENV
- name: Set package name
run: |
echo "pkg_name=consul_${{ env.pkg_version }}-1_${{ matrix.arch }}.deb" >> $GITHUB_ENV
- name: Download workflow artifacts
uses: actions/download-artifact@v3
with:
name: ${{ env.pkg_name }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
with:
platforms: all
- name: Verify ${{ matrix.arch }} debian package
run: ./.github/scripts/verify_artifact.sh ${{ env.pkg_name }} v${{ env.version }}
verify-linux-packages-rpm:
needs:
- build
- get-product-version
runs-on: ubuntu-latest
strategy:
matrix:
arch: ["i386", "x86_64", "arm", "aarch64"]
# fail-fast: true
env:
version: ${{ needs.get-product-version.outputs.product-version }}
name: Verify ${{ matrix.arch }} rpm
steps:
- uses: actions/checkout@v2
- name: Set package version
run: |
echo "pkg_version=$(echo ${{ needs.get-product-version.outputs.product-version }} | sed 's/\-/~/g')" >> $GITHUB_ENV
- name: Set package name
run: |
echo "pkg_name=consul-${{ env.pkg_version }}-1.${{ matrix.arch }}.rpm" >> $GITHUB_ENV
- name: Download workflow artifacts
uses: actions/download-artifact@v3
with:
name: ${{ env.pkg_name }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
with:
platforms: all
- name: Verify ${{ matrix.arch }} rpm
run: ./.github/scripts/verify_artifact.sh ${{ env.pkg_name }} v${{ env.version }}
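
Both package-verification jobs above rewrite the product version with `sed 's/\-/~/g'` before composing the artifact name. A minimal sketch of that transformation, using a hypothetical pre-release version string:

```sh
#!/bin/sh
# Hypothetical input; deb/rpm version ordering sorts '~' before everything,
# so pre-release markers are rewritten from '-' to '~'.
product_version="1.13.0-beta1"
pkg_version=$(echo "$product_version" | sed 's/\-/~/g')
echo "$pkg_version"                        # 1.13.0~beta1
echo "consul_${pkg_version}-1_amd64.deb"   # consul_1.13.0~beta1-1_amd64.deb
```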

View File

@ -1,6 +1,8 @@
# This workflow checks that there is either a 'pr/no-changelog' label applied to a PR
# or there is a .changelog/<pr number>.txt file associated with a PR for a changelog entry
+ name: Changelog Checker
on:
pull_request:
types: [opened, synchronize, labeled]

View File

@ -0,0 +1,37 @@
# This workflow detects if there is a diff in the `agent/uiserver/dist` directory
# which is used by Consul to serve its embedded UI.
# `agent/uiserver/dist` should not be manually updated.
name: Embedded Asset Checker
on:
pull_request:
types: [opened, synchronize, labeled, unlabeled, reopened]
# Runs on PRs to main and all release branches
branches:
- main
- release/*
jobs:
dist-check:
if: "! ( contains(github.event.pull_request.labels.*.name, 'pr/update-ui-assets') || github.event.pull_request.user.login == 'hc-github-team-consul-core' )"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
fetch-depth: 0 # by default the checkout action doesn't checkout all branches
- name: Check for agent/uiserver/dist dir change in diff
run: |
dist_files=$(git --no-pager diff --name-only HEAD "$(git merge-base HEAD "origin/${{ github.event.pull_request.base.ref }}")" -- agent/uiserver/dist)
if [[ -z "${dist_files}" ]]; then
exit 0
fi
echo "Found diffs in dir agent/uiserver/dist"
github_message="This PR has diffs in \`agent/uiserver/dist\`. If the changes are intentional, add the label \`pr/update-ui-assets\`. Otherwise, revert changes to \`agent/uiserver/dist\`."
curl -s -H "Authorization: token ${{ secrets.PR_COMMENT_TOKEN }}" \
-X POST \
-d "{ \"body\": \"${github_message}\"}" \
"https://api.github.com/repos/${GITHUB_REPOSITORY}/issues/${{ github.event.pull_request.number }}/comments"
exit 1
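
The check above reduces to a single `git diff` against the merge base; a rough local equivalent, assuming the PR targets `origin/main`:

```sh
#!/bin/sh
# List files under agent/uiserver/dist that differ from the merge base.
base=$(git merge-base HEAD origin/main)
dist_files=$(git --no-pager diff --name-only HEAD "$base" -- agent/uiserver/dist)
if [ -n "$dist_files" ]; then
  echo "Found diffs in dir agent/uiserver/dist:"
  echo "$dist_files"
  exit 1
fi
```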

View File

@ -1,8 +1,11 @@
+ name: Load Test
on:
pull_request:
branches:
- main
types: [labeled]
+ workflow_dispatch: {}
jobs:
trigger-load-test:

View File

@ -0,0 +1,230 @@
name: Nightly 1.10.x Test
on:
schedule:
- cron: '0 4 * * *'
workflow_dispatch: {}
env:
EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition
BRANCH: "release/1.10.x"
BRANCH_NAME: "release-1.10.x" # Used for naming artifacts
jobs:
frontend-test-workspace-node:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: yarn install
- name: Workspace Tests
id: workspace-test
working-directory: ./ui
run: make test-workspace
- name: Node Tests
id: node-test
working-directory: ./ui/packages/consul-ui
run: make test-node
frontend-build-oss:
runs-on: ubuntu-latest
env:
JOBS: 2
CONSUL_NSPACES_ENABLED: 0
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: yarn install
- name: Ember Build OSS
id: build-oss
working-directory: ./ui/packages/consul-ui
run: make build-ci
- name: Upload OSS Frontend
uses: actions/upload-artifact@v3
with:
name: frontend-oss-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
if-no-files-found: error
frontend-test-oss:
runs-on: ubuntu-latest
needs: [frontend-build-oss]
strategy:
matrix:
partition: [ 1, 2, 3, 4 ]
env:
CONSUL_NSPACES_ENABLED: 0
EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CircleCI test summary
EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: yarn install
- name: Download OSS Frontend
uses: actions/download-artifact@v3
with:
name: frontend-oss-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
- name: Ember Test OSS
id: cache
working-directory: ./ui/packages/consul-ui
run: node_modules/.bin/ember exam --split=$EMBER_PARTITION_TOTAL --partition=${{ matrix.partition }} --path dist --silent -r xunit
frontend-build-ent:
runs-on: ubuntu-latest
env:
JOBS: 2
CONSUL_NSPACES_ENABLED: 1
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: yarn install
- name: Ember Build ENT
id: build-oss
working-directory: ./ui/packages/consul-ui
run: make build-ci
- name: Upload ENT Frontend
uses: actions/upload-artifact@v3
with:
name: frontend-ent-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
if-no-files-found: error
frontend-test-ent:
runs-on: ubuntu-latest
needs: [frontend-build-ent]
strategy:
matrix:
partition: [ 1, 2, 3, 4 ]
env:
CONSUL_NSPACES_ENABLED: 1
EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CircleCI test summary
EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: yarn install
- name: Download ENT Frontend
uses: actions/download-artifact@v3
with:
name: frontend-ent-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
- name: Ember Test ENT
id: cache
working-directory: ./ui/packages/consul-ui
run: node_modules/.bin/ember exam --split=$EMBER_PARTITION_TOTAL --partition=${{ matrix.partition }} --path dist --silent -r xunit
frontend-test-coverage-ent:
runs-on: ubuntu-latest
needs: [frontend-build-ent]
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: yarn install
- name: Download ENT Frontend
uses: actions/download-artifact@v3
with:
name: frontend-ent-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
- name: Run ENT Code Coverage
working-directory: ./ui/packages/consul-ui
run: make test-coverage-ci
slack-failure-notification:
runs-on: ubuntu-latest
needs: [frontend-test-oss, frontend-test-ent]
if: ${{ failure() }}
steps:
- name: Slack Notification
id: slack
uses: slackapi/slack-github-action@v1.19
with:
payload: |
{
"message": "One or more nightly UI tests have failed on branch ${{ env.BRANCH }} for Consul. ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.CONSUL_UI_SLACK_WEBHOOK }}

View File

@ -1,47 +1,21 @@
- name: nightly-release-ui-test
+ name: Nightly Test 1.11.x
on:
schedule:
- cron: '0 4 * * *'
workflow_dispatch: {}
env:
- EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partion
+ EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition
+ BRANCH: "release/1.11.x"
+ BRANCH_NAME: "release-1.11.x" # Used for naming artifacts
jobs:
- frontend-cache:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- branch: [ release/1.10.x, release/1.11.x, release/1.12.x ]
- steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
- ref: ${{ matrix.branch }}
- # Not necessary to use yarn, but enables caching
- - uses: actions/setup-node@v3
- with:
- node-version: 14
- cache: 'yarn'
- cache-dependency-path: ./ui/yarn.lock
- - name: Install
- id: install
- working-directory: ./ui
- run: make deps
frontend-test-workspace-node:
runs-on: ubuntu-latest
- needs: [frontend-cache]
- strategy:
- matrix:
- branch: [ release/1.10.x, release/1.11.x, release/1.12.x ]
steps:
- uses: actions/checkout@v2
with:
- fetch-depth: 0
+ ref: ${{ env.BRANCH }}
- ref: ${{ matrix.branch }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
@ -67,18 +41,13 @@ jobs:
frontend-build-oss:
runs-on: ubuntu-latest
- needs: [frontend-cache]
- strategy:
- matrix:
- branch: [ release/1.10.x, release/1.11.x, release/1.12.x ]
env:
JOBS: 2
CONSUL_NSPACES_ENABLED: 0
steps:
- uses: actions/checkout@v2
with:
- fetch-depth: 0
+ ref: ${{ env.BRANCH }}
- ref: ${{ matrix.branch }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
@ -100,16 +69,16 @@ jobs:
- name: Upload OSS Frontend
uses: actions/upload-artifact@v3
with:
- name: frontend-oss-${{ matrix.branch }}
+ name: frontend-oss-${{ env.BRANCH_NAME }}
- path: .ui/packages/consul-ui/dist
+ path: ./ui/packages/consul-ui/dist
+ if-no-files-found: error
frontend-test-oss:
runs-on: ubuntu-latest
needs: [frontend-build-oss]
strategy:
matrix:
- branch: [ release/1.10.x, release/1.11.x, release/1.12.x ]
- partion: [ 1, 2, 3, 4 ]
+ partition: [ 1, 2, 3, 4 ]
env:
CONSUL_NSPACES_ENABLED: 0
EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CircleCI test summary
@ -117,8 +86,7 @@ jobs:
steps:
- uses: actions/checkout@v2
with:
- fetch-depth: 0
+ ref: ${{ env.BRANCH }}
- ref: ${{ matrix.branch }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
@ -133,32 +101,25 @@ jobs:
run: make deps
- name: Download OSS Frontend
- uses: actions/upload-artifact@v3
+ uses: actions/download-artifact@v3
with:
- name: frontend-oss-${{ matrix.branch }}
+ name: frontend-oss-${{ env.BRANCH_NAME }}
- path: .ui/packages/consul-ui/dist
+ path: ./ui/packages/consul-ui/dist
- name: Ember Test OSS
id: cache
working-directory: ./ui/packages/consul-ui
run: node_modules/.bin/ember exam --split=$EMBER_PARTITION_TOTAL --partition=${{ matrix.partition }} --path dist --silent -r xunit
- # TODO: add test-reporter: https://github.com/marketplace/actions/test-reporter
frontend-build-ent:
runs-on: ubuntu-latest
- needs: [frontend-cache]
- strategy:
- matrix:
- branch: [ release/1.10.x, release/1.11.x, release/1.12.x ]
env:
JOBS: 2
CONSUL_NSPACES_ENABLED: 1
steps:
- uses: actions/checkout@v2
with:
- fetch-depth: 0
+ ref: ${{ env.BRANCH }}
- ref: ${{ matrix.branch }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
@ -180,16 +141,16 @@ jobs:
- name: Upload ENT Frontend
uses: actions/upload-artifact@v3
with:
- name: frontend-ent-${{ matrix.branch }}
+ name: frontend-ent-${{ env.BRANCH_NAME }}
- path: .ui/packages/consul-ui/dist
+ path: ./ui/packages/consul-ui/dist
+ if-no-files-found: error
frontend-test-ent:
runs-on: ubuntu-latest
needs: [frontend-build-ent]
strategy:
matrix:
- branch: [ release/1.10.x, release/1.11.x, release/1.12.x ]
- partion: [ 1, 2, 3, 4 ]
+ partition: [ 1, 2, 3, 4 ]
env:
CONSUL_NSPACES_ENABLED: 1
EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CircleCI test summary
@ -197,8 +158,7 @@ jobs:
steps:
- uses: actions/checkout@v2
with:
- fetch-depth: 0
+ ref: ${{ env.BRANCH }}
- ref: ${{ matrix.branch }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
@ -213,29 +173,23 @@ jobs:
run: make deps
- name: Download ENT Frontend
- uses: actions/upload-artifact@v3
+ uses: actions/download-artifact@v3
with:
- name: frontend-ent-${{ matrix.branch }}
+ name: frontend-ent-${{ env.BRANCH_NAME }}
- path: .ui/packages/consul-ui/dist
+ path: ./ui/packages/consul-ui/dist
- name: Ember Test ENT
id: cache
working-directory: ./ui/packages/consul-ui
run: node_modules/.bin/ember exam --split=$EMBER_PARTITION_TOTAL --partition=${{ matrix.partition }} --path dist --silent -r xunit
- # TODO: add test-reporter: https://github.com/marketplace/actions/test-reporter
frontend-test-coverage-ent:
runs-on: ubuntu-latest
needs: [frontend-build-ent]
- strategy:
- matrix:
- branch: [ release/1.10.x, release/1.11.x, release/1.12.x ]
steps:
- uses: actions/checkout@v2
with:
- fetch-depth: 0
+ ref: ${{ env.BRANCH }}
- ref: ${{ matrix.branch }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
@ -249,6 +203,12 @@ jobs:
working-directory: ./ui
run: make deps
+ - name: Download ENT Frontend
+ uses: actions/download-artifact@v3
+ with:
+ name: frontend-ent-${{ env.BRANCH_NAME }}
+ path: ./ui/packages/consul-ui/dist
- name: Run ENT Code Coverage
working-directory: ./ui/packages/consul-ui
run: make test-coverage-ci
@ -260,8 +220,11 @@ jobs:
steps:
- name: Slack Notification
id: slack
- uses: slackapi/slack-github-action@main
+ uses: slackapi/slack-github-action@v1.19
with:
- payload: "{\"message\":\"One or more nightly UI test have failed on a release branch for Consul. [Link to Failed Action](${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}/actions/runs/${GITHUB_RUN_ID}). \"}"
+ payload: |
+ {
+ "message": "One or more nightly UI tests have failed on branch ${{ env.BRANCH }} for Consul. ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+ }
env:
- SLACK_WEBHOOK_URL: ${{ secrets.CONSUL_CORE_SLACK_WEBHOOK }}
+ SLACK_WEBHOOK_URL: ${{ secrets.CONSUL_UI_SLACK_WEBHOOK }}

View File

@ -0,0 +1,230 @@
name: Nightly Test 1.12.x
on:
schedule:
- cron: '0 4 * * *'
workflow_dispatch: {}
env:
EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition
BRANCH: "release/1.12.x"
BRANCH_NAME: "release-1.12.x" # Used for naming artifacts
jobs:
frontend-test-workspace-node:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: make deps
- name: Workspace Tests
id: workspace-test
working-directory: ./ui
run: make test-workspace
- name: Node Tests
id: node-test
working-directory: ./ui/packages/consul-ui
run: make test-node
frontend-build-oss:
runs-on: ubuntu-latest
env:
JOBS: 2
CONSUL_NSPACES_ENABLED: 0
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: make deps
- name: Ember Build OSS
id: build-oss
working-directory: ./ui/packages/consul-ui
run: make build-ci
- name: Upload OSS Frontend
uses: actions/upload-artifact@v3
with:
name: frontend-oss-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
if-no-files-found: error
frontend-test-oss:
runs-on: ubuntu-latest
needs: [frontend-build-oss]
strategy:
matrix:
partition: [ 1, 2, 3, 4 ]
env:
CONSUL_NSPACES_ENABLED: 0
EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CircleCI test summary
EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: make deps
- name: Download OSS Frontend
uses: actions/download-artifact@v3
with:
name: frontend-oss-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
- name: Ember Test OSS
id: cache
working-directory: ./ui/packages/consul-ui
run: node_modules/.bin/ember exam --split=$EMBER_PARTITION_TOTAL --partition=${{ matrix.partition }} --path dist --silent -r xunit
frontend-build-ent:
runs-on: ubuntu-latest
env:
JOBS: 2
CONSUL_NSPACES_ENABLED: 1
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: make deps
- name: Ember Build ENT
id: build-oss
working-directory: ./ui/packages/consul-ui
run: make build-ci
- name: Upload ENT Frontend
uses: actions/upload-artifact@v3
with:
name: frontend-ent-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
if-no-files-found: error
frontend-test-ent:
runs-on: ubuntu-latest
needs: [frontend-build-ent]
strategy:
matrix:
partition: [ 1, 2, 3, 4 ]
env:
CONSUL_NSPACES_ENABLED: 1
EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CircleCI test summary
EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: make deps
- name: Download ENT Frontend
uses: actions/download-artifact@v3
with:
name: frontend-ent-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
- name: Ember Test ENT
id: cache
working-directory: ./ui/packages/consul-ui
run: node_modules/.bin/ember exam --split=$EMBER_PARTITION_TOTAL --partition=${{ matrix.partition }} --path dist --silent -r xunit
frontend-test-coverage-ent:
runs-on: ubuntu-latest
needs: [frontend-build-ent]
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: make deps
- name: Download ENT Frontend
uses: actions/download-artifact@v3
with:
name: frontend-ent-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
- name: Run ENT Code Coverage
working-directory: ./ui/packages/consul-ui
run: make test-coverage-ci
slack-failure-notification:
runs-on: ubuntu-latest
needs: [frontend-test-oss, frontend-test-ent]
if: ${{ failure() }}
steps:
- name: Slack Notification
id: slack
uses: slackapi/slack-github-action@v1.19
with:
payload: |
{
"message": "One or more nightly UI tests have failed on branch ${{ env.BRANCH }} for Consul. ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.CONSUL_UI_SLACK_WEBHOOK }}

230
.github/workflows/nightly-test-main.yaml vendored Normal file
View File

@ -0,0 +1,230 @@
name: Nightly Test Main
on:
schedule:
- cron: '0 4 * * *'
workflow_dispatch: {}
env:
EMBER_PARTITION_TOTAL: 4 # Has to be changed in tandem with the matrix.partition
BRANCH: "main"
BRANCH_NAME: "main" # Used for naming artifacts
jobs:
frontend-test-workspace-node:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: make deps
- name: Workspace Tests
id: workspace-test
working-directory: ./ui
run: make test-workspace
- name: Node Tests
id: node-test
working-directory: ./ui/packages/consul-ui
run: make test-node
frontend-build-oss:
runs-on: ubuntu-latest
env:
JOBS: 2
CONSUL_NSPACES_ENABLED: 0
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: make deps
- name: Ember Build OSS
id: build-oss
working-directory: ./ui/packages/consul-ui
run: make build-ci
- name: Upload OSS Frontend
uses: actions/upload-artifact@v3
with:
name: frontend-oss-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
if-no-files-found: error
frontend-test-oss:
runs-on: ubuntu-latest
needs: [frontend-build-oss]
strategy:
matrix:
partition: [ 1, 2, 3, 4 ]
env:
CONSUL_NSPACES_ENABLED: 0
EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CircleCI test summary
EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: make deps
- name: Download OSS Frontend
uses: actions/download-artifact@v3
with:
name: frontend-oss-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
- name: Ember Test OSS
id: cache
working-directory: ./ui/packages/consul-ui
run: node_modules/.bin/ember exam --split=$EMBER_PARTITION_TOTAL --partition=${{ matrix.partition }} --path dist --silent -r xunit
frontend-build-ent:
runs-on: ubuntu-latest
env:
JOBS: 2
CONSUL_NSPACES_ENABLED: 1
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: make deps
- name: Ember Build ENT
id: build-oss
working-directory: ./ui/packages/consul-ui
run: make build-ci
- name: Upload ENT Frontend
uses: actions/upload-artifact@v3
with:
name: frontend-ent-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
if-no-files-found: error
frontend-test-ent:
runs-on: ubuntu-latest
needs: [frontend-build-ent]
strategy:
matrix:
partition: [ 1, 2, 3, 4 ]
env:
CONSUL_NSPACES_ENABLED: 1
EMBER_TEST_REPORT: test-results/report-oss.xml #outputs test report for CircleCI test summary
EMBER_TEST_PARALLEL: true #enables test parallelization with ember-exam
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: make deps
- name: Download ENT Frontend
uses: actions/download-artifact@v3
with:
name: frontend-ent-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
- name: Ember Test ENT
id: cache
working-directory: ./ui/packages/consul-ui
run: node_modules/.bin/ember exam --split=$EMBER_PARTITION_TOTAL --partition=${{ matrix.partition }} --path dist --silent -r xunit
frontend-test-coverage-ent:
runs-on: ubuntu-latest
needs: [frontend-build-ent]
steps:
- uses: actions/checkout@v2
with:
ref: ${{ env.BRANCH }}
# Not necessary to use yarn, but enables caching
- uses: actions/setup-node@v3
with:
node-version: 14
cache: 'yarn'
cache-dependency-path: ./ui/yarn.lock
- name: Install
id: install
working-directory: ./ui
run: make deps
- name: Download ENT Frontend
uses: actions/download-artifact@v3
with:
name: frontend-ent-${{ env.BRANCH_NAME }}
path: ./ui/packages/consul-ui/dist
- name: Run ENT Code Coverage
working-directory: ./ui/packages/consul-ui
run: make test-coverage-ci
slack-failure-notification:
runs-on: ubuntu-latest
needs: [frontend-test-oss, frontend-test-ent]
if: ${{ failure() }}
steps:
- name: Slack Notification
id: slack
uses: slackapi/slack-github-action@v1.19
with:
payload: |
{
"message": "One or more nightly UI tests have failed on branch ${{ env.BRANCH }} for Consul. ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.CONSUL_UI_SLACK_WEBHOOK }}
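
Each of these nightly workflows also declares `workflow_dispatch`, so a run can be triggered by hand; with the GitHub CLI (an assumption, not part of this repo) that would look roughly like:

```sh
gh workflow run nightly-test-main.yaml --ref main
```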

26
.github/workflows/oss-merge-trigger.yml vendored Normal file
View File

@ -0,0 +1,26 @@
name: Trigger OSS to Enterprise Merge
on:
pull_request_target:
types:
- closed
branches:
- main
- 'release/*.*.x'
jobs:
trigger-oss-merge:
# run this only on merge events in OSS repo
if: ${{ github.event.pull_request.merged && github.repository == 'hashicorp/consul' }}
runs-on: ubuntu-latest
steps:
- name: Trigger Merge
env:
GIT_REF: ${{ github.ref_name }}
GIT_SHA: ${{ github.sha }}
GH_PAT: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
GIT_ACTOR: ${{ github.actor }}
run: |
curl -H "Authorization: token $GH_PAT" \
-H 'Accept: application/json' \
-d "{\"event_type\": \"oss-merge\", \"client_payload\": {\"git-ref\": \"${GIT_REF}\", \"git-sha\": \"${GIT_SHA}\", \"git-actor\": \"${GIT_ACTOR}\" }}" \
"https://api.github.com/repos/hashicorp/consul-enterprise/dispatches"

View File

@ -1,6 +1,6 @@
name: "Pull Request Labeler" name: "Pull Request Labeler"
on: on:
pull_request: pull_request_target:
types: [opened] types: [opened]
jobs: jobs:

View File

@ -8,7 +8,7 @@ on:
jobs:
metrics_test_check:
- if: "!contains(github.event.pull_request.labels.*.name, 'pr/no-metrics-test')"
+ if: "! ( contains(github.event.pull_request.labels.*.name, 'pr/no-metrics-test') || github.event.pull_request.user.login == 'hc-github-team-consul-core' )"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2

View File

@ -10,6 +10,8 @@
# in the PR and if they need to be cherry-picked to the stable-website branch, the
# 'type/docs-cherrypick' label needs to be applied.
+ name: Website Checker
on:
pull_request_target:
types: [opened]

View File

@ -26,6 +26,15 @@ issues:
- linters: [staticcheck]
text: 'SA1019: Package github.com/golang/protobuf/proto is deprecated'
+ - linters: [staticcheck]
+ text: 'SA1019: ptypes.MarshalAny is deprecated'
+ - linters: [staticcheck]
+ text: 'SA1019: ptypes.UnmarshalAny is deprecated'
+ - linters: [staticcheck]
+ text: 'SA1019: package github.com/golang/protobuf/ptypes is deprecated'
# An argument that always receives the same value is often not a problem.
- linters: [unparam]
text: 'always receives'

View File

@ -164,6 +164,20 @@ event "verify" {
}
}
+ event "promote-dev-docker" {
+ depends = ["verify"]
+ action "promote-dev-docker" {
+ organization = "hashicorp"
+ repository = "crt-workflows-common"
+ workflow = "promote-dev-docker"
+ depends = ["verify"]
+ }
+ notification {
+ on = "fail"
+ }
+ }
## These are promotion and post-publish events
## they should be added to the end of the file after the verify event stanza.

View File

@ -0,0 +1,82 @@
#!/usr/bin/dumb-init /bin/sh
set -e
# Note above that we run dumb-init as PID 1 in order to reap zombie processes
# as well as forward signals to all processes in its session. Normally, sh
# wouldn't do either of these functions so we'd leak zombies as well as do
# unclean termination of all our sub-processes.
# As of docker 1.13, using docker run --init achieves the same outcome.
# You can set CONSUL_BIND_INTERFACE to the name of the interface you'd like to
# bind to and this will look up the IP and pass the proper -bind= option along
# to Consul.
CONSUL_BIND=
if [ -n "$CONSUL_BIND_INTERFACE" ]; then
CONSUL_BIND_ADDRESS=$(ip -o -4 addr list $CONSUL_BIND_INTERFACE | head -n1 | awk '{print $4}' | cut -d/ -f1)
if [ -z "$CONSUL_BIND_ADDRESS" ]; then
echo "Could not find IP for interface '$CONSUL_BIND_INTERFACE', exiting"
exit 1
fi
CONSUL_BIND="-bind=$CONSUL_BIND_ADDRESS"
echo "==> Found address '$CONSUL_BIND_ADDRESS' for interface '$CONSUL_BIND_INTERFACE', setting bind option..."
fi
# You can set CONSUL_CLIENT_INTERFACE to the name of the interface you'd like to
# bind client interfaces (HTTP, DNS, and RPC) to and this will look up the IP and
# pass the proper -client= option along to Consul.
CONSUL_CLIENT=
if [ -n "$CONSUL_CLIENT_INTERFACE" ]; then
CONSUL_CLIENT_ADDRESS=$(ip -o -4 addr list $CONSUL_CLIENT_INTERFACE | head -n1 | awk '{print $4}' | cut -d/ -f1)
if [ -z "$CONSUL_CLIENT_ADDRESS" ]; then
echo "Could not find IP for interface '$CONSUL_CLIENT_INTERFACE', exiting"
exit 1
fi
CONSUL_CLIENT="-client=$CONSUL_CLIENT_ADDRESS"
echo "==> Found address '$CONSUL_CLIENT_ADDRESS' for interface '$CONSUL_CLIENT_INTERFACE', setting client option..."
fi
# CONSUL_DATA_DIR is exposed as a volume for possible persistent storage. The
# CONSUL_CONFIG_DIR isn't exposed as a volume but you can compose additional
# config files in there if you use this image as a base, or use CONSUL_LOCAL_CONFIG
# below.
CONSUL_DATA_DIR=/consul/data
CONSUL_CONFIG_DIR=/consul/config
# You can also set the CONSUL_LOCAL_CONFIG environment variable to pass some
# Consul configuration JSON without having to bind any volumes.
if [ -n "$CONSUL_LOCAL_CONFIG" ]; then
echo "$CONSUL_LOCAL_CONFIG" > "$CONSUL_CONFIG_DIR/local.json"
fi
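# For illustration only (not executed): the hooks above might be exercised as
#   docker run -e CONSUL_BIND_INTERFACE=eth0 \
#     -e CONSUL_LOCAL_CONFIG='{"datacenter":"dc1"}' <image> agent -dev
# where 'eth0', the JSON snippet, and <image> are all placeholder values.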
# If the user is trying to run Consul directly with some arguments, then
# pass them to Consul.
if [ "${1:0:1}" = '-' ]; then
set -- consul "$@"
fi
# Look for Consul subcommands.
if [ "$1" = 'agent' ]; then
shift
set -- consul agent \
-data-dir="$CONSUL_DATA_DIR" \
-config-dir="$CONSUL_CONFIG_DIR" \
$CONSUL_BIND \
$CONSUL_CLIENT \
"$@"
elif [ "$1" = 'version' ]; then
# This needs a special case because there's no help output.
set -- consul "$@"
elif consul --help "$1" 2>&1 | grep -q "consul $1"; then
# We can't use the return code to check for the existence of a subcommand, so
# we have to use grep to look for a pattern in the help output.
set -- consul "$@"
fi
# NOTE: Unlike in the regular Consul Docker image, we don't have code here
# for changing data-dir directory ownership or using su-exec because OpenShift
# won't run this container as root and so we can't change data dir ownership,
# and there's no need to use su-exec.
exec "$@"

View File

@ -1,7 +1,45 @@
## 1.12.2 (June 3, 2022)
BUG FIXES:
* kvs: Fixed a bug where query options were not being applied to KVS.Get RPC operations. [[GH-13344](https://github.com/hashicorp/consul/issues/13344)]
## 1.12.1 (May 25, 2022)
FEATURES:
* xds: Add the ability to invoke AWS Lambdas through sidecar proxies. [[GH-12956](https://github.com/hashicorp/consul/issues/12956)]
IMPROVEMENTS:
* config: introduce `telemetry.retry_failed_connection` in agent configuration to
retry on failed connection to any telemetry backend. This prevents the agent from
exiting if the given DogStatsD DNS name is unresolvable, for example. [[GH-13091](https://github.com/hashicorp/consul/issues/13091)]
* sentinel: **(Enterprise Only)** Sentinel now uses SHA256 to generate policy ids
* xds: Envoy now inserts x-forwarded-client-cert for incoming proxy connections [[GH-12878](https://github.com/hashicorp/consul/issues/12878)]
BUG FIXES:
* Fix a bug when configuring an `add_headers` directive named `Host` the header is not set for `v1/internal/ui/metrics-proxy/` endpoint. [[GH-13071](https://github.com/hashicorp/consul/issues/13071)]
* api: Fix a bug that causes partition to be ignored when creating a namespace [[GH-12845](https://github.com/hashicorp/consul/issues/12845)]
* api: agent/self now returns version with +ent suffix for Enterprise Consul [[GH-12961](https://github.com/hashicorp/consul/issues/12961)]
* areas: **(Enterprise Only)** Fixes a bug when using Yamux pool ( for servers version 1.7.3 and later), the entire pool was locked while connecting to a remote location, which could potentially take a long time. [[GH-1368](https://github.com/hashicorp/consul/issues/1368)]
* ca: fix a bug that caused a non blocking leaf cert query after a blocking leaf cert query to block [[GH-12820](https://github.com/hashicorp/consul/issues/12820)]
* config: fix backwards compatibility bug where setting the (deprecated) top-level `verify_incoming` option would enable TLS client authentication on the gRPC port [[GH-13118](https://github.com/hashicorp/consul/issues/13118)]
* health: ensure /v1/health/service/:service endpoint returns the most recent results when a filter is used with streaming #12640 [[GH-12640](https://github.com/hashicorp/consul/issues/12640)]
* snapshot-agent: **(Enterprise only)** Fix a bug where providing the ACL token to the snapshot agent via a CLI or ENV variable without a license configured results in an error during license auto-retrieval.
* ui: Re-instate '...' icon for row actions [[GH-13183](https://github.com/hashicorp/consul/issues/13183)]
NOTES:
* ci: change action to pull v1 instead of main [[GH-12846](https://github.com/hashicorp/consul/issues/12846)]
## 1.12.0 (April 20, 2022)
BREAKING CHANGES:
* connect: Removes support for Envoy 1.17.4 [[GH-12777](https://github.com/hashicorp/consul/issues/12777)]
* connect: Removes support for Envoy 1.18.6 [[GH-12805](https://github.com/hashicorp/consul/issues/12805)]
* sdk: several changes to the testutil configuration structs (removed `ACLMasterToken`, renamed `Master` to `InitialManagement`, and `AgentMaster` to `AgentRecovery`) [[GH-11827](https://github.com/hashicorp/consul/issues/11827)]
* telemetry: the disable_compat_1.9 option now defaults to true. 1.9 style `consul.http...` metrics can still be enabled by setting `disable_compat_1.9 = false`. However, we will remove these metrics in 1.13. [[GH-12675](https://github.com/hashicorp/consul/issues/12675)]
@ -78,6 +116,24 @@ NOTES:
* Forked net/rpc to add middleware support: https://github.com/hashicorp/consul-net-rpc/ . [[GH-12311](https://github.com/hashicorp/consul/issues/12311)]
* dependency: Upgrade to use Go 1.18.1 [[GH-12808](https://github.com/hashicorp/consul/issues/12808)]
## 1.11.6 (May 25, 2022)
IMPROVEMENTS:
* sentinel: **(Enterprise Only)** Sentinel now uses SHA256 to generate policy ids
BUG FIXES:
* Fix a bug when configuring an `add_headers` directive named `Host` the header is not set for `v1/internal/ui/metrics-proxy/` endpoint. [[GH-13071](https://github.com/hashicorp/consul/issues/13071)]
* areas: **(Enterprise Only)** Fixes a bug when using Yamux pool ( for servers version 1.7.3 and later), the entire pool was locked while connecting to a remote location, which could potentially take a long time. [[GH-1368](https://github.com/hashicorp/consul/issues/1368)]
* ca: fix a bug that caused a non blocking leaf cert query after a blocking leaf cert query to block [[GH-12820](https://github.com/hashicorp/consul/issues/12820)]
* health: ensure /v1/health/service/:service endpoint returns the most recent results when a filter is used with streaming #12640 [[GH-12640](https://github.com/hashicorp/consul/issues/12640)]
* snapshot-agent: **(Enterprise only)** Fix a bug where providing the ACL token to the snapshot agent via a CLI or ENV variable without a license configured results in an error during license auto-retrieval.
NOTES:
* ci: change action to pull v1 instead of main [[GH-12846](https://github.com/hashicorp/consul/issues/12846)]
## 1.11.5 (April 13, 2022)
SECURITY:
@ -342,6 +398,50 @@ NOTES:
* Renamed the `agent_master` field to `agent_recovery` in the `acl-tokens.json` file in which tokens are persisted on-disk (when `acl.enable_token_persistence` is enabled) [[GH-11744](https://github.com/hashicorp/consul/issues/11744)]
## 1.10.11 (May 25, 2022)
SECURITY:
* agent: Use SHA256 instead of MD5 to generate persistence file names.
IMPROVEMENTS:
* sentinel: **(Enterprise Only)** Sentinel now uses SHA256 to generate policy ids
BUG FIXES:
* Fix a bug when configuring an `add_headers` directive named `Host` the header is not set for `v1/internal/ui/metrics-proxy/` endpoint. [[GH-13071](https://github.com/hashicorp/consul/issues/13071)]
* areas: **(Enterprise Only)** Fixes a bug when using Yamux pool ( for servers version 1.7.3 and later), the entire pool was locked while connecting to a remote location, which could potentially take a long time. [[GH-1368](https://github.com/hashicorp/consul/issues/1368)]
* ca: fix a bug that caused a non blocking leaf cert query after a blocking leaf cert query to block [[GH-12820](https://github.com/hashicorp/consul/issues/12820)]
* health: ensure /v1/health/service/:service endpoint returns the most recent results when a filter is used with streaming #12640 [[GH-12640](https://github.com/hashicorp/consul/issues/12640)]
* snapshot-agent: **(Enterprise only)** Fix a bug where providing the ACL token to the snapshot agent via a CLI or ENV variable without a license configured results in an error during license auto-retrieval.
NOTES:
* ci: change action to pull v1 instead of main [[GH-12846](https://github.com/hashicorp/consul/issues/12846)]
## 1.10.10 (April 13, 2022)
SECURITY:
* agent: Added a new check field, `disable_redirects`, that allows for disabling the following of redirects for HTTP checks. The intention is to default this to true in a future release so that redirects must explicitly be enabled. [[GH-12685](https://github.com/hashicorp/consul/issues/12685)]
* connect: Properly set SNI when configured for services behind a terminating gateway. [[GH-12672](https://github.com/hashicorp/consul/issues/12672)]
IMPROVEMENTS:
* xds: ensure that all connect timeout configs can apply equally to tproxy direct dial connections [[GH-12711](https://github.com/hashicorp/consul/issues/12711)]
DEPRECATIONS:
* tls: With the upgrade to Go 1.17, the ordering of `tls_cipher_suites` will no longer be honored, and `tls_prefer_server_cipher_suites` is now ignored. [[GH-12766](https://github.com/hashicorp/consul/issues/12766)]
BUG FIXES:
* connect/ca: cancel old Vault renewal on CA configuration. Provide a 1 - 6 second backoff on repeated token renewal requests to prevent overwhelming Vault. [[GH-12607](https://github.com/hashicorp/consul/issues/12607)]
* raft: upgrade to v1.3.6 which fixes a bug where a read replica node could attempt bootstrapping raft and prevent other nodes from bootstrapping at all [[GH-12496](https://github.com/hashicorp/consul/issues/12496)]
* replication: Fixed a bug which could prevent ACL replication from continuing successfully after a leader election. [[GH-12565](https://github.com/hashicorp/consul/issues/12565)]
* server: fix spurious blocking query suppression for discovery chains [[GH-12512](https://github.com/hashicorp/consul/issues/12512)]
## 1.10.9 (February 28, 2022)
SECURITY:

View File

@ -188,6 +188,99 @@ COPY .release/docker/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["docker-entrypoint.sh"]
# By default you'll get an insecure single-node development server that stores
# everything in RAM, exposes a web UI and HTTP endpoints, and bootstraps itself.
# Don't use this configuration for production.
CMD ["agent", "-dev", "-client", "0.0.0.0"]
# Red Hat UBI-based image
# This target is used to build a Consul image for use on OpenShift.
FROM registry.access.redhat.com/ubi8/ubi-minimal:8.6 as ubi
ARG PRODUCT_NAME
ARG PRODUCT_VERSION
ARG PRODUCT_REVISION
ARG BIN_NAME
# PRODUCT_NAME and PRODUCT_VERSION are the name of the software on releases.hashicorp.com
# and the version to download. Example: PRODUCT_NAME=consul PRODUCT_VERSION=1.2.3.
ENV BIN_NAME=$BIN_NAME
ENV PRODUCT_VERSION=$PRODUCT_VERSION
ARG PRODUCT_NAME=$BIN_NAME
# TARGETOS and TARGETARCH are set automatically when --platform is provided.
ARG TARGETOS TARGETARCH
LABEL org.opencontainers.image.authors="Consul Team <consul@hashicorp.com>" \
org.opencontainers.image.url="https://www.consul.io/" \
org.opencontainers.image.documentation="https://www.consul.io/docs" \
org.opencontainers.image.source="https://github.com/hashicorp/consul" \
org.opencontainers.image.version=$PRODUCT_VERSION \
org.opencontainers.image.vendor="HashiCorp" \
org.opencontainers.image.title="consul" \
org.opencontainers.image.description="Consul is a datacenter runtime that provides service discovery, configuration, and orchestration."
# Copy license for Red Hat certification.
COPY LICENSE /licenses/mozilla.txt
# Set up certificates and base tools.
# dumb-init is downloaded directly from GitHub because there's no RPM package.
# Its shasum is hardcoded. If you upgrade the dumb-init version you'll need to
# also update the shasum.
RUN set -eux && \
microdnf install -y ca-certificates curl gnupg libcap openssl iputils jq iptables wget unzip tar && \
wget -O /usr/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.5/dumb-init_1.2.5_x86_64 && \
echo 'e874b55f3279ca41415d290c512a7ba9d08f98041b28ae7c2acb19a545f1c4df /usr/bin/dumb-init' > dumb-init-shasum && \
sha256sum --check dumb-init-shasum && \
chmod +x /usr/bin/dumb-init
# Create a non-root user to run the software. On OpenShift, this
# will not matter since the container is run as a random user and group
# but this is kept for consistency with our other images.
RUN groupadd $BIN_NAME && \
adduser --uid 100 --system -g $BIN_NAME $BIN_NAME
COPY dist/$TARGETOS/$TARGETARCH/$BIN_NAME /bin/
# The /consul/data dir is used by Consul to store state. The agent will be started
# with /consul/config as the configuration directory so you can add additional
# config files in that location.
# In addition, change the group of the /consul directory to 0 since OpenShift
# will always execute the container with group 0.
RUN mkdir -p /consul/data && \
mkdir -p /consul/config && \
chown -R consul /consul && \
chgrp -R 0 /consul && chmod -R g+rwX /consul
# set up nsswitch.conf for Go's "netgo" implementation which is used by Consul,
# otherwise DNS supersedes the container's hosts file, which we don't want.
RUN test -e /etc/nsswitch.conf || echo 'hosts: files dns' > /etc/nsswitch.conf
# Expose the consul data directory as a volume since there's mutable state in there.
VOLUME /consul/data
# Server RPC is used for communication between Consul clients and servers for internal
# request forwarding.
EXPOSE 8300
# Serf LAN and WAN (WAN is used only by Consul servers) are used for gossip between
# Consul agents. LAN is within the datacenter and WAN is between just the Consul
# servers in all datacenters.
EXPOSE 8301 8301/udp 8302 8302/udp
# HTTP and DNS (both TCP and UDP) are the primary interfaces that applications
# use to interact with Consul.
EXPOSE 8500 8600 8600/udp
COPY .release/docker/docker-entrypoint-ubi.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["docker-entrypoint.sh"]
# OpenShift by default will run containers with a random user, however their
# scanner requires that containers set a non-root user.
USER 100
# By default you'll get an insecure single-node development server that stores
# everything in RAM, exposes a web UI and HTTP endpoints, and bootstraps itself.
# Don't use this configuration for production.
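
Since the UBI target copies a pre-built binary from `dist/$TARGETOS/$TARGETARCH/` and reads `BIN_NAME`/`PRODUCT_VERSION` as build args, a local build might look like this sketch (the tag and version are illustrative):

```sh
# Assumes a binary already exists at dist/linux/amd64/consul.
docker build \
  --platform linux/amd64 \
  --target ubi \
  --build-arg BIN_NAME=consul \
  --build-arg PRODUCT_VERSION=1.12.2 \
  -t consul-ubi:local .
```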

View File

@ -2,24 +2,16 @@
# https://www.consul.io/docs/install#compiling-from-source
SHELL = bash
- GOTOOLS = \
- github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@master \
- github.com/hashicorp/go-bindata/go-bindata@master \
- github.com/vektra/mockery/v2@latest \
- github.com/golangci/golangci-lint/cmd/golangci-lint@v1.45.2 \
- github.com/hashicorp/lint-consul-retry@master
- PROTOC_VERSION=3.15.8
###
- # MOG_VERSION can be either a valid string for "go install <module>@<version>"
- # or the string @DEV to imply use whatever is currently installed locally.
+ # These version variables can either be a valid string for "go install <module>@<version>"
+ # or the string @DEV to imply use what is currently installed locally.
###
+ GOLANGCI_LINT_VERSION='v1.46.2'
+ MOCKERY_VERSION='v2.12.2'
+ BUF_VERSION='v1.4.0'
+ PROTOC_GEN_GO_GRPC_VERSION="v1.2.0"
MOG_VERSION='v0.3.0'
- ###
- # PROTOC_GO_INJECT_TAG_VERSION can be either a valid string for "go install <module>@<version>"
- # or the string @DEV to imply use whatever is currently installed locally.
- ###
PROTOC_GO_INJECT_TAG_VERSION='v1.3.0'
GOTAGS ?=
@ -28,13 +20,14 @@ MAIN_GOPATH=$(shell go env GOPATH | cut -d: -f1)
export PATH := $(PWD)/bin:$(GOPATH)/bin:$(PATH)
- ASSETFS_PATH?=agent/uiserver/bindata_assetfs.go
# Get the git commit
GIT_COMMIT?=$(shell git rev-parse --short HEAD)
GIT_COMMIT_YEAR?=$(shell git show -s --format=%cd --date=format:%Y HEAD)
GIT_DIRTY?=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true)
GIT_IMPORT=github.com/hashicorp/consul/version
- GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY)
+ DATE_FORMAT="%Y-%m-%dT%H:%M:%SZ" # it's tricky to do an RFC3339 format in a cross platform way, so we hardcode UTC
+ GIT_DATE=$(shell $(CURDIR)/build-support/scripts/build-date.sh) # we're using this for build date because it's stable across platform builds
+ GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY) -X $(GIT_IMPORT).BuildDate=$(GIT_DATE)
ifeq ($(FORCE_REBUILD),1)
NOCACHE=--no-cache
@ -273,34 +266,36 @@ other-consul:
exit 1 ; \
fi
- lint:
+ lint: lint-tools
- @echo "--> Running go golangci-lint"
+ @echo "--> Running golangci-lint"
@golangci-lint run --build-tags '$(GOTAGS)' && \
(cd api && golangci-lint run --build-tags '$(GOTAGS)') && \
(cd sdk && golangci-lint run --build-tags '$(GOTAGS)')
+ @echo "--> Running lint-consul-retry"
+ @lint-consul-retry
+ @echo "--> Running enumcover"
+ @enumcover ./...
- # If you've run "make ui" manually then this will get called for you. This is
- # also run as part of the release build script when it verifies that there are no
- # changes to the UI assets that aren't checked in.
- static-assets:
- @go-bindata-assetfs -pkg uiserver -prefix pkg -o $(ASSETFS_PATH) ./pkg/web_ui/...
- @go fmt $(ASSETFS_PATH)
+ # Build the static web ui inside a Docker container. For local testing only; do not commit these assets.
+ ui: ui-docker
+ # Build the static web ui with yarn. This is the version to commit.
+ .PHONY: ui-regen
+ ui-regen:
+ cd $(CURDIR)/ui && make && cd ..
+ rm -rf $(CURDIR)/agent/uiserver/dist
+ mv $(CURDIR)/ui/packages/consul-ui/dist $(CURDIR)/agent/uiserver/
- # Build the static web ui and build static assets inside a Docker container
- ui: ui-docker static-assets-docker
- tools: proto-tools
- @if [[ -d .gotools ]]; then rm -rf .gotools ; fi
- @for TOOL in $(GOTOOLS); do \
- echo "=== TOOL: $$TOOL" ; \
- go install -v $$TOOL ; \
- done
+ tools:
+ @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh
+ .PHONY: lint-tools
+ lint-tools:
+ @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh -lint
+ .PHONY: proto-tools
proto-tools:
- @$(SHELL) $(CURDIR)/build-support/scripts/protobuf.sh \
- --protoc-version "$(PROTOC_VERSION)" \
- --tools-only
+ @$(SHELL) $(CURDIR)/build-support/scripts/devtools.sh -protobuf
version:
@echo -n "Version: "
@ -317,15 +312,12 @@ docker-images: go-build-image ui-build-image
go-build-image:
@echo "Building Golang build container"
- @docker build $(NOCACHE) $(QUIET) --build-arg 'GOTOOLS=$(GOTOOLS)' -t $(GO_BUILD_TAG) - < build-support/docker/Build-Go.dockerfile
+ @docker build $(NOCACHE) $(QUIET) -t $(GO_BUILD_TAG) - < build-support/docker/Build-Go.dockerfile
ui-build-image:
@echo "Building UI build container"
@docker build $(NOCACHE) $(QUIET) -t $(UI_BUILD_TAG) - < build-support/docker/Build-UI.dockerfile
- static-assets-docker: go-build-image
- @$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh static-assets
consul-docker: go-build-image
@$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh consul
@ -335,6 +327,27 @@ ui-docker: ui-build-image
test-envoy-integ: $(ENVOY_INTEG_DEPS)
@go test -v -timeout=30m -tags integration ./test/integration/connect/envoy
.PHONY: test-compat-integ
test-compat-integ: dev-docker
ifeq ("$(GOTAGS)","")
@docker tag consul-dev:latest consul:local
@docker run --rm -t consul:local consul version
@cd ./test/integration/consul-container && \
go test -v -timeout=30m ./upgrade --target-version local --latest-version latest
else
@docker tag consul-dev:latest hashicorp/consul-enterprise:local
@docker run --rm -t hashicorp/consul-enterprise:local consul version
@cd ./test/integration/consul-container && \
go test -v -timeout=30m ./upgrade --tags $(GOTAGS) --target-version local --latest-version latest
endif
.PHONY: test-metrics-integ
test-metrics-integ: dev-docker
@docker tag consul-dev:latest consul:local
@docker run --rm -t consul:local consul version
@cd ./test/integration/consul-container && \
go test -v -timeout=7m ./metrics --target-version local
test-connect-ca-providers:
ifeq ("$(CIRCLECI)","true")
# Run in CI
@ -352,9 +365,17 @@ else
endif
.PHONY: proto
- proto:
+ proto: proto-tools
- @$(SHELL) $(CURDIR)/build-support/scripts/protobuf.sh \
- --protoc-version "$(PROTOC_VERSION)"
+ @$(SHELL) $(CURDIR)/build-support/scripts/protobuf.sh
+ .PHONY: proto-format
+ proto-format: proto-tools
+ @buf format -w
+ .PHONY: proto-lint
+ proto-lint: proto-tools
+ @buf lint --config proto/buf.yaml --path proto
+ @buf lint --config proto-public/buf.yaml --path proto-public
# utility to echo a makefile variable (i.e. 'make print-PROTOC_VERSION')
print-% : ; @echo $($*)
@ -381,6 +402,12 @@ envoy-regen:
@find "command/connect/envoy/testdata" -name '*.golden' -delete @find "command/connect/envoy/testdata" -name '*.golden' -delete
@go test -tags '$(GOTAGS)' ./command/connect/envoy -update @go test -tags '$(GOTAGS)' ./command/connect/envoy -update
.PHONY: all bin dev dist cov test test-internal cover lint ui static-assets tools proto-tools .PHONY: help
.PHONY: docker-images go-build-image ui-build-image static-assets-docker consul-docker ui-docker help:
$(info available make targets)
$(info ----------------------)
@grep "^[a-z0-9-][a-z0-9.-]*:" GNUmakefile | cut -d':' -f1 | sort
.PHONY: all bin dev dist cov test test-internal cover lint ui tools
.PHONY: docker-images go-build-image ui-build-image consul-docker ui-docker
.PHONY: version test-envoy-integ .PHONY: version test-envoy-integ
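
Two of the Makefile changes above lend themselves to quick illustration: the new `BuildDate` ldflag and the `print-%` utility target. A hedged sketch of both, assuming a checkout of this repo at its root:

```sh
# Stamp the build date into the version package the same way GOLDFLAGS does.
GIT_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
go build -ldflags "-X github.com/hashicorp/consul/version.BuildDate=${GIT_DATE}" .

# Echo any Makefile variable via the print-% pattern target.
make print-MOG_VERSION   # prints: v0.3.0
```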

View File

@ -58,6 +58,10 @@ func (m *EnterpriseMeta) NamespaceOrDefault() string {
return DefaultNamespaceName
}
func EqualNamespaces(_, _ string) bool {
return true
}
func NamespaceOrDefault(_ string) string {
return DefaultNamespaceName
}
@ -78,7 +82,9 @@ func (m *EnterpriseMeta) MergeNoWildcard(_ *EnterpriseMeta) {
// do nothing
}
func (_ *EnterpriseMeta) Normalize() {}
func (_ *EnterpriseMeta) NormalizePartition() {}
func (_ *EnterpriseMeta) NormalizeNamespace() {}
func (m *EnterpriseMeta) Matches(_ *EnterpriseMeta) bool {
return true
@@ -100,6 +106,10 @@ func (m *EnterpriseMeta) UnsetPartition() {
// do nothing
}
func (m *EnterpriseMeta) OverridePartition(_ string) {
// do nothing
}
func NewEnterpriseMetaWithPartition(_, _ string) EnterpriseMeta {
return emptyEnterpriseMeta
}
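These community-edition stubs illustrate a deliberate pattern: partition- and namespace-aware call sites compile against one set of signatures, and the non-enterprise build satisfies them with no-ops. A minimal sketch of the idea, using invented names (`Meta`, `Normalize`, `NamespaceOrDefault`) rather than the real types:

```go
package main

import "fmt"

// Meta stands in for an enterprise-metadata carrier. In a community
// build its methods are deliberately empty; an enterprise build would
// supply real normalization logic behind the same signatures.
type Meta struct{}

// Normalize is a no-op here, so callers may invoke it unconditionally.
func (m *Meta) Normalize() {}

// NamespaceOrDefault always reports the default namespace in this build.
func (m *Meta) NamespaceOrDefault() string { return "default" }

func main() {
	var m Meta
	m.Normalize() // safe in both builds, no edition branching needed
	fmt.Println(m.NamespaceOrDefault())
}
```

The payoff is that shared code never branches on edition; the cheapest implementation simply does nothing.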
acl/resolver/result.go Normal file
@@ -0,0 +1,27 @@
package resolver
import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/structs"
)
type Result struct {
acl.Authorizer
// TODO: likely we can reduce this interface
ACLIdentity structs.ACLIdentity
}
func (a Result) AccessorID() string {
if a.ACLIdentity == nil {
return ""
}
return a.ACLIdentity.ID()
}
func (a Result) Identity() structs.ACLIdentity {
return a.ACLIdentity
}
func (a Result) ToAllowAuthorizer() acl.AllowAuthorizer {
return acl.AllowAuthorizer{Authorizer: a, AccessorID: a.AccessorID()}
}
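Since Result embeds acl.Authorizer, any function that expects an authorizer can take a Result directly, and AccessorID degrades gracefully when no identity was resolved. A small, hypothetical consumer (the `describe` helper is invented for illustration):

```go
package resolver_test

import (
	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/acl/resolver"
	"github.com/hashicorp/consul/agent/structs"
)

// describe reports which accessor a resolved token maps to; a nil
// ACLIdentity (e.g. ACLs disabled) yields the empty accessor ID.
func describe(authz acl.Authorizer, ident structs.ACLIdentity) string {
	res := resolver.Result{Authorizer: authz, ACLIdentity: ident}
	if id := res.AccessorID(); id != "" {
		return id
	}
	return "anonymous"
}
```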
@@ -122,10 +122,7 @@ func (s *HTTPHandlers) ACLPolicyCRUD(resp http.ResponseWriter, req *http.Request
return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}}
}
- policyID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/policy/")
- if err != nil {
- return nil, err
- }
+ policyID := strings.TrimPrefix(req.URL.Path, "/v1/acl/policy/")
if policyID == "" && req.Method != "PUT" {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing policy ID"}
}
@@ -170,10 +167,7 @@ func (s *HTTPHandlers) ACLPolicyReadByName(resp http.ResponseWriter, req *http.R
return nil, aclDisabled
}
- policyName, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/policy/name/")
- if err != nil {
- return nil, err
- }
+ policyName := strings.TrimPrefix(req.URL.Path, "/v1/acl/policy/name/")
if policyName == "" {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing policy Name"}
}
@@ -308,10 +302,7 @@ func (s *HTTPHandlers) ACLTokenCRUD(resp http.ResponseWriter, req *http.Request)
return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}}
}
- tokenID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/token/")
- if err != nil {
- return nil, err
- }
+ tokenID := strings.TrimPrefix(req.URL.Path, "/v1/acl/token/")
if strings.HasSuffix(tokenID, "/clone") && req.Method == "PUT" {
tokenID = tokenID[:len(tokenID)-6]
fn = s.ACLTokenClone
@@ -541,10 +532,7 @@ func (s *HTTPHandlers) ACLRoleCRUD(resp http.ResponseWriter, req *http.Request)
return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}}
}
- roleID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/role/")
- if err != nil {
- return nil, err
- }
+ roleID := strings.TrimPrefix(req.URL.Path, "/v1/acl/role/")
if roleID == "" && req.Method != "PUT" {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing role ID"}
}
@@ -557,10 +545,7 @@ func (s *HTTPHandlers) ACLRoleReadByName(resp http.ResponseWriter, req *http.Req
return nil, aclDisabled
}
- roleName, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/role/name/")
- if err != nil {
- return nil, err
- }
+ roleName := strings.TrimPrefix(req.URL.Path, "/v1/acl/role/name/")
if roleName == "" {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing role Name"}
}
@@ -711,10 +696,7 @@ func (s *HTTPHandlers) ACLBindingRuleCRUD(resp http.ResponseWriter, req *http.Re
return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}}
}
- bindingRuleID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/binding-rule/")
- if err != nil {
- return nil, err
- }
+ bindingRuleID := strings.TrimPrefix(req.URL.Path, "/v1/acl/binding-rule/")
if bindingRuleID == "" && req.Method != "PUT" {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing binding rule ID"}
}
@@ -857,10 +839,7 @@ func (s *HTTPHandlers) ACLAuthMethodCRUD(resp http.ResponseWriter, req *http.Req
return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}}
}
- methodName, err := getPathSuffixUnescaped(req.URL.Path, "/v1/acl/auth-method/")
- if err != nil {
- return nil, err
- }
+ methodName := strings.TrimPrefix(req.URL.Path, "/v1/acl/auth-method/")
if methodName == "" && req.Method != "PUT" {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing auth method name"}
}
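Each of these handlers now reads the path suffix with a plain strings.TrimPrefix instead of the unescaping helper, so a segment arrives exactly as the router left it and is not decoded a second time. A minimal, self-contained illustration of the difference, assuming the path has already been unescaped once by the HTTP mux:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// The mux has already unescaped the raw request path once.
	path := "/v1/acl/policy/team%2Fa" // a policy ID containing an escaped slash

	// New behavior: take the suffix verbatim.
	id := strings.TrimPrefix(path, "/v1/acl/policy/")
	fmt.Println(id) // team%2Fa

	// Old behavior (sketched): a second unescape would mangle such IDs.
	twice, _ := url.PathUnescape(id)
	fmt.Println(twice) // team/a
}
```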
@@ -11,6 +11,7 @@ import (
"github.com/hashicorp/serf/serf"
"github.com/hashicorp/consul/acl"
+ "github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/local"
@@ -71,7 +72,9 @@ func NewTestACLAgent(t *testing.T, name string, hcl string, resolveAuthz authzRe
Output: logBuffer,
TimeFormat: "04:05.000",
})
- bd.MetricsHandler = metrics.NewInmemSink(1*time.Second, time.Minute)
+ bd.MetricsConfig = &lib.MetricsConfig{
+ Handler: metrics.NewInmemSink(1*time.Second, time.Minute),
+ }
agent, err := New(bd)
require.NoError(t, err)
@@ -92,15 +95,15 @@ func (a *TestACLAgent) ResolveToken(secretID string) (acl.Authorizer, error) {
return authz, err
}
- func (a *TestACLAgent) ResolveTokenAndDefaultMeta(secretID string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error) {
+ func (a *TestACLAgent) ResolveTokenAndDefaultMeta(secretID string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (resolver.Result, error) {
authz, err := a.ResolveToken(secretID)
if err != nil {
- return consul.ACLResolveResult{}, err
+ return resolver.Result{}, err
}
identity, err := a.resolveIdentFn(secretID)
if err != nil {
- return consul.ACLResolveResult{}, err
+ return resolver.Result{}, err
}
// Default the EnterpriseMeta based on the Tokens meta or actual defaults
@@ -114,7 +117,7 @@ func (a *TestACLAgent) ResolveTokenAndDefaultMeta(secretID string, entMeta *acl.
// Use the meta to fill in the ACL authorization context
entMeta.FillAuthzContext(authzContext)
- return consul.ACLResolveResult{Authorizer: authz, ACLIdentity: identity}, err
+ return resolver.Result{Authorizer: authz, ACLIdentity: identity}, err
}
// All of these are stubs to satisfy the interface
@@ -20,7 +20,6 @@ import (
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
- "github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/go-connlimit"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb"
@@ -31,6 +30,7 @@ import (
"google.golang.org/grpc"
"github.com/hashicorp/consul/acl"
+ "github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/ae"
"github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types"
@@ -41,6 +41,9 @@ import (
publicgrpc "github.com/hashicorp/consul/agent/grpc/public"
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/proxycfg"
+ proxycfgglue "github.com/hashicorp/consul/agent/proxycfg-glue"
+ catalogproxycfg "github.com/hashicorp/consul/agent/proxycfg-sources/catalog"
+ localproxycfg "github.com/hashicorp/consul/agent/proxycfg-sources/local"
"github.com/hashicorp/consul/agent/rpcclient/health"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/systemd"
@@ -54,6 +57,7 @@ import (
"github.com/hashicorp/consul/lib/mutex"
"github.com/hashicorp/consul/lib/routine"
"github.com/hashicorp/consul/logging"
+ "github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/tlsutil"
"github.com/hashicorp/consul/types"
)
@@ -174,7 +178,7 @@ type delegate interface {
// actions based on the permissions granted to the token.
// If either entMeta or authzContext are non-nil they will be populated with the
// default partition and namespace from the token.
- ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (consul.ACLResolveResult, error)
+ ResolveTokenAndDefaultMeta(token string, entMeta *acl.EnterpriseMeta, authzContext *acl.AuthorizerContext) (resolver.Result, error)
RPC(method string, args interface{}, reply interface{}) error
SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer, replyFn structs.SnapshotReplyFn) error
@@ -245,6 +249,9 @@ type Agent struct {
// checkTCPs maps the check ID to an associated TCP check
checkTCPs map[structs.CheckID]*checks.CheckTCP
+ // checkUDPs maps the check ID to an associated UDP check
+ checkUDPs map[structs.CheckID]*checks.CheckUDP
// checkGRPCs maps the check ID to an associated GRPC check
checkGRPCs map[structs.CheckID]*checks.CheckGRPC
@@ -398,6 +405,7 @@ func New(bd BaseDeps) (*Agent, error) {
checkHTTPs: make(map[structs.CheckID]*checks.CheckHTTP),
checkH2PINGs: make(map[structs.CheckID]*checks.CheckH2PING),
checkTCPs: make(map[structs.CheckID]*checks.CheckTCP),
+ checkUDPs: make(map[structs.CheckID]*checks.CheckUDP),
checkGRPCs: make(map[structs.CheckID]*checks.CheckGRPC),
checkDockers: make(map[structs.CheckID]*checks.CheckDocker),
checkAliases: make(map[structs.CheckID]*checks.CheckAlias),
@@ -625,12 +633,31 @@ func (a *Agent) Start(ctx context.Context) error {
go a.baseDeps.ViewStore.Run(&lib.StopChannelContext{StopCh: a.shutdownCh})
// Start the proxy config manager.
proxyDataSources := proxycfg.DataSources{
CARoots: proxycfgglue.CacheCARoots(a.cache),
CompiledDiscoveryChain: proxycfgglue.CacheCompiledDiscoveryChain(a.cache),
ConfigEntry: proxycfgglue.CacheConfigEntry(a.cache),
ConfigEntryList: proxycfgglue.CacheConfigEntryList(a.cache),
Datacenters: proxycfgglue.CacheDatacenters(a.cache),
FederationStateListMeshGateways: proxycfgglue.CacheFederationStateListMeshGateways(a.cache),
GatewayServices: proxycfgglue.CacheGatewayServices(a.cache),
Health: proxycfgglue.Health(a.rpcClientHealth),
HTTPChecks: proxycfgglue.CacheHTTPChecks(a.cache),
Intentions: proxycfgglue.CacheIntentions(a.cache),
IntentionUpstreams: proxycfgglue.CacheIntentionUpstreams(a.cache),
InternalServiceDump: proxycfgglue.CacheInternalServiceDump(a.cache),
LeafCertificate: proxycfgglue.CacheLeafCertificate(a.cache),
PreparedQuery: proxycfgglue.CachePrepraredQuery(a.cache),
ResolvedServiceConfig: proxycfgglue.CacheResolvedServiceConfig(a.cache),
ServiceList: proxycfgglue.CacheServiceList(a.cache),
TrustBundle: proxycfgglue.CacheTrustBundle(a.cache),
TrustBundleList: proxycfgglue.CacheTrustBundleList(a.cache),
ExportedPeeredServices: proxycfgglue.CacheExportedPeeredServices(a.cache),
}
a.fillEnterpriseProxyDataSources(&proxyDataSources)
a.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{
- Cache: a.cache,
- Health: a.rpcClientHealth,
- Logger: a.logger.Named(logging.ProxyConfig),
- State: a.State,
- Tokens: a.baseDeps.Tokens,
+ DataSources: proxyDataSources,
+ Logger: a.logger.Named(logging.ProxyConfig),
Source: &structs.QuerySource{
Datacenter: a.config.Datacenter,
Segment: a.config.SegmentName,
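The rewiring above replaces the manager's direct Cache/Health/State/Tokens dependencies with a single DataSources bundle; each proxycfgglue constructor adapts the agent cache (or, for Health, an RPC client) to the narrow interface the manager actually consumes. A rough sketch of that dependency-inversion shape, with all names invented:

```go
package main

import "fmt"

// DataSource stands in for one of the narrow interfaces the proxy config
// manager consumes; the real ones deliver watch-style updates.
type DataSource interface {
	Fetch(key string) (string, error)
}

// cacheBacked adapts a cache-like store to the DataSource interface,
// playing the role of a proxycfgglue constructor's return value.
type cacheBacked struct{ data map[string]string }

func (c cacheBacked) Fetch(key string) (string, error) { return c.data[key], nil }

// DataSources bundles every dependency the manager needs, so servers and
// tests can swap implementations without touching the manager itself.
type DataSources struct {
	CARoots DataSource
}

func main() {
	ds := DataSources{CARoots: cacheBacked{data: map[string]string{"roots": "ca1"}}}
	v, _ := ds.CARoots.Fetch("roots")
	fmt.Println(v)
}
```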
@@ -646,11 +673,17 @@
if err != nil {
return err
}
- go func() {
- if err := a.proxyConfig.Run(); err != nil {
- a.logger.Error("proxy config manager exited with error", "error", err)
- }
- }()
+ go localproxycfg.Sync(
+ &lib.StopChannelContext{StopCh: a.shutdownCh},
+ localproxycfg.SyncConfig{
+ Manager: a.proxyConfig,
+ State: a.State,
+ Logger: a.proxyConfig.Logger.Named("agent-state"),
+ Tokens: a.baseDeps.Tokens,
+ NodeName: a.config.NodeName,
+ },
+ )
// Start watching for critical services to deregister, based on their
// checks.
@@ -769,15 +802,34 @@ func (a *Agent) listenAndServeGRPC() error {
return nil
}
// TODO(agentless): rather than asserting the concrete type of delegate, we
// should add a method to the Delegate interface to build a ConfigSource.
var cfg xds.ProxyConfigSource = localproxycfg.NewConfigSource(a.proxyConfig)
if server, ok := a.delegate.(*consul.Server); ok {
catalogCfg := catalogproxycfg.NewConfigSource(catalogproxycfg.Config{
NodeName: a.config.NodeName,
LocalState: a.State,
LocalConfigSource: cfg,
Manager: a.proxyConfig,
GetStore: func() catalogproxycfg.Store { return server.FSM().State() },
Logger: a.proxyConfig.Logger.Named("server-catalog"),
})
go func() {
<-a.shutdownCh
catalogCfg.Shutdown()
}()
cfg = catalogCfg
}
a.xdsServer = xds.NewServer(
+ a.config.NodeName,
a.logger.Named(logging.Envoy),
a.config.ConnectServerlessPluginEnabled,
- a.proxyConfig,
+ cfg,
func(id string) (acl.Authorizer, error) {
return a.delegate.ResolveTokenAndDefaultMeta(id, nil, nil)
},
a,
+ a,
)
a.xdsServer.Register(a.publicGRPCServer)
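Per the TODO in the hunk above, the type assertion on the delegate is a stopgap: every agent serves xDS from its local proxy state, and a server additionally layers a catalog-backed source over it so that proxies not registered with this agent can still be configured. A compact sketch of that decorate-when-capable pattern, with invented names:

```go
package main

import "fmt"

type ConfigSource interface{ Name() string }

type localSource struct{}

func (localSource) Name() string { return "local" }

// catalogSource decorates another source with a catalog fallback,
// mirroring how the server wraps the local source.
type catalogSource struct{ fallback ConfigSource }

func (c catalogSource) Name() string { return "catalog->" + c.fallback.Name() }

// pickSource upgrades the source only when the delegate has server features.
func pickSource(isServer bool) ConfigSource {
	var src ConfigSource = localSource{}
	if isServer {
		src = catalogSource{fallback: src}
	}
	return src
}

func main() { fmt.Println(pickSource(true).Name()) }
```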
@@ -1429,6 +1481,7 @@ func (a *Agent) ShutdownAgent() error {
// this would be cancelled anyways (by the closing of the shutdown ch) but
// this should help them to be stopped more quickly
a.baseDeps.AutoConfig.Stop()
+ a.baseDeps.MetricsConfig.Cancel()
a.stateLock.Lock()
defer a.stateLock.Unlock()
@@ -1450,6 +1503,9 @@ func (a *Agent) ShutdownAgent() error {
for _, chk := range a.checkTCPs {
chk.Stop()
}
+ for _, chk := range a.checkUDPs {
+ chk.Stop()
+ }
for _, chk := range a.checkGRPCs {
chk.Stop()
}
@@ -2749,6 +2805,31 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
tcp.Start()
a.checkTCPs[cid] = tcp
case chkType.IsUDP():
if existing, ok := a.checkUDPs[cid]; ok {
existing.Stop()
delete(a.checkUDPs, cid)
}
if chkType.Interval < checks.MinInterval {
a.logger.Warn("check has interval below minimum",
"check", cid.String(),
"minimum_interval", checks.MinInterval,
)
chkType.Interval = checks.MinInterval
}
udp := &checks.CheckUDP{
CheckID: cid,
ServiceID: sid,
UDP: chkType.UDP,
Interval: chkType.Interval,
Timeout: chkType.Timeout,
Logger: a.logger,
StatusHandler: statusHandler,
}
udp.Start()
a.checkUDPs[cid] = udp
case chkType.IsGRPC():
if existing, ok := a.checkGRPCs[cid]; ok {
existing.Stop()
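The checks.CheckUDP runner wired up here is not part of this hunk. As a rough mental model (an assumption, not the shipped implementation), a UDP probe can only treat a successful send, or a reply arriving before the deadline, as passing, because UDP has no handshake and a silent peer is indistinguishable from a healthy one:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

// probeUDP sends one datagram and treats a clean write (and an optional
// readable reply before the deadline) as passing. The main hard failure a
// UDP check can observe is an ICMP port-unreachable, which surfaces as an
// error on the connected socket.
func probeUDP(addr string, timeout time.Duration) error {
	conn, err := net.DialTimeout("udp", addr, timeout)
	if err != nil {
		return err
	}
	defer conn.Close()

	if _, err := conn.Write([]byte("ping")); err != nil {
		return err
	}
	conn.SetReadDeadline(time.Now().Add(timeout))
	buf := make([]byte, 512)
	if _, err := conn.Read(buf); err != nil {
		if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
			return nil // no reply is not a failure for plain UDP
		}
		return err // e.g. connection refused delivered via ICMP
	}
	return nil
}

func main() {
	fmt.Println(probeUDP("127.0.0.1:53", 2*time.Second))
}
```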
@@ -3048,6 +3129,10 @@ func (a *Agent) cancelCheckMonitors(checkID structs.CheckID) {
check.Stop()
delete(a.checkTCPs, checkID)
}
if check, ok := a.checkUDPs[checkID]; ok {
check.Stop()
delete(a.checkUDPs, checkID)
}
if check, ok := a.checkGRPCs[checkID]; ok {
check.Stop()
delete(a.checkGRPCs, checkID)
@@ -3773,7 +3858,7 @@ func (a *Agent) reloadConfig(autoReload bool) error {
// breaking some existing behavior.
newCfg.NodeID = a.config.NodeID
- //if auto reload is enabled, make sure we have the right certs file watched.
+ // if auto reload is enabled, make sure we have the right certs file watched.
if autoReload {
for _, f := range []struct {
oldCfg tlsutil.ProtocolConfig
@@ -4045,15 +4130,21 @@ func (a *Agent) registerCache() {
a.cache.RegisterType(cachetype.GatewayServicesName, &cachetype.GatewayServices{RPC: a})
- a.cache.RegisterType(cachetype.ConfigEntriesName, &cachetype.ConfigEntries{RPC: a})
+ a.cache.RegisterType(cachetype.ConfigEntryListName, &cachetype.ConfigEntryList{RPC: a})
a.cache.RegisterType(cachetype.ConfigEntryName, &cachetype.ConfigEntry{RPC: a})
a.cache.RegisterType(cachetype.ServiceHTTPChecksName, &cachetype.ServiceHTTPChecks{Agent: a})
+ a.cache.RegisterType(cachetype.TrustBundleReadName, &cachetype.TrustBundle{Client: a.rpcClientPeering})
+ a.cache.RegisterType(cachetype.ExportedPeeredServicesName, &cachetype.ExportedPeeredServices{RPC: a})
a.cache.RegisterType(cachetype.FederationStateListMeshGatewaysName,
&cachetype.FederationStateListMeshGateways{RPC: a})
+ a.cache.RegisterType(cachetype.TrustBundleListName, &cachetype.TrustBundles{Client: a.rpcClientPeering})
a.registerEntCache()
}
@@ -91,6 +91,7 @@ func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (i
Revision string
Server bool
Version string
+ BuildDate string
}{
Datacenter: s.agent.config.Datacenter,
PrimaryDatacenter: s.agent.config.PrimaryDatacenter,
@@ -100,8 +101,10 @@
Revision: s.agent.config.Revision,
Server: s.agent.config.ServerMode,
// We expect the ent version to be part of the reported version string, and that's now part of the metadata, not the actual version.
Version: s.agent.config.VersionWithMetadata(),
+ BuildDate: s.agent.config.BuildDate.Format(time.RFC3339),
}
return Self{
Config: config,
DebugConfig: s.agent.config.Sanitized(),
@@ -173,7 +176,7 @@ func (s *HTTPHandlers) AgentMetrics(resp http.ResponseWriter, req *http.Request)
handler.ServeHTTP(resp, req)
return nil, nil
}
- return s.agent.baseDeps.MetricsHandler.DisplayMetrics(resp, req)
+ return s.agent.baseDeps.MetricsConfig.Handler.DisplayMetrics(resp, req)
}
func (s *HTTPHandlers) AgentMetricsStream(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
@@ -210,7 +213,7 @@ func (s *HTTPHandlers) AgentMetricsStream(resp http.ResponseWriter, req *http.Re
flusher: flusher,
}
enc.encoder.SetIndent("", " ")
- s.agent.baseDeps.MetricsHandler.Stream(req.Context(), enc)
+ s.agent.baseDeps.MetricsConfig.Handler.Stream(req.Context(), enc)
return nil, nil
}
@@ -382,10 +385,7 @@ func (s *HTTPHandlers) AgentServices(resp http.ResponseWriter, req *http.Request
// blocking watch using hash-based blocking.
func (s *HTTPHandlers) AgentService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Get the proxy ID. Note that this is the ID of a proxy's service instance.
- id, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/service/")
- if err != nil {
- return nil, err
- }
+ id := strings.TrimPrefix(req.URL.Path, "/v1/agent/service/")
// Maybe block
var queryOpts structs.QueryOptions
@@ -404,7 +404,7 @@
}
// need to resolve to default the meta
- _, err = s.agent.delegate.ResolveTokenAndDefaultMeta(token, &entMeta, nil)
+ _, err := s.agent.delegate.ResolveTokenAndDefaultMeta(token, &entMeta, nil)
if err != nil {
return nil, err
}
@@ -640,10 +640,7 @@ func (s *HTTPHandlers) AgentJoin(resp http.ResponseWriter, req *http.Request) (i
}
// Get the address
- addr, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/join/")
- if err != nil {
- return nil, err
- }
+ addr := strings.TrimPrefix(req.URL.Path, "/v1/agent/join/")
if wan {
if s.agent.config.ConnectMeshGatewayWANFederationEnabled {
@@ -703,10 +700,7 @@ func (s *HTTPHandlers) AgentForceLeave(resp http.ResponseWriter, req *http.Reque
// Check if the WAN is being queried
_, wan := req.URL.Query()["wan"]
- addr, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/force-leave/")
- if err != nil {
- return nil, err
- }
+ addr := strings.TrimPrefix(req.URL.Path, "/v1/agent/force-leave/")
if wan {
return nil, s.agent.ForceLeaveWAN(addr, prune, entMeta)
} else {
@@ -792,11 +786,8 @@ func (s *HTTPHandlers) AgentRegisterCheck(resp http.ResponseWriter, req *http.Re
}
func (s *HTTPHandlers) AgentDeregisterCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
- ID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/check/deregister/")
- if err != nil {
- return nil, err
- }
- checkID := structs.NewCheckID(types.CheckID(ID), nil)
+ id := strings.TrimPrefix(req.URL.Path, "/v1/agent/check/deregister/")
+ checkID := structs.NewCheckID(types.CheckID(id), nil)
// Get the provided token, if any, and vet against any ACL policies.
var token string
@@ -829,21 +820,15 @@
}
func (s *HTTPHandlers) AgentCheckPass(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
- ID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/check/pass/")
- if err != nil {
- return nil, err
- }
- checkID := types.CheckID(ID)
+ id := strings.TrimPrefix(req.URL.Path, "/v1/agent/check/pass/")
+ checkID := types.CheckID(id)
note := req.URL.Query().Get("note")
return s.agentCheckUpdate(resp, req, checkID, api.HealthPassing, note)
}
func (s *HTTPHandlers) AgentCheckWarn(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
- ID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/check/warn/")
- if err != nil {
- return nil, err
- }
- checkID := types.CheckID(ID)
+ id := strings.TrimPrefix(req.URL.Path, "/v1/agent/check/warn/")
+ checkID := types.CheckID(id)
note := req.URL.Query().Get("note")
return s.agentCheckUpdate(resp, req, checkID, api.HealthWarning, note)
@@ -851,11 +836,8 @@ func (s *HTTPHandlers) AgentCheckWarn(resp http.ResponseWriter, req *http.Reques
}
func (s *HTTPHandlers) AgentCheckFail(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
- ID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/check/fail/")
- if err != nil {
- return nil, err
- }
- checkID := types.CheckID(ID)
+ id := strings.TrimPrefix(req.URL.Path, "/v1/agent/check/fail/")
+ checkID := types.CheckID(id)
note := req.URL.Query().Get("note")
return s.agentCheckUpdate(resp, req, checkID, api.HealthCritical, note)
@@ -890,12 +872,8 @@ func (s *HTTPHandlers) AgentCheckUpdate(resp http.ResponseWriter, req *http.Requ
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: fmt.Sprintf("Invalid check status: '%s'", update.Status)}
}
- ID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/check/update/")
- if err != nil {
- return nil, err
- }
- checkID := types.CheckID(ID)
+ id := strings.TrimPrefix(req.URL.Path, "/v1/agent/check/update/")
+ checkID := types.CheckID(id)
return s.agentCheckUpdate(resp, req, checkID, update.Status, update.Output)
}
@@ -977,10 +955,7 @@ func returnTextPlain(req *http.Request) bool {
// AgentHealthServiceByID return the local Service Health given its ID
func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Pull out the service id (service id since there may be several instance of the same service on this host)
- serviceID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/health/service/id/")
- if err != nil {
- return nil, err
- }
+ serviceID := strings.TrimPrefix(req.URL.Path, "/v1/agent/health/service/id/")
if serviceID == "" {
return nil, &HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing serviceID"}
}
@@ -1038,11 +1013,7 @@ func (s *HTTPHandlers) AgentHealthServiceByID(resp http.ResponseWriter, req *htt
// AgentHealthServiceByName return the worse status of all the services with given name on an agent
func (s *HTTPHandlers) AgentHealthServiceByName(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Pull out the service name
- serviceName, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/health/service/name/")
- if err != nil {
- return nil, err
- }
+ serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/health/service/name/")
if serviceName == "" {
return nil, &HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing service Name"}
}
@@ -1247,11 +1218,7 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
}
func (s *HTTPHandlers) AgentDeregisterService(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
- serviceID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/service/deregister/")
- if err != nil {
- return nil, err
- }
+ serviceID := strings.TrimPrefix(req.URL.Path, "/v1/agent/service/deregister/")
sid := structs.NewServiceID(serviceID, nil)
// Get the provided token, if any, and vet against any ACL policies.
@@ -1287,11 +1254,7 @@ func (s *HTTPHandlers) AgentDeregisterService(resp http.ResponseWriter, req *htt
func (s *HTTPHandlers) AgentServiceMaintenance(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Ensure we have a service ID
- serviceID, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/service/maintenance/")
- if err != nil {
- return nil, err
- }
+ serviceID := strings.TrimPrefix(req.URL.Path, "/v1/agent/service/maintenance/")
sid := structs.NewServiceID(serviceID, nil)
if sid.ID == "" {
@@ -1489,10 +1452,7 @@ func (s *HTTPHandlers) AgentToken(resp http.ResponseWriter, req *http.Request) (
}
// Figure out the target token.
- target, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/token/")
- if err != nil {
- return nil, err
- }
+ target := strings.TrimPrefix(req.URL.Path, "/v1/agent/token/")
err = s.agent.tokens.WithPersistenceLock(func() error {
triggerAntiEntropySync := false
@@ -1565,10 +1525,9 @@ func (s *HTTPHandlers) AgentConnectCARoots(resp http.ResponseWriter, req *http.R
func (s *HTTPHandlers) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Get the service name. Note that this is the name of the service,
// not the ID of the service instance.
- serviceName, err := getPathSuffixUnescaped(req.URL.Path, "/v1/agent/connect/ca/leaf/")
- if err != nil {
- return nil, err
- }
+ serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/")
+ // TODO(peering): expose way to get kind=mesh-gateway type cert with appropriate ACLs
args := cachetype.ConnectCALeafRequest{
Service: serviceName, // Need name not ID
@@ -28,6 +28,7 @@ import (
"golang.org/x/time/rate"
"github.com/hashicorp/consul/acl"
+ "github.com/hashicorp/consul/acl/resolver"
"github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/connect/ca"
@@ -39,6 +40,7 @@ import (
tokenStore "github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/agent/xds/proxysupport"
"github.com/hashicorp/consul/api"
+ "github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc"
@@ -1563,7 +1565,9 @@ func TestHTTPHandlers_AgentMetricsStream_ACLDeny(t *testing.T) {
bd := BaseDeps{}
bd.Tokens = new(tokenStore.Store)
sink := metrics.NewInmemSink(30*time.Millisecond, time.Second)
- bd.MetricsHandler = sink
+ bd.MetricsConfig = &lib.MetricsConfig{
+ Handler: sink,
+ }
d := fakeResolveTokenDelegate{authorizer: acl.DenyAll()}
agent := &Agent{
baseDeps: bd,
@@ -1590,7 +1594,9 @@ func TestHTTPHandlers_AgentMetricsStream(t *testing.T) {
bd := BaseDeps{}
bd.Tokens = new(tokenStore.Store)
sink := metrics.NewInmemSink(20*time.Millisecond, time.Second)
- bd.MetricsHandler = sink
+ bd.MetricsConfig = &lib.MetricsConfig{
+ Handler: sink,
+ }
d := fakeResolveTokenDelegate{authorizer: acl.ManageAll()}
agent := &Agent{
baseDeps: bd,
@@ -1640,8 +1646,8 @@ type fakeResolveTokenDelegate struct {
authorizer acl.Authorizer
}
- func (f fakeResolveTokenDelegate) ResolveTokenAndDefaultMeta(_ string, _ *acl.EnterpriseMeta, _ *acl.AuthorizerContext) (consul.ACLResolveResult, error) {
- return consul.ACLResolveResult{Authorizer: f.authorizer}, nil
+ func (f fakeResolveTokenDelegate) ResolveTokenAndDefaultMeta(_ string, _ *acl.EnterpriseMeta, _ *acl.AuthorizerContext) (resolver.Result, error) {
+ return resolver.Result{Authorizer: f.authorizer}, nil
}
func TestAgent_Reload(t *testing.T) {
@@ -1698,6 +1704,7 @@ func TestAgent_Reload(t *testing.T) {
})
shim := &delegateConfigReloadShim{delegate: a.delegate}
+ // NOTE: this may require refactoring to remove a potential test race
a.delegate = shim
if err := a.reloadConfigInternal(cfg2); err != nil {
t.Fatalf("got error %v want nil", err)
@@ -2508,6 +2515,48 @@ func TestAgent_RegisterCheck(t *testing.T) {
}
}
func TestAgent_RegisterCheck_UDP(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
args := &structs.CheckDefinition{
UDP: "1.1.1.1",
Name: "test",
Interval: 10 * time.Second,
}
req, _ := http.NewRequest("PUT", "/v1/agent/check/register?token=abc123", jsonReader(args))
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
require.Equal(t, http.StatusOK, resp.Code)
// Ensure we have a check mapping
checkID := structs.NewCheckID("test", nil)
if existing := a.State.Check(checkID); existing == nil {
t.Fatalf("missing test check")
}
if _, ok := a.checkUDPs[checkID]; !ok {
t.Fatalf("missing test check udp")
}
// Ensure the token was configured
if token := a.State.CheckToken(checkID); token == "" {
t.Fatalf("missing token")
}
// By default, checks start in critical state.
state := a.State.Check(checkID)
if state.Status != api.HealthCritical {
t.Fatalf("bad: %v", state)
}
}
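The same registration can be driven against a live agent over HTTP; a hedged example of the payload this test constructs, with the agent address, probe target, and interval chosen here purely for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Mirrors the test body: a named UDP check probing an address on an interval.
	body := []byte(`{"Name":"test","UDP":"127.0.0.1:53","Interval":"10s"}`)
	req, err := http.NewRequest(http.MethodPut,
		"http://127.0.0.1:8500/v1/agent/check/register", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err) // assumes a local agent is listening on port 8500
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```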
// This verifies all the forms of the new args-style check that we need to
// support as a result of https://github.com/hashicorp/consul/issues/3587.
func TestAgent_RegisterCheck_Scripts(t *testing.T) {
@@ -3270,6 +3319,10 @@ func testAgent_RegisterService(t *testing.T, extraHCL string) {
{
TTL: 30 * time.Second,
},
{
UDP: "1.1.1.1",
Interval: 5 * time.Second,
},
},
Weights: &structs.Weights{
Passing: 100,
@@ -3301,12 +3354,12 @@ func testAgent_RegisterService(t *testing.T, extraHCL string) {
// Ensure we have a check mapping
checks := a.State.Checks(structs.WildcardEnterpriseMetaInDefaultPartition())
- if len(checks) != 3 {
+ if len(checks) != 4 {
t.Fatalf("bad: %v", checks)
}
for _, c := range checks {
- if c.Type != "ttl" {
- t.Fatalf("expected ttl check type, got %s", c.Type)
+ if c.Type != "ttl" && c.Type != "udp" {
+ t.Fatalf("expected ttl or udp check type, got %s", c.Type)
}
}
@@ -3356,6 +3409,11 @@ func testAgent_RegisterService_ReRegister(t *testing.T, extraHCL string) {
CheckID: types.CheckID("check_2"),
TTL: 30 * time.Second,
},
{
CheckID: types.CheckID("check_3"),
UDP: "1.1.1.1",
Interval: 5 * time.Second,
},
},
Weights: &structs.Weights{
Passing: 100,
@@ -3381,6 +3439,11 @@ func testAgent_RegisterService_ReRegister(t *testing.T, extraHCL string) {
CheckID: types.CheckID("check_3"),
TTL: 30 * time.Second,
},
{
CheckID: types.CheckID("check_3"),
UDP: "1.1.1.1",
Interval: 5 * time.Second,
},
},
Weights: &structs.Weights{
Passing: 100,
@@ -3708,6 +3771,231 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
}
}
func TestAgent_RegisterService_TranslateKeys_UDP(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Run("normal", func(t *testing.T) {
t.Parallel()
testAgent_RegisterService_TranslateKeys_UDP(t, "enable_central_service_config = false")
})
t.Run("service manager", func(t *testing.T) {
t.Parallel()
testAgent_RegisterService_TranslateKeys_UDP(t, "enable_central_service_config = true")
})
}
func testAgent_RegisterService_TranslateKeys_UDP(t *testing.T, extraHCL string) {
t.Helper()
tests := []struct {
ip string
expectedUDPCheckStart string
}{
{"127.0.0.1", "127.0.0.1:"}, // private network address
{"::1", "[::1]:"}, // shared address space
}
for _, tt := range tests {
t.Run(tt.ip, func(t *testing.T) {
a := NewTestAgent(t, `
connect {}
`+extraHCL)
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
json := `
{
"name":"test",
"port":8000,
"enable_tag_override": true,
"tagged_addresses": {
"lan": {
"address": "1.2.3.4",
"port": 5353
},
"wan": {
"address": "2.3.4.5",
"port": 53
}
},
"meta": {
"some": "meta",
"enable_tag_override": "meta is 'opaque' so should not get translated"
},
"kind": "connect-proxy",` +
// Note the uppercase P is important here - it ensures translation works
// correctly in case-insensitive way. Without it this test can pass even
// when translation is broken for other valid inputs.
`"Proxy": {
"destination_service_name": "web",
"destination_service_id": "web",
"local_service_port": 1234,
"local_service_address": "` + tt.ip + `",
"config": {
"destination_type": "proxy.config is 'opaque' so should not get translated"
},
"upstreams": [
{
"destination_type": "service",
"destination_namespace": "default",
"destination_partition": "default",
"destination_name": "db",
"local_bind_address": "` + tt.ip + `",
"local_bind_port": 1234,
"config": {
"destination_type": "proxy.upstreams.config is 'opaque' so should not get translated"
}
}
]
},
"connect": {
"sidecar_service": {
"name":"test-proxy",
"port":8001,
"enable_tag_override": true,
"meta": {
"some": "meta",
"enable_tag_override": "sidecar_service.meta is 'opaque' so should not get translated"
},
"kind": "connect-proxy",
"proxy": {
"destination_service_name": "test",
"destination_service_id": "test",
"local_service_port": 4321,
"local_service_address": "` + tt.ip + `",
"upstreams": [
{
"destination_type": "service",
"destination_namespace": "default",
"destination_partition": "default",
"destination_name": "db",
"local_bind_address": "` + tt.ip + `",
"local_bind_port": 1234,
"config": {
"destination_type": "sidecar_service.proxy.upstreams.config is 'opaque' so should not get translated"
}
}
]
}
}
},
"weights":{
"passing": 16
}
}`
req, _ := http.NewRequest("PUT", "/v1/agent/service/register", strings.NewReader(json))
rr := httptest.NewRecorder()
a.srv.h.ServeHTTP(rr, req)
require.Equal(t, 200, rr.Code, "body: %s", rr.Body)
svc := &structs.NodeService{
ID: "test",
Service: "test",
TaggedAddresses: map[string]structs.ServiceAddress{
"lan": {
Address: "1.2.3.4",
Port: 5353,
},
"wan": {
Address: "2.3.4.5",
Port: 53,
},
},
Meta: map[string]string{
"some": "meta",
"enable_tag_override": "meta is 'opaque' so should not get translated",
},
Port: 8000,
EnableTagOverride: true,
Weights: &structs.Weights{Passing: 16, Warning: 0},
Kind: structs.ServiceKindConnectProxy,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "web",
DestinationServiceID: "web",
LocalServiceAddress: tt.ip,
LocalServicePort: 1234,
Config: map[string]interface{}{
"destination_type": "proxy.config is 'opaque' so should not get translated",
},
Upstreams: structs.Upstreams{
{
DestinationType: structs.UpstreamDestTypeService,
DestinationName: "db",
DestinationNamespace: "default",
DestinationPartition: "default",
LocalBindAddress: tt.ip,
LocalBindPort: 1234,
Config: map[string]interface{}{
"destination_type": "proxy.upstreams.config is 'opaque' so should not get translated",
},
},
},
},
Connect: structs.ServiceConnect{
// The sidecar service is nilled since it is only config sugar and
// shouldn't be represented in state. We assert that the translations
// there worked by inspecting the registered sidecar below.
SidecarService: nil,
},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
}
got := a.State.Service(structs.NewServiceID("test", nil))
require.Equal(t, svc, got)
sidecarSvc := &structs.NodeService{
Kind: structs.ServiceKindConnectProxy,
ID: "test-sidecar-proxy",
Service: "test-proxy",
Meta: map[string]string{
"some": "meta",
"enable_tag_override": "sidecar_service.meta is 'opaque' so should not get translated",
},
TaggedAddresses: map[string]structs.ServiceAddress{},
Port: 8001,
EnableTagOverride: true,
Weights: &structs.Weights{Passing: 1, Warning: 1},
LocallyRegisteredAsSidecar: true,
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "test",
DestinationServiceID: "test",
LocalServiceAddress: tt.ip,
LocalServicePort: 4321,
Upstreams: structs.Upstreams{
{
DestinationType: structs.UpstreamDestTypeService,
DestinationName: "db",
DestinationNamespace: "default",
DestinationPartition: "default",
LocalBindAddress: tt.ip,
LocalBindPort: 1234,
Config: map[string]interface{}{
"destination_type": "sidecar_service.proxy.upstreams.config is 'opaque' so should not get translated",
},
},
},
},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
}
gotSidecar := a.State.Service(structs.NewServiceID("test-sidecar-proxy", nil))
hasNoCorrectUDPCheck := true
for _, v := range a.checkUDPs {
if strings.HasPrefix(v.UDP, tt.expectedUDPCheckStart) {
hasNoCorrectUDPCheck = false
break
}
fmt.Println("UDP Check:= ", v)
}
if hasNoCorrectUDPCheck {
t.Fatalf("Did not find the expected UDP Healtcheck '%s' in %#v ", tt.expectedUDPCheckStart, a.checkUDPs)
}
require.Equal(t, sidecarSvc, gotSidecar)
})
}
}
func TestAgent_RegisterService_ACLDeny(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
@@ -4457,6 +4745,503 @@ func testAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T, extraHCL s
}
}
// This tests local agent service registration with a sidecar service. Note we
// only test simple defaults for the sidecar here since the actual logic for
// handling sidecar defaults and port assignment is tested thoroughly in
// TestAgent_sidecarServiceFromNodeService. Note it also tests Deregister
// explicitly too since setup is identical.
func TestAgent_RegisterServiceDeregisterService_Sidecar_UDP(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Run("normal", func(t *testing.T) {
t.Parallel()
testAgent_RegisterServiceDeregisterService_Sidecar_UDP(t, "enable_central_service_config = false")
})
t.Run("service manager", func(t *testing.T) {
t.Parallel()
testAgent_RegisterServiceDeregisterService_Sidecar_UDP(t, "enable_central_service_config = true")
})
}
func testAgent_RegisterServiceDeregisterService_Sidecar_UDP(t *testing.T, extraHCL string) {
t.Helper()
tests := []struct {
name string
preRegister, preRegister2 *structs.NodeService
// Use raw JSON payloads rather than encoding to avoid subtleties with some
// internal representations and different ways they encode and decode. We
// rely on the payload being Unmarshalable to structs.ServiceDefinition
// directly.
json string
enableACL bool
tokenRules string
wantNS *structs.NodeService
wantErr string
wantSidecarIDLeftAfterDereg bool
assertStateFn func(t *testing.T, state *local.State)
}{
{
name: "sanity check no sidecar case",
json: `
{
"name": "web",
"port": 1111
}
`,
wantNS: nil,
wantErr: "",
},
{
name: "default sidecar",
json: `
{
"name": "web",
"port": 1111,
"connect": {
"SidecarService": {}
}
}
`,
wantNS: testDefaultSidecar("web", 1111),
wantErr: "",
},
{
name: "ACL OK defaults",
json: `
{
"name": "web",
"port": 1111,
"connect": {
"SidecarService": {}
}
}
`,
enableACL: true,
tokenRules: `
service "web-sidecar-proxy" {
policy = "write"
}
service "web" {
policy = "write"
}`,
wantNS: testDefaultSidecar("web", 1111),
wantErr: "",
},
{
name: "ACL denied",
json: `
{
"name": "web",
"port": 1111,
"connect": {
"SidecarService": {}
}
}
`,
enableACL: true,
tokenRules: ``, // No token rules means no valid token
wantNS: nil,
wantErr: "Permission denied",
},
{
name: "ACL OK for service but not for sidecar",
json: `
{
"name": "web",
"port": 1111,
"connect": {
"SidecarService": {}
}
}
`,
enableACL: true,
// This will become more common/reasonable when ACLs support exact match.
tokenRules: `
service "web-sidecar-proxy" {
policy = "deny"
}
service "web" {
policy = "write"
}`,
wantNS: nil,
wantErr: "Permission denied",
},
{
name: "ACL OK for service and sidecar but not sidecar's overridden destination",
json: `
{
"name": "web",
"port": 1111,
"connect": {
"SidecarService": {
"proxy": {
"DestinationServiceName": "foo"
}
}
}
}
`,
enableACL: true,
tokenRules: `
service "web-sidecar-proxy" {
policy = "write"
}
service "web" {
policy = "write"
}`,
wantNS: nil,
wantErr: "Permission denied",
},
{
name: "ACL OK for service but not for overridden sidecar",
json: `
{
"name": "web",
"port": 1111,
"connect": {
"SidecarService": {
"name": "foo-sidecar-proxy"
}
}
}
`,
enableACL: true,
tokenRules: `
service "web-sidecar-proxy" {
policy = "write"
}
service "web" {
policy = "write"
}`,
wantNS: nil,
wantErr: "Permission denied",
},
{
name: "ACL OK for service but and overridden for sidecar",
// This test ensures that if the sidecar embeds it's own token with
// different privs from the main request token it will be honored for the
// sidecar registration. We use the test root token since that should have
// permission.
json: `
{
"name": "web",
"port": 1111,
"connect": {
"SidecarService": {
"name": "foo",
"token": "root"
}
}
}
`,
enableACL: true,
tokenRules: `
service "web-sidecar-proxy" {
policy = "write"
}
service "web" {
policy = "write"
}`,
wantNS: testDefaultSidecar("web", 1111, func(ns *structs.NodeService) {
ns.Service = "foo"
}),
wantErr: "",
},
{
name: "invalid check definition in sidecar",
// Note no interval in the UDP check should fail validation
json: `
{
"name": "web",
"port": 1111,
"connect": {
"SidecarService": {
"check": {
"UDP": "foo"
}
}
}
}
`,
wantNS: nil,
wantErr: "invalid check in sidecar_service",
},
{
name: "invalid checks definitions in sidecar",
// Note no interval in the UDP check should fail validation
json: `
{
"name": "web",
"port": 1111,
"connect": {
"SidecarService": {
"checks": [{
"UDP": "foo"
}]
}
}
}
`,
wantNS: nil,
wantErr: "invalid check in sidecar_service",
},
{
name: "invalid check status in sidecar",
// Note no interval in the UDP check should fail validation
json: `
{
"name": "web",
"port": 1111,
"connect": {
"SidecarService": {
"check": {
"UDP": "foo",
"Interval": 10,
"Status": "unsupported-status"
}
}
}
}
`,
wantNS: nil,
wantErr: "Status for checks must 'passing', 'warning', 'critical'",
},
{
name: "invalid checks status in sidecar",
// Note no interval in the UDP check should fail validation
json: `
{
"name": "web",
"port": 1111,
"connect": {
"SidecarService": {
"checks": [{
"UDP": "foo",
"Interval": 10,
"Status": "unsupported-status"
}]
}
}
}
`,
wantNS: nil,
wantErr: "Status for checks must 'passing', 'warning', 'critical'",
},
{
name: "another service registered with same ID as a sidecar should not be deregistered",
// Add another service with the same ID that a sidecar for web would have
preRegister: &structs.NodeService{
ID: "web-sidecar-proxy",
Service: "fake-sidecar",
Port: 9999,
},
// Register web with NO SIDECAR
json: `
{
"name": "web",
"port": 1111
}
`,
// Note here that although the registration here didn't register it, we
// should still see the NodeService we pre-registered here.
wantNS: &structs.NodeService{
ID: "web-sidecar-proxy",
Service: "fake-sidecar",
Port: 9999,
TaggedAddresses: map[string]structs.ServiceAddress{},
Weights: &structs.Weights{
Passing: 1,
Warning: 1,
},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
},
// After we deregister the web service above, the fake sidecar with
// clashing ID SHOULD NOT have been removed since it wasn't part of the
// original registration.
wantSidecarIDLeftAfterDereg: true,
},
{
name: "updates to sidecar should work",
// Add a valid sidecar already registered
preRegister: &structs.NodeService{
ID: "web-sidecar-proxy",
Service: "web-sidecar-proxy",
LocallyRegisteredAsSidecar: true,
Port: 9999,
},
// Register web with Sidecar on different port
json: `
{
"name": "web",
"port": 1111,
"connect": {
"SidecarService": {
"Port": 6666
}
}
}
`,
// Note here that although the registration here didn't register it, we
// should still see the NodeService we pre-registered here.
wantNS: &structs.NodeService{
Kind: "connect-proxy",
ID: "web-sidecar-proxy",
Service: "web-sidecar-proxy",
LocallyRegisteredAsSidecar: true,
Port: 6666,
TaggedAddresses: map[string]structs.ServiceAddress{},
Weights: &structs.Weights{
Passing: 1,
Warning: 1,
},
Proxy: structs.ConnectProxyConfig{
DestinationServiceName: "web",
DestinationServiceID: "web",
LocalServiceAddress: "127.0.0.1",
LocalServicePort: 1111,
},
EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
},
},
{
name: "update that removes sidecar should NOT deregister it",
// Add web with a valid sidecar already registered
preRegister: &structs.NodeService{
ID: "web",
Service: "web",
Port: 1111,
},
preRegister2: testDefaultSidecar("web", 1111),
// Register (update) web and remove sidecar (and port for sanity check)
json: `
{
"name": "web",
"port": 2222
}
`,
// Sidecar should still be there such that API can update registration
// without accidentally removing a sidecar. This is equivalent to embedded
// checks which are not removed by just not being included in an update.
// We will document that sidecar registrations via API must be explicitly
// deregistered.
wantNS: testDefaultSidecar("web", 1111),
// Sanity check the rest of the update happened though.
assertStateFn: func(t *testing.T, state *local.State) {
svc := state.Service(structs.NewServiceID("web", nil))
require.NotNil(t, svc)
require.Equal(t, 2222, svc.Port)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Constrain auto ports to 1 available to make it deterministic
hcl := `ports {
sidecar_min_port = 2222
sidecar_max_port = 2222
}
`
if tt.enableACL {
hcl = hcl + TestACLConfig()
}
a := NewTestAgent(t, hcl+" "+extraHCL)
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
if tt.preRegister != nil {
require.NoError(t, a.addServiceFromSource(tt.preRegister, nil, false, "", ConfigSourceLocal))
}
if tt.preRegister2 != nil {
require.NoError(t, a.addServiceFromSource(tt.preRegister2, nil, false, "", ConfigSourceLocal))
}
// Create an ACL token with require policy
var token string
if tt.enableACL && tt.tokenRules != "" {
token = testCreateToken(t, a, tt.tokenRules)
}
br := bytes.NewBufferString(tt.json)
req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token="+token, br)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
if tt.wantErr != "" {
require.Contains(t, strings.ToLower(resp.Body.String()), strings.ToLower(tt.wantErr))
return
}
require.Equal(t, 200, resp.Code, "request failed with body: %s",
resp.Body.String())
// Sanity the target service registration
svcs := a.State.AllServices()
// Parse the expected definition into a ServiceDefinition
var sd structs.ServiceDefinition
err := json.Unmarshal([]byte(tt.json), &sd)
require.NoError(t, err)
require.NotEmpty(t, sd.Name)
svcID := sd.ID
if svcID == "" {
svcID = sd.Name
}
sid := structs.NewServiceID(svcID, nil)
svc, ok := svcs[sid]
require.True(t, ok, "has service "+sid.String())
assert.Equal(t, sd.Name, svc.Service)
assert.Equal(t, sd.Port, svc.Port)
// Ensure that the actual registered service _doesn't_ still have its
// sidecar info since it's duplicate and we don't want that synced up to
// the catalog or included in responses particularly - it's just
// registration syntax sugar.
assert.Nil(t, svc.Connect.SidecarService)
if tt.wantNS == nil {
// Sanity check that there was no service registered, we rely on there
// being no services at start of test so we can just use the count.
assert.Len(t, svcs, 1, "should be no sidecar registered")
return
}
// Ensure sidecar
svc, ok = svcs[structs.NewServiceID(tt.wantNS.ID, nil)]
require.True(t, ok, "no sidecar registered at "+tt.wantNS.ID)
assert.Equal(t, tt.wantNS, svc)
if tt.assertStateFn != nil {
tt.assertStateFn(t, a.State)
}
// Now verify deregistration also removes sidecar (if there was one and it
// was added via sidecar not just coincidental ID clash)
{
req := httptest.NewRequest("PUT",
"/v1/agent/service/deregister/"+svcID+"?token="+token, nil)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
require.Equal(t, http.StatusOK, resp.Code)
svcs := a.State.AllServices()
_, ok = svcs[structs.NewServiceID(tt.wantNS.ID, nil)]
if tt.wantSidecarIDLeftAfterDereg {
require.True(t, ok, "removed non-sidecar service at "+tt.wantNS.ID)
} else {
require.False(t, ok, "sidecar not deregistered with service "+svcID)
}
}
})
}
}
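For context, a minimal sketch of the registration shape these tests exercise, written against the Go API client (`github.com/hashicorp/consul/api`); the service name and port are illustrative:

```go
package main

import "github.com/hashicorp/consul/api"

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	// Registering "web" with an empty SidecarService stanza asks the agent to
	// synthesize a sidecar proxy registration; deregistering "web" later
	// removes the sidecar too, as the test above verifies.
	err = client.Agent().ServiceRegister(&api.AgentServiceRegistration{
		Name: "web",
		Port: 1111,
		Connect: &api.AgentServiceConnect{
			SidecarService: &api.AgentServiceRegistration{},
		},
	})
	if err != nil {
		panic(err)
	}
}
```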
// This tests that connect proxy validation is done for local agent
// registration. This doesn't need to test validation exhaustively since
// that is done via a table test in the structs package.
@ -6205,13 +6990,6 @@ func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) {
func TestAgentConnectCALeafCert_nonBlockingQuery_after_blockingQuery_shouldNotBlock(t *testing.T) {
// see: https://github.com/hashicorp/consul/issues/12048
if testing.Short() {
t.Skip("too slow for testing.Short")
}
@ -6246,7 +7024,7 @@ func TestAgentConnectCALeafCert_nonBlockingQuery_after_blockingQuery_shouldNotBl
index string
issued structs.IssuedCert
)
testutil.RunStep(t, "do initial non-blocking query", func(t *testing.T) {
req := httptest.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)
@ -6278,7 +7056,7 @@ func TestAgentConnectCALeafCert_nonBlockingQuery_after_blockingQuery_shouldNotBl
// in between both of these steps the data should still be there, causing
// this to be a HIT that completes in less than 10m (the default inner leaf
// cert blocking query timeout).
testutil.RunStep(t, "do a non-blocking query that should not block", func(t *testing.T) {
req := httptest.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil)
resp := httptest.NewRecorder()
a.srv.h.ServeHTTP(resp, req)

View File

@ -9,6 +9,7 @@ import (
"github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api"
) )
@ -58,3 +59,5 @@ func (a *Agent) AgentEnterpriseMeta() *acl.EnterpriseMeta {
}
func (a *Agent) registerEntCache() {}
func (*Agent) fillEnterpriseProxyDataSources(*proxycfg.DataSources) {}

View File

@ -9,17 +9,17 @@ import (
// Recommended name for registration.
const (
ConfigEntryListName = "config-entries"
ConfigEntryName = "config-entry"
)
// ConfigEntryList supports fetching a list of configuration entries.
type ConfigEntryList struct {
RegisterOptionsBlockingRefresh
RPC RPC
}
func (c *ConfigEntryList) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
var result cache.FetchResult
// The request should be a ConfigEntryQuery.

View File

@ -12,7 +12,7 @@ import (
func TestConfigEntries(t *testing.T) {
rpc := TestRPC(t)
typ := &ConfigEntryList{RPC: rpc}
// Expect the proper RPC call. This also sets the expected value
// since that is return-by-pointer in the arguments.
@ -99,7 +99,7 @@ func TestConfigEntry(t *testing.T) {
func TestConfigEntries_badReqType(t *testing.T) {
rpc := TestRPC(t)
typ := &ConfigEntryList{RPC: rpc}
// Fetch
_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(

View File

@ -558,8 +558,19 @@ func (c *ConnectCALeaf) generateNewLeaf(req *ConnectCALeafRequest,
}
dnsNames = append([]string{"localhost"}, req.DNSSAN...)
ipAddresses = append([]net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")}, req.IPSAN...)
} else if req.Kind != "" {
if req.Kind != structs.ServiceKindMeshGateway {
return result, fmt.Errorf("unsupported kind: %s", req.Kind)
}
id = &connect.SpiffeIDMeshGateway{
Host: roots.TrustDomain,
Datacenter: req.Datacenter,
Partition: req.TargetPartition(),
}
dnsNames = append(dnsNames, req.DNSSAN...)
} else {
return result, errors.New("URI must be either service, agent, or kind")
}
// Create a new private key // Create a new private key
@ -665,8 +676,9 @@ func (c *ConnectCALeaf) generateNewLeaf(req *ConnectCALeafRequest,
type ConnectCALeafRequest struct {
Token string
Datacenter string
Service string // Service name, not ID
Agent string // Agent name, not ID
Kind structs.ServiceKind // only mesh-gateway for now
DNSSAN []string
IPSAN []net.IP
MinQueryIndex uint64
@ -677,20 +689,38 @@ type ConnectCALeafRequest struct {
} }
func (r *ConnectCALeafRequest) Key() string {
r.EnterpriseMeta.Normalize()
switch {
case r.Agent != "":
v, err := hashstructure.Hash([]interface{}{
r.Agent,
r.PartitionOrDefault(),
}, nil)
if err == nil {
return fmt.Sprintf("agent:%d", v)
}
case r.Kind == structs.ServiceKindMeshGateway:
v, err := hashstructure.Hash([]interface{}{
r.PartitionOrDefault(),
r.DNSSAN,
r.IPSAN,
}, nil)
if err == nil {
return fmt.Sprintf("kind:%d", v)
}
case r.Kind != "":
// this is not valid
default:
v, err := hashstructure.Hash([]interface{}{
r.Service,
r.EnterpriseMeta,
r.DNSSAN,
r.IPSAN,
}, nil)
if err == nil {
return fmt.Sprintf("service:%d", v)
}
}
// If there is an error, we don't set the key. A blank key forces // If there is an error, we don't set the key. A blank key forces
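As a quick illustration of the three key namespaces the switch above produces (a sketch; the hash values are placeholders that vary per request):

```go
// Sketch: each request flavor keys into its own prefix.
fmt.Println((&ConnectCALeafRequest{Agent: "node-1"}).Key()) // "agent:<hash>"
fmt.Println((&ConnectCALeafRequest{Service: "web"}).Key())  // "service:<hash>"
fmt.Println((&ConnectCALeafRequest{
	Kind: structs.ServiceKindMeshGateway,
}).Key()) // "kind:<hash>"
```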

View File

@ -1104,29 +1104,64 @@ func (r *testGatedRootsRPC) RPC(method string, args interface{}, reply interface
}
func TestConnectCALeaf_Key(t *testing.T) {
key := func(r ConnectCALeafRequest) string {
return r.Key()
}
t.Run("service", func(t *testing.T) {
t.Run("name", func(t *testing.T) {
r1 := key(ConnectCALeafRequest{Service: "web"})
r2 := key(ConnectCALeafRequest{Service: "api"})
require.True(t, strings.HasPrefix(r1, "service:"), "Key %s does not start with service:", r1)
require.True(t, strings.HasPrefix(r2, "service:"), "Key %s does not start with service:", r2)
require.NotEqual(t, r1, r2, "Cache keys for different services should not be equal")
})
t.Run("dns-san", func(t *testing.T) {
r3 := key(ConnectCALeafRequest{Service: "foo", DNSSAN: []string{"a.com"}})
r4 := key(ConnectCALeafRequest{Service: "foo", DNSSAN: []string{"b.com"}})
require.NotEqual(t, r3, r4, "Cache keys for different DNSSAN should not be equal")
})
t.Run("ip-san", func(t *testing.T) {
r5 := key(ConnectCALeafRequest{Service: "foo", IPSAN: []net.IP{net.ParseIP("192.168.4.139")}})
r6 := key(ConnectCALeafRequest{Service: "foo", IPSAN: []net.IP{net.ParseIP("192.168.4.140")}})
require.NotEqual(t, r5, r6, "Cache keys for different IPSAN should not be equal")
})
})
t.Run("agent", func(t *testing.T) {
t.Run("name", func(t *testing.T) {
r1 := key(ConnectCALeafRequest{Agent: "abc"})
require.True(t, strings.HasPrefix(r1, "agent:"), "Key %s does not start with agent:", r1)
})
t.Run("dns-san ignored", func(t *testing.T) {
r3 := key(ConnectCALeafRequest{Agent: "foo", DNSSAN: []string{"a.com"}})
r4 := key(ConnectCALeafRequest{Agent: "foo", DNSSAN: []string{"b.com"}})
require.Equal(t, r3, r4, "DNSSAN is ignored for agent type")
})
t.Run("ip-san ignored", func(t *testing.T) {
r5 := key(ConnectCALeafRequest{Agent: "foo", IPSAN: []net.IP{net.ParseIP("192.168.4.139")}})
r6 := key(ConnectCALeafRequest{Agent: "foo", IPSAN: []net.IP{net.ParseIP("192.168.4.140")}})
require.Equal(t, r5, r6, "IPSAN is ignored for agent type")
})
})
t.Run("kind", func(t *testing.T) {
t.Run("invalid", func(t *testing.T) {
r1 := key(ConnectCALeafRequest{Kind: "terminating-gateway"})
require.Empty(t, r1)
})
t.Run("mesh-gateway", func(t *testing.T) {
t.Run("normal", func(t *testing.T) {
r1 := key(ConnectCALeafRequest{Kind: "mesh-gateway"})
require.True(t, strings.HasPrefix(r1, "kind:"), "Key %s does not start with kind:", r1)
})
t.Run("dns-san", func(t *testing.T) {
r3 := key(ConnectCALeafRequest{Kind: "mesh-gateway", DNSSAN: []string{"a.com"}})
r4 := key(ConnectCALeafRequest{Kind: "mesh-gateway", DNSSAN: []string{"b.com"}})
require.NotEqual(t, r3, r4, "Cache keys for different DNSSAN should not be equal")
})
t.Run("ip-san", func(t *testing.T) {
r5 := key(ConnectCALeafRequest{Kind: "mesh-gateway", IPSAN: []net.IP{net.ParseIP("192.168.4.139")}})
r6 := key(ConnectCALeafRequest{Kind: "mesh-gateway", IPSAN: []net.IP{net.ParseIP("192.168.4.140")}})
require.NotEqual(t, r5, r6, "Cache keys for different IPSAN should not be equal")
})
})
})
}

View File

@ -0,0 +1,51 @@
package cachetype
import (
"fmt"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
)
// Recommended name for registration.
const ExportedPeeredServicesName = "exported-peered-services"
type ExportedPeeredServices struct {
RegisterOptionsBlockingRefresh
RPC RPC
}
func (c *ExportedPeeredServices) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
var result cache.FetchResult
// The request should be a DCSpecificRequest.
reqReal, ok := req.(*structs.DCSpecificRequest)
if !ok {
return result, fmt.Errorf(
"Internal cache failure: request wrong type: %T", req)
}
// Lightweight copy this object so that manipulating QueryOptions doesn't race.
dup := *reqReal
reqReal = &dup
// Set the minimum query index to our current index so we block
reqReal.QueryOptions.MinQueryIndex = opts.MinIndex
reqReal.QueryOptions.MaxQueryTime = opts.Timeout
// Always allow stale - there's no point in hitting the leader if the request
// is going to be served from cache and end up arbitrarily stale anyway. This
// allows cached service discovery to automatically scale reads across all
// servers too.
reqReal.AllowStale = true
// Fetch
var reply structs.IndexedExportedServiceList
if err := c.RPC.RPC("Internal.ExportedPeeredServices", reqReal, &reply); err != nil {
return result, err
}
result.Value = &reply
result.Index = reply.QueryMeta.Index
return result, nil
}
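A sketch of how a cache type like this is typically registered and queried, written as if inside the `cachetype` package with `rpc` being any RPC client (imports of `context`, `cache`, and `structs` follow the surrounding files); the datacenter value is illustrative:

```go
// Register the type once at startup, then Get with a request whose
// QueryOptions drive the blocking-query parameters set in Fetch above.
func exampleGet(rpc RPC) (*structs.IndexedExportedServiceList, error) {
	c := cache.New(cache.Options{})
	c.RegisterType(ExportedPeeredServicesName, &ExportedPeeredServices{RPC: rpc})
	raw, _, err := c.Get(context.Background(), ExportedPeeredServicesName,
		&structs.DCSpecificRequest{Datacenter: "dc1"})
	if err != nil {
		return nil, err
	}
	return raw.(*structs.IndexedExportedServiceList), nil
}
```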

View File

@ -0,0 +1,69 @@
package cachetype
import (
"testing"
"time"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
)
func TestExportedPeeredServices(t *testing.T) {
rpc := TestRPC(t)
typ := &ExportedPeeredServices{RPC: rpc}
// Expect the proper RPC call. This also sets the expected value
// since that is return-by-pointer in the arguments.
var resp *structs.IndexedExportedServiceList
rpc.On("RPC", "Internal.ExportedPeeredServices", mock.Anything, mock.Anything).Return(nil).
Run(func(args mock.Arguments) {
req := args.Get(1).(*structs.DCSpecificRequest)
require.Equal(t, uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime)
require.True(t, req.AllowStale)
reply := args.Get(2).(*structs.IndexedExportedServiceList)
reply.Services = map[string]structs.ServiceList{
"my-peer": {
structs.ServiceName{
Name: "foo",
},
structs.ServiceName{
Name: "bar",
},
},
}
reply.QueryMeta.Index = 48
resp = reply
})
// Fetch
resultA, err := typ.Fetch(cache.FetchOptions{
MinIndex: 24,
Timeout: 1 * time.Second,
}, &structs.DCSpecificRequest{
Datacenter: "dc1",
})
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, resultA)
rpc.AssertExpectations(t)
}
func TestExportedPeeredServices_badReqType(t *testing.T) {
rpc := TestRPC(t)
typ := &ExportedPeeredServices{RPC: rpc}
// Fetch
_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
require.Error(t, err)
require.Contains(t, err.Error(), "wrong type")
rpc.AssertExpectations(t)
}

View File

@ -0,0 +1,52 @@
package cachetype
import (
"fmt"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
)
// IntentionUpstreamsDestinationName is the recommended name for registration.
const IntentionUpstreamsDestinationName = "intention-upstreams-destination"
// IntentionUpstreamsDestination supports fetching upstreams for a given gateway name.
type IntentionUpstreamsDestination struct {
RegisterOptionsBlockingRefresh
RPC RPC
}
func (i *IntentionUpstreamsDestination) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
var result cache.FetchResult
// The request should be a ServiceSpecificRequest.
reqReal, ok := req.(*structs.ServiceSpecificRequest)
if !ok {
return result, fmt.Errorf(
"Internal cache failure: request wrong type: %T", req)
}
// Lightweight copy this object so that manipulating QueryOptions doesn't race.
dup := *reqReal
reqReal = &dup
// Set the minimum query index to our current index so we block
reqReal.QueryOptions.MinQueryIndex = opts.MinIndex
reqReal.QueryOptions.MaxQueryTime = opts.Timeout
// Always allow stale - there's no point in hitting the leader if the request
// is going to be served from cache and end up arbitrarily stale anyway. This
// allows cached service discovery to automatically scale reads across all
// servers too.
reqReal.AllowStale = true
// Fetch
var reply structs.IndexedServiceList
if err := i.RPC.RPC("Internal.IntentionUpstreamsDestination", reqReal, &reply); err != nil {
return result, err
}
result.Value = &reply
result.Index = reply.QueryMeta.Index
return result, nil
}

View File

@ -0,0 +1,52 @@
package cachetype
import (
"testing"
"time"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/structs"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestIntentionUpstreamsDestination(t *testing.T) {
rpc := TestRPC(t)
typ := &IntentionUpstreamsDestination{RPC: rpc}
// Expect the proper RPC call. This also sets the expected value
// since that is return-by-pointer in the arguments.
var resp *structs.IndexedServiceList
rpc.On("RPC", "Internal.IntentionUpstreamsDestination", mock.Anything, mock.Anything).Return(nil).
Run(func(args mock.Arguments) {
req := args.Get(1).(*structs.ServiceSpecificRequest)
require.Equal(t, uint64(24), req.QueryOptions.MinQueryIndex)
require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime)
require.True(t, req.AllowStale)
require.Equal(t, "foo", req.ServiceName)
services := structs.ServiceList{
{Name: "foo"},
}
reply := args.Get(2).(*structs.IndexedServiceList)
reply.Services = services
reply.QueryMeta.Index = 48
resp = reply
})
// Fetch
resultA, err := typ.Fetch(cache.FetchOptions{
MinIndex: 24,
Timeout: 1 * time.Second,
}, &structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: "foo",
})
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, resultA)
rpc.AssertExpectations(t)
}

View File

@ -0,0 +1,60 @@
// Code generated by mockery v2.12.2. DO NOT EDIT.
package cachetype
import (
context "context"
grpc "google.golang.org/grpc"
mock "github.com/stretchr/testify/mock"
pbpeering "github.com/hashicorp/consul/proto/pbpeering"
testing "testing"
)
// MockTrustBundleReader is an autogenerated mock type for the TrustBundleReader type
type MockTrustBundleReader struct {
mock.Mock
}
// TrustBundleRead provides a mock function with given fields: ctx, in, opts
func (_m *MockTrustBundleReader) TrustBundleRead(ctx context.Context, in *pbpeering.TrustBundleReadRequest, opts ...grpc.CallOption) (*pbpeering.TrustBundleReadResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 *pbpeering.TrustBundleReadResponse
if rf, ok := ret.Get(0).(func(context.Context, *pbpeering.TrustBundleReadRequest, ...grpc.CallOption) *pbpeering.TrustBundleReadResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*pbpeering.TrustBundleReadResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *pbpeering.TrustBundleReadRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewMockTrustBundleReader creates a new instance of MockTrustBundleReader. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockTrustBundleReader(t testing.TB) *MockTrustBundleReader {
mock := &MockTrustBundleReader{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

View File

@ -0,0 +1,51 @@
package cachetype
import (
"context"
"fmt"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/proto/pbpeering"
"google.golang.org/grpc"
)
// Recommended name for registration.
const TrustBundleReadName = "peer-trust-bundle"
// TrustBundle supports fetching the trust bundle for a single peer.
type TrustBundle struct {
RegisterOptionsNoRefresh
Client TrustBundleReader
}
//go:generate mockery --name TrustBundleReader --inpackage --testonly
type TrustBundleReader interface {
TrustBundleRead(
ctx context.Context, in *pbpeering.TrustBundleReadRequest, opts ...grpc.CallOption,
) (*pbpeering.TrustBundleReadResponse, error)
}
func (t *TrustBundle) Fetch(_ cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
var result cache.FetchResult
// The request should be a TrustBundleReadRequest.
// We do not need to make a copy of this request type like in other cache types
// because the RequestInfo is synthetic.
reqReal, ok := req.(*pbpeering.TrustBundleReadRequest)
if !ok {
return result, fmt.Errorf(
"Internal cache failure: request wrong type: %T", req)
}
// Fetch
reply, err := t.Client.TrustBundleRead(context.Background(), reqReal)
if err != nil {
return result, err
}
result.Value = reply
result.Index = reply.Index
return result, nil
}

View File

@ -0,0 +1,104 @@
package cachetype
import (
"context"
"testing"
"time"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
func TestTrustBundle(t *testing.T) {
client := NewMockTrustBundleReader(t)
typ := &TrustBundle{Client: client}
resp := &pbpeering.TrustBundleReadResponse{
Index: 48,
Bundle: &pbpeering.PeeringTrustBundle{
PeerName: "peer1",
RootPEMs: []string{"peer1-roots"},
},
}
// Expect the proper call.
// This also returns the canned response above.
client.On("TrustBundleRead", mock.Anything, mock.Anything).
Run(func(args mock.Arguments) {
req := args.Get(1).(*pbpeering.TrustBundleReadRequest)
require.Equal(t, "foo", req.Name)
}).
Return(resp, nil)
// Fetch and assert against the result.
result, err := typ.Fetch(cache.FetchOptions{}, &pbpeering.TrustBundleReadRequest{
Name: "foo",
})
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, result)
}
func TestTrustBundle_badReqType(t *testing.T) {
client := pbpeering.NewPeeringServiceClient(nil)
typ := &TrustBundle{Client: client}
// Fetch
_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
require.Error(t, err)
require.Contains(t, err.Error(), "wrong type")
}
// This test asserts that we can continuously poll this cache type, given that it doesn't support blocking.
func TestTrustBundle_MultipleUpdates(t *testing.T) {
c := cache.New(cache.Options{})
client := NewMockTrustBundleReader(t)
// On each mock client call to TrustBundleRead we will increment the index by 1
// to simulate new data arriving.
resp := &pbpeering.TrustBundleReadResponse{
Index: uint64(0),
}
client.On("TrustBundleRead", mock.Anything, mock.Anything).
Run(func(args mock.Arguments) {
req := args.Get(1).(*pbpeering.TrustBundleReadRequest)
require.Equal(t, "foo", req.Name)
// Increment on each call.
resp.Index++
}).
Return(resp, nil)
c.RegisterType(TrustBundleReadName, &TrustBundle{Client: client})
ch := make(chan cache.UpdateEvent)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
t.Cleanup(cancel)
err := c.Notify(ctx, TrustBundleReadName, &pbpeering.TrustBundleReadRequest{Name: "foo"}, "updates", ch)
require.NoError(t, err)
i := uint64(1)
for {
select {
case <-ctx.Done():
return
case update := <-ch:
// Expect to receive updates for increasing indexes serially.
resp := update.Result.(*pbpeering.TrustBundleReadResponse)
require.Equal(t, i, resp.Index)
i++
if i > 3 {
return
}
}
}
}

View File

@ -0,0 +1,50 @@
package cachetype
import (
"context"
"fmt"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/proto/pbpeering"
"google.golang.org/grpc"
)
// Recommended name for registration.
const TrustBundleListName = "trust-bundles"
// TrustBundles supports fetching the trust bundles of all peers that a given
// service is exported to.
type TrustBundles struct {
RegisterOptionsNoRefresh
Client TrustBundleLister
}
type TrustBundleLister interface {
TrustBundleListByService(
ctx context.Context, in *pbpeering.TrustBundleListByServiceRequest, opts ...grpc.CallOption,
) (*pbpeering.TrustBundleListByServiceResponse, error)
}
func (t *TrustBundles) Fetch(_ cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
var result cache.FetchResult
// The request should be a TrustBundleListByServiceRequest.
// We do not need to make a copy of this request type like in other cache types
// because the RequestInfo is synthetic.
reqReal, ok := req.(*pbpeering.TrustBundleListByServiceRequest)
if !ok {
return result, fmt.Errorf(
"Internal cache failure: request wrong type: %T", req)
}
// Fetch
reply, err := t.Client.TrustBundleListByService(context.Background(), reqReal)
if err != nil {
return result, err
}
result.Value = reply
result.Index = reply.Index
return result, nil
}

View File

@ -0,0 +1,152 @@
package cachetype
import (
"context"
"testing"
"time"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
)
func TestTrustBundles(t *testing.T) {
client := NewMockTrustBundleLister(t)
typ := &TrustBundles{Client: client}
resp := &pbpeering.TrustBundleListByServiceResponse{
Index: 48,
Bundles: []*pbpeering.PeeringTrustBundle{
{
PeerName: "peer1",
RootPEMs: []string{"peer1-roots"},
},
},
}
// Expect the proper call.
// This also returns the canned response above.
client.On("TrustBundleListByService", mock.Anything, mock.Anything).
Run(func(args mock.Arguments) {
req := args.Get(1).(*pbpeering.TrustBundleListByServiceRequest)
require.Equal(t, "foo", req.ServiceName)
}).
Return(resp, nil)
// Fetch and assert against the result.
result, err := typ.Fetch(cache.FetchOptions{}, &pbpeering.TrustBundleListByServiceRequest{
ServiceName: "foo",
})
require.NoError(t, err)
require.Equal(t, cache.FetchResult{
Value: resp,
Index: 48,
}, result)
}
func TestTrustBundles_badReqType(t *testing.T) {
client := pbpeering.NewPeeringServiceClient(nil)
typ := &TrustBundles{Client: client}
// Fetch
_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
require.Error(t, err)
require.Contains(t, err.Error(), "wrong type")
}
// This test asserts that we can continuously poll this cache type, given that it doesn't support blocking.
func TestTrustBundles_MultipleUpdates(t *testing.T) {
c := cache.New(cache.Options{})
client := NewMockTrustBundleLister(t)
// On each mock client call to TrustBundleListByService we will increment the index by 1
// to simulate new data arriving.
resp := &pbpeering.TrustBundleListByServiceResponse{
Index: uint64(0),
}
client.On("TrustBundleListByService", mock.Anything, mock.Anything).
Run(func(args mock.Arguments) {
req := args.Get(1).(*pbpeering.TrustBundleListByServiceRequest)
require.Equal(t, "foo", req.ServiceName)
// Increment on each call.
resp.Index++
}).
Return(resp, nil)
c.RegisterType(TrustBundleListName, &TrustBundles{Client: client})
ch := make(chan cache.UpdateEvent)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
t.Cleanup(cancel)
err := c.Notify(ctx, TrustBundleListName, &pbpeering.TrustBundleListByServiceRequest{ServiceName: "foo"}, "updates", ch)
require.NoError(t, err)
i := uint64(1)
for {
select {
case <-ctx.Done():
return
case update := <-ch:
// Expect to receive updates for increasing indexes serially.
resp := update.Result.(*pbpeering.TrustBundleListByServiceResponse)
require.Equal(t, i, resp.Index)
i++
if i > 3 {
return
}
}
}
}
// MockTrustBundleLister is an autogenerated mock type for the TrustBundleLister type
type MockTrustBundleLister struct {
mock.Mock
}
// TrustBundleListByService provides a mock function with given fields: ctx, in, opts
func (_m *MockTrustBundleLister) TrustBundleListByService(ctx context.Context, in *pbpeering.TrustBundleListByServiceRequest, opts ...grpc.CallOption) (*pbpeering.TrustBundleListByServiceResponse, error) {
_va := make([]interface{}, len(opts))
for _i := range opts {
_va[_i] = opts[_i]
}
var _ca []interface{}
_ca = append(_ca, ctx, in)
_ca = append(_ca, _va...)
ret := _m.Called(_ca...)
var r0 *pbpeering.TrustBundleListByServiceResponse
if rf, ok := ret.Get(0).(func(context.Context, *pbpeering.TrustBundleListByServiceRequest, ...grpc.CallOption) *pbpeering.TrustBundleListByServiceResponse); ok {
r0 = rf(ctx, in, opts...)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*pbpeering.TrustBundleListByServiceResponse)
}
}
var r1 error
if rf, ok := ret.Get(1).(func(context.Context, *pbpeering.TrustBundleListByServiceRequest, ...grpc.CallOption) error); ok {
r1 = rf(ctx, in, opts...)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// NewMockTrustBundleLister creates a new instance of MockTrustBundleLister. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockTrustBundleLister(t testing.TB) *MockTrustBundleLister {
mock := &MockTrustBundleLister{}
mock.Mock.Test(t)
t.Cleanup(func() { mock.AssertExpectations(t) })
return mock
}

43
agent/cache/watch.go vendored
View File

@ -23,6 +23,9 @@ type UpdateEvent struct {
Err error
}
// Callback is the function type accepted by NotifyCallback.
type Callback func(ctx context.Context, event UpdateEvent)
// Notify registers a desire to be updated about changes to a cache result.
//
// It is a helper that abstracts code from performing their own "blocking" query
@ -56,6 +59,24 @@ func (c *Cache) Notify(
r Request,
correlationID string,
ch chan<- UpdateEvent,
) error {
return c.NotifyCallback(ctx, t, r, correlationID, func(ctx context.Context, event UpdateEvent) {
select {
case ch <- event:
case <-ctx.Done():
}
})
}
// NotifyCallback allows you to receive notifications about changes to a cache
// result in the same way as Notify, but accepts a callback function instead of
// a channel.
func (c *Cache) NotifyCallback(
ctx context.Context,
t string,
r Request,
correlationID string,
cb Callback,
) error {
c.typesLock.RLock()
tEntry, ok := c.types[t]
@ -65,7 +86,7 @@ func (c *Cache) Notify(
}
if tEntry.Opts.SupportsBlocking {
go c.notifyBlockingQuery(ctx, newGetOptions(tEntry, r), correlationID, cb)
return nil
}
@ -73,11 +94,11 @@ func (c *Cache) Notify(
if info.MaxAge == 0 {
return fmt.Errorf("Cannot use Notify for polling cache types without specifying the MaxAge")
}
go c.notifyPollingQuery(ctx, newGetOptions(tEntry, r), correlationID, cb)
return nil
}
func (c *Cache) notifyBlockingQuery(ctx context.Context, r getOptions, correlationID string, cb Callback) {
// Always start at 0 index to deliver the initial (possibly currently cached
// value).
index := uint64(0)
@ -101,12 +122,7 @@ func (c *Cache) notifyBlockingQuery(ctx context.Context, r getOptions, correlati
// Check the index of the value returned in the cache entry to be sure it
// changed
if index == 0 || index < meta.Index {
cb(ctx, UpdateEvent{correlationID, res, meta, err})
// Update index for next request
index = meta.Index
@ -143,7 +159,7 @@ func (c *Cache) notifyBlockingQuery(ctx context.Context, r getOptions, correlati
}
}
func (c *Cache) notifyPollingQuery(ctx context.Context, r getOptions, correlationID string, cb Callback) {
index := uint64(0)
failures := uint(0)
@ -166,12 +182,7 @@ func (c *Cache) notifyPollingQuery(ctx context.Context, r getOptions, correlatio
// Check for a change in the value or an index change
if index < meta.Index || !reflect.DeepEqual(lastValue, res) {
cb(ctx, UpdateEvent{correlationID, res, meta, err})
// Update index and lastValue
lastValue = res
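A minimal sketch contrasting the two entry points (the cache type name and request are placeholders):

```go
// Sketch: watch the same result through both APIs.
func watchBothWays(ctx context.Context, c *cache.Cache, req cache.Request) error {
	// Channel-based: events are delivered to ch until ctx is cancelled.
	ch := make(chan cache.UpdateEvent)
	if err := c.Notify(ctx, "my-cache-type", req, "my-correlation-id", ch); err != nil {
		return err
	}
	// Callback-based: the same delivery path, but cb runs inline in the
	// notify goroutine instead of sending on a channel.
	return c.NotifyCallback(ctx, "my-cache-type", req, "my-correlation-id",
		func(ctx context.Context, event cache.UpdateEvent) {
			// handle event.Result / event.Err here
		})
}
```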

View File

@ -3,6 +3,7 @@ package agent
import (
"fmt"
"net/http"
"strings"
metrics "github.com/armon/go-metrics" metrics "github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus" "github.com/armon/go-metrics/prometheus"
@ -356,12 +357,12 @@ func (s *HTTPHandlers) catalogServiceNodes(resp http.ResponseWriter, req *http.R
args.TagFilter = true
}
if _, ok := params["merge-central-config"]; ok {
args.MergeCentralConfig = true
}
// Pull out the service name
args.ServiceName = strings.TrimPrefix(req.URL.Path, pathPrefix)
if args.ServiceName == "" {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing service name"}
@ -432,11 +433,7 @@ func (s *HTTPHandlers) CatalogNodeServices(resp http.ResponseWriter, req *http.R
}
// Pull out the node name
args.Node = strings.TrimPrefix(req.URL.Path, "/v1/catalog/node/")
if args.Node == "" {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing node name"}
}
@ -497,15 +494,15 @@ func (s *HTTPHandlers) CatalogNodeServiceList(resp http.ResponseWriter, req *htt
}
// Pull out the node name
args.Node = strings.TrimPrefix(req.URL.Path, "/v1/catalog/node-services/")
if args.Node == "" {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing node name"}
}
if _, ok := req.URL.Query()["merge-central-config"]; ok {
args.MergeCentralConfig = true
}
// Make the RPC request
var out structs.IndexedNodeServiceList
defer setMeta(resp, &out.QueryMeta)
@ -548,11 +545,7 @@ func (s *HTTPHandlers) CatalogGatewayServices(resp http.ResponseWriter, req *htt
}
// Pull out the gateway's service name
args.ServiceName = strings.TrimPrefix(req.URL.Path, "/v1/catalog/gateway-services/")
if args.ServiceName == "" {
return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Missing gateway name"}
}
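For reference, a hedged example of exercising the new query parameter against a locally running agent; the address and service name are assumptions for illustration:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// The handler above only checks for the presence of the
	// "merge-central-config" key, so no value is needed.
	resp, err := http.Get("http://127.0.0.1:8500/v1/catalog/service/web?merge-central-config")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```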

View File

@ -8,6 +8,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api"
"github.com/hashicorp/serf/coordinate" "github.com/hashicorp/serf/coordinate"
@ -602,6 +603,63 @@ func TestCatalogRegister_checkRegistration(t *testing.T) {
})
}
func TestCatalogRegister_checkRegistration_UDP(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
a := NewTestAgent(t, "")
defer a.Shutdown()
// Register node with a service and check
check := structs.HealthCheck{
Node: "foo",
CheckID: "foo-check",
Name: "foo check",
ServiceID: "api",
Definition: structs.HealthCheckDefinition{
UDP: "localhost:8888",
Interval: 5 * time.Second,
},
}
args := &structs.RegisterRequest{
Datacenter: "dc1",
Node: "foo",
Address: "127.0.0.1",
Service: &structs.NodeService{
Service: "api",
},
Check: &check,
}
var out struct{}
if err := a.RPC("Catalog.Register", args, &out); err != nil {
t.Fatalf("err: %v", err)
}
retry.Run(t, func(r *retry.R) {
req, _ := http.NewRequest("GET", "/v1/health/checks/api", nil)
resp := httptest.NewRecorder()
obj, err := a.srv.HealthServiceChecks(resp, req)
if err != nil {
r.Fatalf("err: %v", err)
}
checks := obj.(structs.HealthChecks)
if len(checks) != 1 {
r.Fatalf("expected 1 check, got: %d", len(checks))
}
if checks[0].CheckID != check.CheckID {
r.Fatalf("expected check id %s, got %s", check.Type, checks[0].Type)
}
if checks[0].Type != "udp" {
r.Fatalf("expected check type udp, got %s", checks[0].Type)
}
})
}
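For comparison, the same kind of UDP check registered through the Go API client - a sketch that assumes the `UDP` field on `api.AgentServiceCheck` shipped alongside this change, with `client` an existing `*api.Client`:

```go
// Sketch: register a service with a UDP health check via the API client.
reg := &api.AgentServiceRegistration{
	Name: "api",
	Check: &api.AgentServiceCheck{
		Name:     "api udp probe",
		UDP:      "localhost:8888", // assumed field; mirrors the HealthCheckDefinition above
		Interval: "5s",
	},
}
if err := client.Agent().ServiceRegister(reg); err != nil {
	log.Fatal(err)
}
```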
func TestCatalogServiceNodes(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
@ -1052,6 +1110,249 @@ func TestCatalogServiceNodes_ConnectProxy(t *testing.T) {
assert.Equal(t, args.Service.Proxy, nodes[0].ServiceProxy)
}
func registerService(t *testing.T, a *TestAgent) (registerServiceReq *structs.RegisterRequest) {
t.Helper()
entMeta := acl.DefaultEnterpriseMeta()
registerServiceReq = structs.TestRegisterRequestProxy(t)
registerServiceReq.EnterpriseMeta = *entMeta
registerServiceReq.Service.EnterpriseMeta = *entMeta
registerServiceReq.Service.Proxy.Upstreams = structs.TestAddDefaultsToUpstreams(t, registerServiceReq.Service.Proxy.Upstreams, *entMeta)
registerServiceReq.Check = &structs.HealthCheck{
Node: registerServiceReq.Node,
Name: "check1",
}
var out struct{}
require.NoError(t, a.RPC("Catalog.Register", registerServiceReq, &out))
return
}
func registerProxyDefaults(t *testing.T, a *TestAgent) (proxyGlobalEntry structs.ProxyConfigEntry) {
t.Helper()
// Register proxy-defaults
proxyGlobalEntry = structs.ProxyConfigEntry{
Kind: structs.ProxyDefaults,
Name: structs.ProxyConfigGlobal,
Mode: structs.ProxyModeDirect,
Config: map[string]interface{}{
"local_connect_timeout_ms": uint64(1000),
"handshake_timeout_ms": uint64(1000),
},
EnterpriseMeta: *acl.DefaultEnterpriseMeta(),
}
proxyDefaultsConfigEntryReq := &structs.ConfigEntryRequest{
Op: structs.ConfigEntryUpsert,
Datacenter: "dc1",
Entry: &proxyGlobalEntry,
}
var proxyDefaultsConfigEntryResp bool
require.NoError(t, a.RPC("ConfigEntry.Apply", &proxyDefaultsConfigEntryReq, &proxyDefaultsConfigEntryResp))
return
}
func registerServiceDefaults(t *testing.T, a *TestAgent, serviceName string) (serviceDefaultsConfigEntry structs.ServiceConfigEntry) {
t.Helper()
limits := 512
serviceDefaultsConfigEntry = structs.ServiceConfigEntry{
Kind: structs.ServiceDefaults,
Name: serviceName,
Mode: structs.ProxyModeTransparent,
UpstreamConfig: &structs.UpstreamConfiguration{
Defaults: &structs.UpstreamConfig{
MeshGateway: structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeLocal,
},
Limits: &structs.UpstreamLimits{
MaxConnections: &limits,
},
},
},
EnterpriseMeta: *acl.DefaultEnterpriseMeta(),
}
serviceDefaultsConfigEntryReq := &structs.ConfigEntryRequest{
Op: structs.ConfigEntryUpsert,
Datacenter: "dc1",
Entry: &serviceDefaultsConfigEntry,
}
var serviceDefaultsConfigEntryResp bool
require.NoError(t, a.RPC("ConfigEntry.Apply", &serviceDefaultsConfigEntryReq, &serviceDefaultsConfigEntryResp))
return
}
func validateMergeCentralConfigResponse(t *testing.T, v *structs.ServiceNode,
registerServiceReq *structs.RegisterRequest,
proxyGlobalEntry structs.ProxyConfigEntry,
serviceDefaultsConfigEntry structs.ServiceConfigEntry) {
t.Helper()
require.Equal(t, registerServiceReq.Service.Service, v.ServiceName)
// validate proxy global defaults are resolved in the merged service config
require.Equal(t, proxyGlobalEntry.Config, v.ServiceProxy.Config)
// validate service defaults override proxy-defaults/global
require.NotEqual(t, proxyGlobalEntry.Mode, v.ServiceProxy.Mode)
require.Equal(t, serviceDefaultsConfigEntry.Mode, v.ServiceProxy.Mode)
// validate service defaults are resolved in the merged service config
// expected number of upstreams = (number of upstreams defined in the register request proxy config +
// 1 centrally configured default from service defaults)
require.Equal(t, len(registerServiceReq.Service.Proxy.Upstreams)+1, len(v.ServiceProxy.Upstreams))
for _, up := range v.ServiceProxy.Upstreams {
if up.DestinationType != "" && up.DestinationType != structs.UpstreamDestTypeService {
continue
}
require.Contains(t, up.Config, "limits")
upstreamLimits := up.Config["limits"].(*structs.UpstreamLimits)
require.Equal(t, serviceDefaultsConfigEntry.UpstreamConfig.Defaults.Limits.MaxConnections, upstreamLimits.MaxConnections)
require.Equal(t, serviceDefaultsConfigEntry.UpstreamConfig.Defaults.MeshGateway.Mode, up.MeshGateway.Mode)
}
}
func TestListServiceNodes_MergeCentralConfig(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
// Register the service
registerServiceReq := registerService(t, a)
// Register proxy-defaults
proxyGlobalEntry := registerProxyDefaults(t, a)
// Register service-defaults
serviceDefaultsConfigEntry := registerServiceDefaults(t, a, registerServiceReq.Service.Proxy.DestinationServiceName)
type testCase struct {
testCaseName string
serviceName string
connect bool
}
run := func(t *testing.T, tc testCase) {
url := fmt.Sprintf("/v1/catalog/service/%s?merge-central-config", tc.serviceName)
if tc.connect {
url = fmt.Sprintf("/v1/catalog/connect/%s?merge-central-config", tc.serviceName)
}
req, _ := http.NewRequest("GET", url, nil)
resp := httptest.NewRecorder()
var obj interface{}
var err error
if tc.connect {
obj, err = a.srv.CatalogConnectServiceNodes(resp, req)
} else {
obj, err = a.srv.CatalogServiceNodes(resp, req)
}
require.NoError(t, err)
assertIndex(t, resp)
serviceNodes := obj.(structs.ServiceNodes)
// validate response
require.Len(t, serviceNodes, 1)
v := serviceNodes[0]
validateMergeCentralConfigResponse(t, v, registerServiceReq, proxyGlobalEntry, serviceDefaultsConfigEntry)
}
testCases := []testCase{
{
testCaseName: "List service instances with merge-central-config",
serviceName: registerServiceReq.Service.Service,
},
{
testCaseName: "List connect capable service instances with merge-central-config",
serviceName: registerServiceReq.Service.Proxy.DestinationServiceName,
connect: true,
},
}
for _, tc := range testCases {
t.Run(tc.testCaseName, func(t *testing.T) {
run(t, tc)
})
}
}
func TestCatalogServiceNodes_MergeCentralConfigBlocking(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
// Register the service
registerServiceReq := registerService(t, a)
// Register proxy-defaults
proxyGlobalEntry := registerProxyDefaults(t, a)
// Run the query
rpcReq := structs.ServiceSpecificRequest{
Datacenter: "dc1",
ServiceName: registerServiceReq.Service.Service,
MergeCentralConfig: true,
}
var rpcResp structs.IndexedServiceNodes
require.NoError(t, a.RPC("Catalog.ServiceNodes", &rpcReq, &rpcResp))
require.Len(t, rpcResp.ServiceNodes, 1)
serviceNode := rpcResp.ServiceNodes[0]
require.Equal(t, registerServiceReq.Service.Service, serviceNode.ServiceName)
// validate proxy global defaults are resolved in the merged service config
require.Equal(t, proxyGlobalEntry.Config, serviceNode.ServiceProxy.Config)
require.Equal(t, proxyGlobalEntry.Mode, serviceNode.ServiceProxy.Mode)
// Async cause a change - register service defaults
waitIndex := rpcResp.Index
start := time.Now()
var serviceDefaultsConfigEntry structs.ServiceConfigEntry
go func() {
time.Sleep(100 * time.Millisecond)
// Register service-defaults
serviceDefaultsConfigEntry = registerServiceDefaults(t, a, registerServiceReq.Service.Proxy.DestinationServiceName)
}()
const waitDuration = 3 * time.Second
RUN_BLOCKING_QUERY:
url := fmt.Sprintf("/v1/catalog/service/%s?merge-central-config&wait=%s&index=%d",
registerServiceReq.Service.Service, waitDuration.String(), waitIndex)
req, _ := http.NewRequest("GET", url, nil)
resp := httptest.NewRecorder()
obj, err := a.srv.CatalogServiceNodes(resp, req)
require.NoError(t, err)
assertIndex(t, resp)
elapsed := time.Since(start)
idx := getIndex(t, resp)
if idx < waitIndex {
t.Fatalf("bad index returned: %v", idx)
} else if idx == waitIndex {
if elapsed > waitDuration {
// This should prevent the loop from running longer than the waitDuration
t.Fatalf("too slow: %v", elapsed)
}
goto RUN_BLOCKING_QUERY
}
// Should block at least 100ms before getting the changed results
if elapsed < 100*time.Millisecond {
t.Fatalf("too fast: %v", elapsed)
}
serviceNodes := obj.(structs.ServiceNodes)
// validate response
require.Len(t, serviceNodes, 1)
v := serviceNodes[0]
validateMergeCentralConfigResponse(t, v, registerServiceReq, proxyGlobalEntry, serviceDefaultsConfigEntry)
}
// Test that the Connect-compatible endpoints can be queried for a
// service via /v1/catalog/connect/:service.
func TestCatalogConnectServiceNodes_good(t *testing.T) {
@ -1228,6 +1529,111 @@ func TestCatalogNodeServiceList(t *testing.T) {
require.Equal(t, args.Service.Proxy, proxySvc.Proxy)
}
func TestCatalogNodeServiceList_MergeCentralConfig(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
// Register the service
registerServiceReq := registerService(t, a)
// Register proxy-defaults
proxyGlobalEntry := registerProxyDefaults(t, a)
// Register service-defaults
serviceDefaultsConfigEntry := registerServiceDefaults(t, a, registerServiceReq.Service.Proxy.DestinationServiceName)
url := fmt.Sprintf("/v1/catalog/node-services/%s?merge-central-config", registerServiceReq.Node)
req, _ := http.NewRequest("GET", url, nil)
resp := httptest.NewRecorder()
obj, err := a.srv.CatalogNodeServiceList(resp, req)
require.NoError(t, err)
assertIndex(t, resp)
nodeServices := obj.(*structs.NodeServiceList)
// validate response
require.Len(t, nodeServices.Services, 1)
validateMergeCentralConfigResponse(t, nodeServices.Services[0].ToServiceNode(nodeServices.Node.Node), registerServiceReq, proxyGlobalEntry, serviceDefaultsConfigEntry)
}
func TestCatalogNodeServiceList_MergeCentralConfigBlocking(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
}
t.Parallel()
a := NewTestAgent(t, "")
defer a.Shutdown()
testrpc.WaitForLeader(t, a.RPC, "dc1")
// Register the service
registerServiceReq := registerService(t, a)
// Register proxy-defaults
proxyGlobalEntry := registerProxyDefaults(t, a)
// Run the query
rpcReq := structs.NodeSpecificRequest{
Datacenter: "dc1",
Node: registerServiceReq.Node,
MergeCentralConfig: true,
}
var rpcResp structs.IndexedNodeServiceList
require.NoError(t, a.RPC("Catalog.NodeServiceList", &rpcReq, &rpcResp))
require.Len(t, rpcResp.NodeServices.Services, 1)
nodeService := rpcResp.NodeServices.Services[0]
require.Equal(t, registerServiceReq.Service.Service, nodeService.Service)
// validate proxy global defaults are resolved in the merged service config
require.Equal(t, proxyGlobalEntry.Config, nodeService.Proxy.Config)
require.Equal(t, proxyGlobalEntry.Mode, nodeService.Proxy.Mode)
// Async cause a change - register service defaults
waitIndex := rpcResp.Index
start := time.Now()
var serviceDefaultsConfigEntry structs.ServiceConfigEntry
go func() {
time.Sleep(100 * time.Millisecond)
// Register service-defaults
serviceDefaultsConfigEntry = registerServiceDefaults(t, a, registerServiceReq.Service.Proxy.DestinationServiceName)
}()
const waitDuration = 3 * time.Second
RUN_BLOCKING_QUERY:
url := fmt.Sprintf("/v1/catalog/node-services/%s?merge-central-config&wait=%s&index=%d",
registerServiceReq.Node, waitDuration.String(), waitIndex)
req, _ := http.NewRequest("GET", url, nil)
resp := httptest.NewRecorder()
obj, err := a.srv.CatalogNodeServiceList(resp, req)
require.NoError(t, err)
assertIndex(t, resp)
elapsed := time.Since(start)
idx := getIndex(t, resp)
if idx < waitIndex {
t.Fatalf("bad index returned: %v", idx)
} else if idx == waitIndex {
if elapsed > waitDuration {
// This should prevent the loop from running longer than the waitDuration
t.Fatalf("too slow: %v", elapsed)
}
goto RUN_BLOCKING_QUERY
}
// Should block at least 100ms before getting the changed results
if elapsed < 100*time.Millisecond {
t.Fatalf("too fast: %v", elapsed)
}
nodeServices := obj.(*structs.NodeServiceList)
// validate response
require.Len(t, nodeServices.Services, 1)
validateMergeCentralConfigResponse(t, nodeServices.Services[0].ToServiceNode(nodeServices.Node.Node), registerServiceReq, proxyGlobalEntry, serviceDefaultsConfigEntry)
}
func TestCatalogNodeServices_Filter(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")

View File

@ -1,6 +1,7 @@
package checks
import (
"bufio"
"context" "context"
"crypto/tls" "crypto/tls"
"fmt" "fmt"
@ -703,6 +704,135 @@ func (c *CheckTCP) check() {
c.StatusHandler.updateCheck(c.CheckID, api.HealthPassing, fmt.Sprintf("TCP connect %s: Success", c.TCP))
}
// CheckUDP is used to periodically send a UDP datagram to determine the health of a given check.
// The check is passing if the connection succeeds, the response is bytes.Equal to the bytes passed
// in, or the error returned is a timeout error.
// The check is critical if the connection fails, the connection succeeds but the response is not
// equal to the bytes passed in, or the connection succeeds but the error returned is not a
// timeout error.
type CheckUDP struct {
CheckID structs.CheckID
ServiceID structs.ServiceID
UDP string
Message string
Interval time.Duration
Timeout time.Duration
Logger hclog.Logger
StatusHandler *StatusHandler
dialer *net.Dialer
stop bool
stopCh chan struct{}
stopLock sync.Mutex
}
func (c *CheckUDP) Start() {
c.stopLock.Lock()
defer c.stopLock.Unlock()
if c.dialer == nil {
// Create the socket dialer
c.dialer = &net.Dialer{
Timeout: 10 * time.Second,
}
if c.Timeout > 0 {
c.dialer.Timeout = c.Timeout
}
}
c.stop = false
c.stopCh = make(chan struct{})
go c.run()
}
func (c *CheckUDP) Stop() {
c.stopLock.Lock()
defer c.stopLock.Unlock()
if !c.stop {
c.stop = true
close(c.stopCh)
}
}
func (c *CheckUDP) run() {
// Get the randomized initial pause time
initialPauseTime := lib.RandomStagger(c.Interval)
next := time.After(initialPauseTime)
for {
select {
case <-next:
c.check()
next = time.After(c.Interval)
case <-c.stopCh:
return
}
}
}
func (c *CheckUDP) check() {
conn, err := c.dialer.Dial(`udp`, c.UDP)
if err != nil {
if e, ok := err.(net.Error); ok && e.Timeout() {
c.StatusHandler.updateCheck(c.CheckID, api.HealthPassing, fmt.Sprintf("UDP connect %s: Success", c.UDP))
return
} else {
c.Logger.Warn("Check socket connection failed",
"check", c.CheckID.String(),
"error", err,
)
c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
return
}
}
defer conn.Close()
n, err := fmt.Fprint(conn, c.Message)
if err != nil {
c.Logger.Warn("Check socket write failed",
"check", c.CheckID.String(),
"error", err,
)
c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
return
}
if n != len(c.Message) {
c.Logger.Warn("Check socket short write",
"check", c.CheckID.String(),
)
c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, fmt.Sprintf("UDP connect %s: short write", c.UDP))
return
}
_, err = bufio.NewReader(conn).Read(make([]byte, 1))
if err != nil {
if strings.Contains(err.Error(), "i/o timeout") {
c.StatusHandler.updateCheck(c.CheckID, api.HealthPassing, fmt.Sprintf("UDP connect %s: Success", c.UDP))
return
}
c.Logger.Warn("Check socket read failed",
"check", c.CheckID.String(),
"error", err,
)
c.StatusHandler.updateCheck(c.CheckID, api.HealthCritical, err.Error())
return
}
c.StatusHandler.updateCheck(c.CheckID, api.HealthPassing, fmt.Sprintf("UDP connect %s: Success", c.UDP))
}
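A sketch of constructing the check directly, assuming `logger` and `statusHandler` are wired up as in the tests below; the address, message, and durations are illustrative:

```go
// Sketch: a standalone UDP check pushing status into a StatusHandler.
check := &CheckUDP{
	CheckID:       structs.NewCheckID("udp-probe", nil),
	UDP:           "127.0.0.1:8125",
	Message:       "ping",
	Interval:      10 * time.Second,
	Timeout:       2 * time.Second,
	Logger:        logger,
	StatusHandler: statusHandler,
}
check.Start()
defer check.Stop()
```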
// CheckDocker is used to periodically invoke a script to
// determine the health of an application running inside a
// Docker Container. We assume that the script is compatible

View File

@ -2,20 +2,25 @@ package checks
import (
"bytes"
"context"
"fmt"
"log"
"net"
"net/http"
"net/http/httptest"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/hashicorp/consul/agent/mock"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/go-uuid"
@ -1141,6 +1146,152 @@ func TestCheckTCPPassing(t *testing.T) {
tcpServer.Close()
}
func sendResponse(conn *net.UDPConn, addr *net.UDPAddr) {
_, err := conn.WriteToUDP([]byte("healthy"), addr)
if err != nil {
fmt.Printf("Couldn't send response %v", err)
}
}
func mockUDPServer(ctx context.Context, network string, port int) {
b := make([]byte, 1024)
addr := fmt.Sprintf(`127.0.0.1:%d`, port)
udpAddr, err := net.ResolveUDPAddr(network, addr)
if err != nil {
log.Fatal("Error resolving UDP address: ", err)
}
ser, err := net.ListenUDP("udp", udpAddr)
if err != nil {
log.Fatal("Error listening UDP: ", err)
}
defer ser.Close()
chClose := make(chan interface{})
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
for {
log.Print("Waiting for UDP message")
_, remoteaddr, err := ser.ReadFromUDP(b)
if err != nil {
log.Fatalf("Error reading from UDP %s", err.Error())
}
log.Printf("Read a message from %v %s \n", remoteaddr, b)
sendResponse(ser, remoteaddr)
select {
case <-chClose:
fmt.Println("cancelled")
wg.Done()
return
default:
}
}
}()
<-ctx.Done()
fmt.Println("cancelled")
close(chClose)
wg.Wait()
}
func expectUDPStatus(t *testing.T, udp string, status string) {
notif := mock.NewNotify()
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckUDP{
CheckID: cid,
UDP: udp,
Interval: 10 * time.Millisecond,
Logger: logger,
StatusHandler: statusHandler,
}
check.Start()
defer check.Stop()
retry.Run(t, func(r *retry.R) {
if got, want := notif.Updates(cid), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if got, want := notif.State(cid), status; got != want {
r.Fatalf("got state %q want %q", got, want)
}
})
}
func expectUDPTimeout(t *testing.T, udp string, status string) {
notif := mock.NewNotify()
logger := testutil.Logger(t)
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
cid := structs.NewCheckID("foo", nil)
check := &CheckUDP{
CheckID: cid,
UDP: udp,
Interval: 10 * time.Millisecond,
Timeout: 5 * time.Nanosecond,
Logger: logger,
StatusHandler: statusHandler,
}
check.Start()
defer check.Stop()
retry.Run(t, func(r *retry.R) {
if got, want := notif.Updates(cid), 2; got < want {
r.Fatalf("got %d updates want at least %d", got, want)
}
if got, want := notif.State(cid), status; got != want {
r.Fatalf("got state %q want %q", got, want)
}
})
}
func TestCheckUDPTimeoutPassing(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
port := freeport.GetOne(t)
serverUrl := "127.0.0.1:" + strconv.Itoa(port)
go mockUDPServer(ctx, `udp`, port)
	expectUDPTimeout(t, serverUrl, api.HealthPassing) // Should pass: the UDP check specification treats a read timeout as success.
}
func TestCheckUDPCritical(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
port := freeport.GetOne(t)
	nonExistentPort := freeport.GetOne(t)
	serverUrl := "127.0.0.1:" + strconv.Itoa(nonExistentPort)
	go mockUDPServer(ctx, `udp`, port)
	expectUDPStatus(t, serverUrl, api.HealthCritical) // Should be critical: the check dials a port with no UDP listener, so it never reaches the mock server.
}
func TestCheckUDPPassing(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
port := freeport.GetOne(t)
serverUrl := "127.0.0.1:" + strconv.Itoa(port)
go mockUDPServer(ctx, `udp`, port)
expectUDPStatus(t, serverUrl, api.HealthPassing)
}
func TestCheckH2PING(t *testing.T) {
	t.Parallel()


@ -804,6 +804,8 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
	Version:           stringVal(c.Version),
	VersionPrerelease: stringVal(c.VersionPrerelease),
	VersionMetadata:   stringVal(c.VersionMetadata),
	// What is a sensible default for BuildDate?
	BuildDate: timeValWithDefault(c.BuildDate, time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC)),

	// consul configuration
	ConsulCoordinateUpdateBatchSize: intVal(c.Consul.Coordinate.UpdateBatchSize),
@ -917,6 +919,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
	DisableHostname:          boolVal(c.Telemetry.DisableHostname),
	DogstatsdAddr:            stringVal(c.Telemetry.DogstatsdAddr),
	DogstatsdTags:            c.Telemetry.DogstatsdTags,
	RetryFailedConfiguration: boolVal(c.Telemetry.RetryFailedConfiguration),
	FilterDefault:            boolVal(c.Telemetry.FilterDefault),
	AllowedPrefixes:          telemetryAllowedPrefixes,
	BlockedPrefixes:          telemetryBlockedPrefixes,
@ -1945,6 +1948,13 @@ func stringVal(v *string) string {
	return *v
}
func timeValWithDefault(v *time.Time, defaultVal time.Time) time.Time {
if v == nil {
return defaultVal
}
return *v
}
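A quick illustration of the helper's nil handling (a hypothetical demo, not from this change; the sentinel matches the builder default above):

```go
// demoTimeValWithDefault is illustrative only.
func demoTimeValWithDefault() {
	fallback := time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC)

	var unset *time.Time
	fmt.Println(timeValWithDefault(unset, fallback)) // 1970-01-01 00:00:01 +0000 UTC (nil falls back)

	set := time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC)
	fmt.Println(timeValWithDefault(&set, fallback)) // 2019-11-20 05:00:00 +0000 UTC (non-nil wins)
}
```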
func float64ValWithDefault(v *float64, defaultVal float64) float64 {
	if v == nil {
		return defaultVal
@ -2522,7 +2532,7 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error
	// TLS is only enabled on the gRPC listener if there's an HTTPS port configured
	// for historic and backwards-compatibility reasons.
	if rt.HTTPSPort <= 0 && (t.GRPC != TLSProtocolConfig{} && t.GRPCModifiedByDeprecatedConfig == nil) {
		b.warn("tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)")
	}
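For readability, the warning condition can be read as a standalone predicate. This is an illustrative sketch (the extracted function does not exist in the source); names mirror the snippet above:

```go
// warnIneffectualGRPCTLS reports whether the warning should fire: only for a
// user-written grpc stanza (not one synthesized by the deprecated-field
// mapping) when no HTTPS port will turn TLS on for the gRPC listener.
func warnIneffectualGRPCTLS(httpsPort int, grpc TLSProtocolConfig, modifiedByDeprecated *struct{}) bool {
	userProvidedGRPCStanza := grpc != (TLSProtocolConfig{}) && modifiedByDeprecated == nil
	return httpsPort <= 0 && userProvidedGRPCStanza
}
```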


@ -3,6 +3,7 @@ package config
import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/hashicorp/consul/agent/consul"
@ -261,18 +262,19 @@ type Config struct {
	SnapshotAgent map[string]interface{} `mapstructure:"snapshot_agent"`

	// non-user configurable values
	AEInterval                 *string    `mapstructure:"ae_interval"`
	CheckDeregisterIntervalMin *string    `mapstructure:"check_deregister_interval_min"`
	CheckReapInterval          *string    `mapstructure:"check_reap_interval"`
	Consul                     Consul     `mapstructure:"consul"`
	Revision                   *string    `mapstructure:"revision"`
	SegmentLimit               *int       `mapstructure:"segment_limit"`
	SegmentNameLimit           *int       `mapstructure:"segment_name_limit"`
	SyncCoordinateIntervalMin  *string    `mapstructure:"sync_coordinate_interval_min"`
	SyncCoordinateRateTarget   *float64   `mapstructure:"sync_coordinate_rate_target"`
	Version                    *string    `mapstructure:"version"`
	VersionPrerelease          *string    `mapstructure:"version_prerelease"`
	VersionMetadata            *string    `mapstructure:"version_metadata"`
	BuildDate                  *time.Time `mapstructure:"build_date"`
	// Enterprise Only
	Audit Audit `mapstructure:"audit"`
@ -403,6 +405,7 @@ type CheckDefinition struct {
	DisableRedirects  *bool   `mapstructure:"disable_redirects"`
	OutputMaxSize     *int    `mapstructure:"output_max_size"`
	TCP               *string `mapstructure:"tcp"`
	UDP               *string `mapstructure:"udp"`
	Interval          *string `mapstructure:"interval"`
	DockerContainerID *string `mapstructure:"docker_container_id" alias:"dockercontainerid"`
	Shell             *string `mapstructure:"shell"`
@ -674,6 +677,7 @@ type Telemetry struct {
	DisableHostname          *bool    `mapstructure:"disable_hostname"`
	DogstatsdAddr            *string  `mapstructure:"dogstatsd_addr"`
	DogstatsdTags            []string `mapstructure:"dogstatsd_tags"`
	RetryFailedConfiguration *bool    `mapstructure:"retry_failed_connection"`
	FilterDefault            *bool    `mapstructure:"filter_default"`
	PrefixFilter             []string `mapstructure:"prefix_filter"`
	MetricsPrefix            *string  `mapstructure:"metrics_prefix"`
@ -870,4 +874,17 @@ type TLS struct {
	InternalRPC TLSProtocolConfig `mapstructure:"internal_rpc"`
	HTTPS       TLSProtocolConfig `mapstructure:"https"`
	GRPC        TLSProtocolConfig `mapstructure:"grpc"`

	// GRPCModifiedByDeprecatedConfig is a flag used to indicate that GRPC was
	// modified by the deprecated field mapping (as opposed to a user-provided
	// grpc stanza). This prevents us from emitting a warning about an
	// ineffectual grpc stanza when we modify GRPC to honor the legacy behaviour
	// that setting `verify_incoming = true` at the top-level *does not* enable
	// client certificate verification on the gRPC port.
	//
	// See: applyDeprecatedTLSConfig.
	//
	// Note: we use a *struct{} here because a simple bool isn't supported by our
	// config merging logic.
	GRPCModifiedByDeprecatedConfig *struct{} `mapstructure:"-"`
}
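The *struct{} note is worth unpacking: a merge over plain bools cannot tell "explicitly false" from "unset", while a pointer carries presence. A minimal sketch of that pattern (a standalone illustration, not the actual merge code):

```go
// mergeFlag keeps an override when it is present (non-nil); a plain bool
// would collapse "explicitly false" and "unset" into the same value.
func mergeFlag(base, override *struct{}) *struct{} {
	if override != nil {
		return override
	}
	return base
}

// mergeFlag(nil, &struct{}{}) != nil -> the marker survives merging
// mergeFlag(nil, nil)        == nil -> still recognisably unset
```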


@ -2,6 +2,7 @@ package config
import (
	"strconv"
	"time"

	"github.com/hashicorp/raft"
@ -128,6 +129,7 @@ func DefaultSource() Source {
		metrics_prefix = "consul"
		filter_default = true
		prefix_filter = []
		retry_failed_connection = true
	}
	raft_snapshot_threshold = ` + strconv.Itoa(int(cfg.RaftConfig.SnapshotThreshold)) + `
	raft_snapshot_interval = "` + cfg.RaftConfig.SnapshotInterval.String() + `"
@ -196,8 +198,8 @@ func NonUserSource() Source {
	# SegmentNameLimit is the maximum segment name length.
	segment_name_limit = 64

	connect = {
		# 0s causes the value to be ignored and operate without capping
		# the max time before leaf certs can be generated after a roots change.
		test_ca_leaf_root_change_spread = "0s"
@ -209,7 +211,7 @@ func NonUserSource() Source {
// versionSource creates a config source for the version parameters.
// This should be merged in the tail since these values are not
// user configurable.
func versionSource(rev, ver, verPre, meta string, buildDate time.Time) Source {
	return LiteralSource{
		Name: "version",
		Config: Config{
@ -217,6 +219,7 @@ func versionSource(rev, ver, verPre, meta string) Source {
			Version:           &ver,
			VersionPrerelease: &verPre,
			VersionMetadata:   &meta,
			BuildDate:         &buildDate,
		},
	}
}
@ -224,7 +227,8 @@ func versionSource(rev, ver, verPre, meta string) Source {
// defaultVersionSource returns the version config source for the embedded
// version numbers.
func defaultVersionSource() Source {
	buildDate, _ := time.Parse(time.RFC3339, version.BuildDate) // This has been checked elsewhere
	return versionSource(version.GitCommit, version.Version, version.VersionPrerelease, version.VersionMetadata, buildDate)
}
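version.BuildDate is expected to be a valid RFC3339 string injected at build time, which is why the parse error can be discarded here. A toy round-trip using the same instant the config tests below expect (the demo function is hypothetical):

```go
// demoParseBuildDate is illustrative only.
func demoParseBuildDate() {
	raw := "2019-11-20T05:00:00Z"
	buildDate, err := time.Parse(time.RFC3339, raw)
	if err != nil {
		panic(err) // assumed unreachable: the build date is validated upstream
	}
	fmt.Println(buildDate) // 2019-11-20 05:00:00 +0000 UTC
}
```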
// DefaultConsulSource returns the default configuration for the consul agent.


@ -180,9 +180,11 @@ func applyDeprecatedConfig(d *decodeTarget) (Config, []string) {
func applyDeprecatedTLSConfig(dep DeprecatedConfig, cfg *Config) []string {
	var warns []string

	tls := &cfg.TLS
	defaults := &tls.Defaults
	internalRPC := &tls.InternalRPC
	https := &tls.HTTPS
	grpc := &tls.GRPC
	if v := dep.CAFile; v != nil {
		if defaults.CAFile == nil {
@ -239,6 +241,16 @@ func applyDeprecatedTLSConfig(dep DeprecatedConfig, cfg *Config) []string {
		if defaults.VerifyIncoming == nil {
			defaults.VerifyIncoming = v
		}

		// Prior to Consul 1.12 it was not possible to enable client certificate
		// verification on the gRPC port. We must override GRPC.VerifyIncoming to
		// prevent it from inheriting Defaults.VerifyIncoming when we've mapped the
		// deprecated top-level verify_incoming field.
		if grpc.VerifyIncoming == nil {
			grpc.VerifyIncoming = pBool(false)
			tls.GRPCModifiedByDeprecatedConfig = &struct{}{}
		}

		warns = append(warns, deprecationWarning("verify_incoming", "tls.defaults.verify_incoming"))
	}
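The net effect of the mapping is easiest to state as an example. A hypothetical test-style sketch (field names follow the hunks above; the DeprecatedConfig.VerifyIncoming field name is assumed from context):

```go
// Hypothetical sketch of the legacy mapping's outcome.
dep := DeprecatedConfig{VerifyIncoming: pBool(true)} // legacy top-level verify_incoming = true
cfg := Config{}
warns := applyDeprecatedTLSConfig(dep, &cfg)

// *cfg.TLS.Defaults.VerifyIncoming == true   (inherited by internal RPC and HTTPS)
// *cfg.TLS.GRPC.VerifyIncoming     == false  (pinned off, preserving pre-1.12 behaviour)
// cfg.TLS.GRPCModifiedByDeprecatedConfig != nil, so the ineffectual-stanza warning stays quiet
_ = warns // carries the verify_incoming deprecation warning
```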


@ -98,7 +98,7 @@ tls_prefer_server_cipher_suites = true
	require.False(t, rt.TLS.InternalRPC.VerifyIncoming)
	require.False(t, rt.TLS.HTTPS.VerifyIncoming)
	require.False(t, rt.TLS.GRPC.VerifyIncoming)
	require.True(t, rt.TLS.InternalRPC.VerifyOutgoing)
	require.True(t, rt.TLS.HTTPS.VerifyOutgoing)
	require.True(t, rt.TLS.InternalRPC.VerifyServerHostname)


@ -62,6 +62,7 @@ type RuntimeConfig struct {
	Version           string
	VersionPrerelease string
	VersionMetadata   string
	BuildDate         time.Time
	// consul config
	ConsulCoordinateUpdateMaxBatches int
@ -1700,6 +1701,10 @@ func sanitize(name string, v reflect.Value) reflect.Value {
		x := v.Interface().(time.Duration)
		return reflect.ValueOf(x.String())

	case isTime(typ):
		x := v.Interface().(time.Time)
		return reflect.ValueOf(x.String())

	case isString(typ):
		if strings.HasPrefix(name, "RetryJoinLAN[") || strings.HasPrefix(name, "RetryJoinWAN[") {
			x := v.Interface().(string)
@ -1771,6 +1776,7 @@ func sanitize(name string, v reflect.Value) reflect.Value {
}

func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) }
func isTime(t reflect.Type) bool     { return t == reflect.TypeOf(time.Time{}) }
func isMap(t reflect.Type) bool      { return t.Kind() == reflect.Map }
func isNetAddr(t reflect.Type) bool  { return t.Implements(reflect.TypeOf((*net.Addr)(nil)).Elem()) }
func isPtr(t reflect.Type) bool      { return t.Kind() == reflect.Ptr }
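Routing time.Time through its String method is what yields the "2019-11-20 05:00:00 +0000 UTC" form in the sanitized JSON below. A one-line sanity check of that assumption (hypothetical demo):

```go
// demoSanitizedTime is illustrative only.
func demoSanitizedTime() {
	bd := time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC)
	fmt.Println(bd.String()) // 2019-11-20 05:00:00 +0000 UTC, as in the expected JSON
}
```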


@ -5661,6 +5661,7 @@ func TestLoad_FullConfig(t *testing.T) {
		Version:           "R909Hblt",
		VersionPrerelease: "ZT1JOQLn",
		VersionMetadata:   "GtTCa13",
		BuildDate:         time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC),

		// consul configuration
		ConsulCoordinateUpdateBatchSize: 128,
@ -6306,6 +6307,7 @@ func TestLoad_FullConfig(t *testing.T) {
		DisableHostname:          true,
		DogstatsdAddr:            "0wSndumK",
		DogstatsdTags:            []string{"3N81zSUB", "Xtj8AnXZ"},
		RetryFailedConfiguration: true,
		FilterDefault:            true,
		AllowedPrefixes:          []string{"oJotS8XJ"},
		BlockedPrefixes:          []string{"cazlEhGn", "ftO6DySn.rpc.server.call"},
@ -6446,7 +6448,8 @@ func TestLoad_FullConfig(t *testing.T) {
		ConfigFiles: []string{"testdata/full-config." + format},
		HCL:         []string{fmt.Sprintf(`data_dir = "%s"`, dataDir)},
	}
	opts.Overrides = append(opts.Overrides, versionSource("JNtPSav3", "R909Hblt", "ZT1JOQLn", "GtTCa13",
		time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC)))
	r, err := Load(opts)
	require.NoError(t, err)
	prototest.AssertDeepEqual(t, expected, r.RuntimeConfig)
@ -6640,6 +6643,7 @@ func parseCIDR(t *testing.T, cidr string) *net.IPNet {
func TestRuntimeConfig_Sanitize(t *testing.T) {
	rt := RuntimeConfig{
		BindAddr:             &net.IPAddr{IP: net.ParseIP("127.0.0.1")},
		BuildDate:            time.Date(2019, 11, 20, 5, 0, 0, 0, time.UTC),
		CheckOutputMaxSize:   checks.DefaultBufSize,
		SerfAdvertiseAddrLAN: &net.TCPAddr{IP: net.ParseIP("1.2.3.4"), Port: 5678},
		DNSAddrs: []net.Addr{


@ -76,6 +76,7 @@
"BindAddr": "127.0.0.1", "BindAddr": "127.0.0.1",
"Bootstrap": false, "Bootstrap": false,
"BootstrapExpect": 0, "BootstrapExpect": 0,
"BuildDate": "2019-11-20 05:00:00 +0000 UTC",
"Cache": { "Cache": {
"EntryFetchMaxBurst": 42, "EntryFetchMaxBurst": 42,
"EntryFetchRate": 0.334, "EntryFetchRate": 0.334,
@ -118,7 +119,8 @@
"TLSSkipVerify": false, "TLSSkipVerify": false,
"TTL": "0s", "TTL": "0s",
"Timeout": "0s", "Timeout": "0s",
"Token": "hidden" "Token": "hidden",
"UDP": ""
} }
], ],
"ClientAddrs": [], "ClientAddrs": [],
@ -324,7 +326,8 @@
"TLSServerName": "", "TLSServerName": "",
"TLSSkipVerify": false, "TLSSkipVerify": false,
"TTL": "0s", "TTL": "0s",
"Timeout": "0s" "Timeout": "0s",
"UDP": ""
}, },
"Checks": [], "Checks": [],
"Connect": null, "Connect": null,
@ -428,6 +431,7 @@
"Registerer": null, "Registerer": null,
"SummaryDefinitions": [] "SummaryDefinitions": []
}, },
"RetryFailedConfiguration": false,
"StatsdAddr": "", "StatsdAddr": "",
"StatsiteAddr": "" "StatsiteAddr": ""
}, },


@ -647,6 +647,7 @@ telemetry {
	disable_hostname = true
	dogstatsd_addr = "0wSndumK"
	dogstatsd_tags = [ "3N81zSUB","Xtj8AnXZ" ]
	retry_failed_connection = true
	filter_default = true
	prefix_filter = [ "+oJotS8XJ","-cazlEhGn" ]
	metrics_prefix = "ftO6DySn"


@ -644,6 +644,7 @@
"disable_hostname": true, "disable_hostname": true,
"dogstatsd_addr": "0wSndumK", "dogstatsd_addr": "0wSndumK",
"dogstatsd_tags": [ "3N81zSUB","Xtj8AnXZ" ], "dogstatsd_tags": [ "3N81zSUB","Xtj8AnXZ" ],
"retry_failed_connection": true,
"filter_default": true, "filter_default": true,
"prefix_filter": [ "+oJotS8XJ","-cazlEhGn" ], "prefix_filter": [ "+oJotS8XJ","-cazlEhGn" ],
"metrics_prefix": "ftO6DySn", "metrics_prefix": "ftO6DySn",


@ -33,10 +33,7 @@ func (s *HTTPHandlers) configGet(resp http.ResponseWriter, req *http.Request) (i
	if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
		return nil, nil
	}

	kindAndName := strings.TrimPrefix(req.URL.Path, "/v1/config/")
	pathArgs := strings.SplitN(kindAndName, "/", 2)

	switch len(pathArgs) {
@ -84,10 +81,7 @@ func (s *HTTPHandlers) configDelete(resp http.ResponseWriter, req *http.Request)
	var args structs.ConfigEntryRequest
	s.parseDC(req, &args.Datacenter)
	s.parseToken(req, &args.Token)

	kindAndName := strings.TrimPrefix(req.URL.Path, "/v1/config/")
	pathArgs := strings.SplitN(kindAndName, "/", 2)

	if len(pathArgs) != 2 {


@ -4,7 +4,6 @@ import (
"bytes" "bytes"
"crypto/x509" "crypto/x509"
"fmt" "fmt"
"strings"
"github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/connect"
) )
@ -92,15 +91,3 @@ func validateSignIntermediate(csr *x509.CertificateRequest, spiffeID *connect.Sp
	}
	return nil
}
// EnsureTrailingNewline is used to fix a case where a provider does not return
// a new line after the certificate, as required by the specification; see
// GH-8178 for more context. (Removed here; it now lives in the lib package, and
// the call sites below use lib.EnsureTrailingNewline.)
func EnsureTrailingNewline(cert string) string {
if cert == "" {
return cert
}
if strings.HasSuffix(cert, "\n") {
return cert
}
return fmt.Sprintf("%s\n", cert)
}


@ -18,6 +18,7 @@ import (
"github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib"
) )
const (
@ -363,15 +364,15 @@ func (a *AWSProvider) loadCACerts() error {
	if a.isPrimary {
		// Just use the cert as a root
		a.rootPEM = lib.EnsureTrailingNewline(*output.Certificate)
	} else {
		a.intermediatePEM = lib.EnsureTrailingNewline(*output.Certificate)

		// TODO(banks) support user-supplied CA being a Subordinate even in the
		// primary DC. For now this assumes there is only one cert in the chain
		if output.CertificateChain == nil {
			return fmt.Errorf("Subordinate CA %s returned no chain", a.arn)
		}
		a.rootPEM = lib.EnsureTrailingNewline(*output.CertificateChain)
	}
	return nil
}
@ -489,7 +490,7 @@ func (a *AWSProvider) signCSR(csrPEM string, templateARN string, ttl time.Durati
	}

	if certOutput.Certificate != nil {
		return true, lib.EnsureTrailingNewline(*certOutput.Certificate), nil
	}

	return false, "", nil
@ -532,8 +533,8 @@ func (a *AWSProvider) SetIntermediate(intermediatePEM string, rootPEM string) er
	}

	// We successfully initialized, keep track of the root and intermediate certs.
	a.rootPEM = lib.EnsureTrailingNewline(rootPEM)
	a.intermediatePEM = lib.EnsureTrailingNewline(intermediatePEM)

	return nil
}


@ -13,14 +13,15 @@ import (
"sync" "sync"
"time" "time"
"github.com/hashicorp/consul/lib/decode"
"github.com/hashicorp/consul/lib/retry"
"github.com/hashicorp/go-hclog" "github.com/hashicorp/go-hclog"
vaultapi "github.com/hashicorp/vault/api" vaultapi "github.com/hashicorp/vault/api"
"github.com/mitchellh/mapstructure" "github.com/mitchellh/mapstructure"
"github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/lib/decode"
"github.com/hashicorp/consul/lib/retry"
) )
const (
@ -506,7 +507,7 @@ func (v *VaultProvider) getCA(namespace, path string) (string, error) {
return "", err return "", err
} }
root := EnsureTrailingNewline(string(bytes)) root := lib.EnsureTrailingNewline(string(bytes))
if root == "" { if root == "" {
return "", ErrBackendNotInitialized return "", ErrBackendNotInitialized
} }
@ -535,7 +536,7 @@ func (v *VaultProvider) getCAChain(namespace, path string) (string, error) {
return "", err return "", err
} }
root := EnsureTrailingNewline(string(raw)) root := lib.EnsureTrailingNewline(string(raw))
return root, nil return root, nil
} }
@ -600,7 +601,7 @@ func (v *VaultProvider) Sign(csr *x509.CertificateRequest) (string, error) {
	if !ok {
		return "", fmt.Errorf("certificate was not a string")
	}
	return lib.EnsureTrailingNewline(cert), nil
}
// SignIntermediate returns a signed CA certificate with a path length constraint
@ -637,7 +638,7 @@ func (v *VaultProvider) SignIntermediate(csr *x509.CertificateRequest) (string,
return "", fmt.Errorf("signed intermediate result is not a string") return "", fmt.Errorf("signed intermediate result is not a string")
} }
return EnsureTrailingNewline(intermediate), nil return lib.EnsureTrailingNewline(intermediate), nil
} }
// CrossSignCA takes a CA certificate and cross-signs it to form a trust chain
@ -677,7 +678,7 @@ func (v *VaultProvider) CrossSignCA(cert *x509.Certificate) (string, error) {
return "", fmt.Errorf("certificate was not a string") return "", fmt.Errorf("certificate was not a string")
} }
return EnsureTrailingNewline(xcCert), nil return lib.EnsureTrailingNewline(xcCert), nil
} }
// SupportsCrossSigning implements Provider // SupportsCrossSigning implements Provider


@ -11,6 +11,7 @@ const (
	internal        = "internal"
	version         = "v1"
	internalVersion = internal + "-" + version
	external        = "external"
)

func UpstreamSNI(u *structs.Upstream, subset string, dc string, trustDomain string) string {
@ -64,6 +65,21 @@ func ServiceSNI(service string, subset string, namespace string, partition strin
	}
}
func PeeredServiceSNI(service, namespace, partition, peerName, trustDomain string) string {
if peerName == "" {
panic("peer name is a requirement for this function and does not make sense without it")
}
if namespace == "" {
namespace = structs.IntentionDefaultNamespace
}
if partition == "" {
// TODO(partitions) Make default available in OSS as a constant for uses like this one
partition = "default"
}
return dotJoin(service, namespace, partition, peerName, external, trustDomain)
}
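For orientation, the assembled name has the shape service.namespace.partition.peer.external.trust-domain. A small sketch with a hypothetical trust domain (the test further down exercises the same path):

```go
// Empty namespace and partition take their defaults.
sni := PeeredServiceSNI("api", "billing", "", "webstuff", "1234.consul")
fmt.Println(sni) // api.billing.default.webstuff.external.1234.consul
```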
func dotJoin(parts ...string) string {
	return strings.Join(parts, ".")
}


@ -3,8 +3,9 @@ package connect
import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/agent/structs"
)
const (
@ -164,6 +165,11 @@ func TestServiceSNI(t *testing.T) {
ServiceSNI("api", "canary", "neighbor", "part1", "foo", testTrustDomain2)) ServiceSNI("api", "canary", "neighbor", "part1", "foo", testTrustDomain2))
} }
func TestPeeredServiceSNI(t *testing.T) {
require.Equal(t, "api.billing.default.webstuff.external."+testTrustDomainSuffix1,
PeeredServiceSNI("api", "billing", "", "webstuff", testTrustDomainSuffix1))
}
func TestQuerySNI(t *testing.T) {
	require.Equal(t, "magicquery.default.foo.query."+testTrustDomain1,
		QuerySNI("magicquery", "foo", testTrustDomain1))


@ -16,6 +16,7 @@ import (
"github.com/hashicorp/go-uuid" "github.com/hashicorp/go-uuid"
"github.com/mitchellh/go-testing-interface" "github.com/mitchellh/go-testing-interface"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
) )
@ -23,6 +24,7 @@ import (
//
// NOTE: this is duplicated in the api package as testClusterID
const TestClusterID = "11111111-2222-3333-4444-555555555555"
const TestTrustDomain = TestClusterID + ".consul"
// testCACounter is just an atomically incremented counter for creating
// unique names for the CA certs.
@ -295,6 +297,21 @@ func TestLeafWithNamespace(t testing.T, service, namespace string, root *structs
	return certPEM, keyPEM
}
func TestMeshGatewayLeaf(t testing.T, partition string, root *structs.CARoot) (string, string) {
	// Build the SPIFFE ID
	spiffeID := &SpiffeIDMeshGateway{
		Host:       fmt.Sprintf("%s.consul", TestClusterID),
		Partition:  acl.PartitionOrDefault(partition),
		Datacenter: "dc1",
	}

	certPEM, keyPEM, err := testLeafWithID(t, spiffeID, root, DefaultPrivateKeyType, DefaultPrivateKeyBits, 0)
	if err != nil {
		t.Fatal(err)
	}
	return certPEM, keyPEM
}
// TestCSR returns a CSR to sign the given service along with the PEM-encoded
// private key for this certificate.
func TestCSR(t testing.T, uri CertURI) (string, string) {


@ -24,6 +24,8 @@ var (
		`^(?:/ap/([^/]+))?/ns/([^/]+)/dc/([^/]+)/svc/([^/]+)$`)
	spiffeIDAgentRegexp = regexp.MustCompile(
		`^(?:/ap/([^/]+))?/agent/client/dc/([^/]+)/id/([^/]+)$`)
	spiffeIDMeshGatewayRegexp = regexp.MustCompile(
		`^(?:/ap/([^/]+))?/gateway/mesh/dc/([^/]+)$`)
)
// ParseCertURIFromString attempts to parse a string representation of a
@ -117,6 +119,31 @@ func ParseCertURI(input *url.URL) (CertURI, error) {
			Datacenter: dc,
			Agent:      agent,
		}, nil
} else if v := spiffeIDMeshGatewayRegexp.FindStringSubmatch(path); v != nil {
// Determine the values. We assume they're reasonable to save cycles,
// but if the raw path is not empty that means that something is
// URL encoded so we go to the slow path.
ap := v[1]
dc := v[2]
if input.RawPath != "" {
var err error
if ap, err = url.PathUnescape(v[1]); err != nil {
return nil, fmt.Errorf("Invalid admin partition: %s", err)
}
if dc, err = url.PathUnescape(v[2]); err != nil {
return nil, fmt.Errorf("Invalid datacenter: %s", err)
}
}
if ap == "" {
ap = "default"
}
return &SpiffeIDMeshGateway{
Host: input.Host,
Partition: ap,
Datacenter: dc,
}, nil
	}
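Putting the new regexp to work: a mesh gateway identity lives at /gateway/mesh/dc/&lt;datacenter&gt;, optionally prefixed by /ap/&lt;partition&gt;. A sketch of a parse round-trip (TestTrustDomain comes from the test helpers above; the output values follow the defaulting rules in the snippet):

```go
// Sketch of a parse round-trip via the entry point named in the comment above.
id, err := ParseCertURIFromString("spiffe://" + TestTrustDomain + "/gateway/mesh/dc/dc1")
if err != nil {
	panic(err)
}
gw := id.(*SpiffeIDMeshGateway)
fmt.Println(gw.Host, gw.Partition, gw.Datacenter)
// 11111111-2222-3333-4444-555555555555.consul default dc1
```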
// Test for signing ID // Test for signing ID

Some files were not shown because too many files have changed in this diff Show More