mirror of https://github.com/status-im/consul.git
Merge branch 'hashicorp-main' into serve-panic-recovery
commit ab75b4ee92
|
@ -0,0 +1,3 @@
|
|||
```release-note:enhancement
|
||||
Allow configuring graceful stop in testutil.
|
||||
```
|
|
@ -1,4 +0,0 @@
|
|||
```release-note:deprecation
|
||||
config: the `ports.grpc` and `addresses.grpc` configuration settings have been renamed to `ports.xds` and `addresses.xds` to better match their function.
|
||||
```
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
connect: Support manipulating HTTP headers in the mesh.
|
||||
```
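As a rough illustration of the new capability, a service-router destination could add or strip headers via the `HTTPHeaderModifiers` shape this change introduces; the exact field names below (`RequestHeaders`, `Set`, `Remove`) are assumptions based on that shape, not a canonical reference:

```go
package example

import "github.com/hashicorp/consul/api"

// writeHeaderRewritingRoute sketches a service-router config entry that
// rewrites request headers on one route. RequestHeaders/HTTPHeaderModifiers
// are assumed to be the fields added by this change.
func writeHeaderRewritingRoute(client *api.Client) error {
	entry := &api.ServiceRouterConfigEntry{
		Kind: api.ServiceRouter,
		Name: "web",
		Routes: []api.ServiceRoute{
			{
				Match: &api.ServiceRouteMatch{
					HTTP: &api.ServiceRouteHTTPMatch{PathPrefix: "/admin"},
				},
				Destination: &api.ServiceRouteDestination{
					Service: "web-admin",
					RequestHeaders: &api.HTTPHeaderModifiers{
						Set:    map[string]string{"x-route": "admin"},
						Remove: []string{"x-debug"},
					},
				},
			},
		},
	}
	_, _, err := client.ConfigEntries().Set(entry, nil)
	return err
}
```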
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
grpc: ensure that streaming gRPC requests work over mesh gateway based wan federation
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: Hide all metrics for ingress gateway services
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:feature
|
||||
ui: Adding support in Topology view for Routing Configurations
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: Properly encode non-URL safe characters in OIDC responses
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: Disabling policy form fields for users with 'read' permissions
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
connect: Add low-level feature to allow an Ingress to retrieve TLS certificates from SDS.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: fixes a bug with some service failovers not showing the routing tab visualization
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: hide create button for policies/roles/namespace if the user's token has no write permissions to those areas
|
||||
```
|
|
@ -0,0 +1,4 @@
|
|||
```release-note:bug
|
||||
ui: Ignore reported permissions for KV area meaning the KV is always enabled
|
||||
for both read/write access if the HTTP API allows.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:security
|
||||
rpc: authorize raft requests [CVE-2021-37219](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37219)
|
||||
```
|
|
@ -0,0 +1,4 @@
|
|||
```release-note:bug
|
||||
api: Revert early out errors from license APIs to allow v1.10+ clients to
|
||||
manage licenses on older servers
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
connect: update supported envoy versions to 1.18.4, 1.17.4, 1.16.5
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
tls: consider presented intermediates during server connection tls handshake.
|
||||
```
|
|
@ -0,0 +1,4 @@
|
|||
```release-note:improvement
|
||||
checks: add failures_before_warning setting for interval checks.
|
||||
```
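A minimal sketch of how the new threshold might be used when registering an interval check through the Go API client; `FailuresBeforeWarning` is assumed here to be the client-side counterpart of the `failures_before_warning` agent setting:

```go
package example

import "github.com/hashicorp/consul/api"

// registerCheckWithWarningThreshold sketches an HTTP interval check that turns
// warning after 2 consecutive failures and critical after 5.
func registerCheckWithWarningThreshold(client *api.Client) error {
	reg := &api.AgentServiceRegistration{
		Name: "web",
		Port: 8080,
		Check: &api.AgentServiceCheck{
			HTTP:     "http://localhost:8080/health",
			Interval: "10s",
			Timeout:  "2s",
			// Assumed field mirroring the new failures_before_warning setting.
			FailuresBeforeWarning:  2,
			FailuresBeforeCritical: 5,
		},
	}
	return client.Agent().ServiceRegister(reg)
}
```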
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
xds: fixed a bug where Envoy sidecars could enter a state where they failed to receive xds updates from Consul
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:feature
|
||||
sso/oidc: **(Enterprise only)** Add support for providing acr_values in OIDC auth flow
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: Gracefully recover from non-existent DC errors
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
api: add partition field to acl structs
|
||||
```
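For illustration, creating a token scoped to an admin partition through the Go API client, assuming the new `Partition` field described above (partitions themselves are an Enterprise feature):

```go
package example

import "github.com/hashicorp/consul/api"

// createPartitionScopedToken sketches creating an ACL token in a specific
// admin partition using the Partition field this change adds to the ACL structs.
func createPartitionScopedToken(client *api.Client) (*api.ACLToken, error) {
	tok := &api.ACLToken{
		Description: "service token for team-a",
		Partition:   "team-a", // ignored on OSS servers; Enterprise-only feature
		Policies: []*api.ACLTokenPolicyLink{
			{Name: "service-web"},
		},
	}
	created, _, err := client.ACL().TokenCreate(tok, nil)
	return created, err
}
```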
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
xds: ensure the active streams counters are 64 bit aligned on 32 bit systems
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
telemetry: Add new metrics for the count of KV entries in the Consul store.
|
||||
```
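A hedged sketch of reading the new gauge through the agent metrics API; the metric name `consul.state.kv_entries` is an assumption based on the entry above:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

// printKVEntryCount reads the agent metrics (same data as /v1/agent/metrics)
// and prints the assumed KV entry count gauge if present.
func printKVEntryCount(client *api.Client) error {
	info, err := client.Agent().Metrics()
	if err != nil {
		return err
	}
	for _, g := range info.Gauges {
		if g.Name == "consul.state.kv_entries" { // assumed metric name
			fmt.Printf("%s = %v\n", g.Name, g.Value)
		}
	}
	return nil
}
```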
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: **(Enterprise Only)** Fix saving intentions with namespaced source/destination
|
||||
```
|
|
@ -0,0 +1,6 @@
|
|||
```release-note:bug
|
||||
grpc: strip local ACL tokens from RPCs during forwarding if crossing datacenters
|
||||
```
|
||||
```release-note:feature
|
||||
partitions: allow for partition queries to be forwarded
|
||||
```
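A small sketch of a partition-scoped read from the Go API client, which is the kind of query the forwarding change above serves; `QueryOptions.Partition` is assumed to be the client-side knob and requires Enterprise servers:

```go
package example

import "github.com/hashicorp/consul/api"

// listPartitionServices sketches a catalog read scoped to one admin partition.
// The Partition query option is assumed here and is Enterprise-only.
func listPartitionServices(client *api.Client) (map[string][]string, error) {
	services, _, err := client.Catalog().Services(&api.QueryOptions{Partition: "team-a"})
	return services, err
}
```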
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
audit-logging: **(Enterprise Only)** Audit logs will now include select HTTP headers in each logs payload. Those headers are: `Forwarded`, `Via`, `X-Forwarded-For`, `X-Forwarded-Host` and `X-Forwarded-Proto`.
|
||||
```
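For context, a hedged example of an API request carrying two of the listed headers; nothing Consul-specific is needed on the client side, the Enterprise audit log simply records these headers when present:

```go
package example

import (
	"log"
	"net/http"
)

// queryWithForwardingHeaders sends a plain Consul HTTP API request that carries
// forwarding headers the Enterprise audit log now records with each entry.
func queryWithForwardingHeaders() {
	req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8500/v1/catalog/services", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("X-Forwarded-For", "10.0.0.42")
	req.Header.Set("X-Forwarded-Proto", "https")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}
```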
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
connect: Fix upstream listener escape hatch for prepared queries
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
connect: update supported envoy versions to 1.19.1, 1.18.4, 1.17.4, 1.16.5
|
||||
```
|
|
@ -0,0 +1,4 @@
|
|||
```release-note:improvement
|
||||
ui: Add uri guard to prevent future URL encoding issues
|
||||
```
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
```release-note:improvement
|
||||
ui: Add initial support for partitions to intentions
|
||||
```
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
```release-note:improvement
|
||||
ui: Removed informational panel from the namespace selector menu when editing
|
||||
namespaces
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
acl: fixes the fallback behaviour of down_policy when set to extend-cache/async-cache and the token is not cached.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: Don't show a CRD warning for read-only intentions
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:feature
|
||||
ui: Added initial support for admin partition CRUD
|
||||
```
|
|
@ -0,0 +1,5 @@
|
|||
```release-note:improvement
|
||||
ui: Move the majority of our SASS variables to use native CSS custom
|
||||
properties
|
||||
```
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: Topology - Fix up Default Allow and Permissive Intentions notices
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
telemetry: Add new metrics for the count of connect service instances and configuration entries.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
telemetry: fixes a bug with Prometheus consul_autopilot_healthy metric where 0 is reported instead of NaN on servers.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: Ensure all types of data get reconciled with the backend data
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
telemetry: Consul Clients no longer emit Autopilot metrics.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
acl: **(Enterprise only)** Fix bug in 'consul members' filtering with partitions.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
ui: Fixed styling of Role remove dialog on the Token edit page
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
acl: **(Enterprise only)** ensure that auth methods with namespace rules work with partitions
|
||||
```
|
|
@ -0,0 +1,7 @@
|
|||
```release-note:improvement
|
||||
areas: **(Enterprise only)** Make implementation of WriteToAddress non-blocking to avoid slowing down memberlist's packetListen routine.
|
||||
```
|
||||
|
||||
```release-note:improvement
|
||||
areas: **(Enterprise only)** Apply backpressure to area gossip packet ingestion when more than 512 packets are waiting to be ingested.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
areas: **(Enterprise only)** Add 15s timeout to opening streams over pooled connections.
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:bug
|
||||
server: **(Enterprise only)** Ensure that servers leave network segments when leaving other gossip pools
|
||||
```
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:improvement
|
||||
state: reads of partitions now accept an optional memdb.WatchSet
|
||||
```
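A rough sketch of the blocking-read pattern this enables; `memdb.WatchSet` is the real go-memdb type, while the `listPartitions` accessor stands in for the internal state-store read and its signature is an assumption:

```go
package example

import (
	"context"
	"time"

	memdb "github.com/hashicorp/go-memdb"
)

// watchPartitions blocks until the partition list may have changed or the
// timeout fires. listPartitions stands in for the state-store read that now
// accepts an optional memdb.WatchSet.
func watchPartitions(ctx context.Context, listPartitions func(memdb.WatchSet) (uint64, []string, error)) error {
	ws := memdb.NewWatchSet()

	// The read registers every channel it depends on into the watch set.
	if _, _, err := listPartitions(ws); err != nil {
		return err
	}

	// WatchCtx returns when any registered channel fires (the data may have
	// changed and should be re-read) or when the context is cancelled.
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	return ws.WatchCtx(ctx)
}
```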
|
|
@ -0,0 +1,3 @@
|
|||
```release-note:feature
|
||||
partitions: **(Enterprise only)** Adds admin partitions, a new feature to enhance Consul's multitenancy capabilities.
|
||||
```
|
|
@ -22,7 +22,7 @@ references:
|
|||
test-results: &TEST_RESULTS_DIR /tmp/test-results
|
||||
|
||||
cache:
|
||||
yarn: &YARN_CACHE_KEY consul-ui-v4-{{ checksum "ui/yarn.lock" }}
|
||||
yarn: &YARN_CACHE_KEY consul-ui-v5-{{ checksum "ui/yarn.lock" }}
|
||||
rubygem: &RUBYGEM_CACHE_KEY static-site-gems-v1-{{ checksum "Gemfile.lock" }}
|
||||
|
||||
environment: &ENVIRONMENT
|
||||
|
@ -220,7 +220,6 @@ jobs:
|
|||
- run:
|
||||
name: go test
|
||||
command: |
|
||||
mkdir -p $TEST_RESULTS_DIR
|
||||
mkdir -p $TEST_RESULTS_DIR /tmp/jsonfile
|
||||
PACKAGE_NAMES=$(go list -tags "$GOTAGS" ./... | circleci tests split --split-by=timings --timings-type=classname)
|
||||
echo "Running $(echo $PACKAGE_NAMES | wc -w) packages"
|
||||
|
@ -228,10 +227,14 @@ jobs:
|
|||
gotestsum \
|
||||
--format=short-verbose \
|
||||
--jsonfile /tmp/jsonfile/go-test-${CIRCLE_NODE_INDEX}.log \
|
||||
--debug \
|
||||
--rerun-fails=3 \
|
||||
--rerun-fails-max-failures=40 \
|
||||
--rerun-fails-report=/tmp/gotestsum-rerun-fails \
|
||||
--packages="$PACKAGE_NAMES" \
|
||||
--junitfile $TEST_RESULTS_DIR/gotestsum-report.xml -- \
|
||||
-tags="$GOTAGS" -p 2 \
|
||||
-cover -coverprofile=coverage.txt \
|
||||
$PACKAGE_NAMES
|
||||
-cover -coverprofile=coverage.txt
|
||||
|
||||
- store_test_results:
|
||||
path: *TEST_RESULTS_DIR
|
||||
|
@ -239,6 +242,10 @@ jobs:
|
|||
path: *TEST_RESULTS_DIR
|
||||
- store_artifacts:
|
||||
path: /tmp/jsonfile
|
||||
- run: &rerun-fails-report
|
||||
name: "Re-run fails report"
|
||||
command: |
|
||||
.circleci/scripts/rerun-fails-report.sh /tmp/gotestsum-rerun-fails
|
||||
- run: *notify-slack-failure
|
||||
|
||||
go-test-race:
|
||||
|
@ -301,11 +308,16 @@ jobs:
|
|||
command: |
|
||||
mkdir -p $TEST_RESULTS_DIR /tmp/jsonfile
|
||||
go env
|
||||
PACKAGE_NAMES=$(go list -tags "$GOTAGS" ./...)
|
||||
gotestsum \
|
||||
--jsonfile /tmp/jsonfile/go-test-32bit.log \
|
||||
--rerun-fails=3 \
|
||||
--rerun-fails-max-failures=40 \
|
||||
--rerun-fails-report=/tmp/gotestsum-rerun-fails \
|
||||
--packages="$PACKAGE_NAMES" \
|
||||
--junitfile $TEST_RESULTS_DIR/gotestsum-report.xml -- \
|
||||
-tags="$GOTAGS" -p 2 \
|
||||
-short ./...
|
||||
-short
|
||||
|
||||
- store_test_results:
|
||||
path: *TEST_RESULTS_DIR
|
||||
|
@ -590,7 +602,7 @@ jobs:
|
|||
|
||||
- run:
|
||||
name: install yarn packages
|
||||
command: cd ui && yarn install
|
||||
command: cd ui && make deps
|
||||
|
||||
- save_cache:
|
||||
key: *YARN_CACHE_KEY
|
||||
|
@ -780,14 +792,14 @@ jobs:
|
|||
command: make test-coverage-ci
|
||||
- run: *notify-slack-failure
|
||||
|
||||
envoy-integration-test-1_15_5: &ENVOY_TESTS
|
||||
envoy-integration-test-1_16_5: &ENVOY_TESTS
|
||||
docker:
|
||||
# We only really need bash and docker-compose, which are installed on all
|
||||
# Circle images but pick Go since we have to pick one of them.
|
||||
- image: *GOLANG_IMAGE
|
||||
parallelism: 2
|
||||
environment:
|
||||
ENVOY_VERSION: "1.15.5"
|
||||
ENVOY_VERSION: "1.16.5"
|
||||
steps: &ENVOY_INTEGRATION_TEST_STEPS
|
||||
- checkout
|
||||
# Get go binary from workspace
|
||||
|
@ -820,32 +832,26 @@ jobs:
|
|||
path: *TEST_RESULTS_DIR
|
||||
- run: *notify-slack-failure
|
||||
|
||||
envoy-integration-test-1_15_5-v2compat:
|
||||
envoy-integration-test-1_16_5-v2compat:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.15.5"
|
||||
ENVOY_VERSION: "1.16.5"
|
||||
TEST_V2_XDS: "1"
|
||||
|
||||
envoy-integration-test-1_16_4:
|
||||
envoy-integration-test-1_17_4:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.16.4"
|
||||
ENVOY_VERSION: "1.17.4"
|
||||
|
||||
envoy-integration-test-1_16_4-v2compat:
|
||||
envoy-integration-test-1_18_4:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.16.4"
|
||||
TEST_V2_XDS: "1"
|
||||
ENVOY_VERSION: "1.18.4"
|
||||
|
||||
envoy-integration-test-1_17_3:
|
||||
envoy-integration-test-1_19_1:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.17.3"
|
||||
|
||||
envoy-integration-test-1_18_3:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.18.3"
|
||||
ENVOY_VERSION: "1.19.1"
|
||||
|
||||
# run integration tests for the connect ca providers
|
||||
test-connect-ca-providers:
|
||||
|
@ -1087,22 +1093,19 @@ workflows:
|
|||
- nomad-integration-0_8:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_15_5:
|
||||
- envoy-integration-test-1_16_5:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_15_5-v2compat:
|
||||
- envoy-integration-test-1_16_5-v2compat:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_16_4:
|
||||
- envoy-integration-test-1_17_4:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_16_4-v2compat:
|
||||
- envoy-integration-test-1_18_4:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_17_3:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_18_3:
|
||||
- envoy-integration-test-1_19_1:
|
||||
requires:
|
||||
- dev-build
|
||||
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
#!/usr/bin/env bash
|
||||
#
|
||||
# Add a comment on the github PR if there were any rerun tests.
|
||||
#
|
||||
set -eu -o pipefail
|
||||
|
||||
report_filename="${1?report filename is required}"
|
||||
if [ ! -s "$report_filename" ]; then
|
||||
echo "gotestsum rerun report file is empty or missing"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
function report {
|
||||
echo ":repeat: gotestsum re-ran some tests in $CIRCLE_BUILD_URL"
|
||||
echo
|
||||
echo '```'
|
||||
cat "$report_filename"
|
||||
echo '```'
|
||||
}
|
||||
|
||||
report
|
|
@ -0,0 +1,7 @@
|
|||
# Technical Writer Review
|
||||
|
||||
/website/content/docs/ @hashicorp/consul-docs
|
||||
/website/content/commands/ @hashicorp/consul-docs
|
||||
/website/content/api-docs/ @hashicorp/consul-docs
|
||||
|
||||
|
|
@ -26,7 +26,7 @@ are deployed from this repo.
|
|||
|
||||
### Reporting an Issue:
|
||||
>Note: Issues on GitHub for Consul are intended to be related to bugs or feature requests.
|
||||
>Questions should be directed to other community resources such as the: [Mailing List](https://groups.google.com/group/consul-tool/), [FAQ](https://www.consul.io/docs/faq.html), or [Guides](https://www.consul.io/docs/guides/index.html).
|
||||
>Questions should be directed to other community resources such as the: [Discuss Forum](https://discuss.hashicorp.com/c/consul/29), [FAQ](https://www.consul.io/docs/faq.html), or [Guides](https://www.consul.io/docs/guides/index.html).
|
||||
|
||||
* Make sure you test against the latest released version. It is possible we
|
||||
already fixed the bug you're experiencing. However, if you are on an older
|
||||
|
@ -86,69 +86,35 @@ to work on the fork is to set it as a remote of the Consul project:
|
|||
By following these steps you can push to your fork to create a PR, but the code on disk still
|
||||
lives in the spot where the Go CLI tools expect to find it.
|
||||
|
||||
>Note: If you make any changes to the code, run `make format` to automatically format the code according to Go standards.
|
||||
>Note: If you make any changes to the code, run `gofmt -s -w` to automatically format the code according to Go standards.
|
||||
|
||||
## Testing
|
||||
|
||||
### During Development: Run Relevant Test(s)
|
||||
|
||||
During development, it may be more convenient to check your work-in-progress by running only the tests which you expect to be affected by your changes, as the full test suite can take several minutes to execute. [Go's built-in test tool](https://golang.org/pkg/cmd/go/internal/test/) allows specifying a list of packages to test and the `-run` option to only include test names matching a regular expression.
|
||||
The `go test -short` flag can also be used to skip slower tests.
|
||||
|
||||
Examples (run from the repository root):
|
||||
- `go test -v ./connect` will run all tests in the connect package (see `./connect` folder)
|
||||
- `go test -v -run TestRetryJoin ./command/agent` will run all tests in the agent package (see `./command/agent` folder) with name substring `TestRetryJoin`
|
||||
|
||||
### Before Submitting Changes: Run All Tests
|
||||
When a pull request is opened, CI will run all tests and lint to verify the change.
|
||||
|
||||
Before submitting changes, run **all** tests locally by typing `make test`.
|
||||
The test suite may fail if over-parallelized, so if you are seeing stochastic
|
||||
failures try `GOTEST_FLAGS="-p 2 -parallel 2" make test`.
|
||||
## Go Module Dependencies
|
||||
|
||||
Certain testing patterns such as creating a test `Client` in the `api` pkg
|
||||
or a `TestAgent` followed by a session can lead to flaky tests. More generally,
|
||||
any tests with components that rely on readiness of other components are often
|
||||
flaky.
|
||||
If a dependency is added or changed, run `go mod tidy` to update `go.mod` and `go.sum`.
|
||||
|
||||
Our makefile has some tooling built in to help validate the stability of single
|
||||
or package-wide tests. By running the `test-flake` goal we spin up a local docker
|
||||
container that mirrors a CPU constrained version of our CI environment. Here we can
|
||||
surface uncommon failures that are typically hard to reproduce by re-running
|
||||
tests multiple times.
|
||||
## Developer Documentation
|
||||
|
||||
The makefile goal accepts the following variables as arguments:
|
||||
Documentation about the Consul code base is under [./docs],
|
||||
and the godoc package documentation can be read at [pkg.go.dev/github.com/hashicorp/consul].
|
||||
|
||||
* **FLAKE_PKG** Target package (required)
|
||||
[./docs]: ../docs/README.md
|
||||
[pkg.go.dev/github.com/hashicorp/consul]: https://pkg.go.dev/github.com/hashicorp/consul
|
||||
|
||||
* **FLAKE_TEST** Target test
|
||||
|
||||
* **FLAKE_CPUS** Amount of CPU resources for container
|
||||
|
||||
* **FLAKE_N** Number of times to run tests
|
||||
|
||||
Examples:
|
||||
`make test-flake FLAKE_PKG=connect/proxy`
|
||||
`make test-flake FLAKE_PKG=connect/proxy FLAKE_TEST=TestUpstreamListener`
|
||||
`make test-flake FLAKE_PKG=connect/proxy FLAKE_TEST=TestUpstreamListener FLAKE_CPUS=0.15 FLAKE_N=30`
|
||||
|
||||
The underlying script dumps the full Consul log output to `test.log` in
|
||||
the directory of the target package. In the example above it would be
|
||||
located at `consul/connect/proxy/test.log`.
|
||||
|
||||
Historically, the defaults for `FLAKE_CPUS` (0.15) and `FLAKE_N` (30) have been
|
||||
sufficient to surface a flaky test. If a test is run in this environment and
|
||||
it does not fail after 30 iterations, it should be sufficiently stable.
|
||||
|
||||
## Vendoring
|
||||
|
||||
Consul currently uses Go Modules for vendoring.
|
||||
|
||||
Please only apply the minimal vendor changes to get your PR to work.
|
||||
Consul does not attempt to track the latest version for each dependency.
|
||||
|
||||
## Checklists
|
||||
### Checklists
|
||||
|
||||
Some common changes that many PRs require such as adding config fields, are
|
||||
documented through checklists.
|
||||
|
||||
Please check in `contributing/` for any `checklist-*.md` files that might help
|
||||
Please check in [docs/](../docs/) for any `checklist-*.md` files that might help
|
||||
with your change.
|
||||
|
|
|
@ -1,19 +1,44 @@
|
|||
---
|
||||
name: Web UI Feedback
|
||||
about: You have usage feedback for either version of the web UI
|
||||
about: You have usage feedback for the browser-based UI
|
||||
|
||||
---
|
||||
|
||||
**Old UI or New UI**
|
||||
If you're using Consul 1.1.0 or later, you can use the new UI with the change of
|
||||
an environment variable. Let us know which UI you're using so that we can help.
|
||||
When filing a bug, please include the following headings if possible. Any example text in this template can be deleted.
|
||||
|
||||
**Describe the problem you're having**
|
||||
A clear and concise description of what could be better. If you have screenshots
|
||||
of a bug, place them here.
|
||||
### Overview of the Issue
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
A paragraph or two about the issue you're experiencing / suggestion for
|
||||
improvement.
|
||||
|
||||
### Reproduction Steps
|
||||
|
||||
Steps to reproduce this issue/view the area for improvement, e.g.:
|
||||
|
||||
1. Visit the UI page at `/ui/services`
|
||||
1. Click .... then click...etc.
|
||||
1. View error/area.
|
||||
|
||||
### Describe the solution you'd like
|
||||
|
||||
If this is an improvement rather than a bug, a clear and concise description
|
||||
of what you want to happen. How have you seen this problem solved in other
|
||||
UIs?
|
||||
|
||||
### Consul Version
|
||||
|
||||
This can be found either in the footer of the UI (Consul versions pre 1.10) or
|
||||
at the top of the help menu in the top right of the UI.
|
||||
|
||||
### Browser and Operating system details
|
||||
|
||||
Browser, Browser Version, OS, and any other information you can provide about the environment that may be relevant.
|
||||
|
||||
### Screengrabs / Web Inspector logs
|
||||
|
||||
If you think it's worthwhile, include appropriate screengrabs showing the
|
||||
error/area for improvement. Try to include the URL bar of the browser so we
|
||||
can see the current URL where the error manifests. Please be careful to
|
||||
obfuscate any sensitive information either in the URL or in the browser page
|
||||
itself.
|
||||
|
||||
**Share inspiration**
|
||||
How have you seen this problem solved well in other UIs?
|
||||
|
|
228 CHANGELOG.md
|
@ -1,5 +1,137 @@
|
|||
## UNRELEASED
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* Fix SOA record to return the proper domain when an alt domain is in use. [[GH-10431]](https://github.com/hashicorp/consul/pull/10431)
|
||||
|
||||
## 1.11.0-alpha (September 16, 2021)
|
||||
|
||||
SECURITY:
|
||||
|
||||
* rpc: authorize raft requests [CVE-2021-37219](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37219) [[GH-10925](https://github.com/hashicorp/consul/issues/10925)]
|
||||
|
||||
FEATURES:
|
||||
|
||||
* config: add agent config flag for enterprise clients to indicate they wish to join a particular partition [[GH-10572](https://github.com/hashicorp/consul/issues/10572)]
|
||||
* connect: include optional partition prefixes in SPIFFE identifiers [[GH-10507](https://github.com/hashicorp/consul/issues/10507)]
|
||||
* partitions: **(Enterprise only)** Adds admin partitions, a new feature to enhance Consul's multitenancy capabilities.
|
||||
* ui: Add UI support to use Vault as an external source for a service [[GH-10769](https://github.com/hashicorp/consul/issues/10769)]
|
||||
* ui: Adds a copy button to each composite row in tokens list page, if Secret ID returns an actual ID [[GH-10735](https://github.com/hashicorp/consul/issues/10735)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* acl: the replication routine now reports the last error message. [[GH-10612](https://github.com/hashicorp/consul/issues/10612)]
|
||||
* api: Enable setting query options on agent health and maintenance endpoints. [[GH-10691](https://github.com/hashicorp/consul/issues/10691)]
|
||||
* checks: add failures_before_warning setting for interval checks. [[GH-10969](https://github.com/hashicorp/consul/issues/10969)]
|
||||
* config: **(Enterprise Only)** Allow specifying permission mode for audit logs. [[GH-10732](https://github.com/hashicorp/consul/issues/10732)]
|
||||
* config: add `dns_config.recursor_strategy` flag to control the order which DNS recursors are queried [[GH-10611](https://github.com/hashicorp/consul/issues/10611)]
|
||||
* connect/ca: cease including the common name field in generated x509 non-CA certificates [[GH-10424](https://github.com/hashicorp/consul/issues/10424)]
|
||||
* connect: Support manipulating HTTP headers in the mesh. [[GH-10613](https://github.com/hashicorp/consul/issues/10613)]
|
||||
* connect: update supported envoy versions to 1.18.4, 1.17.4, 1.16.5 [[GH-10961](https://github.com/hashicorp/consul/issues/10961)]
|
||||
* debug: Add a new /v1/agent/metrics/stream API endpoint for streaming of metrics [[GH-10399](https://github.com/hashicorp/consul/issues/10399)]
|
||||
* debug: rename cluster capture target to members, to be more consistent with the terms used by the API. [[GH-10804](https://github.com/hashicorp/consul/issues/10804)]
|
||||
* structs: prohibit config entries from referencing more than one partition at a time [[GH-10478](https://github.com/hashicorp/consul/issues/10478)]
|
||||
* telemetry: add a new `agent.tls.cert.expiry` metric for tracking when the Agent TLS certificate expires. [[GH-10768](https://github.com/hashicorp/consul/issues/10768)]
|
||||
* telemetry: add a new `mesh.active-root-ca.expiry` metric for tracking when the root certificate expires. [[GH-9924](https://github.com/hashicorp/consul/issues/9924)]
|
||||
|
||||
DEPRECATIONS:
|
||||
|
||||
* config: the `ports.grpc` and `addresses.grpc` configuration settings have been renamed to `ports.xds` and `addresses.xds` to better match their function. [[GH-10588](https://github.com/hashicorp/consul/issues/10588)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* api: Fix default values used for optional fields in autopilot configuration update (POST to `/v1/operator/autopilot/configuration`) [[GH-10558](https://github.com/hashicorp/consul/issues/10558)] [[GH-10559](https://github.com/hashicorp/consul/issues/10559)]
|
||||
* api: Revert early out errors from license APIs to allow v1.10+ clients to
|
||||
manage licenses on older servers [[GH-10952](https://github.com/hashicorp/consul/issues/10952)]
|
||||
* check root and intermediate CA expiry before using it to sign a leaf certificate. [[GH-10500](https://github.com/hashicorp/consul/issues/10500)]
|
||||
* connect/ca: ensure edits to the key type/bits for the connect builtin CA will regenerate the roots [[GH-10330](https://github.com/hashicorp/consul/issues/10330)]
|
||||
* connect/ca: require new vault mount points when updating the key type/bits for the vault connect CA provider [[GH-10331](https://github.com/hashicorp/consul/issues/10331)]
|
||||
* dns: return an empty answer when asked for an addr DNS record with a type other than A and AAAA. [[GH-10401](https://github.com/hashicorp/consul/issues/10401)]
|
||||
* tls: consider presented intermediates during server connection tls handshake. [[GH-10964](https://github.com/hashicorp/consul/issues/10964)]
|
||||
* use the MaxQueryTime instead of RPCHoldTimeout for blocking RPC queries
|
||||
[[GH-8978](https://github.com/hashicorp/consul/pull/8978)]. [[GH-10299](https://github.com/hashicorp/consul/issues/10299)]
|
||||
|
||||
## 1.10.3 (September 27, 2021)
|
||||
|
||||
FEATURES:
|
||||
|
||||
* sso/oidc: **(Enterprise only)** Add support for providing acr_values in OIDC auth flow [[GH-11026](https://github.com/hashicorp/consul/issues/11026)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* audit-logging: **(Enterprise Only)** Audit logs will now include select HTTP headers in each logs payload. Those headers are: `Forwarded`, `Via`, `X-Forwarded-For`, `X-Forwarded-Host` and `X-Forwarded-Proto`. [[GH-11107](https://github.com/hashicorp/consul/issues/11107)]
|
||||
* connect: update supported envoy versions to 1.18.4, 1.17.4, 1.16.5 [[GH-10961](https://github.com/hashicorp/consul/issues/10961)]
|
||||
* telemetry: Add new metrics for the count of KV entries in the Consul store. [[GH-11090](https://github.com/hashicorp/consul/issues/11090)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* api: Revert early out errors from license APIs to allow v1.10+ clients to
|
||||
manage licenses on older servers [[GH-10952](https://github.com/hashicorp/consul/issues/10952)]
|
||||
* connect: Fix upstream listener escape hatch for prepared queries [[GH-11109](https://github.com/hashicorp/consul/issues/11109)]
|
||||
* grpc: strip local ACL tokens from RPCs during forwarding if crossing datacenters [[GH-11099](https://github.com/hashicorp/consul/issues/11099)]
|
||||
* tls: consider presented intermediates during server connection tls handshake. [[GH-10964](https://github.com/hashicorp/consul/issues/10964)]
|
||||
* ui: **(Enterprise Only)** Fix saving intentions with namespaced source/destination [[GH-11095](https://github.com/hashicorp/consul/issues/11095)]
|
||||
* ui: Don't show a CRD warning for read-only intentions [[GH-11149](https://github.com/hashicorp/consul/issues/11149)]
|
||||
* ui: Ensure routing-config page blocking queries are cleaned up correctly [[GH-10915](https://github.com/hashicorp/consul/issues/10915)]
|
||||
* ui: Ignore reported permissions for KV area meaning the KV is always enabled
|
||||
for both read/write access if the HTTP API allows. [[GH-10916](https://github.com/hashicorp/consul/issues/10916)]
|
||||
* ui: hide create button for policies/roles/namespace if the user's token has no write permissions to those areas [[GH-10914](https://github.com/hashicorp/consul/issues/10914)]
|
||||
* xds: ensure the active streams counters are 64 bit aligned on 32 bit systems [[GH-11085](https://github.com/hashicorp/consul/issues/11085)]
|
||||
* xds: fixed a bug where Envoy sidecars could enter a state where they failed to receive xds updates from Consul [[GH-10987](https://github.com/hashicorp/consul/issues/10987)]
|
||||
|
||||
## 1.10.2 (August 27, 2021)
|
||||
|
||||
KNOWN ISSUES:
|
||||
|
||||
* tls: The fix for [CVE-2021-37219](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37219) introduced an issue that could prevent TLS certificate validation when intermediate CA certificates used to sign server certificates are transmitted in the TLS session but are not present in all Consul servers' configured CA certificates. This has the effect of preventing Raft RPCs between the affected servers. As a workaround until the next patch releases, ensure that all intermediate CA certificates are present in all Consul server configurations prior to using certificates that they have signed.
|
||||
|
||||
SECURITY:
|
||||
|
||||
* rpc: authorize raft requests [CVE-2021-37219](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37219) [[GH-10931](https://github.com/hashicorp/consul/issues/10931)]
|
||||
|
||||
FEATURES:
|
||||
|
||||
* connect: add support for unix domain socket config via API/CLI [[GH-10758](https://github.com/hashicorp/consul/issues/10758)]
|
||||
* ui: Adding support in Topology view for Routing Configurations [[GH-10872](https://github.com/hashicorp/consul/issues/10872)]
|
||||
* ui: Create Routing Configurations route and page [[GH-10835](https://github.com/hashicorp/consul/issues/10835)]
|
||||
* ui: Splitting up the socket mode and socket path in the Upstreams Instance List [[GH-10581](https://github.com/hashicorp/consul/issues/10581)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* areas: **(Enterprise only)** Add 15s timeout to opening streams over pooled connections.
|
||||
* areas: **(Enterprise only)** Apply backpressure to area gossip packet ingestion when more than 512 packets are waiting to be ingested.
|
||||
* areas: **(Enterprise only)** Make implementation of WriteToAddress non-blocking to avoid slowing down memberlist's packetListen routine.
|
||||
* checks: Add Interval and Timeout to API response. [[GH-10717](https://github.com/hashicorp/consul/issues/10717)]
|
||||
* ci: make changelog-checker only validate PR number against main base [[GH-10844](https://github.com/hashicorp/consul/issues/10844)]
|
||||
* ci: upgrade to use Go 1.16.7 [[GH-10856](https://github.com/hashicorp/consul/issues/10856)]
|
||||
* deps: update to gogo/protobuf v1.3.2 [[GH-10813](https://github.com/hashicorp/consul/issues/10813)]
|
||||
* proxycfg: log correlation IDs for the proxy configuration snapshot's blocking queries. [[GH-10689](https://github.com/hashicorp/consul/issues/10689)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* acl: fixes a bug that prevented the default user token from being used to authorize service registration for connect proxies. [[GH-10824](https://github.com/hashicorp/consul/issues/10824)]
|
||||
* ca: fixed a bug where, if the CA provider fails, the provider state is stuck in the `INITIALIZING` state. [[GH-10630](https://github.com/hashicorp/consul/issues/10630)]
|
||||
* ca: report an error when setting the CA config fails because of an index check. [[GH-10657](https://github.com/hashicorp/consul/issues/10657)]
|
||||
* cli: Ensure the metrics endpoint is accessible when Envoy is configured to use
|
||||
a non-default admin bind address. [[GH-10757](https://github.com/hashicorp/consul/issues/10757)]
|
||||
* cli: Fix a bug which prevented initializing a watch when using a namespaced
|
||||
token. [[GH-10795](https://github.com/hashicorp/consul/issues/10795)]
|
||||
* cli: Fix broken KV import command on Windows. [[GH-10820](https://github.com/hashicorp/consul/issues/10820)]
|
||||
* connect: ensure SAN validation for prepared queries validates against all possible prepared query targets [[GH-10873](https://github.com/hashicorp/consul/issues/10873)]
|
||||
* connect: fix crash that would result from multiple instances of a service resolving service config on a single agent. [[GH-10647](https://github.com/hashicorp/consul/issues/10647)]
|
||||
* connect: proxy upstreams inherit namespace from service if none are defined. [[GH-10688](https://github.com/hashicorp/consul/issues/10688)]
|
||||
* dns: fixes a bug with edns truncation where the response could exceed the size limit in some cases. [[GH-10009](https://github.com/hashicorp/consul/issues/10009)]
|
||||
* grpc: ensure that streaming gRPC requests work over mesh gateway based wan federation [[GH-10838](https://github.com/hashicorp/consul/issues/10838)]
|
||||
* http: log cancelled requests as such at the INFO level, instead of logging them as errored requests. [[GH-10707](https://github.com/hashicorp/consul/issues/10707)]
|
||||
* streaming: set the default wait timeout for health queries [[GH-10707](https://github.com/hashicorp/consul/issues/10707)]
|
||||
* txn: fixes Txn.Apply to properly authorize service registrations. [[GH-10798](https://github.com/hashicorp/consul/issues/10798)]
|
||||
* ui: Disabling policy form fields for users with 'read' permissions [[GH-10902](https://github.com/hashicorp/consul/issues/10902)]
|
||||
* ui: Fix Health Checks in K/V form Lock Sessions Info section [[GH-10767](https://github.com/hashicorp/consul/issues/10767)]
|
||||
* ui: Fix dropdown option duplication in the new intentions form [[GH-10706](https://github.com/hashicorp/consul/issues/10706)]
|
||||
* ui: Hide all metrics for ingress gateway services [[GH-10858](https://github.com/hashicorp/consul/issues/10858)]
|
||||
* ui: Properly encode non-URL safe characters in OIDC responses [[GH-10901](https://github.com/hashicorp/consul/issues/10901)]
|
||||
* ui: fixes a bug with some service failovers not showing the routing tab visualization [[GH-10913](https://github.com/hashicorp/consul/issues/10913)]
|
||||
|
||||
## 1.10.1 (July 15, 2021)
|
||||
|
||||
KNOWN ISSUES:
|
||||
|
@ -149,6 +281,57 @@ NOTES:
|
|||
|
||||
* legal: **(Enterprise only)** Enterprise binary downloads will now include a copy of the EULA and Terms of Evaluation in the zip archive
|
||||
|
||||
## 1.9.10 (September 27, 2021)
|
||||
|
||||
FEATURES:
|
||||
|
||||
* sso/oidc: **(Enterprise only)** Add support for providing acr_values in OIDC auth flow [[GH-11026](https://github.com/hashicorp/consul/issues/11026)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* audit-logging: **(Enterprise Only)** Audit logs will now include select HTTP headers in each logs payload. Those headers are: `Forwarded`, `Via`, `X-Forwarded-For`, `X-Forwarded-Host` and `X-Forwarded-Proto`. [[GH-11107](https://github.com/hashicorp/consul/issues/11107)]
|
||||
* connect: update supported envoy versions to 1.16.5 [[GH-10961](https://github.com/hashicorp/consul/issues/10961)]
|
||||
* telemetry: Add new metrics for the count of KV entries in the Consul store. [[GH-11090](https://github.com/hashicorp/consul/issues/11090)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* tls: consider presented intermediates during server connection tls handshake. [[GH-10964](https://github.com/hashicorp/consul/issues/10964)]
|
||||
* ui: **(Enterprise Only)** Fix saving intentions with namespaced source/destination [[GH-11095](https://github.com/hashicorp/consul/issues/11095)]
|
||||
|
||||
## 1.9.9 (August 27, 2021)
|
||||
|
||||
KNOWN ISSUES:
|
||||
|
||||
* tls: The fix for [CVE-2021-37219](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37219) introduced an issue that could prevent TLS certificate validation when intermediate CA certificates used to sign server certificates are transmitted in the TLS session but are not present in all Consul servers' configured CA certificates. This has the effect of preventing Raft RPCs between the affected servers. As a workaround until the next patch releases, ensure that all intermediate CA certificates are present in all Consul server configurations prior to using certificates that they have signed.
|
||||
|
||||
SECURITY:
|
||||
|
||||
* rpc: authorize raft requests [CVE-2021-37219](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37219) [[GH-10932](https://github.com/hashicorp/consul/issues/10932)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* areas: **(Enterprise only)** Add 15s timeout to opening streams over pooled connections.
|
||||
* areas: **(Enterprise only)** Apply backpressure to area gossip packet ingestion when more than 512 packets are waiting to be ingested.
|
||||
* areas: **(Enterprise only)** Make implementation of WriteToAddress non-blocking to avoid slowing down memberlist's packetListen routine.
|
||||
* deps: update to gogo/protobuf v1.3.2 [[GH-10813](https://github.com/hashicorp/consul/issues/10813)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* acl: fixes a bug that prevented the default user token from being used to authorize service registration for connect proxies. [[GH-10824](https://github.com/hashicorp/consul/issues/10824)]
|
||||
* ca: fixed a bug where, if the CA provider fails, the provider state is stuck in the `INITIALIZING` state. [[GH-10630](https://github.com/hashicorp/consul/issues/10630)]
|
||||
* ca: report an error when setting the CA config fails because of an index check. [[GH-10657](https://github.com/hashicorp/consul/issues/10657)]
|
||||
* cli: Ensure the metrics endpoint is accessible when Envoy is configured to use
|
||||
a non-default admin bind address. [[GH-10757](https://github.com/hashicorp/consul/issues/10757)]
|
||||
* cli: Fix a bug which prevented initializing a watch when using a namespaced
|
||||
token. [[GH-10795](https://github.com/hashicorp/consul/issues/10795)]
|
||||
* connect: proxy upstreams inherit namespace from service if none are defined. [[GH-10688](https://github.com/hashicorp/consul/issues/10688)]
|
||||
* dns: fixes a bug with edns truncation where the response could exceed the size limit in some cases. [[GH-10009](https://github.com/hashicorp/consul/issues/10009)]
|
||||
* txn: fixes Txn.Apply to properly authorize service registrations. [[GH-10798](https://github.com/hashicorp/consul/issues/10798)]
|
||||
* ui: Fix dropdown option duplication in the new intentions form [[GH-10706](https://github.com/hashicorp/consul/issues/10706)]
|
||||
* ui: Hide all metrics for ingress gateway services [[GH-10858](https://github.com/hashicorp/consul/issues/10858)]
|
||||
* ui: Properly encode non-URL safe characters in OIDC responses [[GH-10901](https://github.com/hashicorp/consul/issues/10901)]
|
||||
* ui: fixes a bug with some service failovers not showing the routing tab visualization [[GH-10913](https://github.com/hashicorp/consul/issues/10913)]
|
||||
|
||||
## 1.9.8 (July 15, 2021)
|
||||
|
||||
SECURITY:
|
||||
|
@ -456,6 +639,51 @@ BUG FIXES:
|
|||
* telemetry: fixed a bug that caused logs to be flooded with `[WARN] agent.router: Non-server in server-only area` [[GH-8685](https://github.com/hashicorp/consul/issues/8685)]
|
||||
* ui: show correct datacenter for gateways [[GH-8704](https://github.com/hashicorp/consul/issues/8704)]
|
||||
|
||||
## 1.8.16 (September 27, 2021)
|
||||
|
||||
FEATURES:
|
||||
|
||||
* sso/oidc: **(Enterprise only)** Add support for providing acr_values in OIDC auth flow [[GH-11026](https://github.com/hashicorp/consul/issues/11026)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* audit-logging: **(Enterprise Only)** Audit logs will now include select HTTP headers in each logs payload. Those headers are: `Forwarded`, `Via`, `X-Forwarded-For`, `X-Forwarded-Host` and `X-Forwarded-Proto`. [[GH-11107](https://github.com/hashicorp/consul/issues/11107)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* tls: consider presented intermediates during server connection tls handshake. [[GH-10964](https://github.com/hashicorp/consul/issues/10964)]
|
||||
* ui: **(Enterprise Only)** Fixes a visual issue where namespaces would "double up" in the Source/Destination select menu when creating/editing intentions [[GH-11102](https://github.com/hashicorp/consul/issues/11102)]
|
||||
|
||||
## 1.8.15 (August 27, 2021)
|
||||
|
||||
KNOWN ISSUES:
|
||||
|
||||
* tls: The fix for [CVE-2021-37219](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37219) introduced an issue that could prevent TLS certificate validation when intermediate CA certificates used to sign server certificates are transmitted in the TLS session but are not present in all Consul servers' configured CA certificates. This has the effect of preventing Raft RPCs between the affected servers. As a workaround until the next patch releases, ensure that all intermediate CA certificates are present in all Consul server configurations prior to using certificates that they have signed.
|
||||
|
||||
SECURITY:
|
||||
|
||||
* rpc: authorize raft requests [CVE-2021-37219](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37219) [[GH-10933](https://github.com/hashicorp/consul/issues/10933)]
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* areas: **(Enterprise only)** Add 15s timeout to opening streams over pooled connections.
|
||||
* areas: **(Enterprise only)** Apply backpressure to area gossip packet ingestion when more than 512 packets are waiting to be ingested.
|
||||
* areas: **(Enterprise only)** Make implementation of WriteToAddress non-blocking to avoid slowing down memberlist's packetListen routine.
|
||||
* deps: update to gogo/protobuf v1.3.2 [[GH-10813](https://github.com/hashicorp/consul/issues/10813)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* acl: fixes a bug that prevented the default user token from being used to authorize service registration for connect proxies. [[GH-10824](https://github.com/hashicorp/consul/issues/10824)]
|
||||
* ca: fixed a bug where, if the CA provider fails, the provider state is stuck in the `INITIALIZING` state. [[GH-10630](https://github.com/hashicorp/consul/issues/10630)]
|
||||
* ca: report an error when setting the CA config fails because of an index check. [[GH-10657](https://github.com/hashicorp/consul/issues/10657)]
|
||||
* cli: Ensure the metrics endpoint is accessible when Envoy is configured to use
|
||||
a non-default admin bind address. [[GH-10757](https://github.com/hashicorp/consul/issues/10757)]
|
||||
* cli: Fix a bug which prevented initializing a watch when using a namespaced
|
||||
token. [[GH-10795](https://github.com/hashicorp/consul/issues/10795)]
|
||||
* connect: proxy upstreams inherit namespace from service if none are defined. [[GH-10688](https://github.com/hashicorp/consul/issues/10688)]
|
||||
* dns: fixes a bug with edns truncation where the response could exceed the size limit in some cases. [[GH-10009](https://github.com/hashicorp/consul/issues/10009)]
|
||||
* txn: fixes Txn.Apply to properly authorize service registrations. [[GH-10798](https://github.com/hashicorp/consul/issues/10798)]
|
||||
|
||||
## 1.8.14 (July 15, 2021)
|
||||
|
||||
SECURITY:
|
||||
|
|
|
@ -1 +1 @@
|
|||
Moved to [contributing/INTERNALS.md].
|
||||
Moved to [docs/README.md](./docs/README.md).
|
||||
|
|
16 README.md
|
@ -29,13 +29,14 @@ Consul provides several key features:
|
|||
dynamic configuration, feature flagging, coordination, leader election and
|
||||
more. The simple HTTP API makes it easy to use anywhere.
|
||||
|
||||
Consul runs on Linux, macOS, FreeBSD, Solaris, and Windows. A commercial
|
||||
version called [Consul Enterprise](https://www.hashicorp.com/products/consul)
|
||||
is also available.
|
||||
Consul runs on Linux, macOS, FreeBSD, Solaris, and Windows and includes an
|
||||
optional [browser-based UI](https://demo.consul.io). A commercial version
|
||||
called [Consul Enterprise](https://www.hashicorp.com/products/consul) is also
|
||||
available.
|
||||
|
||||
**Please note**: We take Consul's security and our users' trust very seriously. If you
|
||||
believe you have found a security issue in Consul, please [responsibly disclose](https://www.hashicorp.com/security#vulnerability-reporting) by
|
||||
contacting us at security@hashicorp.com.
|
||||
believe you have found a security issue in Consul, please [responsibly disclose](https://www.hashicorp.com/security#vulnerability-reporting)
|
||||
by contacting us at security@hashicorp.com.
|
||||
|
||||
## Quick Start
|
||||
|
||||
|
@ -54,4 +55,7 @@ https://www.consul.io/docs
|
|||
|
||||
## Contributing
|
||||
|
||||
Thank you for your interest in contributing! Please refer to [CONTRIBUTING.md](https://github.com/hashicorp/consul/blob/main/.github/CONTRIBUTING.md) for guidance.
|
||||
Thank you for your interest in contributing! Please refer to [CONTRIBUTING.md](https://github.com/hashicorp/consul/blob/main/.github/CONTRIBUTING.md)
|
||||
for guidance. For contributions specifically to the browser-based UI, please
|
||||
refer to the UI's [README.md](https://github.com/hashicorp/consul/blob/main/ui/packages/consul-ui/README.md)
|
||||
for guidance.
|
||||
|
|
|
@ -2,6 +2,8 @@
|
|||
|
||||
package acl
|
||||
|
||||
const DefaultPartitionName = ""
|
||||
|
||||
type EnterpriseConfig struct {
|
||||
// no fields in OSS
|
||||
}
|
||||
|
|
385 acl/acl_test.go
|
@ -26,6 +26,7 @@ func legacyPolicy(policy *Policy) *Policy {
|
|||
PreparedQueryPrefixes: policy.PreparedQueries,
|
||||
Keyring: policy.Keyring,
|
||||
Operator: policy.Operator,
|
||||
Mesh: policy.Mesh,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -108,6 +109,14 @@ func checkAllowNodeWrite(t *testing.T, authz Authorizer, prefix string, entCtx *
|
|||
require.Equal(t, Allow, authz.NodeWrite(prefix, entCtx))
|
||||
}
|
||||
|
||||
func checkAllowMeshRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Allow, authz.MeshRead(entCtx))
|
||||
}
|
||||
|
||||
func checkAllowMeshWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Allow, authz.MeshWrite(entCtx))
|
||||
}
|
||||
|
||||
func checkAllowOperatorRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Allow, authz.OperatorRead(entCtx))
|
||||
}
|
||||
|
@ -220,6 +229,14 @@ func checkDenyNodeWrite(t *testing.T, authz Authorizer, prefix string, entCtx *A
|
|||
require.Equal(t, Deny, authz.NodeWrite(prefix, entCtx))
|
||||
}
|
||||
|
||||
func checkDenyMeshRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Deny, authz.MeshRead(entCtx))
|
||||
}
|
||||
|
||||
func checkDenyMeshWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Deny, authz.MeshWrite(entCtx))
|
||||
}
|
||||
|
||||
func checkDenyOperatorRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Deny, authz.OperatorRead(entCtx))
|
||||
}
|
||||
|
@ -332,6 +349,14 @@ func checkDefaultNodeWrite(t *testing.T, authz Authorizer, prefix string, entCtx
|
|||
require.Equal(t, Default, authz.NodeWrite(prefix, entCtx))
|
||||
}
|
||||
|
||||
func checkDefaultMeshRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Default, authz.MeshRead(entCtx))
|
||||
}
|
||||
|
||||
func checkDefaultMeshWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Default, authz.MeshWrite(entCtx))
|
||||
}
|
||||
|
||||
func checkDefaultOperatorRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Default, authz.OperatorRead(entCtx))
|
||||
}
|
||||
|
@ -407,6 +432,8 @@ func TestACL(t *testing.T) {
|
|||
{name: "DenyNodeRead", check: checkDenyNodeRead},
|
||||
{name: "DenyNodeReadAll", check: checkDenyNodeReadAll},
|
||||
{name: "DenyNodeWrite", check: checkDenyNodeWrite},
|
||||
{name: "DenyMeshRead", check: checkDenyMeshRead},
|
||||
{name: "DenyMeshWrite", check: checkDenyMeshWrite},
|
||||
{name: "DenyOperatorRead", check: checkDenyOperatorRead},
|
||||
{name: "DenyOperatorWrite", check: checkDenyOperatorWrite},
|
||||
{name: "DenyPreparedQueryRead", check: checkDenyPreparedQueryRead},
|
||||
|
@ -439,6 +466,8 @@ func TestACL(t *testing.T) {
|
|||
{name: "AllowNodeRead", check: checkAllowNodeRead},
|
||||
{name: "AllowNodeReadAll", check: checkAllowNodeReadAll},
|
||||
{name: "AllowNodeWrite", check: checkAllowNodeWrite},
|
||||
{name: "AllowMeshRead", check: checkAllowMeshRead},
|
||||
{name: "AllowMeshWrite", check: checkAllowMeshWrite},
|
||||
{name: "AllowOperatorRead", check: checkAllowOperatorRead},
|
||||
{name: "AllowOperatorWrite", check: checkAllowOperatorWrite},
|
||||
{name: "AllowPreparedQueryRead", check: checkAllowPreparedQueryRead},
|
||||
|
@ -471,6 +500,8 @@ func TestACL(t *testing.T) {
|
|||
{name: "AllowNodeRead", check: checkAllowNodeRead},
|
||||
{name: "AllowNodeReadAll", check: checkAllowNodeReadAll},
|
||||
{name: "AllowNodeWrite", check: checkAllowNodeWrite},
|
||||
{name: "AllowMeshRead", check: checkAllowMeshRead},
|
||||
{name: "AllowMeshWrite", check: checkAllowMeshWrite},
|
||||
{name: "AllowOperatorRead", check: checkAllowOperatorRead},
|
||||
{name: "AllowOperatorWrite", check: checkAllowOperatorWrite},
|
||||
{name: "AllowPreparedQueryRead", check: checkAllowPreparedQueryRead},
|
||||
|
@ -861,6 +892,319 @@ func TestACL(t *testing.T) {
|
|||
{name: "WriteDenied", check: checkDenyKeyringWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MeshDefaultAllowPolicyDeny",
|
||||
defaultPolicy: AllowAll(),
|
||||
policyStack: []*Policy{
|
||||
{
|
||||
PolicyRules: PolicyRules{
|
||||
Mesh: PolicyDeny,
|
||||
},
|
||||
},
|
||||
},
|
||||
checks: []aclCheck{
|
||||
{name: "ReadDenied", check: checkDenyMeshRead},
|
||||
{name: "WriteDenied", check: checkDenyMeshWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MeshDefaultAllowPolicyRead",
|
||||
defaultPolicy: AllowAll(),
|
||||
policyStack: []*Policy{
|
||||
{
|
||||
PolicyRules: PolicyRules{
|
||||
Mesh: PolicyRead,
|
||||
},
|
||||
},
|
||||
},
|
||||
checks: []aclCheck{
|
||||
{name: "ReadAllowed", check: checkAllowMeshRead},
|
||||
{name: "WriteDenied", check: checkDenyMeshWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MeshDefaultAllowPolicyWrite",
|
||||
defaultPolicy: AllowAll(),
|
||||
policyStack: []*Policy{
|
||||
{
|
||||
PolicyRules: PolicyRules{
|
||||
Mesh: PolicyWrite,
|
||||
},
|
||||
},
|
||||
},
|
||||
checks: []aclCheck{
|
||||
{name: "ReadAllowed", check: checkAllowMeshRead},
|
||||
{name: "WriteAllowed", check: checkAllowMeshWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MeshDefaultAllowPolicyNone",
|
||||
defaultPolicy: AllowAll(),
|
||||
policyStack: []*Policy{
|
||||
{},
|
||||
},
|
||||
checks: []aclCheck{
|
||||
{name: "ReadAllowed", check: checkAllowMeshRead},
|
||||
{name: "WriteAllowed", check: checkAllowMeshWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MeshDefaultDenyPolicyDeny",
|
||||
defaultPolicy: DenyAll(),
|
||||
policyStack: []*Policy{
|
||||
{
|
||||
PolicyRules: PolicyRules{
|
||||
Mesh: PolicyDeny,
|
||||
},
|
||||
},
|
||||
},
|
||||
checks: []aclCheck{
|
||||
{name: "ReadDenied", check: checkDenyMeshRead},
|
||||
{name: "WriteDenied", check: checkDenyMeshWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MeshDefaultDenyPolicyRead",
|
||||
defaultPolicy: DenyAll(),
|
||||
policyStack: []*Policy{
|
||||
{
|
||||
PolicyRules: PolicyRules{
|
||||
Mesh: PolicyRead,
|
||||
},
|
||||
},
|
||||
},
|
||||
checks: []aclCheck{
|
||||
{name: "ReadAllowed", check: checkAllowMeshRead},
|
||||
{name: "WriteDenied", check: checkDenyMeshWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MeshDefaultDenyPolicyWrite",
|
||||
defaultPolicy: DenyAll(),
|
||||
policyStack: []*Policy{
|
||||
{
|
||||
PolicyRules: PolicyRules{
|
||||
Mesh: PolicyWrite,
|
||||
},
|
||||
},
|
||||
},
|
||||
checks: []aclCheck{
|
||||
{name: "ReadAllowed", check: checkAllowMeshRead},
|
||||
{name: "WriteAllowed", check: checkAllowMeshWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MeshDefaultDenyPolicyNone",
|
||||
defaultPolicy: DenyAll(),
|
||||
policyStack: []*Policy{
|
||||
{},
|
||||
},
|
||||
checks: []aclCheck{
|
||||
{name: "ReadDenied", check: checkDenyMeshRead},
|
||||
{name: "WriteDenied", check: checkDenyMeshWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
// o:deny, m:deny = deny
|
||||
name: "MeshOperatorDenyPolicyDeny",
|
||||
defaultPolicy: nil, // test both
|
||||
policyStack: []*Policy{
|
||||
{
|
||||
PolicyRules: PolicyRules{
|
||||
Operator: PolicyDeny,
|
||||
Mesh: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:read, m:deny = deny
name: "MeshOperatorReadPolicyDeny",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
Mesh: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:write, m:deny = deny
name: "MeshOperatorWritePolicyDeny",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
Mesh: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:deny, m:read = read
name: "MeshOperatorDenyPolicyRead",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
Mesh: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:read, m:read = read
name: "MeshOperatorReadPolicyRead",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
Mesh: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:write, m:read = read
name: "MeshOperatorWritePolicyRead",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
Mesh: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:deny, m:write = write
name: "MeshOperatorDenyPolicyWrite",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
Mesh: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteAllowed", check: checkAllowMeshWrite},
},
},
{
// o:read, m:write = write
name: "MeshOperatorReadPolicyWrite",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
Mesh: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteAllowed", check: checkAllowMeshWrite},
},
},
{
// o:write, m:write = write
name: "MeshOperatorWritePolicyWrite",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
Mesh: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteAllowed", check: checkAllowMeshWrite},
},
},
{
// o:deny, m:<none> = deny
name: "MeshOperatorDenyPolicyNone",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyDeny,
},
},
},
checks: []aclCheck{
{name: "ReadDenied", check: checkDenyMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:read, m:<none> = read
name: "MeshOperatorReadPolicyNone",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyRead,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteDenied", check: checkDenyMeshWrite},
},
},
{
// o:write, m:<none> = write
name: "MeshOperatorWritePolicyNone",
defaultPolicy: nil, // test both
policyStack: []*Policy{
{
PolicyRules: PolicyRules{
Operator: PolicyWrite,
},
},
},
checks: []aclCheck{
{name: "ReadAllowed", check: checkAllowMeshRead},
{name: "WriteAllowed", check: checkAllowMeshWrite},
},
},
{
name: "OperatorDefaultAllowPolicyDeny",
defaultPolicy: AllowAll(),
@@ -2002,23 +2346,36 @@ func TestACL(t *testing.T) {
},
}

run := func(t *testing.T, tcase aclTest, defaultPolicy Authorizer) {
acl := defaultPolicy
for _, policy := range tcase.policyStack {
newACL, err := NewPolicyAuthorizerWithDefaults(acl, []*Policy{policy}, nil)
require.NoError(t, err)
acl = newACL
}

for _, check := range tcase.checks {
checkName := check.name
if check.prefix != "" {
checkName = fmt.Sprintf("%s.Prefix(%s)", checkName, check.prefix)
}
t.Run(checkName, func(t *testing.T) {
check.check(t, acl, check.prefix, nil)
})
}
}

for _, tcase := range tests {
t.Run(tcase.name, func(t *testing.T) {
acl := tcase.defaultPolicy
for _, policy := range tcase.policyStack {
newACL, err := NewPolicyAuthorizerWithDefaults(acl, []*Policy{policy}, nil)
require.NoError(t, err)
acl = newACL
}

for _, check := range tcase.checks {
checkName := check.name
if check.prefix != "" {
checkName = fmt.Sprintf("%s.Prefix(%s)", checkName, check.prefix)
}
t.Run(checkName, func(t *testing.T) {
check.check(t, acl, check.prefix, nil)
if tcase.defaultPolicy == nil {
t.Run("default-allow", func(t *testing.T) {
run(t, tcase, AllowAll())
})
t.Run("default-deny", func(t *testing.T) {
run(t, tcase, DenyAll())
})
} else {
run(t, tcase, tcase.defaultPolicy)
}
})
}
|
|
|
@ -45,6 +45,7 @@ const (
|
|||
ResourceKeyring Resource = "keyring"
|
||||
ResourceNode Resource = "node"
|
||||
ResourceOperator Resource = "operator"
|
||||
ResourceMesh Resource = "mesh"
|
||||
ResourceQuery Resource = "query"
|
||||
ResourceService Resource = "service"
|
||||
ResourceSession Resource = "session"
|
||||
|
@ -104,6 +105,14 @@ type Authorizer interface {
|
|||
// KeyringWrite determines if the keyring can be manipulated
|
||||
KeyringWrite(*AuthorizerContext) EnforcementDecision
|
||||
|
||||
// MeshRead determines if the read-only Consul mesh functions
|
||||
// can be used.
|
||||
MeshRead(*AuthorizerContext) EnforcementDecision
|
||||
|
||||
// MeshWrite determines if the state-changing Consul mesh
|
||||
// functions can be used.
|
||||
MeshWrite(*AuthorizerContext) EnforcementDecision
|
||||
|
||||
// NodeRead checks for permission to read (discover) a given node.
|
||||
NodeRead(string, *AuthorizerContext) EnforcementDecision
|
||||
|
||||
|
@@ -204,6 +213,13 @@ func Enforce(authz Authorizer, rsc Resource, segment string, access string, ctx
case "write":
return authz.KeyringWrite(ctx), nil
}
case ResourceMesh:
switch lowerAccess {
case "read":
return authz.MeshRead(ctx), nil
case "write":
return authz.MeshWrite(ctx), nil
}
case ResourceNode:
switch lowerAccess {
case "read":
|
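The `ResourceMesh` case above routes `read` and `write` access through the new `MeshRead`/`MeshWrite` methods. A minimal sketch of how a caller could drive this through `Enforce`; the tail of the `Enforce` signature (a `*AuthorizerContext` parameter and an `(EnforcementDecision, error)` return) is an assumption inferred from the hunk, not shown in this diff:

```go
// Sketch only: assumes Enforce accepts a *AuthorizerContext and returns
// (EnforcementDecision, error), matching the returns visible in the hunk above.
func canWriteMesh(authz Authorizer) (bool, error) {
	var ctx AuthorizerContext
	decision, err := Enforce(authz, ResourceMesh, "", "write", &ctx)
	if err != nil {
		return false, err
	}
	return decision == Allow, nil
}
```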
|
@@ -129,6 +129,16 @@ func (m *mockAuthorizer) NodeWrite(segment string, ctx *AuthorizerContext) Enfor
return ret.Get(0).(EnforcementDecision)
}

func (m *mockAuthorizer) MeshRead(ctx *AuthorizerContext) EnforcementDecision {
ret := m.Called(ctx)
return ret.Get(0).(EnforcementDecision)
}

func (m *mockAuthorizer) MeshWrite(ctx *AuthorizerContext) EnforcementDecision {
ret := m.Called(ctx)
return ret.Get(0).(EnforcementDecision)
}

// OperatorRead determines if the read-only Consul operator functions
// can be used.
func (m *mockAuthorizer) OperatorRead(ctx *AuthorizerContext) EnforcementDecision {
|
||||
|
|
|
@@ -145,6 +145,22 @@ func (c *ChainedAuthorizer) KeyringWrite(entCtx *AuthorizerContext) EnforcementD
})
}

// MeshRead determines if the read-only Consul mesh functions
// can be used.
func (c *ChainedAuthorizer) MeshRead(entCtx *AuthorizerContext) EnforcementDecision {
return c.executeChain(func(authz Authorizer) EnforcementDecision {
return authz.MeshRead(entCtx)
})
}

// MeshWrite determines if the state-changing Consul mesh
// functions can be used.
func (c *ChainedAuthorizer) MeshWrite(entCtx *AuthorizerContext) EnforcementDecision {
return c.executeChain(func(authz Authorizer) EnforcementDecision {
return authz.MeshWrite(entCtx)
})
}

// NodeRead checks for permission to read (discover) a given node.
func (c *ChainedAuthorizer) NodeRead(node string, entCtx *AuthorizerContext) EnforcementDecision {
return c.executeChain(func(authz Authorizer) EnforcementDecision {
|
||||
|
|
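With these methods the chained authorizer can answer mesh queries the same way it already answers node or operator queries: each link is consulted in order until one returns a non-`Default` decision. A hedged sketch; the `NewChainedAuthorizer` constructor name is an assumption and is not part of this diff:

```go
// Hypothetical wiring, assuming a NewChainedAuthorizer([]Authorizer) constructor
// exists in this package: AllowAll() answers first, so the mesh write is allowed
// and the trailing DenyAll() fallback is never consulted.
func chainedMeshExample() EnforcementDecision {
	chained := NewChainedAuthorizer([]Authorizer{AllowAll(), DenyAll()})
	return chained.MeshWrite(nil) // Allow
}
```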
|
@ -62,6 +62,12 @@ func (authz testAuthorizer) NodeReadAll(*AuthorizerContext) EnforcementDecision
|
|||
func (authz testAuthorizer) NodeWrite(string, *AuthorizerContext) EnforcementDecision {
|
||||
return EnforcementDecision(authz)
|
||||
}
|
||||
func (authz testAuthorizer) MeshRead(*AuthorizerContext) EnforcementDecision {
|
||||
return EnforcementDecision(authz)
|
||||
}
|
||||
func (authz testAuthorizer) MeshWrite(*AuthorizerContext) EnforcementDecision {
|
||||
return EnforcementDecision(authz)
|
||||
}
|
||||
func (authz testAuthorizer) OperatorRead(*AuthorizerContext) EnforcementDecision {
|
||||
return EnforcementDecision(authz)
|
||||
}
|
||||
|
@ -113,6 +119,8 @@ func TestChainedAuthorizer(t *testing.T) {
|
|||
checkDenyKeyWritePrefix(t, authz, "foo", nil)
|
||||
checkDenyNodeRead(t, authz, "foo", nil)
|
||||
checkDenyNodeWrite(t, authz, "foo", nil)
|
||||
checkDenyMeshRead(t, authz, "foo", nil)
|
||||
checkDenyMeshWrite(t, authz, "foo", nil)
|
||||
checkDenyOperatorRead(t, authz, "foo", nil)
|
||||
checkDenyOperatorWrite(t, authz, "foo", nil)
|
||||
checkDenyPreparedQueryRead(t, authz, "foo", nil)
|
||||
|
@ -143,6 +151,8 @@ func TestChainedAuthorizer(t *testing.T) {
|
|||
checkDenyKeyWritePrefix(t, authz, "foo", nil)
|
||||
checkDenyNodeRead(t, authz, "foo", nil)
|
||||
checkDenyNodeWrite(t, authz, "foo", nil)
|
||||
checkDenyMeshRead(t, authz, "foo", nil)
|
||||
checkDenyMeshWrite(t, authz, "foo", nil)
|
||||
checkDenyOperatorRead(t, authz, "foo", nil)
|
||||
checkDenyOperatorWrite(t, authz, "foo", nil)
|
||||
checkDenyPreparedQueryRead(t, authz, "foo", nil)
|
||||
|
@ -173,6 +183,8 @@ func TestChainedAuthorizer(t *testing.T) {
|
|||
checkAllowKeyWritePrefix(t, authz, "foo", nil)
|
||||
checkAllowNodeRead(t, authz, "foo", nil)
|
||||
checkAllowNodeWrite(t, authz, "foo", nil)
|
||||
checkAllowMeshRead(t, authz, "foo", nil)
|
||||
checkAllowMeshWrite(t, authz, "foo", nil)
|
||||
checkAllowOperatorRead(t, authz, "foo", nil)
|
||||
checkAllowOperatorWrite(t, authz, "foo", nil)
|
||||
checkAllowPreparedQueryRead(t, authz, "foo", nil)
|
||||
|
@ -203,6 +215,8 @@ func TestChainedAuthorizer(t *testing.T) {
|
|||
checkDenyKeyWritePrefix(t, authz, "foo", nil)
|
||||
checkDenyNodeRead(t, authz, "foo", nil)
|
||||
checkDenyNodeWrite(t, authz, "foo", nil)
|
||||
checkDenyMeshRead(t, authz, "foo", nil)
|
||||
checkDenyMeshWrite(t, authz, "foo", nil)
|
||||
checkDenyOperatorRead(t, authz, "foo", nil)
|
||||
checkDenyOperatorWrite(t, authz, "foo", nil)
|
||||
checkDenyPreparedQueryRead(t, authz, "foo", nil)
|
||||
|
@ -231,6 +245,8 @@ func TestChainedAuthorizer(t *testing.T) {
|
|||
checkAllowKeyWritePrefix(t, authz, "foo", nil)
|
||||
checkAllowNodeRead(t, authz, "foo", nil)
|
||||
checkAllowNodeWrite(t, authz, "foo", nil)
|
||||
checkAllowMeshRead(t, authz, "foo", nil)
|
||||
checkAllowMeshWrite(t, authz, "foo", nil)
|
||||
checkAllowOperatorRead(t, authz, "foo", nil)
|
||||
checkAllowOperatorWrite(t, authz, "foo", nil)
|
||||
checkAllowPreparedQueryRead(t, authz, "foo", nil)
|
||||
|
|
|
@ -84,6 +84,7 @@ type PolicyRules struct {
|
|||
PreparedQueryPrefixes []*PreparedQueryRule `hcl:"query_prefix,expand"`
|
||||
Keyring string `hcl:"keyring"`
|
||||
Operator string `hcl:"operator"`
|
||||
Mesh string `hcl:"mesh"`
|
||||
}
|
||||
|
||||
// Policy is used to represent the policy specified by an ACL configuration.
|
||||
|
@@ -285,6 +286,11 @@ func (pr *PolicyRules) Validate(conf *Config) error {
return fmt.Errorf("Invalid operator policy: %#v", pr.Operator)
}

// Validate the mesh policy - this one is allowed to be empty
if pr.Mesh != "" && !isPolicyValid(pr.Mesh, false) {
return fmt.Errorf("Invalid mesh policy: %#v", pr.Mesh)
}

return nil
}
|
||||
|
||||
|
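A short sketch of what the new validation accepts and rejects: an empty `Mesh` rule passes, while anything other than the usual policy strings is rejected with the "Invalid mesh policy" error above. Passing a nil `*Config` here is purely illustrative and assumed to be tolerated:

```go
// Illustration only: Mesh may be empty, or one of the normal policy values;
// "read-write" is deliberately invalid and should trip the mesh validation.
func meshRuleValidationSketch() error {
	rules := PolicyRules{
		Operator: PolicyRead,
		Mesh:     "read-write", // invalid on purpose
	}
	return rules.Validate(nil) // expected: Invalid mesh policy: "read-write"
}
```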
@ -318,6 +324,7 @@ func parseLegacy(rules string, conf *Config) (*Policy, error) {
|
|||
PreparedQueries []*PreparedQueryRule `hcl:"query,expand"`
|
||||
Keyring string `hcl:"keyring"`
|
||||
Operator string `hcl:"operator"`
|
||||
// NOTE: mesh resources not supported here
|
||||
}
|
||||
|
||||
lp := &LegacyPolicy{}
|
||||
|
@ -446,52 +453,6 @@ func NewPolicyFromSource(id string, revision uint64, rules string, syntax Syntax
|
|||
return policy, err
|
||||
}
|
||||
|
||||
func (policy *Policy) ConvertToLegacy() *Policy {
|
||||
converted := &Policy{
|
||||
ID: policy.ID,
|
||||
Revision: policy.Revision,
|
||||
PolicyRules: PolicyRules{
|
||||
ACL: policy.ACL,
|
||||
Keyring: policy.Keyring,
|
||||
Operator: policy.Operator,
|
||||
},
|
||||
}
|
||||
|
||||
converted.Agents = append(converted.Agents, policy.Agents...)
|
||||
converted.Agents = append(converted.Agents, policy.AgentPrefixes...)
|
||||
converted.Keys = append(converted.Keys, policy.Keys...)
|
||||
converted.Keys = append(converted.Keys, policy.KeyPrefixes...)
|
||||
converted.Nodes = append(converted.Nodes, policy.Nodes...)
|
||||
converted.Nodes = append(converted.Nodes, policy.NodePrefixes...)
|
||||
converted.Services = append(converted.Services, policy.Services...)
|
||||
converted.Services = append(converted.Services, policy.ServicePrefixes...)
|
||||
converted.Sessions = append(converted.Sessions, policy.Sessions...)
|
||||
converted.Sessions = append(converted.Sessions, policy.SessionPrefixes...)
|
||||
converted.Events = append(converted.Events, policy.Events...)
|
||||
converted.Events = append(converted.Events, policy.EventPrefixes...)
|
||||
converted.PreparedQueries = append(converted.PreparedQueries, policy.PreparedQueries...)
|
||||
converted.PreparedQueries = append(converted.PreparedQueries, policy.PreparedQueryPrefixes...)
|
||||
return converted
|
||||
}
|
||||
|
||||
func (policy *Policy) ConvertFromLegacy() *Policy {
|
||||
return &Policy{
|
||||
ID: policy.ID,
|
||||
Revision: policy.Revision,
|
||||
PolicyRules: PolicyRules{
|
||||
AgentPrefixes: policy.Agents,
|
||||
KeyPrefixes: policy.Keys,
|
||||
NodePrefixes: policy.Nodes,
|
||||
ServicePrefixes: policy.Services,
|
||||
SessionPrefixes: policy.Sessions,
|
||||
EventPrefixes: policy.Events,
|
||||
PreparedQueryPrefixes: policy.PreparedQueries,
|
||||
Keyring: policy.Keyring,
|
||||
Operator: policy.Operator,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// takesPrecedenceOver returns true when permission a
|
||||
// should take precedence over permission b
|
||||
func takesPrecedenceOver(a, b string) bool {
|
||||
|
|
|
@ -40,6 +40,9 @@ type policyAuthorizer struct {
|
|||
// operatorRule contains the operator policies.
|
||||
operatorRule *policyAuthorizerRule
|
||||
|
||||
// meshRule contains the mesh policies.
|
||||
meshRule *policyAuthorizerRule
|
||||
|
||||
// embedded enterprise policy authorizer
|
||||
enterprisePolicyAuthorizer
|
||||
}
|
||||
|
@ -310,6 +313,15 @@ func (p *policyAuthorizer) loadRules(policy *PolicyRules) error {
|
|||
p.operatorRule = &policyAuthorizerRule{access: access}
|
||||
}
|
||||
|
||||
// Load the mesh policy
|
||||
if policy.Mesh != "" {
|
||||
access, err := AccessLevelFromString(policy.Mesh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.meshRule = &policyAuthorizerRule{access: access}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -524,9 +536,6 @@ func (p *policyAuthorizer) IntentionRead(prefix string, _ *AuthorizerContext) En
|
|||
// IntentionWrite checks if writing (creating, updating, or deleting) of an
|
||||
// intention is allowed.
|
||||
func (p *policyAuthorizer) IntentionWrite(prefix string, _ *AuthorizerContext) EnforcementDecision {
|
||||
if prefix == "" {
|
||||
return Deny
|
||||
}
|
||||
if prefix == "*" {
|
||||
return p.allAllowed(p.intentionRules, AccessWrite)
|
||||
}
|
||||
|
@@ -664,6 +673,25 @@ func (p *policyAuthorizer) KeyringWrite(*AuthorizerContext) EnforcementDecision
return Default
}

// MeshRead determines if the read-only mesh functions are allowed.
func (p *policyAuthorizer) MeshRead(ctx *AuthorizerContext) EnforcementDecision {
if p.meshRule != nil {
return enforce(p.meshRule.access, AccessRead)
}
// default to OperatorRead access
return p.OperatorRead(ctx)
}

// MeshWrite determines if the state-changing mesh functions are
// allowed.
func (p *policyAuthorizer) MeshWrite(ctx *AuthorizerContext) EnforcementDecision {
if p.meshRule != nil {
return enforce(p.meshRule.access, AccessWrite)
}
// default to OperatorWrite access
return p.OperatorWrite(ctx)
}

// OperatorRead determines if the read-only operator functions are allowed.
func (p *policyAuthorizer) OperatorRead(*AuthorizerContext) EnforcementDecision {
if p.operatorRule != nil {
|
||||
|
|
|
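Note the fallback: when a policy never mentions `mesh`, the mesh decision inherits from the operator rule, which is exactly what the `o:*, m:<none>` cases in the test table earlier encode. A small sketch using the same constructor those tests use; treating a nil `*AuthorizerContext` as acceptable here is an assumption:

```go
func meshInheritsOperatorSketch() {
	// Policy grants operator = "read" but says nothing about mesh.
	policy := &Policy{PolicyRules: PolicyRules{Operator: PolicyRead}}
	authz, err := NewPolicyAuthorizerWithDefaults(DenyAll(), []*Policy{policy}, nil)
	if err != nil {
		panic(err)
	}
	_ = authz.MeshRead(nil)  // Allow: inherited from operator read
	_ = authz.MeshWrite(nil) // Deny: operator read does not grant write
}
```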
@ -48,6 +48,8 @@ func TestPolicyAuthorizer(t *testing.T) {
|
|||
{name: "DefaultKeyWritePrefix", prefix: "foo", check: checkDefaultKeyWritePrefix},
|
||||
{name: "DefaultNodeRead", prefix: "foo", check: checkDefaultNodeRead},
|
||||
{name: "DefaultNodeWrite", prefix: "foo", check: checkDefaultNodeWrite},
|
||||
{name: "DefaultMeshRead", prefix: "foo", check: checkDefaultMeshRead},
|
||||
{name: "DefaultMeshWrite", prefix: "foo", check: checkDefaultMeshWrite},
|
||||
{name: "DefaultOperatorRead", prefix: "foo", check: checkDefaultOperatorRead},
|
||||
{name: "DefaultOperatorWrite", prefix: "foo", check: checkDefaultOperatorWrite},
|
||||
{name: "DefaultPreparedQueryRead", prefix: "foo", check: checkDefaultPreparedQueryRead},
|
||||
|
|
|
@ -17,6 +17,7 @@ type policyRulesMergeContext struct {
|
|||
keyringRule string
|
||||
keyRules map[string]*KeyRule
|
||||
keyPrefixRules map[string]*KeyRule
|
||||
meshRule string
|
||||
nodeRules map[string]*NodeRule
|
||||
nodePrefixRules map[string]*NodeRule
|
||||
operatorRule string
|
||||
|
@ -37,6 +38,7 @@ func (p *policyRulesMergeContext) init() {
|
|||
p.keyringRule = ""
|
||||
p.keyRules = make(map[string]*KeyRule)
|
||||
p.keyPrefixRules = make(map[string]*KeyRule)
|
||||
p.meshRule = ""
|
||||
p.nodeRules = make(map[string]*NodeRule)
|
||||
p.nodePrefixRules = make(map[string]*NodeRule)
|
||||
p.operatorRule = ""
|
||||
|
@ -123,6 +125,10 @@ func (p *policyRulesMergeContext) merge(policy *PolicyRules) {
|
|||
}
|
||||
}
|
||||
|
||||
if takesPrecedenceOver(policy.Mesh, p.meshRule) {
|
||||
p.meshRule = policy.Mesh
|
||||
}
|
||||
|
||||
for _, np := range policy.Nodes {
|
||||
update := true
|
||||
if permission, found := p.nodeRules[np.Name]; found {
|
||||
|
@ -230,10 +236,11 @@ func (p *policyRulesMergeContext) merge(policy *PolicyRules) {
|
|||
}
|
||||
}
|
||||
|
||||
func (p *policyRulesMergeContext) update(merged *PolicyRules) {
|
||||
func (p *policyRulesMergeContext) fill(merged *PolicyRules) {
|
||||
merged.ACL = p.aclRule
|
||||
merged.Keyring = p.keyringRule
|
||||
merged.Operator = p.operatorRule
|
||||
merged.Mesh = p.meshRule
|
||||
|
||||
// All the for loop appends are ugly but Go doesn't have a way to get
|
||||
// a slice of all values within a map so this is necessary
|
||||
|
@ -347,8 +354,8 @@ func (m *PolicyMerger) Policy() *Policy {
|
|||
ID: fmt.Sprintf("%x", m.idHasher.Sum(nil)),
|
||||
}
|
||||
|
||||
m.policyRulesMergeContext.update(&merged.PolicyRules)
|
||||
m.enterprisePolicyRulesMergeContext.update(&merged.EnterprisePolicyRules)
|
||||
m.policyRulesMergeContext.fill(&merged.PolicyRules)
|
||||
m.enterprisePolicyRulesMergeContext.fill(&merged.EnterprisePolicyRules)
|
||||
|
||||
return merged
|
||||
}
|
||||
|
|
|
@ -12,6 +12,6 @@ func (ctx *enterprisePolicyRulesMergeContext) merge(*EnterprisePolicyRules) {
|
|||
// do nothing
|
||||
}
|
||||
|
||||
func (ctx *enterprisePolicyRulesMergeContext) update(*EnterprisePolicyRules) {
|
||||
func (ctx *enterprisePolicyRulesMergeContext) fill(*EnterprisePolicyRules) {
|
||||
// do nothing
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -156,6 +156,20 @@ func (s *staticAuthorizer) NodeWrite(string, *AuthorizerContext) EnforcementDeci
|
|||
return Deny
|
||||
}
|
||||
|
||||
func (s *staticAuthorizer) MeshRead(*AuthorizerContext) EnforcementDecision {
|
||||
if s.defaultAllow {
|
||||
return Allow
|
||||
}
|
||||
return Deny
|
||||
}
|
||||
|
||||
func (s *staticAuthorizer) MeshWrite(*AuthorizerContext) EnforcementDecision {
|
||||
if s.defaultAllow {
|
||||
return Allow
|
||||
}
|
||||
return Deny
|
||||
}
|
||||
|
||||
func (s *staticAuthorizer) OperatorRead(*AuthorizerContext) EnforcementDecision {
|
||||
if s.defaultAllow {
|
||||
return Allow
|
||||
|
@ -241,7 +255,11 @@ func ManageAll() Authorizer {
|
|||
return manageAll
|
||||
}
|
||||
|
||||
// RootAuthorizer returns a possible Authorizer if the ID matches a root policy
|
||||
// RootAuthorizer returns a possible Authorizer if the ID matches a root policy.
|
||||
//
|
||||
// TODO: rename this function. While the returned authorizer is used as a root
|
||||
// authorizer in some cases, in others it is not. A more appropriate name might
|
||||
// be NewAuthorizerFromPolicyName.
|
||||
func RootAuthorizer(id string) Authorizer {
|
||||
switch id {
|
||||
case "allow":
|
||||
|
|
agent/acl.go
|
@ -62,6 +62,7 @@ func (a *Agent) vetServiceRegisterWithAuthorizer(authz acl.Authorizer, service *
|
|||
if service.Kind == structs.ServiceKindConnectProxy {
|
||||
service.FillAuthzContext(&authzContext)
|
||||
if authz.ServiceWrite(service.Proxy.DestinationServiceName, &authzContext) != acl.Allow {
|
||||
// TODO(partitions) fix this to include namespace and partition
|
||||
return acl.PermissionDenied("Missing service:write on %s", service.Proxy.DestinationServiceName)
|
||||
}
|
||||
}
|
||||
|
@ -98,7 +99,7 @@ func (a *Agent) vetCheckRegisterWithAuthorizer(authz acl.Authorizer, check *stru
|
|||
}
|
||||
} else {
|
||||
if authz.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDenied("Missing node:write on %s", a.config.NodeName)
|
||||
return acl.PermissionDenied("Missing node:write on %s", structs.NodeNameString(a.config.NodeName, a.agentEnterpriseMeta()))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -110,7 +111,7 @@ func (a *Agent) vetCheckRegisterWithAuthorizer(authz acl.Authorizer, check *stru
|
|||
}
|
||||
} else {
|
||||
if authz.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDenied("Missing node:write on %s", a.config.NodeName)
|
||||
return acl.PermissionDenied("Missing node:write on %s", structs.NodeNameString(a.config.NodeName, a.agentEnterpriseMeta()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -126,11 +127,11 @@ func (a *Agent) vetCheckUpdateWithAuthorizer(authz acl.Authorizer, checkID struc
|
|||
if existing := a.State.Check(checkID); existing != nil {
|
||||
if len(existing.ServiceName) > 0 {
|
||||
if authz.ServiceWrite(existing.ServiceName, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDenied("Missing service:write on %s", existing.ServiceName)
|
||||
return acl.PermissionDenied("Missing service:write on %s", structs.ServiceIDString(existing.ServiceName, &existing.EnterpriseMeta))
|
||||
}
|
||||
} else {
|
||||
if authz.NodeWrite(a.config.NodeName, &authzContext) != acl.Allow {
|
||||
return acl.PermissionDenied("Missing node:write on %s", a.config.NodeName)
|
||||
return acl.PermissionDenied("Missing node:write on %s", structs.NodeNameString(a.config.NodeName, a.agentEnterpriseMeta()))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
@ -149,11 +150,11 @@ func (a *Agent) filterMembers(token string, members *[]serf.Member) error {
|
|||
}
|
||||
|
||||
var authzContext acl.AuthorizerContext
|
||||
a.agentEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
// Filter out members based on the node policy.
|
||||
m := *members
|
||||
for i := 0; i < len(m); i++ {
|
||||
node := m[i].Name
|
||||
serfMemberFillAuthzContext(&m[i], &authzContext)
|
||||
if authz.NodeRead(node, &authzContext) == acl.Allow {
|
||||
continue
|
||||
}
|
||||
|
@ -184,14 +185,12 @@ func (a *Agent) filterChecksWithAuthorizer(authz acl.Authorizer, checks *map[str
|
|||
var authzContext acl.AuthorizerContext
|
||||
// Filter out checks based on the node or service policy.
|
||||
for id, check := range *checks {
|
||||
check.FillAuthzContext(&authzContext)
|
||||
if len(check.ServiceName) > 0 {
|
||||
check.FillAuthzContext(&authzContext)
|
||||
if authz.ServiceRead(check.ServiceName, &authzContext) == acl.Allow {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// TODO(partition): should this be a Default or Node flavored entmeta?
|
||||
check.NodeEnterpriseMetaForPartition().FillAuthzContext(&authzContext)
|
||||
if authz.NodeRead(a.config.NodeName, &authzContext) == acl.Allow {
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -1963,6 +1963,14 @@ func TestACL_Authorize(t *testing.T) {
|
|||
Resource: "operator",
|
||||
Access: "write",
|
||||
},
|
||||
{
|
||||
Resource: "mesh",
|
||||
Access: "read",
|
||||
},
|
||||
{
|
||||
Resource: "mesh",
|
||||
Access: "write",
|
||||
},
|
||||
{
|
||||
Resource: "query",
|
||||
Segment: "foo",
|
||||
|
@ -2097,6 +2105,14 @@ func TestACL_Authorize(t *testing.T) {
|
|||
Resource: "operator",
|
||||
Access: "write",
|
||||
},
|
||||
{
|
||||
Resource: "mesh",
|
||||
Access: "read",
|
||||
},
|
||||
{
|
||||
Resource: "mesh",
|
||||
Access: "write",
|
||||
},
|
||||
{
|
||||
Resource: "query",
|
||||
Segment: "foo",
|
||||
|
@ -2147,6 +2163,8 @@ func TestACL_Authorize(t *testing.T) {
|
|||
true, // node:write
|
||||
true, // operator:read
|
||||
true, // operator:write
|
||||
true, // mesh:read
|
||||
true, // mesh:write
|
||||
false, // query:read
|
||||
false, // query:write
|
||||
true, // service:read
|
||||
|
|
|
@ -0,0 +1,13 @@
|
|||
// +build !consulent
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/serf/serf"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
)
|
||||
|
||||
func serfMemberFillAuthzContext(m *serf.Member, ctx *acl.AuthorizerContext) {
|
||||
// no-op
|
||||
}
|
|
@ -76,10 +76,6 @@ func NewTestACLAgent(t *testing.T, name string, hcl string, resolveAuthz authzRe
|
|||
return a
|
||||
}
|
||||
|
||||
func (a *TestACLAgent) UseLegacyACLs() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (a *TestACLAgent) ResolveToken(secretID string) (acl.Authorizer, error) {
|
||||
if a.resolveAuthzFn == nil {
|
||||
return nil, fmt.Errorf("ResolveToken call is unexpected - no authz resolver callback set")
|
||||
|
|
|
@ -143,7 +143,6 @@ type delegate interface {
|
|||
ResolveTokenAndDefaultMeta(token string, entMeta *structs.EnterpriseMeta, authzContext *acl.AuthorizerContext) (acl.Authorizer, error)
|
||||
|
||||
RPC(method string, args interface{}, reply interface{}) error
|
||||
UseLegacyACLs() bool
|
||||
SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer, replyFn structs.SnapshotReplyFn) error
|
||||
Shutdown() error
|
||||
Stats() map[string]map[string]string
|
||||
|
@ -621,7 +620,8 @@ func (a *Agent) Start(ctx context.Context) error {
|
|||
a.apiServers.Start(srv)
|
||||
}
|
||||
|
||||
if err := a.listenAndServeXDS(); err != nil {
|
||||
// Start gRPC server.
|
||||
if err := a.listenAndServeGRPC(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -669,8 +669,8 @@ func (a *Agent) Failed() <-chan struct{} {
|
|||
return a.apiServers.failed
|
||||
}
|
||||
|
||||
func (a *Agent) listenAndServeXDS() error {
|
||||
if len(a.config.XDSAddrs) < 1 {
|
||||
func (a *Agent) listenAndServeGRPC() error {
|
||||
if len(a.config.GRPCAddrs) < 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -690,9 +690,10 @@ func (a *Agent) listenAndServeXDS() error {
|
|||
if a.config.HTTPSPort <= 0 {
|
||||
tlsConfig = nil
|
||||
}
|
||||
var err error
|
||||
a.grpcServer = xds.NewGRPCServer(xdsServer, tlsConfig)
|
||||
|
||||
ln, err := a.startListeners(a.config.XDSAddrs)
|
||||
ln, err := a.startListeners(a.config.GRPCAddrs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -2459,6 +2460,11 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
maxOutputSize = chkType.OutputMaxSize
}

// FailuresBeforeWarning has to default to same value as FailuresBeforeCritical
if chkType.FailuresBeforeWarning == 0 {
chkType.FailuresBeforeWarning = chkType.FailuresBeforeCritical
}

// Get the address of the proxy for this service if it exists
// Need its config to know whether we should reroute checks to it
var proxy *structs.NodeService
|
||||
|
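The defaulting above keeps backwards compatibility: a check that only configures the critical threshold warns and goes critical at the same point. A standalone restatement of the rule; the struct below is a stand-in for illustration, not the real `structs.CheckType`:

```go
// Stand-in type for illustration; only the two threshold fields matter here.
type checkThresholds struct {
	FailuresBeforeWarning  int
	FailuresBeforeCritical int
}

// applyWarningDefault mirrors the defaulting performed in addCheck above.
func applyWarningDefault(c *checkThresholds) {
	if c.FailuresBeforeWarning == 0 {
		c.FailuresBeforeWarning = c.FailuresBeforeCritical
	}
}
```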
@ -2473,7 +2479,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
|
|||
}
|
||||
}
|
||||
|
||||
statusHandler := checks.NewStatusHandler(a.State, a.logger, chkType.SuccessBeforePassing, chkType.FailuresBeforeCritical)
|
||||
statusHandler := checks.NewStatusHandler(a.State, a.logger, chkType.SuccessBeforePassing, chkType.FailuresBeforeWarning, chkType.FailuresBeforeCritical)
|
||||
sid := check.CompoundServiceID()
|
||||
|
||||
cid := check.CompoundCheckID()
|
||||
|
@ -2859,44 +2865,6 @@ func (a *Agent) AdvertiseAddrLAN() string {
|
|||
return a.config.AdvertiseAddrLAN.String()
|
||||
}
|
||||
|
||||
// resolveProxyCheckAddress returns the best address to use for a TCP check of
|
||||
// the proxy's public listener. It expects the input to already have default
|
||||
// values populated by applyProxyConfigDefaults. It may return an empty string
|
||||
// indicating that the TCP check should not be created at all.
|
||||
//
|
||||
// By default this uses the proxy's bind address which in turn defaults to the
|
||||
// agent's bind address. If the proxy bind address ends up being 0.0.0.0 we have
|
||||
// to assume the agent can dial it over loopback which is usually true.
|
||||
//
|
||||
// In some topologies such as proxy being in a different container, the IP the
|
||||
// agent used to dial proxy over a local bridge might not be the same as the
|
||||
// container's public routable IP address so we allow a manual override of the
|
||||
// check address in config "tcp_check_address" too.
|
||||
//
|
||||
// Finally the TCP check can be disabled by another manual override
|
||||
// "disable_tcp_check" in cases where the agent will never be able to dial the
|
||||
// proxy directly for some reason.
|
||||
func (a *Agent) resolveProxyCheckAddress(proxyCfg map[string]interface{}) string {
|
||||
// If user disabled the check return empty string
|
||||
if disable, ok := proxyCfg["disable_tcp_check"].(bool); ok && disable {
|
||||
return ""
|
||||
}
|
||||
|
||||
// If user specified a custom one, use that
|
||||
if chkAddr, ok := proxyCfg["tcp_check_address"].(string); ok && chkAddr != "" {
|
||||
return chkAddr
|
||||
}
|
||||
|
||||
// If we have a bind address and it's dialable, use that
|
||||
if bindAddr, ok := proxyCfg["bind_address"].(string); ok &&
|
||||
bindAddr != "" && bindAddr != "0.0.0.0" && bindAddr != "[::]" {
|
||||
return bindAddr
|
||||
}
|
||||
|
||||
// Default to localhost
|
||||
return "127.0.0.1"
|
||||
}
|
||||
|
||||
func (a *Agent) cancelCheckMonitors(checkID structs.CheckID) {
|
||||
// Stop any monitors
|
||||
delete(a.checkReapAfter, checkID)
|
||||
|
|
|
@ -73,7 +73,7 @@ func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (i
|
|||
SupportedProxies: map[string][]string{
|
||||
"envoy": proxysupport.EnvoyVersions,
|
||||
},
|
||||
Port: s.agent.config.XDSPort,
|
||||
Port: s.agent.config.GRPCPort,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -96,14 +96,9 @@ func (s *HTTPHandlers) AgentSelf(resp http.ResponseWriter, req *http.Request) (i
|
|||
Server: s.agent.config.ServerMode,
|
||||
Version: s.agent.config.Version,
|
||||
}
|
||||
debugConfig := s.agent.config.Sanitized()
|
||||
// Backwards compat for the envoy command. Never use DebugConfig for
|
||||
// programmatic access to data.
|
||||
debugConfig["GRPCPort"] = s.agent.config.XDSPort
|
||||
|
||||
return Self{
|
||||
Config: config,
|
||||
DebugConfig: debugConfig,
|
||||
DebugConfig: s.agent.config.Sanitized(),
|
||||
Coord: cs[s.agent.config.SegmentName],
|
||||
Member: s.agent.LocalMember(),
|
||||
Stats: s.agent.Stats(),
|
||||
|
@ -584,6 +579,7 @@ func (s *HTTPHandlers) AgentForceLeave(resp http.ResponseWriter, req *http.Reque
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO(partitions): should this be possible in a partition?
|
||||
if authz.OperatorWrite(nil) != acl.Allow {
|
||||
return nil, acl.ErrPermissionDenied
|
||||
}
|
||||
|
@ -1076,6 +1072,11 @@ func (s *HTTPHandlers) AgentRegisterService(resp http.ResponseWriter, req *http.
|
|||
Reason: fmt.Sprintf("Invalid SidecarService: %s", err)}
|
||||
}
|
||||
if sidecar != nil {
|
||||
if err := sidecar.Validate(); err != nil {
|
||||
resp.WriteHeader(http.StatusBadRequest)
|
||||
fmt.Fprint(resp, err.Error())
|
||||
return nil, nil
|
||||
}
|
||||
// Make sure we are allowed to register the sidecar using the token
|
||||
// specified (might be specific to sidecar or the same one as the overall
|
||||
// request).
|
||||
|
@ -1247,7 +1248,10 @@ func (s *HTTPHandlers) AgentNodeMaintenance(resp http.ResponseWriter, req *http.
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if authz.NodeWrite(s.agent.config.NodeName, nil) != acl.Allow {
|
||||
|
||||
var authzContext acl.AuthorizerContext
|
||||
s.agent.agentEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if authz.NodeWrite(s.agent.config.NodeName, &authzContext) != acl.Allow {
|
||||
return nil, acl.ErrPermissionDenied
|
||||
}
|
||||
|
||||
|
@ -1533,6 +1537,7 @@ func (s *HTTPHandlers) AgentHost(resp http.ResponseWriter, req *http.Request) (i
|
|||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(partitions): should this be possible in a partition?
|
||||
if authz.OperatorRead(nil) != acl.Allow {
|
||||
return nil, acl.ErrPermissionDenied
|
||||
}
|
||||
|
|
|
@ -371,6 +371,9 @@ func (f fakeGRPCConnPool) ClientConnLeader() (*grpc.ClientConn, error) {
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (f fakeGRPCConnPool) SetGatewayResolver(_ func(string) string) {
|
||||
}
|
||||
|
||||
func TestAgent_ReconnectConfigWanDisabled(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
@ -4524,6 +4527,9 @@ LOOP:
|
|||
}
|
||||
|
||||
// This is a mirror of a similar test in agent/consul/server_test.go
|
||||
//
|
||||
// TODO(rb): implement something similar to this as a full containerized test suite with proper
|
||||
// isolation so requests can't "cheat" and bypass the mesh gateways
|
||||
func TestAgent_JoinWAN_viaMeshGateway(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
@ -4771,6 +4777,9 @@ func TestAgent_JoinWAN_viaMeshGateway(t *testing.T) {
|
|||
})
|
||||
|
||||
// Ensure we can do some trivial RPC in all directions.
|
||||
//
|
||||
// NOTE: we explicitly make streaming and non-streaming assertions here to
|
||||
// verify both rpc and grpc codepaths.
|
||||
agents := map[string]*TestAgent{"dc1": a1, "dc2": a2, "dc3": a3}
|
||||
names := map[string]string{"dc1": "bob", "dc2": "betty", "dc3": "bonnie"}
|
||||
for _, srcDC := range []string{"dc1", "dc2", "dc3"} {
|
||||
|
@ -4780,20 +4789,39 @@ func TestAgent_JoinWAN_viaMeshGateway(t *testing.T) {
|
|||
continue
|
||||
}
|
||||
t.Run(srcDC+" to "+dstDC, func(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", "/v1/catalog/nodes?dc="+dstDC, nil)
|
||||
require.NoError(t, err)
|
||||
t.Run("normal-rpc", func(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", "/v1/catalog/nodes?dc="+dstDC, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
resp := httptest.NewRecorder()
|
||||
obj, err := a.srv.CatalogNodes(resp, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, obj)
|
||||
resp := httptest.NewRecorder()
|
||||
obj, err := a.srv.CatalogNodes(resp, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, obj)
|
||||
|
||||
nodes, ok := obj.(structs.Nodes)
|
||||
require.True(t, ok)
|
||||
require.Len(t, nodes, 1)
|
||||
node := nodes[0]
|
||||
require.Equal(t, dstDC, node.Datacenter)
|
||||
require.Equal(t, names[dstDC], node.Node)
|
||||
nodes, ok := obj.(structs.Nodes)
|
||||
require.True(t, ok)
|
||||
require.Len(t, nodes, 1)
|
||||
node := nodes[0]
|
||||
require.Equal(t, dstDC, node.Datacenter)
|
||||
require.Equal(t, names[dstDC], node.Node)
|
||||
})
|
||||
t.Run("streaming-grpc", func(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", "/v1/health/service/consul?cached&dc="+dstDC, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
resp := httptest.NewRecorder()
|
||||
obj, err := a.srv.HealthServiceNodes(resp, req)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, obj)
|
||||
|
||||
csns, ok := obj.(structs.CheckServiceNodes)
|
||||
require.True(t, ok)
|
||||
require.Len(t, csns, 1)
|
||||
|
||||
csn := csns[0]
|
||||
require.Equal(t, dstDC, csn.Node.Datacenter)
|
||||
require.Equal(t, names[dstDC], csn.Node.Node)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
@ -216,7 +216,7 @@ func (ac *AutoConfig) generateCSR() (csr string, key string, err error) {
|
|||
Host: unknownTrustDomain,
|
||||
Datacenter: ac.config.Datacenter,
|
||||
Agent: ac.config.NodeName,
|
||||
// TODO(rb)(partitions): populate the partition field from the agent config
|
||||
Partition: ac.config.PartitionOrDefault(),
|
||||
}
|
||||
|
||||
caConfig, err := ac.config.ConnectCAConfiguration()
|
||||
|
|
|
@ -16,7 +16,7 @@ func TestCompiledDiscoveryChain(t *testing.T) {
|
|||
typ := &CompiledDiscoveryChain{RPC: rpc}
|
||||
|
||||
// just do the default chain
|
||||
chain := discoverychain.TestCompileConfigEntries(t, "web", "default", "dc1", "trustdomain.consul", "dc1", nil)
|
||||
chain := discoverychain.TestCompileConfigEntries(t, "web", "default", "default", "dc1", "trustdomain.consul", "dc1", nil)
|
||||
|
||||
// Expect the proper RPC call. This also sets the expected value
|
||||
// since that is return-by-pointer in the arguments.
|
||||
|
|
|
@ -235,7 +235,7 @@ func (s *HTTPHandlers) CatalogNodes(resp http.ResponseWriter, req *http.Request)
|
|||
// Setup the request
|
||||
args := structs.DCSpecificRequest{}
|
||||
s.parseSource(req, &args.Source)
|
||||
if err := parseEntMetaPartition(req, &args.EnterpriseMeta); err != nil {
|
||||
if err := s.parseEntMetaPartition(req, &args.EnterpriseMeta); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
args.NodeMetaFilters = s.parseMetaFilter(req)
|
||||
|
|
|
@ -108,7 +108,7 @@ func (c *CheckAlias) runLocal(stopCh chan struct{}) {
|
|||
}
|
||||
|
||||
updateStatus := func() {
|
||||
checks := c.Notify.Checks(c.WildcardEnterpriseMetaForPartition())
|
||||
checks := c.Notify.Checks(c.WithWildcardNamespace())
|
||||
checksList := make([]*structs.HealthCheck, 0, len(checks))
|
||||
for _, chk := range checks {
|
||||
checksList = append(checksList, chk)
|
||||
|
|
|
@ -4,7 +4,6 @@ import (
|
|||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
http2 "golang.org/x/net/http2"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
|
@ -16,6 +15,8 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
http2 "golang.org/x/net/http2"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
|
@@ -907,17 +908,19 @@ type StatusHandler struct {
logger hclog.Logger
successBeforePassing int
successCounter int
failuresBeforeWarning int
failuresBeforeCritical int
failuresCounter int
}

// NewStatusHandler sets counter values to their thresholds in order to immediately update status after the first check.
func NewStatusHandler(inner CheckNotifier, logger hclog.Logger, successBeforePassing, failuresBeforeCritical int) *StatusHandler {
func NewStatusHandler(inner CheckNotifier, logger hclog.Logger, successBeforePassing, failuresBeforeWarning, failuresBeforeCritical int) *StatusHandler {
return &StatusHandler{
logger: logger,
inner: inner,
successBeforePassing: successBeforePassing,
successCounter: successBeforePassing,
failuresBeforeWarning: failuresBeforeWarning,
failuresBeforeCritical: failuresBeforeCritical,
failuresCounter: failuresBeforeCritical,
}
@@ -950,10 +953,17 @@ func (s *StatusHandler) updateCheck(checkID structs.CheckID, status, output stri
s.inner.UpdateCheck(checkID, status, output)
return
}
s.logger.Warn("Check failed but has not reached failure threshold",
// Defaults to same value as failuresBeforeCritical if not set.
if s.failuresCounter >= s.failuresBeforeWarning {
s.logger.Warn("Check is now warning", "check", checkID.String())
s.inner.UpdateCheck(checkID, api.HealthWarning, output)
return
}
s.logger.Warn("Check failed but has not reached warning/failure threshold",
"check", checkID.String(),
"status", status,
"failure_count", s.failuresCounter,
"warning_threshold", s.failuresBeforeWarning,
"failure_threshold", s.failuresBeforeCritical,
)
}
|
||||
|
|
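A hedged usage sketch of the new constructor, modelled on the tests further down; the helper packages `mock`, `testutil`, `structs`, and `api` are the same ones those tests already import:

```go
// Sketch only: warn after 2 consecutive failures, escalate to critical after 3,
// and report passing again after 2 consecutive successes.
func statusHandlerWarningSketch(t *testing.T) {
	notif := mock.NewNotify()
	logger := testutil.Logger(t)
	cid := structs.NewCheckID("example", nil)

	h := NewStatusHandler(notif, logger, 2, 2, 3)
	h.updateCheck(cid, api.HealthPassing, "ok")    // passing immediately
	h.updateCheck(cid, api.HealthCritical, "fail") // 1 failure: still passing
	h.updateCheck(cid, api.HealthCritical, "fail") // 2 failures: HealthWarning
	h.updateCheck(cid, api.HealthCritical, "fail") // 3 failures: HealthCritical
}
```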
|
@ -49,7 +49,7 @@ func TestCheckMonitor_Script(t *testing.T) {
|
|||
t.Run(tt.status, func(t *testing.T) {
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
check := &CheckMonitor{
|
||||
|
@ -94,7 +94,7 @@ func TestCheckMonitor_Args(t *testing.T) {
|
|||
t.Run(tt.status, func(t *testing.T) {
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckMonitor{
|
||||
|
@ -128,7 +128,7 @@ func TestCheckMonitor_Timeout(t *testing.T) {
|
|||
// t.Parallel() // timing test. no parallel
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
check := &CheckMonitor{
|
||||
|
@ -163,7 +163,7 @@ func TestCheckMonitor_RandomStagger(t *testing.T) {
|
|||
// t.Parallel() // timing test. no parallel
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
|
@ -195,7 +195,7 @@ func TestCheckMonitor_LimitOutput(t *testing.T) {
|
|||
t.Parallel()
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckMonitor{
|
||||
|
@ -354,7 +354,7 @@ func TestCheckHTTP(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
|
@ -397,7 +397,7 @@ func TestCheckHTTP_Proxied(t *testing.T) {
|
|||
notif := mock.NewNotify()
|
||||
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckHTTP{
|
||||
|
@ -433,7 +433,7 @@ func TestCheckHTTP_NotProxied(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckHTTP{
|
||||
|
@ -558,7 +558,7 @@ func TestCheckMaxOutputSize(t *testing.T) {
|
|||
Interval: 2 * time.Millisecond,
|
||||
Logger: logger,
|
||||
OutputMaxSize: maxOutputSize,
|
||||
StatusHandler: NewStatusHandler(notif, logger, 0, 0),
|
||||
StatusHandler: NewStatusHandler(notif, logger, 0, 0, 0),
|
||||
}
|
||||
|
||||
check.Start()
|
||||
|
@ -586,7 +586,7 @@ func TestCheckHTTPTimeout(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
|
||||
cid := structs.NewCheckID("bar", nil)
|
||||
|
||||
|
@ -659,7 +659,7 @@ func TestCheckHTTPBody(t *testing.T) {
|
|||
Timeout: timeout,
|
||||
Interval: 2 * time.Millisecond,
|
||||
Logger: logger,
|
||||
StatusHandler: NewStatusHandler(notif, logger, 0, 0),
|
||||
StatusHandler: NewStatusHandler(notif, logger, 0, 0, 0),
|
||||
}
|
||||
check.Start()
|
||||
defer check.Stop()
|
||||
|
@ -690,7 +690,7 @@ func TestCheckHTTP_disablesKeepAlives(t *testing.T) {
|
|||
HTTP: "http://foo.bar/baz",
|
||||
Interval: 10 * time.Second,
|
||||
Logger: logger,
|
||||
StatusHandler: NewStatusHandler(notif, logger, 0, 0),
|
||||
StatusHandler: NewStatusHandler(notif, logger, 0, 0, 0),
|
||||
}
|
||||
|
||||
check.Start()
|
||||
|
@ -725,7 +725,7 @@ func TestCheckHTTP_TLS_SkipVerify(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
|
||||
cid := structs.NewCheckID("skipverify_true", nil)
|
||||
check := &CheckHTTP{
|
||||
|
@ -767,7 +767,7 @@ func TestCheckHTTP_TLS_BadVerify(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("skipverify_false", nil)
|
||||
|
||||
check := &CheckHTTP{
|
||||
|
@ -819,7 +819,7 @@ func mockTCPServer(network string) net.Listener {
|
|||
func expectTCPStatus(t *testing.T, tcp string, status string) {
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckTCP{
|
||||
|
@ -846,13 +846,12 @@ func TestStatusHandlerUpdateStatusAfterConsecutiveChecksThresholdIsReached(t *te
|
|||
cid := structs.NewCheckID("foo", nil)
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 2, 3)
|
||||
statusHandler := NewStatusHandler(notif, logger, 2, 2, 3)
|
||||
|
||||
// Set the initial status to passing after a single success
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
// Status should become critical after 3 failed checks only
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
// Status should still be passing after 1 failed check only
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
|
@ -860,10 +859,19 @@ func TestStatusHandlerUpdateStatusAfterConsecutiveChecksThresholdIsReached(t *te
|
|||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
|
||||
// Status should become warning after 2 failed checks only
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 2, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
// Status should become critical after 4 failed checks only
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthCritical, notif.State(cid))
|
||||
})
|
||||
|
||||
|
@ -871,14 +879,14 @@ func TestStatusHandlerUpdateStatusAfterConsecutiveChecksThresholdIsReached(t *te
|
|||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 2, notif.Updates(cid))
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthCritical, notif.State(cid))
|
||||
})
|
||||
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, 4, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
}
|
||||
|
@ -888,17 +896,18 @@ func TestStatusHandlerResetCountersOnNonIdenticalsConsecutiveChecks(t *testing.T
|
|||
cid := structs.NewCheckID("foo", nil)
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 2, 3)
|
||||
statusHandler := NewStatusHandler(notif, logger, 2, 2, 3)
|
||||
|
||||
// Set the initial status to passing after a single success
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
// Status should remain passing after FAIL PASS FAIL FAIL sequence
|
||||
// Status should remain passing after FAIL PASS FAIL PASS FAIL sequence
|
||||
// Although we have 3 FAILS, they are not consecutive
|
||||
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
|
@ -906,11 +915,19 @@ func TestStatusHandlerResetCountersOnNonIdenticalsConsecutiveChecks(t *testing.T
|
|||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
|
||||
// Critical after a 3rd consecutive FAIL
|
||||
// Warning after a 2nd consecutive FAIL
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 2, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
// Critical after a 3rd consecutive FAIL
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthCritical, notif.State(cid))
|
||||
})
|
||||
|
||||
|
@ -920,19 +937,137 @@ func TestStatusHandlerResetCountersOnNonIdenticalsConsecutiveChecks(t *testing.T
|
|||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 2, notif.Updates(cid))
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthCritical, notif.State(cid))
|
||||
})
|
||||
|
||||
// Passing after a 2nd consecutive PASS
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 4, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
}
|
||||
|
||||
func TestStatusHandlerWarningAndCriticalThresholdsTheSameSetsCritical(t *testing.T) {
|
||||
t.Parallel()
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 2, 3, 3)
|
||||
|
||||
// Set the initial status to passing after a single success
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
// Status should remain passing after FAIL FAIL sequence
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 1, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
|
||||
// Critical and not Warning after a 3rd consecutive FAIL
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 2, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthCritical, notif.State(cid))
|
||||
})
|
||||
|
||||
// Passing after consecutive PASS PASS sequence
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
}
|
||||
|
||||
func TestStatusHandlerMaintainWarningStatusWhenCheckIsFlapping(t *testing.T) {
|
||||
t.Parallel()
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 3, 3, 5)
|
||||
|
||||
// Set the initial status to passing after a single success.
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
|
||||
// Status should remain passing after a FAIL FAIL sequence.
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 1, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthPassing, notif.State(cid))
|
||||
})
|
||||
|
||||
// Warning after a 3rd consecutive FAIL.
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 2, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
// Status should remain warning after a PASS FAIL FAIL FAIL PASS FAIL FAIL FAIL PASS sequence.
|
||||
// Although we have 6 FAILS, they are not consecutive.
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
// The status gets updated due to failuresCounter being reset
|
||||
// but the status itself remains as Warning.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 3, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
// The status doesn't change, but a state update is triggered.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 4, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
// Status should change only after 5 consecutive FAIL updates.
|
||||
statusHandler.updateCheck(cid, api.HealthPassing, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
// The status doesn't change, but a status update is triggered.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 5, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
// The status doesn't change, but a status update is triggered.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 6, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthWarning, notif.State(cid))
|
||||
})
|
||||
|
||||
statusHandler.updateCheck(cid, api.HealthCritical, "bar")
|
||||
|
||||
// The FailuresBeforeCritical threshold is finally breached.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
require.Equal(r, 7, notif.Updates(cid))
|
||||
require.Equal(r, api.HealthCritical, notif.State(cid))
|
||||
})
|
||||
}
|
||||
|
||||
func TestCheckTCPCritical(t *testing.T) {
|
||||
t.Parallel()
|
||||
var (
|
||||
|
@ -992,7 +1127,7 @@ func TestCheckH2PING(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
tlsCfg := &api.TLSConfig{
|
||||
InsecureSkipVerify: true,
|
||||
|
@ -1044,7 +1179,7 @@ func TestCheckH2PING_TLS_BadVerify(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
tlsCfg := &api.TLSConfig{}
|
||||
tlsClientCfg, err := api.SetupTLSConfig(tlsCfg)
|
||||
|
@ -1085,7 +1220,7 @@ func TestCheckH2PINGInvalidListener(t *testing.T) {
|
|||
|
||||
notif := mock.NewNotify()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
tlsCfg := &api.TLSConfig{
|
||||
InsecureSkipVerify: true,
|
||||
|
@ -1388,7 +1523,7 @@ func TestCheck_Docker(t *testing.T) {
|
|||
|
||||
notif, upd := mock.NewNotifyChan()
|
||||
logger := testutil.Logger(t)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
id := structs.NewCheckID("chk", nil)
|
||||
|
||||
check := &CheckDocker{
|
||||
|
|
|
@ -113,7 +113,7 @@ func TestGRPC_Proxied(t *testing.T) {
|
|||
Output: ioutil.Discard,
|
||||
})
|
||||
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckGRPC{
|
||||
|
@ -147,7 +147,7 @@ func TestGRPC_NotProxied(t *testing.T) {
|
|||
Output: ioutil.Discard,
|
||||
})
|
||||
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0)
|
||||
statusHandler := NewStatusHandler(notif, logger, 0, 0, 0)
|
||||
cid := structs.NewCheckID("foo", nil)
|
||||
|
||||
check := &CheckGRPC{
|
||||
|
|
|
@ -346,10 +346,12 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
for _, err := range validateEnterpriseConfigKeys(&c2) {
|
||||
b.warn("%s", err)
|
||||
}
|
||||
b.Warnings = append(b.Warnings, md.Warnings...)
|
||||
|
||||
// if we have a single 'check' or 'service' we need to add them to the
|
||||
// list of checks and services first since we cannot merge them
|
||||
// generically and later values would clobber earlier ones.
|
||||
// TODO: move to applyDeprecatedConfig
|
||||
if c2.Check != nil {
|
||||
c2.Checks = append(c2.Checks, *c2.Check)
|
||||
c2.Check = nil
|
||||
|
@ -426,10 +428,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
httpPort := b.portVal("ports.http", c.Ports.HTTP)
|
||||
httpsPort := b.portVal("ports.https", c.Ports.HTTPS)
|
||||
serverPort := b.portVal("ports.server", c.Ports.Server)
|
||||
if c.Ports.XDS == nil {
|
||||
c.Ports.XDS = c.Ports.GRPC
|
||||
}
|
||||
xdsPort := b.portVal("ports.xds", c.Ports.XDS)
|
||||
grpcPort := b.portVal("ports.grpc", c.Ports.GRPC)
|
||||
serfPortLAN := b.portVal("ports.serf_lan", c.Ports.SerfLAN)
|
||||
serfPortWAN := b.portVal("ports.serf_wan", c.Ports.SerfWAN)
|
||||
proxyMinPort := b.portVal("ports.proxy_min_port", c.Ports.ProxyMinPort)
|
||||
|
@ -556,10 +555,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
dnsAddrs := b.makeAddrs(b.expandAddrs("addresses.dns", c.Addresses.DNS), clientAddrs, dnsPort)
|
||||
httpAddrs := b.makeAddrs(b.expandAddrs("addresses.http", c.Addresses.HTTP), clientAddrs, httpPort)
|
||||
httpsAddrs := b.makeAddrs(b.expandAddrs("addresses.https", c.Addresses.HTTPS), clientAddrs, httpsPort)
|
||||
if c.Addresses.XDS == nil {
|
||||
c.Addresses.XDS = c.Addresses.GRPC
|
||||
}
|
||||
xdsAddrs := b.makeAddrs(b.expandAddrs("addresses.xds", c.Addresses.XDS), clientAddrs, xdsPort)
|
||||
grpcAddrs := b.makeAddrs(b.expandAddrs("addresses.grpc", c.Addresses.GRPC), clientAddrs, grpcPort)
|
||||
|
||||
for _, a := range dnsAddrs {
|
||||
if x, ok := a.(*net.TCPAddr); ok {
|
||||
|
@ -733,16 +729,6 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
|
||||
aclsEnabled := false
|
||||
primaryDatacenter := strings.ToLower(stringVal(c.PrimaryDatacenter))
|
||||
if c.ACLDatacenter != nil {
|
||||
b.warn("The 'acl_datacenter' field is deprecated. Use the 'primary_datacenter' field instead.")
|
||||
|
||||
if primaryDatacenter == "" {
|
||||
primaryDatacenter = strings.ToLower(stringVal(c.ACLDatacenter))
|
||||
}
|
||||
|
||||
// when the acl_datacenter config is used it implicitly enables acls
|
||||
aclsEnabled = true
|
||||
}
|
||||
|
||||
if c.ACL.Enabled != nil {
|
||||
aclsEnabled = boolVal(c.ACL.Enabled)
|
||||
|
@ -753,13 +739,6 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
primaryDatacenter = datacenter
|
||||
}
|
||||
|
||||
enableTokenReplication := false
|
||||
if c.ACLReplicationToken != nil {
|
||||
enableTokenReplication = true
|
||||
}
|
||||
|
||||
boolValWithDefault(c.ACL.TokenReplication, boolValWithDefault(c.EnableACLReplication, enableTokenReplication))
|
||||
|
||||
enableRemoteScriptChecks := boolVal(c.EnableScriptChecks)
|
||||
enableLocalScriptChecks := boolValWithDefault(c.EnableLocalScriptChecks, enableRemoteScriptChecks)
|
||||
|
||||
|
@ -787,7 +766,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
return RuntimeConfig{}, fmt.Errorf("config_entries.bootstrap[%d]: %s", i, err)
|
||||
}
|
||||
if err := entry.Validate(); err != nil {
|
||||
return RuntimeConfig{}, fmt.Errorf("config_entries.bootstrap[%d]: %s", i, err)
|
||||
return RuntimeConfig{}, fmt.Errorf("config_entries.bootstrap[%d]: %w", i, err)
|
||||
}
|
||||
configEntries = append(configEntries, entry)
|
||||
}
|
||||
|
@ -871,24 +850,24 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
Datacenter: datacenter,
|
||||
NodeName: b.nodeName(c.NodeName),
|
||||
ACLPolicyTTL: b.durationVal("acl.policy_ttl", c.ACL.PolicyTTL),
|
||||
ACLTokenTTL: b.durationValWithDefault("acl.token_ttl", c.ACL.TokenTTL, b.durationVal("acl_ttl", c.ACLTTL)),
|
||||
ACLTokenTTL: b.durationVal("acl.token_ttl", c.ACL.TokenTTL),
|
||||
ACLRoleTTL: b.durationVal("acl.role_ttl", c.ACL.RoleTTL),
|
||||
ACLDownPolicy: stringValWithDefault(c.ACL.DownPolicy, stringVal(c.ACLDownPolicy)),
|
||||
ACLDefaultPolicy: stringValWithDefault(c.ACL.DefaultPolicy, stringVal(c.ACLDefaultPolicy)),
|
||||
ACLDownPolicy: stringVal(c.ACL.DownPolicy),
|
||||
ACLDefaultPolicy: stringVal(c.ACL.DefaultPolicy),
|
||||
},
|
||||
|
||||
ACLEnableKeyListPolicy: boolValWithDefault(c.ACL.EnableKeyListPolicy, boolVal(c.ACLEnableKeyListPolicy)),
|
||||
ACLMasterToken: stringValWithDefault(c.ACL.Tokens.Master, stringVal(c.ACLMasterToken)),
|
||||
ACLEnableKeyListPolicy: boolVal(c.ACL.EnableKeyListPolicy),
|
||||
ACLMasterToken: stringVal(c.ACL.Tokens.Master),
|
||||
|
||||
ACLTokenReplication: boolValWithDefault(c.ACL.TokenReplication, boolValWithDefault(c.EnableACLReplication, enableTokenReplication)),
|
||||
ACLTokenReplication: boolVal(c.ACL.TokenReplication),
|
||||
|
||||
ACLTokens: token.Config{
|
||||
DataDir: dataDir,
|
||||
EnablePersistence: boolValWithDefault(c.ACL.EnableTokenPersistence, false),
|
||||
ACLDefaultToken: stringValWithDefault(c.ACL.Tokens.Default, stringVal(c.ACLToken)),
|
||||
ACLAgentToken: stringValWithDefault(c.ACL.Tokens.Agent, stringVal(c.ACLAgentToken)),
|
||||
ACLAgentMasterToken: stringValWithDefault(c.ACL.Tokens.AgentMaster, stringVal(c.ACLAgentMasterToken)),
|
||||
ACLReplicationToken: stringValWithDefault(c.ACL.Tokens.Replication, stringVal(c.ACLReplicationToken)),
|
||||
ACLDefaultToken: stringVal(c.ACL.Tokens.Default),
|
||||
ACLAgentToken: stringVal(c.ACL.Tokens.Agent),
|
||||
ACLAgentMasterToken: stringVal(c.ACL.Tokens.AgentMaster),
|
||||
ACLReplicationToken: stringVal(c.ACL.Tokens.Replication),
|
||||
},
|
||||
|
||||
// Autopilot
|
||||
|
@ -1023,8 +1002,8 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
EncryptKey: stringVal(c.EncryptKey),
|
||||
EncryptVerifyIncoming: boolVal(c.EncryptVerifyIncoming),
|
||||
EncryptVerifyOutgoing: boolVal(c.EncryptVerifyOutgoing),
|
||||
XDSPort: xdsPort,
|
||||
XDSAddrs: xdsAddrs,
|
||||
GRPCPort: grpcPort,
|
||||
GRPCAddrs: grpcAddrs,
|
||||
HTTPMaxConnsPerClient: intVal(c.Limits.HTTPMaxConnsPerClient),
|
||||
HTTPSHandshakeTimeout: b.durationVal("limits.https_handshake_timeout", c.Limits.HTTPSHandshakeTimeout),
|
||||
KeyFile: stringVal(c.KeyFile),
|
||||
|
@ -1415,6 +1394,12 @@ func (b *builder) validate(rt RuntimeConfig) error {
|
|||
return fmt.Errorf("service %q: %s", s.Name, err)
|
||||
}
|
||||
}
|
||||
// Check for errors in the node check definitions
|
||||
for _, c := range rt.Checks {
|
||||
if err := c.CheckType().Validate(); err != nil {
|
||||
return fmt.Errorf("check %q: %w", c.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate the given Connect CA provider config
|
||||
validCAProviders := map[string]bool{
|
||||
|
@ -1584,6 +1569,7 @@ func (b *builder) checkVal(v *CheckDefinition) *structs.CheckDefinition {
|
|||
TTL: b.durationVal(fmt.Sprintf("check[%s].ttl", id), v.TTL),
|
||||
SuccessBeforePassing: intVal(v.SuccessBeforePassing),
|
||||
FailuresBeforeCritical: intVal(v.FailuresBeforeCritical),
|
||||
FailuresBeforeWarning: intValWithDefault(v.FailuresBeforeWarning, intVal(v.FailuresBeforeCritical)),
|
||||
H2PING: stringVal(v.H2PING),
|
||||
DeregisterCriticalServiceAfter: b.durationVal(fmt.Sprintf("check[%s].deregister_critical_service_after", id), v.DeregisterCriticalServiceAfter),
|
||||
OutputMaxSize: intValWithDefault(v.OutputMaxSize, checks.DefaultBufSize),
|
||||
|
|
|
@ -15,7 +15,7 @@ type Source interface {
// Source returns an identifier for the Source that can be used in error message
Source() string
// Parse a configuration and return the result.
Parse() (Config, mapstructure.Metadata, error)
Parse() (Config, Metadata, error)
}

// ErrNoData indicates to Builder.Build that the source contained no data, and
|
||||
|
@ -34,9 +34,10 @@ func (f FileSource) Source() string {
|
|||
}
|
||||
|
||||
// Parse a config file in either JSON or HCL format.
|
||||
func (f FileSource) Parse() (Config, mapstructure.Metadata, error) {
|
||||
func (f FileSource) Parse() (Config, Metadata, error) {
|
||||
m := Metadata{}
|
||||
if f.Name == "" || f.Data == "" {
|
||||
return Config{}, mapstructure.Metadata{}, ErrNoData
|
||||
return Config{}, m, ErrNoData
|
||||
}
|
||||
|
||||
var raw map[string]interface{}
|
||||
|
@ -51,10 +52,10 @@ func (f FileSource) Parse() (Config, mapstructure.Metadata, error) {
|
|||
err = fmt.Errorf("invalid format: %s", f.Format)
|
||||
}
|
||||
if err != nil {
|
||||
return Config{}, md, err
|
||||
return Config{}, m, err
|
||||
}
|
||||
|
||||
var c Config
|
||||
var target decodeTarget
|
||||
d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
|
||||
DecodeHook: mapstructure.ComposeDecodeHookFunc(
|
||||
// decode.HookWeakDecodeFromSlice is only necessary when reading from
|
||||
|
@ -66,16 +67,30 @@ func (f FileSource) Parse() (Config, mapstructure.Metadata, error) {
|
|||
decode.HookTranslateKeys,
|
||||
),
|
||||
Metadata: &md,
|
||||
Result: &c,
|
||||
Result: &target,
|
||||
})
|
||||
if err != nil {
|
||||
return Config{}, md, err
|
||||
return Config{}, m, err
|
||||
}
|
||||
if err := d.Decode(raw); err != nil {
|
||||
return Config{}, md, err
|
||||
return Config{}, m, err
|
||||
}
|
||||
|
||||
return c, md, nil
|
||||
c, warns := applyDeprecatedConfig(&target)
|
||||
m.Unused = md.Unused
|
||||
m.Keys = md.Keys
|
||||
m.Warnings = warns
|
||||
return c, m, nil
}

// Metadata created by Source.Parse
type Metadata struct {
// Keys used in the config file.
Keys []string
// Unused keys that did not match any struct fields.
Unused []string
// Warnings caused by deprecated fields
Warnings []string
}

// LiteralSource implements Source and returns an existing Config struct.
@ -88,8 +103,13 @@ func (l LiteralSource) Source() string {
return l.Name
}

func (l LiteralSource) Parse() (Config, mapstructure.Metadata, error) {
return l.Config, mapstructure.Metadata{}, nil
func (l LiteralSource) Parse() (Config, Metadata, error) {
return l.Config, Metadata{}, nil
}

type decodeTarget struct {
DeprecatedConfig `mapstructure:",squash"`
Config `mapstructure:",squash"`
}
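`Source.Parse` now returns the package's own `Metadata` instead of `mapstructure.Metadata`, so warnings produced by `applyDeprecatedConfig` travel with the parse result. A minimal sketch of how a caller could surface them — the HCL payload and the logging are illustrative only:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/agent/config"
)

func main() {
	src := config.FileSource{
		Name:   "agent.hcl",
		Format: "hcl",
		Data:   `acl_datacenter = "dc1"`,
	}
	_, md, err := src.Parse()
	if err != nil {
		log.Fatal(err)
	}
	// Metadata.Warnings carries the deprecation warnings collected while
	// decoding, e.g. the acl_datacenter -> primary_datacenter notice.
	for _, w := range md.Warnings {
		log.Println("config warning:", w)
	}
}
```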
|
||||
|
||||
// Cache configuration for the agent/cache.
|
||||
|
@ -110,26 +130,6 @@ type Cache struct {
|
|||
// configuration it should be treated as an external API which cannot be
|
||||
// changed and refactored at will since this will break existing setups.
|
||||
type Config struct {
|
||||
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl.tokens" stanza
|
||||
ACLAgentMasterToken *string `mapstructure:"acl_agent_master_token"`
|
||||
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl.tokens" stanza
|
||||
ACLAgentToken *string `mapstructure:"acl_agent_token"`
|
||||
// DEPRECATED (ACL-Legacy-Compat) - moved to "primary_datacenter"
|
||||
ACLDatacenter *string `mapstructure:"acl_datacenter"`
|
||||
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl" stanza
|
||||
ACLDefaultPolicy *string `mapstructure:"acl_default_policy"`
|
||||
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl" stanza
|
||||
ACLDownPolicy *string `mapstructure:"acl_down_policy"`
|
||||
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl" stanza
|
||||
ACLEnableKeyListPolicy *bool `mapstructure:"acl_enable_key_list_policy"`
|
||||
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl" stanza
|
||||
ACLMasterToken *string `mapstructure:"acl_master_token"`
|
||||
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl.tokens" stanza
|
||||
ACLReplicationToken *string `mapstructure:"acl_replication_token"`
|
||||
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl.tokens" stanza
|
||||
ACLTTL *string `mapstructure:"acl_ttl"`
|
||||
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl.tokens" stanza
|
||||
ACLToken *string `mapstructure:"acl_token"`
|
||||
ACL ACL `mapstructure:"acl"`
|
||||
Addresses Addresses `mapstructure:"addresses"`
|
||||
AdvertiseAddrLAN *string `mapstructure:"advertise_addr"`
|
||||
|
@ -172,7 +172,6 @@ type Config struct {
|
|||
DisableUpdateCheck *bool `mapstructure:"disable_update_check"`
|
||||
DiscardCheckOutput *bool `mapstructure:"discard_check_output"`
|
||||
DiscoveryMaxStale *string `mapstructure:"discovery_max_stale"`
|
||||
EnableACLReplication *bool `mapstructure:"enable_acl_replication"`
|
||||
EnableAgentTLSForChecks *bool `mapstructure:"enable_agent_tls_for_checks"`
|
||||
EnableCentralServiceConfig *bool `mapstructure:"enable_central_service_config"`
|
||||
EnableDebug *bool `mapstructure:"enable_debug"`
|
||||
|
@ -338,10 +337,7 @@ type Addresses struct {
|
|||
DNS *string `mapstructure:"dns"`
|
||||
HTTP *string `mapstructure:"http"`
|
||||
HTTPS *string `mapstructure:"https"`
|
||||
XDS *string `mapstructure:"xds"`
|
||||
|
||||
// Deprecated: replaced by XDS
|
||||
GRPC *string `mapstructure:"grpc"`
|
||||
GRPC *string `mapstructure:"grpc"`
|
||||
}
|
||||
|
||||
type AdvertiseAddrsConfig struct {
|
||||
|
@ -424,6 +420,7 @@ type CheckDefinition struct {
|
|||
TTL *string `mapstructure:"ttl"`
|
||||
H2PING *string `mapstructure:"h2ping"`
|
||||
SuccessBeforePassing *int `mapstructure:"success_before_passing"`
|
||||
FailuresBeforeWarning *int `mapstructure:"failures_before_warning"`
|
||||
FailuresBeforeCritical *int `mapstructure:"failures_before_critical"`
|
||||
DeregisterCriticalServiceAfter *string `mapstructure:"deregister_critical_service_after" alias:"deregistercriticalserviceafter"`
|
||||
|
||||
|
@ -693,16 +690,13 @@ type Ports struct {
|
|||
SerfLAN *int `mapstructure:"serf_lan"`
|
||||
SerfWAN *int `mapstructure:"serf_wan"`
|
||||
Server *int `mapstructure:"server"`
|
||||
XDS *int `mapstructure:"xds"`
|
||||
GRPC *int `mapstructure:"grpc"`
|
||||
ProxyMinPort *int `mapstructure:"proxy_min_port"`
|
||||
ProxyMaxPort *int `mapstructure:"proxy_max_port"`
|
||||
SidecarMinPort *int `mapstructure:"sidecar_min_port"`
|
||||
SidecarMaxPort *int `mapstructure:"sidecar_max_port"`
|
||||
ExposeMinPort *int `mapstructure:"expose_min_port"`
|
||||
ExposeMaxPort *int `mapstructure:"expose_max_port"`
|
||||
|
||||
// Deprecated: replaced by XDS
|
||||
GRPC *int `mapstructure:"grpc"`
|
||||
}
|
||||
|
||||
type UnixSocket struct {
|
||||
|
|
|
@ -27,11 +27,11 @@ func DefaultSource() Source {
|
|||
Name: "default",
|
||||
Format: "hcl",
|
||||
Data: `
|
||||
acl_default_policy = "allow"
|
||||
acl_down_policy = "extend-cache"
|
||||
acl_ttl = "30s"
|
||||
acl = {
|
||||
token_ttl = "30s"
|
||||
policy_ttl = "30s"
|
||||
default_policy = "allow"
|
||||
down_policy = "extend-cache"
|
||||
}
|
||||
bind_addr = "0.0.0.0"
|
||||
bootstrap = false
|
||||
|
|
|
@ -0,0 +1,127 @@
package config

import "fmt"

type DeprecatedConfig struct {
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl.tokens" stanza
ACLAgentMasterToken *string `mapstructure:"acl_agent_master_token"`
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl.tokens" stanza
ACLAgentToken *string `mapstructure:"acl_agent_token"`
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl.tokens" stanza
ACLToken *string `mapstructure:"acl_token"`
// DEPRECATED (ACL-Legacy-Compat) - moved to "acl.enable_key_list_policy"
ACLEnableKeyListPolicy *bool `mapstructure:"acl_enable_key_list_policy"`

// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl" stanza
ACLMasterToken *string `mapstructure:"acl_master_token"`
// DEPRECATED (ACL-Legacy-Compat) - moved into the "acl.tokens" stanza
ACLReplicationToken *string `mapstructure:"acl_replication_token"`
// DEPRECATED (ACL-Legacy-Compat) - moved to "acl.enable_token_replication"
EnableACLReplication *bool `mapstructure:"enable_acl_replication"`

// DEPRECATED (ACL-Legacy-Compat) - moved to "primary_datacenter"
ACLDatacenter *string `mapstructure:"acl_datacenter"`

// DEPRECATED (ACL-Legacy-Compat) - moved to "acl.default_policy"
ACLDefaultPolicy *string `mapstructure:"acl_default_policy"`
// DEPRECATED (ACL-Legacy-Compat) - moved to "acl.down_policy"
ACLDownPolicy *string `mapstructure:"acl_down_policy"`
// DEPRECATED (ACL-Legacy-Compat) - moved to "acl.token_ttl"
ACLTTL *string `mapstructure:"acl_ttl"`
}

func applyDeprecatedConfig(d *decodeTarget) (Config, []string) {
|
||||
dep := d.DeprecatedConfig
|
||||
var warns []string
|
||||
|
||||
if dep.ACLAgentMasterToken != nil {
|
||||
if d.Config.ACL.Tokens.AgentMaster == nil {
|
||||
d.Config.ACL.Tokens.AgentMaster = dep.ACLAgentMasterToken
|
||||
}
|
||||
warns = append(warns, deprecationWarning("acl_agent_master_token", "acl.tokens.agent_master"))
|
||||
}
|
||||
|
||||
if dep.ACLAgentToken != nil {
|
||||
if d.Config.ACL.Tokens.Agent == nil {
|
||||
d.Config.ACL.Tokens.Agent = dep.ACLAgentToken
|
||||
}
|
||||
warns = append(warns, deprecationWarning("acl_agent_token", "acl.tokens.agent"))
|
||||
}
|
||||
|
||||
if dep.ACLToken != nil {
|
||||
if d.Config.ACL.Tokens.Default == nil {
|
||||
d.Config.ACL.Tokens.Default = dep.ACLToken
|
||||
}
|
||||
warns = append(warns, deprecationWarning("acl_token", "acl.tokens.default"))
|
||||
}
|
||||
|
||||
if dep.ACLMasterToken != nil {
|
||||
if d.Config.ACL.Tokens.Master == nil {
|
||||
d.Config.ACL.Tokens.Master = dep.ACLMasterToken
|
||||
}
|
||||
warns = append(warns, deprecationWarning("acl_master_token", "acl.tokens.master"))
|
||||
}
|
||||
|
||||
if dep.ACLReplicationToken != nil {
|
||||
if d.Config.ACL.Tokens.Replication == nil {
|
||||
d.Config.ACL.Tokens.Replication = dep.ACLReplicationToken
|
||||
}
|
||||
d.Config.ACL.TokenReplication = pBool(true)
|
||||
warns = append(warns, deprecationWarning("acl_replication_token", "acl.tokens.replication"))
|
||||
}
|
||||
|
||||
if dep.EnableACLReplication != nil {
|
||||
if d.Config.ACL.TokenReplication == nil {
|
||||
d.Config.ACL.TokenReplication = dep.EnableACLReplication
|
||||
}
|
||||
warns = append(warns, deprecationWarning("enable_acl_replication", "acl.enable_token_replication"))
|
||||
}
|
||||
|
||||
if dep.ACLDatacenter != nil {
|
||||
if d.Config.PrimaryDatacenter == nil {
|
||||
d.Config.PrimaryDatacenter = dep.ACLDatacenter
|
||||
}
|
||||
|
||||
// when the acl_datacenter config is used it implicitly enables acls
|
||||
d.Config.ACL.Enabled = pBool(true)
|
||||
warns = append(warns, deprecationWarning("acl_datacenter", "primary_datacenter"))
|
||||
}
|
||||
|
||||
if dep.ACLDefaultPolicy != nil {
|
||||
if d.Config.ACL.DefaultPolicy == nil {
|
||||
d.Config.ACL.DefaultPolicy = dep.ACLDefaultPolicy
|
||||
}
|
||||
warns = append(warns, deprecationWarning("acl_default_policy", "acl.default_policy"))
|
||||
}
|
||||
|
||||
if dep.ACLDownPolicy != nil {
|
||||
if d.Config.ACL.DownPolicy == nil {
|
||||
d.Config.ACL.DownPolicy = dep.ACLDownPolicy
|
||||
}
|
||||
warns = append(warns, deprecationWarning("acl_down_policy", "acl.down_policy"))
|
||||
}
|
||||
|
||||
if dep.ACLTTL != nil {
|
||||
if d.Config.ACL.TokenTTL == nil {
|
||||
d.Config.ACL.TokenTTL = dep.ACLTTL
|
||||
}
|
||||
warns = append(warns, deprecationWarning("acl_ttl", "acl.token_ttl"))
|
||||
}
|
||||
|
||||
if dep.ACLEnableKeyListPolicy != nil {
|
||||
if d.Config.ACL.EnableKeyListPolicy == nil {
|
||||
d.Config.ACL.EnableKeyListPolicy = dep.ACLEnableKeyListPolicy
|
||||
}
|
||||
warns = append(warns, deprecationWarning("acl_enable_key_list_policy", "acl.enable_key_list_policy"))
|
||||
}
|
||||
|
||||
return d.Config, warns
|
||||
}
|
||||
|
||||
func deprecationWarning(old, new string) string {
|
||||
return fmt.Sprintf("The '%v' field is deprecated. Use the '%v' field instead.", old, new)
|
||||
}
|
||||
|
||||
func pBool(v bool) *bool {
|
||||
return &v
|
||||
}
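Every block in `applyDeprecatedConfig` follows the same precedence rule: a value set under the new key wins, the legacy key only fills a gap, and a deprecation warning is recorded whenever the legacy key appears at all. A compact, hypothetical distillation of that rule for the `*string` fields — not part of the diff; it reuses `deprecationWarning` from this file:

```go
// applyDeprecatedString is an illustrative helper, not in the actual change:
// it returns the effective value and records a warning when the old key is set.
func applyDeprecatedString(newVal, oldVal *string, oldKey, newKey string, warns *[]string) *string {
	if oldVal == nil {
		// Legacy key absent: nothing to migrate, nothing to warn about.
		return newVal
	}
	if newVal == nil {
		// New key unset: fall back to the deprecated value.
		newVal = oldVal
	}
	*warns = append(*warns, deprecationWarning(oldKey, newKey))
	return newVal
}
```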
|
|
@ -0,0 +1,93 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLoad_DeprecatedConfig(t *testing.T) {
|
||||
opts := LoadOpts{
|
||||
HCL: []string{`
|
||||
data_dir = "/foo"
|
||||
|
||||
acl_datacenter = "dcone"
|
||||
|
||||
acl_agent_master_token = "token1"
|
||||
acl_agent_token = "token2"
|
||||
acl_token = "token3"
|
||||
|
||||
acl_master_token = "token4"
|
||||
acl_replication_token = "token5"
|
||||
|
||||
acl_default_policy = "deny"
|
||||
acl_down_policy = "async-cache"
|
||||
|
||||
acl_ttl = "3h"
|
||||
acl_enable_key_list_policy = true
|
||||
|
||||
`},
|
||||
}
|
||||
patchLoadOptsShims(&opts)
|
||||
result, err := Load(opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectWarns := []string{
|
||||
deprecationWarning("acl_agent_master_token", "acl.tokens.agent_master"),
|
||||
deprecationWarning("acl_agent_token", "acl.tokens.agent"),
|
||||
deprecationWarning("acl_datacenter", "primary_datacenter"),
|
||||
deprecationWarning("acl_default_policy", "acl.default_policy"),
|
||||
deprecationWarning("acl_down_policy", "acl.down_policy"),
|
||||
deprecationWarning("acl_enable_key_list_policy", "acl.enable_key_list_policy"),
|
||||
deprecationWarning("acl_master_token", "acl.tokens.master"),
|
||||
deprecationWarning("acl_replication_token", "acl.tokens.replication"),
|
||||
deprecationWarning("acl_token", "acl.tokens.default"),
|
||||
deprecationWarning("acl_ttl", "acl.token_ttl"),
|
||||
}
|
||||
sort.Strings(result.Warnings)
|
||||
require.Equal(t, expectWarns, result.Warnings)
|
||||
// Ideally this would compare against the entire result.RuntimeConfig, but
|
||||
// we have so many non-zero defaults in that response that the noise of those
|
||||
// defaults makes this test difficult to read. So as a workaround, compare
|
||||
// specific values.
|
||||
rt := result.RuntimeConfig
|
||||
require.Equal(t, true, rt.ACLsEnabled)
|
||||
require.Equal(t, "dcone", rt.PrimaryDatacenter)
|
||||
require.Equal(t, "token1", rt.ACLTokens.ACLAgentMasterToken)
|
||||
require.Equal(t, "token2", rt.ACLTokens.ACLAgentToken)
|
||||
require.Equal(t, "token3", rt.ACLTokens.ACLDefaultToken)
|
||||
require.Equal(t, "token4", rt.ACLMasterToken)
|
||||
require.Equal(t, "token5", rt.ACLTokens.ACLReplicationToken)
|
||||
require.Equal(t, "deny", rt.ACLResolverSettings.ACLDefaultPolicy)
|
||||
require.Equal(t, "async-cache", rt.ACLResolverSettings.ACLDownPolicy)
|
||||
require.Equal(t, 3*time.Hour, rt.ACLResolverSettings.ACLTokenTTL)
|
||||
require.Equal(t, true, rt.ACLEnableKeyListPolicy)
|
||||
}
|
||||
|
||||
func TestLoad_DeprecatedConfig_ACLReplication(t *testing.T) {
|
||||
opts := LoadOpts{
|
||||
HCL: []string{`
|
||||
data_dir = "/foo"
|
||||
|
||||
enable_acl_replication = true
|
||||
|
||||
`},
|
||||
}
|
||||
patchLoadOptsShims(&opts)
|
||||
result, err := Load(opts)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectWarns := []string{
|
||||
deprecationWarning("enable_acl_replication", "acl.enable_token_replication"),
|
||||
}
|
||||
sort.Strings(result.Warnings)
|
||||
require.Equal(t, expectWarns, result.Warnings)
|
||||
// Ideally this would compare against the entire result.RuntimeConfig, but
|
||||
// we have so many non-zero defaults in that response that the noise of those
|
||||
// defaults makes this test difficult to read. So as a workaround, compare
|
||||
// specific values.
|
||||
rt := result.RuntimeConfig
|
||||
require.Equal(t, true, rt.ACLTokenReplication)
|
||||
}
|
|
@ -53,8 +53,7 @@ func AddFlags(fs *flag.FlagSet, f *LoadOpts) {
|
|||
add(&f.FlagValues.EnableLocalScriptChecks, "enable-local-script-checks", "Enables health check scripts from configuration file.")
|
||||
add(&f.FlagValues.HTTPConfig.AllowWriteHTTPFrom, "allow-write-http-from", "Only allow write endpoint calls from given network. CIDR format, can be specified multiple times.")
|
||||
add(&f.FlagValues.EncryptKey, "encrypt", "Provides the gossip encryption key.")
|
||||
add(&f.FlagValues.Ports.XDS, "grpc-port", "Deprecated, use xds-port")
|
||||
add(&f.FlagValues.Ports.XDS, "xds-port", "Sets the xDS gRPC port to listen on (used by Envoy proxies).")
|
||||
add(&f.FlagValues.Ports.GRPC, "grpc-port", "Sets the gRPC API port to listen on (currently needed for Envoy xDS only).")
|
||||
add(&f.FlagValues.Ports.HTTP, "http-port", "Sets the HTTP API port to listen on.")
|
||||
add(&f.FlagValues.Ports.HTTPS, "https-port", "Sets the HTTPS API port to listen on.")
|
||||
add(&f.FlagValues.StartJoinAddrsLAN, "join", "Address of an agent to join at start time. Can be specified multiple times.")
|
||||
|
|
|
@ -49,7 +49,7 @@ func TestAddFlags_WithParse(t *testing.T) {
|
|||
},
|
||||
{
|
||||
args: []string{`-grpc-port`, `1`},
|
||||
expected: LoadOpts{FlagValues: Config{Ports: Ports{XDS: pInt(1)}}},
|
||||
expected: LoadOpts{FlagValues: Config{Ports: Ports{GRPC: pInt(1)}}},
|
||||
},
|
||||
{
|
||||
args: []string{`-http-port`, `1`},
|
||||
|
|
|
@ -52,7 +52,6 @@ func TestMerge(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func pBool(v bool) *bool { return &v }
|
||||
func pInt(v int) *int { return &v }
|
||||
func pString(v string) *string { return &v }
|
||||
func pDuration(v time.Duration) *string { s := v.String(); return &s }
|
||||
|
|
|
@ -434,6 +434,9 @@ type RuntimeConfig struct {
|
|||
// tls_skip_verify = (true|false)
|
||||
// timeout = "duration"
|
||||
// ttl = "duration"
|
||||
// success_before_passing = int
|
||||
// failures_before_warning = int
|
||||
// failures_before_critical = int
|
||||
// deregister_critical_service_after = "duration"
|
||||
// },
|
||||
// ...
|
||||
|
@ -672,27 +675,27 @@ type RuntimeConfig struct {
|
|||
// hcl: encrypt_verify_outgoing = (true|false)
|
||||
EncryptVerifyOutgoing bool
|
||||
|
||||
// XDSPort is the port the xDS gRPC server listens on. This port only
|
||||
// GRPCPort is the port the gRPC server listens on. Currently this only
|
||||
// exposes the xDS and ext_authz APIs for Envoy and it is disabled by default.
|
||||
//
|
||||
// hcl: ports { xds = int }
|
||||
// flags: -xds-port int
|
||||
XDSPort int
|
||||
// hcl: ports { grpc = int }
|
||||
// flags: -grpc-port int
|
||||
GRPCPort int
|
||||
|
||||
// XDSAddrs contains the list of TCP addresses and UNIX sockets the xDS gRPC
|
||||
// server will bind to. If the xDS endpoint is disabled (ports.xds <= 0)
|
||||
// GRPCAddrs contains the list of TCP addresses and UNIX sockets the gRPC
|
||||
// server will bind to. If the gRPC endpoint is disabled (ports.grpc <= 0)
|
||||
// the list is empty.
|
||||
//
|
||||
// The addresses are taken from 'addresses.xds' which should contain a
|
||||
// The addresses are taken from 'addresses.grpc' which should contain a
|
||||
// space separated list of ip addresses, UNIX socket paths and/or
|
||||
// go-sockaddr templates. UNIX socket paths must be written as
|
||||
// 'unix://<full path>', e.g. 'unix:///var/run/consul-xds.sock'.
|
||||
// 'unix://<full path>', e.g. 'unix:///var/run/consul-grpc.sock'.
|
||||
//
|
||||
// If 'addresses.xds' was not provided the 'client_addr' addresses are
|
||||
// If 'addresses.grpc' was not provided the 'client_addr' addresses are
|
||||
// used.
|
||||
//
|
||||
// hcl: client_addr = string addresses { xds = string } ports { xds = int }
|
||||
XDSAddrs []net.Addr
|
||||
// hcl: client_addr = string addresses { grpc = string } ports { grpc = int }
|
||||
GRPCAddrs []net.Addr
|
||||
|
||||
// HTTPAddrs contains the list of TCP addresses and UNIX sockets the HTTP
|
||||
// server will bind to. If the HTTP endpoint is disabled (ports.http <= 0)
|
||||
|
|
|
@ -338,8 +338,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.GossipWANProbeTimeout = 100 * time.Millisecond
|
||||
rt.GossipWANSuspicionMult = 3
|
||||
rt.ConsulServerHealthInterval = 10 * time.Millisecond
|
||||
rt.XDSPort = 8502
|
||||
rt.XDSAddrs = []net.Addr{tcpAddr("127.0.0.1:8502")}
|
||||
rt.GRPCPort = 8502
|
||||
rt.GRPCAddrs = []net.Addr{tcpAddr("127.0.0.1:8502")}
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
})
|
||||
|
@ -1048,8 +1048,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.HTTPAddrs = []net.Addr{tcpAddr("0.0.0.0:2")}
|
||||
rt.HTTPSPort = 3
|
||||
rt.HTTPSAddrs = []net.Addr{tcpAddr("0.0.0.0:3")}
|
||||
rt.XDSPort = 4
|
||||
rt.XDSAddrs = []net.Addr{tcpAddr("0.0.0.0:4")}
|
||||
rt.GRPCPort = 4
|
||||
rt.GRPCAddrs = []net.Addr{tcpAddr("0.0.0.0:4")}
|
||||
rt.DataDir = dataDir
|
||||
},
|
||||
})
|
||||
|
@ -1121,8 +1121,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.HTTPAddrs = []net.Addr{tcpAddr("2.2.2.2:2")}
|
||||
rt.HTTPSPort = 3
|
||||
rt.HTTPSAddrs = []net.Addr{tcpAddr("3.3.3.3:3")}
|
||||
rt.XDSPort = 4
|
||||
rt.XDSAddrs = []net.Addr{tcpAddr("4.4.4.4:4")}
|
||||
rt.GRPCPort = 4
|
||||
rt.GRPCAddrs = []net.Addr{tcpAddr("4.4.4.4:4")}
|
||||
rt.DataDir = dataDir
|
||||
},
|
||||
})
|
||||
|
@ -1145,8 +1145,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.HTTPAddrs = []net.Addr{tcpAddr("1.2.3.4:2"), tcpAddr("[2001:db8::1]:2")}
|
||||
rt.HTTPSPort = 3
|
||||
rt.HTTPSAddrs = []net.Addr{tcpAddr("1.2.3.4:3"), tcpAddr("[2001:db8::1]:3")}
|
||||
rt.XDSPort = 4
|
||||
rt.XDSAddrs = []net.Addr{tcpAddr("1.2.3.4:4"), tcpAddr("[2001:db8::1]:4")}
|
||||
rt.GRPCPort = 4
|
||||
rt.GRPCAddrs = []net.Addr{tcpAddr("1.2.3.4:4"), tcpAddr("[2001:db8::1]:4")}
|
||||
rt.DataDir = dataDir
|
||||
},
|
||||
})
|
||||
|
@ -1181,8 +1181,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.HTTPAddrs = []net.Addr{tcpAddr("2.2.2.2:2"), unixAddr("unix://http"), tcpAddr("[2001:db8::20]:2")}
|
||||
rt.HTTPSPort = 3
|
||||
rt.HTTPSAddrs = []net.Addr{tcpAddr("3.3.3.3:3"), unixAddr("unix://https"), tcpAddr("[2001:db8::30]:3")}
|
||||
rt.XDSPort = 4
|
||||
rt.XDSAddrs = []net.Addr{tcpAddr("4.4.4.4:4"), unixAddr("unix://grpc"), tcpAddr("[2001:db8::40]:4")}
|
||||
rt.GRPCPort = 4
|
||||
rt.GRPCAddrs = []net.Addr{tcpAddr("4.4.4.4:4"), unixAddr("unix://grpc"), tcpAddr("[2001:db8::40]:4")}
|
||||
rt.DataDir = dataDir
|
||||
},
|
||||
})
|
||||
|
@ -1633,16 +1633,28 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
expectedWarnings: []string{`The 'acl_datacenter' field is deprecated. Use the 'primary_datacenter' field instead.`},
|
||||
})
|
||||
run(t, testCase{
|
||||
desc: "acl_replication_token enables acl replication",
|
||||
args: []string{`-data-dir=` + dataDir},
|
||||
json: []string{`{ "acl_replication_token": "a" }`},
|
||||
hcl: []string{`acl_replication_token = "a"`},
|
||||
desc: "acl_replication_token enables acl replication",
|
||||
args: []string{`-data-dir=` + dataDir},
|
||||
json: []string{`{ "acl_replication_token": "a" }`},
|
||||
hcl: []string{`acl_replication_token = "a"`},
|
||||
expectedWarnings: []string{deprecationWarning("acl_replication_token", "acl.tokens.replication")},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.ACLTokens.ACLReplicationToken = "a"
|
||||
rt.ACLTokenReplication = true
|
||||
rt.DataDir = dataDir
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
desc: "acl.tokens.replace does not enable acl replication",
|
||||
args: []string{`-data-dir=` + dataDir},
|
||||
json: []string{`{ "acl": { "tokens": { "replication": "a" }}}`},
|
||||
hcl: []string{`acl { tokens { replication = "a"}}`},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.ACLTokens.ACLReplicationToken = "a"
|
||||
rt.ACLTokenReplication = false
|
||||
rt.DataDir = dataDir
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
desc: "acl_enforce_version_8 is deprecated",
|
||||
args: []string{`-data-dir=` + dataDir},
|
||||
|
@ -2330,17 +2342,17 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
`-data-dir=` + dataDir,
|
||||
},
|
||||
json: []string{
|
||||
`{ "check": { "name": "a", "args": ["/bin/true"] } }`,
|
||||
`{ "check": { "name": "b", "args": ["/bin/false"] } }`,
|
||||
`{ "check": { "name": "a", "args": ["/bin/true"], "interval": "1s" } }`,
|
||||
`{ "check": { "name": "b", "args": ["/bin/false"], "interval": "1s" } }`,
|
||||
},
|
||||
hcl: []string{
|
||||
`check = { name = "a" args = ["/bin/true"] }`,
|
||||
`check = { name = "b" args = ["/bin/false"] }`,
|
||||
`check = { name = "a" args = ["/bin/true"] interval = "1s"}`,
|
||||
`check = { name = "b" args = ["/bin/false"] interval = "1s" }`,
|
||||
},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.Checks = []*structs.CheckDefinition{
|
||||
{Name: "a", ScriptArgs: []string{"/bin/true"}, OutputMaxSize: checks.DefaultBufSize},
|
||||
{Name: "b", ScriptArgs: []string{"/bin/false"}, OutputMaxSize: checks.DefaultBufSize},
|
||||
{Name: "a", ScriptArgs: []string{"/bin/true"}, OutputMaxSize: checks.DefaultBufSize, Interval: time.Second},
|
||||
{Name: "b", ScriptArgs: []string{"/bin/false"}, OutputMaxSize: checks.DefaultBufSize, Interval: time.Second},
|
||||
}
|
||||
rt.DataDir = dataDir
|
||||
},
|
||||
|
@ -2351,14 +2363,14 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
`-data-dir=` + dataDir,
|
||||
},
|
||||
json: []string{
|
||||
`{ "check": { "name": "a", "grpc": "localhost:12345/foo", "grpc_use_tls": true } }`,
|
||||
`{ "check": { "name": "a", "grpc": "localhost:12345/foo", "grpc_use_tls": true, "interval": "1s" } }`,
|
||||
},
|
||||
hcl: []string{
|
||||
`check = { name = "a" grpc = "localhost:12345/foo", grpc_use_tls = true }`,
|
||||
`check = { name = "a" grpc = "localhost:12345/foo", grpc_use_tls = true interval = "1s" }`,
|
||||
},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.Checks = []*structs.CheckDefinition{
|
||||
{Name: "a", GRPC: "localhost:12345/foo", GRPCUseTLS: true, OutputMaxSize: checks.DefaultBufSize},
|
||||
{Name: "a", GRPC: "localhost:12345/foo", GRPCUseTLS: true, OutputMaxSize: checks.DefaultBufSize, Interval: time.Second},
|
||||
}
|
||||
rt.DataDir = dataDir
|
||||
},
|
||||
|
@ -2478,7 +2490,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
"name": "y",
|
||||
"DockerContainerID": "z",
|
||||
"DeregisterCriticalServiceAfter": "10s",
|
||||
"ScriptArgs": ["a", "b"]
|
||||
"ScriptArgs": ["a", "b"],
|
||||
"Interval": "2s"
|
||||
}
|
||||
}
|
||||
}`,
|
||||
|
@ -2500,6 +2513,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
DockerContainerID = "z"
|
||||
DeregisterCriticalServiceAfter = "10s"
|
||||
ScriptArgs = ["a", "b"]
|
||||
Interval = "2s"
|
||||
}
|
||||
}`,
|
||||
},
|
||||
|
@ -2517,12 +2531,13 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
EnableTagOverride: true,
|
||||
Checks: []*structs.CheckType{
|
||||
{
|
||||
CheckID: types.CheckID("x"),
|
||||
CheckID: "x",
|
||||
Name: "y",
|
||||
DockerContainerID: "z",
|
||||
DeregisterCriticalServiceAfter: 10 * time.Second,
|
||||
ScriptArgs: []string{"a", "b"},
|
||||
OutputMaxSize: checks.DefaultBufSize,
|
||||
Interval: 2 * time.Second,
|
||||
},
|
||||
},
|
||||
Weights: &structs.Weights{
|
||||
|
@ -5150,6 +5165,7 @@ func (tc testCase) run(format string, dataDir string) func(t *testing.T) {
|
|||
expected.ACLResolverSettings.Datacenter = expected.Datacenter
|
||||
expected.ACLResolverSettings.ACLsEnabled = expected.ACLsEnabled
|
||||
expected.ACLResolverSettings.NodeName = expected.NodeName
|
||||
expected.ACLResolverSettings.EnterpriseMeta = *structs.NodeEnterpriseMetaInPartition(expected.PartitionOrDefault())
|
||||
|
||||
assertDeepEqual(t, expected, actual, cmpopts.EquateEmpty())
|
||||
}
|
||||
|
@ -5189,6 +5205,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
defaultEntMeta := structs.DefaultEnterpriseMetaInDefaultPartition()
|
||||
nodeEntMeta := structs.NodeEnterpriseMetaInDefaultPartition()
|
||||
expected := &RuntimeConfig{
|
||||
// non-user configurable values
|
||||
AEInterval: time.Minute,
|
||||
|
@ -5241,6 +5258,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
ACLsEnabled: true,
|
||||
Datacenter: "rzo029wg",
|
||||
NodeName: "otlLxGaI",
|
||||
EnterpriseMeta: *nodeEntMeta,
|
||||
ACLDefaultPolicy: "72c2e7a0",
|
||||
ACLDownPolicy: "03eb2aee",
|
||||
ACLTokenTTL: 3321 * time.Second,
|
||||
|
@ -5296,7 +5314,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "bdeb5f6a",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 1813 * time.Second,
|
||||
TTL: 21743 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 14232 * time.Second,
|
||||
},
|
||||
{
|
||||
|
@ -5323,7 +5340,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "6adc3bfb",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 18506 * time.Second,
|
||||
TTL: 31006 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 2366 * time.Second,
|
||||
},
|
||||
{
|
||||
|
@ -5350,7 +5366,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "7BdnzBYk",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 5954 * time.Second,
|
||||
TTL: 30044 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 13209 * time.Second,
|
||||
},
|
||||
},
|
||||
|
@ -5462,8 +5477,8 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
EncryptKey: "A4wELWqH",
|
||||
EncryptVerifyIncoming: true,
|
||||
EncryptVerifyOutgoing: true,
|
||||
XDSPort: 4881,
|
||||
XDSAddrs: []net.Addr{tcpAddr("32.31.61.91:4881")},
|
||||
GRPCPort: 4881,
|
||||
GRPCAddrs: []net.Addr{tcpAddr("32.31.61.91:4881")},
|
||||
HTTPAddrs: []net.Addr{tcpAddr("83.39.91.39:7999")},
|
||||
HTTPBlockEndpoints: []string{"RBvAFcGD", "fWOWFznh"},
|
||||
AllowWriteHTTPFrom: []*net.IPNet{cidr("127.0.0.0/8"), cidr("22.33.44.55/32"), cidr("0.0.0.0/0")},
|
||||
|
@ -5556,7 +5571,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "4f191d4F",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 38333 * time.Second,
|
||||
TTL: 57201 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 44214 * time.Second,
|
||||
},
|
||||
},
|
||||
|
@ -5608,30 +5622,14 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "f43ouY7a",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 34738 * time.Second,
|
||||
TTL: 22773 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 84282 * time.Second,
|
||||
},
|
||||
&structs.CheckType{
|
||||
CheckID: "UHsDeLxG",
|
||||
Name: "PQSaPWlT",
|
||||
Notes: "jKChDOdl",
|
||||
Status: "5qFz6OZn",
|
||||
ScriptArgs: []string{"NMtYWlT9", "vj74JXsm"},
|
||||
HTTP: "1LBDJhw4",
|
||||
Header: map[string][]string{
|
||||
"cXPmnv1M": {"imDqfaBx", "NFxZ1bQe"},
|
||||
"vr7wY7CS": {"EtCoNPPL", "9vAarJ5s"},
|
||||
},
|
||||
Method: "wzByP903",
|
||||
Body: "4I8ucZgZ",
|
||||
CheckID: "UHsDeLxG",
|
||||
Name: "PQSaPWlT",
|
||||
Notes: "jKChDOdl",
|
||||
Status: "5qFz6OZn",
|
||||
OutputMaxSize: checks.DefaultBufSize,
|
||||
TCP: "2exjZIGE",
|
||||
H2PING: "jTDuR1DC",
|
||||
Interval: 5656 * time.Second,
|
||||
DockerContainerID: "5tDBWpfA",
|
||||
Shell: "rlTpLM8s",
|
||||
TLSServerName: "sOv5WTtp",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 4868 * time.Second,
|
||||
TTL: 11222 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 68482 * time.Second,
|
||||
|
@ -5767,7 +5765,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "axw5QPL5",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 18913 * time.Second,
|
||||
TTL: 44743 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 8482 * time.Second,
|
||||
},
|
||||
&structs.CheckType{
|
||||
|
@ -5792,7 +5789,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "7uwWOnUS",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 38282 * time.Second,
|
||||
TTL: 1181 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 4992 * time.Second,
|
||||
},
|
||||
&structs.CheckType{
|
||||
|
@ -5817,7 +5813,6 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
TLSServerName: "ECSHk8WF",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 38483 * time.Second,
|
||||
TTL: 10943 * time.Second,
|
||||
DeregisterCriticalServiceAfter: 68787 * time.Second,
|
||||
},
|
||||
},
|
||||
|
@ -5919,7 +5914,17 @@ func TestLoad_FullConfig(t *testing.T) {
|
|||
entFullRuntimeConfig(expected)
|
||||
|
||||
expectedWarns := []string{
|
||||
`The 'acl_datacenter' field is deprecated. Use the 'primary_datacenter' field instead.`,
|
||||
deprecationWarning("acl_datacenter", "primary_datacenter"),
|
||||
deprecationWarning("acl_agent_master_token", "acl.tokens.agent_master"),
|
||||
deprecationWarning("acl_agent_token", "acl.tokens.agent"),
|
||||
deprecationWarning("acl_token", "acl.tokens.default"),
|
||||
deprecationWarning("acl_master_token", "acl.tokens.master"),
|
||||
deprecationWarning("acl_replication_token", "acl.tokens.replication"),
|
||||
deprecationWarning("enable_acl_replication", "acl.enable_token_replication"),
|
||||
deprecationWarning("acl_default_policy", "acl.default_policy"),
|
||||
deprecationWarning("acl_down_policy", "acl.down_policy"),
|
||||
deprecationWarning("acl_ttl", "acl.token_ttl"),
|
||||
deprecationWarning("acl_enable_key_list_policy", "acl.enable_key_list_policy"),
|
||||
`bootstrap_expect > 0: expecting 53 servers`,
|
||||
}
|
||||
expectedWarns = append(expectedWarns, enterpriseConfigKeyWarnings...)
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
"ACLTokenTTL": "0s",
|
||||
"ACLsEnabled": false,
|
||||
"Datacenter": "",
|
||||
"EnterpriseMeta": {},
|
||||
"NodeName": ""
|
||||
},
|
||||
"ACLTokenReplication": false,
|
||||
|
@ -94,6 +95,7 @@
|
|||
"DockerContainerID": "",
|
||||
"EnterpriseMeta": {},
|
||||
"FailuresBeforeCritical": 0,
|
||||
"FailuresBeforeWarning": 0,
|
||||
"GRPC": "",
|
||||
"GRPCUseTLS": false,
|
||||
"H2PING": "",
|
||||
|
@ -186,6 +188,8 @@
|
|||
"EnterpriseRuntimeConfig": {},
|
||||
"ExposeMaxPort": 0,
|
||||
"ExposeMinPort": 0,
|
||||
"GRPCAddrs": [],
|
||||
"GRPCPort": 0,
|
||||
"GossipLANGossipInterval": "0s",
|
||||
"GossipLANGossipNodes": 0,
|
||||
"GossipLANProbeInterval": "0s",
|
||||
|
@ -295,6 +299,7 @@
|
|||
"DeregisterCriticalServiceAfter": "0s",
|
||||
"DockerContainerID": "",
|
||||
"FailuresBeforeCritical": 0,
|
||||
"FailuresBeforeWarning": 0,
|
||||
"GRPC": "",
|
||||
"GRPCUseTLS": false,
|
||||
"H2PING": "",
|
||||
|
@ -412,7 +417,5 @@
|
|||
"VerifyServerHostname": false,
|
||||
"Version": "",
|
||||
"VersionPrerelease": "",
|
||||
"Watches": [],
|
||||
"XDSAddrs": [],
|
||||
"XDSPort": 0
|
||||
"Watches": []
|
||||
}
|
|
@ -117,7 +117,6 @@ check = {
|
|||
tls_server_name = "7BdnzBYk"
|
||||
tls_skip_verify = true
|
||||
timeout = "5954s"
|
||||
ttl = "30044s"
|
||||
deregister_critical_service_after = "13209s"
|
||||
},
|
||||
checks = [
|
||||
|
@ -145,7 +144,6 @@ checks = [
|
|||
tls_server_name = "bdeb5f6a"
|
||||
tls_skip_verify = true
|
||||
timeout = "1813s"
|
||||
ttl = "21743s"
|
||||
deregister_critical_service_after = "14232s"
|
||||
},
|
||||
{
|
||||
|
@ -172,7 +170,6 @@ checks = [
|
|||
tls_server_name = "6adc3bfb"
|
||||
tls_skip_verify = true
|
||||
timeout = "18506s"
|
||||
ttl = "31006s"
|
||||
deregister_critical_service_after = "2366s"
|
||||
}
|
||||
]
|
||||
|
@ -389,7 +386,6 @@ service = {
|
|||
tls_server_name = "ECSHk8WF"
|
||||
tls_skip_verify = true
|
||||
timeout = "38483s"
|
||||
ttl = "10943s"
|
||||
deregister_critical_service_after = "68787s"
|
||||
}
|
||||
checks = [
|
||||
|
@ -415,7 +411,6 @@ service = {
|
|||
tls_server_name = "axw5QPL5"
|
||||
tls_skip_verify = true
|
||||
timeout = "18913s"
|
||||
ttl = "44743s"
|
||||
deregister_critical_service_after = "8482s"
|
||||
},
|
||||
{
|
||||
|
@ -440,7 +435,6 @@ service = {
|
|||
tls_server_name = "7uwWOnUS"
|
||||
tls_skip_verify = true
|
||||
timeout = "38282s"
|
||||
ttl = "1181s"
|
||||
deregister_critical_service_after = "4992s"
|
||||
}
|
||||
]
|
||||
|
@ -479,7 +473,6 @@ services = [
|
|||
tls_server_name = "4f191d4F"
|
||||
tls_skip_verify = true
|
||||
timeout = "38333s"
|
||||
ttl = "57201s"
|
||||
deregister_critical_service_after = "44214s"
|
||||
}
|
||||
connect {
|
||||
|
@ -521,7 +514,6 @@ services = [
|
|||
tls_server_name = "f43ouY7a"
|
||||
tls_skip_verify = true
|
||||
timeout = "34738s"
|
||||
ttl = "22773s"
|
||||
deregister_critical_service_after = "84282s"
|
||||
},
|
||||
{
|
||||
|
@ -529,22 +521,7 @@ services = [
|
|||
name = "PQSaPWlT"
|
||||
notes = "jKChDOdl"
|
||||
status = "5qFz6OZn"
|
||||
args = ["NMtYWlT9", "vj74JXsm"]
|
||||
http = "1LBDJhw4"
|
||||
header = {
|
||||
"cXPmnv1M" = [ "imDqfaBx", "NFxZ1bQe" ],
|
||||
"vr7wY7CS" = [ "EtCoNPPL", "9vAarJ5s" ]
|
||||
}
|
||||
method = "wzByP903"
|
||||
body = "4I8ucZgZ"
|
||||
tcp = "2exjZIGE"
|
||||
h2ping = "jTDuR1DC"
|
||||
interval = "5656s"
|
||||
output_max_size = 4096
|
||||
docker_container_id = "5tDBWpfA"
|
||||
shell = "rlTpLM8s"
|
||||
tls_server_name = "sOv5WTtp"
|
||||
tls_skip_verify = true
|
||||
timeout = "4868s"
|
||||
ttl = "11222s"
|
||||
deregister_critical_service_after = "68482s"
|
||||
|
|
|
@ -118,7 +118,6 @@
|
|||
"tls_server_name": "7BdnzBYk",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "5954s",
|
||||
"ttl": "30044s",
|
||||
"deregister_critical_service_after": "13209s"
|
||||
},
|
||||
"checks": [
|
||||
|
@ -146,7 +145,6 @@
|
|||
"tls_server_name": "bdeb5f6a",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "1813s",
|
||||
"ttl": "21743s",
|
||||
"deregister_critical_service_after": "14232s"
|
||||
},
|
||||
{
|
||||
|
@ -173,7 +171,6 @@
|
|||
"tls_server_name": "6adc3bfb",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "18506s",
|
||||
"ttl": "31006s",
|
||||
"deregister_critical_service_after": "2366s"
|
||||
}
|
||||
],
|
||||
|
@ -386,7 +383,6 @@
|
|||
"tls_server_name": "ECSHk8WF",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "38483s",
|
||||
"ttl": "10943s",
|
||||
"deregister_critical_service_after": "68787s"
|
||||
},
|
||||
"checks": [
|
||||
|
@ -412,7 +408,6 @@
|
|||
"tls_server_name": "axw5QPL5",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "18913s",
|
||||
"ttl": "44743s",
|
||||
"deregister_critical_service_after": "8482s"
|
||||
},
|
||||
{
|
||||
|
@ -437,7 +432,6 @@
|
|||
"tls_server_name": "7uwWOnUS",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "38282s",
|
||||
"ttl": "1181s",
|
||||
"deregister_critical_service_after": "4992s"
|
||||
}
|
||||
],
|
||||
|
@ -476,7 +470,6 @@
|
|||
"tls_server_name": "4f191d4F",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "38333s",
|
||||
"ttl": "57201s",
|
||||
"deregister_critical_service_after": "44214s"
|
||||
},
|
||||
"connect": {
|
||||
|
@ -518,7 +511,6 @@
|
|||
"tls_server_name": "f43ouY7a",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "34738s",
|
||||
"ttl": "22773s",
|
||||
"deregister_critical_service_after": "84282s"
|
||||
},
|
||||
{
|
||||
|
@ -526,22 +518,7 @@
|
|||
"name": "PQSaPWlT",
|
||||
"notes": "jKChDOdl",
|
||||
"status": "5qFz6OZn",
|
||||
"args": ["NMtYWlT9", "vj74JXsm"],
|
||||
"http": "1LBDJhw4",
|
||||
"header": {
|
||||
"cXPmnv1M": [ "imDqfaBx", "NFxZ1bQe" ],
|
||||
"vr7wY7CS": [ "EtCoNPPL", "9vAarJ5s" ]
|
||||
},
|
||||
"method": "wzByP903",
|
||||
"body": "4I8ucZgZ",
|
||||
"tcp": "2exjZIGE",
|
||||
"h2ping": "jTDuR1DC",
|
||||
"interval": "5656s",
|
||||
"output_max_size": 4096,
|
||||
"docker_container_id": "5tDBWpfA",
|
||||
"shell": "rlTpLM8s",
|
||||
"tls_server_name": "sOv5WTtp",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "4868s",
|
||||
"ttl": "11222s",
|
||||
"deregister_critical_service_after": "68482s"
|
||||
|
|
|
@ -109,7 +109,7 @@ func (s *HTTPHandlers) configDelete(resp http.ResponseWriter, req *http.Request)
|
|||
return reply, nil
|
||||
}
|
||||
|
||||
// ConfigCreate applies the given config entry update.
|
||||
// ConfigApply applies the given config entry update.
|
||||
func (s *HTTPHandlers) ConfigApply(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
args := structs.ConfigEntryRequest{
|
||||
Op: structs.ConfigEntryUpsert,
|
||||
|
|
|
@ -11,13 +11,17 @@ import (
// The return value of `auth` is only valid if the second value `match` is true.
// If `match` is false, then the intention doesn't match this target and any result should be ignored.
func AuthorizeIntentionTarget(
target, targetNS string,
target, targetNS, targetAP string,
ixn *structs.Intention,
matchType structs.IntentionMatchType,
) (auth bool, match bool) {

switch matchType {
case structs.IntentionMatchDestination:
if structs.PartitionOrDefault(ixn.DestinationPartition) != structs.PartitionOrDefault(targetAP) {
return false, false
}

if ixn.DestinationNS != structs.WildcardSpecifier && ixn.DestinationNS != targetNS {
// Non-matching namespace
return false, false
@ -29,6 +33,10 @@ func AuthorizeIntentionTarget(
}

case structs.IntentionMatchSource:
if structs.PartitionOrDefault(ixn.SourcePartition) != structs.PartitionOrDefault(targetAP) {
return false, false
}

if ixn.SourceNS != structs.WildcardSpecifier && ixn.SourceNS != targetNS {
// Non-matching namespace
return false, false
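The added `targetAP` argument is compared first, so an intention registered for a different admin partition is skipped before the namespace and name checks run. A small sketch of the updated call shape, mirroring the "match exact destination, allow" case in the test changes below — the names are illustrative:

```go
package connect

import "github.com/hashicorp/consul/agent/structs"

// allowExample sketches the new five-argument call. Everything lines up here
// (same partition, namespace and name), so the intention matches and its
// allow action is returned. An intention whose DestinationPartition named a
// different partition would short-circuit to (false, false) above.
func allowExample() (auth, match bool) {
	ixn := &structs.Intention{
		DestinationName: "web",
		DestinationNS:   structs.IntentionDefaultNamespace,
		Action:          structs.IntentionActionAllow,
	}
	return AuthorizeIntentionTarget("web", structs.IntentionDefaultNamespace, "", ixn, structs.IntentionMatchDestination)
}
```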
|
||||
|
|
|
@ -11,6 +11,7 @@ func TestAuthorizeIntentionTarget(t *testing.T) {
|
|||
name string
|
||||
target string
|
||||
targetNS string
|
||||
targetAP string
|
||||
ixn *structs.Intention
|
||||
matchType structs.IntentionMatchType
|
||||
auth bool
|
||||
|
@ -18,36 +19,20 @@ func TestAuthorizeIntentionTarget(t *testing.T) {
|
|||
}{
|
||||
// Source match type
|
||||
{
|
||||
name: "match exact source, not matching namespace",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
name: "match exact source, not matching name",
|
||||
target: "web",
|
||||
ixn: &structs.Intention{
|
||||
SourceName: "db",
|
||||
SourceNS: "different",
|
||||
},
|
||||
matchType: structs.IntentionMatchSource,
|
||||
auth: false,
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
name: "match exact source, not matching name",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
SourceName: "db",
|
||||
SourceNS: structs.IntentionDefaultNamespace,
|
||||
},
|
||||
matchType: structs.IntentionMatchSource,
|
||||
auth: false,
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
name: "match exact source, allow",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
name: "match exact source, allow",
|
||||
target: "web",
|
||||
ixn: &structs.Intention{
|
||||
SourceName: "web",
|
||||
SourceNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
matchType: structs.IntentionMatchSource,
|
||||
|
@ -55,20 +40,17 @@ func TestAuthorizeIntentionTarget(t *testing.T) {
|
|||
match: true,
|
||||
},
|
||||
{
|
||||
name: "match exact source, deny",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
name: "match exact source, deny",
|
||||
target: "web",
|
||||
ixn: &structs.Intention{
|
||||
SourceName: "web",
|
||||
SourceNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
matchType: structs.IntentionMatchSource,
|
||||
auth: false,
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
name: "match exact sourceNS for wildcard service, deny",
|
||||
name: "match wildcard service, deny",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
|
@ -81,12 +63,10 @@ func TestAuthorizeIntentionTarget(t *testing.T) {
|
|||
match: true,
|
||||
},
|
||||
{
|
||||
name: "match exact sourceNS for wildcard service, allow",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
name: "match wildcard service, allow",
|
||||
target: "web",
|
||||
ixn: &structs.Intention{
|
||||
SourceName: structs.WildcardSpecifier,
|
||||
SourceNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
matchType: structs.IntentionMatchSource,
|
||||
|
@ -96,36 +76,20 @@ func TestAuthorizeIntentionTarget(t *testing.T) {
|
|||
|
||||
// Destination match type
|
||||
{
|
||||
name: "match exact destination, not matching namespace",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
name: "match exact destination, not matching name",
|
||||
target: "web",
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: "db",
|
||||
DestinationNS: "different",
|
||||
},
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
auth: false,
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
name: "match exact destination, not matching name",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: "db",
|
||||
DestinationNS: structs.IntentionDefaultNamespace,
|
||||
},
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
auth: false,
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
name: "match exact destination, allow",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
name: "match exact destination, allow",
|
||||
target: "web",
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: "web",
|
||||
DestinationNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
|
@ -133,12 +97,10 @@ func TestAuthorizeIntentionTarget(t *testing.T) {
|
|||
match: true,
|
||||
},
|
||||
{
|
||||
name: "match exact destination, deny",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
name: "match exact destination, deny",
|
||||
target: "web",
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: "web",
|
||||
DestinationNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
|
@ -146,12 +108,10 @@ func TestAuthorizeIntentionTarget(t *testing.T) {
|
|||
match: true,
|
||||
},
|
||||
{
|
||||
name: "match exact destinationNS for wildcard service, deny",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
name: "match wildcard service, deny",
|
||||
target: "web",
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: structs.WildcardSpecifier,
|
||||
DestinationNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
|
@ -159,12 +119,10 @@ func TestAuthorizeIntentionTarget(t *testing.T) {
|
|||
match: true,
|
||||
},
|
||||
{
|
||||
name: "match exact destinationNS for wildcard service, allow",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
name: "match wildcard service, allow",
|
||||
target: "web",
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: structs.WildcardSpecifier,
|
||||
DestinationNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
|
@ -172,12 +130,10 @@ func TestAuthorizeIntentionTarget(t *testing.T) {
|
|||
match: true,
|
||||
},
|
||||
{
|
||||
name: "unknown match type",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
name: "unknown match type",
|
||||
target: "web",
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: structs.WildcardSpecifier,
|
||||
DestinationNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
matchType: structs.IntentionMatchType("unknown"),
|
||||
|
@ -188,7 +144,7 @@ func TestAuthorizeIntentionTarget(t *testing.T) {
|
|||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
auth, match := AuthorizeIntentionTarget(tc.target, tc.targetNS, tc.ixn, tc.matchType)
|
||||
auth, match := AuthorizeIntentionTarget(tc.target, tc.targetNS, tc.targetAP, tc.ixn, tc.matchType)
|
||||
assert.Equal(t, tc.auth, auth)
|
||||
assert.Equal(t, tc.match, match)
|
||||
})
|
||||
|
|
|
@ -2,10 +2,17 @@ package connect
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
const (
|
||||
internal = "internal"
|
||||
version = "v1"
|
||||
internalVersion = internal + "-" + version
|
||||
)
|
||||
|
||||
func UpstreamSNI(u *structs.Upstream, subset string, dc string, trustDomain string) string {
|
||||
if u.Datacenter != "" {
|
||||
dc = u.Datacenter
|
||||
|
@ -14,23 +21,39 @@ func UpstreamSNI(u *structs.Upstream, subset string, dc string, trustDomain stri
|
|||
if u.DestinationType == structs.UpstreamDestTypePreparedQuery {
|
||||
return QuerySNI(u.DestinationName, dc, trustDomain)
|
||||
}
|
||||
return ServiceSNI(u.DestinationName, subset, u.DestinationNamespace, dc, trustDomain)
|
||||
return ServiceSNI(u.DestinationName, subset, u.DestinationNamespace, u.DestinationPartition, dc, trustDomain)
|
||||
}
|
||||
|
||||
func DatacenterSNI(dc string, trustDomain string) string {
|
||||
return fmt.Sprintf("%s.internal.%s", dc, trustDomain)
|
||||
}

func ServiceSNI(service string, subset string, namespace string, datacenter string, trustDomain string) string {
func ServiceSNI(service string, subset string, namespace string, partition string, datacenter string, trustDomain string) string {
if namespace == "" {
namespace = "default"
}

if subset == "" {
return fmt.Sprintf("%s.%s.%s.internal.%s", service, namespace, datacenter, trustDomain)
} else {
return fmt.Sprintf("%s.%s.%s.%s.internal.%s", subset, service, namespace, datacenter, trustDomain)
if partition == "" {
partition = "default"
}

switch partition {
case "default":
if subset == "" {
return dotJoin(service, namespace, datacenter, internal, trustDomain)
} else {
return dotJoin(subset, service, namespace, datacenter, internal, trustDomain)
}
default:
if subset == "" {
return dotJoin(service, namespace, partition, datacenter, internalVersion, trustDomain)
} else {
return dotJoin(subset, service, namespace, partition, datacenter, internalVersion, trustDomain)
}
}
}

func dotJoin(parts ...string) string {
return strings.Join(parts, ".")
}
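With the partition-aware `ServiceSNI`, names in the `default` partition keep their old shape, while other partitions gain a partition segment and the `internal-v1` label. An example-style sketch of both shapes — the trust domain and names are made up:

```go
package connect

import "fmt"

// ExampleServiceSNI_partitions shows the two name shapes produced above.
func ExampleServiceSNI_partitions() {
	// Default partition: unchanged, pre-partition format.
	fmt.Println(ServiceSNI("web", "", "default", "default", "dc1", "1a2b.consul"))
	// Non-default partition: partition segment plus the "internal-v1" label.
	fmt.Println(ServiceSNI("web", "v2", "default", "team1", "dc1", "1a2b.consul"))
	// Output:
	// web.default.dc1.internal.1a2b.consul
	// v2.web.default.team1.dc1.internal-v1.1a2b.consul
}
```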
|
||||
|
||||
func QuerySNI(service string, datacenter string, trustDomain string) string {
|
||||
|
@ -38,5 +61,5 @@ func QuerySNI(service string, datacenter string, trustDomain string) string {
|
|||
}
|
||||
|
||||
func TargetSNI(target *structs.DiscoveryTarget, trustDomain string) string {
|
||||
return ServiceSNI(target.Service, target.ServiceSubset, target.Namespace, target.Datacenter, trustDomain)
|
||||
return ServiceSNI(target.Service, target.ServiceSubset, target.Namespace, target.Partition, target.Datacenter, trustDomain)
|
||||
}
|
||||
|
|