mirror of
https://github.com/status-im/consul.git
synced 2025-02-12 13:46:46 +00:00
Merge branch 'master' of github.com:hashicorp/consul into tg-rewrite
This commit is contained in:
commit
409768d6e5
3
.changelog/8564.txt
Normal file
3
.changelog/8564.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:improvement
|
||||
cli: the `consul connect envoy --envoy_statsd_url` flag will now resolve the `$HOST_IP` environment variable, as part of a full url.
|
||||
```
|
3
.changelog/8599.txt
Normal file
3
.changelog/8599.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:improvement
|
||||
api: `AutopilotServerHealth` now handles the 429 status code returned by the v1/operator/autopilot/health endpoint and still returns the parsed reply, which will indicate server healthiness
|
||||
```
|
3
.changelog/9475.txt
Normal file
3
.changelog/9475.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:bug
|
||||
checks: add TLSServerName field to allow setting the TLS server name for HTTPS health checks.
|
||||
```
|
3
.changelog/9617.txt
Normal file
3
.changelog/9617.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:feature
|
||||
ui: Read-only ACL Auth Methods view
|
||||
```
|
6
.changelog/9658.txt
Normal file
6
.changelog/9658.txt
Normal file
@ -0,0 +1,6 @@
|
||||
```release-note:improvement
|
||||
xds: default to speaking xDS v3, but allow for v2 to be spoken upon request
|
||||
```
|
||||
```release-note:improvement
|
||||
xds: add support for envoy 1.17.0
|
||||
```
|
4
.changelog/9672.txt
Normal file
4
.changelog/9672.txt
Normal file
@ -0,0 +1,4 @@
|
||||
```release-note:improvement
|
||||
cli: added a `-force-without-cross-signing` flag to the `ca set-config` command.
|
||||
connect/ca: The ForceWithoutCrossSigning field will now work as expected for CA providers that support cross signing.
|
||||
```
|
@ -1,3 +1,3 @@
|
||||
```release-notes:improvement
|
||||
```release-note:improvement
|
||||
client: when a client agent is attempting to deregister a service, and does not have access to the ACL token used to register a service, attempt to use the agent token instead of the default user token. If no agent token is set, fall back to the default user token.
|
||||
```
|
||||
|
3
.changelog/9703.txt
Normal file
3
.changelog/9703.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:bug
|
||||
streaming: lookup in health properly handle case-sensitivity and perform filtering based on tags and node-meta
|
||||
```
|
3
.changelog/9752.txt
Normal file
3
.changelog/9752.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:bug
|
||||
api: Remove trailing periods from the gateway internal HTTP API endpoint
|
||||
```
|
3
.changelog/9768.txt
Normal file
3
.changelog/9768.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:improvement
|
||||
connect: adds new flags `prometheus-backend-port` and `prometheus-scrape-port` to `consul connect envoy` to support envoy_prometheus_bind_addr pointing to the merged metrics port when using Consul Connect on K8s.
|
||||
```
|
3
.changelog/9792.txt
Normal file
3
.changelog/9792.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:feature
|
||||
cli: Add prefix option to kv import command
|
||||
```
|
3
.changelog/9819.txt
Normal file
3
.changelog/9819.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:improvement
|
||||
ui: improve accessibility of modal dialogs
|
||||
```
|
3
.changelog/9847.txt
Normal file
3
.changelog/9847.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:improvement
|
||||
ui: support stricter content security policies
|
||||
```
|
3
.changelog/9851.txt
Normal file
3
.changelog/9851.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:bug
|
||||
config: correct config key from `advertise_addr_ipv6` to `advertise_addr_wan_ipv6`
|
||||
```
|
3
.changelog/9864.txt
Normal file
3
.changelog/9864.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:improvement
|
||||
ui: add permanently visible indicator when ACLs are disabled
|
||||
```
|
3
.changelog/9872.txt
Normal file
3
.changelog/9872.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:improvement
|
||||
connect: Allow per-upstream configuration to be set in service-defaults. [experimental]
|
||||
```
|
3
.changelog/9894.txt
Normal file
3
.changelog/9894.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:improvement
|
||||
connect: Add support for transparently proxying traffic through Envoy. [experimental]
|
||||
```
|
3
.changelog/9901.txt
Normal file
3
.changelog/9901.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:bug
|
||||
ui: Fix intention form cancel button
|
||||
```
|
4
.changelog/9903.txt
Normal file
4
.changelog/9903.txt
Normal file
@ -0,0 +1,4 @@
|
||||
```release-note:improvement
|
||||
api: Enable setting query options on agent endpoints.
|
||||
```
|
||||
|
3
.changelog/9920.txt
Normal file
3
.changelog/9920.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:improvement
|
||||
connect: The builtin connect proxy no longer advertises support for h2 via ALPN. [[GH-4466](https://github.com/hashicorp/consul/issues/4466)].
|
||||
```
|
3
.changelog/9923.txt
Normal file
3
.changelog/9923.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:bug
|
||||
http: fix a bug in Consul Enterprise that would cause the UI to believe namespaces were supported, resulting in warning logs and incorrect UI behaviour.
|
||||
```
|
3
.changelog/9967.txt
Normal file
3
.changelog/9967.txt
Normal file
@ -0,0 +1,3 @@
|
||||
```release-note:bug
|
||||
api: ensure v1/health/ingress/:service endpoint works properly when streaming is enabled
|
||||
```
|
@ -520,13 +520,13 @@ jobs:
|
||||
- run: *notify-slack-failure
|
||||
|
||||
# run integration tests on nomad/master
|
||||
nomad-integration-master:
|
||||
nomad-integration-main:
|
||||
docker:
|
||||
- image: *GOLANG_IMAGE
|
||||
environment:
|
||||
<<: *ENVIRONMENT
|
||||
NOMAD_WORKING_DIR: /go/src/github.com/hashicorp/nomad
|
||||
NOMAD_VERSION: master
|
||||
NOMAD_VERSION: main
|
||||
steps: *NOMAD_INTEGRATION_TEST_STEPS
|
||||
|
||||
build-website-docker-image:
|
||||
@ -819,17 +819,38 @@ jobs:
|
||||
path: *TEST_RESULTS_DIR
|
||||
- run: *notify-slack-failure
|
||||
|
||||
envoy-integration-test-1_14_6-v2compat:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.14.6"
|
||||
TEST_V2_XDS: "1"
|
||||
|
||||
envoy-integration-test-1_15_3:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.15.3"
|
||||
|
||||
envoy-integration-test-1_15_3-v2compat:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.15.3"
|
||||
TEST_V2_XDS: "1"
|
||||
|
||||
envoy-integration-test-1_16_2:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.16.2"
|
||||
|
||||
# TODO(rb): add in 1.17.0 support when v3 is ready
|
||||
envoy-integration-test-1_16_2-v2compat:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.16.2"
|
||||
TEST_V2_XDS: "1"
|
||||
|
||||
envoy-integration-test-1_17_0:
|
||||
<<: *ENVOY_TESTS
|
||||
environment:
|
||||
ENVOY_VERSION: "1.17.0"
|
||||
|
||||
# run integration tests for the connect ca providers
|
||||
test-connect-ca-providers:
|
||||
@ -1033,7 +1054,7 @@ workflows:
|
||||
- dev-upload-docker:
|
||||
<<: *dev-upload
|
||||
context: consul-ci
|
||||
- nomad-integration-master:
|
||||
- nomad-integration-main:
|
||||
requires:
|
||||
- dev-build
|
||||
- nomad-integration-0_8:
|
||||
@ -1042,13 +1063,24 @@ workflows:
|
||||
- envoy-integration-test-1_14_6:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_14_6-v2compat:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_15_3:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_15_3-v2compat:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_16_2:
|
||||
requires:
|
||||
- dev-build
|
||||
# TODO(rb): add in 1.17.0 support when v3 is ready
|
||||
- envoy-integration-test-1_16_2-v2compat:
|
||||
requires:
|
||||
- dev-build
|
||||
- envoy-integration-test-1_17_0:
|
||||
requires:
|
||||
- dev-build
|
||||
|
||||
website:
|
||||
unless: << pipeline.parameters.trigger-load-test >>
|
||||
|
36
.github/workflows/changelog-checker.yml
vendored
Normal file
36
.github/workflows/changelog-checker.yml
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
# This workflow checks that there is either a 'pr/no-changelog' label applied to a PR
|
||||
# or there is a .changelog/<pr number>.txt file associated with a PR for a changelog entry
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, labeled]
|
||||
# Runs on PRs to master and all release branches
|
||||
branches:
|
||||
- master
|
||||
- release/*
|
||||
|
||||
jobs:
|
||||
# checks that a .changelog entry is present for a PR
|
||||
changelog-check:
|
||||
# If there a `pr/no-changelog` label we ignore this check
|
||||
if: "!contains(github.event.pull_request.labels.*.name, 'pr/no-changelog')"
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
fetch-depth: 0 # by default the checkout action doesn't checkout all branches
|
||||
- name: Check for changelog entry in diff
|
||||
run: |
|
||||
# check if there is a diff in the .changelog directory
|
||||
changelog_files=$(git --no-pager diff --name-only HEAD "$(git merge-base HEAD "origin/${{ github.event.pull_request.base.ref }}")" -- .changelog/${{ github.event.pull_request.number }}.txt)
|
||||
|
||||
# If we do not find a file in .changelog/, we fail the check
|
||||
if [ -z "$changelog_files" ]; then
|
||||
# Fail status check when no .changelog entry was found on the PR
|
||||
echo "Did not find a .changelog entry and the 'pr/no-changelog' label was not applied. Reference - https://github.com/hashicorp/consul/pull/8387"
|
||||
exit 1
|
||||
else
|
||||
echo "Found .changelog entry in PR!"
|
||||
fi
|
@ -5,6 +5,11 @@
|
||||
#
|
||||
# All the code checked out in this workflow should be considered untrusted. This workflow must
|
||||
# never call any makefiles or scripts. It must never be changed to run any code from the checkout.
|
||||
|
||||
# This workflow posts a message to a PR to remind maintainers that there are website/ changes
|
||||
# in the PR and if they need to be cherry-picked to the stable-website branch, the
|
||||
# 'type/docs-cherrypick' label needs to be applied.
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened]
|
||||
@ -14,39 +19,10 @@ on:
|
||||
- release/*
|
||||
|
||||
jobs:
|
||||
# checks that a .changelog entry is present for a PR
|
||||
changelog-check:
|
||||
# If there a `pr/no-changelog` label we ignore this check
|
||||
if: "!${{ contains(github.event.pull_request.labels.*.name, 'pr/no-changelog')}}"
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
fetch-depth: 0 # by default the checkout action doesn't checkout all branches
|
||||
- name: Check for changelog entry in diff
|
||||
run: |
|
||||
# check if there is a diff in the .changelog directory
|
||||
changelog_files=$(git --no-pager diff --name-only HEAD "$(git merge-base HEAD "origin/${{ github.event.pull_request.base.ref }}")" -- .changelog/)
|
||||
|
||||
# If we do not find a file in .changelog/, we post a comment to the PR
|
||||
if [ -z "$changelog_files" ]; then
|
||||
# post PR comment to GitHub when no .changelog entry was found on PR
|
||||
echo "changelog-check: Did not find a .changelog entry, posting a reminder in the PR"
|
||||
github_message="🤔 Double check that this PR does not require a changelog entry in the \`.changelog\` directory. [Reference](https://github.com/hashicorp/consul/pull/8387)"
|
||||
curl -f -s -H "Authorization: token ${{ secrets.PR_COMMENT_TOKEN }}" \
|
||||
-X POST \
|
||||
-d "{ \"body\": \"${github_message}\"}" \
|
||||
"https://api.github.com/repos/${GITHUB_REPOSITORY}/issues/${{ github.event.pull_request.number }}/comments"
|
||||
else
|
||||
echo "changelog-check: Found .changelog entry in PR!"
|
||||
fi
|
||||
|
||||
# checks that a 'type/docs-cherrypick' label is attached to PRs with website/ changes
|
||||
website-check:
|
||||
# If there's a `type/docs-cherrypick` label we ignore this check
|
||||
if: "!${{ contains(github.event.pull_request.labels.*.name, 'type/docs-cherrypick')}}"
|
||||
if: "!contains(github.event.pull_request.labels.*.name, 'type/docs-cherrypick')"
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
60
CHANGELOG.md
60
CHANGELOG.md
@ -1,5 +1,27 @@
|
||||
## UNRELEASED
|
||||
|
||||
## 1.9.4 (March 04, 2021)
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* connect: if the token given to the vault provider returns no data avoid a panic [[GH-9806](https://github.com/hashicorp/consul/issues/9806)]
|
||||
* connect: update supported envoy point releases to 1.16.2, 1.15.3, 1.14.6, 1.13.7 [[GH-9737](https://github.com/hashicorp/consul/issues/9737)]
|
||||
* xds: only try to create an ipv6 expose checks listener if ipv6 is supported by the kernel [[GH-9765](https://github.com/hashicorp/consul/issues/9765)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* api: Remove trailing periods from the gateway internal HTTP API endpoint [[GH-9752](https://github.com/hashicorp/consul/issues/9752)]
|
||||
* cache: Prevent spamming the logs for days when a cached request encounters an "ACL not found" error. [[GH-9738](https://github.com/hashicorp/consul/issues/9738)]
|
||||
* connect: connect CA Roots in the primary datacenter should use a SigningKeyID derived from their local intermediate [[GH-9428](https://github.com/hashicorp/consul/issues/9428)]
|
||||
* proxycfg: avoid potential deadlock in delivering proxy snapshot to watchers. [[GH-9689](https://github.com/hashicorp/consul/issues/9689)]
|
||||
* replication: Correctly log all replication warnings that should not be suppressed [[GH-9320](https://github.com/hashicorp/consul/issues/9320)]
|
||||
* streaming: fixes a bug caused by caching an incorrect snapshot, that would cause clients
|
||||
to error until the cache expired. [[GH-9772](https://github.com/hashicorp/consul/issues/9772)]
|
||||
* ui: Exclude proxies when showing the total number of instances on a node. [[GH-9749](https://github.com/hashicorp/consul/issues/9749)]
|
||||
* ui: Fixed a bug in older browsers relating to String.replaceAll and fieldset w/flexbox usage [[GH-9715](https://github.com/hashicorp/consul/issues/9715)]
|
||||
* xds: deduplicate mesh gateway listeners by address in a stable way to prevent some LDS churn [[GH-9650](https://github.com/hashicorp/consul/issues/9650)]
|
||||
* xds: prevent LDS flaps in mesh gateways due to unstable datacenter lists; also prevent some flaps in terminating gateways as well [[GH-9651](https://github.com/hashicorp/consul/issues/9651)]
|
||||
|
||||
## 1.9.3 (February 01, 2021)
|
||||
|
||||
FEATURES:
|
||||
@ -174,6 +196,7 @@ BUG FIXES:
|
||||
* agent: make the json/hcl decoding of ConnectProxyConfig fully work with CamelCase and snake_case [[GH-8741](https://github.com/hashicorp/consul/issues/8741)]
|
||||
* agent: when enable_central_service_config is enabled ensure agent reload doesn't revert check state to critical [[GH-8747](https://github.com/hashicorp/consul/issues/8747)]
|
||||
* api: Fixed a bug where the Check.GRPCUseTLS field could not be set using snake case. [[GH-8771](https://github.com/hashicorp/consul/issues/8771)]
|
||||
* api: Fixed a bug where additional headers configured with `http_config.response_headers` would not be served on index and error pages [[GH-8694](https://github.com/hashicorp/consul/pull/8694/files#diff-160c9abf1b1868a8505065ab02d736fd2dc522a7a555d57383e8428883dc7755R545-R548)]
|
||||
* autopilot: **(Enterprise Only)** Previously servers in other zones would not be promoted when all servers in a second zone had failed. Now the actual behavior matches the docs and autopilot will promote a healthy non-voter from any zone to replace failure of an entire zone. [[GH-9103](https://github.com/hashicorp/consul/issues/9103)]
|
||||
* autopilot: Prevent panic when requesting the autopilot health immediately after a leader is elected. [[GH-9204](https://github.com/hashicorp/consul/issues/9204)]
|
||||
* command: when generating envoy bootstrap configs use the datacenter returned from the agent services endpoint [[GH-9229](https://github.com/hashicorp/consul/issues/9229)]
|
||||
@ -190,6 +213,28 @@ BUG FIXES:
|
||||
* telemetry: fixed a bug that caused logs to be flooded with `[WARN] agent.router: Non-server in server-only area` [[GH-8685](https://github.com/hashicorp/consul/issues/8685)]
|
||||
* ui: show correct datacenter for gateways [[GH-8704](https://github.com/hashicorp/consul/issues/8704)]
|
||||
|
||||
## 1.8.9 (March 04, 2021)
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* cli: Add new `-cluster-id` and `common-name` to `consul tls ca create` to support creating a CA for Consul Connect. [[GH-9585](https://github.com/hashicorp/consul/issues/9585)]
|
||||
* connect: if the token given to the vault provider returns no data avoid a panic [[GH-9806](https://github.com/hashicorp/consul/issues/9806)]
|
||||
* connect: update supported envoy point releases to 1.14.6, 1.13.7, 1.12.7, 1.11.2 [[GH-9739](https://github.com/hashicorp/consul/issues/9739)]
|
||||
* license: **(Enterprise only)** Temporary client license duration was increased from 30m to 6h.
|
||||
* server: use the presence of stored federation state data as a sign that we already activated the federation state feature flag [[GH-9519](https://github.com/hashicorp/consul/issues/9519)]
|
||||
* xds: only try to create an ipv6 expose checks listener if ipv6 is supported by the kernel [[GH-9765](https://github.com/hashicorp/consul/issues/9765)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* api: Remove trailing periods from the gateway internal HTTP API endpoint [[GH-9752](https://github.com/hashicorp/consul/issues/9752)]
|
||||
* cache: Prevent spamming the logs for days when a cached request encounters an "ACL not found" error. [[GH-9738](https://github.com/hashicorp/consul/issues/9738)]
|
||||
* connect: connect CA Roots in the primary datacenter should use a SigningKeyID derived from their local intermediate [[GH-9428](https://github.com/hashicorp/consul/issues/9428)]
|
||||
* proxycfg: avoid potential deadlock in delivering proxy snapshot to watchers. [[GH-9689](https://github.com/hashicorp/consul/issues/9689)]
|
||||
* server: When wan federating via mesh gateways after initial federation default to using the local mesh gateways unless the heuristic indicates a bypass is required. [[GH-9528](https://github.com/hashicorp/consul/issues/9528)]
|
||||
* server: When wan federating via mesh gateways only do heuristic primary DC bypass on the leader. [[GH-9366](https://github.com/hashicorp/consul/issues/9366)]
|
||||
* xds: deduplicate mesh gateway listeners by address in a stable way to prevent some LDS churn [[GH-9650](https://github.com/hashicorp/consul/issues/9650)]
|
||||
* xds: prevent LDS flaps in mesh gateways due to unstable datacenter lists; also prevent some flaps in terminating gateways as well [[GH-9651](https://github.com/hashicorp/consul/issues/9651)]
|
||||
*
|
||||
## 1.8.8 (January 22, 2021)
|
||||
|
||||
BUG FIXES:
|
||||
@ -386,6 +431,21 @@ BUGFIXES:
|
||||
* ui: Miscellaneous amends for Safari and Firefox [[GH-7904](https://github.com/hashicorp/consul/issues/7904)] [[GH-7907](https://github.com/hashicorp/consul/pull/7907)]
|
||||
* ui: Ensure a value is always passed to CONSUL_SSO_ENABLED [[GH-7913](https://github.com/hashicorp/consul/pull/7913)]
|
||||
|
||||
## 1.7.13 (March 04, 2021)
|
||||
|
||||
IMPROVEMENTS:
|
||||
|
||||
* connect: update supported envoy point releases to 1.13.7, 1.12.7, 1.11.2, 1.10.0 [[GH-9740](https://github.com/hashicorp/consul/issues/9740)]
|
||||
* license: **(Enterprise only)** Temporary client license duration was increased from 30m to 6h.
|
||||
* xds: only try to create an ipv6 expose checks listener if ipv6 is supported by the kernel [[GH-9765](https://github.com/hashicorp/consul/issues/9765)]
|
||||
|
||||
BUG FIXES:
|
||||
|
||||
* cache: Prevent spamming the logs for days when a cached request encounters an "ACL not found" error. [[GH-9738](https://github.com/hashicorp/consul/issues/9738)]
|
||||
* connect: connect CA Roots in the primary datacenter should use a SigningKeyID derived from their local intermediate [[GH-9428](https://github.com/hashicorp/consul/issues/9428)]
|
||||
* xds: deduplicate mesh gateway listeners by address in a stable way to prevent some LDS churn [[GH-9650](https://github.com/hashicorp/consul/issues/9650)]
|
||||
* xds: prevent LDS flaps in mesh gateways due to unstable datacenter lists; also prevent some flaps in terminating gateways as well [[GH-9651](https://github.com/hashicorp/consul/issues/9651)]
|
||||
|
||||
## 1.7.12 (January 22, 2021)
|
||||
|
||||
BUG FIXES:
|
||||
|
@ -374,11 +374,10 @@ func New(bd BaseDeps) (*Agent, error) {
|
||||
cacheName = cachetype.StreamingHealthServicesName
|
||||
}
|
||||
a.rpcClientHealth = &health.Client{
|
||||
Cache: bd.Cache,
|
||||
NetRPC: &a,
|
||||
CacheName: cacheName,
|
||||
// Temporarily until streaming supports all connect events
|
||||
CacheNameConnect: cachetype.HealthServicesName,
|
||||
Cache: bd.Cache,
|
||||
NetRPC: &a,
|
||||
CacheName: cacheName,
|
||||
CacheNameIngress: cachetype.HealthServicesName,
|
||||
}
|
||||
|
||||
a.serviceManager = NewServiceManager(&a)
|
||||
@ -540,6 +539,7 @@ func (a *Agent) Start(ctx context.Context) error {
|
||||
// Start the proxy config manager.
|
||||
a.proxyConfig, err = proxycfg.NewManager(proxycfg.ManagerConfig{
|
||||
Cache: a.cache,
|
||||
Health: a.rpcClientHealth,
|
||||
Logger: a.logger.Named(logging.ProxyConfig),
|
||||
State: a.State,
|
||||
Source: &structs.QuerySource{
|
||||
@ -2518,7 +2518,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
|
||||
chkType.Interval = checks.MinInterval
|
||||
}
|
||||
|
||||
tlsClientConfig := a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify)
|
||||
tlsClientConfig := a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify, chkType.TLSServerName)
|
||||
|
||||
http := &checks.CheckHTTP{
|
||||
CheckID: cid,
|
||||
@ -2590,7 +2590,7 @@ func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType,
|
||||
|
||||
var tlsClientConfig *tls.Config
|
||||
if chkType.GRPCUseTLS {
|
||||
tlsClientConfig = a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify)
|
||||
tlsClientConfig = a.tlsConfigurator.OutgoingTLSConfigForCheck(chkType.TLSSkipVerify, chkType.TLSServerName)
|
||||
}
|
||||
|
||||
grpc := &checks.CheckGRPC{
|
||||
@ -3709,6 +3709,8 @@ func (a *Agent) registerCache() {
|
||||
|
||||
a.cache.RegisterType(cachetype.IntentionMatchName, &cachetype.IntentionMatch{RPC: a})
|
||||
|
||||
a.cache.RegisterType(cachetype.IntentionUpstreamsName, &cachetype.IntentionUpstreams{RPC: a})
|
||||
|
||||
a.cache.RegisterType(cachetype.CatalogServicesName, &cachetype.CatalogServices{RPC: a})
|
||||
|
||||
a.cache.RegisterType(cachetype.HealthServicesName, &cachetype.HealthServices{RPC: a})
|
||||
|
@ -36,7 +36,6 @@ import (
|
||||
tokenStore "github.com/hashicorp/consul/agent/token"
|
||||
"github.com/hashicorp/consul/agent/xds/proxysupport"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
@ -197,6 +196,7 @@ func TestAgent_Services_Sidecar(t *testing.T) {
|
||||
Proxy: structs.ConnectProxyConfig{
|
||||
DestinationServiceName: "db",
|
||||
Upstreams: structs.TestUpstreams(t),
|
||||
TransparentProxy: true,
|
||||
},
|
||||
}
|
||||
a.State.AddService(srv1, "")
|
||||
@ -396,7 +396,7 @@ func TestAgent_Service(t *testing.T) {
|
||||
Service: "web-sidecar-proxy",
|
||||
Port: 8000,
|
||||
Proxy: expectProxy.ToAPI(),
|
||||
ContentHash: "4c7d5f8d3748be6d",
|
||||
ContentHash: "fa3af167b81f6721",
|
||||
Weights: api.AgentWeights{
|
||||
Passing: 1,
|
||||
Warning: 1,
|
||||
@ -410,7 +410,7 @@ func TestAgent_Service(t *testing.T) {
|
||||
// Copy and modify
|
||||
updatedResponse := *expectedResponse
|
||||
updatedResponse.Port = 9999
|
||||
updatedResponse.ContentHash = "713435ba1f5badcf"
|
||||
updatedResponse.ContentHash = "c7739b50900c7483"
|
||||
|
||||
// Simple response for non-proxy service registered in TestAgent config
|
||||
expectWebResponse := &api.AgentService{
|
||||
@ -3156,11 +3156,11 @@ func TestAgent_RegisterService_TranslateKeys(t *testing.T) {
|
||||
|
||||
t.Run("normal", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
testAgent_RegisterService_ACLDeny(t, "enable_central_service_config = false")
|
||||
testAgent_RegisterService_TranslateKeys(t, "enable_central_service_config = false")
|
||||
})
|
||||
t.Run("service manager", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
testAgent_RegisterService_ACLDeny(t, "enable_central_service_config = true")
|
||||
testAgent_RegisterService_TranslateKeys(t, "enable_central_service_config = true")
|
||||
})
|
||||
}
|
||||
|
||||
@ -3316,6 +3316,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
|
||||
// there worked by inspecting the registered sidecar below.
|
||||
SidecarService: nil,
|
||||
},
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
}
|
||||
|
||||
got := a.State.Service(structs.NewServiceID("test", nil))
|
||||
@ -3329,6 +3330,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
|
||||
"some": "meta",
|
||||
"enable_tag_override": "sidecar_service.meta is 'opaque' so should not get translated",
|
||||
},
|
||||
TaggedAddresses: map[string]structs.ServiceAddress{},
|
||||
Port: 8001,
|
||||
EnableTagOverride: true,
|
||||
Weights: &structs.Weights{Passing: 1, Warning: 1},
|
||||
@ -3351,6 +3353,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
|
||||
},
|
||||
},
|
||||
},
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
}
|
||||
gotSidecar := a.State.Service(structs.NewServiceID("test-sidecar-proxy", nil))
|
||||
hasNoCorrectTCPCheck := true
|
||||
@ -6137,13 +6140,6 @@ func requireLeafValidUnderCA(t *testing.T, issued *structs.IssuedCert, ca *struc
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func makeTelemetryDefaults(targetID string) lib.TelemetryConfig {
|
||||
return lib.TelemetryConfig{
|
||||
FilterDefault: true,
|
||||
MetricsPrefix: "consul.proxy." + targetID,
|
||||
}
|
||||
}
|
||||
|
||||
func TestAgentConnectAuthorize_badBody(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
52
agent/cache-types/intention_upstreams.go
Normal file
52
agent/cache-types/intention_upstreams.go
Normal file
@ -0,0 +1,52 @@
|
||||
package cachetype

import (
	"fmt"

	"github.com/hashicorp/consul/agent/cache"
	"github.com/hashicorp/consul/agent/structs"
)

// IntentionUpstreamsName is the recommended name for registration.
const IntentionUpstreamsName = "intention-upstreams"

// IntentionUpstreams supports fetching the list of upstream services for a
// given service name, as derived from that service's intentions.
// (The previous comment referred to "GatewayUpstreams"/"gateway name", a
// stale copy-paste; this cache type issues Internal.IntentionUpstreams for a
// service, not a gateway.)
type IntentionUpstreams struct {
	RegisterOptionsBlockingRefresh
	RPC RPC
}

// Fetch implements cache.Type. It issues a blocking
// Internal.IntentionUpstreams RPC for the requested service and returns the
// reply wrapped in a cache.FetchResult.
func (i *IntentionUpstreams) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
	var result cache.FetchResult

	// The request should be a ServiceSpecificRequest.
	reqReal, ok := req.(*structs.ServiceSpecificRequest)
	if !ok {
		return result, fmt.Errorf(
			"Internal cache failure: request wrong type: %T", req)
	}

	// Lightweight copy this object so that manipulating QueryOptions doesn't race.
	dup := *reqReal
	reqReal = &dup

	// Set the minimum query index to our current index so we block
	reqReal.QueryOptions.MinQueryIndex = opts.MinIndex
	reqReal.QueryOptions.MaxQueryTime = opts.Timeout

	// Always allow stale - there's no point in hitting leader if the request is
	// going to be served from cache and end up arbitrarily stale anyway. This
	// allows cached service-discover to automatically read scale across all
	// servers too.
	reqReal.AllowStale = true

	// Fetch
	var reply structs.IndexedServiceList
	if err := i.RPC.RPC("Internal.IntentionUpstreams", reqReal, &reply); err != nil {
		return result, err
	}

	result.Value = &reply
	result.Index = reply.QueryMeta.Index
	return result, nil
}
|
52
agent/cache-types/intention_upstreams_test.go
Normal file
52
agent/cache-types/intention_upstreams_test.go
Normal file
@ -0,0 +1,52 @@
|
||||
package cachetype

import (
	"testing"
	"time"

	"github.com/hashicorp/consul/agent/cache"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// TestIntentionUpstreams verifies that Fetch issues the expected blocking
// Internal.IntentionUpstreams RPC and surfaces the reply value and index in
// the returned cache result.
func TestIntentionUpstreams(t *testing.T) {
	rpc := TestRPC(t)
	typ := &IntentionUpstreams{RPC: rpc}

	// Mock the RPC and capture the reply it populates (return-by-pointer in
	// the arguments) so the final assertion can compare against the exact
	// same value the cache type stored.
	var resp *structs.IndexedServiceList
	rpc.On("RPC", "Internal.IntentionUpstreams", mock.Anything, mock.Anything).Return(nil).
		Run(func(args mock.Arguments) {
			// Blocking-query options and the service name must flow through.
			req := args.Get(1).(*structs.ServiceSpecificRequest)
			require.Equal(t, uint64(24), req.QueryOptions.MinQueryIndex)
			require.Equal(t, 1*time.Second, req.QueryOptions.MaxQueryTime)
			require.True(t, req.AllowStale)
			require.Equal(t, "foo", req.ServiceName)

			reply := args.Get(2).(*structs.IndexedServiceList)
			reply.Services = structs.ServiceList{{Name: "foo"}}
			reply.QueryMeta.Index = 48
			resp = reply
		})

	// Perform the fetch and check the result mirrors the RPC reply.
	resultA, err := typ.Fetch(cache.FetchOptions{
		MinIndex: 24,
		Timeout:  1 * time.Second,
	}, &structs.ServiceSpecificRequest{
		Datacenter:  "dc1",
		ServiceName: "foo",
	})
	require.NoError(t, err)
	require.Equal(t, cache.FetchResult{Value: resp, Index: 48}, resultA)

	rpc.AssertExpectations(t)
}
|
@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-bexpr"
|
||||
@ -74,7 +75,7 @@ func (c *StreamingHealthServices) Fetch(opts cache.FetchOptions, req cache.Reque
|
||||
Token: srvReq.Token,
|
||||
Datacenter: srvReq.Datacenter,
|
||||
Index: index,
|
||||
Namespace: srvReq.EnterpriseMeta.GetNamespace(),
|
||||
Namespace: srvReq.EnterpriseMeta.NamespaceOrEmpty(),
|
||||
}
|
||||
if srvReq.Connect {
|
||||
req.Topic = pbsubscribe.Topic_ServiceHealthConnect
|
||||
@ -82,7 +83,7 @@ func (c *StreamingHealthServices) Fetch(opts cache.FetchOptions, req cache.Reque
|
||||
return req
|
||||
}
|
||||
|
||||
materializer, err := newMaterializer(c.deps, newReqFn, srvReq.Filter)
|
||||
materializer, err := newMaterializer(c.deps, newReqFn, srvReq)
|
||||
if err != nil {
|
||||
return cache.FetchResult{}, err
|
||||
}
|
||||
@ -100,9 +101,9 @@ func (c *StreamingHealthServices) Fetch(opts cache.FetchOptions, req cache.Reque
|
||||
func newMaterializer(
|
||||
deps MaterializerDeps,
|
||||
newRequestFn func(uint64) pbsubscribe.SubscribeRequest,
|
||||
filter string,
|
||||
req *structs.ServiceSpecificRequest,
|
||||
) (*submatview.Materializer, error) {
|
||||
view, err := newHealthView(filter)
|
||||
view, err := newHealthView(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -139,8 +140,8 @@ func (s *streamingHealthState) Fetch(opts cache.FetchOptions) (cache.FetchResult
|
||||
return result, err
|
||||
}
|
||||
|
||||
func newHealthView(filterExpr string) (*healthView, error) {
|
||||
fe, err := newFilterEvaluator(filterExpr)
|
||||
func newHealthView(req *structs.ServiceSpecificRequest) (*healthView, error) {
|
||||
fe, err := newFilterEvaluator(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -192,11 +193,44 @@ type filterEvaluator interface {
|
||||
Evaluate(datum interface{}) (bool, error)
|
||||
}
|
||||
|
||||
func newFilterEvaluator(expr string) (filterEvaluator, error) {
|
||||
if expr == "" {
|
||||
return noopFilterEvaluator{}, nil
|
||||
func newFilterEvaluator(req *structs.ServiceSpecificRequest) (filterEvaluator, error) {
|
||||
var evaluators []filterEvaluator
|
||||
|
||||
typ := reflect.TypeOf(structs.CheckServiceNode{})
|
||||
if req.Filter != "" {
|
||||
e, err := bexpr.CreateEvaluatorForType(req.Filter, nil, typ)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
evaluators = append(evaluators, e)
|
||||
}
|
||||
|
||||
if req.ServiceTag != "" {
|
||||
// Handle backwards compat with old field
|
||||
req.ServiceTags = []string{req.ServiceTag}
|
||||
}
|
||||
|
||||
if req.TagFilter && len(req.ServiceTags) > 0 {
|
||||
evaluators = append(evaluators, serviceTagEvaluator{tags: req.ServiceTags})
|
||||
}
|
||||
|
||||
for key, value := range req.NodeMetaFilters {
|
||||
expr := fmt.Sprintf(`"%s" in Node.Meta.%s`, value, key)
|
||||
e, err := bexpr.CreateEvaluatorForType(expr, nil, typ)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
evaluators = append(evaluators, e)
|
||||
}
|
||||
|
||||
switch len(evaluators) {
|
||||
case 0:
|
||||
return noopFilterEvaluator{}, nil
|
||||
case 1:
|
||||
return evaluators[0], nil
|
||||
default:
|
||||
return &multiFilterEvaluator{evaluators: evaluators}, nil
|
||||
}
|
||||
return bexpr.CreateEvaluatorForType(expr, nil, reflect.TypeOf(structs.CheckServiceNode{}))
|
||||
}
|
||||
|
||||
// noopFilterEvaluator may be used in place of a bexpr.Evaluator. The Evaluate
|
||||
@ -207,6 +241,20 @@ func (noopFilterEvaluator) Evaluate(_ interface{}) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type multiFilterEvaluator struct {
|
||||
evaluators []filterEvaluator
|
||||
}
|
||||
|
||||
func (m multiFilterEvaluator) Evaluate(data interface{}) (bool, error) {
|
||||
for _, e := range m.evaluators {
|
||||
match, err := e.Evaluate(data)
|
||||
if !match || err != nil {
|
||||
return match, err
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// sortCheckServiceNodes sorts the results to match memdb semantics
|
||||
// Sort results by Node.Node, if 2 instances match, order by Service.ID
|
||||
// Will allow result to be stable sorted and match queries without cache
|
||||
@ -240,3 +288,34 @@ func (s *healthView) Result(index uint64) (interface{}, error) {
|
||||
func (s *healthView) Reset() {
|
||||
s.state = make(map[string]structs.CheckServiceNode)
|
||||
}
|
||||
|
||||
// serviceTagEvaluator implements the filterEvaluator to perform filtering
|
||||
// by service tags. bexpr can not be used at this time, because the filtering
|
||||
// must be case insensitive for backwards compatibility. In the future this
|
||||
// may be replaced with bexpr once case insensitive support is added.
|
||||
type serviceTagEvaluator struct {
|
||||
tags []string
|
||||
}
|
||||
|
||||
func (m serviceTagEvaluator) Evaluate(data interface{}) (bool, error) {
|
||||
csn, ok := data.(structs.CheckServiceNode)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("unexpected type %T for structs.CheckServiceNode filter", data)
|
||||
}
|
||||
for _, tag := range m.tags {
|
||||
if !serviceHasTag(csn.Service, tag) {
|
||||
// If any one of the expected tags was not found, filter the service
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func serviceHasTag(sn *structs.NodeService, tag string) bool {
|
||||
for _, t := range sn.Tags {
|
||||
if strings.EqualFold(t, tag) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
@ -229,7 +229,7 @@ func requireResultsSame(t *testing.T, want, got *structs.IndexedCheckServiceNode
|
||||
// without duplicating the tests.
|
||||
func getNamespace(ns string) string {
|
||||
meta := structs.NewEnterpriseMeta(ns)
|
||||
return meta.GetNamespace()
|
||||
return meta.NamespaceOrEmpty()
|
||||
}
|
||||
|
||||
func TestOrderingConsistentWithMemDb(t *testing.T) {
|
||||
@ -568,3 +568,211 @@ func runStep(t *testing.T, name string, fn func(t *testing.T)) {
|
||||
t.FailNow()
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewFilterEvaluator(t *testing.T) {
|
||||
type testCase struct {
|
||||
name string
|
||||
req structs.ServiceSpecificRequest
|
||||
data structs.CheckServiceNode
|
||||
expected bool
|
||||
}
|
||||
|
||||
fn := func(t *testing.T, tc testCase) {
|
||||
e, err := newFilterEvaluator(&tc.req)
|
||||
require.NoError(t, err)
|
||||
actual, err := e.Evaluate(tc.data)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expected, actual)
|
||||
}
|
||||
|
||||
var testCases = []testCase{
|
||||
{
|
||||
name: "single ServiceTags match",
|
||||
req: structs.ServiceSpecificRequest{
|
||||
ServiceTags: []string{"match"},
|
||||
TagFilter: true,
|
||||
},
|
||||
data: structs.CheckServiceNode{
|
||||
Service: &structs.NodeService{
|
||||
Tags: []string{"extra", "match"},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "single deprecated ServiceTag match",
|
||||
req: structs.ServiceSpecificRequest{
|
||||
ServiceTag: "match",
|
||||
TagFilter: true,
|
||||
},
|
||||
data: structs.CheckServiceNode{
|
||||
Service: &structs.NodeService{
|
||||
Tags: []string{"extra", "match"},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "single ServiceTags mismatch",
|
||||
req: structs.ServiceSpecificRequest{
|
||||
ServiceTags: []string{"other"},
|
||||
TagFilter: true,
|
||||
},
|
||||
data: structs.CheckServiceNode{
|
||||
Service: &structs.NodeService{
|
||||
Tags: []string{"extra", "match"},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "multiple ServiceTags match",
|
||||
req: structs.ServiceSpecificRequest{
|
||||
ServiceTags: []string{"match", "second"},
|
||||
TagFilter: true,
|
||||
},
|
||||
data: structs.CheckServiceNode{
|
||||
Service: &structs.NodeService{
|
||||
Tags: []string{"extra", "match", "second"},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "multiple ServiceTags mismatch",
|
||||
req: structs.ServiceSpecificRequest{
|
||||
ServiceTags: []string{"match", "not"},
|
||||
TagFilter: true,
|
||||
},
|
||||
data: structs.CheckServiceNode{
|
||||
Service: &structs.NodeService{
|
||||
Tags: []string{"extra", "match"},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "single NodeMetaFilter match",
|
||||
req: structs.ServiceSpecificRequest{
|
||||
NodeMetaFilters: map[string]string{"meta1": "match"},
|
||||
},
|
||||
data: structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
Meta: map[string]string{
|
||||
"meta1": "match",
|
||||
"extra": "some",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "single NodeMetaFilter mismatch",
|
||||
req: structs.ServiceSpecificRequest{
|
||||
NodeMetaFilters: map[string]string{
|
||||
"meta1": "match",
|
||||
},
|
||||
},
|
||||
data: structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
Meta: map[string]string{
|
||||
"meta1": "other",
|
||||
"extra": "some",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "multiple NodeMetaFilter match",
|
||||
req: structs.ServiceSpecificRequest{
|
||||
NodeMetaFilters: map[string]string{"meta1": "match", "meta2": "a"},
|
||||
},
|
||||
data: structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
Meta: map[string]string{
|
||||
"meta1": "match",
|
||||
"meta2": "a",
|
||||
"extra": "some",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "multiple NodeMetaFilter mismatch",
|
||||
req: structs.ServiceSpecificRequest{
|
||||
NodeMetaFilters: map[string]string{
|
||||
"meta1": "match",
|
||||
"meta2": "beta",
|
||||
},
|
||||
},
|
||||
data: structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
Meta: map[string]string{
|
||||
"meta1": "other",
|
||||
"meta2": "gamma",
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "QueryOptions.Filter match",
|
||||
req: structs.ServiceSpecificRequest{
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Filter: `Node.Node == "node3"`,
|
||||
},
|
||||
},
|
||||
data: structs.CheckServiceNode{
|
||||
Node: &structs.Node{Node: "node3"},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "QueryOptions.Filter mismatch",
|
||||
req: structs.ServiceSpecificRequest{
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Filter: `Node.Node == "node2"`,
|
||||
},
|
||||
},
|
||||
data: structs.CheckServiceNode{
|
||||
Node: &structs.Node{Node: "node3"},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "all match",
|
||||
req: structs.ServiceSpecificRequest{
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Filter: `Node.Node == "node3"`,
|
||||
},
|
||||
ServiceTags: []string{"tag1", "tag2"},
|
||||
NodeMetaFilters: map[string]string{
|
||||
"meta1": "match1",
|
||||
"meta2": "match2",
|
||||
},
|
||||
},
|
||||
data: structs.CheckServiceNode{
|
||||
Node: &structs.Node{
|
||||
Node: "node3",
|
||||
Meta: map[string]string{
|
||||
"meta1": "match1",
|
||||
"meta2": "match2",
|
||||
"extra": "other",
|
||||
},
|
||||
},
|
||||
Service: &structs.NodeService{
|
||||
Tags: []string{"tag1", "tag2", "extra"},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
fn(t, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -822,6 +822,8 @@ func (b *builder) Build() (rt RuntimeConfig, err error) {
|
||||
}
|
||||
}
|
||||
|
||||
serverMode := boolVal(c.ServerMode)
|
||||
|
||||
// ----------------------------------------------------------------
|
||||
// build runtime config
|
||||
//
|
||||
@ -1050,7 +1052,7 @@ func (b *builder) Build() (rt RuntimeConfig, err error) {
|
||||
RPCMaxConnsPerClient: intVal(c.Limits.RPCMaxConnsPerClient),
|
||||
RPCProtocol: intVal(c.RPCProtocol),
|
||||
RPCRateLimit: rate.Limit(float64Val(c.Limits.RPCRate)),
|
||||
RPCConfig: consul.RPCConfig{EnableStreaming: boolVal(c.RPC.EnableStreaming)},
|
||||
RPCConfig: consul.RPCConfig{EnableStreaming: boolValWithDefault(c.RPC.EnableStreaming, serverMode)},
|
||||
RaftProtocol: intVal(c.RaftProtocol),
|
||||
RaftSnapshotThreshold: intVal(c.RaftSnapshotThreshold),
|
||||
RaftSnapshotInterval: b.durationVal("raft_snapshot_interval", c.RaftSnapshotInterval),
|
||||
@ -1074,7 +1076,7 @@ func (b *builder) Build() (rt RuntimeConfig, err error) {
|
||||
SerfBindAddrWAN: serfBindAddrWAN,
|
||||
SerfPortLAN: serfPortLAN,
|
||||
SerfPortWAN: serfPortWAN,
|
||||
ServerMode: boolVal(c.ServerMode),
|
||||
ServerMode: serverMode,
|
||||
ServerName: stringVal(c.ServerName),
|
||||
ServerPort: serverPort,
|
||||
Services: services,
|
||||
@ -1569,6 +1571,7 @@ func (b *builder) checkVal(v *CheckDefinition) *structs.CheckDefinition {
|
||||
Shell: stringVal(v.Shell),
|
||||
GRPC: stringVal(v.GRPC),
|
||||
GRPCUseTLS: boolVal(v.GRPCUseTLS),
|
||||
TLSServerName: stringVal(v.TLSServerName),
|
||||
TLSSkipVerify: boolVal(v.TLSSkipVerify),
|
||||
AliasNode: stringVal(v.AliasNode),
|
||||
AliasService: stringVal(v.AliasService),
|
||||
@ -1687,6 +1690,7 @@ func (b *builder) serviceProxyVal(v *ServiceProxy) *structs.ConnectProxyConfig {
|
||||
Upstreams: b.upstreamsVal(v.Upstreams),
|
||||
MeshGateway: b.meshGatewayConfVal(v.MeshGateway),
|
||||
Expose: b.exposeConfVal(v.Expose),
|
||||
TransparentProxy: boolVal(v.TransparentProxy),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -137,7 +137,7 @@ type Config struct {
|
||||
AdvertiseAddrLANIPv6 *string `mapstructure:"advertise_addr_ipv6"`
|
||||
AdvertiseAddrWAN *string `mapstructure:"advertise_addr_wan"`
|
||||
AdvertiseAddrWANIPv4 *string `mapstructure:"advertise_addr_wan_ipv4"`
|
||||
AdvertiseAddrWANIPv6 *string `mapstructure:"advertise_addr_ipv6"`
|
||||
AdvertiseAddrWANIPv6 *string `mapstructure:"advertise_addr_wan_ipv6"`
|
||||
AdvertiseReconnectTimeout *string `mapstructure:"advertise_reconnect_timeout"`
|
||||
AutoConfig AutoConfigRaw `mapstructure:"auto_config"`
|
||||
Autopilot Autopilot `mapstructure:"autopilot"`
|
||||
@ -405,6 +405,7 @@ type CheckDefinition struct {
|
||||
Shell *string `mapstructure:"shell"`
|
||||
GRPC *string `mapstructure:"grpc"`
|
||||
GRPCUseTLS *bool `mapstructure:"grpc_use_tls"`
|
||||
TLSServerName *string `mapstructure:"tls_server_name"`
|
||||
TLSSkipVerify *bool `mapstructure:"tls_skip_verify" alias:"tlsskipverify"`
|
||||
AliasNode *string `mapstructure:"alias_node"`
|
||||
AliasService *string `mapstructure:"alias_service"`
|
||||
@ -472,6 +473,10 @@ type ServiceProxy struct {
|
||||
|
||||
// Expose defines whether checks or paths are exposed through the proxy
|
||||
Expose *ExposeConfig `mapstructure:"expose"`
|
||||
|
||||
// TransparentProxy toggles whether inbound and outbound traffic is being
|
||||
// redirected to the proxy.
|
||||
TransparentProxy *bool `mapstructure:"transparent_proxy"`
|
||||
}
|
||||
|
||||
// Upstream represents a single upstream dependency for a service or proxy. It
|
||||
|
@ -179,6 +179,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.DataDir = dataDir
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
expectedWarnings: []string{"bootstrap = true: do not enable unless necessary"},
|
||||
})
|
||||
@ -195,6 +196,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.DataDir = dataDir
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
expectedWarnings: []string{"bootstrap_expect > 0: expecting 3 servers"},
|
||||
})
|
||||
@ -342,6 +344,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.ConsulServerHealthInterval = 10 * time.Millisecond
|
||||
rt.GRPCPort = 8502
|
||||
rt.GRPCAddrs = []net.Addr{tcpAddr("127.0.0.1:8502")}
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
@ -663,6 +666,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
@ -845,6 +849,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.DataDir = dataDir
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
@ -1851,6 +1856,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.ServerMode = true
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.DataDir = dataDir
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
expectedWarnings: []string{"BootstrapExpect is set to 1; this is the same as Bootstrap mode.", "bootstrap = true: do not enable unless necessary"},
|
||||
})
|
||||
@ -1867,6 +1873,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.ServerMode = true
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.DataDir = dataDir
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
expectedWarnings: []string{
|
||||
`bootstrap_expect = 2: A cluster with 2 servers will provide no failure tolerance. See https://www.consul.io/docs/internals/consensus.html#deployment-table`,
|
||||
@ -1886,6 +1893,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.ServerMode = true
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.DataDir = dataDir
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
expectedWarnings: []string{
|
||||
`bootstrap_expect is even number: A cluster with an even number of servers does not achieve optimum fault tolerance. See https://www.consul.io/docs/internals/consensus.html#deployment-table`,
|
||||
@ -2574,6 +2582,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
}
|
||||
]
|
||||
},
|
||||
"transparent_proxy": true,
|
||||
"upstreams": [
|
||||
{
|
||||
"destination_name": "db",
|
||||
@ -2609,7 +2618,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
protocol = "http"
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
transparent_proxy = true
|
||||
upstreams = [
|
||||
{
|
||||
destination_name = "db"
|
||||
@ -2649,6 +2659,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
TransparentProxy: true,
|
||||
Upstreams: structs.Upstreams{
|
||||
structs.Upstream{
|
||||
DestinationType: "service",
|
||||
@ -2703,6 +2714,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
}
|
||||
]
|
||||
},
|
||||
"transparent_proxy": true,
|
||||
"upstreams": [
|
||||
{
|
||||
"destination_name": "db",
|
||||
@ -2738,7 +2750,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
protocol = "http"
|
||||
}
|
||||
]
|
||||
},
|
||||
}
|
||||
transparent_proxy = true,
|
||||
upstreams = [
|
||||
{
|
||||
destination_name = "db"
|
||||
@ -2778,6 +2791,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
TransparentProxy: true,
|
||||
Upstreams: structs.Upstreams{
|
||||
structs.Upstream{
|
||||
DestinationType: "service",
|
||||
@ -2843,6 +2857,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
@ -2870,6 +2885,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
@ -2897,6 +2913,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
@ -2921,6 +2938,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
@ -2949,10 +2967,12 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
},
|
||||
json: []string{`{
|
||||
"use_streaming_backend": true,
|
||||
"rpc": {"enable_streaming": false},
|
||||
"server": true
|
||||
}`},
|
||||
hcl: []string{`
|
||||
use_streaming_backend = true
|
||||
rpc { enable_streaming = false }
|
||||
server = true
|
||||
`},
|
||||
expectedWarnings: []string{"use_streaming_backend = true requires rpc.enable_streaming on servers to work properly"},
|
||||
@ -3322,6 +3342,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.ServerMode = true
|
||||
rt.LeaveOnTerm = false
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
})
|
||||
|
||||
@ -3413,7 +3434,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
},
|
||||
"mesh_gateway": {
|
||||
"mode": "remote"
|
||||
}
|
||||
},
|
||||
"transparent_proxy": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -3432,6 +3454,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
mesh_gateway {
|
||||
mode = "remote"
|
||||
}
|
||||
transparent_proxy = true
|
||||
}
|
||||
}`},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
@ -3450,6 +3473,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
MeshGateway: structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
},
|
||||
TransparentProxy: true,
|
||||
},
|
||||
}
|
||||
},
|
||||
@ -3471,7 +3495,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
},
|
||||
"MeshGateway": {
|
||||
"Mode": "remote"
|
||||
}
|
||||
},
|
||||
"TransparentProxy": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -3490,6 +3515,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
MeshGateway {
|
||||
Mode = "remote"
|
||||
}
|
||||
TransparentProxy = true
|
||||
}
|
||||
}`},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
@ -3508,6 +3534,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
MeshGateway: structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
},
|
||||
TransparentProxy: true,
|
||||
},
|
||||
}
|
||||
},
|
||||
@ -3529,7 +3556,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
"external_sni": "abc-123",
|
||||
"mesh_gateway": {
|
||||
"mode": "remote"
|
||||
}
|
||||
},
|
||||
"transparent_proxy": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -3548,6 +3576,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
mesh_gateway {
|
||||
mode = "remote"
|
||||
}
|
||||
transparent_proxy = true
|
||||
}
|
||||
}`},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
@ -3566,6 +3595,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
MeshGateway: structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
},
|
||||
TransparentProxy: true,
|
||||
},
|
||||
}
|
||||
},
|
||||
@ -3587,7 +3617,8 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
"ExternalSNI": "abc-123",
|
||||
"MeshGateway": {
|
||||
"Mode": "remote"
|
||||
}
|
||||
},
|
||||
"TransparentProxy": true
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -3606,6 +3637,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
MeshGateway {
|
||||
Mode = "remote"
|
||||
}
|
||||
TransparentProxy = true
|
||||
}
|
||||
}`},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
@ -3624,6 +3656,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
MeshGateway: structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeRemote,
|
||||
},
|
||||
TransparentProxy: true,
|
||||
},
|
||||
}
|
||||
},
|
||||
@ -4497,6 +4530,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
||||
rt.ServerMode = true
|
||||
rt.SkipLeaveOnInt = true
|
||||
rt.CertFile = "foo"
|
||||
rt.RPCConfig.EnableStreaming = true
|
||||
},
|
||||
})
|
||||
// UI Config tests
|
||||
@ -5065,6 +5099,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
||||
OutputMaxSize: checks.DefaultBufSize,
|
||||
DockerContainerID: "ipgdFtjd",
|
||||
Shell: "qAeOYy0M",
|
||||
TLSServerName: "bdeb5f6a",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 1813 * time.Second,
|
||||
TTL: 21743 * time.Second,
|
||||
@ -5090,6 +5125,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
||||
Interval: 28767 * time.Second,
|
||||
DockerContainerID: "THW6u7rL",
|
||||
Shell: "C1Zt3Zwh",
|
||||
TLSServerName: "6adc3bfb",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 18506 * time.Second,
|
||||
TTL: 31006 * time.Second,
|
||||
@ -5115,6 +5151,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
||||
Interval: 18714 * time.Second,
|
||||
DockerContainerID: "qF66POS9",
|
||||
Shell: "sOnDy228",
|
||||
TLSServerName: "7BdnzBYk",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 5954 * time.Second,
|
||||
TTL: 30044 * time.Second,
|
||||
@ -5320,6 +5357,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
||||
Interval: 24392 * time.Second,
|
||||
DockerContainerID: "ZKXr68Yb",
|
||||
Shell: "CEfzx0Fo",
|
||||
TLSServerName: "4f191d4F",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 38333 * time.Second,
|
||||
TTL: 57201 * time.Second,
|
||||
@ -5370,6 +5408,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
||||
Interval: 32718 * time.Second,
|
||||
DockerContainerID: "cU15LMet",
|
||||
Shell: "nEz9qz2l",
|
||||
TLSServerName: "f43ouY7a",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 34738 * time.Second,
|
||||
TTL: 22773 * time.Second,
|
||||
@ -5393,6 +5432,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
||||
Interval: 5656 * time.Second,
|
||||
DockerContainerID: "5tDBWpfA",
|
||||
Shell: "rlTpLM8s",
|
||||
TLSServerName: "sOv5WTtp",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 4868 * time.Second,
|
||||
TTL: 11222 * time.Second,
|
||||
@ -5442,6 +5482,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
TransparentProxy: true,
|
||||
},
|
||||
Weights: &structs.Weights{
|
||||
Passing: 1,
|
||||
@ -5509,6 +5550,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
||||
Interval: 22224 * time.Second,
|
||||
DockerContainerID: "ipgdFtjd",
|
||||
Shell: "omVZq7Sz",
|
||||
TLSServerName: "axw5QPL5",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 18913 * time.Second,
|
||||
TTL: 44743 * time.Second,
|
||||
@ -5532,6 +5574,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
||||
Interval: 12356 * time.Second,
|
||||
DockerContainerID: "HBndBU6R",
|
||||
Shell: "hVI33JjA",
|
||||
TLSServerName: "7uwWOnUS",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 38282 * time.Second,
|
||||
TTL: 1181 * time.Second,
|
||||
@ -5555,6 +5598,7 @@ func TestLoad_FullConfig(t *testing.T) {
|
||||
Interval: 23926 * time.Second,
|
||||
DockerContainerID: "dO5TtRHk",
|
||||
Shell: "e6q2ttES",
|
||||
TLSServerName: "ECSHk8WF",
|
||||
TLSSkipVerify: true,
|
||||
Timeout: 38483 * time.Second,
|
||||
TTL: 10943 * time.Second,
|
||||
|
@ -107,6 +107,7 @@
|
||||
"Status": "",
|
||||
"SuccessBeforePassing": 0,
|
||||
"TCP": "",
|
||||
"TLSServerName": "",
|
||||
"TLSSkipVerify": false,
|
||||
"TTL": "0s",
|
||||
"Timeout": "0s",
|
||||
@ -307,6 +308,7 @@
|
||||
"Status": "",
|
||||
"SuccessBeforePassing": 0,
|
||||
"TCP": "",
|
||||
"TLSServerName": "",
|
||||
"TLSSkipVerify": false,
|
||||
"TTL": "0s",
|
||||
"Timeout": "0s"
|
||||
|
10
agent/config/testdata/full-config.hcl
vendored
10
agent/config/testdata/full-config.hcl
vendored
@ -113,6 +113,7 @@ check = {
|
||||
output_max_size = 4096
|
||||
docker_container_id = "qF66POS9"
|
||||
shell = "sOnDy228"
|
||||
tls_server_name = "7BdnzBYk"
|
||||
tls_skip_verify = true
|
||||
timeout = "5954s"
|
||||
ttl = "30044s"
|
||||
@ -139,6 +140,7 @@ checks = [
|
||||
output_max_size = 4096
|
||||
docker_container_id = "ipgdFtjd"
|
||||
shell = "qAeOYy0M"
|
||||
tls_server_name = "bdeb5f6a"
|
||||
tls_skip_verify = true
|
||||
timeout = "1813s"
|
||||
ttl = "21743s"
|
||||
@ -164,6 +166,7 @@ checks = [
|
||||
output_max_size = 4096
|
||||
docker_container_id = "THW6u7rL"
|
||||
shell = "C1Zt3Zwh"
|
||||
tls_server_name = "6adc3bfb"
|
||||
tls_skip_verify = true
|
||||
timeout = "18506s"
|
||||
ttl = "31006s"
|
||||
@ -378,6 +381,7 @@ service = {
|
||||
interval = "23926s"
|
||||
docker_container_id = "dO5TtRHk"
|
||||
shell = "e6q2ttES"
|
||||
tls_server_name = "ECSHk8WF"
|
||||
tls_skip_verify = true
|
||||
timeout = "38483s"
|
||||
ttl = "10943s"
|
||||
@ -402,6 +406,7 @@ service = {
|
||||
output_max_size = 4096
|
||||
docker_container_id = "ipgdFtjd"
|
||||
shell = "omVZq7Sz"
|
||||
tls_server_name = "axw5QPL5"
|
||||
tls_skip_verify = true
|
||||
timeout = "18913s"
|
||||
ttl = "44743s"
|
||||
@ -425,6 +430,7 @@ service = {
|
||||
output_max_size = 4096
|
||||
docker_container_id = "HBndBU6R"
|
||||
shell = "hVI33JjA"
|
||||
tls_server_name = "7uwWOnUS"
|
||||
tls_skip_verify = true
|
||||
timeout = "38282s"
|
||||
ttl = "1181s"
|
||||
@ -462,6 +468,7 @@ services = [
|
||||
output_max_size = 4096
|
||||
docker_container_id = "ZKXr68Yb"
|
||||
shell = "CEfzx0Fo"
|
||||
tls_server_name = "4f191d4F"
|
||||
tls_skip_verify = true
|
||||
timeout = "38333s"
|
||||
ttl = "57201s"
|
||||
@ -502,6 +509,7 @@ services = [
|
||||
output_max_size = 4096
|
||||
docker_container_id = "cU15LMet"
|
||||
shell = "nEz9qz2l"
|
||||
tls_server_name = "f43ouY7a"
|
||||
tls_skip_verify = true
|
||||
timeout = "34738s"
|
||||
ttl = "22773s"
|
||||
@ -525,6 +533,7 @@ services = [
|
||||
output_max_size = 4096
|
||||
docker_container_id = "5tDBWpfA"
|
||||
shell = "rlTpLM8s"
|
||||
tls_server_name = "sOv5WTtp"
|
||||
tls_skip_verify = true
|
||||
timeout = "4868s"
|
||||
ttl = "11222s"
|
||||
@ -573,6 +582,7 @@ services = [
|
||||
}
|
||||
]
|
||||
}
|
||||
transparent_proxy = true
|
||||
}
|
||||
},
|
||||
{
|
||||
|
10
agent/config/testdata/full-config.json
vendored
10
agent/config/testdata/full-config.json
vendored
@ -114,6 +114,7 @@
|
||||
"interval": "18714s",
|
||||
"docker_container_id": "qF66POS9",
|
||||
"shell": "sOnDy228",
|
||||
"tls_server_name": "7BdnzBYk",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "5954s",
|
||||
"ttl": "30044s",
|
||||
@ -140,6 +141,7 @@
|
||||
"output_max_size": 4096,
|
||||
"docker_container_id": "ipgdFtjd",
|
||||
"shell": "qAeOYy0M",
|
||||
"tls_server_name": "bdeb5f6a",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "1813s",
|
||||
"ttl": "21743s",
|
||||
@ -165,6 +167,7 @@
|
||||
"output_max_size": 4096,
|
||||
"docker_container_id": "THW6u7rL",
|
||||
"shell": "C1Zt3Zwh",
|
||||
"tls_server_name": "6adc3bfb",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "18506s",
|
||||
"ttl": "31006s",
|
||||
@ -375,6 +378,7 @@
|
||||
"output_max_size": 4096,
|
||||
"docker_container_id": "dO5TtRHk",
|
||||
"shell": "e6q2ttES",
|
||||
"tls_server_name": "ECSHk8WF",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "38483s",
|
||||
"ttl": "10943s",
|
||||
@ -399,6 +403,7 @@
|
||||
"output_max_size": 4096,
|
||||
"docker_container_id": "ipgdFtjd",
|
||||
"shell": "omVZq7Sz",
|
||||
"tls_server_name": "axw5QPL5",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "18913s",
|
||||
"ttl": "44743s",
|
||||
@ -422,6 +427,7 @@
|
||||
"output_max_size": 4096,
|
||||
"docker_container_id": "HBndBU6R",
|
||||
"shell": "hVI33JjA",
|
||||
"tls_server_name": "7uwWOnUS",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "38282s",
|
||||
"ttl": "1181s",
|
||||
@ -459,6 +465,7 @@
|
||||
"output_max_size": 4096,
|
||||
"docker_container_id": "ZKXr68Yb",
|
||||
"shell": "CEfzx0Fo",
|
||||
"tls_server_name": "4f191d4F",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "38333s",
|
||||
"ttl": "57201s",
|
||||
@ -499,6 +506,7 @@
|
||||
"output_max_size": 4096,
|
||||
"docker_container_id": "cU15LMet",
|
||||
"shell": "nEz9qz2l",
|
||||
"tls_server_name": "f43ouY7a",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "34738s",
|
||||
"ttl": "22773s",
|
||||
@ -522,6 +530,7 @@
|
||||
"output_max_size": 4096,
|
||||
"docker_container_id": "5tDBWpfA",
|
||||
"shell": "rlTpLM8s",
|
||||
"tls_server_name": "sOv5WTtp",
|
||||
"tls_skip_verify": true,
|
||||
"timeout": "4868s",
|
||||
"ttl": "11222s",
|
||||
@ -554,6 +563,7 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"transparent_proxy": true,
|
||||
"upstreams": [
|
||||
{
|
||||
"destination_name": "KPtAj2cb",
|
||||
|
49
agent/connect/authz.go
Normal file
49
agent/connect/authz.go
Normal file
@ -0,0 +1,49 @@
|
||||
package connect
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
// AuthorizeIntentionTarget determines whether the destination is covered by the given intention
|
||||
// and whether the intention action allows a connection.
|
||||
// This is a generalized version of the old CertURI.Authorize(), and can be evaluated against sources or destinations.
|
||||
//
|
||||
// The return value of `auth` is only valid if the second value `match` is true.
|
||||
// If `match` is false, then the intention doesn't match this target and any result should be ignored.
|
||||
func AuthorizeIntentionTarget(
|
||||
target, targetNS string,
|
||||
ixn *structs.Intention,
|
||||
matchType structs.IntentionMatchType,
|
||||
) (auth bool, match bool) {
|
||||
|
||||
switch matchType {
|
||||
case structs.IntentionMatchDestination:
|
||||
if ixn.DestinationNS != structs.WildcardSpecifier && ixn.DestinationNS != targetNS {
|
||||
// Non-matching namespace
|
||||
return false, false
|
||||
}
|
||||
|
||||
if ixn.DestinationName != structs.WildcardSpecifier && ixn.DestinationName != target {
|
||||
// Non-matching name
|
||||
return false, false
|
||||
}
|
||||
|
||||
case structs.IntentionMatchSource:
|
||||
if ixn.SourceNS != structs.WildcardSpecifier && ixn.SourceNS != targetNS {
|
||||
// Non-matching namespace
|
||||
return false, false
|
||||
}
|
||||
|
||||
if ixn.SourceName != structs.WildcardSpecifier && ixn.SourceName != target {
|
||||
// Non-matching name
|
||||
return false, false
|
||||
}
|
||||
|
||||
default:
|
||||
// Reject on any un-recognized match type
|
||||
return false, false
|
||||
}
|
||||
|
||||
// The name and namespace match, so the destination is covered
|
||||
return ixn.Action == structs.IntentionActionAllow, true
|
||||
}
|
196
agent/connect/authz_test.go
Normal file
196
agent/connect/authz_test.go
Normal file
@ -0,0 +1,196 @@
|
||||
package connect
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestAuthorizeIntentionTarget(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
target string
|
||||
targetNS string
|
||||
ixn *structs.Intention
|
||||
matchType structs.IntentionMatchType
|
||||
auth bool
|
||||
match bool
|
||||
}{
|
||||
// Source match type
|
||||
{
|
||||
name: "match exact source, not matching namespace",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
SourceName: "db",
|
||||
SourceNS: "different",
|
||||
},
|
||||
matchType: structs.IntentionMatchSource,
|
||||
auth: false,
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
name: "match exact source, not matching name",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
SourceName: "db",
|
||||
SourceNS: structs.IntentionDefaultNamespace,
|
||||
},
|
||||
matchType: structs.IntentionMatchSource,
|
||||
auth: false,
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
name: "match exact source, allow",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
SourceName: "web",
|
||||
SourceNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
matchType: structs.IntentionMatchSource,
|
||||
auth: true,
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
name: "match exact source, deny",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
SourceName: "web",
|
||||
SourceNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
matchType: structs.IntentionMatchSource,
|
||||
auth: false,
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
name: "match exact sourceNS for wildcard service, deny",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
SourceName: structs.WildcardSpecifier,
|
||||
SourceNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
matchType: structs.IntentionMatchSource,
|
||||
auth: false,
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
name: "match exact sourceNS for wildcard service, allow",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
SourceName: structs.WildcardSpecifier,
|
||||
SourceNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
matchType: structs.IntentionMatchSource,
|
||||
auth: true,
|
||||
match: true,
|
||||
},
|
||||
|
||||
// Destination match type
|
||||
{
|
||||
name: "match exact destination, not matching namespace",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: "db",
|
||||
DestinationNS: "different",
|
||||
},
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
auth: false,
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
name: "match exact destination, not matching name",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: "db",
|
||||
DestinationNS: structs.IntentionDefaultNamespace,
|
||||
},
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
auth: false,
|
||||
match: false,
|
||||
},
|
||||
{
|
||||
name: "match exact destination, allow",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: "web",
|
||||
DestinationNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
auth: true,
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
name: "match exact destination, deny",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: "web",
|
||||
DestinationNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
auth: false,
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
name: "match exact destinationNS for wildcard service, deny",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: structs.WildcardSpecifier,
|
||||
DestinationNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
auth: false,
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
name: "match exact destinationNS for wildcard service, allow",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: structs.WildcardSpecifier,
|
||||
DestinationNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
auth: true,
|
||||
match: true,
|
||||
},
|
||||
{
|
||||
name: "unknown match type",
|
||||
target: "web",
|
||||
targetNS: structs.IntentionDefaultNamespace,
|
||||
ixn: &structs.Intention{
|
||||
DestinationName: structs.WildcardSpecifier,
|
||||
DestinationNS: structs.IntentionDefaultNamespace,
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
matchType: structs.IntentionMatchType("unknown"),
|
||||
auth: false,
|
||||
match: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
auth, match := AuthorizeIntentionTarget(tc.target, tc.targetNS, tc.ixn, tc.matchType)
|
||||
assert.Equal(t, tc.auth, auth)
|
||||
assert.Equal(t, tc.match, match)
|
||||
})
|
||||
}
|
||||
}
|
@ -5,8 +5,6 @@ import (
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
// CertURI represents a Connect-valid URI value for a TLS certificate.
|
||||
@ -17,13 +15,6 @@ import (
|
||||
// However, we anticipate that we may accept URIs that are also not SPIFFE
|
||||
// compliant and therefore the interface is named as such.
|
||||
type CertURI interface {
|
||||
// Authorize tests the authorization for this URI as a client
|
||||
// for the given intention. The return value `auth` is only valid if
|
||||
// the second value `match` is true. If the second value `match` is
|
||||
// false, then the intention doesn't match this client and any
|
||||
// result should be ignored.
|
||||
Authorize(*structs.Intention) (auth bool, match bool)
|
||||
|
||||
// URI is the valid URI value used in the cert.
|
||||
URI() *url.URL
|
||||
}
|
||||
|
@ -3,8 +3,6 @@ package connect
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
// SpiffeIDService is the structure to represent the SPIFFE ID for an agent.
|
||||
@ -23,11 +21,6 @@ func (id *SpiffeIDAgent) URI() *url.URL {
|
||||
return &result
|
||||
}
|
||||
|
||||
// CertURI impl.
|
||||
func (id *SpiffeIDAgent) Authorize(_ *structs.Intention) (bool, bool) {
|
||||
return false, false
|
||||
}
|
||||
|
||||
func (id *SpiffeIDAgent) CommonName() string {
|
||||
return AgentCN(id.Agent, id.Host)
|
||||
}
|
||||
|
@ -15,14 +15,3 @@ func TestSpiffeIDAgentURI(t *testing.T) {
|
||||
|
||||
require.Equal(t, "spiffe://1234.consul/agent/client/dc/dc1/id/123", agent.URI().String())
|
||||
}
|
||||
|
||||
func TestSpiffeIDAgentAuthorize(t *testing.T) {
|
||||
agent := &SpiffeIDAgent{
|
||||
Host: "1234.consul",
|
||||
Agent: "uuid",
|
||||
}
|
||||
|
||||
auth, match := agent.Authorize(nil)
|
||||
require.False(t, auth)
|
||||
require.False(t, match)
|
||||
}
|
||||
|
@ -3,8 +3,6 @@ package connect
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
// SpiffeIDService is the structure to represent the SPIFFE ID for a service.
|
||||
@ -25,22 +23,6 @@ func (id *SpiffeIDService) URI() *url.URL {
|
||||
return &result
|
||||
}
|
||||
|
||||
// CertURI impl.
|
||||
func (id *SpiffeIDService) Authorize(ixn *structs.Intention) (bool, bool) {
|
||||
if ixn.SourceNS != structs.WildcardSpecifier && ixn.SourceNS != id.Namespace {
|
||||
// Non-matching namespace
|
||||
return false, false
|
||||
}
|
||||
|
||||
if ixn.SourceName != structs.WildcardSpecifier && ixn.SourceName != id.Service {
|
||||
// Non-matching name
|
||||
return false, false
|
||||
}
|
||||
|
||||
// Match, return allow value
|
||||
return ixn.Action == structs.IntentionActionAllow, true
|
||||
}
|
||||
|
||||
func (id *SpiffeIDService) CommonName() string {
|
||||
return ServiceCN(id.Service, id.Namespace, id.Host)
|
||||
}
|
||||
|
@ -1,104 +0,0 @@
|
||||
package connect
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSpiffeIDServiceAuthorize(t *testing.T) {
|
||||
ns := structs.IntentionDefaultNamespace
|
||||
serviceWeb := &SpiffeIDService{
|
||||
Host: "1234.consul",
|
||||
Namespace: structs.IntentionDefaultNamespace,
|
||||
Datacenter: "dc01",
|
||||
Service: "web",
|
||||
}
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
URI *SpiffeIDService
|
||||
Ixn *structs.Intention
|
||||
Auth bool
|
||||
Match bool
|
||||
}{
|
||||
{
|
||||
"exact source, not matching namespace",
|
||||
serviceWeb,
|
||||
&structs.Intention{
|
||||
SourceNS: "different",
|
||||
SourceName: "db",
|
||||
},
|
||||
false,
|
||||
false,
|
||||
},
|
||||
|
||||
{
|
||||
"exact source, not matching name",
|
||||
serviceWeb,
|
||||
&structs.Intention{
|
||||
SourceNS: ns,
|
||||
SourceName: "db",
|
||||
},
|
||||
false,
|
||||
false,
|
||||
},
|
||||
|
||||
{
|
||||
"exact source, allow",
|
||||
serviceWeb,
|
||||
&structs.Intention{
|
||||
SourceNS: serviceWeb.Namespace,
|
||||
SourceName: serviceWeb.Service,
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
true,
|
||||
true,
|
||||
},
|
||||
|
||||
{
|
||||
"exact source, deny",
|
||||
serviceWeb,
|
||||
&structs.Intention{
|
||||
SourceNS: serviceWeb.Namespace,
|
||||
SourceName: serviceWeb.Service,
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
false,
|
||||
true,
|
||||
},
|
||||
|
||||
{
|
||||
"exact namespace, wildcard service, deny",
|
||||
serviceWeb,
|
||||
&structs.Intention{
|
||||
SourceNS: serviceWeb.Namespace,
|
||||
SourceName: structs.WildcardSpecifier,
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
false,
|
||||
true,
|
||||
},
|
||||
|
||||
{
|
||||
"exact namespace, wildcard service, allow",
|
||||
serviceWeb,
|
||||
&structs.Intention{
|
||||
SourceNS: serviceWeb.Namespace,
|
||||
SourceName: structs.WildcardSpecifier,
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
true,
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
auth, match := tc.URI.Authorize(tc.Ixn)
|
||||
assert.Equal(t, tc.Auth, auth)
|
||||
assert.Equal(t, tc.Match, match)
|
||||
})
|
||||
}
|
||||
}
|
@ -28,12 +28,6 @@ func (id *SpiffeIDSigning) Host() string {
|
||||
return strings.ToLower(fmt.Sprintf("%s.%s", id.ClusterID, id.Domain))
|
||||
}
|
||||
|
||||
// CertURI impl.
|
||||
func (id *SpiffeIDSigning) Authorize(ixn *structs.Intention) (bool, bool) {
|
||||
// Never authorize as a client.
|
||||
return false, true
|
||||
}
|
||||
|
||||
// CanSign takes any CertURI and returns whether or not this signing entity is
|
||||
// allowed to sign CSRs for that entity (i.e. represents the trust domain for
|
||||
// that entity).
|
||||
|
@ -10,14 +10,6 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Signing ID should never authorize
|
||||
func TestSpiffeIDSigningAuthorize(t *testing.T) {
|
||||
var id SpiffeIDSigning
|
||||
auth, ok := id.Authorize(nil)
|
||||
assert.False(t, auth)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func TestSpiffeIDSigningForCluster(t *testing.T) {
|
||||
// For now it should just append .consul to the ID.
|
||||
config := &structs.CAConfiguration{
|
||||
@ -31,10 +23,6 @@ func TestSpiffeIDSigningForCluster(t *testing.T) {
|
||||
// about
|
||||
type fakeCertURI string
|
||||
|
||||
func (f fakeCertURI) Authorize(*structs.Intention) (auth bool, match bool) {
|
||||
return false, false
|
||||
}
|
||||
|
||||
func (f fakeCertURI) URI() *url.URL {
|
||||
u, _ := url.Parse(string(f))
|
||||
return u
|
||||
|
@ -3,7 +3,6 @@ package agent
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/cache"
|
||||
cachetype "github.com/hashicorp/consul/agent/cache-types"
|
||||
@ -105,7 +104,8 @@ func (a *Agent) ConnectAuthorize(token string,
|
||||
// Figure out which source matches this request.
|
||||
var ixnMatch *structs.Intention
|
||||
for _, ixn := range reply.Matches[0] {
|
||||
if _, ok := uriService.Authorize(ixn); ok {
|
||||
// We match on the intention source because the uriService is the source of the connection to authorize.
|
||||
if _, ok := connect.AuthorizeIntentionTarget(uriService.Service, uriService.Namespace, ixn, structs.IntentionMatchSource); ok {
|
||||
ixnMatch = ixn
|
||||
break
|
||||
}
|
||||
|
@ -397,7 +397,7 @@ func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.In
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
index, services, err := state.ServiceList(ws, &args.EnterpriseMeta)
|
||||
index, services, err := state.ServiceList(ws, nil, &args.EnterpriseMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -329,31 +329,21 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
reply.Reset()
|
||||
|
||||
reply.MeshGateway.Mode = structs.MeshGatewayModeDefault
|
||||
// Pass the WatchSet to both the service and proxy config lookups. If either is updated
|
||||
// during the blocking query, this function will be rerun and these state store lookups
|
||||
// will both be current.
|
||||
index, serviceEntry, err := state.ConfigEntry(ws, structs.ServiceDefaults, args.Name, &args.EnterpriseMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var serviceConf *structs.ServiceConfigEntry
|
||||
var ok bool
|
||||
if serviceEntry != nil {
|
||||
serviceConf, ok = serviceEntry.(*structs.ServiceConfigEntry)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid service config type %T", serviceEntry)
|
||||
}
|
||||
}
|
||||
|
||||
// Use the default enterprise meta to look up the global proxy defaults. In the future we may allow per-namespace proxy-defaults
|
||||
// but not yet.
|
||||
// TODO(freddy) Refactor this into smaller set of state store functions
|
||||
// Pass the WatchSet to both the service and proxy config lookups. If either is updated during the
|
||||
// blocking query, this function will be rerun and these state store lookups will both be current.
|
||||
// We use the default enterprise meta to look up the global proxy defaults because they are not namespaced.
|
||||
_, proxyEntry, err := state.ConfigEntry(ws, structs.ProxyDefaults, structs.ProxyConfigGlobal, structs.DefaultEnterpriseMeta())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var proxyConf *structs.ProxyConfigEntry
|
||||
|
||||
var (
|
||||
proxyConf *structs.ProxyConfigEntry
|
||||
proxyConfGlobalProtocol string
|
||||
ok bool
|
||||
)
|
||||
if proxyEntry != nil {
|
||||
proxyConf, ok = proxyEntry.(*structs.ProxyConfigEntry)
|
||||
if !ok {
|
||||
@ -367,11 +357,30 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
||||
reply.ProxyConfig = mapCopy.(map[string]interface{})
|
||||
reply.MeshGateway = proxyConf.MeshGateway
|
||||
reply.Expose = proxyConf.Expose
|
||||
reply.TransparentProxy = proxyConf.TransparentProxy
|
||||
|
||||
// Extract the global protocol from proxyConf for upstream configs.
|
||||
rawProtocol := proxyConf.Config["protocol"]
|
||||
if rawProtocol != nil {
|
||||
proxyConfGlobalProtocol, ok = rawProtocol.(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid protocol type %T", rawProtocol)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
index, serviceEntry, err := state.ConfigEntry(ws, structs.ServiceDefaults, args.Name, &args.EnterpriseMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reply.Index = index
|
||||
|
||||
if serviceConf != nil {
|
||||
var serviceConf *structs.ServiceConfigEntry
|
||||
if serviceEntry != nil {
|
||||
serviceConf, ok = serviceEntry.(*structs.ServiceConfigEntry)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid service config type %T", serviceEntry)
|
||||
}
|
||||
if serviceConf.Expose.Checks {
|
||||
reply.Expose.Checks = true
|
||||
}
|
||||
@ -387,57 +396,112 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
||||
}
|
||||
reply.ProxyConfig["protocol"] = serviceConf.Protocol
|
||||
}
|
||||
if serviceConf.TransparentProxy {
|
||||
reply.TransparentProxy = serviceConf.TransparentProxy
|
||||
}
|
||||
}
|
||||
|
||||
// Extract the global protocol from proxyConf for upstream configs.
|
||||
var proxyConfGlobalProtocol interface{}
|
||||
if proxyConf != nil && proxyConf.Config != nil {
|
||||
proxyConfGlobalProtocol = proxyConf.Config["protocol"]
|
||||
}
|
||||
// First collect all upstreams into a set of seen upstreams.
|
||||
// Upstreams can come from:
|
||||
// - Explicitly from proxy registrations, and therefore as an argument to this RPC endpoint
|
||||
// - Implicitly from centralized upstream config in service-defaults
|
||||
seenUpstreams := map[structs.ServiceID]struct{}{}
|
||||
|
||||
// map the legacy request structure using only service names
|
||||
// to the new ServiceID type.
|
||||
upstreamIDs := args.UpstreamIDs
|
||||
legacyUpstreams := false
|
||||
|
||||
// Before Consul namespaces were released, the Upstreams provided to the endpoint did not contain the namespace.
|
||||
// Because of this we attach the enterprise meta of the request, which will just be the default namespace.
|
||||
if len(upstreamIDs) == 0 {
|
||||
legacyUpstreams = true
|
||||
|
||||
upstreamIDs = make([]structs.ServiceID, 0)
|
||||
for _, upstream := range args.Upstreams {
|
||||
upstreamIDs = append(upstreamIDs, structs.NewServiceID(upstream, &args.EnterpriseMeta))
|
||||
sid := structs.NewServiceID(upstream, &args.EnterpriseMeta)
|
||||
upstreamIDs = append(upstreamIDs, sid)
|
||||
}
|
||||
}
|
||||
|
||||
// First store all upstreams that were provided in the request
|
||||
for _, sid := range upstreamIDs {
|
||||
if _, ok := seenUpstreams[sid]; !ok {
|
||||
seenUpstreams[sid] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Then store upstreams inferred from service-defaults
|
||||
if serviceConf != nil && serviceConf.Connect != nil {
|
||||
for sid := range serviceConf.Connect.UpstreamConfigs {
|
||||
seenUpstreams[structs.ServiceIDFromString(sid)] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
upstreamDefaults *structs.UpstreamConfig
|
||||
upstreamConfigs map[string]*structs.UpstreamConfig
|
||||
)
|
||||
if serviceConf != nil && serviceConf.Connect != nil {
|
||||
if serviceConf.Connect.UpstreamDefaults != nil {
|
||||
upstreamDefaults = serviceConf.Connect.UpstreamDefaults
|
||||
}
|
||||
if serviceConf.Connect.UpstreamConfigs != nil {
|
||||
upstreamConfigs = serviceConf.Connect.UpstreamConfigs
|
||||
}
|
||||
}
|
||||
|
||||
// usConfigs stores the opaque config map for each upstream and is keyed on the upstream's ID.
|
||||
usConfigs := make(map[structs.ServiceID]map[string]interface{})
|
||||
|
||||
for _, upstream := range upstreamIDs {
|
||||
_, upstreamEntry, err := state.ConfigEntry(ws, structs.ServiceDefaults, upstream.ID, &upstream.EnterpriseMeta)
|
||||
for upstream := range seenUpstreams {
|
||||
resolvedCfg := make(map[string]interface{})
|
||||
|
||||
// The protocol of an upstream is resolved in this order:
|
||||
// 1. Default protocol from proxy-defaults (how all services should be addressed)
|
||||
// 2. Protocol for upstream service defined in its service-defaults (how the upstream wants to be addressed)
|
||||
// 3. Protocol defined for the upstream in the service-defaults.(upstream_defaults|upstream_configs) of the downstream
|
||||
// (how the downstream wants to address it)
|
||||
protocol := proxyConfGlobalProtocol
|
||||
|
||||
_, upstreamSvcDefaults, err := state.ConfigEntry(ws, structs.ServiceDefaults, upstream.ID, &upstream.EnterpriseMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var upstreamConf *structs.ServiceConfigEntry
|
||||
var ok bool
|
||||
if upstreamEntry != nil {
|
||||
upstreamConf, ok = upstreamEntry.(*structs.ServiceConfigEntry)
|
||||
if upstreamSvcDefaults != nil {
|
||||
cfg, ok := upstreamSvcDefaults.(*structs.ServiceConfigEntry)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid service config type %T", upstreamEntry)
|
||||
return fmt.Errorf("invalid service config type %T", upstreamSvcDefaults)
|
||||
}
|
||||
if cfg.Protocol != "" {
|
||||
protocol = cfg.Protocol
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to proxyConf global protocol.
|
||||
protocol := proxyConfGlobalProtocol
|
||||
if upstreamConf != nil && upstreamConf.Protocol != "" {
|
||||
protocol = upstreamConf.Protocol
|
||||
if protocol != "" {
|
||||
resolvedCfg["protocol"] = protocol
|
||||
}
|
||||
|
||||
// Nothing to configure if a protocol hasn't been set.
|
||||
if protocol == nil {
|
||||
continue
|
||||
// Merge centralized defaults for all upstreams before configuration for specific upstreams
|
||||
if upstreamDefaults != nil {
|
||||
upstreamDefaults.MergeInto(resolvedCfg)
|
||||
}
|
||||
|
||||
usConfigs[upstream] = map[string]interface{}{
|
||||
"protocol": protocol,
|
||||
// The MeshGateway value from the proxy registration overrides the one from upstream_defaults
|
||||
// because it is specific to the proxy instance.
|
||||
//
|
||||
// The goal is to flatten the mesh gateway mode in this order:
|
||||
// 0. Value from centralized upstream_defaults
|
||||
// 1. Value from local proxy registration
|
||||
// 2. Value from centralized upstream_configs
|
||||
// 3. Value from local upstream definition. This last step is done in the client's service manager.
|
||||
if !args.MeshGateway.IsZero() {
|
||||
resolvedCfg["mesh_gateway"] = args.MeshGateway
|
||||
}
|
||||
|
||||
if upstreamConfigs[upstream.String()] != nil {
|
||||
upstreamConfigs[upstream.String()].MergeInto(resolvedCfg)
|
||||
}
|
||||
|
||||
if len(resolvedCfg) > 0 {
|
||||
usConfigs[upstream] = resolvedCfg
|
||||
}
|
||||
}
|
||||
|
||||
@ -447,22 +511,21 @@ func (c *ConfigEntry) ResolveServiceConfig(args *structs.ServiceConfigRequest, r
|
||||
}
|
||||
|
||||
if legacyUpstreams {
|
||||
if reply.UpstreamConfigs == nil {
|
||||
reply.UpstreamConfigs = make(map[string]map[string]interface{})
|
||||
}
|
||||
// For legacy upstreams we return a map that is only keyed on the string ID, since they precede namespaces
|
||||
reply.UpstreamConfigs = make(map[string]map[string]interface{})
|
||||
|
||||
for us, conf := range usConfigs {
|
||||
reply.UpstreamConfigs[us.ID] = conf
|
||||
}
|
||||
|
||||
} else {
|
||||
if reply.UpstreamIDConfigs == nil {
|
||||
reply.UpstreamIDConfigs = make(structs.UpstreamConfigs, 0, len(usConfigs))
|
||||
}
|
||||
reply.UpstreamIDConfigs = make(structs.OpaqueUpstreamConfigs, 0, len(usConfigs))
|
||||
|
||||
for us, conf := range usConfigs {
|
||||
reply.UpstreamIDConfigs = append(reply.UpstreamIDConfigs, structs.UpstreamConfig{Upstream: us, Config: conf})
|
||||
reply.UpstreamIDConfigs = append(reply.UpstreamIDConfigs,
|
||||
structs.OpaqueUpstreamConfig{Upstream: us, Config: conf})
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
@ -2,6 +2,7 @@ package consul
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -892,6 +893,353 @@ func TestConfigEntry_ResolveServiceConfig(t *testing.T) {
|
||||
require.Equal(map[string]interface{}{"foo": 1}, proxyConf.Config)
|
||||
}
|
||||
|
||||
func TestConfigEntry_ResolveServiceConfig_TransparentProxy(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
tt := []struct {
|
||||
name string
|
||||
entries []structs.ConfigEntry
|
||||
request structs.ServiceConfigRequest
|
||||
proxyCfg structs.ConnectProxyConfig
|
||||
expect structs.ServiceConfigResponse
|
||||
}{
|
||||
{
|
||||
name: "from proxy-defaults",
|
||||
entries: []structs.ConfigEntry{
|
||||
&structs.ProxyConfigEntry{
|
||||
Kind: structs.ProxyDefaults,
|
||||
Name: structs.ProxyConfigGlobal,
|
||||
TransparentProxy: true,
|
||||
},
|
||||
},
|
||||
request: structs.ServiceConfigRequest{
|
||||
Name: "foo",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
expect: structs.ServiceConfigResponse{
|
||||
TransparentProxy: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "from service-defaults",
|
||||
entries: []structs.ConfigEntry{
|
||||
&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "foo",
|
||||
TransparentProxy: true,
|
||||
},
|
||||
},
|
||||
request: structs.ServiceConfigRequest{
|
||||
Name: "foo",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
expect: structs.ServiceConfigResponse{
|
||||
TransparentProxy: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "service-defaults overrides proxy-defaults",
|
||||
entries: []structs.ConfigEntry{
|
||||
&structs.ProxyConfigEntry{
|
||||
Kind: structs.ProxyDefaults,
|
||||
Name: structs.ProxyConfigGlobal,
|
||||
TransparentProxy: false,
|
||||
},
|
||||
&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "foo",
|
||||
TransparentProxy: true,
|
||||
},
|
||||
},
|
||||
request: structs.ServiceConfigRequest{
|
||||
Name: "foo",
|
||||
Datacenter: "dc1",
|
||||
},
|
||||
expect: structs.ServiceConfigResponse{
|
||||
TransparentProxy: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tt {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
dir1, s1 := testServer(t)
|
||||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
// Boostrap the config entries
|
||||
idx := uint64(1)
|
||||
for _, conf := range tc.entries {
|
||||
require.NoError(t, s1.fsm.State().EnsureConfigEntry(idx, conf))
|
||||
idx++
|
||||
}
|
||||
|
||||
var out structs.ServiceConfigResponse
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &tc.request, &out))
|
||||
|
||||
// Don't know what this is deterministically, so we grab it from the response
|
||||
tc.expect.QueryMeta = out.QueryMeta
|
||||
|
||||
require.Equal(t, tc.expect, out)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
t.Parallel()
|
||||
|
||||
mysql := structs.NewServiceID("mysql", structs.DefaultEnterpriseMeta())
|
||||
cache := structs.NewServiceID("cache", structs.DefaultEnterpriseMeta())
|
||||
|
||||
tt := []struct {
|
||||
name string
|
||||
entries []structs.ConfigEntry
|
||||
request structs.ServiceConfigRequest
|
||||
proxyCfg structs.ConnectProxyConfig
|
||||
expect structs.ServiceConfigResponse
|
||||
}{
|
||||
{
|
||||
name: "upstream config entries from Upstreams and service-defaults",
|
||||
entries: []structs.ConfigEntry{
|
||||
&structs.ProxyConfigEntry{
|
||||
Kind: structs.ProxyDefaults,
|
||||
Name: structs.ProxyConfigGlobal,
|
||||
Config: map[string]interface{}{
|
||||
"protocol": "grpc",
|
||||
},
|
||||
},
|
||||
&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "api",
|
||||
Connect: &structs.ConnectConfiguration{
|
||||
UpstreamConfigs: map[string]*structs.UpstreamConfig{
|
||||
mysql.String(): {
|
||||
Protocol: "http",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
request: structs.ServiceConfigRequest{
|
||||
Name: "api",
|
||||
Datacenter: "dc1",
|
||||
Upstreams: []string{"cache"},
|
||||
},
|
||||
expect: structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{
|
||||
"protocol": "grpc",
|
||||
},
|
||||
UpstreamConfigs: map[string]map[string]interface{}{
|
||||
"mysql": {
|
||||
"protocol": "http",
|
||||
},
|
||||
"cache": {
|
||||
"protocol": "grpc",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "upstream config entries from UpstreamIDs and service-defaults",
|
||||
entries: []structs.ConfigEntry{
|
||||
&structs.ProxyConfigEntry{
|
||||
Kind: structs.ProxyDefaults,
|
||||
Name: structs.ProxyConfigGlobal,
|
||||
Config: map[string]interface{}{
|
||||
"protocol": "grpc",
|
||||
},
|
||||
},
|
||||
&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "api",
|
||||
Connect: &structs.ConnectConfiguration{
|
||||
UpstreamConfigs: map[string]*structs.UpstreamConfig{
|
||||
mysql.String(): {
|
||||
Protocol: "http",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
request: structs.ServiceConfigRequest{
|
||||
Name: "api",
|
||||
Datacenter: "dc1",
|
||||
UpstreamIDs: []structs.ServiceID{
|
||||
cache,
|
||||
},
|
||||
},
|
||||
expect: structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{
|
||||
"protocol": "grpc",
|
||||
},
|
||||
UpstreamIDConfigs: structs.OpaqueUpstreamConfigs{
|
||||
{
|
||||
Upstream: cache,
|
||||
Config: map[string]interface{}{
|
||||
"protocol": "grpc",
|
||||
},
|
||||
},
|
||||
{
|
||||
Upstream: structs.ServiceID{
|
||||
ID: "mysql",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
Config: map[string]interface{}{
|
||||
"protocol": "http",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "proxy registration overrides upstream_defaults",
|
||||
entries: []structs.ConfigEntry{
|
||||
&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "api",
|
||||
Connect: &structs.ConnectConfiguration{
|
||||
UpstreamDefaults: &structs.UpstreamConfig{
|
||||
MeshGateway: structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeRemote},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
request: structs.ServiceConfigRequest{
|
||||
Name: "api",
|
||||
Datacenter: "dc1",
|
||||
MeshGateway: structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeNone,
|
||||
},
|
||||
UpstreamIDs: []structs.ServiceID{
|
||||
mysql,
|
||||
},
|
||||
},
|
||||
expect: structs.ServiceConfigResponse{
|
||||
UpstreamIDConfigs: structs.OpaqueUpstreamConfigs{
|
||||
{
|
||||
Upstream: mysql,
|
||||
Config: map[string]interface{}{
|
||||
"mesh_gateway": map[string]interface{}{
|
||||
"Mode": "none",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "upstream_configs overrides all",
|
||||
entries: []structs.ConfigEntry{
|
||||
&structs.ProxyConfigEntry{
|
||||
Kind: structs.ProxyDefaults,
|
||||
Name: structs.ProxyConfigGlobal,
|
||||
Config: map[string]interface{}{
|
||||
"protocol": "udp",
|
||||
},
|
||||
},
|
||||
&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "api",
|
||||
Protocol: "tcp",
|
||||
},
|
||||
&structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "api",
|
||||
Connect: &structs.ConnectConfiguration{
|
||||
UpstreamDefaults: &structs.UpstreamConfig{
|
||||
Protocol: "http",
|
||||
MeshGateway: structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeRemote},
|
||||
PassiveHealthCheck: &structs.PassiveHealthCheck{
|
||||
Interval: 10,
|
||||
MaxFailures: 2,
|
||||
},
|
||||
},
|
||||
UpstreamConfigs: map[string]*structs.UpstreamConfig{
|
||||
mysql.String(): {
|
||||
Protocol: "grpc",
|
||||
MeshGateway: structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeLocal},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
request: structs.ServiceConfigRequest{
|
||||
Name: "api",
|
||||
Datacenter: "dc1",
|
||||
MeshGateway: structs.MeshGatewayConfig{
|
||||
Mode: structs.MeshGatewayModeNone,
|
||||
},
|
||||
UpstreamIDs: []structs.ServiceID{
|
||||
mysql,
|
||||
},
|
||||
},
|
||||
expect: structs.ServiceConfigResponse{
|
||||
ProxyConfig: map[string]interface{}{
|
||||
"protocol": "udp",
|
||||
},
|
||||
UpstreamIDConfigs: structs.OpaqueUpstreamConfigs{
|
||||
{
|
||||
Upstream: mysql,
|
||||
Config: map[string]interface{}{
|
||||
"passive_health_check": map[string]interface{}{
|
||||
"Interval": int64(10),
|
||||
"MaxFailures": int64(2),
|
||||
},
|
||||
"mesh_gateway": map[string]interface{}{
|
||||
"Mode": "local",
|
||||
},
|
||||
"protocol": "grpc",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tt {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
dir1, s1 := testServer(t)
|
||||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
state := s1.fsm.State()
|
||||
|
||||
// Boostrap the config entries
|
||||
idx := uint64(1)
|
||||
for _, conf := range tc.entries {
|
||||
require.NoError(t, state.EnsureConfigEntry(idx, conf))
|
||||
idx++
|
||||
}
|
||||
|
||||
var out structs.ServiceConfigResponse
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.ResolveServiceConfig", &tc.request, &out))
|
||||
|
||||
// Don't know what this is deterministically, so we grab it from the response
|
||||
tc.expect.QueryMeta = out.QueryMeta
|
||||
|
||||
// Order of this slice is also not deterministic since it's populated from a map
|
||||
sort.SliceStable(out.UpstreamIDConfigs, func(i, j int) bool {
|
||||
return out.UpstreamIDConfigs[i].Upstream.String() < out.UpstreamIDConfigs[j].Upstream.String()
|
||||
})
|
||||
|
||||
require.Equal(t, tc.expect, out)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigEntry_ResolveServiceConfig_Blocking(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
@ -910,3 +910,138 @@ func registerTestTopologyEntries(t *testing.T, codec rpc.ClientCodec, token stri
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out))
|
||||
}
|
||||
}
|
||||
|
||||
func registerIntentionUpstreamEntries(t *testing.T, codec rpc.ClientCodec, token string) {
|
||||
t.Helper()
|
||||
|
||||
// api and api-proxy on node foo
|
||||
// web and web-proxy on node foo
|
||||
// redis and redis-proxy on node foo
|
||||
// * -> * (deny) intention
|
||||
// web -> api (allow)
|
||||
registrations := map[string]*structs.RegisterRequest{
|
||||
"Node foo": {
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
ID: types.NodeID("e0155642-135d-4739-9853-a1ee6c9f945b"),
|
||||
Address: "127.0.0.2",
|
||||
WriteRequest: structs.WriteRequest{Token: token},
|
||||
},
|
||||
"Service api on foo": {
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
SkipNodeUpdate: true,
|
||||
Service: &structs.NodeService{
|
||||
Kind: structs.ServiceKindTypical,
|
||||
ID: "api",
|
||||
Service: "api",
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: token},
|
||||
},
|
||||
"Service api-proxy": {
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
SkipNodeUpdate: true,
|
||||
Service: &structs.NodeService{
|
||||
Kind: structs.ServiceKindConnectProxy,
|
||||
ID: "api-proxy",
|
||||
Service: "api-proxy",
|
||||
Port: 8443,
|
||||
Address: "198.18.1.2",
|
||||
Proxy: structs.ConnectProxyConfig{
|
||||
DestinationServiceName: "api",
|
||||
},
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: token},
|
||||
},
|
||||
"Service web on foo": {
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
SkipNodeUpdate: true,
|
||||
Service: &structs.NodeService{
|
||||
Kind: structs.ServiceKindTypical,
|
||||
ID: "web",
|
||||
Service: "web",
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: token},
|
||||
},
|
||||
"Service web-proxy on foo": {
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
SkipNodeUpdate: true,
|
||||
Service: &structs.NodeService{
|
||||
Kind: structs.ServiceKindConnectProxy,
|
||||
ID: "web-proxy",
|
||||
Service: "web-proxy",
|
||||
Port: 8080,
|
||||
Proxy: structs.ConnectProxyConfig{
|
||||
DestinationServiceName: "web",
|
||||
},
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: token},
|
||||
},
|
||||
"Service redis on foo": {
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
SkipNodeUpdate: true,
|
||||
Service: &structs.NodeService{
|
||||
Kind: structs.ServiceKindTypical,
|
||||
ID: "redis",
|
||||
Service: "redis",
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: token},
|
||||
},
|
||||
"Service redis-proxy on foo": {
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
SkipNodeUpdate: true,
|
||||
Service: &structs.NodeService{
|
||||
Kind: structs.ServiceKindConnectProxy,
|
||||
ID: "redis-proxy",
|
||||
Service: "redis-proxy",
|
||||
Port: 1234,
|
||||
Proxy: structs.ConnectProxyConfig{
|
||||
DestinationServiceName: "redis",
|
||||
},
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: token},
|
||||
},
|
||||
}
|
||||
registerTestCatalogEntriesMap(t, codec, registrations)
|
||||
|
||||
// Add intentions: deny all and web -> api
|
||||
entries := []structs.ConfigEntryRequest{
|
||||
{
|
||||
Datacenter: "dc1",
|
||||
Entry: &structs.ServiceIntentionsConfigEntry{
|
||||
Kind: structs.ServiceIntentions,
|
||||
Name: "api",
|
||||
Sources: []*structs.SourceIntention{
|
||||
{
|
||||
Name: "web",
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
},
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: token},
|
||||
},
|
||||
{
|
||||
Datacenter: "dc1",
|
||||
Entry: &structs.ServiceIntentionsConfigEntry{
|
||||
Kind: structs.ServiceIntentions,
|
||||
Name: "*",
|
||||
Sources: []*structs.SourceIntention{
|
||||
{
|
||||
Name: "*",
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
},
|
||||
},
|
||||
WriteRequest: structs.WriteRequest{Token: token},
|
||||
},
|
||||
}
|
||||
for _, req := range entries {
|
||||
var out bool
|
||||
require.NoError(t, msgpackrpc.CallWithCodec(codec, "ConfigEntry.Apply", &req, &out))
|
||||
}
|
||||
}
|
||||
|
@ -8,7 +8,6 @@ import (
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/armon/go-metrics/prometheus"
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/consul/state"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
@ -684,16 +683,7 @@ func (s *Intention) Check(args *structs.IntentionQueryRequest, reply *structs.In
|
||||
return fmt.Errorf("Invalid destination namespace %q: %v", query.DestinationNS, err)
|
||||
}
|
||||
|
||||
// Build the URI
|
||||
var uri connect.CertURI
|
||||
switch query.SourceType {
|
||||
case structs.IntentionSourceConsul:
|
||||
uri = &connect.SpiffeIDService{
|
||||
Namespace: query.SourceNS,
|
||||
Service: query.SourceName,
|
||||
}
|
||||
|
||||
default:
|
||||
if query.SourceType != structs.IntentionSourceConsul {
|
||||
return fmt.Errorf("unsupported SourceType: %q", query.SourceType)
|
||||
}
|
||||
|
||||
@ -732,7 +722,17 @@ func (s *Intention) Check(args *structs.IntentionQueryRequest, reply *structs.In
|
||||
}
|
||||
|
||||
state := s.srv.fsm.State()
|
||||
decision, err := state.IntentionDecision(uri, query.DestinationName, query.DestinationNS, defaultDecision)
|
||||
|
||||
entry := structs.IntentionMatchEntry{
|
||||
Namespace: query.SourceNS,
|
||||
Name: query.SourceName,
|
||||
}
|
||||
_, intentions, err := state.IntentionMatchOne(nil, entry, structs.IntentionMatchSource)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query intentions for %s/%s", query.SourceNS, query.SourceName)
|
||||
}
|
||||
|
||||
decision, err := state.IntentionDecision(query.DestinationName, query.DestinationNS, intentions, structs.IntentionMatchDestination, defaultDecision, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get intention decision from (%s/%s) to (%s/%s): %v",
|
||||
query.SourceNS, query.SourceName, query.DestinationNS, query.DestinationName, err)
|
||||
|
@ -188,6 +188,49 @@ func (m *Internal) ServiceTopology(args *structs.ServiceSpecificRequest, reply *
|
||||
})
|
||||
}
|
||||
|
||||
// IntentionUpstreams returns the upstreams of a service. Upstreams are inferred from intentions.
|
||||
// If intentions allow a connection from the target to some candidate service, the candidate service is considered
|
||||
// an upstream of the target.
|
||||
func (m *Internal) IntentionUpstreams(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceList) error {
|
||||
// Exit early if Connect hasn't been enabled.
|
||||
if !m.srv.config.ConnectEnabled {
|
||||
return ErrConnectNotEnabled
|
||||
}
|
||||
if args.ServiceName == "" {
|
||||
return fmt.Errorf("Must provide a service name")
|
||||
}
|
||||
if done, err := m.srv.ForwardRPC("Internal.IntentionUpstreams", args, args, reply); done {
|
||||
return err
|
||||
}
|
||||
|
||||
authz, err := m.srv.ResolveTokenAndDefaultMeta(args.Token, &args.EnterpriseMeta, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.srv.validateEnterpriseRequest(&args.EnterpriseMeta, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return m.srv.blockingQuery(
|
||||
&args.QueryOptions,
|
||||
&reply.QueryMeta,
|
||||
func(ws memdb.WatchSet, state *state.Store) error {
|
||||
defaultDecision := acl.Allow
|
||||
if authz != nil {
|
||||
defaultDecision = authz.IntentionDefaultAllow(nil)
|
||||
}
|
||||
|
||||
sn := structs.NewServiceName(args.ServiceName, &args.EnterpriseMeta)
|
||||
index, services, err := state.IntentionTopology(ws, sn, false, defaultDecision)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reply.Index, reply.Services = index, services
|
||||
return m.srv.filterACLWithAuthorizer(authz, reply)
|
||||
})
|
||||
}
|
||||
|
||||
// GatewayServiceNodes returns all the nodes for services associated with a gateway along with their gateway config
|
||||
func (m *Internal) GatewayServiceDump(args *structs.ServiceSpecificRequest, reply *structs.IndexedServiceDump) error {
|
||||
if done, err := m.srv.ForwardRPC("Internal.GatewayServiceDump", args, args, reply); done {
|
||||
|
@ -1885,3 +1885,124 @@ service "web" { policy = "read" }
|
||||
require.True(t, acl.IsErrPermissionDenied(err))
|
||||
})
|
||||
}
|
||||
|
||||
func TestInternal_IntentionUpstreams(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
dir1, s1 := testServer(t)
|
||||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
// Services:
|
||||
// api and api-proxy on node foo
|
||||
// web and web-proxy on node foo
|
||||
//
|
||||
// Intentions
|
||||
// * -> * (deny) intention
|
||||
// web -> api (allow)
|
||||
registerIntentionUpstreamEntries(t, codec, "")
|
||||
|
||||
t.Run("web", func(t *testing.T) {
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
args := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "web",
|
||||
}
|
||||
var out structs.IndexedServiceList
|
||||
require.NoError(r, msgpackrpc.CallWithCodec(codec, "Internal.IntentionUpstreams", &args, &out))
|
||||
|
||||
// foo/api
|
||||
require.Len(r, out.Services, 1)
|
||||
|
||||
expectUp := structs.ServiceList{
|
||||
structs.NewServiceName("api", structs.DefaultEnterpriseMeta()),
|
||||
}
|
||||
require.Equal(r, expectUp, out.Services)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestInternal_IntentionUpstreams_ACL(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
dir1, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.ACLDatacenter = "dc1"
|
||||
c.ACLsEnabled = true
|
||||
c.ACLMasterToken = TestDefaultMasterToken
|
||||
c.ACLDefaultPolicy = "deny"
|
||||
})
|
||||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
||||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
// Services:
|
||||
// api and api-proxy on node foo
|
||||
// web and web-proxy on node foo
|
||||
//
|
||||
// Intentions
|
||||
// * -> * (deny) intention
|
||||
// web -> api (allow)
|
||||
registerIntentionUpstreamEntries(t, codec, TestDefaultMasterToken)
|
||||
|
||||
t.Run("valid token", func(t *testing.T) {
|
||||
// Token grants read to read api service
|
||||
userToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `
|
||||
service_prefix "api" { policy = "read" }
|
||||
`)
|
||||
require.NoError(t, err)
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
args := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "web",
|
||||
QueryOptions: structs.QueryOptions{Token: userToken.SecretID},
|
||||
}
|
||||
var out structs.IndexedServiceList
|
||||
require.NoError(r, msgpackrpc.CallWithCodec(codec, "Internal.IntentionUpstreams", &args, &out))
|
||||
|
||||
// foo/api
|
||||
require.Len(r, out.Services, 1)
|
||||
|
||||
expectUp := structs.ServiceList{
|
||||
structs.NewServiceName("api", structs.DefaultEnterpriseMeta()),
|
||||
}
|
||||
require.Equal(r, expectUp, out.Services)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("invalid token filters results", func(t *testing.T) {
|
||||
// Token grants read to read an unrelated service, mongo
|
||||
userToken, err := upsertTestTokenWithPolicyRules(codec, TestDefaultMasterToken, "dc1", `
|
||||
service_prefix "mongo" { policy = "read" }
|
||||
`)
|
||||
require.NoError(t, err)
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
args := structs.ServiceSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
ServiceName: "web",
|
||||
QueryOptions: structs.QueryOptions{Token: userToken.SecretID},
|
||||
}
|
||||
var out structs.IndexedServiceList
|
||||
require.NoError(r, msgpackrpc.CallWithCodec(codec, "Internal.IntentionUpstreams", &args, &out))
|
||||
|
||||
// Token can't read api service
|
||||
require.Empty(r, out.Services)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
@ -887,12 +887,13 @@ func (c *CAManager) UpdateConfiguration(args *structs.CARequest) (reterr error)
|
||||
"You can try again with ForceWithoutCrossSigningSet but this may cause " +
|
||||
"disruption - see documentation for more.")
|
||||
}
|
||||
if !canXSign && args.Config.ForceWithoutCrossSigning {
|
||||
c.logger.Warn("current CA doesn't support cross signing but " +
|
||||
"CA reconfiguration forced anyway with ForceWithoutCrossSigning")
|
||||
if args.Config.ForceWithoutCrossSigning {
|
||||
c.logger.Warn("ForceWithoutCrossSigning set, CA reconfiguration skipping cross-signing")
|
||||
}
|
||||
|
||||
if canXSign {
|
||||
// If ForceWithoutCrossSigning wasn't set, attempt to have the old CA generate a
|
||||
// cross-signed intermediate.
|
||||
if canXSign && !args.Config.ForceWithoutCrossSigning {
|
||||
// Have the old provider cross-sign the new root
|
||||
xcCert, err := oldProvider.CrossSignCA(newRoot)
|
||||
if err != nil {
|
||||
|
@ -1410,3 +1410,130 @@ func TestLeader_Consul_BadCAConfigShouldntPreventLeaderEstablishment(t *testing.
|
||||
require.NotEmpty(t, rootsList.Roots)
|
||||
require.NotNil(t, activeRoot)
|
||||
}
|
||||
|
||||
func TestLeader_Consul_ForceWithoutCrossSigning(t *testing.T) {
|
||||
require := require.New(t)
|
||||
dir1, s1 := testServer(t)
|
||||
defer os.RemoveAll(dir1)
|
||||
defer s1.Shutdown()
|
||||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
waitForLeaderEstablishment(t, s1)
|
||||
|
||||
// Get the current root
|
||||
rootReq := &structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
var rootList structs.IndexedCARoots
|
||||
require.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
|
||||
require.Len(rootList.Roots, 1)
|
||||
oldRoot := rootList.Roots[0]
|
||||
|
||||
// Update the provider config to use a new private key, which should
|
||||
// cause a rotation.
|
||||
_, newKey, err := connect.GeneratePrivateKey()
|
||||
require.NoError(err)
|
||||
newConfig := &structs.CAConfiguration{
|
||||
Provider: "consul",
|
||||
Config: map[string]interface{}{
|
||||
"LeafCertTTL": "500ms",
|
||||
"PrivateKey": newKey,
|
||||
"RootCert": "",
|
||||
"RotationPeriod": "2160h",
|
||||
"SkipValidate": true,
|
||||
},
|
||||
ForceWithoutCrossSigning: true,
|
||||
}
|
||||
{
|
||||
args := &structs.CARequest{
|
||||
Datacenter: "dc1",
|
||||
Config: newConfig,
|
||||
}
|
||||
var reply interface{}
|
||||
|
||||
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
|
||||
}
|
||||
|
||||
// Old root should no longer be active.
|
||||
_, roots, err := s1.fsm.State().CARoots(nil)
|
||||
require.NoError(err)
|
||||
require.Len(roots, 2)
|
||||
for _, r := range roots {
|
||||
if r.ID == oldRoot.ID {
|
||||
require.False(r.Active)
|
||||
} else {
|
||||
require.True(r.Active)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLeader_Vault_ForceWithoutCrossSigning(t *testing.T) {
|
||||
ca.SkipIfVaultNotPresent(t)
|
||||
|
||||
require := require.New(t)
|
||||
testVault := ca.NewTestVaultServer(t)
|
||||
defer testVault.Stop()
|
||||
|
||||
_, s1 := testServerWithConfig(t, func(c *Config) {
|
||||
c.Build = "1.9.1"
|
||||
c.PrimaryDatacenter = "dc1"
|
||||
c.CAConfig = &structs.CAConfiguration{
|
||||
Provider: "vault",
|
||||
Config: map[string]interface{}{
|
||||
"Address": testVault.Addr,
|
||||
"Token": testVault.RootToken,
|
||||
"RootPKIPath": "pki-root/",
|
||||
"IntermediatePKIPath": "pki-intermediate/",
|
||||
},
|
||||
}
|
||||
})
|
||||
defer s1.Shutdown()
|
||||
codec := rpcClient(t, s1)
|
||||
defer codec.Close()
|
||||
|
||||
waitForLeaderEstablishment(t, s1)
|
||||
|
||||
// Get the current root
|
||||
rootReq := &structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
var rootList structs.IndexedCARoots
|
||||
require.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList))
|
||||
require.Len(rootList.Roots, 1)
|
||||
oldRoot := rootList.Roots[0]
|
||||
|
||||
// Update the provider config to use a new PKI path, which should
|
||||
// cause a rotation.
|
||||
newConfig := &structs.CAConfiguration{
|
||||
Provider: "vault",
|
||||
Config: map[string]interface{}{
|
||||
"Address": testVault.Addr,
|
||||
"Token": testVault.RootToken,
|
||||
"RootPKIPath": "pki-root-2/",
|
||||
"IntermediatePKIPath": "pki-intermediate/",
|
||||
},
|
||||
ForceWithoutCrossSigning: true,
|
||||
}
|
||||
{
|
||||
args := &structs.CARequest{
|
||||
Datacenter: "dc1",
|
||||
Config: newConfig,
|
||||
}
|
||||
var reply interface{}
|
||||
|
||||
require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply))
|
||||
}
|
||||
|
||||
// Old root should no longer be active.
|
||||
_, roots, err := s1.fsm.State().CARoots(nil)
|
||||
require.NoError(err)
|
||||
require.Len(roots, 2)
|
||||
for _, r := range roots {
|
||||
if r.ID == oldRoot.ID {
|
||||
require.False(r.Active)
|
||||
} else {
|
||||
require.True(r.Active)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -6,12 +6,14 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/agent/consul/state"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
tokenStore "github.com/hashicorp/consul/agent/token"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLeader_ReplicateIntentions(t *testing.T) {
|
||||
@ -543,17 +545,17 @@ func TestLeader_LegacyIntentionMigration(t *testing.T) {
|
||||
checkIntentions(t, s1, true, map[string]*structs.Intention{})
|
||||
}))
|
||||
|
||||
mapifyConfigs := func(entries interface{}) map[structs.ConfigEntryKindName]*structs.ServiceIntentionsConfigEntry {
|
||||
m := make(map[structs.ConfigEntryKindName]*structs.ServiceIntentionsConfigEntry)
|
||||
mapifyConfigs := func(entries interface{}) map[state.ConfigEntryKindName]*structs.ServiceIntentionsConfigEntry {
|
||||
m := make(map[state.ConfigEntryKindName]*structs.ServiceIntentionsConfigEntry)
|
||||
switch v := entries.(type) {
|
||||
case []*structs.ServiceIntentionsConfigEntry:
|
||||
for _, entry := range v {
|
||||
kn := structs.NewConfigEntryKindName(entry.Kind, entry.Name, &entry.EnterpriseMeta)
|
||||
kn := state.NewConfigEntryKindName(entry.Kind, entry.Name, &entry.EnterpriseMeta)
|
||||
m[kn] = entry
|
||||
}
|
||||
case []structs.ConfigEntry:
|
||||
for _, entry := range v {
|
||||
kn := structs.NewConfigEntryKindName(entry.GetKind(), entry.GetName(), entry.GetEnterpriseMeta())
|
||||
kn := state.NewConfigEntryKindName(entry.GetKind(), entry.GetName(), entry.GetEnterpriseMeta())
|
||||
m[kn] = entry.(*structs.ServiceIntentionsConfigEntry)
|
||||
}
|
||||
default:
|
||||
|
@ -53,21 +53,14 @@ func testTLSCertificates(serverName string) (cert string, key string, cacert str
|
||||
return "", "", "", err
|
||||
}
|
||||
|
||||
serial, err := tlsutil.GenerateSerialNumber()
|
||||
if err != nil {
|
||||
return "", "", "", err
|
||||
}
|
||||
|
||||
cert, privateKey, err := tlsutil.GenerateCert(
|
||||
signer,
|
||||
ca,
|
||||
serial,
|
||||
"Test Cert Name",
|
||||
365,
|
||||
[]string{serverName},
|
||||
nil,
|
||||
[]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
)
|
||||
cert, privateKey, err := tlsutil.GenerateCert(tlsutil.CertOpts{
|
||||
Signer: signer,
|
||||
CA: ca,
|
||||
Name: "Test Cert Name",
|
||||
Days: 365,
|
||||
DNSNames: []string{serverName},
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
|
||||
})
|
||||
if err != nil {
|
||||
return "", "", "", err
|
||||
}
|
||||
|
@ -113,57 +113,6 @@ func (s *TokenRolesIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
|
||||
return val, nil
|
||||
}
|
||||
|
||||
type RolePoliciesIndex struct {
|
||||
}
|
||||
|
||||
func (s *RolePoliciesIndex) FromObject(obj interface{}) (bool, [][]byte, error) {
|
||||
role, ok := obj.(*structs.ACLRole)
|
||||
if !ok {
|
||||
return false, nil, fmt.Errorf("object is not an ACLRole")
|
||||
}
|
||||
|
||||
links := role.Policies
|
||||
|
||||
numLinks := len(links)
|
||||
if numLinks == 0 {
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
vals := make([][]byte, 0, numLinks)
|
||||
for _, link := range links {
|
||||
vals = append(vals, []byte(link.ID+"\x00"))
|
||||
}
|
||||
|
||||
return true, vals, nil
|
||||
}
|
||||
|
||||
func (s *RolePoliciesIndex) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf("must provide only a single argument")
|
||||
}
|
||||
arg, ok := args[0].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("argument must be a string: %#v", args[0])
|
||||
}
|
||||
// Add the null character as a terminator
|
||||
arg += "\x00"
|
||||
return []byte(arg), nil
|
||||
}
|
||||
|
||||
func (s *RolePoliciesIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
|
||||
val, err := s.FromArgs(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Strip the null terminator, the rest is a prefix
|
||||
n := len(val)
|
||||
if n > 0 {
|
||||
return val[:n-1], nil
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
type TokenExpirationIndex struct {
|
||||
LocalFilter bool
|
||||
}
|
||||
@ -228,11 +177,7 @@ func (s *Restore) ACLToken(token *structs.ACLToken) error {
|
||||
|
||||
// ACLPolicies is used when saving a snapshot
|
||||
func (s *Snapshot) ACLPolicies() (memdb.ResultIterator, error) {
|
||||
iter, err := s.tx.Get("acl-policies", "id")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return iter, nil
|
||||
return s.tx.Get(tableACLPolicies, indexID)
|
||||
}
|
||||
|
||||
func (s *Restore) ACLPolicy(policy *structs.ACLPolicy) error {
|
||||
@ -241,7 +186,7 @@ func (s *Restore) ACLPolicy(policy *structs.ACLPolicy) error {
|
||||
|
||||
// ACLRoles is used when saving a snapshot
|
||||
func (s *Snapshot) ACLRoles() (memdb.ResultIterator, error) {
|
||||
iter, err := s.tx.Get("acl-roles", "id")
|
||||
iter, err := s.tx.Get(tableACLRoles, indexID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -548,22 +493,21 @@ func fixupTokenRoleLinks(tx ReadTxn, original *structs.ACLToken) (*structs.ACLTo
|
||||
|
||||
func resolveRolePolicyLinks(tx *txn, role *structs.ACLRole, allowMissing bool) error {
|
||||
for linkIndex, link := range role.Policies {
|
||||
if link.ID != "" {
|
||||
policy, err := getPolicyWithTxn(tx, nil, link.ID, aclPolicyGetByID, &role.EnterpriseMeta)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if policy != nil {
|
||||
// the name doesn't matter here
|
||||
role.Policies[linkIndex].Name = policy.Name
|
||||
} else if !allowMissing {
|
||||
return fmt.Errorf("No such policy with ID: %s", link.ID)
|
||||
}
|
||||
} else {
|
||||
if link.ID == "" {
|
||||
return fmt.Errorf("Encountered a Role with policies linked by Name in the state store")
|
||||
}
|
||||
|
||||
policy, err := getPolicyWithTxn(tx, nil, link.ID, aclPolicyGetByID, &role.EnterpriseMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if policy != nil {
|
||||
// the name doesn't matter here
|
||||
role.Policies[linkIndex].Name = policy.Name
|
||||
} else if !allowMissing {
|
||||
return fmt.Errorf("No such policy with ID: %s", link.ID)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -1162,7 +1106,8 @@ func aclPolicySetTxn(tx *txn, idx uint64, policy *structs.ACLPolicy) error {
|
||||
}
|
||||
|
||||
// ensure the name is unique (cannot conflict with another policy with a different ID)
|
||||
_, nameMatch, err := aclPolicyGetByName(tx, policy.Name, &policy.EnterpriseMeta)
|
||||
q := Query{Value: policy.Name, EnterpriseMeta: policy.EnterpriseMeta}
|
||||
nameMatch, err := tx.First(tableACLPolicies, indexName, q)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1195,6 +1140,15 @@ func (s *Store) ACLPolicyGetByName(ws memdb.WatchSet, name string, entMeta *stru
|
||||
return s.aclPolicyGet(ws, name, aclPolicyGetByName, entMeta)
|
||||
}
|
||||
|
||||
func aclPolicyGetByName(tx ReadTxn, name string, entMeta *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) {
|
||||
// todo: accept non-pointer value
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
q := Query{Value: name, EnterpriseMeta: *entMeta}
|
||||
return tx.FirstWatch(tableACLPolicies, indexName, q)
|
||||
}
|
||||
|
||||
func (s *Store) ACLPolicyBatchGet(ws memdb.WatchSet, ids []string) (uint64, structs.ACLPolicies, error) {
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
@ -1212,8 +1166,8 @@ func (s *Store) ACLPolicyBatchGet(ws memdb.WatchSet, ids []string) (uint64, stru
|
||||
}
|
||||
|
||||
// We are specifically not wanting to call aclPolicyMaxIndex here as we always want the
|
||||
// index entry for the "acl-policies" table.
|
||||
idx := maxIndexTxn(tx, "acl-policies")
|
||||
// index entry for the tableACLPolicies table.
|
||||
idx := maxIndexTxn(tx, tableACLPolicies)
|
||||
|
||||
return idx, policies, nil
|
||||
}
|
||||
@ -1252,7 +1206,7 @@ func (s *Store) ACLPolicyList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
|
||||
iter, err := aclPolicyList(tx, entMeta)
|
||||
iter, err := tx.Get(tableACLPolicies, indexName+"_prefix", entMeta)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed acl policy lookup: %v", err)
|
||||
}
|
||||
@ -1365,7 +1319,8 @@ func aclRoleSetTxn(tx *txn, idx uint64, role *structs.ACLRole, allowMissing bool
|
||||
}
|
||||
|
||||
// ensure the name is unique (cannot conflict with another role with a different ID)
|
||||
_, nameMatch, err := aclRoleGetByName(tx, role.Name, &role.EnterpriseMeta)
|
||||
q := Query{EnterpriseMeta: role.EnterpriseMeta, Value: role.Name}
|
||||
nameMatch, err := tx.First(tableACLRoles, indexName, q)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed acl role lookup: %v", err)
|
||||
}
|
||||
@ -1418,6 +1373,15 @@ func (s *Store) ACLRoleGetByName(ws memdb.WatchSet, name string, entMeta *struct
|
||||
return s.aclRoleGet(ws, name, aclRoleGetByName, entMeta)
|
||||
}
|
||||
|
||||
func aclRoleGetByName(tx ReadTxn, name string, entMeta *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) {
|
||||
// TODO: accept non-pointer value
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
q := Query{EnterpriseMeta: *entMeta, Value: name}
|
||||
return tx.FirstWatch(tableACLRoles, indexName, q)
|
||||
}
|
||||
|
||||
func (s *Store) ACLRoleBatchGet(ws memdb.WatchSet, ids []string) (uint64, structs.ACLRoles, error) {
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
@ -1434,7 +1398,7 @@ func (s *Store) ACLRoleBatchGet(ws memdb.WatchSet, ids []string) (uint64, struct
|
||||
}
|
||||
}
|
||||
|
||||
idx := maxIndexTxn(tx, "acl-roles")
|
||||
idx := maxIndexTxn(tx, tableACLRoles)
|
||||
|
||||
return idx, roles, nil
|
||||
}
|
||||
@ -1479,10 +1443,16 @@ func (s *Store) ACLRoleList(ws memdb.WatchSet, policy string, entMeta *structs.E
|
||||
var iter memdb.ResultIterator
|
||||
var err error
|
||||
|
||||
// TODO: accept non-pointer value
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
||||
if policy != "" {
|
||||
iter, err = aclRoleListByPolicy(tx, policy, entMeta)
|
||||
q := Query{Value: policy, EnterpriseMeta: *entMeta}
|
||||
iter, err = tx.Get(tableACLRoles, indexPolicies, q)
|
||||
} else {
|
||||
iter, err = aclRoleList(tx, entMeta)
|
||||
iter, err = tx.Get(tableACLRoles, indexName+"_prefix", entMeta)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
@ -1,9 +1,10 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
|
||||
"github.com/hashicorp/consul/agent/consul/stream"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
)
|
||||
|
||||
// aclChangeUnsubscribeEvent creates and returns stream.UnsubscribeEvents that
|
||||
@ -19,7 +20,7 @@ func aclChangeUnsubscribeEvent(tx ReadTxn, changes Changes) ([]stream.Event, err
|
||||
token := changeObject(change).(*structs.ACLToken)
|
||||
secretIDs = append(secretIDs, token.SecretID)
|
||||
|
||||
case "acl-roles":
|
||||
case tableACLRoles:
|
||||
role := changeObject(change).(*structs.ACLRole)
|
||||
tokens, err := aclTokenListByRole(tx, role.ID, &role.EnterpriseMeta)
|
||||
if err != nil {
|
||||
@ -27,7 +28,7 @@ func aclChangeUnsubscribeEvent(tx ReadTxn, changes Changes) ([]stream.Event, err
|
||||
}
|
||||
secretIDs = appendSecretIDsFromTokenIterator(secretIDs, tokens)
|
||||
|
||||
case "acl-policies":
|
||||
case tableACLPolicies:
|
||||
policy := changeObject(change).(*structs.ACLPolicy)
|
||||
tokens, err := aclTokenListByPolicy(tx, policy.ID, &policy.EnterpriseMeta)
|
||||
if err != nil {
|
||||
@ -35,7 +36,8 @@ func aclChangeUnsubscribeEvent(tx ReadTxn, changes Changes) ([]stream.Event, err
|
||||
}
|
||||
secretIDs = appendSecretIDsFromTokenIterator(secretIDs, tokens)
|
||||
|
||||
roles, err := aclRoleListByPolicy(tx, policy.ID, &policy.EnterpriseMeta)
|
||||
q := Query{Value: policy.ID, EnterpriseMeta: policy.EnterpriseMeta}
|
||||
roles, err := tx.Get(tableACLRoles, indexPolicies, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -11,11 +11,11 @@ import (
|
||||
)
|
||||
|
||||
func aclPolicyInsert(tx *txn, policy *structs.ACLPolicy) error {
|
||||
if err := tx.Insert("acl-policies", policy); err != nil {
|
||||
if err := tx.Insert(tableACLPolicies, policy); err != nil {
|
||||
return fmt.Errorf("failed inserting acl policy: %v", err)
|
||||
}
|
||||
|
||||
if err := indexUpdateMaxTxn(tx, policy.ModifyIndex, "acl-policies"); err != nil {
|
||||
if err := indexUpdateMaxTxn(tx, policy.ModifyIndex, tableACLPolicies); err != nil {
|
||||
return fmt.Errorf("failed updating acl policies index: %v", err)
|
||||
}
|
||||
|
||||
@ -23,32 +23,24 @@ func aclPolicyInsert(tx *txn, policy *structs.ACLPolicy) error {
|
||||
}
|
||||
|
||||
func aclPolicyGetByID(tx ReadTxn, id string, _ *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) {
|
||||
return tx.FirstWatch("acl-policies", "id", id)
|
||||
}
|
||||
|
||||
func aclPolicyGetByName(tx ReadTxn, name string, _ *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) {
|
||||
return tx.FirstWatch("acl-policies", "name", name)
|
||||
}
|
||||
|
||||
func aclPolicyList(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get("acl-policies", "id")
|
||||
return tx.FirstWatch(tableACLPolicies, indexID, id)
|
||||
}
|
||||
|
||||
func aclPolicyDeleteWithPolicy(tx *txn, policy *structs.ACLPolicy, idx uint64) error {
|
||||
// remove the policy
|
||||
if err := tx.Delete("acl-policies", policy); err != nil {
|
||||
if err := tx.Delete(tableACLPolicies, policy); err != nil {
|
||||
return fmt.Errorf("failed deleting acl policy: %v", err)
|
||||
}
|
||||
|
||||
// update the overall acl-policies index
|
||||
if err := indexUpdateMaxTxn(tx, idx, "acl-policies"); err != nil {
|
||||
if err := indexUpdateMaxTxn(tx, idx, tableACLPolicies); err != nil {
|
||||
return fmt.Errorf("failed updating acl policies index: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func aclPolicyMaxIndex(tx ReadTxn, _ *structs.ACLPolicy, _ *structs.EnterpriseMeta) uint64 {
|
||||
return maxIndexTxn(tx, "acl-policies")
|
||||
return maxIndexTxn(tx, tableACLPolicies)
|
||||
}
|
||||
|
||||
func aclPolicyUpsertValidateEnterprise(*txn, *structs.ACLPolicy, *structs.ACLPolicy) error {
|
||||
@ -136,48 +128,36 @@ func (s *Store) ACLTokenUpsertValidateEnterprise(token *structs.ACLToken, existi
|
||||
|
||||
func aclRoleInsert(tx *txn, role *structs.ACLRole) error {
|
||||
// insert the role into memdb
|
||||
if err := tx.Insert("acl-roles", role); err != nil {
|
||||
if err := tx.Insert(tableACLRoles, role); err != nil {
|
||||
return fmt.Errorf("failed inserting acl role: %v", err)
|
||||
}
|
||||
|
||||
// update the overall acl-roles index
|
||||
if err := indexUpdateMaxTxn(tx, role.ModifyIndex, "acl-roles"); err != nil {
|
||||
if err := indexUpdateMaxTxn(tx, role.ModifyIndex, tableACLRoles); err != nil {
|
||||
return fmt.Errorf("failed updating acl roles index: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func aclRoleGetByID(tx ReadTxn, id string, _ *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) {
|
||||
return tx.FirstWatch("acl-roles", "id", id)
|
||||
}
|
||||
|
||||
func aclRoleGetByName(tx ReadTxn, name string, _ *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) {
|
||||
return tx.FirstWatch("acl-roles", "name", name)
|
||||
}
|
||||
|
||||
func aclRoleList(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get("acl-roles", "id")
|
||||
}
|
||||
|
||||
func aclRoleListByPolicy(tx ReadTxn, policy string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get("acl-roles", "policies", policy)
|
||||
return tx.FirstWatch(tableACLRoles, indexID, id)
|
||||
}
|
||||
|
||||
func aclRoleDeleteWithRole(tx *txn, role *structs.ACLRole, idx uint64) error {
|
||||
// remove the role
|
||||
if err := tx.Delete("acl-roles", role); err != nil {
|
||||
if err := tx.Delete(tableACLRoles, role); err != nil {
|
||||
return fmt.Errorf("failed deleting acl role: %v", err)
|
||||
}
|
||||
|
||||
// update the overall acl-roles index
|
||||
if err := indexUpdateMaxTxn(tx, idx, "acl-roles"); err != nil {
|
||||
if err := indexUpdateMaxTxn(tx, idx, tableACLRoles); err != nil {
|
||||
return fmt.Errorf("failed updating acl policies index: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func aclRoleMaxIndex(tx ReadTxn, _ *structs.ACLRole, _ *structs.EnterpriseMeta) uint64 {
|
||||
return maxIndexTxn(tx, "acl-roles")
|
||||
return maxIndexTxn(tx, tableACLRoles)
|
||||
}
|
||||
|
||||
func aclRoleUpsertValidateEnterprise(tx *txn, role *structs.ACLRole, existing *structs.ACLRole) error {
|
||||
|
82
agent/consul/state/acl_oss_test.go
Normal file
82
agent/consul/state/acl_oss_test.go
Normal file
@ -0,0 +1,82 @@
|
||||
// +build !consulent
|
||||
|
||||
package state
|
||||
|
||||
import "github.com/hashicorp/consul/agent/structs"
|
||||
|
||||
func testIndexerTableACLPolicies() map[string]indexerTestCase {
|
||||
obj := &structs.ACLPolicy{
|
||||
ID: "123e4567-e89b-12d3-a456-426614174abc",
|
||||
Name: "PoLiCyNaMe",
|
||||
}
|
||||
encodedID := []byte{0x12, 0x3e, 0x45, 0x67, 0xe8, 0x9b, 0x12, 0xd3, 0xa4, 0x56, 0x42, 0x66, 0x14, 0x17, 0x4a, 0xbc}
|
||||
return map[string]indexerTestCase{
|
||||
indexID: {
|
||||
read: indexValue{
|
||||
source: obj.ID,
|
||||
expected: encodedID,
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: encodedID,
|
||||
},
|
||||
},
|
||||
indexName: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "PolicyName"},
|
||||
expected: []byte("policyname\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("policyname\x00"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testIndexerTableACLRoles() map[string]indexerTestCase {
|
||||
policyID1 := "123e4567-e89a-12d7-a456-426614174001"
|
||||
policyID2 := "123e4567-e89a-12d7-a456-426614174002"
|
||||
obj := &structs.ACLRole{
|
||||
ID: "123e4567-e89a-12d7-a456-426614174abc",
|
||||
Name: "RoLe",
|
||||
Policies: []structs.ACLRolePolicyLink{
|
||||
{ID: policyID1}, {ID: policyID2},
|
||||
},
|
||||
}
|
||||
encodedID := []byte{0x12, 0x3e, 0x45, 0x67, 0xe8, 0x9a, 0x12, 0xd7, 0xa4, 0x56, 0x42, 0x66, 0x14, 0x17, 0x4a, 0xbc}
|
||||
encodedPID1 := []byte{0x12, 0x3e, 0x45, 0x67, 0xe8, 0x9a, 0x12, 0xd7, 0xa4, 0x56, 0x42, 0x66, 0x14, 0x17, 0x40, 0x01}
|
||||
encodedPID2 := []byte{0x12, 0x3e, 0x45, 0x67, 0xe8, 0x9a, 0x12, 0xd7, 0xa4, 0x56, 0x42, 0x66, 0x14, 0x17, 0x40, 0x02}
|
||||
return map[string]indexerTestCase{
|
||||
indexID: {
|
||||
read: indexValue{
|
||||
source: obj.ID,
|
||||
expected: encodedID,
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: encodedID,
|
||||
},
|
||||
},
|
||||
indexName: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "RoLe"},
|
||||
expected: []byte("role\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("role\x00"),
|
||||
},
|
||||
},
|
||||
indexPolicies: {
|
||||
read: indexValue{
|
||||
source: Query{Value: policyID1},
|
||||
expected: encodedPID1,
|
||||
},
|
||||
writeMulti: indexValueMulti{
|
||||
source: obj,
|
||||
expected: [][]byte{encodedPID1, encodedPID2},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
@ -1,6 +1,9 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/go-memdb"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
@ -125,16 +128,31 @@ func policiesTableSchema() *memdb.TableSchema {
|
||||
Name: indexName,
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: &memdb.StringFieldIndex{
|
||||
Field: "Name",
|
||||
// TODO (ACL-V2) - should we coerce to lowercase?
|
||||
Lowercase: true,
|
||||
Indexer: indexerSingleWithPrefix{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexNameFromACLPolicy,
|
||||
prefixIndex: prefixIndexFromQuery,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func indexNameFromACLPolicy(raw interface{}) ([]byte, error) {
|
||||
p, ok := raw.(*structs.ACLPolicy)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.ACLPolicy index", raw)
|
||||
}
|
||||
|
||||
if p.Name == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(p.Name))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func rolesTableSchema() *memdb.TableSchema {
|
||||
return &memdb.TableSchema{
|
||||
Name: tableACLRoles,
|
||||
@ -151,9 +169,10 @@ func rolesTableSchema() *memdb.TableSchema {
|
||||
Name: indexName,
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: &memdb.StringFieldIndex{
|
||||
Field: "Name",
|
||||
Lowercase: true,
|
||||
Indexer: indexerSingleWithPrefix{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexNameFromACLRole,
|
||||
prefixIndex: prefixIndexFromQuery,
|
||||
},
|
||||
},
|
||||
indexPolicies: {
|
||||
@ -161,12 +180,61 @@ func rolesTableSchema() *memdb.TableSchema {
|
||||
// Need to allow missing for the anonymous token
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &RolePoliciesIndex{},
|
||||
Indexer: indexerMulti{
|
||||
readIndex: indexFromUUIDQuery,
|
||||
writeIndexMulti: multiIndexPolicyFromACLRole,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func indexNameFromACLRole(raw interface{}) ([]byte, error) {
|
||||
p, ok := raw.(*structs.ACLRole)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.ACLRole index", raw)
|
||||
}
|
||||
|
||||
if p.Name == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(p.Name))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func indexFromUUIDQuery(raw interface{}) ([]byte, error) {
|
||||
q, ok := raw.(Query)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for UUIDQuery index", raw)
|
||||
}
|
||||
return uuidStringToBytes(q.Value)
|
||||
}
|
||||
|
||||
func multiIndexPolicyFromACLRole(raw interface{}) ([][]byte, error) {
|
||||
role, ok := raw.(*structs.ACLRole)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.ACLRole index", raw)
|
||||
}
|
||||
|
||||
count := len(role.Policies)
|
||||
if count == 0 {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
vals := make([][]byte, 0, count)
|
||||
for _, link := range role.Policies {
|
||||
v, err := uuidStringToBytes(link.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vals = append(vals, v)
|
||||
}
|
||||
|
||||
return vals, nil
|
||||
}
|
||||
|
||||
func bindingRulesTableSchema() *memdb.TableSchema {
|
||||
return &memdb.TableSchema{
|
||||
Name: tableACLBindingRules,
|
||||
|
@ -7,13 +7,14 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
pbacl "github.com/hashicorp/consul/proto/pbacl"
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -3801,7 +3802,7 @@ func TestStateStore_ACLPolicies_Snapshot_Restore(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(2), idx)
|
||||
require.ElementsMatch(t, policies, res)
|
||||
require.Equal(t, uint64(2), s.maxIndex("acl-policies"))
|
||||
require.Equal(t, uint64(2), s.maxIndex(tableACLPolicies))
|
||||
}()
|
||||
}
|
||||
|
||||
@ -4081,7 +4082,7 @@ func TestStateStore_ACLRoles_Snapshot_Restore(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(2), idx)
|
||||
require.ElementsMatch(t, roles, res)
|
||||
require.Equal(t, uint64(2), s.maxIndex("acl-roles"))
|
||||
require.Equal(t, uint64(2), s.maxIndex(tableACLRoles))
|
||||
}()
|
||||
}
|
||||
|
||||
|
@ -11,7 +11,6 @@ import (
|
||||
"github.com/mitchellh/copystructure"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
@ -41,7 +40,7 @@ func resizeNodeLookupKey(s string) string {
|
||||
|
||||
// Nodes is used to pull the full list of nodes for use during snapshots.
|
||||
func (s *Snapshot) Nodes() (memdb.ResultIterator, error) {
|
||||
iter, err := s.tx.Get("nodes", "id")
|
||||
iter, err := s.tx.Get(tableNodes, indexID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -51,21 +50,13 @@ func (s *Snapshot) Nodes() (memdb.ResultIterator, error) {
|
||||
// Services is used to pull the full list of services for a given node for use
|
||||
// during snapshots.
|
||||
func (s *Snapshot) Services(node string) (memdb.ResultIterator, error) {
|
||||
iter, err := catalogServiceListByNode(s.tx, node, structs.WildcardEnterpriseMeta(), true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return iter, nil
|
||||
return s.tx.Get(tableServices, indexNode, Query{Value: node})
|
||||
}
|
||||
|
||||
// Checks is used to pull the full list of checks for a given node for use
|
||||
// during snapshots.
|
||||
func (s *Snapshot) Checks(node string) (memdb.ResultIterator, error) {
|
||||
iter, err := catalogListChecksByNode(s.tx, node, structs.WildcardEnterpriseMeta())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return iter, nil
|
||||
return s.tx.Get(tableChecks, indexNode, Query{Value: node})
|
||||
}
|
||||
|
||||
// Registration is used to make sure a node, service, and check registration is
|
||||
@ -128,7 +119,7 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b
|
||||
// modify the node at all so we prevent watch churn and useless writes
|
||||
// and modify index bumps on the node.
|
||||
{
|
||||
existing, err := tx.First("nodes", "id", node.Node)
|
||||
existing, err := tx.First(tableNodes, indexID, Query{Value: node.Node})
|
||||
if err != nil {
|
||||
return fmt.Errorf("node lookup failed: %s", err)
|
||||
}
|
||||
@ -143,7 +134,7 @@ func (s *Store) ensureRegistrationTxn(tx WriteTxn, idx uint64, preserveIndexes b
|
||||
// node info above to make sure we actually need to update the service
|
||||
// definition in order to prevent useless churn if nothing has changed.
|
||||
if req.Service != nil {
|
||||
_, existing, err := firstWatchCompoundWithTxn(tx, "services", "id", &req.Service.EnterpriseMeta, req.Node, req.Service.ID)
|
||||
existing, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: req.Service.EnterpriseMeta, Node: req.Node, Service: req.Service.ID})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -187,7 +178,7 @@ func (s *Store) EnsureNode(idx uint64, node *structs.Node) error {
|
||||
// If allowClashWithoutID then, getting a conflict on another node without ID will be allowed
|
||||
func ensureNoNodeWithSimilarNameTxn(tx ReadTxn, node *structs.Node, allowClashWithoutID bool) error {
|
||||
// Retrieve all of the nodes
|
||||
enodes, err := tx.Get("nodes", "id")
|
||||
enodes, err := tx.Get(tableNodes, indexID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Cannot lookup all nodes: %s", err)
|
||||
}
|
||||
@ -196,7 +187,7 @@ func ensureNoNodeWithSimilarNameTxn(tx ReadTxn, node *structs.Node, allowClashWi
|
||||
if strings.EqualFold(node.Node, enode.Node) && node.ID != enode.ID {
|
||||
// Look up the existing node's Serf health check to see if it's failed.
|
||||
// If it is, the node can be renamed.
|
||||
_, enodeCheck, err := firstWatchCompoundWithTxn(tx, "checks", "id", structs.DefaultEnterpriseMeta(), enode.Node, string(structs.SerfCheckID))
|
||||
enodeCheck, err := tx.First(tableChecks, indexID, NodeCheckQuery{EnterpriseMeta: *structs.DefaultEnterpriseMeta(), Node: enode.Node, CheckID: string(structs.SerfCheckID)})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Cannot get status of node %s: %s", enode.Node, err)
|
||||
}
|
||||
@ -289,7 +280,7 @@ func (s *Store) ensureNodeTxn(tx WriteTxn, idx uint64, preserveIndexes bool, nod
|
||||
|
||||
// Check for an existing node by name to support nodes with no IDs.
|
||||
if n == nil {
|
||||
existing, err := tx.First("nodes", "id", node.Node)
|
||||
existing, err := tx.First(tableNodes, indexID, Query{Value: node.Node})
|
||||
if err != nil {
|
||||
return fmt.Errorf("node name lookup failed: %s", err)
|
||||
}
|
||||
@ -354,7 +345,7 @@ func (s *Store) GetNode(id string) (uint64, *structs.Node, error) {
|
||||
}
|
||||
|
||||
func getNodeTxn(tx ReadTxn, nodeName string) (*structs.Node, error) {
|
||||
node, err := tx.First("nodes", "id", nodeName)
|
||||
node, err := tx.First(tableNodes, indexID, Query{Value: nodeName})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("node lookup failed: %s", err)
|
||||
}
|
||||
@ -403,7 +394,7 @@ func (s *Store) Nodes(ws memdb.WatchSet) (uint64, structs.Nodes, error) {
|
||||
idx := maxIndexTxn(tx, "nodes")
|
||||
|
||||
// Retrieve all of the nodes
|
||||
nodes, err := tx.Get("nodes", "id")
|
||||
nodes, err := tx.Get(tableNodes, indexID)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
|
||||
}
|
||||
@ -493,7 +484,7 @@ func (s *Store) deleteNodeCASTxn(tx WriteTxn, idx, cidx uint64, nodeName string)
|
||||
// the store within a given transaction.
|
||||
func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string) error {
|
||||
// Look up the node.
|
||||
node, err := tx.First("nodes", "id", nodeName)
|
||||
node, err := tx.First(tableNodes, indexID, Query{Value: nodeName})
|
||||
if err != nil {
|
||||
return fmt.Errorf("node lookup failed: %s", err)
|
||||
}
|
||||
@ -502,7 +493,7 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string) error {
|
||||
}
|
||||
|
||||
// Delete all services associated with the node and update the service index.
|
||||
services, err := tx.Get("services", "node", nodeName)
|
||||
services, err := tx.Get(tableServices, indexNode, Query{Value: nodeName})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -528,7 +519,7 @@ func (s *Store) deleteNodeTxn(tx WriteTxn, idx uint64, nodeName string) error {
|
||||
|
||||
// Delete all checks associated with the node. This will invalidate
|
||||
// sessions as necessary.
|
||||
checks, err := tx.Get("checks", "node", nodeName)
|
||||
checks, err := tx.Get(tableChecks, indexNode, Query{Value: nodeName})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed check lookup: %s", err)
|
||||
}
|
||||
@ -604,7 +595,7 @@ var errCASCompareFailed = errors.New("compare-and-set: comparison failed")
|
||||
// Returns an error if the write didn't happen and nil if write was successful.
|
||||
func ensureServiceCASTxn(tx WriteTxn, idx uint64, node string, svc *structs.NodeService) error {
|
||||
// Retrieve the existing service.
|
||||
_, existing, err := firstWatchCompoundWithTxn(tx, "services", "id", &svc.EnterpriseMeta, node, svc.ID)
|
||||
existing, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: svc.EnterpriseMeta, Node: node, Service: svc.ID})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -629,7 +620,7 @@ func ensureServiceCASTxn(tx WriteTxn, idx uint64, node string, svc *structs.Node
|
||||
// existing memdb transaction.
|
||||
func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool, svc *structs.NodeService) error {
|
||||
// Check for existing service
|
||||
_, existing, err := firstWatchCompoundWithTxn(tx, "services", "id", &svc.EnterpriseMeta, node, svc.ID)
|
||||
existing, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: svc.EnterpriseMeta, Node: node, Service: svc.ID})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -654,7 +645,7 @@ func ensureServiceTxn(tx WriteTxn, idx uint64, node string, preserveIndexes bool
|
||||
// That's always populated when we read from the state store.
|
||||
entry := svc.ToServiceNode(node)
|
||||
// Get the node
|
||||
n, err := tx.First("nodes", "id", node)
|
||||
n, err := tx.First(tableNodes, indexID, Query{Value: node})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed node lookup: %s", err)
|
||||
}
|
||||
@ -693,7 +684,7 @@ func (s *Store) Services(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (ui
|
||||
idx := catalogServicesMaxIndex(tx, entMeta)
|
||||
|
||||
// List all the services.
|
||||
services, err := catalogServiceList(tx, entMeta, false)
|
||||
services, err := catalogServiceListNoWildcard(tx, entMeta)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed querying services: %s", err)
|
||||
}
|
||||
@ -725,17 +716,19 @@ func (s *Store) Services(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (ui
|
||||
return idx, results, nil
|
||||
}
|
||||
|
||||
func (s *Store) ServiceList(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) {
|
||||
func (s *Store) ServiceList(ws memdb.WatchSet,
|
||||
include func(svc *structs.ServiceNode) bool, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) {
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
|
||||
return serviceListTxn(tx, ws, entMeta)
|
||||
return serviceListTxn(tx, ws, include, entMeta)
|
||||
}
|
||||
|
||||
func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) {
|
||||
func serviceListTxn(tx ReadTxn, ws memdb.WatchSet,
|
||||
include func(svc *structs.ServiceNode) bool, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceList, error) {
|
||||
idx := catalogServicesMaxIndex(tx, entMeta)
|
||||
|
||||
services, err := catalogServiceList(tx, entMeta, true)
|
||||
services, err := tx.Get(tableServices, indexID+"_prefix", entMeta)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed querying services: %s", err)
|
||||
}
|
||||
@ -744,7 +737,11 @@ func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMe
|
||||
unique := make(map[structs.ServiceName]struct{})
|
||||
for service := services.Next(); service != nil; service = services.Next() {
|
||||
svc := service.(*structs.ServiceNode)
|
||||
unique[svc.CompoundServiceName()] = struct{}{}
|
||||
// TODO (freddy) This is a hack to exclude certain kinds.
|
||||
// Need a new index to query by kind and namespace, have to coordinate with consul foundations first
|
||||
if include == nil || include(svc) {
|
||||
unique[svc.CompoundServiceName()] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
results := make(structs.ServiceList, 0, len(unique))
|
||||
@ -780,7 +777,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string,
|
||||
|
||||
// We don't want to track an unlimited number of services, so we pull a
|
||||
// top-level watch to use as a fallback.
|
||||
allServices, err := catalogServiceList(tx, entMeta, false)
|
||||
allServices, err := catalogServiceListNoWildcard(tx, entMeta)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed services lookup: %s", err)
|
||||
}
|
||||
@ -900,25 +897,34 @@ func maxIndexAndWatchChsForServiceNodes(tx ReadTxn,
|
||||
// compatible destination for the given service name. This will include
|
||||
// both proxies and native integrations.
|
||||
func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) {
|
||||
return s.serviceNodes(ws, serviceName, true, entMeta)
|
||||
tx := s.db.ReadTxn()
|
||||
defer tx.Abort()
|
||||
|
||||
// TODO: accept non-pointer value
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
q := Query{Value: serviceName, EnterpriseMeta: *entMeta}
|
||||
return serviceNodesTxn(tx, ws, indexConnect, q)
|
||||
}
|
||||
|
||||
// ServiceNodes returns the nodes associated with a given service name.
|
||||
func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) {
|
||||
return s.serviceNodes(ws, serviceName, false, entMeta)
|
||||
}
|
||||
|
||||
func (s *Store) serviceNodes(ws memdb.WatchSet, serviceName string, connect bool, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) {
|
||||
tx := s.db.Txn(false)
|
||||
tx := s.db.ReadTxn()
|
||||
defer tx.Abort()
|
||||
|
||||
// Function for lookup
|
||||
index := "service"
|
||||
if connect {
|
||||
index = "connect"
|
||||
// TODO: accept non-pointer value
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
q := Query{Value: serviceName, EnterpriseMeta: *entMeta}
|
||||
return serviceNodesTxn(tx, ws, indexService, q)
|
||||
}
|
||||
|
||||
services, err := catalogServiceNodeList(tx, serviceName, index, entMeta)
|
||||
func serviceNodesTxn(tx ReadTxn, ws memdb.WatchSet, index string, q Query) (uint64, structs.ServiceNodes, error) {
|
||||
connect := index == indexConnect
|
||||
serviceName := q.Value
|
||||
services, err := tx.Get(tableServices, index, q)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -936,7 +942,7 @@ func (s *Store) serviceNodes(ws memdb.WatchSet, serviceName string, connect bool
|
||||
var idx uint64
|
||||
if connect {
|
||||
// Look up gateway nodes associated with the service
|
||||
gwIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindTerminatingGateway, entMeta)
|
||||
gwIdx, nodes, err := serviceGatewayNodes(tx, ws, serviceName, structs.ServiceKindTerminatingGateway, &q.EnterpriseMeta)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed gateway nodes lookup: %v", err)
|
||||
}
|
||||
@ -967,7 +973,7 @@ func (s *Store) serviceNodes(ws memdb.WatchSet, serviceName string, connect bool
|
||||
// Get the table index.
|
||||
// TODO (gateways) (freddy) Why do we always consider the main service index here?
|
||||
// This doesn't seem to make sense for Connect when there's more than 1 result
|
||||
svcIdx := maxIndexForService(tx, serviceName, len(results) > 0, false, entMeta)
|
||||
svcIdx := maxIndexForService(tx, serviceName, len(results) > 0, false, &q.EnterpriseMeta)
|
||||
if idx < svcIdx {
|
||||
idx = svcIdx
|
||||
}
|
||||
@ -981,8 +987,13 @@ func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tags []string
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
|
||||
// List all the services.
|
||||
services, err := catalogServiceNodeList(tx, service, "service", entMeta)
|
||||
// TODO: accept non-pointer value
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
||||
q := Query{Value: service, EnterpriseMeta: *entMeta}
|
||||
services, err := tx.Get(tableServices, indexService, q)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -1047,7 +1058,7 @@ func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta *
|
||||
defer tx.Abort()
|
||||
|
||||
// List all the services.
|
||||
services, err := catalogServiceList(tx, entMeta, true)
|
||||
services, err := tx.Get(tableServices, indexID+"_prefix", entMeta)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -1082,7 +1093,7 @@ func (s *Store) ServiceAddressNodes(ws memdb.WatchSet, address string, entMeta *
|
||||
func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNodes) (structs.ServiceNodes, error) {
|
||||
// We don't want to track an unlimited number of nodes, so we pull a
|
||||
// top-level watch to use as a fallback.
|
||||
allNodes, err := tx.Get("nodes", "id")
|
||||
allNodes, err := tx.Get(tableNodes, indexID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed nodes lookup: %s", err)
|
||||
}
|
||||
@ -1097,7 +1108,7 @@ func parseServiceNodes(tx ReadTxn, ws memdb.WatchSet, services structs.ServiceNo
|
||||
s := sn.PartialClone()
|
||||
|
||||
// Grab the corresponding node record.
|
||||
watchCh, n, err := tx.FirstWatch("nodes", "id", sn.Node)
|
||||
watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{Value: sn.Node})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed node lookup: %s", err)
|
||||
}
|
||||
@ -1137,8 +1148,13 @@ func (s *Store) NodeService(nodeName string, serviceID string, entMeta *structs.
|
||||
}
|
||||
|
||||
func getNodeServiceTxn(tx ReadTxn, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) (*structs.NodeService, error) {
|
||||
// TODO: pass non-pointer type for ent meta
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
||||
// Query the service
|
||||
_, service, err := firstWatchCompoundWithTxn(tx, "services", "id", entMeta, nodeName, serviceID)
|
||||
service, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: *entMeta, Node: nodeName, Service: serviceID})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed querying service for node %q: %s", nodeName, err)
|
||||
}
|
||||
@ -1158,7 +1174,7 @@ func (s *Store) nodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *st
|
||||
idx := catalogMaxIndex(tx, entMeta, false)
|
||||
|
||||
// Query the node by node name
|
||||
watchCh, n, err := tx.FirstWatch("nodes", "id", nodeNameOrID)
|
||||
watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{Value: nodeNameOrID})
|
||||
if err != nil {
|
||||
return true, 0, nil, nil, fmt.Errorf("node lookup failed: %s", err)
|
||||
}
|
||||
@ -1306,8 +1322,12 @@ func (s *Store) deleteServiceCASTxn(tx WriteTxn, idx, cidx uint64, nodeName, ser
|
||||
// deleteServiceTxn is the inner method called to remove a service
|
||||
// registration within an existing transaction.
|
||||
func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID string, entMeta *structs.EnterpriseMeta) error {
|
||||
// Look up the service.
|
||||
_, service, err := firstWatchCompoundWithTxn(tx, "services", "id", entMeta, nodeName, serviceID)
|
||||
// TODO: pass non-pointer type for ent meta
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
||||
service, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: *entMeta, Node: nodeName, Service: serviceID})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -1315,9 +1335,14 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: accept a non-pointer value for EnterpriseMeta
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
// Delete any checks associated with the service. This will invalidate
|
||||
// sessions as necessary.
|
||||
checks, err := catalogChecksForNodeService(tx, nodeName, serviceID, entMeta)
|
||||
nsq := NodeServiceQuery{Node: nodeName, Service: serviceID, EnterpriseMeta: *entMeta}
|
||||
checks, err := tx.Get(tableChecks, indexNodeService, nsq)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed service check lookup: %s", err)
|
||||
}
|
||||
@ -1339,7 +1364,7 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
|
||||
}
|
||||
|
||||
// Delete the service and update the index
|
||||
if err := tx.Delete("services", service); err != nil {
|
||||
if err := tx.Delete(tableServices, service); err != nil {
|
||||
return fmt.Errorf("failed deleting service: %s", err)
|
||||
}
|
||||
if err := catalogUpdateServicesIndexes(tx, idx, entMeta); err != nil {
|
||||
@ -1356,7 +1381,8 @@ func (s *Store) deleteServiceTxn(tx WriteTxn, idx uint64, nodeName, serviceID st
|
||||
return fmt.Errorf("failed to clean up mesh-topology associations for %q: %v", name.String(), err)
|
||||
}
|
||||
|
||||
if _, remainingService, err := firstWatchWithTxn(tx, "services", "service", svc.ServiceName, entMeta); err == nil {
|
||||
q := Query{Value: svc.ServiceName, EnterpriseMeta: *entMeta}
|
||||
if remainingService, err := tx.First(tableServices, indexService, q); err == nil {
|
||||
if remainingService != nil {
|
||||
// We have at least one remaining service, update the index
|
||||
if err := catalogUpdateServiceIndexes(tx, svc.ServiceName, idx, entMeta); err != nil {
|
||||
@ -1401,7 +1427,7 @@ func (s *Store) EnsureCheck(idx uint64, hc *structs.HealthCheck) error {
|
||||
|
||||
// updateAllServiceIndexesOfNode updates the Raft index of all the services associated with this node
|
||||
func updateAllServiceIndexesOfNode(tx WriteTxn, idx uint64, nodeID string) error {
|
||||
services, err := tx.Get("services", "node", nodeID)
|
||||
services, err := tx.Get(tableServices, indexNode, Query{Value: nodeID})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed updating services for node %s: %s", nodeID, err)
|
||||
}
|
||||
@ -1451,7 +1477,7 @@ func (s *Store) ensureCheckCASTxn(tx WriteTxn, idx uint64, hc *structs.HealthChe
|
||||
// checks with no matching node or service.
|
||||
func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc *structs.HealthCheck) error {
|
||||
// Check if we have an existing health check
|
||||
_, existing, err := firstWatchCompoundWithTxn(tx, "checks", "id", &hc.EnterpriseMeta, hc.Node, string(hc.CheckID))
|
||||
existing, err := tx.First(tableChecks, indexID, NodeCheckQuery{EnterpriseMeta: hc.EnterpriseMeta, Node: hc.Node, CheckID: string(hc.CheckID)})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed health check lookup: %s", err)
|
||||
}
|
||||
@ -1471,7 +1497,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc
|
||||
}
|
||||
|
||||
// Get the node
|
||||
node, err := tx.First("nodes", "id", hc.Node)
|
||||
node, err := tx.First(tableNodes, indexID, Query{Value: hc.Node})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed node lookup: %s", err)
|
||||
}
|
||||
@ -1483,7 +1509,7 @@ func (s *Store) ensureCheckTxn(tx WriteTxn, idx uint64, preserveIndexes bool, hc
|
||||
// If the check is associated with a service, check that we have
|
||||
// a registration for the service.
|
||||
if hc.ServiceID != "" {
|
||||
_, service, err := firstWatchCompoundWithTxn(tx, "services", "id", &hc.EnterpriseMeta, hc.Node, hc.ServiceID)
|
||||
service, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: hc.EnterpriseMeta, Node: hc.Node, Service: hc.ServiceID})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -1558,8 +1584,13 @@ func getNodeCheckTxn(tx ReadTxn, nodeName string, checkID types.CheckID, entMeta
|
||||
// Get the table index.
|
||||
idx := catalogChecksMaxIndex(tx, entMeta)
|
||||
|
||||
// TODO: accept non-pointer value
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
||||
// Return the check.
|
||||
_, check, err := firstWatchCompoundWithTxn(tx, "checks", "id", entMeta, nodeName, string(checkID))
|
||||
check, err := tx.First(tableChecks, indexID, NodeCheckQuery{EnterpriseMeta: *entMeta, Node: nodeName, CheckID: string(checkID)})
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed check lookup: %s", err)
|
||||
}
|
||||
@ -1576,11 +1607,15 @@ func (s *Store) NodeChecks(ws memdb.WatchSet, nodeName string, entMeta *structs.
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
||||
// Get the table index.
|
||||
idx := catalogChecksMaxIndex(tx, entMeta)
|
||||
|
||||
// Return the checks.
|
||||
iter, err := catalogListChecksByNode(tx, nodeName, entMeta)
|
||||
iter, err := catalogListChecksByNode(tx, Query{Value: nodeName, EnterpriseMeta: *entMeta})
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed check lookup: %s", err)
|
||||
}
|
||||
@ -1603,8 +1638,11 @@ func (s *Store) ServiceChecks(ws memdb.WatchSet, serviceName string, entMeta *st
|
||||
// Get the table index.
|
||||
idx := catalogChecksMaxIndex(tx, entMeta)
|
||||
|
||||
// Return the checks.
|
||||
iter, err := catalogListChecksByService(tx, serviceName, entMeta)
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
q := Query{Value: serviceName, EnterpriseMeta: *entMeta}
|
||||
iter, err := tx.Get(tableChecks, indexService, q)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed check lookup: %s", err)
|
||||
}
|
||||
@ -1628,8 +1666,12 @@ func (s *Store) ServiceChecksByNodeMeta(ws memdb.WatchSet, serviceName string,
|
||||
|
||||
// Get the table index.
|
||||
idx := maxIndexForService(tx, serviceName, true, true, entMeta)
|
||||
// Return the checks.
|
||||
iter, err := catalogListChecksByService(tx, serviceName, entMeta)
|
||||
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
q := Query{Value: serviceName, EnterpriseMeta: *entMeta}
|
||||
iter, err := tx.Get(tableChecks, indexService, q)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed check lookup: %s", err)
|
||||
}
|
||||
@ -1674,13 +1716,18 @@ func checksInStateTxn(tx ReadTxn, ws memdb.WatchSet, state string, entMeta *stru
|
||||
// Get the table index.
|
||||
idx := catalogChecksMaxIndex(tx, entMeta)
|
||||
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
||||
// Query all checks if HealthAny is passed, otherwise use the index.
|
||||
var iter memdb.ResultIterator
|
||||
var err error
|
||||
if state == api.HealthAny {
|
||||
iter, err = catalogListChecks(tx, entMeta)
|
||||
iter, err = tx.Get(tableChecks, indexID+"_prefix", entMeta)
|
||||
} else {
|
||||
iter, err = catalogListChecksInState(tx, state, entMeta)
|
||||
q := Query{Value: state, EnterpriseMeta: *entMeta}
|
||||
iter, err = tx.Get(tableChecks, indexStatus, q)
|
||||
}
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed check lookup: %s", err)
|
||||
@ -1697,7 +1744,7 @@ func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet,
|
||||
|
||||
// We don't want to track an unlimited number of nodes, so we pull a
|
||||
// top-level watch to use as a fallback.
|
||||
allNodes, err := tx.Get("nodes", "id")
|
||||
allNodes, err := tx.Get(tableNodes, indexID)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
|
||||
}
|
||||
@ -1707,7 +1754,7 @@ func parseChecksByNodeMeta(tx ReadTxn, ws memdb.WatchSet,
|
||||
var results structs.HealthChecks
|
||||
for check := iter.Next(); check != nil; check = iter.Next() {
|
||||
healthCheck := check.(*structs.HealthCheck)
|
||||
watchCh, node, err := tx.FirstWatch("nodes", "id", healthCheck.Node)
|
||||
watchCh, node, err := tx.FirstWatch(tableNodes, indexID, Query{Value: healthCheck.Node})
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
|
||||
}
|
||||
@ -1766,11 +1813,28 @@ func (s *Store) deleteCheckCASTxn(tx WriteTxn, idx, cidx uint64, node string, ch
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// NodeServiceQuery is a type used to query the checks table.
|
||||
type NodeServiceQuery struct {
|
||||
Node string
|
||||
Service string
|
||||
structs.EnterpriseMeta
|
||||
}
|
||||
|
||||
// NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer
|
||||
// receiver for this method. Remove once that is fixed.
|
||||
func (q NodeServiceQuery) NamespaceOrDefault() string {
|
||||
return q.EnterpriseMeta.NamespaceOrDefault()
|
||||
}
|
||||
|
||||
// deleteCheckTxn is the inner method used to call a health
|
||||
// check deletion within an existing transaction.
|
||||
func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID types.CheckID, entMeta *structs.EnterpriseMeta) error {
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
||||
// Try to retrieve the existing health check.
|
||||
_, hc, err := firstWatchCompoundWithTxn(tx, "checks", "id", entMeta, node, string(checkID))
|
||||
hc, err := tx.First(tableChecks, indexID, NodeCheckQuery{EnterpriseMeta: *entMeta, Node: node, CheckID: string(checkID)})
|
||||
if err != nil {
|
||||
return fmt.Errorf("check lookup failed: %s", err)
|
||||
}
|
||||
@ -1785,7 +1849,7 @@ func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID typ
|
||||
return err
|
||||
}
|
||||
|
||||
_, svcRaw, err := firstWatchCompoundWithTxn(tx, "services", "id", &existing.EnterpriseMeta, existing.Node, existing.ServiceID)
|
||||
svcRaw, err := tx.First(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: existing.EnterpriseMeta, Node: existing.Node, Service: existing.ServiceID})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed retrieving service from state store: %v", err)
|
||||
}
|
||||
@ -1805,7 +1869,7 @@ func (s *Store) deleteCheckTxn(tx WriteTxn, idx uint64, node string, checkID typ
|
||||
}
|
||||
|
||||
// Delete the check from the DB and update the index.
|
||||
if err := tx.Delete("checks", hc); err != nil {
|
||||
if err := tx.Delete(tableChecks, hc); err != nil {
|
||||
return fmt.Errorf("failed removing check: %s", err)
|
||||
}
|
||||
|
||||
@ -1913,14 +1977,18 @@ func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect
|
||||
}
|
||||
|
||||
func checkServiceNodesTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, connect bool, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) {
|
||||
// Function for lookup
|
||||
index := "service"
|
||||
index := indexService
|
||||
if connect {
|
||||
index = "connect"
|
||||
index = indexConnect
|
||||
}
|
||||
|
||||
// Query the state store for the service.
|
||||
iter, err := catalogServiceNodeList(tx, serviceName, index, entMeta)
|
||||
// TODO: accept non-pointer
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
||||
q := Query{Value: serviceName, EnterpriseMeta: *entMeta}
|
||||
iter, err := tx.Get(tableServices, index, q)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -2043,8 +2111,13 @@ func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
|
||||
// Query the state store for the service.
|
||||
iter, err := catalogServiceNodeList(tx, serviceName, "service", entMeta)
|
||||
// TODO: accept non-pointer value
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
||||
q := Query{Value: serviceName, EnterpriseMeta: *entMeta}
|
||||
iter, err := tx.Get(tableServices, indexService, q)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -2071,7 +2144,7 @@ func (s *Store) GatewayServices(ws memdb.WatchSet, gateway string, entMeta *stru
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
|
||||
iter, err := gatewayServices(tx, gateway, entMeta)
|
||||
iter, err := tx.Get(tableGatewayServices, indexGateway, structs.NewServiceName(gateway, entMeta))
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed gateway services lookup: %s", err)
|
||||
}
|
||||
@ -2089,6 +2162,10 @@ func (s *Store) GatewayServices(ws memdb.WatchSet, gateway string, entMeta *stru
|
||||
// parseCheckServiceNodes is used to parse through a given set of services,
|
||||
// and query for an associated node and a set of checks. This is the inner
|
||||
// method used to return a rich set of results from a more simple query.
|
||||
//
|
||||
// TODO: idx parameter is not used except as a return value. Remove it.
|
||||
// TODO: err parameter is only used for early return. Remove it and check from the
|
||||
// caller.
|
||||
func parseCheckServiceNodes(
|
||||
tx ReadTxn, ws memdb.WatchSet, idx uint64,
|
||||
services structs.ServiceNodes,
|
||||
@ -2105,7 +2182,7 @@ func parseCheckServiceNodes(
|
||||
|
||||
// We don't want to track an unlimited number of nodes, so we pull a
|
||||
// top-level watch to use as a fallback.
|
||||
allNodes, err := tx.Get("nodes", "id")
|
||||
allNodes, err := tx.Get(tableNodes, indexID)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed nodes lookup: %s", err)
|
||||
}
|
||||
@ -2114,7 +2191,7 @@ func parseCheckServiceNodes(
|
||||
// We need a similar fallback for checks. Since services need the
|
||||
// status of node + service-specific checks, we pull in a top-level
|
||||
// watch over all checks.
|
||||
allChecks, err := tx.Get("checks", "id")
|
||||
allChecks, err := tx.Get(tableChecks, indexID)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed checks lookup: %s", err)
|
||||
}
|
||||
@ -2123,7 +2200,7 @@ func parseCheckServiceNodes(
|
||||
results := make(structs.CheckServiceNodes, 0, len(services))
|
||||
for _, sn := range services {
|
||||
// Retrieve the node.
|
||||
watchCh, n, err := tx.FirstWatch("nodes", "id", sn.Node)
|
||||
watchCh, n, err := tx.FirstWatch(tableNodes, indexID, Query{Value: sn.Node})
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
|
||||
}
|
||||
@ -2137,7 +2214,8 @@ func parseCheckServiceNodes(
|
||||
// First add the node-level checks. These always apply to any
|
||||
// service on the node.
|
||||
var checks structs.HealthChecks
|
||||
iter, err := catalogListNodeChecks(tx, sn.Node)
|
||||
q := NodeServiceQuery{Node: sn.Node, EnterpriseMeta: *structs.DefaultEnterpriseMeta()}
|
||||
iter, err := tx.Get(tableChecks, indexNodeService, q)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
@ -2147,7 +2225,8 @@ func parseCheckServiceNodes(
|
||||
}
|
||||
|
||||
// Now add the service-specific checks.
|
||||
iter, err = catalogListServiceChecks(tx, sn.Node, sn.ServiceID, &sn.EnterpriseMeta)
|
||||
q = NodeServiceQuery{Node: sn.Node, Service: sn.ServiceID, EnterpriseMeta: sn.EnterpriseMeta}
|
||||
iter, err = tx.Get(tableChecks, indexNodeService, q)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
@ -2177,7 +2256,7 @@ func (s *Store) NodeInfo(ws memdb.WatchSet, node string, entMeta *structs.Enterp
|
||||
idx := catalogMaxIndex(tx, entMeta, true)
|
||||
|
||||
// Query the node by the passed node
|
||||
nodes, err := tx.Get("nodes", "id", node)
|
||||
nodes, err := tx.Get(tableNodes, indexID, Query{Value: node})
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
|
||||
}
|
||||
@ -2196,7 +2275,7 @@ func (s *Store) NodeDump(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (ui
|
||||
idx := catalogMaxIndex(tx, entMeta, true)
|
||||
|
||||
// Fetch all of the registered nodes
|
||||
nodes, err := tx.Get("nodes", "id")
|
||||
nodes, err := tx.Get(tableNodes, indexID)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
|
||||
}
|
||||
@ -2219,7 +2298,7 @@ func serviceDumpAllTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.Enterpris
|
||||
// Get the table index
|
||||
idx := catalogMaxIndexWatch(tx, ws, entMeta, true)
|
||||
|
||||
services, err := catalogServiceList(tx, entMeta, true)
|
||||
services, err := tx.Get(tableServices, indexID+"_prefix", entMeta)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -2239,8 +2318,11 @@ func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind,
|
||||
// entries
|
||||
idx := catalogServiceKindMaxIndex(tx, ws, kind, entMeta)
|
||||
|
||||
// Query the state store for the service.
|
||||
services, err := catalogServiceListByKind(tx, kind, entMeta)
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
q := Query{Value: string(kind), EnterpriseMeta: *entMeta}
|
||||
services, err := tx.Get(tableServices, indexKind, q)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -2260,16 +2342,20 @@ func serviceDumpKindTxn(tx ReadTxn, ws memdb.WatchSet, kind structs.ServiceKind,
|
||||
func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64,
|
||||
iter memdb.ResultIterator, entMeta *structs.EnterpriseMeta) (uint64, structs.NodeDump, error) {
|
||||
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
||||
// We don't want to track an unlimited number of services, so we pull a
|
||||
// top-level watch to use as a fallback.
|
||||
allServices, err := tx.Get("services", "id")
|
||||
allServices, err := tx.Get(tableServices, indexID)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed services lookup: %s", err)
|
||||
}
|
||||
allServicesCh := allServices.WatchCh()
|
||||
|
||||
// We need a similar fallback for checks.
|
||||
allChecks, err := tx.Get("checks", "id")
|
||||
allChecks, err := tx.Get(tableChecks, indexID)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed checks lookup: %s", err)
|
||||
}
|
||||
@ -2300,7 +2386,7 @@ func parseNodes(tx ReadTxn, ws memdb.WatchSet, idx uint64,
|
||||
}
|
||||
|
||||
// Query the service level checks
|
||||
checks, err := catalogListChecksByNode(tx, node.Node, entMeta)
|
||||
checks, err := catalogListChecksByNode(tx, Query{Value: node.Node, EnterpriseMeta: *entMeta})
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed node lookup: %s", err)
|
||||
}
|
||||
@ -2355,7 +2441,7 @@ func updateGatewayServices(tx WriteTxn, idx uint64, conf structs.ConfigEntry, en
|
||||
// Delete all associated with gateway first, to avoid keeping mappings that were removed
|
||||
sn := structs.NewServiceName(conf.GetName(), entMeta)
|
||||
|
||||
if _, err := tx.DeleteAll(tableGatewayServices, "gateway", sn); err != nil {
|
||||
if _, err := tx.DeleteAll(tableGatewayServices, indexGateway, sn); err != nil {
|
||||
return fmt.Errorf("failed to truncate gateway services table: %v", err)
|
||||
}
|
||||
if err := truncateGatewayServiceTopologyMappings(tx, idx, sn, conf.GetKind()); err != nil {
|
||||
@ -2479,7 +2565,11 @@ func terminatingConfigGatewayServices(
|
||||
|
||||
// updateGatewayNamespace is used to target all services within a namespace
|
||||
func updateGatewayNamespace(tx WriteTxn, idx uint64, service *structs.GatewayService, entMeta *structs.EnterpriseMeta) error {
|
||||
services, err := catalogServiceListByKind(tx, structs.ServiceKindTypical, entMeta)
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
q := Query{Value: string(structs.ServiceKindTypical), EnterpriseMeta: *entMeta}
|
||||
services, err := tx.Get(tableServices, indexKind, q)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed querying services: %s", err)
|
||||
}
|
||||
@ -2493,7 +2583,7 @@ func updateGatewayNamespace(tx WriteTxn, idx uint64, service *structs.GatewaySer
|
||||
continue
|
||||
}
|
||||
|
||||
existing, err := tx.First(tableGatewayServices, "id", service.Gateway, sn.CompoundServiceName(), service.Port)
|
||||
existing, err := tx.First(tableGatewayServices, indexID, service.Gateway, sn.CompoundServiceName(), service.Port)
|
||||
if err != nil {
|
||||
return fmt.Errorf("gateway service lookup failed: %s", err)
|
||||
}
|
||||
@ -2528,7 +2618,7 @@ func updateGatewayNamespace(tx WriteTxn, idx uint64, service *structs.GatewaySer
|
||||
func updateGatewayService(tx WriteTxn, idx uint64, mapping *structs.GatewayService) error {
|
||||
// Check if mapping already exists in table if it's already in the table
|
||||
// Avoid insert if nothing changed
|
||||
existing, err := tx.First(tableGatewayServices, "id", mapping.Gateway, mapping.Service, mapping.Port)
|
||||
existing, err := tx.First(tableGatewayServices, indexID, mapping.Gateway, mapping.Service, mapping.Port)
|
||||
if err != nil {
|
||||
return fmt.Errorf("gateway service lookup failed: %s", err)
|
||||
}
|
||||
@ -2566,7 +2656,8 @@ func checkGatewayWildcardsAndUpdate(tx WriteTxn, idx uint64, svc *structs.NodeSe
|
||||
return nil
|
||||
}
|
||||
|
||||
svcGateways, err := serviceGateways(tx, structs.WildcardSpecifier, &svc.EnterpriseMeta)
|
||||
sn := structs.ServiceName{Name: structs.WildcardSpecifier, EnterpriseMeta: svc.EnterpriseMeta}
|
||||
svcGateways, err := tx.Get(tableGatewayServices, indexService, sn)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed gateway lookup for %q: %s", svc.Service, err)
|
||||
}
|
||||
@ -2589,7 +2680,8 @@ func checkGatewayWildcardsAndUpdate(tx WriteTxn, idx uint64, svc *structs.NodeSe
|
||||
|
||||
func cleanupGatewayWildcards(tx WriteTxn, idx uint64, svc *structs.ServiceNode) error {
|
||||
// Clean up association between service name and gateways if needed
|
||||
gateways, err := serviceGateways(tx, svc.ServiceName, &svc.EnterpriseMeta)
|
||||
sn := structs.ServiceName{Name: svc.ServiceName, EnterpriseMeta: svc.EnterpriseMeta}
|
||||
gateways, err := tx.Get(tableGatewayServices, indexService, sn)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed gateway lookup for %q: %s", svc.ServiceName, err)
|
||||
}
|
||||
@ -2621,21 +2713,11 @@ func cleanupGatewayWildcards(tx WriteTxn, idx uint64, svc *structs.ServiceNode)
|
||||
return nil
|
||||
}
|
||||
|
||||
// serviceGateways returns all GatewayService entries with the given service name. This effectively looks up
|
||||
// all the gateways mapped to this service.
|
||||
func serviceGateways(tx ReadTxn, name string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get(tableGatewayServices, "service", structs.NewServiceName(name, entMeta))
|
||||
}
|
||||
|
||||
func gatewayServices(tx ReadTxn, name string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get(tableGatewayServices, "gateway", structs.NewServiceName(name, entMeta))
|
||||
}
|
||||
|
||||
func (s *Store) DumpGatewayServices(ws memdb.WatchSet) (uint64, structs.GatewayServices, error) {
|
||||
tx := s.db.ReadTxn()
|
||||
defer tx.Abort()
|
||||
|
||||
iter, err := tx.Get(tableGatewayServices, "id")
|
||||
iter, err := tx.Get(tableGatewayServices, indexID)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to dump gateway-services: %s", err)
|
||||
}
|
||||
@ -2678,7 +2760,7 @@ func (s *Store) collectGatewayServices(tx ReadTxn, ws memdb.WatchSet, iter memdb
|
||||
// We might need something like the service_last_extinction index?
|
||||
func serviceGatewayNodes(tx ReadTxn, ws memdb.WatchSet, service string, kind structs.ServiceKind, entMeta *structs.EnterpriseMeta) (uint64, structs.ServiceNodes, error) {
|
||||
// Look up gateway name associated with the service
|
||||
gws, err := serviceGateways(tx, service, entMeta)
|
||||
gws, err := tx.Get(tableGatewayServices, indexService, structs.NewServiceName(service, entMeta))
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed gateway lookup: %s", err)
|
||||
}
|
||||
@ -2699,7 +2781,8 @@ func serviceGatewayNodes(tx ReadTxn, ws memdb.WatchSet, service string, kind str
|
||||
maxIdx = lib.MaxUint64(maxIdx, mapping.ModifyIndex)
|
||||
|
||||
// Look up nodes for gateway
|
||||
gwServices, err := catalogServiceNodeList(tx, mapping.Gateway.Name, "service", &mapping.Gateway.EnterpriseMeta)
|
||||
q := Query{Value: mapping.Gateway.Name, EnterpriseMeta: mapping.Gateway.EnterpriseMeta}
|
||||
gwServices, err := tx.Get(tableServices, indexService, q)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed service lookup: %s", err)
|
||||
}
|
||||
@ -2822,16 +2905,20 @@ func (s *Store) ServiceTopology(
|
||||
|
||||
upstreamDecisions := make(map[string]structs.IntentionDecisionSummary)
|
||||
|
||||
// The given service is the source relative to upstreams
|
||||
sourceURI := connect.SpiffeIDService{
|
||||
matchEntry := structs.IntentionMatchEntry{
|
||||
Namespace: entMeta.NamespaceOrDefault(),
|
||||
Service: service,
|
||||
Name: service,
|
||||
}
|
||||
// The given service is a source relative to its upstreams
|
||||
_, srcIntentions, err := compatIntentionMatchOneTxn(tx, ws, matchEntry, structs.IntentionMatchSource)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to query intentions for %s", sn.String())
|
||||
}
|
||||
for _, un := range upstreamNames {
|
||||
decision, err := s.IntentionDecision(&sourceURI, un.Name, un.NamespaceOrDefault(), defaultAllow)
|
||||
decision, err := s.IntentionDecision(un.Name, un.NamespaceOrDefault(), srcIntentions, structs.IntentionMatchDestination, defaultAllow, false)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to get intention decision from (%s/%s) to (%s/%s): %v",
|
||||
sourceURI.Namespace, sourceURI.Service, un.Name, un.NamespaceOrDefault(), err)
|
||||
return 0, nil, fmt.Errorf("failed to get intention decision from (%s) to (%s): %v",
|
||||
sn.String(), un.String(), err)
|
||||
}
|
||||
upstreamDecisions[un.String()] = decision
|
||||
}
|
||||
@ -2851,17 +2938,17 @@ func (s *Store) ServiceTopology(
|
||||
maxIdx = idx
|
||||
}
|
||||
|
||||
// The given service is a destination relative to its downstreams
|
||||
_, dstIntentions, err := compatIntentionMatchOneTxn(tx, ws, matchEntry, structs.IntentionMatchDestination)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to query intentions for %s", sn.String())
|
||||
}
|
||||
downstreamDecisions := make(map[string]structs.IntentionDecisionSummary)
|
||||
for _, dn := range downstreamNames {
|
||||
// Downstreams are the source relative to the given service
|
||||
sourceURI := connect.SpiffeIDService{
|
||||
Namespace: dn.NamespaceOrDefault(),
|
||||
Service: dn.Name,
|
||||
}
|
||||
decision, err := s.IntentionDecision(&sourceURI, service, entMeta.NamespaceOrDefault(), defaultAllow)
|
||||
decision, err := s.IntentionDecision(dn.Name, dn.NamespaceOrDefault(), dstIntentions, structs.IntentionMatchSource, defaultAllow, false)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to get intention decision from (%s/%s) to (%s/%s): %v",
|
||||
sourceURI.Namespace, sourceURI.Service, service, dn.NamespaceOrDefault(), err)
|
||||
return 0, nil, fmt.Errorf("failed to get intention decision from (%s) to (%s): %v",
|
||||
dn.String(), sn.String(), err)
|
||||
}
|
||||
downstreamDecisions[dn.String()] = decision
|
||||
}
|
||||
@ -2957,9 +3044,9 @@ func downstreamsFromRegistrationTxn(tx ReadTxn, ws memdb.WatchSet, sn structs.Se
|
||||
func linkedFromRegistrationTxn(tx ReadTxn, ws memdb.WatchSet, service structs.ServiceName, downstreams bool) (uint64, []structs.ServiceName, error) {
|
||||
// To fetch upstreams we query services that have the input listed as a downstream
|
||||
// To fetch downstreams we query services that have the input listed as an upstream
|
||||
index := "downstream"
|
||||
index := indexDownstream
|
||||
if downstreams {
|
||||
index = "upstream"
|
||||
index = indexUpstream
|
||||
}
|
||||
|
||||
iter, err := tx.Get(tableMeshTopology, index, service)
|
||||
@ -2973,7 +3060,7 @@ func linkedFromRegistrationTxn(tx ReadTxn, ws memdb.WatchSet, service structs.Se
|
||||
resp []structs.ServiceName
|
||||
)
|
||||
for raw := iter.Next(); raw != nil; raw = iter.Next() {
|
||||
entry := raw.(*structs.UpstreamDownstream)
|
||||
entry := raw.(*upstreamDownstream)
|
||||
if entry.ModifyIndex > idx {
|
||||
idx = entry.ModifyIndex
|
||||
}
|
||||
@ -3018,20 +3105,20 @@ func updateMeshTopology(tx WriteTxn, idx uint64, node string, svc *structs.NodeS
|
||||
upstreamMeta := structs.NewEnterpriseMeta(u.DestinationNamespace)
|
||||
upstream := structs.NewServiceName(u.DestinationName, &upstreamMeta)
|
||||
|
||||
obj, err := tx.First(tableMeshTopology, "id", upstream, downstream)
|
||||
obj, err := tx.First(tableMeshTopology, indexID, upstream, downstream)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%q lookup failed: %v", tableMeshTopology, err)
|
||||
}
|
||||
sid := svc.CompoundServiceID()
|
||||
uid := structs.UniqueID(node, sid.String())
|
||||
|
||||
var mapping *structs.UpstreamDownstream
|
||||
if existing, ok := obj.(*structs.UpstreamDownstream); ok {
|
||||
var mapping *upstreamDownstream
|
||||
if existing, ok := obj.(*upstreamDownstream); ok {
|
||||
rawCopy, err := copystructure.Copy(existing)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy existing topology mapping: %v", err)
|
||||
}
|
||||
mapping, ok = rawCopy.(*structs.UpstreamDownstream)
|
||||
mapping, ok = rawCopy.(*upstreamDownstream)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected topology type %T", rawCopy)
|
||||
}
|
||||
@ -3041,7 +3128,7 @@ func updateMeshTopology(tx WriteTxn, idx uint64, node string, svc *structs.NodeS
|
||||
inserted[upstream] = true
|
||||
}
|
||||
if mapping == nil {
|
||||
mapping = &structs.UpstreamDownstream{
|
||||
mapping = &upstreamDownstream{
|
||||
Upstream: upstream,
|
||||
Downstream: downstream,
|
||||
Refs: map[string]struct{}{uid: {}},
|
||||
@ -3062,7 +3149,7 @@ func updateMeshTopology(tx WriteTxn, idx uint64, node string, svc *structs.NodeS
|
||||
|
||||
for u := range oldUpstreams {
|
||||
if !inserted[u] {
|
||||
if _, err := tx.DeleteAll(tableMeshTopology, "id", u, downstream); err != nil {
|
||||
if _, err := tx.DeleteAll(tableMeshTopology, indexID, u, downstream); err != nil {
|
||||
return fmt.Errorf("failed to truncate %s table: %v", tableMeshTopology, err)
|
||||
}
|
||||
if err := indexUpdateMaxTxn(tx, idx, tableMeshTopology); err != nil {
|
||||
@ -3084,14 +3171,14 @@ func cleanupMeshTopology(tx WriteTxn, idx uint64, service *structs.ServiceNode)
|
||||
sid := service.CompoundServiceID()
|
||||
uid := structs.UniqueID(service.Node, sid.String())
|
||||
|
||||
iter, err := tx.Get(tableMeshTopology, "downstream", sn)
|
||||
iter, err := tx.Get(tableMeshTopology, indexDownstream, sn)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%q lookup failed: %v", tableMeshTopology, err)
|
||||
}
|
||||
|
||||
mappings := make([]*structs.UpstreamDownstream, 0)
|
||||
mappings := make([]*upstreamDownstream, 0)
|
||||
for raw := iter.Next(); raw != nil; raw = iter.Next() {
|
||||
mappings = append(mappings, raw.(*structs.UpstreamDownstream))
|
||||
mappings = append(mappings, raw.(*upstreamDownstream))
|
||||
}
|
||||
|
||||
// Do the updates in a separate loop so we don't trash the iterator.
|
||||
@ -3100,7 +3187,7 @@ func cleanupMeshTopology(tx WriteTxn, idx uint64, service *structs.ServiceNode)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy existing topology mapping: %v", err)
|
||||
}
|
||||
copy, ok := rawCopy.(*structs.UpstreamDownstream)
|
||||
copy, ok := rawCopy.(*upstreamDownstream)
|
||||
if !ok {
|
||||
return fmt.Errorf("unexpected topology type %T", rawCopy)
|
||||
}
|
||||
@ -3134,7 +3221,7 @@ func insertGatewayServiceTopologyMapping(tx WriteTxn, idx uint64, gs *structs.Ga
|
||||
return nil
|
||||
}
|
||||
|
||||
mapping := structs.UpstreamDownstream{
|
||||
mapping := upstreamDownstream{
|
||||
Upstream: gs.Service,
|
||||
Downstream: gs.Gateway,
|
||||
RaftIndex: gs.RaftIndex,
|
||||
@ -3155,7 +3242,7 @@ func deleteGatewayServiceTopologyMapping(tx WriteTxn, idx uint64, gs *structs.Ga
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err := tx.DeleteAll(tableMeshTopology, "id", gs.Service, gs.Gateway); err != nil {
|
||||
if _, err := tx.DeleteAll(tableMeshTopology, indexID, gs.Service, gs.Gateway); err != nil {
|
||||
return fmt.Errorf("failed to truncate %s table: %v", tableMeshTopology, err)
|
||||
}
|
||||
if err := indexUpdateMaxTxn(tx, idx, tableMeshTopology); err != nil {
|
||||
@ -3171,7 +3258,7 @@ func truncateGatewayServiceTopologyMappings(tx WriteTxn, idx uint64, gateway str
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err := tx.DeleteAll(tableMeshTopology, "downstream", gateway); err != nil {
|
||||
if _, err := tx.DeleteAll(tableMeshTopology, indexDownstream, gateway); err != nil {
|
||||
return fmt.Errorf("failed to truncate %s table: %v", tableMeshTopology, err)
|
||||
}
|
||||
if err := indexUpdateMaxTxn(tx, idx, tableMeshTopology); err != nil {
|
||||
|
@ -1,6 +1,8 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
@ -21,7 +23,8 @@ type EventPayloadCheckServiceNode struct {
|
||||
// key is used to override the key used to filter the payload. It is set for
|
||||
// events in the connect topic to specify the name of the underlying service
|
||||
// when the change event is for a sidecar or gateway.
|
||||
key string
|
||||
overrideKey string
|
||||
overrideNamespace string
|
||||
}
|
||||
|
||||
func (e EventPayloadCheckServiceNode) HasReadPermission(authz acl.Authorizer) bool {
|
||||
@ -38,17 +41,19 @@ func (e EventPayloadCheckServiceNode) MatchesKey(key, namespace string) bool {
|
||||
}
|
||||
|
||||
name := e.Value.Service.Service
|
||||
if e.key != "" {
|
||||
name = e.key
|
||||
if e.overrideKey != "" {
|
||||
name = e.overrideKey
|
||||
}
|
||||
ns := e.Value.Service.EnterpriseMeta.GetNamespace()
|
||||
return (key == "" || key == name) && (namespace == "" || namespace == ns)
|
||||
ns := e.Value.Service.EnterpriseMeta.NamespaceOrDefault()
|
||||
if e.overrideNamespace != "" {
|
||||
ns = e.overrideNamespace
|
||||
}
|
||||
return (key == "" || strings.EqualFold(key, name)) &&
|
||||
(namespace == "" || strings.EqualFold(namespace, ns))
|
||||
}
|
||||
|
||||
// serviceHealthSnapshot returns a stream.SnapshotFunc that provides a snapshot
|
||||
// of stream.Events that describe the current state of a service health query.
|
||||
//
|
||||
// TODO: no tests for this yet
|
||||
func serviceHealthSnapshot(db ReadDB, topic stream.Topic) stream.SnapshotFunc {
|
||||
return func(req stream.SubscribeRequest, buf stream.SnapshotAppender) (index uint64, err error) {
|
||||
tx := db.ReadTxn()
|
||||
@ -72,15 +77,25 @@ func serviceHealthSnapshot(db ReadDB, topic stream.Topic) stream.SnapshotFunc {
|
||||
},
|
||||
}
|
||||
|
||||
// append each event as a separate item so that they can be serialized
|
||||
// separately, to prevent the encoding of one massive message.
|
||||
buf.Append([]stream.Event{event})
|
||||
if !connect {
|
||||
// append each event as a separate item so that they can be serialized
|
||||
// separately, to prevent the encoding of one massive message.
|
||||
buf.Append([]stream.Event{event})
|
||||
continue
|
||||
}
|
||||
|
||||
events, err := connectEventsByServiceKind(tx, event)
|
||||
if err != nil {
|
||||
return idx, err
|
||||
}
|
||||
buf.Append(events)
|
||||
}
|
||||
|
||||
return idx, err
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: this could use NodeServiceQuery
|
||||
type nodeServiceTuple struct {
|
||||
Node string
|
||||
ServiceID string
|
||||
@ -117,6 +132,7 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
|
||||
|
||||
var nodeChanges map[string]changeType
|
||||
var serviceChanges map[nodeServiceTuple]serviceChange
|
||||
var termGatewayChanges map[structs.ServiceName]map[structs.ServiceName]serviceChange
|
||||
|
||||
markNode := func(node string, typ changeType) {
|
||||
if nodeChanges == nil {
|
||||
@ -154,12 +170,12 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
|
||||
n := changeObject(change).(*structs.Node)
|
||||
markNode(n.Node, changeTypeFromChange(change))
|
||||
|
||||
case "services":
|
||||
case tableServices:
|
||||
sn := changeObject(change).(*structs.ServiceNode)
|
||||
srvChange := serviceChange{changeType: changeTypeFromChange(change), change: change}
|
||||
markService(newNodeServiceTupleFromServiceNode(sn), srvChange)
|
||||
|
||||
case "checks":
|
||||
case tableChecks:
|
||||
// For health we only care about the scope for now to know if it's just
|
||||
// affecting a single service or every service on a node. There is a
|
||||
// subtle edge case where the check with same ID changes from being node
|
||||
@ -195,6 +211,33 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
|
||||
markService(newNodeServiceTupleFromServiceHealthCheck(obj), serviceChangeIndirect)
|
||||
}
|
||||
}
|
||||
case tableGatewayServices:
|
||||
gs := changeObject(change).(*structs.GatewayService)
|
||||
if gs.GatewayKind != structs.ServiceKindTerminatingGateway {
|
||||
continue
|
||||
}
|
||||
|
||||
gsChange := serviceChange{changeType: changeTypeFromChange(change), change: change}
|
||||
|
||||
if termGatewayChanges == nil {
|
||||
termGatewayChanges = make(map[structs.ServiceName]map[structs.ServiceName]serviceChange)
|
||||
}
|
||||
|
||||
_, ok := termGatewayChanges[gs.Gateway]
|
||||
if !ok {
|
||||
termGatewayChanges[gs.Gateway] = map[structs.ServiceName]serviceChange{}
|
||||
}
|
||||
|
||||
switch gsChange.changeType {
|
||||
case changeUpdate:
|
||||
after := gsChange.change.After.(*structs.GatewayService)
|
||||
if gsChange.change.Before.(*structs.GatewayService).IsSame(after) {
|
||||
continue
|
||||
}
|
||||
termGatewayChanges[gs.Gateway][gs.Service] = gsChange
|
||||
case changeDelete, changeCreate:
|
||||
termGatewayChanges[gs.Gateway][gs.Service] = gsChange
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -215,9 +258,6 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
|
||||
}
|
||||
|
||||
for tuple, srvChange := range serviceChanges {
|
||||
// change may be nil if there was a change that _affected_ the service
|
||||
// like a change to checks but it didn't actually change the service
|
||||
// record itself.
|
||||
if srvChange.changeType == changeDelete {
|
||||
sn := srvChange.change.Before.(*structs.ServiceNode)
|
||||
e := newServiceHealthEventDeregister(changes.Index, sn)
|
||||
@ -259,9 +299,65 @@ func ServiceHealthEventsFromChanges(tx ReadTxn, changes Changes) ([]stream.Event
|
||||
events = append(events, e)
|
||||
}
|
||||
|
||||
for gatewayName, serviceChanges := range termGatewayChanges {
|
||||
for serviceName, gsChange := range serviceChanges {
|
||||
gs := changeObject(gsChange.change).(*structs.GatewayService)
|
||||
|
||||
q := Query{Value: gs.Gateway.Name, EnterpriseMeta: gatewayName.EnterpriseMeta}
|
||||
_, nodes, err := serviceNodesTxn(tx, nil, indexService, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Always send deregister events for deletes/updates.
|
||||
if gsChange.changeType != changeCreate {
|
||||
for _, sn := range nodes {
|
||||
e := newServiceHealthEventDeregister(changes.Index, sn)
|
||||
|
||||
e.Topic = topicServiceHealthConnect
|
||||
payload := e.Payload.(EventPayloadCheckServiceNode)
|
||||
payload.overrideKey = serviceName.Name
|
||||
if gatewayName.EnterpriseMeta.NamespaceOrDefault() != serviceName.EnterpriseMeta.NamespaceOrDefault() {
|
||||
payload.overrideNamespace = serviceName.EnterpriseMeta.NamespaceOrDefault()
|
||||
}
|
||||
e.Payload = payload
|
||||
|
||||
events = append(events, e)
|
||||
}
|
||||
}
|
||||
|
||||
if gsChange.changeType == changeDelete {
|
||||
continue
|
||||
}
|
||||
|
||||
// Build service events and append them
|
||||
for _, sn := range nodes {
|
||||
tuple := newNodeServiceTupleFromServiceNode(sn)
|
||||
e, err := newServiceHealthEventForService(tx, changes.Index, tuple)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
e.Topic = topicServiceHealthConnect
|
||||
payload := e.Payload.(EventPayloadCheckServiceNode)
|
||||
payload.overrideKey = serviceName.Name
|
||||
if gatewayName.EnterpriseMeta.NamespaceOrDefault() != serviceName.EnterpriseMeta.NamespaceOrDefault() {
|
||||
payload.overrideNamespace = serviceName.EnterpriseMeta.NamespaceOrDefault()
|
||||
}
|
||||
e.Payload = payload
|
||||
|
||||
events = append(events, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Duplicate any events that affected connect-enabled instances (proxies or
|
||||
// native apps) to the relevant Connect topic.
|
||||
events = append(events, serviceHealthToConnectEvents(events...)...)
|
||||
connectEvents, err := serviceHealthToConnectEvents(tx, events...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
events = append(events, connectEvents...)
|
||||
|
||||
return events, nil
|
||||
}
|
||||
@ -279,7 +375,7 @@ func isConnectProxyDestinationServiceChange(idx uint64, before, after *structs.S
|
||||
e := newServiceHealthEventDeregister(idx, before)
|
||||
e.Topic = topicServiceHealthConnect
|
||||
payload := e.Payload.(EventPayloadCheckServiceNode)
|
||||
payload.key = payload.Value.Service.Proxy.DestinationServiceName
|
||||
payload.overrideKey = payload.Value.Service.Proxy.DestinationServiceName
|
||||
e.Payload = payload
|
||||
return e, true
|
||||
}
|
||||
@ -312,38 +408,81 @@ func changeTypeFromChange(change memdb.Change) changeType {
|
||||
// enabled and so of no interest to those subscribers but also involves
|
||||
// switching connection details to be the proxy instead of the actual instance
|
||||
// in case of a sidecar.
|
||||
func serviceHealthToConnectEvents(events ...stream.Event) []stream.Event {
|
||||
func serviceHealthToConnectEvents(
|
||||
tx ReadTxn,
|
||||
events ...stream.Event,
|
||||
) ([]stream.Event, error) {
|
||||
var result []stream.Event
|
||||
for _, event := range events {
|
||||
if event.Topic != topicServiceHealth {
|
||||
if event.Topic != topicServiceHealth { // event.Topic == topicServiceHealthConnect
|
||||
// Skip non-health or any events already emitted to Connect topic
|
||||
continue
|
||||
}
|
||||
node := getPayloadCheckServiceNode(event.Payload)
|
||||
if node.Service == nil {
|
||||
continue
|
||||
|
||||
connectEvents, err := connectEventsByServiceKind(tx, event)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connectEvent := event
|
||||
connectEvent.Topic = topicServiceHealthConnect
|
||||
|
||||
switch {
|
||||
case node.Service.Connect.Native:
|
||||
result = append(result, connectEvent)
|
||||
|
||||
case node.Service.Kind == structs.ServiceKindConnectProxy:
|
||||
payload := event.Payload.(EventPayloadCheckServiceNode)
|
||||
payload.key = node.Service.Proxy.DestinationServiceName
|
||||
connectEvent.Payload = payload
|
||||
result = append(result, connectEvent)
|
||||
|
||||
default:
|
||||
// ServiceKindTerminatingGateway changes are handled separately.
|
||||
// All other cases are not relevant to the connect topic
|
||||
}
|
||||
result = append(result, connectEvents...)
|
||||
}
|
||||
|
||||
return result
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func connectEventsByServiceKind(tx ReadTxn, origEvent stream.Event) ([]stream.Event, error) {
|
||||
node := getPayloadCheckServiceNode(origEvent.Payload)
|
||||
if node.Service == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
event := origEvent // shallow copy the event
|
||||
event.Topic = topicServiceHealthConnect
|
||||
|
||||
if node.Service.Connect.Native {
|
||||
return []stream.Event{event}, nil
|
||||
}
|
||||
|
||||
switch node.Service.Kind {
|
||||
case structs.ServiceKindConnectProxy:
|
||||
payload := event.Payload.(EventPayloadCheckServiceNode)
|
||||
payload.overrideKey = node.Service.Proxy.DestinationServiceName
|
||||
event.Payload = payload
|
||||
return []stream.Event{event}, nil
|
||||
|
||||
case structs.ServiceKindTerminatingGateway:
|
||||
var result []stream.Event
|
||||
|
||||
sn := structs.ServiceName{
|
||||
Name: node.Service.Service,
|
||||
EnterpriseMeta: node.Service.EnterpriseMeta,
|
||||
}
|
||||
iter, err := tx.Get(tableGatewayServices, indexGateway, sn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// similar to checkServiceNodesTxn -> serviceGatewayNodes
|
||||
for obj := iter.Next(); obj != nil; obj = iter.Next() {
|
||||
result = append(result, copyEventForService(event, obj.(*structs.GatewayService).Service))
|
||||
}
|
||||
return result, nil
|
||||
default:
|
||||
// All other cases are not relevant to the connect topic
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func copyEventForService(event stream.Event, service structs.ServiceName) stream.Event {
|
||||
event.Topic = topicServiceHealthConnect
|
||||
payload := event.Payload.(EventPayloadCheckServiceNode)
|
||||
payload.overrideKey = service.Name
|
||||
if payload.Value.Service.EnterpriseMeta.NamespaceOrDefault() != service.EnterpriseMeta.NamespaceOrDefault() {
|
||||
payload.overrideNamespace = service.EnterpriseMeta.NamespaceOrDefault()
|
||||
}
|
||||
|
||||
event.Payload = payload
|
||||
return event
|
||||
}
|
||||
|
||||
func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNode {
|
||||
@ -359,7 +498,7 @@ func getPayloadCheckServiceNode(payload stream.Payload) *structs.CheckServiceNod
|
||||
// parseCheckServiceNodes but is more efficient since we know they are all on
|
||||
// the same node.
|
||||
func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]stream.Event, error) {
|
||||
services, err := catalogServiceListByNode(tx, node, structs.WildcardEnterpriseMeta(), true)
|
||||
services, err := tx.Get(tableServices, indexNode, Query{Value: node})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -384,7 +523,7 @@ func newServiceHealthEventsForNode(tx ReadTxn, idx uint64, node string) ([]strea
|
||||
// the full list of checks for a specific service on that node.
|
||||
func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, serviceChecksFunc, error) {
|
||||
// Fetch the node
|
||||
nodeRaw, err := tx.First("nodes", "id", node)
|
||||
nodeRaw, err := tx.First(tableNodes, indexID, Query{Value: node})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -393,7 +532,7 @@ func getNodeAndChecks(tx ReadTxn, node string) (*structs.Node, serviceChecksFunc
|
||||
}
|
||||
n := nodeRaw.(*structs.Node)
|
||||
|
||||
iter, err := catalogListChecksByNode(tx, node, structs.WildcardEnterpriseMeta())
|
||||
iter, err := tx.Get(tableChecks, indexNode, Query{Value: node})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -432,7 +571,7 @@ func newServiceHealthEventForService(tx ReadTxn, idx uint64, tuple nodeServiceTu
|
||||
return stream.Event{}, err
|
||||
}
|
||||
|
||||
svc, err := getCompoundWithTxn(tx, "services", "id", &tuple.EntMeta, tuple.Node, tuple.ServiceID)
|
||||
svc, err := tx.Get(tableServices, indexID, NodeServiceQuery{EnterpriseMeta: tuple.EntMeta, Node: tuple.Node, Service: tuple.ServiceID})
|
||||
if err != nil {
|
||||
return stream.Event{}, err
|
||||
}
|
||||
|
7
agent/consul/state/catalog_events_oss_test.go
Normal file
7
agent/consul/state/catalog_events_oss_test.go
Normal file
@ -0,0 +1,7 @@
|
||||
// +build !consulent
|
||||
|
||||
package state
|
||||
|
||||
func withServiceHealthEnterpriseCases(cases []serviceHealthTestCase) []serviceHealthTestCase {
|
||||
return cases
|
||||
}
|
File diff suppressed because it is too large
Load Diff
@ -28,7 +28,7 @@ func serviceKindIndexName(kind structs.ServiceKind, _ *structs.EnterpriseMeta) s
|
||||
|
||||
func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *structs.EnterpriseMeta) error {
|
||||
// overall services index
|
||||
if err := indexUpdateMaxTxn(tx, idx, "services"); err != nil {
|
||||
if err := indexUpdateMaxTxn(tx, idx, tableServices); err != nil {
|
||||
return fmt.Errorf("failed updating index: %s", err)
|
||||
}
|
||||
|
||||
@ -62,7 +62,7 @@ func catalogUpdateServiceExtinctionIndex(tx WriteTxn, idx uint64, _ *structs.Ent
|
||||
|
||||
func catalogInsertService(tx WriteTxn, svc *structs.ServiceNode) error {
|
||||
// Insert the service and update the index
|
||||
if err := tx.Insert("services", svc); err != nil {
|
||||
if err := tx.Insert(tableServices, svc); err != nil {
|
||||
return fmt.Errorf("failed inserting service: %s", err)
|
||||
}
|
||||
|
||||
@ -82,7 +82,7 @@ func catalogInsertService(tx WriteTxn, svc *structs.ServiceNode) error {
|
||||
}
|
||||
|
||||
func catalogServicesMaxIndex(tx ReadTxn, _ *structs.EnterpriseMeta) uint64 {
|
||||
return maxIndexTxn(tx, "services")
|
||||
return maxIndexTxn(tx, tableServices)
|
||||
}
|
||||
|
||||
func catalogServiceMaxIndex(tx ReadTxn, serviceName string, _ *structs.EnterpriseMeta) (<-chan struct{}, interface{}, error) {
|
||||
@ -93,20 +93,12 @@ func catalogServiceKindMaxIndex(tx ReadTxn, ws memdb.WatchSet, kind structs.Serv
|
||||
return maxIndexWatchTxn(tx, ws, serviceKindIndexName(kind, nil))
|
||||
}
|
||||
|
||||
func catalogServiceList(tx ReadTxn, _ *structs.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) {
|
||||
return tx.Get("services", "id")
|
||||
}
|
||||
|
||||
func catalogServiceListByKind(tx ReadTxn, kind structs.ServiceKind, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get("services", "kind", string(kind))
|
||||
func catalogServiceListNoWildcard(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get(tableServices, indexID)
|
||||
}
|
||||
|
||||
func catalogServiceListByNode(tx ReadTxn, node string, _ *structs.EnterpriseMeta, _ bool) (memdb.ResultIterator, error) {
|
||||
return tx.Get("services", "node", node)
|
||||
}
|
||||
|
||||
func catalogServiceNodeList(tx ReadTxn, name string, index string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get("services", index, name)
|
||||
return tx.Get(tableServices, indexNode, Query{Value: node})
|
||||
}
|
||||
|
||||
func catalogServiceLastExtinctionIndex(tx ReadTxn, _ *structs.EnterpriseMeta) (interface{}, error) {
|
||||
@ -115,58 +107,37 @@ func catalogServiceLastExtinctionIndex(tx ReadTxn, _ *structs.EnterpriseMeta) (i
|
||||
|
||||
func catalogMaxIndex(tx ReadTxn, _ *structs.EnterpriseMeta, checks bool) uint64 {
|
||||
if checks {
|
||||
return maxIndexTxn(tx, "nodes", "services", "checks")
|
||||
return maxIndexTxn(tx, "nodes", tableServices, tableChecks)
|
||||
}
|
||||
return maxIndexTxn(tx, "nodes", "services")
|
||||
return maxIndexTxn(tx, "nodes", tableServices)
|
||||
}
|
||||
|
||||
func catalogMaxIndexWatch(tx ReadTxn, ws memdb.WatchSet, _ *structs.EnterpriseMeta, checks bool) uint64 {
|
||||
if checks {
|
||||
return maxIndexWatchTxn(tx, ws, "nodes", "services", "checks")
|
||||
return maxIndexWatchTxn(tx, ws, "nodes", tableServices, tableChecks)
|
||||
}
|
||||
return maxIndexWatchTxn(tx, ws, "nodes", "services")
|
||||
return maxIndexWatchTxn(tx, ws, "nodes", tableServices)
|
||||
}
|
||||
|
||||
func catalogUpdateCheckIndexes(tx WriteTxn, idx uint64, _ *structs.EnterpriseMeta) error {
|
||||
// update the universal index entry
|
||||
if err := tx.Insert(tableIndex, &IndexEntry{"checks", idx}); err != nil {
|
||||
if err := tx.Insert(tableIndex, &IndexEntry{tableChecks, idx}); err != nil {
|
||||
return fmt.Errorf("failed updating index: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func catalogChecksMaxIndex(tx ReadTxn, _ *structs.EnterpriseMeta) uint64 {
|
||||
return maxIndexTxn(tx, "checks")
|
||||
return maxIndexTxn(tx, tableChecks)
|
||||
}
|
||||
|
||||
func catalogListChecksByNode(tx ReadTxn, node string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get("checks", "node", node)
|
||||
}
|
||||
|
||||
func catalogListChecksByService(tx ReadTxn, service string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get("checks", "service", service)
|
||||
}
|
||||
|
||||
func catalogListChecksInState(tx ReadTxn, state string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
// simpler than normal due to the use of the CompoundMultiIndex
|
||||
return tx.Get("checks", "status", state)
|
||||
}
|
||||
|
||||
func catalogListChecks(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get("checks", "id")
|
||||
}
|
||||
|
||||
func catalogListNodeChecks(tx ReadTxn, node string) (memdb.ResultIterator, error) {
|
||||
return tx.Get("checks", "node_service_check", node, false)
|
||||
}
|
||||
|
||||
func catalogListServiceChecks(tx ReadTxn, node string, service string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get("checks", "node_service", node, service)
|
||||
func catalogListChecksByNode(tx ReadTxn, q Query) (memdb.ResultIterator, error) {
|
||||
return tx.Get(tableChecks, indexNode, q)
|
||||
}
|
||||
|
||||
func catalogInsertCheck(tx WriteTxn, chk *structs.HealthCheck, idx uint64) error {
|
||||
// Insert the check
|
||||
if err := tx.Insert("checks", chk); err != nil {
|
||||
if err := tx.Insert(tableChecks, chk); err != nil {
|
||||
return fmt.Errorf("failed inserting check: %s", err)
|
||||
}
|
||||
|
||||
@ -177,10 +148,6 @@ func catalogInsertCheck(tx WriteTxn, chk *structs.HealthCheck, idx uint64) error
|
||||
return nil
|
||||
}
|
||||
|
||||
func catalogChecksForNodeService(tx ReadTxn, node string, service string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get("checks", "node_service", node, service)
|
||||
}
|
||||
|
||||
func validateRegisterRequestTxn(_ ReadTxn, _ *structs.RegisterRequest, _ bool) (*structs.EnterpriseMeta, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
275
agent/consul/state/catalog_oss_test.go
Normal file
275
agent/consul/state/catalog_oss_test.go
Normal file
@ -0,0 +1,275 @@
|
||||
// +build !consulent
|
||||
|
||||
package state
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
func testIndexerTableChecks() map[string]indexerTestCase {
|
||||
obj := &structs.HealthCheck{
|
||||
Node: "NoDe",
|
||||
ServiceID: "SeRvIcE",
|
||||
ServiceName: "ServiceName",
|
||||
CheckID: "CheckID",
|
||||
Status: "PASSING",
|
||||
}
|
||||
return map[string]indexerTestCase{
|
||||
indexID: {
|
||||
read: indexValue{
|
||||
source: NodeCheckQuery{
|
||||
Node: "NoDe",
|
||||
CheckID: "CheckId",
|
||||
},
|
||||
expected: []byte("node\x00checkid\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("node\x00checkid\x00"),
|
||||
},
|
||||
prefix: []indexValue{
|
||||
{
|
||||
source: structs.EnterpriseMeta{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
source: Query{Value: "nOdE"},
|
||||
expected: []byte("node\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
indexStatus: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "PASSING"},
|
||||
expected: []byte("passing\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("passing\x00"),
|
||||
},
|
||||
},
|
||||
indexService: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "ServiceName"},
|
||||
expected: []byte("servicename\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("servicename\x00"),
|
||||
},
|
||||
},
|
||||
indexNodeService: {
|
||||
read: indexValue{
|
||||
source: NodeServiceQuery{
|
||||
Node: "NoDe",
|
||||
Service: "SeRvIcE",
|
||||
},
|
||||
expected: []byte("node\x00service\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("node\x00service\x00"),
|
||||
},
|
||||
},
|
||||
indexNode: {
|
||||
read: indexValue{
|
||||
source: Query{
|
||||
Value: "NoDe",
|
||||
},
|
||||
expected: []byte("node\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("node\x00"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testIndexerTableMeshTopology() map[string]indexerTestCase {
|
||||
obj := upstreamDownstream{
|
||||
Upstream: structs.ServiceName{Name: "UpStReAm"},
|
||||
Downstream: structs.ServiceName{Name: "DownStream"},
|
||||
}
|
||||
|
||||
return map[string]indexerTestCase{
|
||||
indexID: {
|
||||
read: indexValue{
|
||||
source: []interface{}{
|
||||
structs.ServiceName{Name: "UpStReAm"},
|
||||
structs.ServiceName{Name: "DownStream"},
|
||||
},
|
||||
expected: []byte("upstream\x00downstream\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("upstream\x00downstream\x00"),
|
||||
},
|
||||
},
|
||||
indexUpstream: {
|
||||
read: indexValue{
|
||||
source: structs.ServiceName{Name: "UpStReAm"},
|
||||
expected: []byte("upstream\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("upstream\x00"),
|
||||
},
|
||||
},
|
||||
indexDownstream: {
|
||||
read: indexValue{
|
||||
source: structs.ServiceName{Name: "DownStream"},
|
||||
expected: []byte("downstream\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("downstream\x00"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testIndexerTableGatewayServices() map[string]indexerTestCase {
|
||||
obj := &structs.GatewayService{
|
||||
Gateway: structs.ServiceName{Name: "GateWay"},
|
||||
Service: structs.ServiceName{Name: "SerVice"},
|
||||
Port: 50123,
|
||||
}
|
||||
encodedPort := string([]byte{0x96, 0x8f, 0x06, 0, 0, 0, 0, 0, 0, 0})
|
||||
return map[string]indexerTestCase{
|
||||
indexID: {
|
||||
read: indexValue{
|
||||
source: []interface{}{
|
||||
structs.ServiceName{Name: "GateWay"},
|
||||
structs.ServiceName{Name: "SerVice"},
|
||||
50123,
|
||||
},
|
||||
expected: []byte("gateway\x00service\x00" + encodedPort),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("gateway\x00service\x00" + encodedPort),
|
||||
},
|
||||
},
|
||||
indexGateway: {
|
||||
read: indexValue{
|
||||
source: structs.ServiceName{Name: "GateWay"},
|
||||
expected: []byte("gateway\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("gateway\x00"),
|
||||
},
|
||||
},
|
||||
indexService: {
|
||||
read: indexValue{
|
||||
source: structs.ServiceName{Name: "SerVice"},
|
||||
expected: []byte("service\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("service\x00"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testIndexerTableNodes() map[string]indexerTestCase {
|
||||
return map[string]indexerTestCase{
|
||||
indexID: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "NoDeId"},
|
||||
expected: []byte("nodeid\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.Node{Node: "NoDeId"},
|
||||
expected: []byte("nodeid\x00"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testIndexerTableServices() map[string]indexerTestCase {
|
||||
obj := &structs.ServiceNode{
|
||||
Node: "NoDeId",
|
||||
ServiceID: "SeRviCe",
|
||||
ServiceName: "ServiceName",
|
||||
}
|
||||
|
||||
return map[string]indexerTestCase{
|
||||
indexID: {
|
||||
read: indexValue{
|
||||
source: NodeServiceQuery{
|
||||
Node: "NoDeId",
|
||||
Service: "SeRvIcE",
|
||||
},
|
||||
expected: []byte("nodeid\x00service\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("nodeid\x00service\x00"),
|
||||
},
|
||||
prefix: []indexValue{
|
||||
{
|
||||
source: (*structs.EnterpriseMeta)(nil),
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
source: structs.EnterpriseMeta{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
source: Query{Value: "NoDeId"},
|
||||
expected: []byte("nodeid\x00"),
|
||||
},
|
||||
},
|
||||
},
|
||||
indexNode: {
|
||||
read: indexValue{
|
||||
source: Query{
|
||||
Value: "NoDeId",
|
||||
},
|
||||
expected: []byte("nodeid\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("nodeid\x00"),
|
||||
},
|
||||
},
|
||||
indexService: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "ServiceName"},
|
||||
expected: []byte("servicename\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: obj,
|
||||
expected: []byte("servicename\x00"),
|
||||
},
|
||||
},
|
||||
indexConnect: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "ConnectName"},
|
||||
expected: []byte("connectname\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.ServiceNode{
|
||||
ServiceName: "ConnectName",
|
||||
ServiceConnect: structs.ServiceConnect{Native: true},
|
||||
},
|
||||
expected: []byte("connectname\x00"),
|
||||
},
|
||||
},
|
||||
indexKind: {
|
||||
read: indexValue{
|
||||
source: Query{Value: "connect-proxy"},
|
||||
expected: []byte("connect-proxy\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.ServiceNode{
|
||||
ServiceKind: structs.ServiceKindConnectProxy,
|
||||
},
|
||||
expected: []byte("connect-proxy\x00"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
@ -17,17 +17,19 @@ const (
|
||||
tableGatewayServices = "gateway-services"
|
||||
tableMeshTopology = "mesh-topology"
|
||||
|
||||
indexID = "id"
|
||||
indexServiceName = "service"
|
||||
indexConnect = "connect"
|
||||
indexKind = "kind"
|
||||
indexStatus = "status"
|
||||
indexNodeServiceCheck = "node_service_check"
|
||||
indexNodeService = "node_service"
|
||||
indexID = "id"
|
||||
indexService = "service"
|
||||
indexConnect = "connect"
|
||||
indexKind = "kind"
|
||||
indexStatus = "status"
|
||||
indexNodeService = "node_service"
|
||||
indexNode = "node"
|
||||
indexUpstream = "upstream"
|
||||
indexDownstream = "downstream"
|
||||
indexGateway = "gateway"
|
||||
)
|
||||
|
||||
// nodesTableSchema returns a new table schema used for storing node
|
||||
// information.
|
||||
// nodesTableSchema returns a new table schema used for storing struct.Node.
|
||||
func nodesTableSchema() *memdb.TableSchema {
|
||||
return &memdb.TableSchema{
|
||||
Name: tableNodes,
|
||||
@ -36,18 +38,16 @@ func nodesTableSchema() *memdb.TableSchema {
|
||||
Name: indexID,
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: &memdb.StringFieldIndex{
|
||||
Field: "Node",
|
||||
Lowercase: true,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexFromNode,
|
||||
},
|
||||
},
|
||||
"uuid": {
|
||||
Name: "uuid",
|
||||
AllowMissing: true,
|
||||
Unique: true,
|
||||
Indexer: &memdb.UUIDFieldIndex{
|
||||
Field: "ID",
|
||||
},
|
||||
Indexer: &memdb.UUIDFieldIndex{Field: "ID"},
|
||||
},
|
||||
"meta": {
|
||||
Name: "meta",
|
||||
@ -62,6 +62,21 @@ func nodesTableSchema() *memdb.TableSchema {
|
||||
}
|
||||
}
|
||||
|
||||
func indexFromNode(raw interface{}) ([]byte, error) {
|
||||
n, ok := raw.(*structs.Node)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.Node index", raw)
|
||||
}
|
||||
|
||||
if n.Node == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(n.Node))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// servicesTableSchema returns a new table schema used to store information
|
||||
// about services.
|
||||
func servicesTableSchema() *memdb.TableSchema {
|
||||
@ -72,53 +87,156 @@ func servicesTableSchema() *memdb.TableSchema {
|
||||
Name: indexID,
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{
|
||||
Field: "Node",
|
||||
Lowercase: true,
|
||||
},
|
||||
&memdb.StringFieldIndex{
|
||||
Field: "ServiceID",
|
||||
Lowercase: true,
|
||||
},
|
||||
},
|
||||
Indexer: indexerSingleWithPrefix{
|
||||
readIndex: indexFromNodeServiceQuery,
|
||||
writeIndex: indexFromServiceNode,
|
||||
prefixIndex: prefixIndexFromQuery,
|
||||
},
|
||||
},
|
||||
"node": {
|
||||
Name: "node",
|
||||
indexNode: {
|
||||
Name: indexNode,
|
||||
AllowMissing: false,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{
|
||||
Field: "Node",
|
||||
Lowercase: true,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexFromNodeIdentity,
|
||||
},
|
||||
},
|
||||
indexServiceName: {
|
||||
Name: indexServiceName,
|
||||
indexService: {
|
||||
Name: indexService,
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{
|
||||
Field: "ServiceName",
|
||||
Lowercase: true,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexServiceNameFromServiceNode,
|
||||
},
|
||||
},
|
||||
indexConnect: {
|
||||
Name: indexConnect,
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &IndexConnectService{},
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexConnectNameFromServiceNode,
|
||||
},
|
||||
},
|
||||
indexKind: {
|
||||
Name: indexKind,
|
||||
AllowMissing: false,
|
||||
Unique: false,
|
||||
Indexer: &IndexServiceKind{},
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexKindFromServiceNode,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func indexFromNodeServiceQuery(arg interface{}) ([]byte, error) {
|
||||
q, ok := arg.(NodeServiceQuery)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for NodeServiceQuery index", arg)
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(q.Node))
|
||||
b.String(strings.ToLower(q.Service))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func indexFromServiceNode(raw interface{}) ([]byte, error) {
|
||||
n, ok := raw.(*structs.ServiceNode)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.ServiceNode index", raw)
|
||||
}
|
||||
|
||||
if n.Node == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(n.Node))
|
||||
b.String(strings.ToLower(n.ServiceID))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func indexFromNodeIdentity(raw interface{}) ([]byte, error) {
|
||||
n, ok := raw.(interface {
|
||||
NodeIdentity() structs.Identity
|
||||
})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for index, type must provide NodeIdentity()", raw)
|
||||
}
|
||||
|
||||
id := n.NodeIdentity()
|
||||
if id.ID == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(id.ID))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func indexServiceNameFromServiceNode(raw interface{}) ([]byte, error) {
|
||||
n, ok := raw.(*structs.ServiceNode)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.ServiceNode index", raw)
|
||||
}
|
||||
|
||||
if n.Node == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(n.ServiceName))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func indexConnectNameFromServiceNode(raw interface{}) ([]byte, error) {
|
||||
n, ok := raw.(*structs.ServiceNode)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.ServiceNode index", raw)
|
||||
}
|
||||
|
||||
name, ok := connectNameFromServiceNode(n)
|
||||
if !ok {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(name))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func connectNameFromServiceNode(sn *structs.ServiceNode) (string, bool) {
|
||||
switch {
|
||||
case sn.ServiceKind == structs.ServiceKindConnectProxy:
|
||||
// For proxies, this service supports Connect for the destination
|
||||
return sn.ServiceProxy.DestinationServiceName, true
|
||||
|
||||
case sn.ServiceConnect.Native:
|
||||
// For native, this service supports Connect directly
|
||||
return sn.ServiceName, true
|
||||
|
||||
default:
|
||||
// Doesn't support Connect at all
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
func indexKindFromServiceNode(raw interface{}) ([]byte, error) {
|
||||
n, ok := raw.(*structs.ServiceNode)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.ServiceNode index", raw)
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(string(n.ServiceKind)))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// checksTableSchema returns a new table schema used for storing and indexing
|
||||
// health check information. Health checks have a number of different attributes
|
||||
// we want to filter by, so this table is a bit more complex.
|
||||
@ -130,83 +248,130 @@ func checksTableSchema() *memdb.TableSchema {
|
||||
Name: indexID,
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{
|
||||
Field: "Node",
|
||||
Lowercase: true,
|
||||
},
|
||||
&memdb.StringFieldIndex{
|
||||
Field: "CheckID",
|
||||
Lowercase: true,
|
||||
},
|
||||
},
|
||||
Indexer: indexerSingleWithPrefix{
|
||||
readIndex: indexFromNodeCheckQuery,
|
||||
writeIndex: indexFromHealthCheck,
|
||||
prefixIndex: prefixIndexFromQuery,
|
||||
},
|
||||
},
|
||||
indexStatus: {
|
||||
Name: indexStatus,
|
||||
AllowMissing: false,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{
|
||||
Field: "Status",
|
||||
Lowercase: false,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexStatusFromHealthCheck,
|
||||
},
|
||||
},
|
||||
indexServiceName: {
|
||||
Name: indexServiceName,
|
||||
indexService: {
|
||||
Name: indexService,
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{
|
||||
Field: "ServiceName",
|
||||
Lowercase: true,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexServiceNameFromHealthCheck,
|
||||
},
|
||||
},
|
||||
"node": {
|
||||
Name: "node",
|
||||
indexNode: {
|
||||
Name: indexNode,
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{
|
||||
Field: "Node",
|
||||
Lowercase: true,
|
||||
},
|
||||
},
|
||||
indexNodeServiceCheck: {
|
||||
Name: indexNodeServiceCheck,
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{
|
||||
Field: "Node",
|
||||
Lowercase: true,
|
||||
},
|
||||
&memdb.FieldSetIndex{
|
||||
Field: "ServiceID",
|
||||
},
|
||||
},
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromQuery,
|
||||
writeIndex: indexFromNodeIdentity,
|
||||
},
|
||||
},
|
||||
indexNodeService: {
|
||||
Name: indexNodeService,
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{
|
||||
Field: "Node",
|
||||
Lowercase: true,
|
||||
},
|
||||
&memdb.StringFieldIndex{
|
||||
Field: "ServiceID",
|
||||
Lowercase: true,
|
||||
},
|
||||
},
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromNodeServiceQuery,
|
||||
writeIndex: indexNodeServiceFromHealthCheck,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func indexFromNodeCheckQuery(raw interface{}) ([]byte, error) {
|
||||
hc, ok := raw.(NodeCheckQuery)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for NodeCheckQuery index", raw)
|
||||
}
|
||||
|
||||
if hc.Node == "" || hc.CheckID == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(hc.Node))
|
||||
b.String(strings.ToLower(hc.CheckID))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func indexFromHealthCheck(raw interface{}) ([]byte, error) {
|
||||
hc, ok := raw.(*structs.HealthCheck)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.HealthCheck index", raw)
|
||||
}
|
||||
|
||||
if hc.Node == "" || hc.CheckID == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(hc.Node))
|
||||
b.String(strings.ToLower(string(hc.CheckID)))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func indexNodeServiceFromHealthCheck(raw interface{}) ([]byte, error) {
|
||||
hc, ok := raw.(*structs.HealthCheck)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.HealthCheck index", raw)
|
||||
}
|
||||
|
||||
if hc.Node == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(hc.Node))
|
||||
b.String(strings.ToLower(hc.ServiceID))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func indexStatusFromHealthCheck(raw interface{}) ([]byte, error) {
|
||||
hc, ok := raw.(*structs.HealthCheck)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.HealthCheck index", raw)
|
||||
}
|
||||
|
||||
if hc.Status == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(hc.Status))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func indexServiceNameFromHealthCheck(raw interface{}) ([]byte, error) {
|
||||
hc, ok := raw.(*structs.HealthCheck)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for structs.HealthCheck index", raw)
|
||||
}
|
||||
|
||||
if hc.ServiceName == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(hc.ServiceName))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// gatewayServicesTableSchema returns a new table schema used to store information
|
||||
// about services associated with terminating gateways.
|
||||
func gatewayServicesTableSchema() *memdb.TableSchema {
|
||||
@ -231,16 +396,16 @@ func gatewayServicesTableSchema() *memdb.TableSchema {
|
||||
},
|
||||
},
|
||||
},
|
||||
"gateway": {
|
||||
Name: "gateway",
|
||||
indexGateway: {
|
||||
Name: indexGateway,
|
||||
AllowMissing: false,
|
||||
Unique: false,
|
||||
Indexer: &ServiceNameIndex{
|
||||
Field: "Gateway",
|
||||
},
|
||||
},
|
||||
"service": {
|
||||
Name: "service",
|
||||
indexService: {
|
||||
Name: indexService,
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &ServiceNameIndex{
|
||||
@ -272,16 +437,16 @@ func meshTopologyTableSchema() *memdb.TableSchema {
|
||||
},
|
||||
},
|
||||
},
|
||||
"upstream": {
|
||||
Name: "upstream",
|
||||
indexUpstream: {
|
||||
Name: indexUpstream,
|
||||
AllowMissing: true,
|
||||
Unique: false,
|
||||
Indexer: &ServiceNameIndex{
|
||||
Field: "Upstream",
|
||||
},
|
||||
},
|
||||
"downstream": {
|
||||
Name: "downstream",
|
||||
indexDownstream: {
|
||||
Name: indexDownstream,
|
||||
AllowMissing: false,
|
||||
Unique: false,
|
||||
Indexer: &ServiceNameIndex{
|
||||
@ -347,3 +512,31 @@ func (index *ServiceNameIndex) PrefixFromArgs(args ...interface{}) ([]byte, erro
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// upstreamDownstream pairs come from individual proxy registrations, which can be updated independently.
|
||||
type upstreamDownstream struct {
|
||||
Upstream structs.ServiceName
|
||||
Downstream structs.ServiceName
|
||||
|
||||
// Refs stores the registrations that contain this pairing.
|
||||
// When there are no remaining Refs, the upstreamDownstream can be deleted.
|
||||
//
|
||||
// Note: This map must be treated as immutable when accessed in MemDB.
|
||||
// The entire upstreamDownstream structure must be deep copied on updates.
|
||||
Refs map[string]struct{}
|
||||
|
||||
structs.RaftIndex
|
||||
}
|
||||
|
||||
// NodeCheckQuery is used to query the ID index of the checks table.
|
||||
type NodeCheckQuery struct {
|
||||
Node string
|
||||
CheckID string
|
||||
structs.EnterpriseMeta
|
||||
}
|
||||
|
||||
// NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer
|
||||
// receiver for this method. Remove once that is fixed.
|
||||
func (q NodeCheckQuery) NamespaceOrDefault() string {
|
||||
return q.EnterpriseMeta.NamespaceOrDefault()
|
||||
}
|
||||
|
@ -1298,7 +1298,7 @@ func TestStateStore_DeleteNode(t *testing.T) {
|
||||
// the DB to make sure it is actually gone.
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
services, err := getCompoundWithTxn(tx, "services", "id", nil, "node1", "service1")
|
||||
services, err := tx.Get(tableServices, indexID, NodeServiceQuery{Node: "node1", Service: "service1"})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -1307,7 +1307,7 @@ func TestStateStore_DeleteNode(t *testing.T) {
|
||||
}
|
||||
|
||||
// Associated health check was removed.
|
||||
checks, err := getCompoundWithTxn(tx, "checks", "id", nil, "node1", "check1")
|
||||
checks, err := tx.Get(tableChecks, indexID, NodeCheckQuery{Node: "node1", CheckID: "check1"})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -1316,7 +1316,7 @@ func TestStateStore_DeleteNode(t *testing.T) {
|
||||
}
|
||||
|
||||
// Indexes were updated.
|
||||
for _, tbl := range []string{"nodes", "services", "checks"} {
|
||||
for _, tbl := range []string{"nodes", tableServices, tableChecks} {
|
||||
if idx := s.maxIndex(tbl); idx != 3 {
|
||||
t.Fatalf("bad index: %d (%s)", idx, tbl)
|
||||
}
|
||||
@ -1479,7 +1479,7 @@ func TestStateStore_EnsureService(t *testing.T) {
|
||||
}
|
||||
|
||||
// Index tables were updated.
|
||||
if idx := s.maxIndex("services"); idx != 30 {
|
||||
if idx := s.maxIndex(tableServices); idx != 30 {
|
||||
t.Fatalf("bad index: %d", idx)
|
||||
}
|
||||
|
||||
@ -1510,7 +1510,7 @@ func TestStateStore_EnsureService(t *testing.T) {
|
||||
}
|
||||
|
||||
// Index tables were updated.
|
||||
if idx := s.maxIndex("services"); idx != 40 {
|
||||
if idx := s.maxIndex(tableServices); idx != 40 {
|
||||
t.Fatalf("bad index: %d", idx)
|
||||
}
|
||||
}
|
||||
@ -2067,16 +2067,16 @@ func TestStateStore_DeleteService(t *testing.T) {
|
||||
// that it actually is removed in the state store.
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
_, check, err := firstWatchCompoundWithTxn(tx, "checks", "id", nil, "node1", "check1")
|
||||
check, err := tx.First(tableChecks, indexID, NodeCheckQuery{Node: "node1", CheckID: "check1"})
|
||||
if err != nil || check != nil {
|
||||
t.Fatalf("bad: %#v (err: %s)", check, err)
|
||||
}
|
||||
|
||||
// Index tables were updated.
|
||||
if idx := s.maxIndex("services"); idx != 4 {
|
||||
if idx := s.maxIndex(tableServices); idx != 4 {
|
||||
t.Fatalf("bad index: %d", idx)
|
||||
}
|
||||
if idx := s.maxIndex("checks"); idx != 4 {
|
||||
if idx := s.maxIndex(tableChecks); idx != 4 {
|
||||
t.Fatalf("bad index: %d", idx)
|
||||
}
|
||||
|
||||
@ -2085,7 +2085,7 @@ func TestStateStore_DeleteService(t *testing.T) {
|
||||
if err := s.DeleteService(5, "node1", "service1", nil); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if idx := s.maxIndex("services"); idx != 4 {
|
||||
if idx := s.maxIndex(tableServices); idx != 4 {
|
||||
t.Fatalf("bad index: %d", idx)
|
||||
}
|
||||
if watchFired(ws) {
|
||||
@ -2411,7 +2411,7 @@ func TestStateStore_EnsureCheck(t *testing.T) {
|
||||
testCheckOutput(t, 5, 5, "bbbmodified")
|
||||
|
||||
// Index tables were updated
|
||||
if idx := s.maxIndex("checks"); idx != 5 {
|
||||
if idx := s.maxIndex(tableChecks); idx != 5 {
|
||||
t.Fatalf("bad index: %d", idx)
|
||||
}
|
||||
}
|
||||
@ -2894,7 +2894,7 @@ func TestStateStore_DeleteCheck(t *testing.T) {
|
||||
if idx, check, err := s.NodeCheck("node1", "check1", nil); idx != 3 || err != nil || check != nil {
|
||||
t.Fatalf("Node check should have been deleted idx=%d, node=%v, err=%s", idx, check, err)
|
||||
}
|
||||
if idx := s.maxIndex("checks"); idx != 3 {
|
||||
if idx := s.maxIndex(tableChecks); idx != 3 {
|
||||
t.Fatalf("bad index for checks: %d", idx)
|
||||
}
|
||||
if !watchFired(ws) {
|
||||
@ -2914,7 +2914,7 @@ func TestStateStore_DeleteCheck(t *testing.T) {
|
||||
}
|
||||
|
||||
// Index tables were updated.
|
||||
if idx := s.maxIndex("checks"); idx != 3 {
|
||||
if idx := s.maxIndex(tableChecks); idx != 3 {
|
||||
t.Fatalf("bad index: %d", idx)
|
||||
}
|
||||
|
||||
@ -2923,7 +2923,7 @@ func TestStateStore_DeleteCheck(t *testing.T) {
|
||||
if err := s.DeleteCheck(4, "node1", "check1", nil); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if idx := s.maxIndex("checks"); idx != 3 {
|
||||
if idx := s.maxIndex(tableChecks); idx != 3 {
|
||||
t.Fatalf("bad index: %d", idx)
|
||||
}
|
||||
if watchFired(ws) {
|
||||
|
@ -106,7 +106,7 @@ func configEntryTxn(tx ReadTxn, ws memdb.WatchSet, kind, name string, entMeta *s
|
||||
idx := maxIndexTxn(tx, tableConfigEntries)
|
||||
|
||||
// Get the existing config entry.
|
||||
watchCh, existing, err := firstWatchConfigEntryWithTxn(tx, kind, name, entMeta)
|
||||
watchCh, existing, err := tx.FirstWatch(tableConfigEntries, "id", NewConfigEntryKindName(kind, name, entMeta))
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed config entry lookup: %s", err)
|
||||
}
|
||||
@ -175,7 +175,7 @@ func (s *Store) EnsureConfigEntry(idx uint64, conf structs.ConfigEntry) error {
|
||||
// ensureConfigEntryTxn upserts a config entry inside of a transaction.
|
||||
func ensureConfigEntryTxn(tx WriteTxn, idx uint64, conf structs.ConfigEntry) error {
|
||||
// Check for existing configuration.
|
||||
existing, err := firstConfigEntryWithTxn(tx, conf.GetKind(), conf.GetName(), conf.GetEnterpriseMeta())
|
||||
existing, err := tx.First(tableConfigEntries, indexID, newConfigEntryQuery(conf))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed configuration lookup: %s", err)
|
||||
}
|
||||
@ -214,7 +214,7 @@ func (s *Store) EnsureConfigEntryCAS(idx, cidx uint64, conf structs.ConfigEntry)
|
||||
defer tx.Abort()
|
||||
|
||||
// Check for existing configuration.
|
||||
existing, err := firstConfigEntryWithTxn(tx, conf.GetKind(), conf.GetName(), conf.GetEnterpriseMeta())
|
||||
existing, err := tx.First(tableConfigEntries, indexID, newConfigEntryQuery(conf))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed configuration lookup: %s", err)
|
||||
}
|
||||
@ -254,9 +254,9 @@ func (s *Store) DeleteConfigEntry(idx uint64, kind, name string, entMeta *struct
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// TODO: accept structs.ConfigEntry instead of individual fields
|
||||
func deleteConfigEntryTxn(tx WriteTxn, idx uint64, kind, name string, entMeta *structs.EnterpriseMeta) error {
|
||||
// Try to retrieve the existing config entry.
|
||||
existing, err := firstConfigEntryWithTxn(tx, kind, name, entMeta)
|
||||
existing, err := tx.First(tableConfigEntries, indexID, NewConfigEntryKindName(kind, name, entMeta))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed config entry lookup: %s", err)
|
||||
}
|
||||
@ -269,7 +269,7 @@ func deleteConfigEntryTxn(tx WriteTxn, idx uint64, kind, name string, entMeta *s
|
||||
sn := structs.NewServiceName(name, entMeta)
|
||||
|
||||
if kind == structs.TerminatingGateway || kind == structs.IngressGateway {
|
||||
if _, err := tx.DeleteAll(tableGatewayServices, "gateway", sn); err != nil {
|
||||
if _, err := tx.DeleteAll(tableGatewayServices, indexGateway, sn); err != nil {
|
||||
return fmt.Errorf("failed to truncate gateway services table: %v", err)
|
||||
}
|
||||
if err := indexUpdateMaxTxn(tx, idx, tableGatewayServices); err != nil {
|
||||
@ -278,7 +278,7 @@ func deleteConfigEntryTxn(tx WriteTxn, idx uint64, kind, name string, entMeta *s
|
||||
}
|
||||
// Also clean up associations in the mesh topology table for ingress gateways
|
||||
if kind == structs.IngressGateway {
|
||||
if _, err := tx.DeleteAll(tableMeshTopology, "downstream", sn); err != nil {
|
||||
if _, err := tx.DeleteAll(tableMeshTopology, indexDownstream, sn); err != nil {
|
||||
return fmt.Errorf("failed to truncate %s table: %v", tableMeshTopology, err)
|
||||
}
|
||||
if err := indexUpdateMaxTxn(tx, idx, tableMeshTopology); err != nil {
|
||||
@ -629,8 +629,8 @@ func validateProposedConfigEntryInServiceGraph(
|
||||
checkChains[sn.ToServiceID()] = struct{}{}
|
||||
}
|
||||
|
||||
overrides := map[structs.ConfigEntryKindName]structs.ConfigEntry{
|
||||
structs.NewConfigEntryKindName(kind, name, entMeta): proposedEntry,
|
||||
overrides := map[ConfigEntryKindName]structs.ConfigEntry{
|
||||
NewConfigEntryKindName(kind, name, entMeta): proposedEntry,
|
||||
}
|
||||
|
||||
var (
|
||||
@ -709,7 +709,7 @@ func validateProposedConfigEntryInServiceGraph(
|
||||
func testCompileDiscoveryChain(
|
||||
tx ReadTxn,
|
||||
chainName string,
|
||||
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
|
||||
overrides map[ConfigEntryKindName]structs.ConfigEntry,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
) (string, *structs.DiscoveryGraphNode, error) {
|
||||
_, speculativeEntries, err := readDiscoveryChainConfigEntriesTxn(tx, nil, chainName, overrides, entMeta)
|
||||
@ -815,7 +815,7 @@ func (s *Store) ReadDiscoveryChainConfigEntries(
|
||||
func (s *Store) readDiscoveryChainConfigEntries(
|
||||
ws memdb.WatchSet,
|
||||
serviceName string,
|
||||
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
|
||||
overrides map[ConfigEntryKindName]structs.ConfigEntry,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
) (uint64, *structs.DiscoveryChainConfigEntries, error) {
|
||||
tx := s.db.Txn(false)
|
||||
@ -827,7 +827,7 @@ func readDiscoveryChainConfigEntriesTxn(
|
||||
tx ReadTxn,
|
||||
ws memdb.WatchSet,
|
||||
serviceName string,
|
||||
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
|
||||
overrides map[ConfigEntryKindName]structs.ConfigEntry,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
) (uint64, *structs.DiscoveryChainConfigEntries, error) {
|
||||
res := structs.NewDiscoveryChainConfigEntries()
|
||||
@ -1016,7 +1016,7 @@ func getProxyConfigEntryTxn(
|
||||
tx ReadTxn,
|
||||
ws memdb.WatchSet,
|
||||
name string,
|
||||
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
|
||||
overrides map[ConfigEntryKindName]structs.ConfigEntry,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
) (uint64, *structs.ProxyConfigEntry, error) {
|
||||
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ProxyDefaults, name, overrides, entMeta)
|
||||
@ -1041,7 +1041,7 @@ func getServiceConfigEntryTxn(
|
||||
tx ReadTxn,
|
||||
ws memdb.WatchSet,
|
||||
serviceName string,
|
||||
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
|
||||
overrides map[ConfigEntryKindName]structs.ConfigEntry,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
) (uint64, *structs.ServiceConfigEntry, error) {
|
||||
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceDefaults, serviceName, overrides, entMeta)
|
||||
@ -1066,7 +1066,7 @@ func getRouterConfigEntryTxn(
|
||||
tx ReadTxn,
|
||||
ws memdb.WatchSet,
|
||||
serviceName string,
|
||||
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
|
||||
overrides map[ConfigEntryKindName]structs.ConfigEntry,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
) (uint64, *structs.ServiceRouterConfigEntry, error) {
|
||||
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceRouter, serviceName, overrides, entMeta)
|
||||
@ -1091,7 +1091,7 @@ func getSplitterConfigEntryTxn(
|
||||
tx ReadTxn,
|
||||
ws memdb.WatchSet,
|
||||
serviceName string,
|
||||
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
|
||||
overrides map[ConfigEntryKindName]structs.ConfigEntry,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
) (uint64, *structs.ServiceSplitterConfigEntry, error) {
|
||||
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceSplitter, serviceName, overrides, entMeta)
|
||||
@ -1116,7 +1116,7 @@ func getResolverConfigEntryTxn(
|
||||
tx ReadTxn,
|
||||
ws memdb.WatchSet,
|
||||
serviceName string,
|
||||
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
|
||||
overrides map[ConfigEntryKindName]structs.ConfigEntry,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
) (uint64, *structs.ServiceResolverConfigEntry, error) {
|
||||
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceResolver, serviceName, overrides, entMeta)
|
||||
@ -1141,7 +1141,7 @@ func getServiceIntentionsConfigEntryTxn(
|
||||
tx ReadTxn,
|
||||
ws memdb.WatchSet,
|
||||
name string,
|
||||
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
|
||||
overrides map[ConfigEntryKindName]structs.ConfigEntry,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
) (uint64, *structs.ServiceIntentionsConfigEntry, error) {
|
||||
idx, entry, err := configEntryWithOverridesTxn(tx, ws, structs.ServiceIntentions, name, overrides, entMeta)
|
||||
@ -1163,11 +1163,11 @@ func configEntryWithOverridesTxn(
|
||||
ws memdb.WatchSet,
|
||||
kind string,
|
||||
name string,
|
||||
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
|
||||
overrides map[ConfigEntryKindName]structs.ConfigEntry,
|
||||
entMeta *structs.EnterpriseMeta,
|
||||
) (uint64, structs.ConfigEntry, error) {
|
||||
if len(overrides) > 0 {
|
||||
kn := structs.NewConfigEntryKindName(kind, name, entMeta)
|
||||
kn := NewConfigEntryKindName(kind, name, entMeta)
|
||||
entry, ok := overrides[kn]
|
||||
if ok {
|
||||
return 0, entry, nil // a nil entry implies it should act like it is erased
|
||||
@ -1218,3 +1218,37 @@ func protocolForService(
|
||||
}
|
||||
return maxIdx, chain.Protocol, nil
|
||||
}
|
||||
|
||||
// ConfigEntryKindName is a value type useful for maps. You can use:
|
||||
// map[ConfigEntryKindName]Payload
|
||||
// instead of:
|
||||
// map[string]map[string]Payload
|
||||
type ConfigEntryKindName struct {
|
||||
Kind string
|
||||
Name string
|
||||
structs.EnterpriseMeta
|
||||
}
|
||||
|
||||
func NewConfigEntryKindName(kind, name string, entMeta *structs.EnterpriseMeta) ConfigEntryKindName {
|
||||
ret := ConfigEntryKindName{
|
||||
Kind: kind,
|
||||
Name: name,
|
||||
}
|
||||
if entMeta == nil {
|
||||
entMeta = structs.DefaultEnterpriseMeta()
|
||||
}
|
||||
|
||||
ret.EnterpriseMeta = *entMeta
|
||||
ret.EnterpriseMeta.Normalize()
|
||||
return ret
|
||||
}
|
||||
|
||||
func newConfigEntryQuery(c structs.ConfigEntry) ConfigEntryKindName {
|
||||
return NewConfigEntryKindName(c.GetKind(), c.GetName(), c.GetEnterpriseMeta())
|
||||
}
|
||||
|
||||
// ConfigEntryKindQuery is used to lookup config entries by their kind.
|
||||
type ConfigEntryKindQuery struct {
|
||||
Kind string
|
||||
structs.EnterpriseMeta
|
||||
}
|
||||
|
@ -123,7 +123,7 @@ func (s *ServiceIntentionSourceIndex) FromArgs(args ...interface{}) ([]byte, err
|
||||
return []byte(arg.String() + "\x00"), nil
|
||||
}
|
||||
|
||||
func (s *Store) configIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) {
|
||||
func configIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) {
|
||||
// unrolled part of configEntriesByKindTxn
|
||||
|
||||
idx := maxIndexTxn(tx, tableConfigEntries)
|
||||
@ -144,7 +144,7 @@ func (s *Store) configIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *
|
||||
return idx, results, true, nil
|
||||
}
|
||||
|
||||
func (s *Store) configIntentionGetTxn(tx ReadTxn, ws memdb.WatchSet, id string) (uint64, *structs.ServiceIntentionsConfigEntry, *structs.Intention, error) {
|
||||
func configIntentionGetTxn(tx ReadTxn, ws memdb.WatchSet, id string) (uint64, *structs.ServiceIntentionsConfigEntry, *structs.Intention, error) {
|
||||
idx := maxIndexTxn(tx, tableConfigEntries)
|
||||
if idx < 1 {
|
||||
idx = 1
|
||||
|
@ -3,22 +3,24 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
func firstConfigEntryWithTxn(tx ReadTxn, kind, name string, _ *structs.EnterpriseMeta) (interface{}, error) {
|
||||
return tx.First(tableConfigEntries, "id", kind, name)
|
||||
}
|
||||
func indexFromConfigEntryKindName(arg interface{}) ([]byte, error) {
|
||||
n, ok := arg.(ConfigEntryKindName)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid type for ConfigEntryKindName query: %T", arg)
|
||||
}
|
||||
|
||||
func firstWatchConfigEntryWithTxn(
|
||||
tx ReadTxn,
|
||||
kind string,
|
||||
name string,
|
||||
_ *structs.EnterpriseMeta,
|
||||
) (<-chan struct{}, interface{}, error) {
|
||||
return tx.FirstWatch(tableConfigEntries, "id", kind, name)
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(n.Kind))
|
||||
b.String(strings.ToLower(n.Name))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func validateConfigEntryEnterprise(_ ReadTxn, _ structs.ConfigEntry) error {
|
||||
@ -26,11 +28,11 @@ func validateConfigEntryEnterprise(_ ReadTxn, _ structs.ConfigEntry) error {
|
||||
}
|
||||
|
||||
func getAllConfigEntriesWithTxn(tx ReadTxn, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get(tableConfigEntries, "id")
|
||||
return tx.Get(tableConfigEntries, indexID)
|
||||
}
|
||||
|
||||
func getConfigEntryKindsWithTxn(tx ReadTxn, kind string, _ *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
return tx.Get(tableConfigEntries, "kind", kind)
|
||||
return tx.Get(tableConfigEntries, indexKind, ConfigEntryKindQuery{Kind: kind})
|
||||
}
|
||||
|
||||
func configIntentionsConvertToList(iter memdb.ResultIterator, _ *structs.EnterpriseMeta) structs.Intentions {
|
||||
|
35
agent/consul/state/config_entry_oss_test.go
Normal file
35
agent/consul/state/config_entry_oss_test.go
Normal file
@ -0,0 +1,35 @@
|
||||
// +build !consulent
|
||||
|
||||
package state
|
||||
|
||||
import "github.com/hashicorp/consul/agent/structs"
|
||||
|
||||
func testIndexerTableConfigEntries() map[string]indexerTestCase {
|
||||
return map[string]indexerTestCase{
|
||||
indexID: {
|
||||
read: indexValue{
|
||||
source: ConfigEntryKindName{
|
||||
Kind: "Proxy-Defaults",
|
||||
Name: "NaMe",
|
||||
},
|
||||
expected: []byte("proxy-defaults\x00name\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.ProxyConfigEntry{Name: "NaMe"},
|
||||
expected: []byte("proxy-defaults\x00name\x00"),
|
||||
},
|
||||
},
|
||||
indexKind: {
|
||||
read: indexValue{
|
||||
source: ConfigEntryKindQuery{
|
||||
Kind: "Service-Defaults",
|
||||
},
|
||||
expected: []byte("service-defaults\x00"),
|
||||
},
|
||||
write: indexValue{
|
||||
source: &structs.ServiceConfigEntry{},
|
||||
expected: []byte("service-defaults\x00"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
@ -1,6 +1,13 @@
|
||||
package state
|
||||
|
||||
import "github.com/hashicorp/go-memdb"
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/go-memdb"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
const (
|
||||
tableConfigEntries = "config-entries"
|
||||
@ -20,26 +27,19 @@ func configTableSchema() *memdb.TableSchema {
|
||||
Name: indexID,
|
||||
AllowMissing: false,
|
||||
Unique: true,
|
||||
Indexer: &memdb.CompoundIndex{
|
||||
Indexes: []memdb.Indexer{
|
||||
&memdb.StringFieldIndex{
|
||||
Field: "Kind",
|
||||
Lowercase: true,
|
||||
},
|
||||
&memdb.StringFieldIndex{
|
||||
Field: "Name",
|
||||
Lowercase: true,
|
||||
},
|
||||
},
|
||||
Indexer: indexerSingleWithPrefix{
|
||||
readIndex: indexFromConfigEntryKindName,
|
||||
writeIndex: indexFromConfigEntry,
|
||||
prefixIndex: indexFromConfigEntryKindName,
|
||||
},
|
||||
},
|
||||
indexKind: {
|
||||
Name: indexKind,
|
||||
AllowMissing: false,
|
||||
Unique: false,
|
||||
Indexer: &memdb.StringFieldIndex{
|
||||
Field: "Kind",
|
||||
Lowercase: true,
|
||||
Indexer: indexerSingle{
|
||||
readIndex: indexFromConfigEntryKindQuery,
|
||||
writeIndex: indexKindFromConfigEntry,
|
||||
},
|
||||
},
|
||||
indexLink: {
|
||||
@ -63,3 +63,47 @@ func configTableSchema() *memdb.TableSchema {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func indexFromConfigEntry(raw interface{}) ([]byte, error) {
|
||||
c, ok := raw.(structs.ConfigEntry)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("type must be structs.ConfigEntry: %T", raw)
|
||||
}
|
||||
|
||||
if c.GetName() == "" || c.GetKind() == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(c.GetKind()))
|
||||
b.String(strings.ToLower(c.GetName()))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func indexFromConfigEntryKindQuery(raw interface{}) ([]byte, error) {
|
||||
q, ok := raw.(ConfigEntryKindQuery)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("type must be ConfigEntryKindQuery: %T", raw)
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(q.Kind))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// indexKindFromConfigEntry indexes kinds without a namespace for any config
|
||||
// entries that span all namespaces.
|
||||
func indexKindFromConfigEntry(raw interface{}) ([]byte, error) {
|
||||
c, ok := raw.(structs.ConfigEntry)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("type must be structs.ConfigEntry: %T", raw)
|
||||
}
|
||||
|
||||
if c.GetKind() == "" {
|
||||
return nil, errMissingValueForIndex
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(c.GetKind()))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
@ -962,9 +962,9 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
entries []structs.ConfigEntry
|
||||
expectBefore []structs.ConfigEntryKindName
|
||||
overrides map[structs.ConfigEntryKindName]structs.ConfigEntry
|
||||
expectAfter []structs.ConfigEntryKindName
|
||||
expectBefore []ConfigEntryKindName
|
||||
overrides map[ConfigEntryKindName]structs.ConfigEntry
|
||||
expectAfter []ConfigEntryKindName
|
||||
expectAfterErr string
|
||||
checkAfter func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries)
|
||||
}{
|
||||
@ -977,13 +977,13 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
|
||||
Protocol: "tcp",
|
||||
},
|
||||
},
|
||||
expectBefore: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
expectBefore: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
},
|
||||
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): nil,
|
||||
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): nil,
|
||||
},
|
||||
expectAfter: []structs.ConfigEntryKindName{
|
||||
expectAfter: []ConfigEntryKindName{
|
||||
// nothing
|
||||
},
|
||||
},
|
||||
@ -996,18 +996,18 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
|
||||
Protocol: "tcp",
|
||||
},
|
||||
},
|
||||
expectBefore: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
expectBefore: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
},
|
||||
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): &structs.ServiceConfigEntry{
|
||||
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil): &structs.ServiceConfigEntry{
|
||||
Kind: structs.ServiceDefaults,
|
||||
Name: "main",
|
||||
Protocol: "grpc",
|
||||
},
|
||||
},
|
||||
expectAfter: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
expectAfter: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
},
|
||||
checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) {
|
||||
defaults := entrySet.GetService(structs.NewServiceID("main", nil))
|
||||
@ -1029,15 +1029,15 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
|
||||
Name: "main",
|
||||
},
|
||||
},
|
||||
expectBefore: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
|
||||
expectBefore: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
|
||||
},
|
||||
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
|
||||
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil): nil,
|
||||
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
|
||||
NewConfigEntryKindName(structs.ServiceRouter, "main", nil): nil,
|
||||
},
|
||||
expectAfter: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
expectAfter: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -1074,13 +1074,13 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
expectBefore: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
|
||||
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
|
||||
expectBefore: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
|
||||
NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
|
||||
},
|
||||
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
|
||||
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil): &structs.ServiceRouterConfigEntry{
|
||||
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
|
||||
NewConfigEntryKindName(structs.ServiceRouter, "main", nil): &structs.ServiceRouterConfigEntry{
|
||||
Kind: structs.ServiceRouter,
|
||||
Name: "main",
|
||||
Routes: []structs.ServiceRoute{
|
||||
@ -1097,10 +1097,10 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
expectAfter: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
|
||||
structs.NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
|
||||
expectAfter: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
|
||||
NewConfigEntryKindName(structs.ServiceRouter, "main", nil),
|
||||
},
|
||||
checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) {
|
||||
router := entrySet.GetRouter(structs.NewServiceID("main", nil))
|
||||
@ -1137,15 +1137,15 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
expectBefore: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
|
||||
expectBefore: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
|
||||
},
|
||||
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
|
||||
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): nil,
|
||||
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
|
||||
NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): nil,
|
||||
},
|
||||
expectAfter: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
expectAfter: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
@ -1164,12 +1164,12 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
expectBefore: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
|
||||
expectBefore: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
|
||||
},
|
||||
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
|
||||
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): &structs.ServiceSplitterConfigEntry{
|
||||
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
|
||||
NewConfigEntryKindName(structs.ServiceSplitter, "main", nil): &structs.ServiceSplitterConfigEntry{
|
||||
Kind: structs.ServiceSplitter,
|
||||
Name: "main",
|
||||
Splits: []structs.ServiceSplit{
|
||||
@ -1178,9 +1178,9 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
expectAfter: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
structs.NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
|
||||
expectAfter: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceDefaults, "main", nil),
|
||||
NewConfigEntryKindName(structs.ServiceSplitter, "main", nil),
|
||||
},
|
||||
checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) {
|
||||
splitter := entrySet.GetSplitter(structs.NewServiceID("main", nil))
|
||||
@ -1203,13 +1203,13 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
|
||||
Name: "main",
|
||||
},
|
||||
},
|
||||
expectBefore: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
|
||||
expectBefore: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
|
||||
},
|
||||
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
|
||||
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil): nil,
|
||||
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
|
||||
NewConfigEntryKindName(structs.ServiceResolver, "main", nil): nil,
|
||||
},
|
||||
expectAfter: []structs.ConfigEntryKindName{
|
||||
expectAfter: []ConfigEntryKindName{
|
||||
// nothing
|
||||
},
|
||||
},
|
||||
@ -1221,18 +1221,18 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
|
||||
Name: "main",
|
||||
},
|
||||
},
|
||||
expectBefore: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
|
||||
expectBefore: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
|
||||
},
|
||||
overrides: map[structs.ConfigEntryKindName]structs.ConfigEntry{
|
||||
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil): &structs.ServiceResolverConfigEntry{
|
||||
overrides: map[ConfigEntryKindName]structs.ConfigEntry{
|
||||
NewConfigEntryKindName(structs.ServiceResolver, "main", nil): &structs.ServiceResolverConfigEntry{
|
||||
Kind: structs.ServiceResolver,
|
||||
Name: "main",
|
||||
ConnectTimeout: 33 * time.Second,
|
||||
},
|
||||
},
|
||||
expectAfter: []structs.ConfigEntryKindName{
|
||||
structs.NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
|
||||
expectAfter: []ConfigEntryKindName{
|
||||
NewConfigEntryKindName(structs.ServiceResolver, "main", nil),
|
||||
},
|
||||
checkAfter: func(t *testing.T, entrySet *structs.DiscoveryChainConfigEntries) {
|
||||
resolver := entrySet.GetResolver(structs.NewServiceID("main", nil))
|
||||
@ -1276,31 +1276,31 @@ func TestStore_ReadDiscoveryChainConfigEntries_Overrides(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func entrySetToKindNames(entrySet *structs.DiscoveryChainConfigEntries) []structs.ConfigEntryKindName {
|
||||
var out []structs.ConfigEntryKindName
|
||||
func entrySetToKindNames(entrySet *structs.DiscoveryChainConfigEntries) []ConfigEntryKindName {
|
||||
var out []ConfigEntryKindName
|
||||
for _, entry := range entrySet.Routers {
|
||||
out = append(out, structs.NewConfigEntryKindName(
|
||||
out = append(out, NewConfigEntryKindName(
|
||||
entry.Kind,
|
||||
entry.Name,
|
||||
&entry.EnterpriseMeta,
|
||||
))
|
||||
}
|
||||
for _, entry := range entrySet.Splitters {
|
||||
out = append(out, structs.NewConfigEntryKindName(
|
||||
out = append(out, NewConfigEntryKindName(
|
||||
entry.Kind,
|
||||
entry.Name,
|
||||
&entry.EnterpriseMeta,
|
||||
))
|
||||
}
|
||||
for _, entry := range entrySet.Resolvers {
|
||||
out = append(out, structs.NewConfigEntryKindName(
|
||||
out = append(out, NewConfigEntryKindName(
|
||||
entry.Kind,
|
||||
entry.Name,
|
||||
&entry.EnterpriseMeta,
|
||||
))
|
||||
}
|
||||
for _, entry := range entrySet.Services {
|
||||
out = append(out, structs.NewConfigEntryKindName(
|
||||
out = append(out, NewConfigEntryKindName(
|
||||
entry.Kind,
|
||||
entry.Name,
|
||||
&entry.EnterpriseMeta,
|
||||
|
@ -146,7 +146,7 @@ func (s *Store) CoordinateBatchUpdate(idx uint64, updates structs.Coordinates) e
|
||||
// don't carefully sequence this, and since it will fix itself
|
||||
// on the next coordinate update from that node, we don't return
|
||||
// an error or log anything.
|
||||
node, err := tx.First("nodes", "id", update.Node)
|
||||
node, err := tx.First(tableNodes, indexID, Query{Value: update.Node})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed node lookup: %s", err)
|
||||
}
|
||||
|
@ -1,54 +0,0 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
// IndexConnectService indexes a *struct.ServiceNode for querying by
|
||||
// services that support Connect to some target service. This will
|
||||
// properly index the proxy destination for proxies and the service name
|
||||
// for native services.
|
||||
type IndexConnectService struct{}
|
||||
|
||||
func (idx *IndexConnectService) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
sn, ok := obj.(*structs.ServiceNode)
|
||||
if !ok {
|
||||
return false, nil, fmt.Errorf("Object must be ServiceNode, got %T", obj)
|
||||
}
|
||||
|
||||
var result []byte
|
||||
switch {
|
||||
case sn.ServiceKind == structs.ServiceKindConnectProxy:
|
||||
// For proxies, this service supports Connect for the destination
|
||||
result = []byte(strings.ToLower(sn.ServiceProxy.DestinationServiceName))
|
||||
|
||||
case sn.ServiceConnect.Native:
|
||||
// For native, this service supports Connect directly
|
||||
result = []byte(strings.ToLower(sn.ServiceName))
|
||||
|
||||
default:
|
||||
// Doesn't support Connect at all
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
// Return the result with the null terminator appended so we can
|
||||
// differentiate prefix vs. non-prefix matches.
|
||||
return true, append(result, '\x00'), nil
|
||||
}
|
||||
|
||||
func (idx *IndexConnectService) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf("must provide only a single argument")
|
||||
}
|
||||
|
||||
arg, ok := args[0].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("argument must be a string: %#v", args[0])
|
||||
}
|
||||
|
||||
// Add the null character as a terminator
|
||||
return append([]byte(strings.ToLower(arg)), '\x00'), nil
|
||||
}
|
@ -3,120 +3,53 @@ package state
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
func TestIndexConnectService_FromObject(t *testing.T) {
|
||||
func TestConnectNameFromServiceNode(t *testing.T) {
|
||||
cases := []struct {
|
||||
Name string
|
||||
Input interface{}
|
||||
ExpectMatch bool
|
||||
ExpectVal []byte
|
||||
ExpectErr string
|
||||
name string
|
||||
input structs.ServiceNode
|
||||
expected string
|
||||
expectedOk bool
|
||||
}{
|
||||
{
|
||||
"not a ServiceNode",
|
||||
42,
|
||||
false,
|
||||
nil,
|
||||
"ServiceNode",
|
||||
name: "typical service, not native",
|
||||
input: structs.ServiceNode{ServiceName: "db"},
|
||||
expectedOk: false,
|
||||
},
|
||||
|
||||
{
|
||||
"typical service, not native",
|
||||
&structs.ServiceNode{
|
||||
ServiceName: "db",
|
||||
},
|
||||
false,
|
||||
nil,
|
||||
"",
|
||||
},
|
||||
|
||||
{
|
||||
"typical service, is native",
|
||||
&structs.ServiceNode{
|
||||
name: "typical service, is native",
|
||||
input: structs.ServiceNode{
|
||||
ServiceName: "dB",
|
||||
ServiceConnect: structs.ServiceConnect{Native: true},
|
||||
},
|
||||
true,
|
||||
[]byte("db\x00"),
|
||||
"",
|
||||
expectedOk: true,
|
||||
expected: "dB",
|
||||
},
|
||||
|
||||
{
|
||||
"proxy service",
|
||||
&structs.ServiceNode{
|
||||
name: "proxy service",
|
||||
input: structs.ServiceNode{
|
||||
ServiceKind: structs.ServiceKindConnectProxy,
|
||||
ServiceName: "db",
|
||||
ServiceProxy: structs.ConnectProxyConfig{DestinationServiceName: "fOo"},
|
||||
},
|
||||
true,
|
||||
[]byte("foo\x00"),
|
||||
"",
|
||||
expectedOk: true,
|
||||
expected: "fOo",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
var idx IndexConnectService
|
||||
match, val, err := idx.FromObject(tc.Input)
|
||||
if tc.ExpectErr != "" {
|
||||
require.Error(err)
|
||||
require.Contains(err.Error(), tc.ExpectErr)
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
actual, ok := connectNameFromServiceNode(&tc.input)
|
||||
if !tc.expectedOk {
|
||||
require.False(t, ok, "expected no connect name")
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
require.Equal(tc.ExpectMatch, match)
|
||||
require.Equal(tc.ExpectVal, val)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIndexConnectService_FromArgs(t *testing.T) {
|
||||
cases := []struct {
|
||||
Name string
|
||||
Args []interface{}
|
||||
ExpectVal []byte
|
||||
ExpectErr string
|
||||
}{
|
||||
{
|
||||
"multiple arguments",
|
||||
[]interface{}{"foo", "bar"},
|
||||
nil,
|
||||
"single",
|
||||
},
|
||||
|
||||
{
|
||||
"not a string",
|
||||
[]interface{}{42},
|
||||
nil,
|
||||
"must be a string",
|
||||
},
|
||||
|
||||
{
|
||||
"string",
|
||||
[]interface{}{"fOO"},
|
||||
[]byte("foo\x00"),
|
||||
"",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
require := require.New(t)
|
||||
|
||||
var idx IndexConnectService
|
||||
val, err := idx.FromArgs(tc.Args...)
|
||||
if tc.ExpectErr != "" {
|
||||
require.Error(err)
|
||||
require.Contains(err.Error(), tc.ExpectErr)
|
||||
return
|
||||
}
|
||||
require.NoError(err)
|
||||
require.Equal(tc.ExpectVal, val)
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -1,38 +0,0 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
// IndexServiceKind indexes a *struct.ServiceNode for querying by
|
||||
// the services kind. We need a custom indexer because of the default
|
||||
// kind being the empty string. The StringFieldIndex in memdb seems to
|
||||
// treate the empty string as missing and doesn't work correctly when we actually
|
||||
// want to index ""
|
||||
type IndexServiceKind struct{}
|
||||
|
||||
func (idx *IndexServiceKind) FromObject(obj interface{}) (bool, []byte, error) {
|
||||
sn, ok := obj.(*structs.ServiceNode)
|
||||
if !ok {
|
||||
return false, nil, fmt.Errorf("Object must be ServiceNode, got %T", obj)
|
||||
}
|
||||
|
||||
return true, append([]byte(strings.ToLower(string(sn.ServiceKind))), '\x00'), nil
|
||||
}
|
||||
|
||||
func (idx *IndexServiceKind) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf("must provide only a single argument")
|
||||
}
|
||||
|
||||
arg, ok := args[0].(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("argument must be a structs.ServiceKind: %#v", args[0])
|
||||
}
|
||||
|
||||
// Add the null character as a terminator
|
||||
return append([]byte(strings.ToLower(arg)), '\x00'), nil
|
||||
}
|
125
agent/consul/state/indexer.go
Normal file
125
agent/consul/state/indexer.go
Normal file
@ -0,0 +1,125 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// indexerSingle implements both memdb.Indexer and memdb.SingleIndexer. It may
|
||||
// be used in a memdb.IndexSchema to specify functions that generate the index
|
||||
// value for memdb.Txn operations.
|
||||
type indexerSingle struct {
|
||||
// readIndex is used by memdb for Txn.Get, Txn.First, and other operations
|
||||
// that read data.
|
||||
readIndex
|
||||
// writeIndex is used by memdb for Txn.Insert, Txn.Delete, for operations
|
||||
// that write data to the index.
|
||||
writeIndex
|
||||
}
|
||||
|
||||
// indexerMulti implements both memdb.Indexer and memdb.MultiIndexer. It may
|
||||
// be used in a memdb.IndexSchema to specify functions that generate the index
|
||||
// value for memdb.Txn operations.
|
||||
type indexerMulti struct {
|
||||
// readIndex is used by memdb for Txn.Get, Txn.First, and other operations
|
||||
// that read data.
|
||||
readIndex
|
||||
// writeIndexMulti is used by memdb for Txn.Insert, Txn.Delete, for operations
|
||||
// that write data to the index.
|
||||
writeIndexMulti
|
||||
}
|
||||
|
||||
// indexerSingleWithPrefix is a indexerSingle which also supports prefix queries.
|
||||
type indexerSingleWithPrefix struct {
|
||||
readIndex
|
||||
writeIndex
|
||||
prefixIndex
|
||||
}
|
||||
|
||||
// readIndex implements memdb.Indexer. It exists so that a function can be used
|
||||
// to provide the interface.
|
||||
//
|
||||
// Unlike memdb.Indexer, a readIndex function accepts only a single argument. To
|
||||
// generate an index from multiple values, use a struct type with multiple fields.
|
||||
type readIndex func(arg interface{}) ([]byte, error)
|
||||
|
||||
func (f readIndex) FromArgs(args ...interface{}) ([]byte, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf("index supports only a single arg")
|
||||
}
|
||||
return f(args[0])
|
||||
}
|
||||
|
||||
var errMissingValueForIndex = fmt.Errorf("object is missing a value for this index")
|
||||
|
||||
// writeIndex implements memdb.SingleIndexer. It exists so that a function
|
||||
// can be used to provide this interface.
|
||||
//
|
||||
// Instead of a bool return value, writeIndex expects errMissingValueForIndex to
|
||||
// indicate that an index could not be build for the object. It will translate
|
||||
// this error into a false value to satisfy the memdb.SingleIndexer interface.
|
||||
type writeIndex func(raw interface{}) ([]byte, error)
|
||||
|
||||
func (f writeIndex) FromObject(raw interface{}) (bool, []byte, error) {
|
||||
v, err := f(raw)
|
||||
if errors.Is(err, errMissingValueForIndex) {
|
||||
return false, nil, nil
|
||||
}
|
||||
return err == nil, v, err
|
||||
}
|
||||
|
||||
// writeIndexMulti implements memdb.MultiIndexer. It exists so that a function
|
||||
// can be used to provide this interface.
|
||||
//
|
||||
// Instead of a bool return value, writeIndexMulti expects errMissingValueForIndex to
|
||||
// indicate that an index could not be build for the object. It will translate
|
||||
// this error into a false value to satisfy the memdb.MultiIndexer interface.
|
||||
type writeIndexMulti func(raw interface{}) ([][]byte, error)
|
||||
|
||||
func (f writeIndexMulti) FromObject(raw interface{}) (bool, [][]byte, error) {
|
||||
v, err := f(raw)
|
||||
if errors.Is(err, errMissingValueForIndex) {
|
||||
return false, nil, nil
|
||||
}
|
||||
return err == nil, v, err
|
||||
}
|
||||
|
||||
// prefixIndex implements memdb.PrefixIndexer. It exists so that a function
|
||||
// can be used to provide this interface.
|
||||
type prefixIndex func(args interface{}) ([]byte, error)
|
||||
|
||||
func (f prefixIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
|
||||
if len(args) != 1 {
|
||||
return nil, fmt.Errorf("index supports only a single arg")
|
||||
}
|
||||
return f(args[0])
|
||||
}
|
||||
|
||||
const null = "\x00"
|
||||
|
||||
// indexBuilder is a buffer used to construct memdb index values.
|
||||
type indexBuilder bytes.Buffer
|
||||
|
||||
func newIndexBuilder(cap int) *indexBuilder {
|
||||
buff := make([]byte, 0, cap)
|
||||
b := bytes.NewBuffer(buff)
|
||||
return (*indexBuilder)(b)
|
||||
}
|
||||
|
||||
// String appends the string and a null terminator to the buffer.
|
||||
func (b *indexBuilder) String(v string) {
|
||||
(*bytes.Buffer)(b).WriteString(v)
|
||||
(*bytes.Buffer)(b).WriteString(null)
|
||||
}
|
||||
|
||||
// Raw appends the bytes without a null terminator to the buffer. Raw should
|
||||
// only be used when v has a fixed length, or when building the last segment of
|
||||
// a prefix index.
|
||||
func (b *indexBuilder) Raw(v []byte) {
|
||||
(*bytes.Buffer)(b).Write(v)
|
||||
}
|
||||
|
||||
func (b *indexBuilder) Bytes() []byte {
|
||||
return (*bytes.Buffer)(b).Bytes()
|
||||
}
|
@ -3,12 +3,12 @@ package state
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"sort"
|
||||
|
||||
"github.com/hashicorp/go-memdb"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
@ -154,7 +154,7 @@ func (s *Store) LegacyIntentions(ws memdb.WatchSet, entMeta *structs.EnterpriseM
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
|
||||
idx, results, _, err := s.legacyIntentionsListTxn(tx, ws, entMeta)
|
||||
idx, results, _, err := legacyIntentionsListTxn(tx, ws, entMeta)
|
||||
return idx, results, err
|
||||
}
|
||||
|
||||
@ -168,12 +168,12 @@ func (s *Store) Intentions(ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (
|
||||
return 0, nil, false, err
|
||||
}
|
||||
if !usingConfigEntries {
|
||||
return s.legacyIntentionsListTxn(tx, ws, entMeta)
|
||||
return legacyIntentionsListTxn(tx, ws, entMeta)
|
||||
}
|
||||
return s.configIntentionsListTxn(tx, ws, entMeta)
|
||||
return configIntentionsListTxn(tx, ws, entMeta)
|
||||
}
|
||||
|
||||
func (s *Store) legacyIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) {
|
||||
func legacyIntentionsListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *structs.EnterpriseMeta) (uint64, structs.Intentions, bool, error) {
|
||||
// Get the index
|
||||
idx := maxIndexTxn(tx, tableConnectIntentions)
|
||||
if idx < 1 {
|
||||
@ -578,13 +578,13 @@ func (s *Store) IntentionGet(ws memdb.WatchSet, id string) (uint64, *structs.Ser
|
||||
return 0, nil, nil, err
|
||||
}
|
||||
if !usingConfigEntries {
|
||||
idx, ixn, err := s.legacyIntentionGetTxn(tx, ws, id)
|
||||
idx, ixn, err := legacyIntentionGetTxn(tx, ws, id)
|
||||
return idx, nil, ixn, err
|
||||
}
|
||||
return s.configIntentionGetTxn(tx, ws, id)
|
||||
return configIntentionGetTxn(tx, ws, id)
|
||||
}
|
||||
|
||||
func (s *Store) legacyIntentionGetTxn(tx ReadTxn, ws memdb.WatchSet, id string) (uint64, *structs.Intention, error) {
|
||||
func legacyIntentionGetTxn(tx ReadTxn, ws memdb.WatchSet, id string) (uint64, *structs.Intention, error) {
|
||||
// Get the table index.
|
||||
idx := maxIndexTxn(tx, tableConnectIntentions)
|
||||
if idx < 1 {
|
||||
@ -732,35 +732,19 @@ func (s *Store) LegacyIntentionDeleteAll(idx uint64) error {
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
// IntentionDecision returns whether a connection should be allowed from a source URI to some destination
|
||||
// It returns true or false for the enforcement, and also a boolean for whether
|
||||
// IntentionDecision returns whether a connection should be allowed to a source or destination given a set of intentions.
|
||||
//
|
||||
// allowPermissions determines whether the presence of L7 permissions leads to a DENY decision.
|
||||
// This should be false when evaluating a connection between a source and destination, but not the request that will be sent.
|
||||
func (s *Store) IntentionDecision(
|
||||
srcURI connect.CertURI, dstName, dstNS string, defaultDecision acl.EnforcementDecision,
|
||||
target, targetNS string, intentions structs.Intentions, matchType structs.IntentionMatchType,
|
||||
defaultDecision acl.EnforcementDecision, allowPermissions bool,
|
||||
) (structs.IntentionDecisionSummary, error) {
|
||||
|
||||
_, matches, err := s.IntentionMatch(nil, &structs.IntentionQueryMatch{
|
||||
Type: structs.IntentionMatchDestination,
|
||||
Entries: []structs.IntentionMatchEntry{
|
||||
{
|
||||
Namespace: dstNS,
|
||||
Name: dstName,
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return structs.IntentionDecisionSummary{}, err
|
||||
}
|
||||
if len(matches) != 1 {
|
||||
// This should never happen since the documented behavior of the
|
||||
// Match call is that it'll always return exactly the number of results
|
||||
// as entries passed in. But we guard against misbehavior.
|
||||
return structs.IntentionDecisionSummary{}, errors.New("internal error loading matches")
|
||||
}
|
||||
|
||||
// Figure out which source matches this request.
|
||||
var ixnMatch *structs.Intention
|
||||
for _, ixn := range matches[0] {
|
||||
if _, ok := srcURI.Authorize(ixn); ok {
|
||||
for _, ixn := range intentions {
|
||||
if _, ok := connect.AuthorizeIntentionTarget(target, targetNS, ixn, matchType); ok {
|
||||
ixnMatch = ixn
|
||||
break
|
||||
}
|
||||
@ -776,9 +760,9 @@ func (s *Store) IntentionDecision(
|
||||
// Intention found, combine action + permissions
|
||||
resp.Allowed = ixnMatch.Action == structs.IntentionActionAllow
|
||||
if len(ixnMatch.Permissions) > 0 {
|
||||
// If there are L7 permissions, DENY.
|
||||
// We are only evaluating source and destination, not the request that will be sent.
|
||||
resp.Allowed = false
|
||||
// If any permissions are present, fall back to allowPermissions.
|
||||
// We are not evaluating requests so we cannot know whether the L7 permission requirements will be met.
|
||||
resp.Allowed = allowPermissions
|
||||
resp.HasPermissions = true
|
||||
}
|
||||
resp.ExternalSource = ixnMatch.Meta[structs.MetaExternalSource]
|
||||
@ -853,6 +837,16 @@ func (s *Store) IntentionMatchOne(
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
|
||||
return compatIntentionMatchOneTxn(tx, ws, entry, matchType)
|
||||
}
|
||||
|
||||
func compatIntentionMatchOneTxn(
|
||||
tx ReadTxn,
|
||||
ws memdb.WatchSet,
|
||||
entry structs.IntentionMatchEntry,
|
||||
matchType structs.IntentionMatchType,
|
||||
) (uint64, structs.Intentions, error) {
|
||||
|
||||
usingConfigEntries, err := areIntentionsInConfigEntries(tx, ws)
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
@ -936,3 +930,91 @@ func intentionMatchGetParams(entry structs.IntentionMatchEntry) ([][]interface{}
|
||||
result = append(result, []interface{}{entry.Namespace, entry.Name})
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// IntentionTopology returns the upstreams or downstreams of a service. Upstreams and downstreams are inferred from
|
||||
// intentions. If intentions allow a connection from the target to some candidate service, the candidate service is considered
|
||||
// an upstream of the target.
|
||||
func (s *Store) IntentionTopology(ws memdb.WatchSet,
|
||||
target structs.ServiceName, downstreams bool, defaultDecision acl.EnforcementDecision) (uint64, structs.ServiceList, error) {
|
||||
tx := s.db.ReadTxn()
|
||||
defer tx.Abort()
|
||||
|
||||
var maxIdx uint64
|
||||
|
||||
// If querying the upstreams for a service, we first query intentions that apply to the target service as a source.
|
||||
// That way we can check whether intentions from the source allow connections to upstream candidates.
|
||||
// The reverse is true for downstreams.
|
||||
intentionMatchType := structs.IntentionMatchSource
|
||||
if downstreams {
|
||||
intentionMatchType = structs.IntentionMatchDestination
|
||||
}
|
||||
entry := structs.IntentionMatchEntry{
|
||||
Namespace: target.NamespaceOrDefault(),
|
||||
Name: target.Name,
|
||||
}
|
||||
index, intentions, err := compatIntentionMatchOneTxn(tx, ws, entry, intentionMatchType)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("failed to query intentions for %s", target.String())
|
||||
}
|
||||
if index > maxIdx {
|
||||
maxIdx = index
|
||||
}
|
||||
|
||||
// Check for a wildcard intention (* -> *) since it overrides the default decision from ACLs
|
||||
if len(intentions) > 0 {
|
||||
// Intentions with wildcard source and destination have the lowest precedence, so they are last in the list
|
||||
ixn := intentions[len(intentions)-1]
|
||||
|
||||
if ixn.HasWildcardSource() && ixn.HasWildcardDestination() {
|
||||
defaultDecision = acl.Allow
|
||||
if ixn.Action == structs.IntentionActionDeny {
|
||||
defaultDecision = acl.Deny
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
index, allServices, err := serviceListTxn(tx, ws, func(svc *structs.ServiceNode) bool {
|
||||
// Only include ingress gateways as downstreams, since they cannot receive service mesh traffic
|
||||
// TODO(freddy): One remaining issue is that this includes non-Connect services (typical services without a proxy)
|
||||
// Ideally those should be excluded as well, since they can't be upstreams/downstreams without a proxy.
|
||||
// Maybe start tracking services represented by proxies? (both sidecar and ingress)
|
||||
if svc.ServiceKind == structs.ServiceKindTypical || (svc.ServiceKind == structs.ServiceKindIngressGateway && downstreams) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}, structs.WildcardEnterpriseMeta())
|
||||
if err != nil {
|
||||
return index, nil, fmt.Errorf("failed to fetch catalog service list: %v", err)
|
||||
}
|
||||
if index > maxIdx {
|
||||
maxIdx = index
|
||||
}
|
||||
|
||||
// When checking authorization to upstreams, the match type for the decision is `destination` because we are deciding
|
||||
// if upstream candidates are covered by intentions that have the target service as a source.
|
||||
// The reverse is true for downstreams.
|
||||
decisionMatchType := structs.IntentionMatchDestination
|
||||
if downstreams {
|
||||
decisionMatchType = structs.IntentionMatchSource
|
||||
}
|
||||
result := make(structs.ServiceList, 0, len(allServices))
|
||||
for _, candidate := range allServices {
|
||||
if candidate.Name == structs.ConsulServiceName {
|
||||
continue
|
||||
}
|
||||
decision, err := s.IntentionDecision(candidate.Name, candidate.NamespaceOrDefault(), intentions, decisionMatchType, defaultDecision, true)
|
||||
if err != nil {
|
||||
src, dst := target, candidate
|
||||
if downstreams {
|
||||
src, dst = candidate, target
|
||||
}
|
||||
return 0, nil, fmt.Errorf("failed to get intention decision from (%s) to (%s): %v",
|
||||
src.String(), dst.String(), err)
|
||||
}
|
||||
if !decision.Allowed || target.Matches(candidate) {
|
||||
continue
|
||||
}
|
||||
result = append(result, candidate)
|
||||
}
|
||||
return maxIdx, result, err
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -9,7 +10,6 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
@ -1760,16 +1760,19 @@ func TestStore_IntentionDecision(t *testing.T) {
|
||||
}
|
||||
|
||||
tt := []struct {
|
||||
name string
|
||||
src string
|
||||
dst string
|
||||
defaultDecision acl.EnforcementDecision
|
||||
expect structs.IntentionDecisionSummary
|
||||
name string
|
||||
src string
|
||||
dst string
|
||||
matchType structs.IntentionMatchType
|
||||
defaultDecision acl.EnforcementDecision
|
||||
allowPermissions bool
|
||||
expect structs.IntentionDecisionSummary
|
||||
}{
|
||||
{
|
||||
name: "no matching intention and default deny",
|
||||
src: "does-not-exist",
|
||||
dst: "ditto",
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
defaultDecision: acl.Deny,
|
||||
expect: structs.IntentionDecisionSummary{Allowed: false},
|
||||
},
|
||||
@ -1777,13 +1780,15 @@ func TestStore_IntentionDecision(t *testing.T) {
|
||||
name: "no matching intention and default allow",
|
||||
src: "does-not-exist",
|
||||
dst: "ditto",
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
defaultDecision: acl.Allow,
|
||||
expect: structs.IntentionDecisionSummary{Allowed: true},
|
||||
},
|
||||
{
|
||||
name: "denied with permissions",
|
||||
src: "web",
|
||||
dst: "redis",
|
||||
name: "denied with permissions",
|
||||
src: "web",
|
||||
dst: "redis",
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
expect: structs.IntentionDecisionSummary{
|
||||
Allowed: false,
|
||||
HasPermissions: true,
|
||||
@ -1791,9 +1796,22 @@ func TestStore_IntentionDecision(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "denied without permissions",
|
||||
src: "api",
|
||||
dst: "redis",
|
||||
name: "allowed with permissions",
|
||||
src: "web",
|
||||
dst: "redis",
|
||||
allowPermissions: true,
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
expect: structs.IntentionDecisionSummary{
|
||||
Allowed: true,
|
||||
HasPermissions: true,
|
||||
HasExact: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "denied without permissions",
|
||||
src: "api",
|
||||
dst: "redis",
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
expect: structs.IntentionDecisionSummary{
|
||||
Allowed: false,
|
||||
HasPermissions: false,
|
||||
@ -1801,9 +1819,10 @@ func TestStore_IntentionDecision(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "allowed from external source",
|
||||
src: "api",
|
||||
dst: "web",
|
||||
name: "allowed from external source",
|
||||
src: "api",
|
||||
dst: "web",
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
expect: structs.IntentionDecisionSummary{
|
||||
Allowed: true,
|
||||
HasPermissions: false,
|
||||
@ -1812,9 +1831,21 @@ func TestStore_IntentionDecision(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "allowed by source wildcard not exact",
|
||||
src: "anything",
|
||||
dst: "mysql",
|
||||
name: "allowed by source wildcard not exact",
|
||||
src: "anything",
|
||||
dst: "mysql",
|
||||
matchType: structs.IntentionMatchDestination,
|
||||
expect: structs.IntentionDecisionSummary{
|
||||
Allowed: true,
|
||||
HasPermissions: false,
|
||||
HasExact: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "allowed by matching on source",
|
||||
src: "web",
|
||||
dst: "api",
|
||||
matchType: structs.IntentionMatchSource,
|
||||
expect: structs.IntentionDecisionSummary{
|
||||
Allowed: true,
|
||||
HasPermissions: false,
|
||||
@ -1824,11 +1855,15 @@ func TestStore_IntentionDecision(t *testing.T) {
|
||||
}
|
||||
for _, tc := range tt {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
uri := connect.SpiffeIDService{
|
||||
Service: tc.src,
|
||||
entry := structs.IntentionMatchEntry{
|
||||
Namespace: structs.IntentionDefaultNamespace,
|
||||
Name: tc.src,
|
||||
}
|
||||
decision, err := s.IntentionDecision(&uri, tc.dst, structs.IntentionDefaultNamespace, tc.defaultDecision)
|
||||
_, intentions, err := s.IntentionMatchOne(nil, entry, structs.IntentionMatchSource)
|
||||
if err != nil {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
decision, err := s.IntentionDecision(tc.dst, structs.IntentionDefaultNamespace, intentions, tc.matchType, tc.defaultDecision, tc.allowPermissions)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.expect, decision)
|
||||
})
|
||||
@ -1847,3 +1882,379 @@ func testConfigStateStore(t *testing.T) *Store {
|
||||
disableLegacyIntentions(s)
|
||||
return s
|
||||
}
|
||||
|
||||
func TestStore_IntentionTopology(t *testing.T) {
|
||||
node := structs.Node{
|
||||
Node: "foo",
|
||||
Address: "127.0.0.1",
|
||||
}
|
||||
services := []structs.NodeService{
|
||||
{
|
||||
ID: structs.ConsulServiceID,
|
||||
Service: structs.ConsulServiceName,
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
{
|
||||
ID: "api-1",
|
||||
Service: "api",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
{
|
||||
ID: "mysql-1",
|
||||
Service: "mysql",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
{
|
||||
ID: "web-1",
|
||||
Service: "web",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
{
|
||||
Kind: structs.ServiceKindConnectProxy,
|
||||
ID: "web-proxy-1",
|
||||
Service: "web-proxy",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
{
|
||||
Kind: structs.ServiceKindTerminatingGateway,
|
||||
ID: "terminating-gateway-1",
|
||||
Service: "terminating-gateway",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
{
|
||||
Kind: structs.ServiceKindIngressGateway,
|
||||
ID: "ingress-gateway-1",
|
||||
Service: "ingress-gateway",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
{
|
||||
Kind: structs.ServiceKindMeshGateway,
|
||||
ID: "mesh-gateway-1",
|
||||
Service: "mesh-gateway",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
}
|
||||
|
||||
type expect struct {
|
||||
idx uint64
|
||||
services structs.ServiceList
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
defaultDecision acl.EnforcementDecision
|
||||
intentions []structs.ServiceIntentionsConfigEntry
|
||||
target structs.ServiceName
|
||||
downstreams bool
|
||||
expect expect
|
||||
}{
|
||||
{
|
||||
name: "(upstream) acl allow all but intentions deny one",
|
||||
defaultDecision: acl.Allow,
|
||||
intentions: []structs.ServiceIntentionsConfigEntry{
|
||||
{
|
||||
Kind: structs.ServiceIntentions,
|
||||
Name: "api",
|
||||
Sources: []*structs.SourceIntention{
|
||||
{
|
||||
Name: "web",
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
target: structs.NewServiceName("web", nil),
|
||||
downstreams: false,
|
||||
expect: expect{
|
||||
idx: 10,
|
||||
services: structs.ServiceList{
|
||||
{
|
||||
Name: "mysql",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "(upstream) acl deny all intentions allow one",
|
||||
defaultDecision: acl.Deny,
|
||||
intentions: []structs.ServiceIntentionsConfigEntry{
|
||||
{
|
||||
Kind: structs.ServiceIntentions,
|
||||
Name: "api",
|
||||
Sources: []*structs.SourceIntention{
|
||||
{
|
||||
Name: "web",
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
target: structs.NewServiceName("web", nil),
|
||||
downstreams: false,
|
||||
expect: expect{
|
||||
idx: 10,
|
||||
services: structs.ServiceList{
|
||||
{
|
||||
Name: "api",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "(downstream) acl allow all but intentions deny one",
|
||||
defaultDecision: acl.Allow,
|
||||
intentions: []structs.ServiceIntentionsConfigEntry{
|
||||
{
|
||||
Kind: structs.ServiceIntentions,
|
||||
Name: "api",
|
||||
Sources: []*structs.SourceIntention{
|
||||
{
|
||||
Name: "web",
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
target: structs.NewServiceName("api", nil),
|
||||
downstreams: true,
|
||||
expect: expect{
|
||||
idx: 10,
|
||||
services: structs.ServiceList{
|
||||
{
|
||||
Name: "ingress-gateway",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
{
|
||||
Name: "mysql",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "(downstream) acl deny all intentions allow one",
|
||||
defaultDecision: acl.Deny,
|
||||
intentions: []structs.ServiceIntentionsConfigEntry{
|
||||
{
|
||||
Kind: structs.ServiceIntentions,
|
||||
Name: "api",
|
||||
Sources: []*structs.SourceIntention{
|
||||
{
|
||||
Name: "web",
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
target: structs.NewServiceName("api", nil),
|
||||
downstreams: true,
|
||||
expect: expect{
|
||||
idx: 10,
|
||||
services: structs.ServiceList{
|
||||
{
|
||||
Name: "web",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "acl deny but intention allow all overrides it",
|
||||
defaultDecision: acl.Deny,
|
||||
intentions: []structs.ServiceIntentionsConfigEntry{
|
||||
{
|
||||
Kind: structs.ServiceIntentions,
|
||||
Name: "*",
|
||||
Sources: []*structs.SourceIntention{
|
||||
{
|
||||
Name: "*",
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
target: structs.NewServiceName("web", nil),
|
||||
downstreams: false,
|
||||
expect: expect{
|
||||
idx: 10,
|
||||
services: structs.ServiceList{
|
||||
{
|
||||
Name: "api",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
{
|
||||
Name: "mysql",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "acl allow but intention deny all overrides it",
|
||||
defaultDecision: acl.Allow,
|
||||
intentions: []structs.ServiceIntentionsConfigEntry{
|
||||
{
|
||||
Kind: structs.ServiceIntentions,
|
||||
Name: "*",
|
||||
Sources: []*structs.SourceIntention{
|
||||
{
|
||||
Name: "*",
|
||||
Action: structs.IntentionActionDeny,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
target: structs.NewServiceName("web", nil),
|
||||
downstreams: false,
|
||||
expect: expect{
|
||||
idx: 10,
|
||||
services: structs.ServiceList{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "acl deny but intention allow all overrides it",
|
||||
defaultDecision: acl.Deny,
|
||||
intentions: []structs.ServiceIntentionsConfigEntry{
|
||||
{
|
||||
Kind: structs.ServiceIntentions,
|
||||
Name: "*",
|
||||
Sources: []*structs.SourceIntention{
|
||||
{
|
||||
Name: "*",
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
target: structs.NewServiceName("web", nil),
|
||||
downstreams: false,
|
||||
expect: expect{
|
||||
idx: 10,
|
||||
services: structs.ServiceList{
|
||||
{
|
||||
Name: "api",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
{
|
||||
Name: "mysql",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := testConfigStateStore(t)
|
||||
|
||||
var idx uint64 = 1
|
||||
require.NoError(t, s.EnsureNode(idx, &node))
|
||||
idx++
|
||||
|
||||
for _, svc := range services {
|
||||
require.NoError(t, s.EnsureService(idx, "foo", &svc))
|
||||
idx++
|
||||
}
|
||||
for _, ixn := range tt.intentions {
|
||||
require.NoError(t, s.EnsureConfigEntry(idx, &ixn))
|
||||
idx++
|
||||
}
|
||||
|
||||
idx, got, err := s.IntentionTopology(nil, tt.target, tt.downstreams, tt.defaultDecision)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expect.idx, idx)
|
||||
|
||||
// ServiceList is from a map, so it is not deterministically sorted
|
||||
sort.Slice(got, func(i, j int) bool {
|
||||
return got[i].String() < got[j].String()
|
||||
})
|
||||
require.Equal(t, tt.expect.services, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_IntentionTopology_Watches(t *testing.T) {
|
||||
s := testConfigStateStore(t)
|
||||
|
||||
var i uint64 = 1
|
||||
require.NoError(t, s.EnsureNode(i, &structs.Node{
|
||||
Node: "foo",
|
||||
Address: "127.0.0.1",
|
||||
}))
|
||||
i++
|
||||
|
||||
target := structs.NewServiceName("web", structs.DefaultEnterpriseMeta())
|
||||
|
||||
ws := memdb.NewWatchSet()
|
||||
index, got, err := s.IntentionTopology(ws, target, false, acl.Deny)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), index)
|
||||
require.Empty(t, got)
|
||||
|
||||
// Watch should fire after adding a relevant config entry
|
||||
require.NoError(t, s.EnsureConfigEntry(i, &structs.ServiceIntentionsConfigEntry{
|
||||
Kind: structs.ServiceIntentions,
|
||||
Name: "api",
|
||||
Sources: []*structs.SourceIntention{
|
||||
{
|
||||
Name: "web",
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
},
|
||||
}))
|
||||
i++
|
||||
|
||||
require.True(t, watchFired(ws))
|
||||
|
||||
// Reset the WatchSet
|
||||
ws = memdb.NewWatchSet()
|
||||
index, got, err = s.IntentionTopology(ws, target, false, acl.Deny)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(2), index)
|
||||
require.Empty(t, got)
|
||||
|
||||
// Watch should not fire after unrelated intention changes
|
||||
require.NoError(t, s.EnsureConfigEntry(i, &structs.ServiceIntentionsConfigEntry{
|
||||
Kind: structs.ServiceIntentions,
|
||||
Name: "another service",
|
||||
Sources: []*structs.SourceIntention{
|
||||
{
|
||||
Name: "any other service",
|
||||
Action: structs.IntentionActionAllow,
|
||||
},
|
||||
},
|
||||
}))
|
||||
i++
|
||||
|
||||
// TODO(freddy) Why is this firing?
|
||||
// require.False(t, watchFired(ws))
|
||||
|
||||
// Result should not have changed
|
||||
index, got, err = s.IntentionTopology(ws, target, false, acl.Deny)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(3), index)
|
||||
require.Empty(t, got)
|
||||
|
||||
// Watch should fire after service list changes
|
||||
require.NoError(t, s.EnsureService(i, "foo", &structs.NodeService{
|
||||
ID: "api-1",
|
||||
Service: "api",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
}))
|
||||
|
||||
require.True(t, watchFired(ws))
|
||||
|
||||
// Reset the WatchSet
|
||||
index, got, err = s.IntentionTopology(nil, target, false, acl.Deny)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(4), index)
|
||||
|
||||
expect := structs.ServiceList{
|
||||
{
|
||||
Name: "api",
|
||||
EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
|
||||
},
|
||||
}
|
||||
require.Equal(t, expect, got)
|
||||
}
|
||||
|
@ -202,9 +202,7 @@ func processDBChanges(tx ReadTxn, changes Changes) ([]stream.Event, error) {
|
||||
|
||||
func newSnapshotHandlers(db ReadDB) stream.SnapshotHandlers {
|
||||
return stream.SnapshotHandlers{
|
||||
topicServiceHealth: serviceHealthSnapshot(db, topicServiceHealth),
|
||||
// The connect topic is temporarily disabled until the correct events are
|
||||
// created for terminating gateway changes.
|
||||
//topicServiceHealthConnect: serviceHealthSnapshot(db, topicServiceHealthConnect),
|
||||
topicServiceHealth: serviceHealthSnapshot(db, topicServiceHealth),
|
||||
topicServiceHealthConnect: serviceHealthSnapshot(db, topicServiceHealthConnect),
|
||||
}
|
||||
}
|
||||
|
@ -3,8 +3,9 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
func firstWithTxn(tx ReadTxn,
|
||||
@ -19,11 +20,6 @@ func firstWatchWithTxn(tx ReadTxn,
|
||||
return tx.FirstWatch(table, index, idxVal)
|
||||
}
|
||||
|
||||
func firstWatchCompoundWithTxn(tx ReadTxn,
|
||||
table, index string, _ *structs.EnterpriseMeta, idxVals ...interface{}) (<-chan struct{}, interface{}, error) {
|
||||
return tx.FirstWatch(table, index, idxVals...)
|
||||
}
|
||||
|
||||
func getWithTxn(tx ReadTxn,
|
||||
table, index, idxVal string, entMeta *structs.EnterpriseMeta) (memdb.ResultIterator, error) {
|
||||
|
||||
|
68
agent/consul/state/query.go
Normal file
68
agent/consul/state/query.go
Normal file
@ -0,0 +1,68 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
// Query is a type used to query any single value index that may include an
|
||||
// enterprise identifier.
|
||||
type Query struct {
|
||||
Value string
|
||||
structs.EnterpriseMeta
|
||||
}
|
||||
|
||||
// NamespaceOrDefault exists because structs.EnterpriseMeta uses a pointer
|
||||
// receiver for this method. Remove once that is fixed.
|
||||
func (q Query) NamespaceOrDefault() string {
|
||||
return q.EnterpriseMeta.NamespaceOrDefault()
|
||||
}
|
||||
|
||||
// indexFromQuery builds an index key where Query.Value is lowercase, and is
|
||||
// a required value.
|
||||
func indexFromQuery(arg interface{}) ([]byte, error) {
|
||||
q, ok := arg.(Query)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected type %T for Query index", arg)
|
||||
}
|
||||
|
||||
var b indexBuilder
|
||||
b.String(strings.ToLower(q.Value))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
// uuidStringToBytes is a modified version of memdb.UUIDFieldIndex.parseString
|
||||
func uuidStringToBytes(uuid string) ([]byte, error) {
|
||||
l := len(uuid)
|
||||
if l != 36 {
|
||||
return nil, fmt.Errorf("UUID must be 36 characters")
|
||||
}
|
||||
|
||||
hyphens := strings.Count(uuid, "-")
|
||||
if hyphens > 4 {
|
||||
return nil, fmt.Errorf(`UUID should have maximum of 4 "-"; got %d`, hyphens)
|
||||
}
|
||||
|
||||
// The sanitized length is the length of the original string without the "-".
|
||||
sanitized := strings.Replace(uuid, "-", "", -1)
|
||||
sanitizedLength := len(sanitized)
|
||||
if sanitizedLength%2 != 0 {
|
||||
return nil, fmt.Errorf("UUID (without hyphens) must be even length")
|
||||
}
|
||||
|
||||
dec, err := hex.DecodeString(sanitized)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid UUID: %w", err)
|
||||
}
|
||||
return dec, nil
|
||||
}
|
||||
|
||||
// BoolQuery is a type used to query a boolean condition that may include an
|
||||
// enterprise identifier.
|
||||
type BoolQuery struct {
|
||||
Value bool
|
||||
structs.EnterpriseMeta
|
||||
}
|
25
agent/consul/state/query_oss.go
Normal file
25
agent/consul/state/query_oss.go
Normal file
@ -0,0 +1,25 @@
|
||||
// +build !consulent
|
||||
|
||||
package state
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
||||
func prefixIndexFromQuery(arg interface{}) ([]byte, error) {
|
||||
var b indexBuilder
|
||||
switch v := arg.(type) {
|
||||
case *structs.EnterpriseMeta:
|
||||
return nil, nil
|
||||
case structs.EnterpriseMeta:
|
||||
return nil, nil
|
||||
case Query:
|
||||
b.String(strings.ToLower(v.Value))
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unexpected type %T for Query prefix index", arg)
|
||||
}
|
@ -2,4 +2,5 @@
|
||||
|
||||
package state
|
||||
|
||||
var stateStoreSchemaExpected = "TestStateStoreSchema.golden"
|
||||
func addEnterpriseIndexerTestCases(testcases map[string]func() map[string]indexerTestCase) {
|
||||
}
|
||||
|
@ -1,100 +1,95 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/go-memdb"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/internal/testing/golden"
|
||||
)
|
||||
|
||||
func TestStateStoreSchema(t *testing.T) {
|
||||
type indexerTestCase struct {
|
||||
read indexValue
|
||||
write indexValue
|
||||
prefix []indexValue
|
||||
writeMulti indexValueMulti
|
||||
}
|
||||
|
||||
type indexValue struct {
|
||||
source interface{}
|
||||
expected []byte
|
||||
}
|
||||
|
||||
type indexValueMulti struct {
|
||||
source interface{}
|
||||
expected [][]byte
|
||||
}
|
||||
|
||||
func TestNewDBSchema_Indexers(t *testing.T) {
|
||||
schema := newDBSchema()
|
||||
require.NoError(t, schema.Validate())
|
||||
|
||||
_, err := memdb.NewMemDB(schema)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := repr(schema)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := golden.Get(t, actual, stateStoreSchemaExpected)
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func repr(schema *memdb.DBSchema) (string, error) {
|
||||
tables := make([]string, 0, len(schema.Tables))
|
||||
for name := range schema.Tables {
|
||||
tables = append(tables, name)
|
||||
var testcases = map[string]func() map[string]indexerTestCase{
|
||||
tableACLPolicies: testIndexerTableACLPolicies,
|
||||
tableACLRoles: testIndexerTableACLRoles,
|
||||
tableChecks: testIndexerTableChecks,
|
||||
tableServices: testIndexerTableServices,
|
||||
tableNodes: testIndexerTableNodes,
|
||||
tableConfigEntries: testIndexerTableConfigEntries,
|
||||
tableMeshTopology: testIndexerTableMeshTopology,
|
||||
tableGatewayServices: testIndexerTableGatewayServices,
|
||||
}
|
||||
sort.Strings(tables)
|
||||
addEnterpriseIndexerTestCases(testcases)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
for _, name := range tables {
|
||||
fmt.Fprintf(buf, "table=%v\n", name)
|
||||
|
||||
indexes := indexNames(schema.Tables[name])
|
||||
for _, i := range indexes {
|
||||
index := schema.Tables[name].Indexes[i]
|
||||
fmt.Fprintf(buf, " index=%v", i)
|
||||
if index.Unique {
|
||||
buf.WriteString(" unique")
|
||||
}
|
||||
if index.AllowMissing {
|
||||
buf.WriteString(" allow-missing")
|
||||
}
|
||||
buf.WriteString("\n")
|
||||
buf.WriteString(" indexer=")
|
||||
formatIndexer(buf, index.Indexer)
|
||||
buf.WriteString("\n")
|
||||
for _, table := range schema.Tables {
|
||||
if testcases[table.Name] == nil {
|
||||
continue
|
||||
}
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
t.Run(table.Name, func(t *testing.T) {
|
||||
tableTCs := testcases[table.Name]()
|
||||
|
||||
return buf.String(), nil
|
||||
}
|
||||
for _, index := range table.Indexes {
|
||||
t.Run(index.Name, func(t *testing.T) {
|
||||
indexer := index.Indexer
|
||||
tc, ok := tableTCs[index.Name]
|
||||
if !ok {
|
||||
t.Skip("TODO: missing test case")
|
||||
}
|
||||
|
||||
func formatIndexer(buf *bytes.Buffer, indexer memdb.Indexer) {
|
||||
v := reflect.Indirect(reflect.ValueOf(indexer))
|
||||
typ := v.Type()
|
||||
buf.WriteString(typ.PkgPath() + "." + typ.Name())
|
||||
for i := 0; i < typ.NumField(); i++ {
|
||||
fmt.Fprintf(buf, " %v=", typ.Field(i).Name)
|
||||
args := []interface{}{tc.read.source}
|
||||
if s, ok := tc.read.source.([]interface{}); ok {
|
||||
// Indexes using memdb.CompoundIndex must be expanded to multiple args
|
||||
args = s
|
||||
}
|
||||
|
||||
field := v.Field(i)
|
||||
switch typ.Field(i).Type.Kind() {
|
||||
case reflect.Slice:
|
||||
buf.WriteString("[")
|
||||
for j := 0; j < field.Len(); j++ {
|
||||
if j != 0 {
|
||||
buf.WriteString(", ")
|
||||
}
|
||||
// TODO: handle other types of slices
|
||||
formatIndexer(buf, v.Field(i).Index(j).Interface().(memdb.Indexer))
|
||||
actual, err := indexer.FromArgs(args...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.read.expected, actual)
|
||||
|
||||
if i, ok := indexer.(memdb.SingleIndexer); ok {
|
||||
valid, actual, err := i.FromObject(tc.write.source)
|
||||
require.NoError(t, err)
|
||||
require.True(t, valid)
|
||||
require.Equal(t, tc.write.expected, actual)
|
||||
}
|
||||
|
||||
if i, ok := indexer.(memdb.PrefixIndexer); ok {
|
||||
for _, c := range tc.prefix {
|
||||
t.Run("", func(t *testing.T) {
|
||||
actual, err := i.PrefixFromArgs(c.source)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, c.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if i, ok := indexer.(memdb.MultiIndexer); ok {
|
||||
valid, actual, err := i.FromObject(tc.writeMulti.source)
|
||||
require.NoError(t, err)
|
||||
require.True(t, valid)
|
||||
require.Equal(t, tc.writeMulti.expected, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
buf.WriteString("]")
|
||||
case reflect.Func:
|
||||
// Functions are printed as pointer addresses, which change frequently.
|
||||
// Instead use the name.
|
||||
buf.WriteString(runtime.FuncForPC(field.Pointer()).Name())
|
||||
default:
|
||||
fmt.Fprintf(buf, "%v", field)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func indexNames(table *memdb.TableSchema) []string {
|
||||
indexes := make([]string, 0, len(table.Indexes))
|
||||
for name := range table.Indexes {
|
||||
indexes = append(indexes, name)
|
||||
}
|
||||
|
||||
sort.Strings(indexes)
|
||||
return indexes
|
||||
}
|
||||
|
@ -195,7 +195,7 @@ func sessionCreateTxn(tx *txn, idx uint64, sess *structs.Session) error {
|
||||
sess.ModifyIndex = idx
|
||||
|
||||
// Check that the node exists
|
||||
node, err := tx.First("nodes", "id", sess.Node)
|
||||
node, err := tx.First(tableNodes, indexID, Query{Value: sess.Node})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed node lookup: %s", err)
|
||||
}
|
||||
|
@ -5,9 +5,10 @@ package state
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/go-memdb"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/go-memdb"
|
||||
)
|
||||
|
||||
func sessionIndexer() *memdb.UUIDFieldIndex {
|
||||
@ -107,7 +108,7 @@ func sessionMaxIndex(tx ReadTxn, entMeta *structs.EnterpriseMeta) uint64 {
|
||||
func validateSessionChecksTxn(tx *txn, session *structs.Session) error {
|
||||
// Go over the session checks and ensure they exist.
|
||||
for _, checkID := range session.CheckIDs() {
|
||||
check, err := tx.First("checks", "id", session.Node, string(checkID))
|
||||
check, err := tx.First(tableChecks, indexID, NodeCheckQuery{Node: session.Node, CheckID: string(checkID)})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed check lookup: %s", err)
|
||||
}
|
||||
|
@ -75,7 +75,7 @@ func testRegisterNodeWithMeta(t *testing.T, s *Store, idx uint64, nodeID string,
|
||||
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
n, err := tx.First("nodes", "id", nodeID)
|
||||
n, err := tx.First(tableNodes, indexID, Query{Value: nodeID})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -105,7 +105,7 @@ func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, s
|
||||
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
_, service, err := firstWatchCompoundWithTxn(tx, "services", "id", nil, nodeID, serviceID)
|
||||
service, err := tx.First(tableServices, indexID, NodeServiceQuery{Node: nodeID, Service: serviceID})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -138,7 +138,7 @@ func testRegisterIngressService(t *testing.T, s *Store, idx uint64, nodeID, serv
|
||||
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
_, service, err := firstWatchCompoundWithTxn(tx, "services", "id", nil, nodeID, serviceID)
|
||||
service, err := tx.First(tableServices, indexID, NodeServiceQuery{Node: nodeID, Service: serviceID})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -163,7 +163,7 @@ func testRegisterCheck(t *testing.T, s *Store, idx uint64,
|
||||
|
||||
tx := s.db.Txn(false)
|
||||
defer tx.Abort()
|
||||
_, c, err := firstWatchCompoundWithTxn(tx, "checks", "id", nil, nodeID, string(checkID))
|
||||
c, err := tx.First(tableChecks, indexID, NodeCheckQuery{Node: nodeID, CheckID: string(checkID)})
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
@ -283,7 +283,7 @@ func TestStateStore_maxIndex(t *testing.T) {
|
||||
testRegisterNode(t, s, 1, "bar")
|
||||
testRegisterService(t, s, 2, "foo", "consul")
|
||||
|
||||
if max := s.maxIndex("nodes", "services"); max != 2 {
|
||||
if max := s.maxIndex("nodes", tableServices); max != 2 {
|
||||
t.Fatalf("bad max: %d", max)
|
||||
}
|
||||
}
|
||||
|
@ -1,188 +0,0 @@
|
||||
table=acl-auth-methods
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Name Lowercase=true
|
||||
|
||||
table=acl-binding-rules
|
||||
index=authmethod
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=AuthMethod Lowercase=true
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.UUIDFieldIndex Field=ID
|
||||
|
||||
table=acl-policies
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.UUIDFieldIndex Field=ID
|
||||
index=name unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Name Lowercase=true
|
||||
|
||||
table=acl-roles
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.UUIDFieldIndex Field=ID
|
||||
index=name unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Name Lowercase=true
|
||||
index=policies allow-missing
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.RolePoliciesIndex
|
||||
|
||||
table=acl-tokens
|
||||
index=accessor unique allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.UUIDFieldIndex Field=AccessorID
|
||||
index=authmethod allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=AuthMethod Lowercase=false
|
||||
index=expires-global allow-missing
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.TokenExpirationIndex LocalFilter=false
|
||||
index=expires-local allow-missing
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.TokenExpirationIndex LocalFilter=true
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=SecretID Lowercase=false
|
||||
index=local
|
||||
indexer=github.com/hashicorp/go-memdb.ConditionalIndex Conditional=github.com/hashicorp/consul/agent/consul/state.tokensTableSchema.func1
|
||||
index=needs-upgrade
|
||||
indexer=github.com/hashicorp/go-memdb.ConditionalIndex Conditional=github.com/hashicorp/consul/agent/consul/state.tokensTableSchema.func2
|
||||
index=policies allow-missing
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.TokenPoliciesIndex
|
||||
index=roles allow-missing
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.TokenRolesIndex
|
||||
|
||||
table=autopilot-config
|
||||
index=id unique allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.ConditionalIndex Conditional=github.com/hashicorp/consul/agent/consul/state.autopilotConfigTableSchema.func1
|
||||
|
||||
table=checks
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=CheckID Lowercase=true] AllowMissing=false
|
||||
index=node allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true
|
||||
index=node_service allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=ServiceID Lowercase=true] AllowMissing=false
|
||||
index=node_service_check allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/go-memdb.FieldSetIndex Field=ServiceID] AllowMissing=false
|
||||
index=service allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=ServiceName Lowercase=true
|
||||
index=status
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Status Lowercase=false
|
||||
|
||||
table=config-entries
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Kind Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=Name Lowercase=true] AllowMissing=false
|
||||
index=intention-legacy-id unique allow-missing
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.ServiceIntentionLegacyIDIndex uuidFieldIndex={}
|
||||
index=intention-source allow-missing
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.ServiceIntentionSourceIndex
|
||||
index=kind
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Kind Lowercase=true
|
||||
index=link allow-missing
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.ConfigEntryLinkIndex
|
||||
|
||||
table=connect-ca-builtin
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=ID Lowercase=false
|
||||
|
||||
table=connect-ca-config
|
||||
index=id unique allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.ConditionalIndex Conditional=github.com/hashicorp/consul/agent/consul/state.caConfigTableSchema.func1
|
||||
|
||||
table=connect-ca-roots
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=ID Lowercase=false
|
||||
|
||||
table=connect-intentions
|
||||
index=destination allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=DestinationNS Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=DestinationName Lowercase=true] AllowMissing=false
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.UUIDFieldIndex Field=ID
|
||||
index=source allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=SourceNS Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=SourceName Lowercase=true] AllowMissing=false
|
||||
index=source_destination unique allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=SourceNS Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=SourceName Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=DestinationNS Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=DestinationName Lowercase=true] AllowMissing=false
|
||||
|
||||
table=coordinates
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=Segment Lowercase=true] AllowMissing=true
|
||||
index=node
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true
|
||||
|
||||
table=federation-states
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Datacenter Lowercase=true
|
||||
|
||||
table=gateway-services
|
||||
index=gateway
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.ServiceNameIndex Field=Gateway
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/consul/agent/consul/state.ServiceNameIndex Field=Gateway, github.com/hashicorp/consul/agent/consul/state.ServiceNameIndex Field=Service, github.com/hashicorp/go-memdb.IntFieldIndex Field=Port] AllowMissing=false
|
||||
index=service allow-missing
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.ServiceNameIndex Field=Service
|
||||
|
||||
table=index
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Key Lowercase=true
|
||||
|
||||
table=kvs
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Key Lowercase=false
|
||||
index=session allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.UUIDFieldIndex Field=Session
|
||||
|
||||
table=mesh-topology
|
||||
index=downstream
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.ServiceNameIndex Field=Downstream
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/consul/agent/consul/state.ServiceNameIndex Field=Upstream, github.com/hashicorp/consul/agent/consul/state.ServiceNameIndex Field=Downstream] AllowMissing=false
|
||||
index=upstream allow-missing
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.ServiceNameIndex Field=Upstream
|
||||
|
||||
table=nodes
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true
|
||||
index=meta allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.StringMapFieldIndex Field=Meta Lowercase=false
|
||||
index=uuid unique allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.UUIDFieldIndex Field=ID
|
||||
|
||||
table=prepared-queries
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.UUIDFieldIndex Field=ID
|
||||
index=name unique allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Name Lowercase=true
|
||||
index=session allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.UUIDFieldIndex Field=Session
|
||||
index=template unique allow-missing
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.PreparedQueryIndex
|
||||
|
||||
table=services
|
||||
index=connect allow-missing
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.IndexConnectService
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/go-memdb.StringFieldIndex Field=ServiceID Lowercase=true] AllowMissing=false
|
||||
index=kind
|
||||
indexer=github.com/hashicorp/consul/agent/consul/state.IndexServiceKind
|
||||
index=node
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true
|
||||
index=service allow-missing
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=ServiceName Lowercase=true
|
||||
|
||||
table=session_checks
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/consul/agent/consul/state.CheckIDIndex, github.com/hashicorp/go-memdb.UUIDFieldIndex Field=Session] AllowMissing=false
|
||||
index=node_check
|
||||
indexer=github.com/hashicorp/go-memdb.CompoundIndex Indexes=[github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true, github.com/hashicorp/consul/agent/consul/state.CheckIDIndex] AllowMissing=false
|
||||
index=session
|
||||
indexer=github.com/hashicorp/go-memdb.UUIDFieldIndex Field=Session
|
||||
|
||||
table=sessions
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.UUIDFieldIndex Field=ID
|
||||
index=node
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Node Lowercase=true
|
||||
|
||||
table=system-metadata
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Key Lowercase=true
|
||||
|
||||
table=tombstones
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=Key Lowercase=false
|
||||
|
||||
table=usage
|
||||
index=id unique
|
||||
indexer=github.com/hashicorp/go-memdb.StringFieldIndex Field=ID Lowercase=true
|
||||
|
@ -70,7 +70,7 @@ func updateUsage(tx WriteTxn, changes Changes) error {
|
||||
switch change.Table {
|
||||
case "nodes":
|
||||
usageDeltas[change.Table] += delta
|
||||
case "services":
|
||||
case tableServices:
|
||||
svc := changeObject(change).(*structs.ServiceNode)
|
||||
usageDeltas[change.Table] += delta
|
||||
addEnterpriseServiceInstanceUsage(usageDeltas, change)
|
||||
@ -107,7 +107,8 @@ func updateUsage(tx WriteTxn, changes Changes) error {
|
||||
func updateServiceNameUsage(tx WriteTxn, usageDeltas map[string]int, serviceNameChanges map[structs.ServiceName]int) (map[structs.ServiceName]uniqueServiceState, error) {
|
||||
serviceStates := make(map[structs.ServiceName]uniqueServiceState, len(serviceNameChanges))
|
||||
for svc, delta := range serviceNameChanges {
|
||||
serviceIter, err := getWithTxn(tx, tableServices, "service", svc.Name, &svc.EnterpriseMeta)
|
||||
q := Query{Value: svc.Name, EnterpriseMeta: svc.EnterpriseMeta}
|
||||
serviceIter, err := tx.Get(tableServices, indexService, q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
462
agent/consul/subscribe_backend_test.go
Normal file
462
agent/consul/subscribe_backend_test.go
Normal file
@ -0,0 +1,462 @@
|
||||
package consul
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
gogrpc "google.golang.org/grpc"
|
||||
grpcresolver "google.golang.org/grpc/resolver"
|
||||
|
||||
grpc "github.com/hashicorp/consul/agent/grpc"
|
||||
"github.com/hashicorp/consul/agent/grpc/resolver"
|
||||
"github.com/hashicorp/consul/agent/router"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/proto/pbservice"
|
||||
"github.com/hashicorp/consul/proto/pbsubscribe"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
)
|
||||
|
||||
func TestSubscribeBackend_IntegrationWithServer_TLSEnabled(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
_, conf1 := testServerConfig(t)
|
||||
conf1.VerifyIncoming = true
|
||||
conf1.VerifyOutgoing = true
|
||||
conf1.RPCConfig.EnableStreaming = true
|
||||
configureTLS(conf1)
|
||||
server, err := newServer(t, conf1)
|
||||
require.NoError(t, err)
|
||||
defer server.Shutdown()
|
||||
|
||||
client, builder := newClientWithGRPCResolver(t, configureTLS, clientConfigVerifyOutgoing)
|
||||
|
||||
// Try to join
|
||||
testrpc.WaitForLeader(t, server.RPC, "dc1")
|
||||
joinLAN(t, client, server)
|
||||
testrpc.WaitForTestAgent(t, client.RPC, "dc1")
|
||||
|
||||
// Register a dummy node with our service on it.
|
||||
{
|
||||
req := &structs.RegisterRequest{
|
||||
Node: "node1",
|
||||
Address: "3.4.5.6",
|
||||
Datacenter: "dc1",
|
||||
Service: &structs.NodeService{
|
||||
ID: "redis1",
|
||||
Service: "redis",
|
||||
Address: "3.4.5.6",
|
||||
Port: 8080,
|
||||
},
|
||||
}
|
||||
var out struct{}
|
||||
require.NoError(t, server.RPC("Catalog.Register", &req, &out))
|
||||
}
|
||||
|
||||
// Start a Subscribe call to our streaming endpoint from the client.
|
||||
{
|
||||
pool := grpc.NewClientConnPool(builder, grpc.TLSWrapper(client.tlsConfigurator.OutgoingRPCWrapper()), client.tlsConfigurator.UseTLS)
|
||||
conn, err := pool.ClientConn("dc1")
|
||||
require.NoError(t, err)
|
||||
|
||||
streamClient := pbsubscribe.NewStateChangeSubscriptionClient(conn)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
req := &pbsubscribe.SubscribeRequest{Topic: pbsubscribe.Topic_ServiceHealth, Key: "redis"}
|
||||
streamHandle, err := streamClient.Subscribe(ctx, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Start a goroutine to read updates off the pbsubscribe.
|
||||
eventCh := make(chan *pbsubscribe.Event, 0)
|
||||
go receiveSubscribeEvents(t, eventCh, streamHandle)
|
||||
|
||||
var snapshotEvents []*pbsubscribe.Event
|
||||
for i := 0; i < 2; i++ {
|
||||
select {
|
||||
case event := <-eventCh:
|
||||
snapshotEvents = append(snapshotEvents, event)
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatalf("did not receive events past %d", len(snapshotEvents))
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure the snapshot events come back with no issues.
|
||||
require.Len(t, snapshotEvents, 2)
|
||||
}
|
||||
|
||||
// Start a Subscribe call to our streaming endpoint from the server's loopback client.
|
||||
{
|
||||
|
||||
pool := grpc.NewClientConnPool(builder, grpc.TLSWrapper(client.tlsConfigurator.OutgoingRPCWrapper()), client.tlsConfigurator.UseTLS)
|
||||
conn, err := pool.ClientConn("dc1")
|
||||
require.NoError(t, err)
|
||||
|
||||
retryFailedConn(t, conn)
|
||||
|
||||
streamClient := pbsubscribe.NewStateChangeSubscriptionClient(conn)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
|
||||
defer cancel()
|
||||
req := &pbsubscribe.SubscribeRequest{Topic: pbsubscribe.Topic_ServiceHealth, Key: "redis"}
|
||||
streamHandle, err := streamClient.Subscribe(ctx, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Start a goroutine to read updates off the pbsubscribe.
|
||||
eventCh := make(chan *pbsubscribe.Event, 0)
|
||||
go receiveSubscribeEvents(t, eventCh, streamHandle)
|
||||
|
||||
var snapshotEvents []*pbsubscribe.Event
|
||||
for i := 0; i < 2; i++ {
|
||||
select {
|
||||
case event := <-eventCh:
|
||||
snapshotEvents = append(snapshotEvents, event)
|
||||
case <-time.After(3 * time.Second):
|
||||
t.Fatalf("did not receive events past %d", len(snapshotEvents))
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure the snapshot events come back with no issues.
|
||||
require.Len(t, snapshotEvents, 2)
|
||||
}
|
||||
}
|
||||
|
||||
// receiveSubscribeEvents and send them to the channel.
|
||||
func receiveSubscribeEvents(t *testing.T, ch chan *pbsubscribe.Event, handle pbsubscribe.StateChangeSubscription_SubscribeClient) {
|
||||
for {
|
||||
event, err := handle.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "context deadline exceeded") ||
|
||||
strings.Contains(err.Error(), "context canceled") {
|
||||
break
|
||||
}
|
||||
t.Log(err)
|
||||
}
|
||||
ch <- event
|
||||
}
|
||||
}
|
||||
|
||||
func TestSubscribeBackend_IntegrationWithServer_TLSReload(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// Set up a server with initially bad certificates.
|
||||
_, conf1 := testServerConfig(t)
|
||||
conf1.VerifyIncoming = true
|
||||
conf1.VerifyOutgoing = true
|
||||
conf1.CAFile = "../../test/ca/root.cer"
|
||||
conf1.CertFile = "../../test/key/ssl-cert-snakeoil.pem"
|
||||
conf1.KeyFile = "../../test/key/ssl-cert-snakeoil.key"
|
||||
conf1.RPCConfig.EnableStreaming = true
|
||||
|
||||
server, err := newServer(t, conf1)
|
||||
require.NoError(t, err)
|
||||
defer server.Shutdown()
|
||||
|
||||
// Set up a client with valid certs and verify_outgoing = true
|
||||
client, builder := newClientWithGRPCResolver(t, configureTLS, clientConfigVerifyOutgoing)
|
||||
|
||||
testrpc.WaitForLeader(t, server.RPC, "dc1")
|
||||
|
||||
// Subscribe calls should fail initially
|
||||
joinLAN(t, client, server)
|
||||
|
||||
pool := grpc.NewClientConnPool(builder, grpc.TLSWrapper(client.tlsConfigurator.OutgoingRPCWrapper()), client.tlsConfigurator.UseTLS)
|
||||
conn, err := pool.ClientConn("dc1")
|
||||
require.NoError(t, err)
|
||||
|
||||
streamClient := pbsubscribe.NewStateChangeSubscriptionClient(conn)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
req := &pbsubscribe.SubscribeRequest{Topic: pbsubscribe.Topic_ServiceHealth, Key: "redis"}
|
||||
_, err = streamClient.Subscribe(ctx, req)
|
||||
require.Error(t, err)
|
||||
|
||||
// Reload the server with valid certs
|
||||
newConf := server.config.ToTLSUtilConfig()
|
||||
newConf.CertFile = "../../test/key/ourdomain.cer"
|
||||
newConf.KeyFile = "../../test/key/ourdomain.key"
|
||||
server.tlsConfigurator.Update(newConf)
|
||||
|
||||
// Try the subscribe call again
|
||||
retryFailedConn(t, conn)
|
||||
|
||||
streamClient = pbsubscribe.NewStateChangeSubscriptionClient(conn)
|
||||
_, err = streamClient.Subscribe(ctx, req)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func clientConfigVerifyOutgoing(config *Config) {
|
||||
config.VerifyOutgoing = true
|
||||
}
|
||||
|
||||
// retryFailedConn forces the ClientConn to reset its backoff timer and retry the connection,
|
||||
// to simulate the client eventually retrying after the initial failure. This is used both to simulate
|
||||
// retrying after an expected failure as well as to avoid flakiness when running many tests in parallel.
|
||||
func retryFailedConn(t *testing.T, conn *gogrpc.ClientConn) {
|
||||
state := conn.GetState()
|
||||
if state.String() != "TRANSIENT_FAILURE" {
|
||||
return
|
||||
}
|
||||
|
||||
// If the connection has failed, retry and wait for a state change.
|
||||
conn.ResetConnectBackoff()
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
require.True(t, conn.WaitForStateChange(ctx, state))
|
||||
}
|
||||
|
||||
func TestSubscribeBackend_IntegrationWithServer_DeliversAllMessages(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for -short run")
|
||||
}
|
||||
// This is a fuzz/probabilistic test to try to provoke streaming into dropping
|
||||
// messages. There is a bug in the initial implementation that should make
|
||||
// this fail. While we can't be certain a pass means it's correct, it is
|
||||
// useful for finding bugs in our concurrency design.
|
||||
|
||||
// The issue is that when updates are coming in fast such that updates occur
|
||||
// in between us making the snapshot and beginning the stream updates, we
|
||||
// shouldn't miss anything.
|
||||
|
||||
// To test this, we will run a background goroutine that will write updates as
|
||||
// fast as possible while we then try to stream the results and ensure that we
|
||||
// see every change. We'll make the updates monotonically increasing so we can
|
||||
// easily tell if we missed one.
|
||||
|
||||
_, server := testServerWithConfig(t, func(c *Config) {
|
||||
c.Datacenter = "dc1"
|
||||
c.Bootstrap = true
|
||||
c.RPCConfig.EnableStreaming = true
|
||||
})
|
||||
defer server.Shutdown()
|
||||
codec := rpcClient(t, server)
|
||||
defer codec.Close()
|
||||
|
||||
client, builder := newClientWithGRPCResolver(t)
|
||||
|
||||
// Try to join
|
||||
testrpc.WaitForLeader(t, server.RPC, "dc1")
|
||||
joinLAN(t, client, server)
|
||||
testrpc.WaitForTestAgent(t, client.RPC, "dc1")
|
||||
|
||||
// Register a whole bunch of service instances so that the initial snapshot on
|
||||
// subscribe is big enough to take a bit of time to load giving more
|
||||
// opportunity for missed updates if there is a bug.
|
||||
for i := 0; i < 1000; i++ {
|
||||
req := &structs.RegisterRequest{
|
||||
Node: fmt.Sprintf("node-redis-%03d", i),
|
||||
Address: "3.4.5.6",
|
||||
Datacenter: "dc1",
|
||||
Service: &structs.NodeService{
|
||||
ID: fmt.Sprintf("redis-%03d", i),
|
||||
Service: "redis",
|
||||
Port: 11211,
|
||||
},
|
||||
}
|
||||
var out struct{}
|
||||
require.NoError(t, server.RPC("Catalog.Register", &req, &out))
|
||||
}
|
||||
|
||||
// Start background writer
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
go func() {
|
||||
// Update the registration with a monotonically increasing port as fast as
|
||||
// we can.
|
||||
req := &structs.RegisterRequest{
|
||||
Node: "node1",
|
||||
Address: "3.4.5.6",
|
||||
Datacenter: "dc1",
|
||||
Service: &structs.NodeService{
|
||||
ID: "redis-canary",
|
||||
Service: "redis",
|
||||
Port: 0,
|
||||
},
|
||||
}
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
var out struct{}
|
||||
require.NoError(t, server.RPC("Catalog.Register", &req, &out))
|
||||
req.Service.Port++
|
||||
if req.Service.Port > 100 {
|
||||
return
|
||||
}
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
}()
|
||||
|
||||
pool := grpc.NewClientConnPool(builder, grpc.TLSWrapper(client.tlsConfigurator.OutgoingRPCWrapper()), client.tlsConfigurator.UseTLS)
|
||||
conn, err := pool.ClientConn("dc1")
|
||||
require.NoError(t, err)
|
||||
|
||||
streamClient := pbsubscribe.NewStateChangeSubscriptionClient(conn)
|
||||
|
||||
// Now start a whole bunch of streamers in parallel to maximise chance of
|
||||
// catching a race.
|
||||
n := 5
|
||||
var wg sync.WaitGroup
|
||||
var updateCount uint64
|
||||
// Buffered error chan so that workers can exit and terminate wg without
|
||||
// blocking on send. We collect errors this way since t isn't thread safe.
|
||||
errCh := make(chan error, n)
|
||||
for i := 0; i < n; i++ {
|
||||
i := i
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
verifyMonotonicStreamUpdates(ctx, t, streamClient, i, &updateCount, errCh)
|
||||
}()
|
||||
}
|
||||
|
||||
// Wait until all subscribers have verified the first bunch of updates all got
|
||||
// delivered.
|
||||
wg.Wait()
|
||||
|
||||
close(errCh)
|
||||
|
||||
// Require that none of them errored. Since we closed the chan above this loop
|
||||
// should terminate immediately if no errors were buffered.
|
||||
for err := range errCh {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Sanity check that at least some non-snapshot messages were delivered. We
|
||||
// can't know exactly how many because it's timing dependent based on when
|
||||
// each subscribers snapshot occurs.
|
||||
require.True(t, atomic.LoadUint64(&updateCount) > 0,
|
||||
"at least some of the subscribers should have received non-snapshot updates")
|
||||
}
|
||||
|
||||
func newClientWithGRPCResolver(t *testing.T, ops ...func(*Config)) (*Client, *resolver.ServerResolverBuilder) {
|
||||
builder := resolver.NewServerResolverBuilder(resolver.Config{Scheme: t.Name()})
|
||||
registerWithGRPC(builder)
|
||||
|
||||
_, config := testClientConfig(t)
|
||||
for _, op := range ops {
|
||||
op(config)
|
||||
}
|
||||
|
||||
deps := newDefaultDeps(t, config)
|
||||
deps.Router = router.NewRouter(
|
||||
deps.Logger,
|
||||
config.Datacenter,
|
||||
fmt.Sprintf("%s.%s", config.NodeName, config.Datacenter),
|
||||
builder)
|
||||
|
||||
client, err := NewClient(config, deps)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
client.Shutdown()
|
||||
})
|
||||
return client, builder
|
||||
}
|
||||
|
||||
var grpcRegisterLock sync.Mutex
|
||||
|
||||
// registerWithGRPC registers the grpc/resolver.Builder as a grpc/resolver.
|
||||
// This function exists to synchronize registrations with a lock.
|
||||
// grpc/resolver.Register expects all registration to happen at init and does
|
||||
// not allow for concurrent registration. This function exists to support
|
||||
// parallel testing.
|
||||
func registerWithGRPC(b grpcresolver.Builder) {
|
||||
grpcRegisterLock.Lock()
|
||||
defer grpcRegisterLock.Unlock()
|
||||
grpcresolver.Register(b)
|
||||
}
|
||||
|
||||
type testLogger interface {
|
||||
Logf(format string, args ...interface{})
|
||||
}
|
||||
|
||||
func verifyMonotonicStreamUpdates(ctx context.Context, logger testLogger, client pbsubscribe.StateChangeSubscriptionClient, i int, updateCount *uint64, errCh chan<- error) {
|
||||
req := &pbsubscribe.SubscribeRequest{Topic: pbsubscribe.Topic_ServiceHealth, Key: "redis"}
|
||||
streamHandle, err := client.Subscribe(ctx, req)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "context deadline exceeded") ||
|
||||
strings.Contains(err.Error(), "context canceled") {
|
||||
logger.Logf("subscriber %05d: context cancelled before loop")
|
||||
return
|
||||
}
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
|
||||
snapshotDone := false
|
||||
expectPort := int32(0)
|
||||
for {
|
||||
event, err := streamHandle.Recv()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "context deadline exceeded") ||
|
||||
strings.Contains(err.Error(), "context canceled") {
|
||||
break
|
||||
}
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
|
||||
switch {
|
||||
case event.GetEndOfSnapshot():
|
||||
snapshotDone = true
|
||||
logger.Logf("subscriber %05d: snapshot done, expect next port to be %d", i, expectPort)
|
||||
case snapshotDone:
|
||||
// Verify we get all updates in order
|
||||
svc, err := svcOrErr(event)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if expectPort != svc.Port {
|
||||
errCh <- fmt.Errorf("subscriber %05d: missed %d update(s)!", i, svc.Port-expectPort)
|
||||
return
|
||||
}
|
||||
atomic.AddUint64(updateCount, 1)
|
||||
logger.Logf("subscriber %05d: got event with correct port=%d", i, expectPort)
|
||||
expectPort++
|
||||
default:
|
||||
// This is a snapshot update. Check if it's an update for the canary
|
||||
// instance that got applied before our snapshot was sent (likely)
|
||||
svc, err := svcOrErr(event)
|
||||
if err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
if svc.ID == "redis-canary" {
|
||||
// Update the expected port we see in the next update to be one more
|
||||
// than the port in the snapshot.
|
||||
expectPort = svc.Port + 1
|
||||
logger.Logf("subscriber %05d: saw canary in snapshot with port %d", i, svc.Port)
|
||||
}
|
||||
}
|
||||
if expectPort > 100 {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func svcOrErr(event *pbsubscribe.Event) (*pbservice.NodeService, error) {
|
||||
health := event.GetServiceHealth()
|
||||
if health == nil {
|
||||
return nil, fmt.Errorf("not a health event: %#v", event)
|
||||
}
|
||||
csn := health.CheckServiceNode
|
||||
if csn == nil {
|
||||
return nil, fmt.Errorf("nil CSN: %#v", event)
|
||||
}
|
||||
if csn.Service == nil {
|
||||
return nil, fmt.Errorf("nil service: %#v", event)
|
||||
}
|
||||
return csn.Service, nil
|
||||
}
|
@ -1171,12 +1171,16 @@ func (d *DNSServer) trimDNSResponse(cfg *dnsConfig, network string, req, resp *d
|
||||
|
||||
// lookupServiceNodes returns nodes with a given service.
|
||||
func (d *DNSServer) lookupServiceNodes(cfg *dnsConfig, lookup serviceLookup) (structs.IndexedCheckServiceNodes, error) {
|
||||
serviceTags := []string{}
|
||||
if lookup.Tag != "" {
|
||||
serviceTags = []string{lookup.Tag}
|
||||
}
|
||||
args := structs.ServiceSpecificRequest{
|
||||
Connect: lookup.Connect,
|
||||
Ingress: lookup.Ingress,
|
||||
Datacenter: lookup.Datacenter,
|
||||
ServiceName: lookup.Service,
|
||||
ServiceTags: []string{lookup.Tag},
|
||||
ServiceTags: serviceTags,
|
||||
TagFilter: lookup.Tag != "",
|
||||
QueryOptions: structs.QueryOptions{
|
||||
Token: d.agent.tokens.UserToken(),
|
||||
|
@ -3016,72 +3016,95 @@ func TestDNS_CaseInsensitiveServiceLookup(t *testing.T) {
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
a := NewTestAgent(t, "")
|
||||
defer a.Shutdown()
|
||||
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||
|
||||
// Register a node with a service.
|
||||
{
|
||||
args := &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "Db",
|
||||
Tags: []string{"Primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
||||
var out struct{}
|
||||
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
config string
|
||||
}{
|
||||
// UDP + EDNS
|
||||
{"normal", ""},
|
||||
{"cache", `dns_config{ allow_stale=true, max_stale="3h", use_cache=true, "cache_max_age"="3h"}`},
|
||||
{"cache-with-streaming", `
|
||||
rpc{
|
||||
enable_streaming=true
|
||||
}
|
||||
use_streaming_backend=true
|
||||
dns_config{ allow_stale=true, max_stale="3h", use_cache=true, "cache_max_age"="3h"}
|
||||
`},
|
||||
}
|
||||
for _, tst := range tests {
|
||||
t.Run(fmt.Sprintf("A lookup %v", tst.name), func(t *testing.T) {
|
||||
a := NewTestAgent(t, tst.config)
|
||||
defer a.Shutdown()
|
||||
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||
|
||||
// Register an equivalent prepared query, as well as a name.
|
||||
var id string
|
||||
{
|
||||
args := &structs.PreparedQueryRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.PreparedQueryCreate,
|
||||
Query: &structs.PreparedQuery{
|
||||
Name: "somequery",
|
||||
Service: structs.ServiceQuery{
|
||||
Service: "db",
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := a.RPC("PreparedQuery.Apply", args, &id); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
}
|
||||
// Register a node with a service.
|
||||
{
|
||||
args := &structs.RegisterRequest{
|
||||
Datacenter: "dc1",
|
||||
Node: "foo",
|
||||
Address: "127.0.0.1",
|
||||
Service: &structs.NodeService{
|
||||
Service: "Db",
|
||||
Tags: []string{"Primary"},
|
||||
Port: 12345,
|
||||
},
|
||||
}
|
||||
|
||||
// Try some variations to make sure case doesn't matter.
|
||||
questions := []string{
|
||||
"primary.db.service.consul.",
|
||||
"pRIMARY.dB.service.consul.",
|
||||
"PRIMARY.dB.service.consul.",
|
||||
"db.service.consul.",
|
||||
"DB.service.consul.",
|
||||
"Db.service.consul.",
|
||||
"somequery.query.consul.",
|
||||
"SomeQuery.query.consul.",
|
||||
"SOMEQUERY.query.consul.",
|
||||
}
|
||||
for _, question := range questions {
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion(question, dns.TypeSRV)
|
||||
var out struct{}
|
||||
if err := a.RPC("Catalog.Register", args, &out); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
c := new(dns.Client)
|
||||
in, _, err := c.Exchange(m, a.DNSAddr())
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
// Register an equivalent prepared query, as well as a name.
|
||||
var id string
|
||||
{
|
||||
args := &structs.PreparedQueryRequest{
|
||||
Datacenter: "dc1",
|
||||
Op: structs.PreparedQueryCreate,
|
||||
Query: &structs.PreparedQuery{
|
||||
Name: "somequery",
|
||||
Service: structs.ServiceQuery{
|
||||
Service: "db",
|
||||
},
|
||||
},
|
||||
}
|
||||
if err := a.RPC("PreparedQuery.Apply", args, &id); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(in.Answer) != 1 {
|
||||
t.Fatalf("empty lookup: %#v", in)
|
||||
}
|
||||
// Try some variations to make sure case doesn't matter.
|
||||
questions := []string{
|
||||
"primary.Db.service.consul.",
|
||||
"primary.db.service.consul.",
|
||||
"pRIMARY.dB.service.consul.",
|
||||
"PRIMARY.dB.service.consul.",
|
||||
"db.service.consul.",
|
||||
"DB.service.consul.",
|
||||
"Db.service.consul.",
|
||||
"somequery.query.consul.",
|
||||
"SomeQuery.query.consul.",
|
||||
"SOMEQUERY.query.consul.",
|
||||
}
|
||||
|
||||
for _, question := range questions {
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion(question, dns.TypeSRV)
|
||||
|
||||
c := new(dns.Client)
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
in, _, err := c.Exchange(m, a.DNSAddr())
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if len(in.Answer) != 1 {
|
||||
t.Fatalf("question %v, empty lookup: %#v", question, in)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user