Merge branch 'main' of ssh://github.com/hashicorp/consul

Tu Nguyen 2022-09-06 07:49:07 -07:00
commit f6b9ac3cc0
313 changed files with 10803 additions and 4832 deletions

.changelog/11742.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
api: Add filtering support to Catalog's List Services (v1/catalog/services)
```
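
As a quick illustration, a minimal sketch of exercising the new filter through the Go API client (the `Filter` field on `api.QueryOptions` already exists; the `redis` service name is a hypothetical example):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// The filter is a bexpr expression, now evaluated server-side against
	// the catalog's ServiceNode entries (see Catalog.ListServices below).
	services, _, err := client.Catalog().Services(&api.QueryOptions{
		Filter: "ServiceName == redis", // hypothetical service name
	})
	if err != nil {
		log.Fatal(err)
	}
	for name, tags := range services {
		fmt.Println(name, tags)
	}
}
```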

.changelog/13493.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
cli: Fix the `consul kv get` command so that the `-keys` and `-recurse` flags can be used together
```

.changelog/13998.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
connect: Expose new tracing configuration on Envoy
```

.changelog/14034.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
cli: When launching a sidecar proxy with `consul connect envoy` or `consul connect proxy`, the `-sidecar-for` service ID argument is now treated as case-insensitive.
```

.changelog/14238.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
envoy: adds additional Envoy outlier ejection parameters to passive health check configurations.
```
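
For orientation, a sketch of where this surfaces in `agent/structs`, mirroring the `EnforcingConsecutive5xx` field exercised in the `TestConfigEntry_ResolveServiceConfig_Upstreams` changes further down; the values are illustrative only:

```go
// Assumes github.com/hashicorp/consul/agent/structs from this repo.
func examplePassiveHealthCheck() *structs.UpstreamConfig {
	enforcing := uint32(60) // percentage fed to Envoy's outlier detection
	return &structs.UpstreamConfig{
		PassiveHealthCheck: &structs.PassiveHealthCheck{
			Interval:                10, // matches the test fixture below
			MaxFailures:             2,
			EnforcingConsecutive5xx: &enforcing, // new outlier ejection knob
		},
	}
}
```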

.changelog/14269.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
connect: Fix issue where `auto_config` and `auto_encrypt` could unintentionally enable TLS for gRPC xDS connections.
```

.changelog/14290.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
envoy: validate name before deleting proxy default configurations.
```

.changelog/14343.txt (new file)

@@ -0,0 +1,4 @@
```release-note:feature
ui: Use withCredentials for all HTTP API requests
```

.changelog/14364.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bugfix
peering: Fix issue preventing deletion and recreation of peerings in TERMINATED state.
```

.changelog/14373.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
xds: Set `max_ejection_percent` on Envoy's outlier detection to 100% for peered services.
```

.changelog/14378.txt (new file)

@@ -0,0 +1,5 @@
```release-note:bug
api: Fix a breaking change caused by renaming `QueryDatacenterOptions` to
`QueryFailoverOptions`. This adds `QueryDatacenterOptions` back as an alias to
`QueryFailoverOptions` and marks it as deprecated.
```
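
The natural reading of this is a Go type alias in the `api` package; a minimal sketch of that assumption:

```go
// Deprecated: QueryDatacenterOptions is kept only for backwards
// compatibility; new code should use QueryFailoverOptions.
type QueryDatacenterOptions = QueryFailoverOptions
```

With the alias in place, pre-rename callers that reference `api.QueryDatacenterOptions` keep compiling unchanged.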

.changelog/14396.txt (new file)

@@ -0,0 +1,3 @@
```release-note:feature
peering: Add support to failover to services running on cluster peers.
```
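
A sketch of what this looks like on a service-resolver config entry, mirroring the `testcase_Failover_Targets` fixture added near the end of this commit (the peer name `cluster-01` comes from that fixture):

```go
// Assumes github.com/hashicorp/consul/agent/structs from this repo.
var resolver = &structs.ServiceResolverConfigEntry{
	Kind: "service-resolver",
	Name: "main",
	Failover: map[string]structs.ServiceResolverFailover{
		"*": {
			Targets: []structs.ServiceResolverFailoverTarget{
				{Datacenter: "dc3"},   // classic cross-datacenter failover
				{Service: "new-main"}, // failover to a different service
				{Peer: "cluster-01"},  // new: failover to a cluster peer
			},
		},
	},
}
```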

.changelog/14423.txt (new file)

@@ -0,0 +1,3 @@
```release-note:feature
cli: Adds new subcommands for `peering` workflows. Refer to the [CLI docs](https://www.consul.io/commands/peering) for more information.
```

.changelog/14429.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
connect: Fixed an issue where intermediate certificates could build up in the root CA because they were never being pruned after expiring.
```

.changelog/14433.txt (new file)

@@ -0,0 +1,3 @@
```release-note:bug
checks: If set, use proxy address for automatically added sidecar check instead of service address.
```

.changelog/_2271.txt (new file)

@@ -0,0 +1,3 @@
```release-note:improvement
snapshot agent: **(Enterprise only)** Add support for path-based addressing when using s3 backend.
```

@@ -816,7 +816,7 @@ jobs:
       # Get go binary from workspace
       - attach_workspace:
           at: .
-      # Build the consul-dev image from the already built binary
+      # Build the consul:local image from the already built binary
       - run:
          command: |
            sudo rm -rf /usr/local/go
@@ -887,8 +887,8 @@ jobs:
       - attach_workspace:
           at: .
       - run: *install-gotestsum
-      # Build the consul-dev image from the already built binary
-      - run: docker build -t consul-dev -f ./build-support/docker/Consul-Dev.dockerfile .
+      # Build the consul:local image from the already built binary
+      - run: docker build -t consul:local -f ./build-support/docker/Consul-Dev.dockerfile .
       - run:
          name: Envoy Integration Tests
          command: |
@@ -902,6 +902,7 @@ jobs:
            GOTESTSUM_JUNITFILE: /tmp/test-results/results.xml
            GOTESTSUM_FORMAT: standard-verbose
            COMPOSE_INTERACTIVE_NO_CLI: 1
+           LAMBDA_TESTS_ENABLED: "true"
            # tput complains if this isn't set to something.
            TERM: ansi
      - store_artifacts:

@@ -16,7 +16,7 @@ jobs:
   backport:
     if: github.event.pull_request.merged
     runs-on: ubuntu-latest
-    container: hashicorpdev/backport-assistant:0.2.3
+    container: hashicorpdev/backport-assistant:0.2.5
     steps:
       - name: Run Backport Assistant for stable-website
         run: |
@@ -24,6 +24,7 @@ jobs:
         env:
           BACKPORT_LABEL_REGEXP: "type/docs-(?P<target>cherrypick)"
           BACKPORT_TARGET_TEMPLATE: "stable-website"
+          BACKPORT_MERGE_COMMIT: true
           GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
       - name: Backport changes to latest release branch
         run: |

@@ -8,6 +8,8 @@ linters:
   - ineffassign
   - unparam
   - forbidigo
+  - gomodguard
+  - depguard

 issues:
   # Disable the default exclude list so that all excludes are explicitly
@@ -75,6 +77,30 @@ linters-settings:
     # Exclude godoc examples from forbidigo checks.
     # Default: true
     exclude_godoc_examples: false
+  gomodguard:
+    blocked:
+      # List of blocked modules.
+      modules:
+        # Blocked module.
+        - github.com/hashicorp/net-rpc-msgpackrpc:
+            recommendations:
+              - github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc
+        - github.com/hashicorp/go-msgpack:
+            recommendations:
+              - github.com/hashicorp/consul-net-rpc/go-msgpack
+  depguard:
+    list-type: denylist
+    include-go-root: true
+    # A list of packages for the list type specified.
+    # Default: []
+    packages:
+      - net/rpc
+    # A list of packages for the list type specified.
+    # Specify an error message to output when a denied package is used.
+    # Default: []
+    packages-with-error-message:
+      - net/rpc: 'only use forked copy in github.com/hashicorp/consul-net-rpc/net/rpc'

 run:
   timeout: 10m

@@ -16,6 +16,7 @@ PROTOC_GO_INJECT_TAG_VERSION='v1.3.0'
 GOTAGS ?=
 GOPATH=$(shell go env GOPATH)
+GOARCH?=$(shell go env GOARCH)
 MAIN_GOPATH=$(shell go env GOPATH | cut -d: -f1)

 export PATH := $(PWD)/bin:$(GOPATH)/bin:$(PATH)
@@ -129,7 +130,7 @@ export GOLDFLAGS
 # Allow skipping docker build during integration tests in CI since we already
 # have a built binary
-ENVOY_INTEG_DEPS?=dev-docker
+ENVOY_INTEG_DEPS?=docker-envoy-integ
 ifdef SKIP_DOCKER_BUILD
 ENVOY_INTEG_DEPS=noop
 endif
@@ -152,7 +153,28 @@ dev-docker: linux
 	@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
 	@echo "Building Consul Development container - $(CONSUL_DEV_IMAGE)"
 	# 'consul:local' tag is needed to run the integration tests
-	@DOCKER_DEFAULT_PLATFORM=linux/amd64 docker build $(NOCACHE) $(QUIET) -t '$(CONSUL_DEV_IMAGE)' -t 'consul:local' --build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) $(CURDIR)/pkg/bin/linux_amd64 -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
+	@docker buildx use default && docker buildx build -t 'consul:local' \
+		--platform linux/$(GOARCH) \
+		--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
+		--load \
+		-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/
+
+check-remote-dev-image-env:
+ifndef REMOTE_DEV_IMAGE
+	$(error REMOTE_DEV_IMAGE is undefined: set this image to <your_docker_repo>/<your_docker_image>:<image_tag>, e.g. hashicorp/consul-k8s-dev:latest)
+endif
+
+remote-docker: check-remote-dev-image-env
+	$(MAKE) GOARCH=amd64 linux
+	$(MAKE) GOARCH=arm64 linux
+	@echo "Pulling consul container image - $(CONSUL_IMAGE_VERSION)"
+	@docker pull consul:$(CONSUL_IMAGE_VERSION) >/dev/null
+	@echo "Building and Pushing Consul Development container - $(REMOTE_DEV_IMAGE)"
+	@docker buildx use default && docker buildx build -t '$(REMOTE_DEV_IMAGE)' \
+		--platform linux/amd64,linux/arm64 \
+		--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
+		--push \
+		-f $(CURDIR)/build-support/docker/Consul-Dev-Multiarch.dockerfile $(CURDIR)/pkg/bin/

 # In CircleCI, the linux binary will be attached from a previous step at bin/. This make target
 # should only run in CI and not locally.
@@ -174,10 +196,10 @@ ifeq ($(CIRCLE_BRANCH), main)
 	@docker push $(CI_DEV_DOCKER_NAMESPACE)/$(CI_DEV_DOCKER_IMAGE_NAME):latest
 endif

-# linux builds a linux binary independent of the source platform
+# linux builds a linux binary compatible with the source platform
 linux:
-	@mkdir -p ./pkg/bin/linux_amd64
-	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./pkg/bin/linux_amd64 -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"
+	@mkdir -p ./pkg/bin/linux_$(GOARCH)
+	CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -o ./pkg/bin/linux_$(GOARCH) -ldflags "$(GOLDFLAGS)" -tags "$(GOTAGS)"

 # dist builds binaries for all platforms and packages them for distribution
 dist:
@@ -324,8 +346,22 @@ consul-docker: go-build-image
 ui-docker: ui-build-image
 	@$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui

+# Build image used to run integration tests locally.
+docker-envoy-integ:
+	$(MAKE) GOARCH=amd64 linux
+	docker build \
+		--platform linux/amd64 $(NOCACHE) $(QUIET) \
+		-t 'consul:local' \
+		--build-arg CONSUL_IMAGE_VERSION=$(CONSUL_IMAGE_VERSION) \
+		$(CURDIR)/pkg/bin/linux_amd64 \
+		-f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile
+
+# Run integration tests.
+# Use GO_TEST_FLAGS to run specific tests:
+#    make test-envoy-integ GO_TEST_FLAGS="-run TestEnvoy/case-basic"
+# NOTE: Always uses amd64 images, even when running on M1 macs, to match CI/CD environment.
 test-envoy-integ: $(ENVOY_INTEG_DEPS)
-	@go test -v -timeout=30m -tags integration ./test/integration/connect/envoy
+	@go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy

 .PHONY: test-compat-integ
 test-compat-integ: dev-docker

@@ -3764,7 +3764,7 @@ func testAgent_RegisterService_TranslateKeys(t *testing.T, extraHCL string) {
 			fmt.Println("TCP Check:= ", v)
 		}
 		if hasNoCorrectTCPCheck {
-			t.Fatalf("Did not find the expected TCP Healtcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs)
+			t.Fatalf("Did not find the expected TCP Healthcheck '%s' in %#v ", tt.expectedTCPCheckStart, a.checkTCPs)
 		}
 		require.Equal(t, sidecarSvc, gotSidecar)
 	})

@@ -2531,10 +2531,9 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error
 		return c, errors.New("verify_server_hostname is only valid in the tls.internal_rpc stanza")
 	}

-	// TLS is only enabled on the gRPC listener if there's an HTTPS port configured
-	// for historic and backwards-compatibility reasons.
-	if rt.HTTPSPort <= 0 && (t.GRPC != TLSProtocolConfig{} && t.GRPCModifiedByDeprecatedConfig == nil) {
-		b.warn("tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)")
+	// And UseAutoCert right now only applies to external gRPC interface.
+	if t.Defaults.UseAutoCert != nil || t.HTTPS.UseAutoCert != nil || t.InternalRPC.UseAutoCert != nil {
+		return c, errors.New("use_auto_cert is only valid in the tls.grpc stanza")
 	}

 	defaultTLSMinVersion := b.tlsVersion("tls.defaults.tls_min_version", t.Defaults.TLSMinVersion)
@@ -2591,6 +2590,7 @@ func (b *builder) buildTLSConfig(rt RuntimeConfig, t TLS) (tlsutil.Config, error
 	mapCommon("https", t.HTTPS, &c.HTTPS)
 	mapCommon("grpc", t.GRPC, &c.GRPC)
+	c.GRPC.UseAutoCert = boolValWithDefault(t.GRPC.UseAutoCert, false)

 	c.ServerName = rt.ServerName
 	c.NodeName = rt.NodeName

@@ -867,6 +867,7 @@ type TLSProtocolConfig struct {
 	VerifyIncoming       *bool `mapstructure:"verify_incoming"`
 	VerifyOutgoing       *bool `mapstructure:"verify_outgoing"`
 	VerifyServerHostname *bool `mapstructure:"verify_server_hostname"`
+	UseAutoCert          *bool `mapstructure:"use_auto_cert"`
 }

 type TLS struct {

@@ -5516,7 +5516,70 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
 		},
 	})
 	run(t, testCase{
-		desc: "tls.grpc without ports.https",
+		desc: "tls.grpc.use_auto_cert defaults to false",
+		args: []string{
+			`-data-dir=` + dataDir,
+		},
+		json: []string{`
+			{
+				"tls": {
+					"grpc": {}
+				}
+			}
+		`},
+		hcl: []string{`
+			tls {
+				grpc {}
+			}
+		`},
+		expected: func(rt *RuntimeConfig) {
+			rt.DataDir = dataDir
+			rt.TLS.Domain = "consul."
+			rt.TLS.NodeName = "thehostname"
+			rt.TLS.GRPC.UseAutoCert = false
+		},
+	})
+	run(t, testCase{
+		desc: "tls.grpc.use_auto_cert defaults to false (II)",
+		args: []string{
+			`-data-dir=` + dataDir,
+		},
+		json: []string{`
+			{
+				"tls": {}
+			}
+		`},
+		hcl: []string{`
+			tls {
+			}
+		`},
+		expected: func(rt *RuntimeConfig) {
+			rt.DataDir = dataDir
+			rt.TLS.Domain = "consul."
+			rt.TLS.NodeName = "thehostname"
+			rt.TLS.GRPC.UseAutoCert = false
+		},
+	})
+	run(t, testCase{
+		desc: "tls.grpc.use_auto_cert defaults to false (III)",
+		args: []string{
+			`-data-dir=` + dataDir,
+		},
+		json: []string{`
+			{
+			}
+		`},
+		hcl: []string{`
+		`},
+		expected: func(rt *RuntimeConfig) {
+			rt.DataDir = dataDir
+			rt.TLS.Domain = "consul."
+			rt.TLS.NodeName = "thehostname"
+			rt.TLS.GRPC.UseAutoCert = false
+		},
+	})
+	run(t, testCase{
+		desc: "tls.grpc.use_auto_cert enabled when true",
 		args: []string{
 			`-data-dir=` + dataDir,
 		},
@@ -5524,7 +5587,7 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
 		{
 			"tls": {
 				"grpc": {
-					"cert_file": "cert-1234"
+					"use_auto_cert": true
 				}
 			}
 		}
@@ -5532,20 +5595,43 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
 		hcl: []string{`
 			tls {
 				grpc {
-					cert_file = "cert-1234"
+					use_auto_cert = true
 				}
 			}
 		`},
 		expected: func(rt *RuntimeConfig) {
 			rt.DataDir = dataDir
 			rt.TLS.Domain = "consul."
 			rt.TLS.NodeName = "thehostname"
-			rt.TLS.GRPC.CertFile = "cert-1234"
-		},
-		expectedWarnings: []string{
-			"tls.grpc was provided but TLS will NOT be enabled on the gRPC listener without an HTTPS listener configured (e.g. via ports.https)",
+			rt.TLS.GRPC.UseAutoCert = true
 		},
 	})
+	run(t, testCase{
+		desc: "tls.grpc.use_auto_cert disabled when false",
+		args: []string{
+			`-data-dir=` + dataDir,
+		},
+		json: []string{`
+			{
+				"tls": {
+					"grpc": {
+						"use_auto_cert": false
+					}
+				}
+			}
+		`},
+		hcl: []string{`
+			tls {
+				grpc {
+					use_auto_cert = false
+				}
+			}
+		`},
+		expected: func(rt *RuntimeConfig) {
+			rt.DataDir = dataDir
+			rt.TLS.Domain = "consul."
+			rt.TLS.NodeName = "thehostname"
+			rt.TLS.GRPC.UseAutoCert = false
+		},
+	})
 }
@@ -6340,6 +6426,7 @@ func TestLoad_FullConfig(t *testing.T) {
 				TLSMinVersion:  types.TLSv1_0,
 				CipherSuites:   []types.TLSCipherSuite{types.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, types.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA},
 				VerifyOutgoing: false,
+				UseAutoCert:    true,
 			},
 			HTTPS: tlsutil.ProtocolConfig{
 				VerifyIncoming: true,

@@ -374,7 +374,8 @@
 			"TLSMinVersion": "",
 			"VerifyIncoming": false,
 			"VerifyOutgoing": false,
-			"VerifyServerHostname": false
+			"VerifyServerHostname": false,
+			"UseAutoCert": false
 		},
 		"HTTPS": {
 			"CAFile": "",
@@ -385,7 +386,8 @@
 			"TLSMinVersion": "",
 			"VerifyIncoming": false,
 			"VerifyOutgoing": false,
-			"VerifyServerHostname": false
+			"VerifyServerHostname": false,
+			"UseAutoCert": false
 		},
 		"InternalRPC": {
 			"CAFile": "",
@@ -396,7 +398,8 @@
 			"TLSMinVersion": "",
 			"VerifyIncoming": false,
 			"VerifyOutgoing": false,
-			"VerifyServerHostname": false
+			"VerifyServerHostname": false,
+			"UseAutoCert": false
 		},
 		"NodeName": "",
 		"ServerName": ""
@@ -466,4 +469,4 @@
 	"VersionMetadata": "",
 	"VersionPrerelease": "",
 	"Watches": []
-}
+}

@@ -697,6 +697,7 @@ tls {
 		tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA"
 		tls_min_version = "TLSv1_0"
 		verify_incoming = true
+		use_auto_cert = true
 	}
 }
 tls_cipher_suites = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"

@@ -692,7 +692,8 @@
 			"key_file": "1y4prKjl",
 			"tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
 			"tls_min_version": "TLSv1_0",
-			"verify_incoming": true
+			"verify_incoming": true,
+			"use_auto_cert": true
 		}
 	},
 	"tls_cipher_suites": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",

@@ -178,20 +178,43 @@ func TestQuerySNI(t *testing.T) {
 func TestTargetSNI(t *testing.T) {
 	// empty namespace, empty subset
 	require.Equal(t, "api.default.foo."+testTrustDomainSuffix1,
-		TargetSNI(structs.NewDiscoveryTarget("api", "", "", "default", "foo"), testTrustDomain1))
+		TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:    "api",
+			Partition:  "default",
+			Datacenter: "foo",
+		}), testTrustDomain1))

 	require.Equal(t, "api.default.foo."+testTrustDomainSuffix1,
-		TargetSNI(structs.NewDiscoveryTarget("api", "", "", "", "foo"), testTrustDomain1))
+		TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:    "api",
+			Datacenter: "foo",
+		}), testTrustDomain1))

 	// set namespace, empty subset
 	require.Equal(t, "api.neighbor.foo."+testTrustDomainSuffix2,
-		TargetSNI(structs.NewDiscoveryTarget("api", "", "neighbor", "default", "foo"), testTrustDomain2))
+		TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:    "api",
+			Namespace:  "neighbor",
+			Partition:  "default",
+			Datacenter: "foo",
+		}), testTrustDomain2))

 	// empty namespace, set subset
 	require.Equal(t, "v2.api.default.foo."+testTrustDomainSuffix1,
-		TargetSNI(structs.NewDiscoveryTarget("api", "v2", "", "default", "foo"), testTrustDomain1))
+		TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:       "api",
+			ServiceSubset: "v2",
+			Partition:     "default",
+			Datacenter:    "foo",
+		}), testTrustDomain1))

 	// set namespace, set subset
 	require.Equal(t, "canary.api.neighbor.foo."+testTrustDomainSuffix2,
-		TargetSNI(structs.NewDiscoveryTarget("api", "canary", "neighbor", "default", "foo"), testTrustDomain2))
+		TargetSNI(structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:       "api",
+			ServiceSubset: "canary",
+			Namespace:     "neighbor",
+			Partition:     "default",
+			Datacenter:    "foo",
+		}), testTrustDomain2))
 }

@@ -565,6 +565,11 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
 		return err
 	}

+	filter, err := bexpr.CreateFilter(args.Filter, nil, []*structs.ServiceNode{})
+	if err != nil {
+		return err
+	}
+
 	// Set reply enterprise metadata after resolving and validating the token so
 	// that we can properly infer metadata from the token.
 	reply.EnterpriseMeta = args.EnterpriseMeta
@@ -574,10 +579,11 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
 		&reply.QueryMeta,
 		func(ws memdb.WatchSet, state *state.Store) error {
 			var err error
+			var serviceNodes structs.ServiceNodes
 			if len(args.NodeMetaFilters) > 0 {
-				reply.Index, reply.Services, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
+				reply.Index, serviceNodes, err = state.ServicesByNodeMeta(ws, args.NodeMetaFilters, &args.EnterpriseMeta, args.PeerName)
 			} else {
-				reply.Index, reply.Services, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName)
+				reply.Index, serviceNodes, err = state.Services(ws, &args.EnterpriseMeta, args.PeerName)
 			}
 			if err != nil {
 				return err
@@ -588,11 +594,43 @@ func (c *Catalog) ListServices(args *structs.DCSpecificRequest, reply *structs.I
 				return nil
 			}

+			raw, err := filter.Execute(serviceNodes)
+			if err != nil {
+				return err
+			}
+
+			reply.Services = servicesTagsByName(raw.(structs.ServiceNodes))
+
 			c.srv.filterACLWithAuthorizer(authz, reply)
 			return nil
 		})
 }

+func servicesTagsByName(services []*structs.ServiceNode) structs.Services {
+	unique := make(map[string]map[string]struct{})
+	for _, svc := range services {
+		tags, ok := unique[svc.ServiceName]
+		if !ok {
+			unique[svc.ServiceName] = make(map[string]struct{})
+			tags = unique[svc.ServiceName]
+		}
+		for _, tag := range svc.ServiceTags {
+			tags[tag] = struct{}{}
+		}
+	}
+
+	// Generate the output structure.
+	var results = make(structs.Services)
+	for service, tags := range unique {
+		results[service] = make([]string, 0, len(tags))
+		for tag := range tags {
+			results[service] = append(results[service], tag)
+		}
+	}
+	return results
+}
+
 // ServiceList is used to query the services in a DC.
 // Returns services as a list of ServiceNames.
 func (c *Catalog) ServiceList(args *structs.DCSpecificRequest, reply *structs.IndexedServiceList) error {
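
To make the new helper concrete, a small sketch of its behavior (service and tag names invented for illustration; tag order in the result is unspecified because it comes from a map):

```go
// Two instances of the same service with overlapping tags...
nodes := []*structs.ServiceNode{
	{ServiceName: "redis", ServiceTags: []string{"v1", "v2"}},
	{ServiceName: "redis", ServiceTags: []string{"v2"}},
}

// ...collapse into one entry with a deduplicated tag set:
//   servicesTagsByName(nodes) == structs.Services{"redis": {"v1", "v2"}}
```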

@@ -1523,6 +1523,45 @@ func TestCatalog_ListServices_NodeMetaFilter(t *testing.T) {
 	}
 }

+func TestCatalog_ListServices_Filter(t *testing.T) {
+	t.Parallel()
+	_, s1 := testServer(t)
+	codec := rpcClient(t, s1)
+
+	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
+
+	// prep the cluster with some data we can use in our filters
+	registerTestCatalogEntries(t, codec)
+
+	// Run the tests against the test server
+	t.Run("ListServices", func(t *testing.T) {
+		args := structs.DCSpecificRequest{
+			Datacenter: "dc1",
+		}
+
+		args.Filter = "ServiceName == redis"
+		out := new(structs.IndexedServices)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
+		require.Contains(t, out.Services, "redis")
+		require.ElementsMatch(t, []string{"v1", "v2"}, out.Services["redis"])
+
+		args.Filter = "NodeMeta.os == NoSuchOS"
+		out = new(structs.IndexedServices)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
+		require.Len(t, out.Services, 0)
+
+		args.Filter = "NodeMeta.NoSuchMetadata == linux"
+		out = new(structs.IndexedServices)
+		require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
+		require.Len(t, out.Services, 0)
+
+		args.Filter = "InvalidField == linux"
+		out = new(structs.IndexedServices)
+		require.Error(t, msgpackrpc.CallWithCodec(codec, "Catalog.ListServices", &args, out))
+	})
+}
+
 func TestCatalog_ListServices_Blocking(t *testing.T) {
 	if testing.Short() {
 		t.Skip("too slow for testing.Short")

@@ -1399,8 +1399,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
 						Protocol:    "http",
 						MeshGateway: structs.MeshGatewayConfig{Mode: structs.MeshGatewayModeRemote},
 						PassiveHealthCheck: &structs.PassiveHealthCheck{
-							Interval:    10,
-							MaxFailures: 2,
+							Interval:                10,
+							MaxFailures:             2,
+							EnforcingConsecutive5xx: uintPointer(60),
 						},
 					},
 					Overrides: []*structs.UpstreamConfig{
@@ -1432,8 +1433,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
 					Upstream: wildcard,
 					Config: map[string]interface{}{
 						"passive_health_check": map[string]interface{}{
-							"Interval":    int64(10),
-							"MaxFailures": int64(2),
+							"Interval":                int64(10),
+							"MaxFailures":             int64(2),
+							"EnforcingConsecutive5xx": int64(60),
 						},
 						"mesh_gateway": map[string]interface{}{
 							"Mode": "remote",
@@ -1445,8 +1447,9 @@ func TestConfigEntry_ResolveServiceConfig_Upstreams(t *testing.T) {
 					Upstream: mysql,
 					Config: map[string]interface{}{
 						"passive_health_check": map[string]interface{}{
-							"Interval":    int64(10),
-							"MaxFailures": int64(2),
+							"Interval":                int64(10),
+							"MaxFailures":             int64(2),
+							"EnforcingConsecutive5xx": int64(60),
 						},
 						"mesh_gateway": map[string]interface{}{
 							"Mode": "local",
@@ -2507,3 +2510,7 @@ func Test_gateWriteToSecondary_AllowedKinds(t *testing.T) {
 		})
 	}
 }
+
+func uintPointer(v uint32) *uint32 {
+	return &v
+}

@@ -56,8 +56,17 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
 		return &resp, nil
 	}

-	newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
-		t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
+	newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
+		if opts.Namespace == "" {
+			opts.Namespace = "default"
+		}
+		if opts.Partition == "" {
+			opts.Partition = "default"
+		}
+		if opts.Datacenter == "" {
+			opts.Datacenter = "dc1"
+		}
+		t := structs.NewDiscoveryTarget(opts)
 		t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul")
 		t.Name = t.SNI
 		t.ConnectTimeout = 5 * time.Second // default
@@ -119,7 +128,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
 				},
 			},
 			Targets: map[string]*structs.DiscoveryTarget{
-				"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
+				"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
 			},
 		},
 	}
@@ -245,7 +254,7 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) {
 			},
 			Targets: map[string]*structs.DiscoveryTarget{
 				"web.default.default.dc1": targetWithConnectTimeout(
-					newTarget("web", "", "default", "default", "dc1"),
+					newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
 					33*time.Second,
 				),
 			},

@@ -8,6 +8,7 @@ import (
 	"github.com/mitchellh/hashstructure"
 	"github.com/mitchellh/mapstructure"

+	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/configentry"
 	"github.com/hashicorp/consul/agent/connect"
 	"github.com/hashicorp/consul/agent/structs"
@@ -576,7 +577,10 @@ func (c *compiler) assembleChain() error {
 	if router == nil {
 		// If no router is configured, move on down the line to the next hop of
 		// the chain.
-		node, err := c.getSplitterOrResolverNode(c.newTarget(c.serviceName, "", "", "", ""))
+		node, err := c.getSplitterOrResolverNode(c.newTarget(structs.DiscoveryTargetOpts{
+			Service: c.serviceName,
+		}))
 		if err != nil {
 			return err
 		}
@@ -626,11 +630,20 @@ func (c *compiler) assembleChain() error {
 		)
 		if dest.ServiceSubset == "" {
 			node, err = c.getSplitterOrResolverNode(
-				c.newTarget(svc, "", destNamespace, destPartition, ""),
-			)
+				c.newTarget(structs.DiscoveryTargetOpts{
+					Service:   svc,
+					Namespace: destNamespace,
+					Partition: destPartition,
+				},
+				))
 		} else {
 			node, err = c.getResolverNode(
-				c.newTarget(svc, dest.ServiceSubset, destNamespace, destPartition, ""),
+				c.newTarget(structs.DiscoveryTargetOpts{
+					Service:       svc,
+					ServiceSubset: dest.ServiceSubset,
+					Namespace:     destNamespace,
+					Partition:     destPartition,
+				}),
 				false,
 			)
 		}
@@ -642,7 +655,12 @@ func (c *compiler) assembleChain() error {
 	// If we have a router, we'll add a catch-all route at the end to send
 	// unmatched traffic to the next hop in the chain.
-	defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(router.Name, "", router.NamespaceOrDefault(), router.PartitionOrDefault(), ""))
+	opts := structs.DiscoveryTargetOpts{
+		Service:   router.Name,
+		Namespace: router.NamespaceOrDefault(),
+		Partition: router.PartitionOrDefault(),
+	}
+	defaultDestinationNode, err := c.getSplitterOrResolverNode(c.newTarget(opts))
 	if err != nil {
 		return err
 	}
@@ -674,26 +692,36 @@ func newDefaultServiceRoute(serviceName, namespace, partition string) *structs.S
 	}
 }

-func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
-	if service == "" {
+func (c *compiler) newTarget(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
+	if opts.Service == "" {
 		panic("newTarget called with empty service which makes no sense")
 	}

-	t := structs.NewDiscoveryTarget(
-		service,
-		serviceSubset,
-		defaultIfEmpty(namespace, c.evaluateInNamespace),
-		defaultIfEmpty(partition, c.evaluateInPartition),
-		defaultIfEmpty(datacenter, c.evaluateInDatacenter),
-	)
+	if opts.Peer == "" {
+		opts.Datacenter = defaultIfEmpty(opts.Datacenter, c.evaluateInDatacenter)
+		opts.Namespace = defaultIfEmpty(opts.Namespace, c.evaluateInNamespace)
+		opts.Partition = defaultIfEmpty(opts.Partition, c.evaluateInPartition)
+	} else {
+		// Don't allow Peer and Datacenter.
+		opts.Datacenter = ""
+		// Peer and Partition cannot both be set.
+		opts.Partition = acl.PartitionOrDefault("")
+		// Default to "default" rather than c.evaluateInNamespace.
+		opts.Namespace = acl.PartitionOrDefault(opts.Namespace)
+	}

-	// Set default connect SNI. This will be overridden later if the service
-	// has an explicit SNI value configured in service-defaults.
-	t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain)
+	t := structs.NewDiscoveryTarget(opts)

-	// Use the same representation for the name. This will NOT be overridden
-	// later.
-	t.Name = t.SNI
+	// We don't have the peer's trust domain yet so we can't construct the SNI.
+	if opts.Peer == "" {
+		// Set default connect SNI. This will be overridden later if the service
+		// has an explicit SNI value configured in service-defaults.
+		t.SNI = connect.TargetSNI(t, c.evaluateInTrustDomain)
+
+		// Use the same representation for the name. This will NOT be overridden
+		// later.
+		t.Name = t.SNI
+	}

 	prev, ok := c.loadedTargets[t.ID]
 	if ok {
@@ -703,34 +731,30 @@ func (c *compiler) newTarget(service, serviceSubset, namespace, partition, datac
 	return t
 }

-func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, service, serviceSubset, partition, namespace, datacenter string) *structs.DiscoveryTarget {
-	var (
-		service2       = t.Service
-		serviceSubset2 = t.ServiceSubset
-		partition2     = t.Partition
-		namespace2     = t.Namespace
-		datacenter2    = t.Datacenter
-	)
+func (c *compiler) rewriteTarget(t *structs.DiscoveryTarget, opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
+	mergedOpts := t.ToDiscoveryTargetOpts()

-	if service != "" && service != service2 {
-		service2 = service
+	if opts.Service != "" && opts.Service != mergedOpts.Service {
+		mergedOpts.Service = opts.Service
 		// Reset the chosen subset if we reference a service other than our own.
-		serviceSubset2 = ""
+		mergedOpts.ServiceSubset = ""
 	}
-	if serviceSubset != "" {
-		serviceSubset2 = serviceSubset
+	if opts.ServiceSubset != "" {
+		mergedOpts.ServiceSubset = opts.ServiceSubset
 	}
-	if partition != "" {
-		partition2 = partition
+	if opts.Partition != "" {
+		mergedOpts.Partition = opts.Partition
 	}
-	if namespace != "" {
-		namespace2 = namespace
+	// Only use explicit Namespace with Peer
+	if opts.Namespace != "" || opts.Peer != "" {
+		mergedOpts.Namespace = opts.Namespace
 	}
-	if datacenter != "" {
-		datacenter2 = datacenter
+	if opts.Datacenter != "" {
+		mergedOpts.Datacenter = opts.Datacenter
 	}
+	mergedOpts.Peer = opts.Peer

-	return c.newTarget(service2, serviceSubset2, namespace2, partition2, datacenter2)
+	return c.newTarget(mergedOpts)
 }

 func (c *compiler) getSplitterOrResolverNode(target *structs.DiscoveryTarget) (*structs.DiscoveryGraphNode, error) {
@@ -803,10 +827,13 @@ func (c *compiler) getSplitterNode(sid structs.ServiceID) (*structs.DiscoveryGra
 			// fall through to group-resolver
 		}

-		node, err := c.getResolverNode(
-			c.newTarget(splitID.ID, split.ServiceSubset, splitID.NamespaceOrDefault(), splitID.PartitionOrDefault(), ""),
-			false,
-		)
+		opts := structs.DiscoveryTargetOpts{
+			Service:       splitID.ID,
+			ServiceSubset: split.ServiceSubset,
+			Namespace:     splitID.NamespaceOrDefault(),
+			Partition:     splitID.PartitionOrDefault(),
+		}
+		node, err := c.getResolverNode(c.newTarget(opts), false)
 		if err != nil {
 			return nil, err
 		}
@@ -881,11 +908,7 @@ RESOLVE_AGAIN:
 		redirectedTarget := c.rewriteTarget(
 			target,
-			redirect.Service,
-			redirect.ServiceSubset,
-			redirect.Partition,
-			redirect.Namespace,
-			redirect.Datacenter,
+			redirect.ToDiscoveryTargetOpts(),
 		)
 		if redirectedTarget.ID != target.ID {
 			target = redirectedTarget
@@ -895,14 +918,9 @@ RESOLVE_AGAIN:
 	// Handle default subset.
 	if target.ServiceSubset == "" && resolver.DefaultSubset != "" {
-		target = c.rewriteTarget(
-			target,
-			"",
-			resolver.DefaultSubset,
-			"",
-			"",
-			"",
-		)
+		target = c.rewriteTarget(target, structs.DiscoveryTargetOpts{
+			ServiceSubset: resolver.DefaultSubset,
+		})
 		goto RESOLVE_AGAIN
 	}
@@ -1027,56 +1045,54 @@ RESOLVE_AGAIN:
 		failover, ok = f["*"]
 	}

-	if ok {
-		// Determine which failover definitions apply.
-		var failoverTargets []*structs.DiscoveryTarget
-		if len(failover.Datacenters) > 0 {
-			for _, dc := range failover.Datacenters {
-				// Rewrite the target as per the failover policy.
-				failoverTarget := c.rewriteTarget(
-					target,
-					failover.Service,
-					failover.ServiceSubset,
-					target.Partition,
-					failover.Namespace,
-					dc,
-				)
-				if failoverTarget.ID != target.ID { // don't failover to yourself
-					failoverTargets = append(failoverTargets, failoverTarget)
-				}
-			}
-		} else {
-			// Rewrite the target as per the failover policy.
-			failoverTarget := c.rewriteTarget(
-				target,
-				failover.Service,
-				failover.ServiceSubset,
-				target.Partition,
-				failover.Namespace,
-				"",
-			)
-			if failoverTarget.ID != target.ID { // don't failover to yourself
-				failoverTargets = append(failoverTargets, failoverTarget)
-			}
-		}
-
-		// If we filtered everything out then no point in having a failover.
-		if len(failoverTargets) > 0 {
-			df := &structs.DiscoveryFailover{}
-			node.Resolver.Failover = df
-
-			// Take care of doing any redirects or configuration loading
-			// related to targets by cheating a bit and recursing into
-			// ourselves.
-			for _, target := range failoverTargets {
-				failoverResolveNode, err := c.getResolverNode(target, true)
-				if err != nil {
-					return nil, err
-				}
-				failoverTarget := failoverResolveNode.Resolver.Target
-				df.Targets = append(df.Targets, failoverTarget)
-			}
-		}
+	if !ok {
+		return node, nil
+	}
+
+	// Determine which failover definitions apply.
+	var failoverTargets []*structs.DiscoveryTarget
+	if len(failover.Datacenters) > 0 {
+		opts := failover.ToDiscoveryTargetOpts()
+		for _, dc := range failover.Datacenters {
+			// Rewrite the target as per the failover policy.
+			opts.Datacenter = dc
+			failoverTarget := c.rewriteTarget(target, opts)
+			if failoverTarget.ID != target.ID { // don't failover to yourself
+				failoverTargets = append(failoverTargets, failoverTarget)
+			}
+		}
+	} else if len(failover.Targets) > 0 {
+		for _, t := range failover.Targets {
+			// Rewrite the target as per the failover policy.
+			failoverTarget := c.rewriteTarget(target, t.ToDiscoveryTargetOpts())
+			if failoverTarget.ID != target.ID { // don't failover to yourself
+				failoverTargets = append(failoverTargets, failoverTarget)
+			}
+		}
+	} else {
+		// Rewrite the target as per the failover policy.
+		failoverTarget := c.rewriteTarget(target, failover.ToDiscoveryTargetOpts())
+		if failoverTarget.ID != target.ID { // don't failover to yourself
+			failoverTargets = append(failoverTargets, failoverTarget)
+		}
+	}
+
+	// If we filtered everything out then no point in having a failover.
+	if len(failoverTargets) > 0 {
+		df := &structs.DiscoveryFailover{}
+		node.Resolver.Failover = df
+
+		// Take care of doing any redirects or configuration loading
+		// related to targets by cheating a bit and recursing into
+		// ourselves.
+		for _, target := range failoverTargets {
+			failoverResolveNode, err := c.getResolverNode(target, true)
+			if err != nil {
+				return nil, err
+			}
+			failoverTarget := failoverResolveNode.Resolver.Target
+			df.Targets = append(df.Targets, failoverTarget)
+		}
 	}
 }

@@ -39,6 +39,7 @@ func TestCompile(t *testing.T) {
 		"service redirect":                       testcase_ServiceRedirect(),
 		"service and subset redirect":            testcase_ServiceAndSubsetRedirect(),
 		"datacenter redirect":                    testcase_DatacenterRedirect(),
+		"redirect to cluster peer":               testcase_PeerRedirect(),
 		"datacenter redirect with mesh gateways": testcase_DatacenterRedirect_WithMeshGateways(),
 		"service failover":                       testcase_ServiceFailover(),
 		"service failover through redirect":      testcase_ServiceFailoverThroughRedirect(),
@@ -46,6 +47,7 @@ func TestCompile(t *testing.T) {
 		"service and subset failover":            testcase_ServiceAndSubsetFailover(),
 		"datacenter failover":                    testcase_DatacenterFailover(),
 		"datacenter failover with mesh gateways": testcase_DatacenterFailover_WithMeshGateways(),
+		"target failover":                        testcase_Failover_Targets(),
 		"noop split to resolver with default subset": testcase_NoopSplit_WithDefaultSubset(),
 		"resolver with default subset":           testcase_Resolve_WithDefaultSubset(),
 		"default resolver with external sni":     testcase_DefaultResolver_ExternalSNI(),
@@ -182,7 +184,7 @@ func testcase_JustRouterWithDefaults() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 		},
 	}
@@ -244,7 +246,7 @@ func testcase_JustRouterWithNoDestination() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 		},
 	}
@@ -294,7 +296,7 @@ func testcase_RouterWithDefaults_NoSplit_WithResolver() compileTestCase {
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
 			"main.default.default.dc1": targetWithConnectTimeout(
-				newTarget("main", "", "default", "default", "dc1", nil),
+				newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 				33*time.Second,
 			),
 		},
@@ -361,7 +363,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_DefaultResolver() compileTestCase
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 		},
 	}
@@ -426,7 +428,10 @@ func testcase_NoopSplit_DefaultResolver_ProtocolFromProxyDefaults() compileTestC
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+				Service:    "main",
+				Datacenter: "dc1",
+			}, nil),
 		},
 	}
@@ -498,7 +503,7 @@ func testcase_RouterWithDefaults_WithNoopSplit_WithResolver() compileTestCase {
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
 			"main.default.default.dc1": targetWithConnectTimeout(
-				newTarget("main", "", "default", "default", "dc1", nil),
+				newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 				33*time.Second,
 			),
 		},
@@ -584,8 +589,11 @@ func testcase_RouteBypassesSplit() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1":         newTarget("main", "", "default", "default", "dc1", nil),
-			"bypass.other.default.default.dc1": newTarget("other", "bypass", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
+			"bypass.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+				Service:       "other",
+				ServiceSubset: "bypass",
+			}, func(t *structs.DiscoveryTarget) {
 				t.Subset = structs.ServiceResolverSubset{
 					Filter: "Service.Meta.version == bypass",
 				}
@@ -638,7 +646,7 @@ func testcase_NoopSplit_DefaultResolver() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 		},
 	}
@@ -694,7 +702,7 @@ func testcase_NoopSplit_WithResolver() compileTestCase {
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
 			"main.default.default.dc1": targetWithConnectTimeout(
-				newTarget("main", "", "default", "default", "dc1", nil),
+				newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 				33*time.Second,
 			),
 		},
@@ -776,12 +784,19 @@ func testcase_SubsetSplit() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+
+			"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+				Service:       "main",
+				ServiceSubset: "v2",
+			}, func(t *structs.DiscoveryTarget) {
 				t.Subset = structs.ServiceResolverSubset{
 					Filter: "Service.Meta.version == 2",
 				}
 			}),
-			"v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+			"v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+				Service:       "main",
+				ServiceSubset: "v1",
+			}, func(t *structs.DiscoveryTarget) {
 				t.Subset = structs.ServiceResolverSubset{
 					Filter: "Service.Meta.version == 1",
 				}
@@ -855,8 +870,8 @@ func testcase_ServiceSplit() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil),
-			"bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil),
+			"foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil),
+			"bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil),
 		},
 	}
@@ -935,7 +950,10 @@ func testcase_SplitBypassesSplit() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"bypassed.next.default.default.dc1": newTarget("next", "bypassed", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+			"bypassed.next.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+				Service:       "next",
+				ServiceSubset: "bypassed",
+			}, func(t *structs.DiscoveryTarget) {
 				t.Subset = structs.ServiceResolverSubset{
 					Filter: "Service.Meta.version == bypass",
 				}
@@ -973,7 +991,7 @@ func testcase_ServiceRedirect() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil),
+			"other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
 		},
 	}
@@ -1019,7 +1037,10 @@ func testcase_ServiceAndSubsetRedirect() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"v2.other.default.default.dc1": newTarget("other", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+			"v2.other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+				Service:       "other",
+				ServiceSubset: "v2",
+			}, func(t *structs.DiscoveryTarget) {
 				t.Subset = structs.ServiceResolverSubset{
 					Filter: "Service.Meta.version == 2",
 				}
@@ -1055,7 +1076,51 @@ func testcase_DatacenterRedirect() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", nil),
+			"main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{
+				Service:    "main",
+				Datacenter: "dc9",
+			}, nil),
+		},
+	}
+	return compileTestCase{entries: entries, expect: expect}
+}
+
+func testcase_PeerRedirect() compileTestCase {
+	entries := newEntries()
+	entries.AddResolvers(
+		&structs.ServiceResolverConfigEntry{
+			Kind: "service-resolver",
+			Name: "main",
+			Redirect: &structs.ServiceResolverRedirect{
+				Service: "other",
+				Peer:    "cluster-01",
+			},
+		},
+	)
+
+	expect := &structs.CompiledDiscoveryChain{
+		Protocol:  "tcp",
+		StartNode: "resolver:other.default.default.external.cluster-01",
+		Nodes: map[string]*structs.DiscoveryGraphNode{
+			"resolver:other.default.default.external.cluster-01": {
+				Type: structs.DiscoveryGraphNodeTypeResolver,
+				Name: "other.default.default.external.cluster-01",
+				Resolver: &structs.DiscoveryResolver{
+					Default:        true,
+					ConnectTimeout: 5 * time.Second,
+					Target:         "other.default.default.external.cluster-01",
+				},
+			},
+		},
+		Targets: map[string]*structs.DiscoveryTarget{
+			"other.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{
+				Service: "other",
+				Peer:    "cluster-01",
+			}, func(t *structs.DiscoveryTarget) {
+				t.SNI = ""
+				t.Name = ""
+				t.Datacenter = ""
+			}),
 		},
 	}
 	return compileTestCase{entries: entries, expect: expect}
 }
@@ -1095,7 +1160,10 @@ func testcase_DatacenterRedirect_WithMeshGateways() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc9": newTarget("main", "", "default", "default", "dc9", func(t *structs.DiscoveryTarget) {
+			"main.default.default.dc9": newTarget(structs.DiscoveryTargetOpts{
+				Service:    "main",
+				Datacenter: "dc9",
+			}, func(t *structs.DiscoveryTarget) {
 				t.MeshGateway = structs.MeshGatewayConfig{
 					Mode: structs.MeshGatewayModeRemote,
 				}
@@ -1134,8 +1202,8 @@ func testcase_ServiceFailover() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1":   newTarget("main", "", "default", "default", "dc1", nil),
-			"backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil),
+			"main.default.default.dc1":   newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
+			"backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil),
 		},
 	}
 	return compileTestCase{entries: entries, expect: expect}
@@ -1177,8 +1245,8 @@ func testcase_ServiceFailoverThroughRedirect() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1":   newTarget("main", "", "default", "default", "dc1", nil),
-			"actual.default.default.dc1": newTarget("actual", "", "default", "default", "dc1", nil),
+			"main.default.default.dc1":   newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
+			"actual.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "actual"}, nil),
 		},
 	}
 	return compileTestCase{entries: entries, expect: expect}
@@ -1220,8 +1288,8 @@ func testcase_Resolver_CircularFailover() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1":   newTarget("main", "", "default", "default", "dc1", nil),
-			"backup.default.default.dc1": newTarget("backup", "", "default", "default", "dc1", nil),
+			"main.default.default.dc1":   newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
+			"backup.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "backup"}, nil),
 		},
 	}
 	return compileTestCase{entries: entries, expect: expect}
@@ -1261,8 +1329,11 @@ func testcase_ServiceAndSubsetFailover() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1":        newTarget("main", "", "default", "default", "dc1", nil),
-			"backup.main.default.default.dc1": newTarget("main", "backup", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
+			"backup.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+				Service:       "main",
+				ServiceSubset: "backup",
+			}, func(t *structs.DiscoveryTarget) {
 				t.Subset = structs.ServiceResolverSubset{
 					Filter: "Service.Meta.version == backup",
 				}
@@ -1301,9 +1372,15 @@ func testcase_DatacenterFailover() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
-			"main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", nil),
-			"main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", nil),
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
+			"main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{
+				Service:    "main",
+				Datacenter: "dc2",
+			}, nil),
+			"main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{
+				Service:    "main",
+				Datacenter: "dc4",
+			}, nil),
 		},
 	}
 	return compileTestCase{entries: entries, expect: expect}
@@ -1350,17 +1427,105 @@ func testcase_DatacenterFailover_WithMeshGateways() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
 				t.MeshGateway = structs.MeshGatewayConfig{
 					Mode: structs.MeshGatewayModeRemote,
 				}
 			}),
-			"main.default.default.dc2": newTarget("main", "", "default", "default", "dc2", func(t *structs.DiscoveryTarget) {
+			"main.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{
+				Service:    "main",
+				Datacenter: "dc2",
+			}, func(t *structs.DiscoveryTarget) {
 				t.MeshGateway = structs.MeshGatewayConfig{
 					Mode: structs.MeshGatewayModeRemote,
 				}
 			}),
-			"main.default.default.dc4": newTarget("main", "", "default", "default", "dc4", func(t *structs.DiscoveryTarget) {
+			"main.default.default.dc4": newTarget(structs.DiscoveryTargetOpts{
+				Service:    "main",
+				Datacenter: "dc4",
+			}, func(t *structs.DiscoveryTarget) {
+				t.MeshGateway = structs.MeshGatewayConfig{
+					Mode: structs.MeshGatewayModeRemote,
+				}
+			}),
+		},
+	}
+	return compileTestCase{entries: entries, expect: expect}
+}
+
+func testcase_Failover_Targets() compileTestCase {
+	entries := newEntries()
+
+	entries.AddProxyDefaults(&structs.ProxyConfigEntry{
+		Kind: structs.ProxyDefaults,
+		Name: structs.ProxyConfigGlobal,
+		MeshGateway: structs.MeshGatewayConfig{
+			Mode: structs.MeshGatewayModeRemote,
+		},
+	})
+
+	entries.AddResolvers(
+		&structs.ServiceResolverConfigEntry{
+			Kind: "service-resolver",
+			Name: "main",
+			Failover: map[string]structs.ServiceResolverFailover{
+				"*": {
+					Targets: []structs.ServiceResolverFailoverTarget{
+						{Datacenter: "dc3"},
+						{Service: "new-main"},
+						{Peer: "cluster-01"},
+					},
+				},
+			},
+		},
+	)
+
+	expect := &structs.CompiledDiscoveryChain{
+		Protocol:  "tcp",
+		StartNode: "resolver:main.default.default.dc1",
+		Nodes: map[string]*structs.DiscoveryGraphNode{
+			"resolver:main.default.default.dc1": {
+				Type: structs.DiscoveryGraphNodeTypeResolver,
+				Name: "main.default.default.dc1",
+				Resolver: &structs.DiscoveryResolver{
+					ConnectTimeout: 5 * time.Second,
+					Target:         "main.default.default.dc1",
+					Failover: &structs.DiscoveryFailover{
+						Targets: []string{
+							"main.default.default.dc3",
+							"new-main.default.default.dc1",
+							"main.default.default.external.cluster-01",
+						},
+					},
+				},
+			},
+		},
+		Targets: map[string]*structs.DiscoveryTarget{
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
+				t.MeshGateway = structs.MeshGatewayConfig{
+					Mode: structs.MeshGatewayModeRemote,
+				}
+			}),
+			"main.default.default.dc3": newTarget(structs.DiscoveryTargetOpts{
+				Service:    "main",
+				Datacenter: "dc3",
+			}, func(t *structs.DiscoveryTarget) {
+				t.MeshGateway = structs.MeshGatewayConfig{
+					Mode: structs.MeshGatewayModeRemote,
+				}
+			}),
+			"new-main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "new-main"}, func(t *structs.DiscoveryTarget) {
+				t.MeshGateway = structs.MeshGatewayConfig{
+					Mode: structs.MeshGatewayModeRemote,
+				}
+			}),
+			"main.default.default.external.cluster-01": newTarget(structs.DiscoveryTargetOpts{
+				Service: "main",
+				Peer:    "cluster-01",
+			}, func(t *structs.DiscoveryTarget) {
+				t.SNI = ""
+				t.Name = ""
+				t.Datacenter = ""
 				t.MeshGateway = structs.MeshGatewayConfig{
 					Mode: structs.MeshGatewayModeRemote,
 				}
@@ -1422,7 +1587,10 @@ func testcase_NoopSplit_WithDefaultSubset() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+			"v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
+				Service:       "main",
+				ServiceSubset: "v2",
+			}, func(t *structs.DiscoveryTarget) {
 				t.Subset = structs.ServiceResolverSubset{
 					Filter: "Service.Meta.version == 2",
 				}
@@ -1452,7 +1620,7 @@ func testcase_DefaultResolver() compileTestCase {
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
 			// TODO-TARGET
-			"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil),
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
 		},
 	}
 	return compileTestCase{entries: entries, expect: expect}
@@ -1488,7 +1656,7 @@ func testcase_DefaultResolver_WithProxyDefaults() compileTestCase {
 			},
 		},
 		Targets: map[string]*structs.DiscoveryTarget{
-			"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) {
+			"main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
 				t.MeshGateway = structs.MeshGatewayConfig{
Mode: structs.MeshGatewayModeRemote, Mode: structs.MeshGatewayModeRemote,
} }
@ -1530,7 +1698,7 @@ func testcase_ServiceMetaProjection() compileTestCase {
}, },
}, },
Targets: map[string]*structs.DiscoveryTarget{ Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
}, },
} }
@ -1588,7 +1756,7 @@ func testcase_ServiceMetaProjectionWithRedirect() compileTestCase {
}, },
}, },
Targets: map[string]*structs.DiscoveryTarget{ Targets: map[string]*structs.DiscoveryTarget{
"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil), "other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
}, },
} }
@ -1623,7 +1791,7 @@ func testcase_RedirectToDefaultResolverIsNotDefaultChain() compileTestCase {
}, },
}, },
Targets: map[string]*structs.DiscoveryTarget{ Targets: map[string]*structs.DiscoveryTarget{
"other.default.default.dc1": newTarget("other", "", "default", "default", "dc1", nil), "other.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "other"}, nil),
}, },
} }
@ -1658,7 +1826,10 @@ func testcase_Resolve_WithDefaultSubset() compileTestCase {
}, },
}, },
Targets: map[string]*structs.DiscoveryTarget{ Targets: map[string]*structs.DiscoveryTarget{
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { "v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v2",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{ t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 2", Filter: "Service.Meta.version == 2",
} }
@ -1692,7 +1863,7 @@ func testcase_DefaultResolver_ExternalSNI() compileTestCase {
}, },
}, },
Targets: map[string]*structs.DiscoveryTarget{ Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, func(t *structs.DiscoveryTarget) {
t.SNI = "main.some.other.service.mesh" t.SNI = "main.some.other.service.mesh"
t.External = true t.External = true
}), }),
@ -1857,11 +2028,17 @@ func testcase_MultiDatacenterCanary() compileTestCase {
}, },
Targets: map[string]*structs.DiscoveryTarget{ Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc2": targetWithConnectTimeout( "main.default.default.dc2": targetWithConnectTimeout(
newTarget("main", "", "default", "default", "dc2", nil), newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc2",
}, nil),
33*time.Second, 33*time.Second,
), ),
"main.default.default.dc3": targetWithConnectTimeout( "main.default.default.dc3": targetWithConnectTimeout(
newTarget("main", "", "default", "default", "dc3", nil), newTarget(structs.DiscoveryTargetOpts{
Service: "main",
Datacenter: "dc3",
}, nil),
33*time.Second, 33*time.Second,
), ),
}, },
@ -2155,27 +2332,42 @@ func testcase_AllBellsAndWhistles() compileTestCase {
}, },
}, },
Targets: map[string]*structs.DiscoveryTarget{ Targets: map[string]*structs.DiscoveryTarget{
"prod.redirected.default.default.dc1": newTarget("redirected", "prod", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { "prod.redirected.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "redirected",
ServiceSubset: "prod",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{ t.Subset = structs.ServiceResolverSubset{
Filter: "ServiceMeta.env == prod", Filter: "ServiceMeta.env == prod",
} }
}), }),
"v1.main.default.default.dc1": newTarget("main", "v1", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { "v1.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v1",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{ t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 1", Filter: "Service.Meta.version == 1",
} }
}), }),
"v2.main.default.default.dc1": newTarget("main", "v2", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { "v2.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v2",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{ t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 2", Filter: "Service.Meta.version == 2",
} }
}), }),
"v3.main.default.default.dc1": newTarget("main", "v3", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { "v3.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "v3",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{ t.Subset = structs.ServiceResolverSubset{
Filter: "Service.Meta.version == 3", Filter: "Service.Meta.version == 3",
} }
}), }),
"default-subset.main.default.default.dc1": newTarget("main", "default-subset", "default", "default", "dc1", func(t *structs.DiscoveryTarget) { "default-subset.main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{
Service: "main",
ServiceSubset: "default-subset",
}, func(t *structs.DiscoveryTarget) {
t.Subset = structs.ServiceResolverSubset{OnlyPassing: true} t.Subset = structs.ServiceResolverSubset{OnlyPassing: true}
}), }),
}, },
@ -2379,7 +2571,7 @@ func testcase_ResolverProtocolOverride() compileTestCase {
}, },
Targets: map[string]*structs.DiscoveryTarget{ Targets: map[string]*structs.DiscoveryTarget{
// TODO-TARGET // TODO-TARGET
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
}, },
} }
return compileTestCase{entries: entries, expect: expect, return compileTestCase{entries: entries, expect: expect,
@ -2413,7 +2605,7 @@ func testcase_ResolverProtocolOverrideIgnored() compileTestCase {
}, },
Targets: map[string]*structs.DiscoveryTarget{ Targets: map[string]*structs.DiscoveryTarget{
// TODO-TARGET // TODO-TARGET
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
}, },
} }
return compileTestCase{entries: entries, expect: expect, return compileTestCase{entries: entries, expect: expect,
@ -2451,7 +2643,7 @@ func testcase_RouterIgnored_ResolverProtocolOverride() compileTestCase {
}, },
Targets: map[string]*structs.DiscoveryTarget{ Targets: map[string]*structs.DiscoveryTarget{
// TODO-TARGET // TODO-TARGET
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
}, },
} }
return compileTestCase{entries: entries, expect: expect, return compileTestCase{entries: entries, expect: expect,
@ -2685,9 +2877,9 @@ func testcase_LBSplitterAndResolver() compileTestCase {
}, },
}, },
Targets: map[string]*structs.DiscoveryTarget{ Targets: map[string]*structs.DiscoveryTarget{
"foo.default.default.dc1": newTarget("foo", "", "default", "default", "dc1", nil), "foo.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "foo"}, nil),
"bar.default.default.dc1": newTarget("bar", "", "default", "default", "dc1", nil), "bar.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "bar"}, nil),
"baz.default.default.dc1": newTarget("baz", "", "default", "default", "dc1", nil), "baz.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "baz"}, nil),
}, },
} }
@ -2743,7 +2935,7 @@ func testcase_LBResolver() compileTestCase {
}, },
}, },
Targets: map[string]*structs.DiscoveryTarget{ Targets: map[string]*structs.DiscoveryTarget{
"main.default.default.dc1": newTarget("main", "", "default", "default", "dc1", nil), "main.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "main"}, nil),
}, },
} }
@ -2791,8 +2983,17 @@ func newEntries() *configentry.DiscoveryChainSet {
} }
} }
func newTarget(service, serviceSubset, namespace, partition, datacenter string, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget { func newTarget(opts structs.DiscoveryTargetOpts, modFn func(t *structs.DiscoveryTarget)) *structs.DiscoveryTarget {
t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter) if opts.Namespace == "" {
opts.Namespace = "default"
}
if opts.Partition == "" {
opts.Partition = "default"
}
if opts.Datacenter == "" {
opts.Datacenter = "dc1"
}
t := structs.NewDiscoveryTarget(opts)
t.SNI = connect.TargetSNI(t, "trustdomain.consul") t.SNI = connect.TargetSNI(t, "trustdomain.consul")
t.Name = t.SNI t.Name = t.SNI
t.ConnectTimeout = 5 * time.Second // default t.ConnectTimeout = 5 * time.Second // default
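The hunks above all flow from one refactor: newTarget now takes a structs.DiscoveryTargetOpts value instead of five positional strings, and fills in the default namespace, partition, and datacenter itself. A minimal, self-contained sketch of the same options-struct-with-defaults pattern; TargetOpts and makeTargetID here are illustrative stand-ins, not the real Consul types:

```go
package main

import "fmt"

// TargetOpts mirrors the options-struct pattern used by newTarget above:
// zero-valued fields get defaults inside the constructor.
type TargetOpts struct {
	Service       string
	ServiceSubset string
	Namespace     string
	Partition     string
	Datacenter    string
}

// makeTargetID renders the same key shape the test expectations use,
// e.g. "v2.main.default.default.dc1" (hypothetical helper).
func makeTargetID(opts TargetOpts) string {
	if opts.Namespace == "" {
		opts.Namespace = "default"
	}
	if opts.Partition == "" {
		opts.Partition = "default"
	}
	if opts.Datacenter == "" {
		opts.Datacenter = "dc1"
	}
	id := fmt.Sprintf("%s.%s.%s.%s", opts.Service, opts.Namespace, opts.Partition, opts.Datacenter)
	if opts.ServiceSubset != "" {
		id = opts.ServiceSubset + "." + id
	}
	return id
}

func main() {
	fmt.Println(makeTargetID(TargetOpts{Service: "main"}))                                        // main.default.default.dc1
	fmt.Println(makeTargetID(TargetOpts{Service: "main", ServiceSubset: "v2", Datacenter: "dc2"})) // v2.main.default.default.dc2
}
```

Call sites that only care about the service name shrink to a single field, which is why the diff above touches so many targets but changes almost no behavior.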
@@ -153,64 +153,87 @@ func (m *Internal) ServiceDump(args *structs.ServiceDumpRequest, reply *structs.
 		&args.QueryOptions,
 		&reply.QueryMeta,
 		func(ws memdb.WatchSet, state *state.Store) error {
-			// we don't support calling this endpoint for a specific peer
-			if args.PeerName != "" {
-				return fmt.Errorf("this endpoint does not support specifying a peer: %q", args.PeerName)
-			}
-
 			// this maxIndex will be the max of the ServiceDump calls and the PeeringList call
 			var maxIndex uint64
-			// get a local dump for services
-			index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
-			if err != nil {
-				return fmt.Errorf("could not get a service dump for local nodes: %w", err)
-			}
-
-			if index > maxIndex {
-				maxIndex = index
-			}
-			reply.Nodes = nodes
-
-			// get a list of all peerings
-			index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta)
-			if err != nil {
-				return fmt.Errorf("could not list peers for service dump %w", err)
-			}
-			if index > maxIndex {
-				maxIndex = index
-			}
-
-			for _, p := range listedPeerings {
-				index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, p.Name)
+			// If PeerName is not empty, we return only the imported services from that peer
+			if args.PeerName != "" {
+				// get a local dump for services
+				index, nodes, err := state.ServiceDump(ws,
+					args.ServiceKind,
+					args.UseServiceKind,
+					// Note we fetch imported services with wildcard namespace because imported services' namespaces
+					// are in a different locality; regardless of our local namespace, we return all imported services
+					// of the local partition.
+					args.EnterpriseMeta.WithWildcardNamespace(),
+					args.PeerName)
 				if err != nil {
-					return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err)
+					return fmt.Errorf("could not get a service dump for peer %q: %w", args.PeerName, err)
 				}
 				if index > maxIndex {
 					maxIndex = index
 				}
-				reply.ImportedNodes = append(reply.ImportedNodes, importedNodes...)
-			}
-
-			// Get, store, and filter gateway services
-			idx, gatewayServices, err := state.DumpGatewayServices(ws)
-			if err != nil {
-				return err
-			}
-			reply.Gateways = gatewayServices
-
-			if idx > maxIndex {
-				maxIndex = idx
-			}
-			reply.Index = maxIndex
-
-			raw, err := filter.Execute(reply.Nodes)
-			if err != nil {
-				return fmt.Errorf("could not filter local service dump: %w", err)
+				reply.Index = maxIndex
+				reply.ImportedNodes = nodes
+			} else {
+				// otherwise return both local and all imported services

+				// get a local dump for services
+				index, nodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, &args.EnterpriseMeta, structs.DefaultPeerKeyword)
+				if err != nil {
+					return fmt.Errorf("could not get a service dump for local nodes: %w", err)
+				}
+				if index > maxIndex {
+					maxIndex = index
+				}
+				reply.Nodes = nodes
+
+				// get a list of all peerings
+				index, listedPeerings, err := state.PeeringList(ws, args.EnterpriseMeta)
+				if err != nil {
+					return fmt.Errorf("could not list peers for service dump %w", err)
+				}
+				if index > maxIndex {
+					maxIndex = index
+				}
+
+				for _, p := range listedPeerings {
+					// Note we fetch imported services with wildcard namespace because imported services' namespaces
+					// are in a different locality; regardless of our local namespace, we return all imported services
+					// of the local partition.
+					index, importedNodes, err := state.ServiceDump(ws, args.ServiceKind, args.UseServiceKind, args.EnterpriseMeta.WithWildcardNamespace(), p.Name)
+					if err != nil {
+						return fmt.Errorf("could not get a service dump for peer %q: %w", p.Name, err)
+					}
+					if index > maxIndex {
+						maxIndex = index
+					}
+					reply.ImportedNodes = append(reply.ImportedNodes, importedNodes...)
+				}
+
+				// Get, store, and filter gateway services
+				idx, gatewayServices, err := state.DumpGatewayServices(ws)
+				if err != nil {
+					return err
+				}
+				reply.Gateways = gatewayServices
+				if idx > maxIndex {
+					maxIndex = idx
+				}
+				reply.Index = maxIndex
+
+				raw, err := filter.Execute(reply.Nodes)
+				if err != nil {
+					return fmt.Errorf("could not filter local service dump: %w", err)
+				}
+				reply.Nodes = raw.(structs.CheckServiceNodes)
 			}
-			reply.Nodes = raw.(structs.CheckServiceNodes)

 			importedRaw, err := filter.Execute(reply.ImportedNodes)
 			if err != nil {
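The filter.Execute calls above apply the caller's ?filter= expression to both the local and imported node sets. Roughly, this is hashicorp/go-bexpr evaluation; a sketch under the assumption that CreateEvaluator in current versions takes just the expression (older releases accepted extra configuration arguments), with an illustrative ServiceNode type:

```go
package main

import (
	"fmt"

	bexpr "github.com/hashicorp/go-bexpr"
)

type ServiceNode struct {
	ServiceName string
	ServiceTags []string
}

func main() {
	// Compile a filter expression once, then evaluate it per value.
	eval, err := bexpr.CreateEvaluator("ServiceName == `redis`")
	if err != nil {
		panic(err)
	}
	match, err := eval.Evaluate(ServiceNode{ServiceName: "redis", ServiceTags: []string{"prod"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(match) // true
}
```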
@@ -1098,11 +1098,36 @@ func setLeafSigningCert(caRoot *structs.CARoot, pem string) error {
 		return fmt.Errorf("error parsing leaf signing cert: %w", err)
 	}

+	if err := pruneExpiredIntermediates(caRoot); err != nil {
+		return err
+	}
+
 	caRoot.IntermediateCerts = append(caRoot.IntermediateCerts, pem)
 	caRoot.SigningKeyID = connect.EncodeSigningKeyID(cert.SubjectKeyId)
 	return nil
 }

+// pruneExpiredIntermediates removes expired intermediate certificates
+// from the given CARoot.
+func pruneExpiredIntermediates(caRoot *structs.CARoot) error {
+	var newIntermediates []string
+	now := time.Now()
+	for _, intermediatePEM := range caRoot.IntermediateCerts {
+		cert, err := connect.ParseCert(intermediatePEM)
+		if err != nil {
+			return fmt.Errorf("error parsing intermediate cert: %w", err)
+		}
+
+		// Only keep the intermediate cert if it's still valid.
+		if cert.NotAfter.After(now) {
+			newIntermediates = append(newIntermediates, intermediatePEM)
+		}
+	}
+
+	caRoot.IntermediateCerts = newIntermediates
+	return nil
+}
+
 // runRenewIntermediate periodically attempts to renew the intermediate cert.
 func (c *CAManager) runRenewIntermediate(ctx context.Context) error {
 	isPrimary := c.serverConf.Datacenter == c.serverConf.PrimaryDatacenter
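pruneExpiredIntermediates keeps an intermediate only while its NotAfter is still in the future. That check reduces to standard-library x509 parsing, sketched below; connect.ParseCert is assumed here to be a thin PEM-decode-and-parse wrapper of this shape:

```go
package ca // illustrative package name

import (
	"crypto/x509"
	"encoding/pem"
	"errors"
	"time"
)

// certStillValid reports whether a PEM-encoded certificate's NotAfter lies
// after the supplied time, mirroring the pruning condition above.
func certStillValid(certPEM string, now time.Time) (bool, error) {
	block, _ := pem.Decode([]byte(certPEM))
	if block == nil {
		return false, errors.New("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return false, err
	}
	return cert.NotAfter.After(now), nil
}
```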
@@ -435,7 +435,6 @@ func TestCAManager_SignCertificate_WithExpiredCert(t *testing.T) {
 		errorMsg    string
 	}{
 		{"intermediate valid", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), false, ""},
-		{"intermediate expired", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), true, "intermediate expired: certificate expired, expiration date"},
 		{"root expired", time.Now().AddDate(-2, 0, 0), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), true, "root expired: certificate expired, expiration date"},
 		// a cert that is not yet valid is ok, assume it will be valid soon enough
 		{"intermediate in the future", time.Now().AddDate(0, 0, -1), time.Now().AddDate(0, 0, 2), time.Now().AddDate(0, 0, 1), time.Now().AddDate(0, 0, 2), false, ""},
@@ -401,6 +401,18 @@ func TestCAManager_RenewIntermediate_Vault_Primary(t *testing.T) {
 	err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
 	require.NoError(t, err)
 	verifyLeafCert(t, activeRoot, cert.CertPEM)
+
+	// Wait for the primary's old intermediate to be pruned after expiring.
+	oldIntermediate := activeRoot.IntermediateCerts[0]
+	retry.Run(t, func(r *retry.R) {
+		store := s1.caManager.delegate.State()
+		_, storedRoot, err := store.CARootActive(nil)
+		r.Check(err)
+
+		if storedRoot.IntermediateCerts[0] == oldIntermediate {
+			r.Fatal("old intermediate should be gone")
+		}
+	})
 }

 func patchIntermediateCertRenewInterval(t *testing.T) {
@@ -516,6 +528,18 @@ func TestCAManager_RenewIntermediate_Secondary(t *testing.T) {
 	err = msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", &req, &cert)
 	require.NoError(t, err)
 	verifyLeafCert(t, activeRoot, cert.CertPEM)
+
+	// Wait for dc2's old intermediate to be pruned after expiring.
+	oldIntermediate := activeRoot.IntermediateCerts[0]
+	retry.Run(t, func(r *retry.R) {
+		store := s2.caManager.delegate.State()
+		_, storedRoot, err := store.CARootActive(nil)
+		r.Check(err)
+
+		if storedRoot.IntermediateCerts[0] == oldIntermediate {
+			r.Fatal("old intermediate should be gone")
+		}
+	})
 }

 func TestConnectCA_ConfigurationSet_RootRotation_Secondary(t *testing.T) {
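Both new blocks poll with the sdk/testutil/retry helper instead of sleeping: r.Fatal fails only the current attempt, and retry.Run keeps re-running the closure until it passes or the retryer's own deadline expires. The pattern in isolation, with a purely illustrative deadline condition:

```go
package example

import (
	"testing"
	"time"

	"github.com/hashicorp/consul/sdk/testutil/retry"
)

// TestEventuallyPruned passes once the deadline elapses: each r.Fatal only
// fails the current attempt, and retry.Run retries until its own timeout.
func TestEventuallyPruned(t *testing.T) {
	deadline := time.Now().Add(50 * time.Millisecond)
	retry.Run(t, func(r *retry.R) {
		if time.Now().Before(deadline) {
			r.Fatal("not pruned yet") // keep polling
		}
	})
}
```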
@@ -31,11 +31,18 @@ import (
 )

 var leaderExportedServicesCountKey = []string{"consul", "peering", "exported_services"}
+var leaderHealthyPeeringKey = []string{"consul", "peering", "healthy"}
 var LeaderPeeringMetrics = []prometheus.GaugeDefinition{
 	{
 		Name: leaderExportedServicesCountKey,
 		Help: "A gauge that tracks how many services are exported for the peering. " +
-			"The labels are \"peering\" and, for enterprise, \"partition\". " +
+			"The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " +
+			"We emit this metric every 9 seconds",
+	},
+	{
+		Name: leaderHealthyPeeringKey,
+		Help: "A gauge that tracks whether a peering is healthy (1) or not (0). " +
+			"The labels are \"peer_name\", \"peer_id\" and, for enterprise, \"partition\". " +
 			"We emit this metric every 9 seconds",
 	},
 }
@@ -85,13 +92,6 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
 	}

 	for _, peer := range peers {
-		status, found := s.peerStreamServer.StreamStatus(peer.ID)
-		if !found {
-			logger.Trace("did not find status for", "peer_name", peer.Name)
-			continue
-		}
-
-		esc := status.GetExportedServicesCount()
 		part := peer.Partition
 		labels := []metrics.Label{
 			{Name: "peer_name", Value: peer.Name},
@@ -101,7 +101,25 @@ func (s *Server) emitPeeringMetricsOnce(logger hclog.Logger, metricsImpl *metric
 			labels = append(labels, metrics.Label{Name: "partition", Value: part})
 		}

-		metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels)
+		status, found := s.peerStreamServer.StreamStatus(peer.ID)
+		if found {
+			// exported services count metric
+			esc := status.GetExportedServicesCount()
+			metricsImpl.SetGaugeWithLabels(leaderExportedServicesCountKey, float32(esc), labels)
+		}
+
+		// peering health metric
+		if status.NeverConnected {
+			metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(math.NaN()), labels)
+		} else {
+			healthy := s.peerStreamServer.Tracker.IsHealthy(status)
+			healthyInt := 0
+			if healthy {
+				healthyInt = 1
+			}
+			metricsImpl.SetGaugeWithLabels(leaderHealthyPeeringKey, float32(healthyInt), labels)
+		}
 	}

 	return nil
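The health gauge is emitted through go-metrics with per-peer labels, and NaN marks peerings that have never connected so they can be told apart from merely unhealthy ones. A self-contained sketch of that emission; the in-memory sink setup and label values here are illustrative:

```go
package main

import (
	"math"
	"time"

	"github.com/armon/go-metrics"
)

func main() {
	// In-memory sink so the sketch is self-contained.
	sink := metrics.NewInmemSink(time.Second, time.Minute)
	if _, err := metrics.NewGlobal(metrics.DefaultConfig("consul"), sink); err != nil {
		panic(err)
	}

	labels := []metrics.Label{
		{Name: "peer_name", Value: "my-peer-s3"},
		{Name: "peer_id", Value: "peer-uuid"},
	}

	// Healthy peering reports 1, unhealthy 0, never-connected NaN.
	metrics.SetGaugeWithLabels([]string{"peering", "healthy"}, 1, labels)
	metrics.SetGaugeWithLabels([]string{"peering", "healthy"}, float32(math.NaN()), labels)
}
```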
@@ -277,13 +295,6 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
 		return fmt.Errorf("failed to build TLS dial option from peering: %w", err)
 	}

-	// Create a ring buffer to cycle through peer addresses in the retry loop below.
-	buffer := ring.New(len(peer.PeerServerAddresses))
-	for _, addr := range peer.PeerServerAddresses {
-		buffer.Value = addr
-		buffer = buffer.Next()
-	}
-
 	secret, err := s.fsm.State().PeeringSecretsRead(ws, peer.ID)
 	if err != nil {
 		return fmt.Errorf("failed to read secret for peering: %w", err)
@@ -294,27 +305,26 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
 	logger.Trace("establishing stream to peer")

-	retryCtx, cancel := context.WithCancel(ctx)
-	cancelFns[peer.ID] = cancel
-
-	streamStatus, err := s.peerStreamTracker.Register(peer.ID)
+	streamStatus, err := s.peerStreamServer.Tracker.Register(peer.ID)
 	if err != nil {
 		return fmt.Errorf("failed to register stream: %v", err)
 	}

+	streamCtx, cancel := context.WithCancel(ctx)
+	cancelFns[peer.ID] = cancel
+
+	// Start a goroutine to watch for updates to peer server addresses.
+	// The latest valid server address can be received from nextServerAddr.
+	nextServerAddr := make(chan string)
+	go s.watchPeerServerAddrs(streamCtx, peer, nextServerAddr)
+
 	// Establish a stream-specific retry so that retrying stream/conn errors isn't dependent on state store changes.
-	go retryLoopBackoffPeering(retryCtx, logger, func() error {
+	go retryLoopBackoffPeering(streamCtx, logger, func() error {
 		// Try a new address on each iteration by advancing the ring buffer on errors.
-		defer func() {
-			buffer = buffer.Next()
-		}()
-
-		addr, ok := buffer.Value.(string)
-		if !ok {
-			return fmt.Errorf("peer server address type %T is not a string", buffer.Value)
-		}
+		addr := <-nextServerAddr

 		logger.Trace("dialing peer", "addr", addr)
-		conn, err := grpc.DialContext(retryCtx, addr,
+		conn, err := grpc.DialContext(streamCtx, addr,
 			// TODO(peering): use a grpc.WithStatsHandler here?)
 			tlsOption,
 			// For keep alive parameters there is a larger comment in ClientConnPool.dial about that.
@@ -331,7 +341,7 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
 	defer conn.Close()

 	client := pbpeerstream.NewPeerStreamServiceClient(conn)
-	stream, err := client.StreamResources(retryCtx)
+	stream, err := client.StreamResources(streamCtx)
 	if err != nil {
 		return err
 	}
@@ -379,6 +389,74 @@ func (s *Server) establishStream(ctx context.Context, logger hclog.Logger, ws me
 	return nil
 }

+// watchPeerServerAddrs sends an up-to-date peer server address to nextServerAddr.
+// It loads the server addresses into a ring buffer and cycles through them until:
+//  1. streamCtx is cancelled (peer is deleted)
+//  2. the peer is modified and the watchset fires.
+//
+// In case (2) we refetch the peering and rebuild the ring buffer.
+func (s *Server) watchPeerServerAddrs(ctx context.Context, peer *pbpeering.Peering, nextServerAddr chan<- string) {
+	defer close(nextServerAddr)
+
+	// we initialize the ring buffer with the peer passed to `establishStream`
+	// because the caller has pre-checked `peer.ShouldDial`, guaranteeing
+	// at least one server address.
+	//
+	// IMPORTANT: ringbuf must always be length > 0 or else `<-nextServerAddr` may block.
+	ringbuf := ring.New(len(peer.PeerServerAddresses))
+	for _, addr := range peer.PeerServerAddresses {
+		ringbuf.Value = addr
+		ringbuf = ringbuf.Next()
+	}
+	innerWs := memdb.NewWatchSet()
+	_, _, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID)
+	if err != nil {
+		s.logger.Warn("failed to watch for changes to peer; server addresses may become stale over time.",
+			"peer_id", peer.ID,
+			"error", err)
+	}
+
+	fetchAddrs := func() error {
+		// reinstantiate innerWs to prevent it from growing indefinitely
+		innerWs = memdb.NewWatchSet()
+		_, peering, err := s.fsm.State().PeeringReadByID(innerWs, peer.ID)
+		if err != nil {
+			return fmt.Errorf("failed to fetch peer %q: %w", peer.ID, err)
+		}
+		if !peering.IsActive() {
+			return fmt.Errorf("peer %q is no longer active", peer.ID)
+		}
+		if len(peering.PeerServerAddresses) == 0 {
+			return fmt.Errorf("peer %q has no addresses to dial", peer.ID)
+		}
+
+		ringbuf = ring.New(len(peering.PeerServerAddresses))
+		for _, addr := range peering.PeerServerAddresses {
+			ringbuf.Value = addr
+			ringbuf = ringbuf.Next()
+		}
+		return nil
+	}
+
+	for {
+		select {
+		case nextServerAddr <- ringbuf.Value.(string):
+			ringbuf = ringbuf.Next()
+		case err := <-innerWs.WatchCh(ctx):
+			if err != nil {
+				// context was cancelled
+				return
+			}
+			// watch fired so we refetch the peering and rebuild the ring buffer
+			if err := fetchAddrs(); err != nil {
+				s.logger.Warn("watchset for peer was fired but failed to update server addresses",
+					"peer_id", peer.ID,
+					"error", err)
+			}
+		}
+	}
+}
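watchPeerServerAddrs keeps the ring-buffer rotation from the removed establishStream code, but rebuilds the ring whenever the watchset fires. The container/ring rotation itself, reduced to a runnable sketch with made-up addresses:

```go
package main

import (
	"container/ring"
	"fmt"
)

func main() {
	addrs := []string{"10.0.0.1:8300", "10.0.0.2:8300", "10.0.0.3:8300"}

	// Load the addresses into a ring; r ends up back at the first slot.
	r := ring.New(len(addrs))
	for _, a := range addrs {
		r.Value = a
		r = r.Next()
	}

	// Each read hands out the current address and advances, wrapping around,
	// so repeated dial failures cycle through every known server.
	for i := 0; i < 5; i++ {
		fmt.Println(r.Value.(string))
		r = r.Next()
	}
}
```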
 func (s *Server) startPeeringDeferredDeletion(ctx context.Context) {
 	s.leaderRoutineManager.Start(ctx, peeringDeletionRoutineName, s.runPeeringDeletions)
 }
@@ -391,6 +469,12 @@ func (s *Server) runPeeringDeletions(ctx context.Context) error {
 	// process. This includes deletion of the peerings themselves in addition to any peering data
 	raftLimiter := rate.NewLimiter(defaultDeletionApplyRate, int(defaultDeletionApplyRate))
 	for {
+		select {
+		case <-ctx.Done():
+			return nil
+		default:
+		}
+
 		ws := memdb.NewWatchSet()
 		state := s.fsm.State()
 		_, peerings, err := s.fsm.State().PeeringListDeleted(ws)
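The added select gives runPeeringDeletions a prompt exit once the leader routine's context is cancelled; the empty default keeps the check non-blocking so the loop body still runs every iteration. The shape of the pattern, in a runnable sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func run(ctx context.Context) error {
	for {
		// Non-blocking cancellation check at the top of each iteration,
		// matching the select added to runPeeringDeletions above.
		select {
		case <-ctx.Done():
			return nil
		default:
		}
		fmt.Println("one round of deletion work")
		time.Sleep(10 * time.Millisecond)
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 35*time.Millisecond)
	defer cancel()
	_ = run(ctx) // exits shortly after the context is cancelled
}
```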
@@ -7,6 +7,7 @@ import (
 	"errors"
 	"fmt"
 	"io/ioutil"
+	"math"
 	"testing"
 	"time"
@@ -17,6 +18,7 @@ import (
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	grpcstatus "google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"

 	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/consul/state"
@@ -24,6 +26,7 @@ import (
 	"github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/proto/pbpeering"
 	"github.com/hashicorp/consul/sdk/freeport"
+	"github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	"github.com/hashicorp/consul/testrpc"
 	"github.com/hashicorp/consul/types"
@@ -37,6 +40,7 @@ func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
 		testLeader_PeeringSync_Lifecycle_ClientDeletion(t, true)
 	})
 }
+
 func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS bool) {
 	if testing.Short() {
 		t.Skip("too slow for testing.Short")
@@ -134,9 +138,11 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
 	// Delete the peering to trigger the termination sequence.
 	deleted := &pbpeering.Peering{
 		ID:                  p.Peering.ID,
 		Name:                "my-peer-acceptor",
-		DeletedAt:           structs.TimeToProto(time.Now()),
+		State:               pbpeering.PeeringState_DELETING,
+		PeerServerAddresses: p.Peering.PeerServerAddresses,
+		DeletedAt:           structs.TimeToProto(time.Now()),
 	}
 	require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: deleted}))
 	dialer.logger.Trace("deleted peering for my-peer-acceptor")
@@ -259,6 +265,7 @@ func testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t *testing.T, enableTLS b
 	deleted := &pbpeering.Peering{
 		ID:        p.Peering.PeerID,
 		Name:      "my-peer-dialer",
+		State:     pbpeering.PeeringState_DELETING,
 		DeletedAt: structs.TimeToProto(time.Now()),
 	}
@@ -428,6 +435,7 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) {
 		Peering: &pbpeering.Peering{
 			ID:        peerID,
 			Name:      peerName,
+			State:     pbpeering.PeeringState_DELETING,
 			DeletedAt: structs.TimeToProto(time.Now()),
 		},
 	}))
@@ -974,6 +982,7 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
 	var (
 		s2PeerID1          = generateUUID()
 		s2PeerID2          = generateUUID()
+		s2PeerID3          = generateUUID()
 		testContextTimeout = 60 * time.Second
 		lastIdx            = uint64(0)
 	)
@@ -1063,6 +1072,24 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
 		// mimic tracking exported services
 		mst2.TrackExportedService(structs.ServiceName{Name: "d-service"})
 		mst2.TrackExportedService(structs.ServiceName{Name: "e-service"})
+
+		// pretend that the heartbeat happened
+		mst2.TrackRecvHeartbeat()
+	}
+
+	// Simulate a peering that never connects
+	{
+		p3 := &pbpeering.Peering{
+			ID:                  s2PeerID3,
+			Name:                "my-peer-s4",
+			PeerID:              token.PeerID, // doesn't much matter what these values are
+			PeerCAPems:          token.CA,
+			PeerServerName:      token.ServerName,
+			PeerServerAddresses: token.ServerAddresses,
+		}
+		require.True(t, p3.ShouldDial())
+		lastIdx++
+		require.NoError(t, s2.fsm.State().PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: p3}))
 	}

 	// set up a metrics sink
@@ -1092,6 +1119,18 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
 			require.True(r, ok, fmt.Sprintf("did not find the key %q", keyMetric2))
 			require.Equal(r, float32(2), metric2.Value) // for d, e services

+			keyHealthyMetric2 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s3;peer_id=%s", s2PeerID2)
+			healthyMetric2, ok := intv.Gauges[keyHealthyMetric2]
+			require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric2))
+			require.Equal(r, float32(1), healthyMetric2.Value)
+
+			keyHealthyMetric3 := fmt.Sprintf("us-west.consul.peering.healthy;peer_name=my-peer-s4;peer_id=%s", s2PeerID3)
+			healthyMetric3, ok := intv.Gauges[keyHealthyMetric3]
+			require.True(r, ok, fmt.Sprintf("did not find the key %q", keyHealthyMetric3))
+			require.True(r, math.IsNaN(float64(healthyMetric3.Value)))
 		})
 	}
@@ -1131,6 +1170,7 @@ func TestLeader_Peering_NoDeletionWhenPeeringDisabled(t *testing.T) {
 		Peering: &pbpeering.Peering{
 			ID:        peerID,
 			Name:      peerName,
+			State:     pbpeering.PeeringState_DELETING,
 			DeletedAt: structs.TimeToProto(time.Now()),
 		},
 	}))
@@ -1182,7 +1222,7 @@ func TestLeader_Peering_NoEstablishmentWhenPeeringDisabled(t *testing.T) {
 	}))

 	require.Never(t, func() bool {
-		_, found := s1.peerStreamTracker.StreamStatus(peerID)
+		_, found := s1.peerStreamServer.StreamStatus(peerID)
 		return found
 	}, 7*time.Second, 1*time.Second, "peering should not have been established")
 }
@@ -1343,3 +1383,138 @@ func Test_isFailedPreconditionErr(t *testing.T) {
 	werr := fmt.Errorf("wrapped: %w", err)
 	assert.True(t, isFailedPreconditionErr(werr))
 }
+
+func Test_Leader_PeeringSync_ServerAddressUpdates(t *testing.T) {
+	if testing.Short() {
+		t.Skip("too slow for testing.Short")
+	}
+
+	// We want 1s retries for this test
+	orig := maxRetryBackoff
+	maxRetryBackoff = 1
+	t.Cleanup(func() { maxRetryBackoff = orig })
+
+	_, acceptor := testServerWithConfig(t, func(c *Config) {
+		c.NodeName = "acceptor"
+		c.Datacenter = "dc1"
+		c.TLSConfig.Domain = "consul"
+	})
+	testrpc.WaitForLeader(t, acceptor.RPC, "dc1")
+
+	// Create a peering by generating a token
+	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+	t.Cleanup(cancel)
+
+	conn, err := grpc.DialContext(ctx, acceptor.config.RPCAddr.String(),
+		grpc.WithContextDialer(newServerDialer(acceptor.config.RPCAddr.String())),
+		grpc.WithInsecure(),
+		grpc.WithBlock())
+	require.NoError(t, err)
+	defer conn.Close()
+
+	acceptorClient := pbpeering.NewPeeringServiceClient(conn)
+
+	req := pbpeering.GenerateTokenRequest{
+		PeerName: "my-peer-dialer",
+	}
+	resp, err := acceptorClient.GenerateToken(ctx, &req)
+	require.NoError(t, err)
+
+	// Bring up dialer and establish a peering with acceptor's token so that it attempts to dial.
+	_, dialer := testServerWithConfig(t, func(c *Config) {
+		c.NodeName = "dialer"
+		c.Datacenter = "dc2"
+		c.PrimaryDatacenter = "dc2"
+	})
+	testrpc.WaitForLeader(t, dialer.RPC, "dc2")
+
+	// Create a peering at dialer by establishing a peering with acceptor's token
+	ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
+	t.Cleanup(cancel)
+
+	conn, err = grpc.DialContext(ctx, dialer.config.RPCAddr.String(),
+		grpc.WithContextDialer(newServerDialer(dialer.config.RPCAddr.String())),
+		grpc.WithInsecure(),
+		grpc.WithBlock())
+	require.NoError(t, err)
+	defer conn.Close()
+
+	dialerClient := pbpeering.NewPeeringServiceClient(conn)
+
+	establishReq := pbpeering.EstablishRequest{
+		PeerName:     "my-peer-acceptor",
+		PeeringToken: resp.PeeringToken,
+	}
+	_, err = dialerClient.Establish(ctx, &establishReq)
+	require.NoError(t, err)
+
+	p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
+	require.NoError(t, err)
+
+	retry.Run(t, func(r *retry.R) {
+		status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
+		require.True(r, found)
+		require.True(r, status.Connected)
+	})
+
+	testutil.RunStep(t, "calling establish with active connection does not overwrite server addresses", func(t *testing.T) {
+		ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
+		t.Cleanup(cancel)
+
+		// generate a new token from the acceptor
+		req := pbpeering.GenerateTokenRequest{
+			PeerName: "my-peer-dialer",
+		}
+		resp, err := acceptorClient.GenerateToken(ctx, &req)
+		require.NoError(t, err)
+
+		token, err := acceptor.peeringBackend.DecodeToken([]byte(resp.PeeringToken))
+		require.NoError(t, err)
+
+		// we will update the token with bad addresses to assert it doesn't clobber existing ones
+		token.ServerAddresses = []string{"1.2.3.4:1234"}
+
+		badToken, err := acceptor.peeringBackend.EncodeToken(token)
+		require.NoError(t, err)
+
+		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+		t.Cleanup(cancel)
+
+		// Try establishing.
+		// This call will only succeed if the bad address was not used in the calls to exchange the peering secret.
+		establishReq := pbpeering.EstablishRequest{
+			PeerName:     "my-peer-acceptor",
+			PeeringToken: string(badToken),
+		}
+		_, err = dialerClient.Establish(ctx, &establishReq)
+		require.NoError(t, err)
+
+		p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
+		require.NoError(t, err)
+		require.NotContains(t, p.Peering.PeerServerAddresses, "1.2.3.4:1234")
+	})
+
+	testutil.RunStep(t, "updated server addresses are picked up by the leader", func(t *testing.T) {
+		// force close the acceptor's gRPC server so the dialer retries with a new address.
+		acceptor.externalGRPCServer.Stop()
+
+		clone := proto.Clone(p.Peering)
+		updated := clone.(*pbpeering.Peering)
+		// start with a bad address so we can assert for a specific error
+		updated.PeerServerAddresses = append([]string{
+			"bad",
+		}, p.Peering.PeerServerAddresses...)
+
+		// this write will wake up the watch on the leader to refetch server addresses
+		require.NoError(t, dialer.fsm.State().PeeringWrite(2000, &pbpeering.PeeringWriteRequest{Peering: updated}))
+
+		retry.Run(t, func(r *retry.R) {
+			status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
+			require.True(r, found)
+			// We assert for this error to be set which would indicate that we iterated
+			// through a bad address.
+			require.Contains(r, status.LastSendErrorMessage, "transport: Error while dialing dial tcp: address bad: missing port in address")
+			require.False(r, status.Connected)
+		})
+	})
+}
@@ -370,9 +370,9 @@ type Server struct {
 	// peerStreamServer is a server used to handle peering streams from external clusters.
 	peerStreamServer *peerstream.Server
 	// peeringServer handles peering RPC requests internal to this cluster, like generating peering tokens.
 	peeringServer *peering.Server
-	peerStreamTracker *peerstream.Tracker

 	// embedded struct to hold all the enterprise specific data
 	EnterpriseServer
@@ -724,11 +724,9 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Ser
 		Logger: logger.Named("grpc-api.server-discovery"),
 	}).Register(s.externalGRPCServer)

-	s.peerStreamTracker = peerstream.NewTracker()
 	s.peeringBackend = NewPeeringBackend(s)
 	s.peerStreamServer = peerstream.NewServer(peerstream.Config{
 		Backend: s.peeringBackend,
-		Tracker: s.peerStreamTracker,
 		GetStore:    func() peerstream.StateStore { return s.FSM().State() },
 		Logger:      logger.Named("grpc-api.peerstream"),
 		ACLResolver: s.ACLResolver,
@@ -790,7 +788,7 @@ func newGRPCHandlerFromConfig(deps Deps, config *Config, s *Server) connHandler
 	p := peering.NewServer(peering.Config{
 		Backend: s.peeringBackend,
-		Tracker: s.peerStreamTracker,
+		Tracker: s.peerStreamServer.Tracker,
 		Logger:  deps.Logger.Named("grpc-api.peering"),
 		ForwardRPC: func(info structs.RPCInfo, fn func(*grpc.ClientConn) error) (bool, error) {
 			// Only forward the request if the dc in the request matches the server's datacenter.
@@ -1574,12 +1572,12 @@ func (s *Server) Stats() map[string]map[string]string {
 // GetLANCoordinate returns the coordinate of the node in the LAN gossip
 // pool.
 //
 //   - Clients return a single coordinate for the single gossip pool they are
 //     in (default, segment, or partition).
 //
 //   - Servers return one coordinate for their canonical gossip pool (i.e.
 //     default partition/segment) and one per segment they are also ancillary
 //     members of.
 //
 // NOTE: servers do not emit coordinates for partitioned gossip pools they
 // are ancillary members of.
@@ -1134,7 +1134,7 @@ func terminatingGatewayVirtualIPsSupported(tx ReadTxn, ws memdb.WatchSet) (bool,
 }

 // Services returns all services along with a list of associated tags.
-func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) {
+func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, []*structs.ServiceNode, error) {
 	tx := s.db.Txn(false)
 	defer tx.Abort()
@@ -1148,30 +1148,11 @@ func (s *Store) Services(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerNam
 	}
 	ws.Add(services.WatchCh())

-	// Rip through the services and enumerate them and their unique set of
-	// tags.
-	unique := make(map[string]map[string]struct{})
+	var result []*structs.ServiceNode
 	for service := services.Next(); service != nil; service = services.Next() {
-		svc := service.(*structs.ServiceNode)
-		tags, ok := unique[svc.ServiceName]
-		if !ok {
-			unique[svc.ServiceName] = make(map[string]struct{})
-			tags = unique[svc.ServiceName]
-		}
-		for _, tag := range svc.ServiceTags {
-			tags[tag] = struct{}{}
-		}
+		result = append(result, service.(*structs.ServiceNode))
 	}
-
-	// Generate the output structure.
-	var results = make(structs.Services)
-	for service, tags := range unique {
-		results[service] = make([]string, 0, len(tags))
-		for tag := range tags {
-			results[service] = append(results[service], tag)
-		}
-	}
-	return idx, results, nil
+	return idx, result, nil
 }
@@ -1212,7 +1193,7 @@ func serviceListTxn(tx ReadTxn, ws memdb.WatchSet, entMeta *acl.EnterpriseMeta,
 }

 // ServicesByNodeMeta returns all services, filtered by the given node metadata.
-func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.Services, error) {
+func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, []*structs.ServiceNode, error) {
 	tx := s.db.Txn(false)
 	defer tx.Abort()
@@ -1259,8 +1240,7 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string,
 	}
 	allServicesCh := allServices.WatchCh()

-	// Populate the services map
-	unique := make(map[string]map[string]struct{})
+	var result structs.ServiceNodes
 	for node := nodes.Next(); node != nil; node = nodes.Next() {
 		n := node.(*structs.Node)
 		if len(filters) > 1 && !structs.SatisfiesMetaFilters(n.Meta, filters) {
@@ -1274,30 +1254,11 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string,
 		}
 		ws.AddWithLimit(watchLimit, services.WatchCh(), allServicesCh)

-		// Rip through the services and enumerate them and their unique set of
-		// tags.
 		for service := services.Next(); service != nil; service = services.Next() {
-			svc := service.(*structs.ServiceNode)
-			tags, ok := unique[svc.ServiceName]
-			if !ok {
-				unique[svc.ServiceName] = make(map[string]struct{})
-				tags = unique[svc.ServiceName]
-			}
-			for _, tag := range svc.ServiceTags {
-				tags[tag] = struct{}{}
-			}
+			result = append(result, service.(*structs.ServiceNode))
 		}
 	}
-
-	// Generate the output structure.
-	var results = make(structs.Services)
-	for service, tags := range unique {
-		results[service] = make([]string, 0, len(tags))
-		for tag := range tags {
-			results[service] = append(results[service], tag)
-		}
-	}
-	return idx, results, nil
+	return idx, result, nil
 }

 // maxIndexForService return the maximum Raft Index for a service
@@ -1717,6 +1678,9 @@ func (s *Store) ServiceNode(nodeID, nodeName, serviceID string, entMeta *acl.Ent
 	if err != nil {
 		return 0, nil, fmt.Errorf("failed querying service for node %q: %w", node.Node, err)
 	}
+	if service != nil {
+		service.ID = node.ID
+	}
 	return idx, service, nil
 }
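Services and ServicesByNodeMeta now return the raw []*structs.ServiceNode instead of the pre-aggregated name-to-tags map, leaving aggregation to callers that still want the old shape. A sketch of that aggregation, reconstructed from the removed code; the local ServiceNode type and helper name are simplified stand-ins:

```go
package main

import (
	"fmt"
	"sort"
)

type ServiceNode struct {
	ServiceName string
	ServiceTags []string
}

// toServiceTagMap rebuilds the old map[service][]tag shape (the removed
// structs.Services aggregation above) from a flat slice of service nodes.
func toServiceTagMap(nodes []*ServiceNode) map[string][]string {
	unique := make(map[string]map[string]struct{})
	for _, sn := range nodes {
		tags, ok := unique[sn.ServiceName]
		if !ok {
			tags = make(map[string]struct{})
			unique[sn.ServiceName] = tags
		}
		for _, tag := range sn.ServiceTags {
			tags[tag] = struct{}{}
		}
	}
	out := make(map[string][]string, len(unique))
	for name, tags := range unique {
		list := make([]string, 0, len(tags))
		for tag := range tags {
			list = append(list, tag)
		}
		sort.Strings(list) // deterministic order; the old map iteration was not
		out[name] = list
	}
	return out
}

func main() {
	nodes := []*ServiceNode{
		{ServiceName: "redis", ServiceTags: []string{"prod", "primary"}},
		{ServiceName: "redis", ServiceTags: []string{"prod", "replica"}},
		{ServiceName: "dogs"},
	}
	fmt.Println(toServiceTagMap(nodes))
}
```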
@ -12,6 +12,8 @@ import (
"github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/go-memdb" "github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-uuid" "github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -270,17 +272,20 @@ func TestStateStore_EnsureRegistration(t *testing.T) {
require.Equal(t, uint64(2), idx) require.Equal(t, uint64(2), idx)
require.Equal(t, svcmap["redis1"], r) require.Equal(t, svcmap["redis1"], r)
exp := svcmap["redis1"].ToServiceNode("node1")
exp.ID = nodeID
// lookup service by node name // lookup service by node name
idx, sn, err := s.ServiceNode("", "node1", "redis1", nil, peerName) idx, sn, err := s.ServiceNode("", "node1", "redis1", nil, peerName)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, uint64(2), idx) require.Equal(t, uint64(2), idx)
require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn) require.Equal(t, exp, sn)
// lookup service by node ID // lookup service by node ID
idx, sn, err = s.ServiceNode(string(nodeID), "", "redis1", nil, peerName) idx, sn, err = s.ServiceNode(string(nodeID), "", "redis1", nil, peerName)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, uint64(2), idx) require.Equal(t, uint64(2), idx)
require.Equal(t, svcmap["redis1"].ToServiceNode("node1"), sn) require.Equal(t, exp, sn)
// lookup service by invalid node // lookup service by invalid node
_, _, err = s.ServiceNode("", "invalid-node", "redis1", nil, peerName) _, _, err = s.ServiceNode("", "invalid-node", "redis1", nil, peerName)
@ -2102,10 +2107,13 @@ func TestStateStore_Services(t *testing.T) {
Address: "1.1.1.1", Address: "1.1.1.1",
Port: 1111, Port: 1111,
} }
ns1.EnterpriseMeta.Normalize()
if err := s.EnsureService(2, "node1", ns1); err != nil { if err := s.EnsureService(2, "node1", ns1); err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
testRegisterService(t, s, 3, "node1", "dogs") ns1Dogs := testRegisterService(t, s, 3, "node1", "dogs")
ns1Dogs.EnterpriseMeta.Normalize()
testRegisterNode(t, s, 4, "node2") testRegisterNode(t, s, 4, "node2")
ns2 := &structs.NodeService{ ns2 := &structs.NodeService{
ID: "service3", ID: "service3",
@ -2114,6 +2122,7 @@ func TestStateStore_Services(t *testing.T) {
Address: "1.1.1.1", Address: "1.1.1.1",
Port: 1111, Port: 1111,
} }
ns2.EnterpriseMeta.Normalize()
if err := s.EnsureService(5, "node2", ns2); err != nil { if err := s.EnsureService(5, "node2", ns2); err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
@ -2131,19 +2140,13 @@ func TestStateStore_Services(t *testing.T) {
t.Fatalf("bad index: %d", idx) t.Fatalf("bad index: %d", idx)
} }
// Verify the result. We sort the lists since the order is // Verify the result.
// non-deterministic (it's built using a map internally). expected := []*structs.ServiceNode{
expected := structs.Services{ ns1Dogs.ToServiceNode("node1"),
"redis": []string{"prod", "primary", "replica"}, ns1.ToServiceNode("node1"),
"dogs": []string{}, ns2.ToServiceNode("node2"),
}
sort.Strings(expected["redis"])
for _, tags := range services {
sort.Strings(tags)
}
if !reflect.DeepEqual(expected, services) {
t.Fatalf("bad: %#v", services)
} }
assertDeepEqual(t, expected, services, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
// Deleting a node with a service should fire the watch. // Deleting a node with a service should fire the watch.
if err := s.DeleteNode(6, "node1", nil, ""); err != nil { if err := s.DeleteNode(6, "node1", nil, ""); err != nil {
@ -2182,6 +2185,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
Address: "1.1.1.1", Address: "1.1.1.1",
Port: 1111, Port: 1111,
} }
ns1.EnterpriseMeta.Normalize()
if err := s.EnsureService(2, "node0", ns1); err != nil { if err := s.EnsureService(2, "node0", ns1); err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
@ -2192,6 +2196,7 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
Address: "1.1.1.1", Address: "1.1.1.1",
Port: 1111, Port: 1111,
} }
ns2.EnterpriseMeta.Normalize()
if err := s.EnsureService(3, "node1", ns2); err != nil { if err := s.EnsureService(3, "node1", ns2); err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
@ -2206,11 +2211,10 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
expected := structs.Services{ expected := []*structs.ServiceNode{
"redis": []string{"primary", "prod"}, ns1.ToServiceNode("node0"),
} }
sort.Strings(res["redis"]) assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
require.Equal(t, expected, res)
}) })
t.Run("Get all services using the common meta value", func(t *testing.T) { t.Run("Get all services using the common meta value", func(t *testing.T) {
@ -2218,11 +2222,12 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
expected := structs.Services{ require.Len(t, res, 2)
"redis": []string{"primary", "prod", "replica"}, expected := []*structs.ServiceNode{
ns1.ToServiceNode("node0"),
ns2.ToServiceNode("node1"),
} }
sort.Strings(res["redis"]) assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
require.Equal(t, expected, res)
}) })
t.Run("Get an empty list for an invalid meta value", func(t *testing.T) { t.Run("Get an empty list for an invalid meta value", func(t *testing.T) {
@ -2230,8 +2235,8 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
expected := structs.Services{} var expected []*structs.ServiceNode
require.Equal(t, expected, res) assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
}) })
t.Run("Get the first node's service instance using multiple meta filters", func(t *testing.T) { t.Run("Get the first node's service instance using multiple meta filters", func(t *testing.T) {
@ -2239,11 +2244,10 @@ func TestStateStore_ServicesByNodeMeta(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
expected := structs.Services{ expected := []*structs.ServiceNode{
"redis": []string{"primary", "prod"}, ns1.ToServiceNode("node0"),
} }
sort.Strings(res["redis"]) assertDeepEqual(t, res, expected, cmpopts.IgnoreFields(structs.ServiceNode{}, "RaftIndex"))
require.Equal(t, expected, res)
}) })
t.Run("Registering some unrelated node + service should not fire the watch.", func(t *testing.T) { t.Run("Registering some unrelated node + service should not fire the watch.", func(t *testing.T) {
@@ -8807,3 +8811,10 @@ func setVirtualIPFlags(t *testing.T, s *Store) {
Value: "true",
}))
}
func assertDeepEqual(t *testing.T, x, y interface{}, opts ...cmp.Option) {
t.Helper()
if diff := cmp.Diff(x, y, opts...); diff != "" {
t.Fatalf("assertion failed: values are not equal\n--- expected\n+++ actual\n%v", diff)
}
}
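
Editor's note: the new `assertDeepEqual` helper leans on `go-cmp` instead of `reflect.DeepEqual`, which is what lets the tests above mask the `RaftIndex` bookkeeping field. A minimal, self-contained sketch of the same pattern, using a stand-in `serviceNode` struct rather than Consul's `structs.ServiceNode`:

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// serviceNode stands in for structs.ServiceNode in this sketch.
type serviceNode struct {
	Node      string
	Service   string
	RaftIndex uint64 // bookkeeping field that should not affect equality
}

func main() {
	expected := []serviceNode{{Node: "node0", Service: "redis"}}
	actual := []serviceNode{{Node: "node0", Service: "redis", RaftIndex: 42}}

	// IgnoreFields suppresses the RaftIndex difference, so the diff is empty.
	diff := cmp.Diff(expected, actual, cmpopts.IgnoreFields(serviceNode{}, "RaftIndex"))
	fmt.Printf("diff: %q\n", diff)
}
```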

View File

@@ -7,12 +7,13 @@ import (
"strings"
"github.com/golang/protobuf/proto"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/configentry"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib/maps"
"github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/go-memdb"
)
const (
@@ -534,6 +535,12 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
if req.Peering.Name == "" {
return errors.New("Missing Peering Name")
}
if req.Peering.State == pbpeering.PeeringState_DELETING && (req.Peering.DeletedAt == nil || structs.IsZeroProtoTime(req.Peering.DeletedAt)) {
return errors.New("Missing deletion time for peering in deleting state")
}
if req.Peering.DeletedAt != nil && !structs.IsZeroProtoTime(req.Peering.DeletedAt) && req.Peering.State != pbpeering.PeeringState_DELETING {
return fmt.Errorf("Unexpected state for peering with deletion time: %s", pbpeering.PeeringStateToAPI(req.Peering.State))
}
// Ensure the name is unique (cannot conflict with another peering with a different ID).
_, existing, err := peeringReadTxn(tx, nil, Query{
@@ -545,11 +552,32 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
}
if existing != nil {
if req.Peering.ShouldDial() != existing.ShouldDial() {
return fmt.Errorf("Cannot switch peering dialing mode from %t to %t", existing.ShouldDial(), req.Peering.ShouldDial())
}
if req.Peering.ID != existing.ID {
return fmt.Errorf("A peering already exists with the name %q and a different ID %q", req.Peering.Name, existing.ID)
}
// Nothing to do if our peer wants to terminate the peering but the peering is already marked for deletion.
if existing.State == pbpeering.PeeringState_DELETING && req.Peering.State == pbpeering.PeeringState_TERMINATED {
return nil
}
// No-op deletion
if existing.State == pbpeering.PeeringState_DELETING && req.Peering.State == pbpeering.PeeringState_DELETING {
return nil
}
// No-op termination
if existing.State == pbpeering.PeeringState_TERMINATED && req.Peering.State == pbpeering.PeeringState_TERMINATED {
return nil
}
// Prevent modifications to Peering marked for deletion.
if !existing.IsActive() {
// This blocks generating new peering tokens or re-establishing the peering until the peering is done deleting.
if existing.State == pbpeering.PeeringState_DELETING {
return fmt.Errorf("cannot write to peering that is marked for deletion")
}
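
Editor's note: taken together, these guards give `PeeringWrite` a small state machine: redundant delete/terminate requests collapse to no-ops, and any other write against a peering that is deleting is rejected. A simplified sketch of that decision logic, with local stand-ins for the `pbpeering` states:

```go
package main

import (
	"errors"
	"fmt"
)

type state int

const (
	deleting state = iota
	terminated
	failing
)

var errDeleting = errors.New("cannot write to peering that is marked for deletion")

// decide mirrors the write-path guards: no-op on redundant
// delete/terminate requests, reject other writes while deleting.
func decide(existing, requested state) (noop bool, err error) {
	switch {
	case existing == deleting && requested == terminated:
		return true, nil // peer asked to terminate, but we're already deleting
	case existing == deleting && requested == deleting:
		return true, nil // no-op deletion
	case existing == terminated && requested == terminated:
		return true, nil // no-op termination
	case existing == deleting:
		return false, errDeleting
	}
	return false, nil // proceed with the write
}

func main() {
	fmt.Println(decide(deleting, terminated)) // true <nil>
	fmt.Println(decide(deleting, failing))    // false, rejected
}
```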
@@ -581,8 +609,8 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) err
req.Peering.ModifyIndex = idx
}
// Ensure associated secrets are cleaned up when a peering is marked for deletion.
if req.Peering.State == pbpeering.PeeringState_DELETING {
// Ensure associated secrets are cleaned up when a peering is marked for deletion or terminated.
if !req.Peering.IsActive() {
if err := peeringSecretsDeleteTxn(tx, req.Peering.ID, req.Peering.ShouldDial()); err != nil {
return fmt.Errorf("failed to delete peering secrets: %w", err)
}
@@ -981,7 +1009,7 @@ func peeringsForServiceTxn(tx ReadTxn, ws memdb.WatchSet, serviceName string, en
if idx > maxIdx {
maxIdx = idx
}
if peering == nil || !peering.IsActive() {
if !peering.IsActive() {
continue
}
peerings = append(peerings, peering)

View File

@@ -950,6 +950,7 @@ func TestStore_Peering_Watch(t *testing.T) {
Peering: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
})
@@ -976,6 +977,7 @@ func TestStore_Peering_Watch(t *testing.T) {
err := s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &pbpeering.Peering{
ID: testBarPeerID,
Name: "bar",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
})
@@ -1077,6 +1079,7 @@ func TestStore_PeeringList_Watch(t *testing.T) {
Peering: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
@@ -1112,16 +1115,22 @@ func TestStore_PeeringWrite(t *testing.T) {
// Each case depends on the previous.
s := NewStateStore(nil)
testTime := time.Now()
type expectations struct {
peering *pbpeering.Peering
secrets *pbpeering.PeeringSecrets
err string
}
type testcase struct {
name string
input *pbpeering.PeeringWriteRequest
expectSecrets *pbpeering.PeeringSecrets
expectErr string
expect expectations
}
run := func(t *testing.T, tc testcase) {
err := s.PeeringWrite(10, tc.input)
if tc.expectErr != "" {
testutil.RequireErrorContains(t, err, tc.expectErr)
if tc.expect.err != "" {
testutil.RequireErrorContains(t, err, tc.expect.err)
return
}
require.NoError(t, err)
@@ -1133,52 +1142,176 @@ func TestStore_PeeringWrite(t *testing.T) {
_, p, err := s.PeeringRead(nil, q)
require.NoError(t, err)
require.NotNil(t, p)
require.Equal(t, tc.input.Peering.State, p.State)
require.Equal(t, tc.input.Peering.Name, p.Name)
require.Equal(t, tc.expect.peering.State, p.State)
require.Equal(t, tc.expect.peering.Name, p.Name)
require.Equal(t, tc.expect.peering.Meta, p.Meta)
if tc.expect.peering.DeletedAt != nil {
require.Equal(t, tc.expect.peering.DeletedAt, p.DeletedAt)
}
secrets, err := s.PeeringSecretsRead(nil, tc.input.Peering.ID)
require.NoError(t, err)
prototest.AssertDeepEqual(t, tc.expectSecrets, secrets)
prototest.AssertDeepEqual(t, tc.expect.secrets, secrets)
}
tcs := []testcase{
{
name: "create baz",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
State: pbpeering.PeeringState_ESTABLISHING,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
SecretsRequest: &pbpeering.SecretsWriteRequest{
PeerID: testBazPeerID,
Request: &pbpeering.SecretsWriteRequest_GenerateToken{
GenerateToken: &pbpeering.SecretsWriteRequest_GenerateTokenRequest{
EstablishmentSecret: testBazSecretID,
},
},
Request: &pbpeering.SecretsWriteRequest_Establish{
Establish: &pbpeering.SecretsWriteRequest_EstablishRequest{
ActiveStreamSecret: testBazSecretID,
},
},
},
},
expectSecrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testBazSecretID,
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_ESTABLISHING,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testBazSecretID,
},
},
},
},
},
{
name: "cannot change ID for baz",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: "123",
Name: "baz",
State: pbpeering.PeeringState_FAILING,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
err: `A peering already exists with the name "baz" and a different ID`,
},
},
{
name: "cannot change dialer status for baz",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: "123",
Name: "baz",
State: pbpeering.PeeringState_FAILING,
// Excluding the peer server addresses leads to baz not being considered a dialer.
// PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
err: "Cannot switch peering dialing mode from true to false",
},
},
{
name: "update baz",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_FAILING,
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expectSecrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Establishment: &pbpeering.PeeringSecrets_Establishment{
SecretID: testBazSecretID,
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_FAILING,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testBazSecretID,
},
},
},
},
},
{
name: "if no state was included in request it is inherited from existing",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
// Send undefined state.
// State: pbpeering.PeeringState_FAILING,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
// Previous failing state is picked up.
State: pbpeering.PeeringState_FAILING,
},
secrets: &pbpeering.PeeringSecrets{
PeerID: testBazPeerID,
Stream: &pbpeering.PeeringSecrets_Stream{
ActiveSecretID: testBazSecretID,
},
},
},
},
{
name: "mark baz as terminated",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
},
// Secrets for baz should have been deleted
secrets: nil,
},
},
{
name: "cannot modify peering during no-op termination",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
PeerServerAddresses: []string{"localhost:8502"},
// Attempt to add metadata
Meta: map[string]string{"foo": "bar"},
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
// Meta should be unchanged.
Meta: nil,
},
},
},
@@ -1186,42 +1319,104 @@ func TestStore_PeeringWrite(t *testing.T) {
{
name: "mark baz for deletion",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
// Secrets for baz should have been deleted
expectSecrets: nil,
},
{
name: "mark baz for deletion",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_DELETING,
PeerServerAddresses: []string{"localhost:8502"},
DeletedAt: structs.TimeToProto(testTime),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(testTime),
},
secrets: nil,
},
},
{
name: "deleting a deleted peering is a no-op",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_DELETING,
PeerServerAddresses: []string{"localhost:8502"},
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
// Still marked as deleting at the original testTime
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(testTime),
},
// Secrets for baz should have been deleted
secrets: nil,
},
},
{
name: "terminating a peering marked for deletion is a no-op",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
State: pbpeering.PeeringState_TERMINATED,
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expect: expectations{
peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
// Still marked as deleting
State: pbpeering.PeeringState_DELETING,
},
// Secrets for baz should have been deleted
secrets: nil,
},
},
{
name: "cannot update peering marked for deletion",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testBazPeerID,
Name: "baz",
PeerServerAddresses: []string{"localhost:8502"},
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
// Attempt to add metadata
Meta: map[string]string{
"source": "kubernetes",
},
},
},
expectErr: "cannot write to peering that is marked for deletion",
expect: expectations{
err: "cannot write to peering that is marked for deletion",
},
},
{
name: "cannot create peering marked for deletion",
input: &pbpeering.PeeringWriteRequest{
Peering: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo",
PeerServerAddresses: []string{"localhost:8502"},
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
Partition: structs.NodeEnterpriseMetaInDefaultPartition().PartitionOrEmpty(),
},
},
expectErr: "cannot create a new peering marked for deletion",
expect: expectations{
err: "cannot create a new peering marked for deletion",
},
},
}
for _, tc := range tcs {
@@ -1246,6 +1441,7 @@ func TestStore_PeeringDelete(t *testing.T) {
Peering: &pbpeering.Peering{
ID: testFooPeerID,
Name: "foo",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))
@@ -1461,7 +1657,13 @@ func TestStateStore_ExportedServicesForPeer(t *testing.T) {
}
newTarget := func(service, serviceSubset, datacenter string) *structs.DiscoveryTarget {
t := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
Service: service,
ServiceSubset: serviceSubset,
Partition: "default",
Namespace: "default",
Datacenter: datacenter,
})
t.SNI = connect.TargetSNI(t, connect.TestTrustDomain)
t.Name = t.SNI
t.ConnectTimeout = 5 * time.Second // default
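
Editor's note: replacing five positional parameters with a `DiscoveryTargetOpts` struct makes call sites self-describing and lets new fields (such as `Peer`) be added without breaking existing callers. A generic sketch of the same options-struct pattern, with hypothetical names:

```go
package main

import "fmt"

// TargetOpts is a hypothetical options struct; fields left at their
// zero value can be defaulted by the constructor.
type TargetOpts struct {
	Service       string
	ServiceSubset string
	Namespace     string
	Partition     string
	Datacenter    string
	Peer          string
}

type target struct{ opts TargetOpts }

func newTarget(opts TargetOpts) target {
	if opts.Namespace == "" {
		opts.Namespace = "default"
	}
	if opts.Partition == "" {
		opts.Partition = "default"
	}
	return target{opts: opts}
}

func main() {
	// Call sites name only what differs from the defaults.
	t := newTarget(TargetOpts{Service: "web", Datacenter: "dc2"})
	fmt.Printf("%+v\n", t.opts)
}
```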
@@ -1753,6 +1955,7 @@ func TestStateStore_PeeringsForService(t *testing.T) {
copied := pbpeering.Peering{
ID: tp.peering.ID,
Name: tp.peering.Name,
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
}
require.NoError(t, s.PeeringWrite(lastIdx, &pbpeering.PeeringWriteRequest{Peering: &copied}))
@@ -2195,6 +2398,7 @@ func TestStore_TrustBundleListByService(t *testing.T) {
Peering: &pbpeering.Peering{
ID: peerID1,
Name: "peer1",
State: pbpeering.PeeringState_DELETING,
DeletedAt: structs.TimeToProto(time.Now()),
},
}))

View File

@@ -146,13 +146,13 @@ func testRegisterServiceOpts(t *testing.T, s *Store, idx uint64, nodeID, service
// testRegisterServiceWithChange registers a service and allows ensuring the consul index is updated
// even if the service already exists, when using `modifyAccordingIndex`.
// This is done by setting the transaction ID in "version" meta so the service will be updated if it already exists.
func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool) {
testRegisterServiceWithChangeOpts(t, s, idx, nodeID, serviceID, modifyAccordingIndex)
func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool) *structs.NodeService {
return testRegisterServiceWithChangeOpts(t, s, idx, nodeID, serviceID, modifyAccordingIndex)
}
// testRegisterServiceWithChangeOpts is the same as testRegisterServiceWithChange with the addition of opts that can
// modify the service prior to writing.
func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool, opts ...func(service *structs.NodeService)) {
func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool, opts ...func(service *structs.NodeService)) *structs.NodeService {
meta := make(map[string]string)
if modifyAccordingIndex {
meta["version"] = fmt.Sprint(idx)
@@ -183,14 +183,15 @@ func testRegisterServiceWithChangeOpts(t *testing.T, s *Store, idx uint64, nodeI
result.ServiceID != serviceID {
t.Fatalf("bad service: %#v", result)
}
return svc
}
// testRegisterService registers a service with the given transaction idx.
// If the service already exists, the transaction number might not be increased.
// Use `testRegisterServiceWithChange()` if you want to perform a registration that
// ensures the transaction is updated by setting idx in the Meta of the Service.
func testRegisterService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) {
testRegisterServiceWithChange(t, s, idx, nodeID, serviceID, false)
func testRegisterService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) *structs.NodeService {
return testRegisterServiceWithChange(t, s, idx, nodeID, serviceID, false)
}
func testRegisterConnectService(t *testing.T, s *Store, idx uint64, nodeID, serviceID string) {

View File

@@ -41,8 +41,8 @@ var Gauges = []prometheus.GaugeDefinition{
Help: "Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.",
},
{
Name: []string{"consul", "kv", "entries"},
Help: "Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.10.3.",
Name: []string{"consul", "state", "kv_entries"},
Help: "Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3.",
},
{
Name: []string{"consul", "state", "connect_instances"},

File diff suppressed because it is too large

View File

@@ -27,8 +27,17 @@ func TestDiscoveryChainRead(t *testing.T) {
defer a.Shutdown()
testrpc.WaitForTestAgent(t, a.RPC, "dc1")
newTarget := func(service, serviceSubset, namespace, partition, datacenter string) *structs.DiscoveryTarget {
t := structs.NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter)
newTarget := func(opts structs.DiscoveryTargetOpts) *structs.DiscoveryTarget {
if opts.Namespace == "" {
opts.Namespace = "default"
}
if opts.Partition == "" {
opts.Partition = "default"
}
if opts.Datacenter == "" {
opts.Datacenter = "dc1"
}
t := structs.NewDiscoveryTarget(opts)
t.SNI = connect.TargetSNI(t, connect.TestClusterID+".consul")
t.Name = t.SNI
t.ConnectTimeout = 5 * time.Second // default
@@ -99,7 +108,7 @@ func TestDiscoveryChainRead(t *testing.T) {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
},
}
require.Equal(t, expect, value.Chain)
@@ -144,7 +153,7 @@ func TestDiscoveryChainRead(t *testing.T) {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.default.dc2": newTarget("web", "", "default", "default", "dc2"),
"web.default.default.dc2": newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}),
},
}
require.Equal(t, expect, value.Chain)
@@ -198,7 +207,7 @@ func TestDiscoveryChainRead(t *testing.T) {
},
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.default.dc1": newTarget("web", "", "default", "default", "dc1"),
"web.default.default.dc1": newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
},
}
require.Equal(t, expect, value.Chain)
@@ -264,11 +273,11 @@ func TestDiscoveryChainRead(t *testing.T) {
},
Targets: map[string]*structs.DiscoveryTarget{
"web.default.default.dc1": targetWithConnectTimeout(
newTarget("web", "", "default", "default", "dc1"),
newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
33*time.Second,
),
"web.default.default.dc2": targetWithConnectTimeout(
newTarget("web", "", "default", "default", "dc2"),
newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}),
33*time.Second,
),
},
@@ -280,7 +289,7 @@ func TestDiscoveryChainRead(t *testing.T) {
}))
expectTarget_DC1 := targetWithConnectTimeout(
newTarget("web", "", "default", "default", "dc1"),
newTarget(structs.DiscoveryTargetOpts{Service: "web"}),
22*time.Second,
)
expectTarget_DC1.MeshGateway = structs.MeshGatewayConfig{
@@ -288,7 +297,7 @@ func TestDiscoveryChainRead(t *testing.T) {
}
expectTarget_DC2 := targetWithConnectTimeout(
newTarget("web", "", "default", "default", "dc2"),
newTarget(structs.DiscoveryTargetOpts{Service: "web", Datacenter: "dc2"}),
22*time.Second,
)
expectTarget_DC2.MeshGateway = structs.MeshGatewayConfig{

View File

@@ -1,12 +1,13 @@
package external
import (
"time"
middleware "github.com/grpc-ecosystem/go-grpc-middleware"
recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"time"
agentmiddleware "github.com/hashicorp/consul/agent/grpc-middleware"
"github.com/hashicorp/consul/tlsutil"
@@ -34,7 +35,7 @@ func NewServer(logger agentmiddleware.Logger, tls *tlsutil.Configurator) *grpc.S
MinTime: 15 * time.Second,
}),
}
if tls != nil && tls.GRPCTLSConfigured() {
if tls != nil && tls.GRPCServerUseTLS() {
creds := credentials.NewTLS(tls.IncomingGRPCConfig())
opts = append(opts, grpc.Creds(creds))
}

View File

@@ -52,13 +52,21 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G
}
// Build out the response
var serviceName string
if svc.ServiceKind == structs.ServiceKindConnectProxy {
serviceName = svc.ServiceProxy.DestinationServiceName
} else {
serviceName = svc.ServiceName
}
resp := &pbdataplane.GetEnvoyBootstrapParamsResponse{
Service: svc.ServiceProxy.DestinationServiceName,
Service: serviceName,
Partition: svc.EnterpriseMeta.PartitionOrDefault(),
Namespace: svc.EnterpriseMeta.NamespaceOrDefault(),
Datacenter: s.Datacenter,
ServiceKind: convertToResponseServiceKind(svc.ServiceKind),
NodeName: svc.Node,
NodeId: string(svc.ID),
}
bootstrapConfig, err := structpb.NewStruct(svc.ServiceProxy.Config)
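
Editor's note: the bootstrap response now distinguishes proxy kinds: only connect proxies report the `DestinationServiceName` of the service they front, while gateways (whose destination name is empty) report their own registered name. A reduced, self-contained sketch of that selection, using stand-in types and string kinds rather than Consul's:

```go
package main

import "fmt"

// nodeService is a reduced stand-in for the registration fields used above.
type nodeService struct {
	ServiceKind            string
	ServiceName            string
	DestinationServiceName string
}

// bootstrapServiceName mirrors the branch added to GetEnvoyBootstrapParams:
// a connect proxy answers for the service it fronts, a gateway for itself.
func bootstrapServiceName(svc nodeService) string {
	if svc.ServiceKind == "connect-proxy" {
		return svc.DestinationServiceName
	}
	return svc.ServiceName
}

func main() {
	proxy := nodeService{ServiceKind: "connect-proxy", ServiceName: "web-sidecar-proxy", DestinationServiceName: "web"}
	gw := nodeService{ServiceKind: "mesh-gateway", ServiceName: "mesh-gateway"}
	fmt.Println(bootstrapServiceName(proxy)) // web
	fmt.Println(bootstrapServiceName(gw))    // mesh-gateway
}
```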

View File

@@ -97,14 +97,20 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) {
resp, err := client.GetEnvoyBootstrapParams(ctx, req)
require.NoError(t, err)
require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service)
if tc.registerReq.Service.IsGateway() {
require.Equal(t, tc.registerReq.Service.Service, resp.Service)
} else {
require.Equal(t, tc.registerReq.Service.Proxy.DestinationServiceName, resp.Service)
}
require.Equal(t, serverDC, resp.Datacenter)
require.Equal(t, tc.registerReq.EnterpriseMeta.PartitionOrDefault(), resp.Partition)
require.Equal(t, tc.registerReq.EnterpriseMeta.NamespaceOrDefault(), resp.Namespace)
require.Contains(t, resp.Config.Fields, proxyConfigKey)
require.Equal(t, structpb.NewStringValue(proxyConfigValue), resp.Config.Fields[proxyConfigKey])
require.Equal(t, convertToResponseServiceKind(tc.registerReq.Service.Kind), resp.ServiceKind)
require.Equal(t, tc.registerReq.Node, resp.NodeName)
require.Equal(t, string(tc.registerReq.ID), resp.NodeId)
}
testCases := []testCase{

View File

@@ -26,11 +26,12 @@ const (
type Server struct {
Config
Tracker *Tracker
}
type Config struct {
Backend Backend
Tracker *Tracker
GetStore func() StateStore
Logger hclog.Logger
ForwardRPC func(structs.RPCInfo, func(*grpc.ClientConn) error) (bool, error)
@@ -53,7 +54,6 @@ type ACLResolver interface {
func NewServer(cfg Config) *Server {
requireNotNil(cfg.Backend, "Backend")
requireNotNil(cfg.Tracker, "Tracker")
requireNotNil(cfg.GetStore, "GetStore")
requireNotNil(cfg.Logger, "Logger")
// requireNotNil(cfg.ACLResolver, "ACLResolver") // TODO(peering): reenable check when ACLs are required
@@ -67,7 +67,8 @@ func NewServer(cfg Config) *Server {
cfg.incomingHeartbeatTimeout = defaultIncomingHeartbeatTimeout
}
return &Server{
Config: cfg,
Tracker: NewTracker(cfg.incomingHeartbeatTimeout),
}
}

View File

@@ -575,6 +575,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
status.TrackRecvResourceSuccess()
}
// We are replying ACK or NACK depending on whether we successfully processed the response.
if err := streamSend(reply); err != nil {
return fmt.Errorf("failed to send to stream: %v", err)
}

View File

@@ -499,9 +499,8 @@ func TestStreamResources_Server_Terminate(t *testing.T) {
base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
}
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
})
srv, store := newTestServer(t, nil)
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@@ -552,9 +551,8 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
base: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
}
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
})
srv, store := newTestServer(t, nil)
srv.Tracker.setClock(it.Now)
// Set the initial roots and CA configuration.
_, rootA := writeInitialRootsAndCA(t, store)
@@ -572,7 +570,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
})
})
var lastSendSuccess time.Time
var lastSendAck time.Time
testutil.RunStep(t, "ack tracked as success", func(t *testing.T) {
ack := &pbpeerstream.ReplicationMessage{
@@ -587,19 +585,19 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
},
}
lastSendSuccess = it.FutureNow(1)
lastSendAck = it.FutureNow(1)
err := client.Send(ack)
require.NoError(t, err)
expect := Status{
Connected: true,
LastAck: lastSendSuccess,
LastAck: lastSendAck,
}
retry.Run(t, func(r *retry.R) {
status, ok := srv.StreamStatus(testPeerID)
rStatus, ok := srv.StreamStatus(testPeerID)
require.True(r, ok)
require.Equal(r, expect, status)
require.Equal(r, expect, rStatus)
})
})
@@ -629,15 +627,15 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
expect := Status{
Connected: true,
LastAck: lastSendSuccess,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
}
retry.Run(t, func(r *retry.R) {
status, ok := srv.StreamStatus(testPeerID)
rStatus, ok := srv.StreamStatus(testPeerID)
require.True(r, ok)
require.Equal(r, expect, status)
require.Equal(r, expect, rStatus)
})
})
@@ -694,7 +692,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
expect := Status{
Connected: true,
LastAck: lastSendSuccess,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
LastRecvResourceSuccess: lastRecvResourceSuccess,
@@ -753,7 +751,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
expect := Status{
Connected: true,
LastAck: lastSendSuccess,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
LastRecvResourceSuccess: lastRecvResourceSuccess,
@@ -785,7 +783,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
expect := Status{
Connected: true,
LastAck: lastSendSuccess,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
LastRecvResourceSuccess: lastRecvResourceSuccess,
@@ -816,7 +814,7 @@ func TestStreamResources_Server_StreamTracker(t *testing.T) {
expect := Status{
Connected: false,
DisconnectErrorMessage: lastRecvErrorMsg,
LastAck: lastSendSuccess,
LastAck: lastSendAck,
LastNack: lastNack,
LastNackMessage: lastNackMsg,
DisconnectTime: disconnectTime,
@@ -1128,9 +1126,9 @@ func TestStreamResources_Server_DisconnectsOnHeartbeatTimeout(t *testing.T) {
}
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
c.incomingHeartbeatTimeout = 5 * time.Millisecond
})
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@@ -1176,9 +1174,9 @@ func TestStreamResources_Server_SendsHeartbeats(t *testing.T) {
outgoingHeartbeatInterval := 5 * time.Millisecond
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
c.outgoingHeartbeatInterval = outgoingHeartbeatInterval
})
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@@ -1235,9 +1233,9 @@ func TestStreamResources_Server_KeepsConnectionOpenWithHeartbeat(t *testing.T) {
incomingHeartbeatTimeout := 10 * time.Millisecond
srv, store := newTestServer(t, func(c *Config) {
c.Tracker.SetClock(it.Now)
c.incomingHeartbeatTimeout = incomingHeartbeatTimeout
})
srv.Tracker.setClock(it.Now)
p := writePeeringToBeDialed(t, store, 1, "my-peer")
require.Empty(t, p.PeerID, "should be empty if being dialed")
@@ -2746,7 +2744,6 @@ func newTestServer(t *testing.T, configFn func(c *Config)) (*testServer, *state.
store: store,
pub: publisher,
},
Tracker: NewTracker(),
GetStore: func() StateStore { return store },
Logger: testutil.Logger(t),
Datacenter: "dc1",

View File

@@ -14,18 +14,27 @@ type Tracker struct {
mu sync.RWMutex
streams map[string]*MutableStatus
// heartbeatTimeout is the max duration a connection is allowed to be
// disconnected before the stream health is reported as non-healthy
heartbeatTimeout time.Duration
// timeNow is a shim for testing.
timeNow func() time.Time
}
func NewTracker() *Tracker {
func NewTracker(heartbeatTimeout time.Duration) *Tracker {
if heartbeatTimeout == 0 {
heartbeatTimeout = defaultIncomingHeartbeatTimeout
}
return &Tracker{
streams: make(map[string]*MutableStatus),
timeNow: time.Now,
heartbeatTimeout: heartbeatTimeout,
}
}
func (t *Tracker) SetClock(clock func() time.Time) {
// setClock is used for debugging purposes only.
func (t *Tracker) setClock(clock func() time.Time) {
if clock == nil {
t.timeNow = time.Now
} else {
@@ -101,7 +110,9 @@ func (t *Tracker) StreamStatus(id string) (resp Status, found bool) {
s, ok := t.streams[id]
if !ok {
return Status{}, false
return Status{
NeverConnected: true,
}, false
}
return s.GetStatus(), true
}
@@ -126,6 +137,39 @@ func (t *Tracker) DeleteStatus(id string) {
delete(t.streams, id)
}
// IsHealthy calculates the health of a peering status.
// We define a peering as unhealthy if its status has been in the following
// states for longer than the configured incomingHeartbeatTimeout.
// - If it is disconnected
// - If the last received Nack is newer than last received Ack
// - If the last received error is newer than last received success
//
// If none of these conditions apply, we call the peering healthy.
func (t *Tracker) IsHealthy(s Status) bool {
// If stream is in a disconnected state for longer than the configured
// heartbeat timeout, report as unhealthy.
if !s.DisconnectTime.IsZero() &&
t.timeNow().Sub(s.DisconnectTime) > t.heartbeatTimeout {
return false
}
// If last Nack is after last Ack, it means the peer is unable to
// handle our replication message.
if s.LastNack.After(s.LastAck) &&
t.timeNow().Sub(s.LastAck) > t.heartbeatTimeout {
return false
}
// If last recv error is newer than last recv success, we were unable
// to handle the peer's replication message.
if s.LastRecvError.After(s.LastRecvResourceSuccess) &&
t.timeNow().Sub(s.LastRecvError) > t.heartbeatTimeout {
return false
}
return true
}
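
Editor's note: the three checks share one shape: a "bad" timestamp that is both newer than its "good" counterpart and older than the heartbeat timeout marks the stream unhealthy. A pared-down, self-contained sketch of the same logic (the `status` struct below is a stand-in, not the tracker's actual type):

```go
package main

import (
	"fmt"
	"time"
)

// status is a pared-down stand-in for the tracker's Status type.
type status struct {
	DisconnectTime          time.Time
	LastAck, LastNack       time.Time
	LastRecvResourceSuccess time.Time
	LastRecvError           time.Time
}

// isHealthy mirrors the three checks above against a fixed timeout.
func isHealthy(s status, now time.Time, timeout time.Duration) bool {
	if !s.DisconnectTime.IsZero() && now.Sub(s.DisconnectTime) > timeout {
		return false // disconnected for too long
	}
	if s.LastNack.After(s.LastAck) && now.Sub(s.LastAck) > timeout {
		return false // peer keeps rejecting our replication messages
	}
	if s.LastRecvError.After(s.LastRecvResourceSuccess) && now.Sub(s.LastRecvError) > timeout {
		return false // we keep failing to process the peer's messages
	}
	return true
}

func main() {
	now := time.Now()
	s := status{DisconnectTime: now.Add(-time.Hour)}
	fmt.Println(isHealthy(s, now, 2*time.Minute)) // false
}
```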
type MutableStatus struct {
mu sync.RWMutex
@@ -145,6 +189,9 @@ type Status struct {
// Connected is true when there is an open stream for the peer.
Connected bool
// NeverConnected is true for peerings that have never connected, false otherwise.
NeverConnected bool
// DisconnectErrorMessage tracks the error that caused the stream to disconnect non-gracefully.
// If the stream is connected or it disconnected gracefully it will be empty.
DisconnectErrorMessage string
@@ -199,7 +246,8 @@ func (s *Status) GetExportedServicesCount() uint64 {
func newMutableStatus(now func() time.Time, connected bool) *MutableStatus {
return &MutableStatus{
Status: Status{
Connected: connected,
NeverConnected: !connected,
},
timeNow: now,
doneCh: make(chan struct{}),

View File

@@ -5,13 +5,117 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/sdk/testutil"
)
const (
aPeerID = "63b60245-c475-426b-b314-4588d210859d"
)
func TestTracker_IsHealthy(t *testing.T) {
type testcase struct {
name string
tracker *Tracker
modifierFunc func(status *MutableStatus)
expectedVal bool
}
tcs := []testcase{
{
name: "disconnect time within timeout",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
status.DisconnectTime = time.Now()
},
},
{
name: "disconnect time past timeout",
tracker: NewTracker(1 * time.Millisecond),
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
status.DisconnectTime = time.Now().Add(-1 * time.Minute)
},
},
{
name: "receive error before receive success within timeout",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
now := time.Now()
status.LastRecvResourceSuccess = now
status.LastRecvError = now.Add(1 * time.Second)
},
},
{
name: "receive error before receive success past timeout",
tracker: NewTracker(1 * time.Millisecond),
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
now := time.Now().Add(-2 * time.Second)
status.LastRecvResourceSuccess = now
status.LastRecvError = now.Add(1 * time.Second)
},
},
{
name: "nack before ack within timeout",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
modifierFunc: func(status *MutableStatus) {
now := time.Now()
status.LastAck = now
status.LastNack = now.Add(1 * time.Second)
},
},
{
name: "nack before ack past timeout",
tracker: NewTracker(1 * time.Millisecond),
expectedVal: false,
modifierFunc: func(status *MutableStatus) {
now := time.Now().Add(-2 * time.Second)
status.LastAck = now
status.LastNack = now.Add(1 * time.Second)
},
},
{
name: "healthy",
tracker: NewTracker(defaultIncomingHeartbeatTimeout),
expectedVal: true,
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
tracker := tc.tracker
st, err := tracker.Connected(aPeerID)
require.NoError(t, err)
require.True(t, st.Connected)
if tc.modifierFunc != nil {
tc.modifierFunc(st)
}
assert.Equal(t, tc.expectedVal, tracker.IsHealthy(st.GetStatus()))
})
}
}
func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
tracker := NewTracker()
tracker := NewTracker(defaultIncomingHeartbeatTimeout)
peerID := "63b60245-c475-426b-b314-4588d210859d"
it := incrementalTime{
@@ -96,7 +200,7 @@ func TestTracker_EnsureConnectedDisconnected(t *testing.T) {
status, ok := tracker.StreamStatus(peerID)
require.False(t, ok)
require.Zero(t, status)
require.Equal(t, Status{NeverConnected: true}, status)
})
}
@@ -108,7 +212,7 @@ func TestTracker_connectedStreams(t *testing.T) {
}
run := func(t *testing.T, tc testCase) {
tracker := NewTracker()
tracker := NewTracker(defaultIncomingHeartbeatTimeout)
if tc.setup != nil {
tc.setup(t, tracker)
}

View File

@@ -124,15 +124,21 @@ func (c *cacheProxyDataSource[ReqType]) Notify(
func dispatchCacheUpdate(ch chan<- proxycfg.UpdateEvent) cache.Callback {
return func(ctx context.Context, e cache.UpdateEvent) {
u := proxycfg.UpdateEvent{
CorrelationID: e.CorrelationID,
Result: e.Result,
Err: e.Err,
}
select {
case ch <- u:
case ch <- newUpdateEvent(e.CorrelationID, e.Result, e.Err):
case <-ctx.Done():
}
}
}
func newUpdateEvent(correlationID string, result any, err error) proxycfg.UpdateEvent {
// This roughly matches the logic in agent/submatview.LocalMaterializer.isTerminalError.
if acl.IsErrNotFound(err) {
err = proxycfg.TerminalError(err)
}
return proxycfg.UpdateEvent{
CorrelationID: correlationID,
Result: result,
Err: err,
}
}

View File

@@ -54,13 +54,8 @@ func (s serverIntentionUpstreams) Notify(ctx context.Context, req *structs.Servi
func dispatchBlockingQueryUpdate[ResultType any](ch chan<- proxycfg.UpdateEvent) func(context.Context, string, ResultType, error) {
return func(ctx context.Context, correlationID string, result ResultType, err error) {
event := proxycfg.UpdateEvent{
CorrelationID: correlationID,
Result: result,
Err: err,
}
select {
case ch <- event:
case ch <- newUpdateEvent(correlationID, result, err):
case <-ctx.Done():
}
}

View File

@@ -39,12 +39,8 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi
QueryOptions: structs.QueryOptions{Token: req.QueryOptions.Token},
}
return c.c.NotifyCallback(ctx, cachetype.IntentionMatchName, query, correlationID, func(ctx context.Context, event cache.UpdateEvent) {
e := proxycfg.UpdateEvent{
CorrelationID: correlationID,
Err: event.Err,
}
if e.Err == nil {
var result any
if event.Err == nil {
rsp, ok := event.Result.(*structs.IndexedIntentionMatches)
if !ok {
return
@@ -54,11 +50,11 @@ func (c cacheIntentions) Notify(ctx context.Context, req *structs.ServiceSpecifi
if len(rsp.Matches) != 0 {
matches = rsp.Matches[0]
}
e.Result = matches
result = matches
}
select {
case ch <- e:
case ch <- newUpdateEvent(correlationID, result, event.Err):
case <-ctx.Done():
}
})
@@ -110,10 +106,7 @@ func (s *serverIntentions) Notify(ctx context.Context, req *structs.ServiceSpeci
sort.Sort(structs.IntentionPrecedenceSorter(intentions))
return proxycfg.UpdateEvent{
CorrelationID: correlationID,
Result: intentions,
}, true
return newUpdateEvent(correlationID, intentions, nil), true
}
for subjectIdx, subject := range subjects { for subjectIdx, subject := range subjects {

View File

@@ -280,16 +280,6 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
}
snap.Roots = roots
case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix):
resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse)
if !ok {
return fmt.Errorf("invalid type for response: %T", u.Result)
}
peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix)
if resp.Bundle != nil {
snap.ConnectProxy.UpstreamPeerTrustBundles.Set(peer, resp.Bundle)
}
case u.CorrelationID == peeringTrustBundlesWatchID:
resp, ok := u.Result.(*pbpeering.TrustBundleListByServiceResponse)
if !ok {
@@ -369,6 +359,17 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
// Clean up data
//
peeredChainTargets := make(map[UpstreamID]struct{})
for _, discoChain := range snap.ConnectProxy.DiscoveryChain {
for _, target := range discoChain.Targets {
if target.Peer == "" {
continue
}
uid := NewUpstreamIDFromTargetID(target.ID)
peeredChainTargets[uid] = struct{}{}
}
}
validPeerNames := make(map[string]struct{})
// Iterate through all known endpoints and remove references to upstream IDs that weren't in the update
@@ -383,6 +384,11 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
validPeerNames[uid.Peer] = struct{}{}
return true
}
// Peered upstream came from a discovery chain target
if _, ok := peeredChainTargets[uid]; ok {
validPeerNames[uid.Peer] = struct{}{}
return true
}
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(uid)
return true
})
@@ -463,8 +469,14 @@ func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, s
continue
}
if _, ok := seenUpstreams[uid]; !ok {
for _, cancelFn := range targets {
for targetID, cancelFn := range targets {
cancelFn()
targetUID := NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
snap.ConnectProxy.PeerUpstreamEndpoints.CancelWatch(targetUID)
snap.ConnectProxy.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
}
}
delete(snap.ConnectProxy.WatchedUpstreams, uid)
}

View File

@@ -2,6 +2,7 @@ package proxycfg
import (
"context"
"errors"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/structs"
@@ -15,6 +16,28 @@ type UpdateEvent struct {
Err error
}
// TerminalError wraps the given error to indicate that the data source is in
// an irrecoverably broken state (e.g. because the given ACL token has been
// deleted).
//
// Setting UpdateEvent.Err to a TerminalError causes all watches to be canceled
// which, in turn, terminates the xDS streams.
func TerminalError(err error) error {
return terminalError{err}
}
// IsTerminalError returns whether the given error indicates that the data
// source is in an irrecoverably broken state so watches should be torn down
// and retried at a higher level.
func IsTerminalError(err error) bool {
return errors.As(err, &terminalError{})
}
type terminalError struct{ err error }
func (e terminalError) Error() string { return e.err.Error() }
func (e terminalError) Unwrap() error { return e.err }
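
Editor's note: the wrapper/`errors.As` pair here is the standard Go error-wrapping idiom: any error can be promoted to "terminal", and consumers can test for that property without knowing the concrete cause. A self-contained sketch of the same idiom (the names mirror the diff; the surrounding `main` is illustrative only):

```go
package main

import (
	"errors"
	"fmt"
)

type terminalError struct{ err error }

func (e terminalError) Error() string { return e.err.Error() }
func (e terminalError) Unwrap() error { return e.err }

func terminal(err error) error  { return terminalError{err} }
func isTerminal(err error) bool { return errors.As(err, &terminalError{}) }

func main() {
	base := errors.New("ACL not found")
	wrapped := fmt.Errorf("watch failed: %w", terminal(base))

	fmt.Println(isTerminal(wrapped))      // true: survives further wrapping
	fmt.Println(errors.Is(wrapped, base)) // true: the cause is still reachable
}
```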
// DataSources contains the dependencies used to consume data used to configure
// proxies.
type DataSources struct {

View File

@@ -5,7 +5,9 @@ import (
"fmt"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/proxycfg/internal/watch"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto/pbpeering"
)
type handlerIngressGateway struct {
@@ -66,6 +68,9 @@ func (s *handlerIngressGateway) initialize(ctx context.Context) (ConfigSnapshot,
snap.IngressGateway.WatchedGateways = make(map[UpstreamID]map[string]context.CancelFunc)
snap.IngressGateway.WatchedGatewayEndpoints = make(map[UpstreamID]map[string]structs.CheckServiceNodes)
snap.IngressGateway.Listeners = make(map[IngressListenerKey]structs.IngressListener)
snap.IngressGateway.UpstreamPeerTrustBundles = watch.NewMap[string, *pbpeering.PeeringTrustBundle]()
snap.IngressGateway.PeerUpstreamEndpoints = watch.NewMap[UpstreamID, structs.CheckServiceNodes]()
snap.IngressGateway.PeerUpstreamEndpointsUseHostnames = make(map[UpstreamID]struct{})
return snap, nil
}
@@ -152,6 +157,12 @@ func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u UpdateEvent,
delete(snap.IngressGateway.WatchedUpstreams[uid], targetID)
delete(snap.IngressGateway.WatchedUpstreamEndpoints[uid], targetID)
cancelUpstreamFn()
targetUID := NewUpstreamIDFromTargetID(targetID)
if targetUID.Peer != "" {
snap.IngressGateway.PeerUpstreamEndpoints.CancelWatch(targetUID)
snap.IngressGateway.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
}
}
cancelFn()

View File

@@ -127,7 +127,7 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour
}
// We are updating the proxy, close its old state
state.Close()
state.Close(false)
}
// TODO: move to a function that translates ManagerConfig->stateConfig
@@ -148,14 +148,13 @@ func (m *Manager) Register(id ProxyID, ns *structs.NodeService, source ProxySour
return err
}
ch, err := state.Watch()
if err != nil {
if _, err = state.Watch(); err != nil {
return err
}
m.proxies[id] = state
// Start a goroutine that will wait for changes and broadcast them to watchers.
go m.notifyBroadcast(ch)
go m.notifyBroadcast(id, state)
return nil
}
@@ -175,8 +174,8 @@ func (m *Manager) Deregister(id ProxyID, source ProxySource) {
}
// Closing state will let the goroutine we started in Register finish since
// watch chan is closed.
state.Close()
state.Close(false)
delete(m.proxies, id)
// We intentionally leave potential watchers hanging here - there is no new
@ -186,11 +185,17 @@ func (m *Manager) Deregister(id ProxyID, source ProxySource) {
// cleaned up naturally. // cleaned up naturally.
} }
func (m *Manager) notifyBroadcast(ch <-chan ConfigSnapshot) { func (m *Manager) notifyBroadcast(proxyID ProxyID, state *state) {
// Run until ch is closed // Run until ch is closed (by a defer in state.run).
for snap := range ch { for snap := range state.snapCh {
m.notify(&snap) m.notify(&snap)
} }
// If state.run exited because of an irrecoverable error, close all of the
// watchers so that the consumers reconnect/retry at a higher level.
if state.failed() {
m.closeAllWatchers(proxyID)
}
} }
func (m *Manager) notify(snap *ConfigSnapshot) { func (m *Manager) notify(snap *ConfigSnapshot) {
@ -281,6 +286,20 @@ func (m *Manager) Watch(id ProxyID) (<-chan *ConfigSnapshot, CancelFunc) {
} }
} }
func (m *Manager) closeAllWatchers(proxyID ProxyID) {
m.mu.Lock()
defer m.mu.Unlock()
watchers, ok := m.watchers[proxyID]
if !ok {
return
}
for watchID := range watchers {
m.closeWatchLocked(proxyID, watchID)
}
}
// closeWatchLocked cleans up state related to a single watcher. It assumes the // closeWatchLocked cleans up state related to a single watcher. It assumes the
// lock is held. // lock is held.
func (m *Manager) closeWatchLocked(proxyID ProxyID, watchID uint64) { func (m *Manager) closeWatchLocked(proxyID ProxyID, watchID uint64) {
@ -309,7 +328,7 @@ func (m *Manager) Close() error {
// Then close all states // Then close all states
for proxyID, state := range m.proxies { for proxyID, state := range m.proxies {
state.Close() state.Close(false)
delete(m.proxies, proxyID) delete(m.proxies, proxyID)
} }
return nil return nil
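From a consumer's point of view, a closed snapshot channel now means either deregistration or a failed state, so recovery is simply calling `Watch` again. A rough sketch of that higher-level retry (the `manager`, `proxyID`, and `render` names are hypothetical):

```go
// Hypothetical consumer loop: when the manager closes the watch because the
// proxy's underlying state hit an irrecoverable data-source error, the range
// loop ends and the watch is re-established here.
for {
	snapCh, cancel := manager.Watch(proxyID)
	for snap := range snapCh {
		render(snap) // e.g. push the new snapshot to xDS
	}
	cancel()
	time.Sleep(time.Second) // crude backoff before retrying
}
```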


@@ -63,22 +63,29 @@ func NewUpstreamIDFromServiceID(sid structs.ServiceID) UpstreamID {
 	return id
 }

+// TODO(peering): confirm we don't need peername here
 func NewUpstreamIDFromTargetID(tid string) UpstreamID {
-	// Drop the leading subset if one is present in the target ID.
-	separators := strings.Count(tid, ".")
-	if separators > 3 {
-		prefix := tid[:strings.Index(tid, ".")+1]
-		tid = strings.TrimPrefix(tid, prefix)
-	}
+	var id UpstreamID
+	split := strings.Split(tid, ".")

-	split := strings.SplitN(tid, ".", 4)
-	id := UpstreamID{
-		Name:           split[0],
-		EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]),
-		Datacenter:     split[3],
-	}
+	switch {
+	case split[len(split)-2] == "external":
+		id = UpstreamID{
+			Name:           split[0],
+			EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]),
+			Peer:           split[4],
+		}
+	case len(split) == 5:
+		// Drop the leading subset if one is present in the target ID.
+		split = split[1:]
+		fallthrough
+	default:
+		id = UpstreamID{
+			Name:           split[0],
+			EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(split[2], split[1]),
+			Datacenter:     split[3],
+		}
+	}

 	id.normalize()
 	return id
 }
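The two target-ID shapes the parser accepts can be seen in a quick sketch (a hypothetical Go Example test; the IDs are illustrative):

```go
func ExampleNewUpstreamIDFromTargetID() {
	dc := NewUpstreamIDFromTargetID("db.default.default.dc2")
	peered := NewUpstreamIDFromTargetID("db.default.default.external.cluster-01")
	fmt.Println(dc.Datacenter, peered.Peer)
	// Output: dc2 cluster-01
}
```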


@@ -35,6 +35,13 @@ func TestUpstreamIDFromTargetID(t *testing.T) {
 				Datacenter: "dc2",
 			},
 		},
+		"peered": {
+			tid: "foo.default.default.external.cluster-01",
+			expect: UpstreamID{
+				Name: "foo",
+				Peer: "cluster-01",
+			},
+		},
 	}

 	for name, tc := range cases {


@@ -814,6 +814,18 @@ func (s *ConfigSnapshot) MeshConfigTLSOutgoing() *structs.MeshDirectionalTLSConf
 	return mesh.TLS.Outgoing
 }

+func (s *ConfigSnapshot) ToConfigSnapshotUpstreams() (*ConfigSnapshotUpstreams, error) {
+	switch s.Kind {
+	case structs.ServiceKindConnectProxy:
+		return &s.ConnectProxy.ConfigSnapshotUpstreams, nil
+	case structs.ServiceKindIngressGateway:
+		return &s.IngressGateway.ConfigSnapshotUpstreams, nil
+	default:
+		// This is a coherence check and should never fail
+		return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", s.Kind)
+	}
+}
+
 func (u *ConfigSnapshotUpstreams) UpstreamPeerMeta(uid UpstreamID) structs.PeeringServiceMeta {
 	nodes, _ := u.PeerUpstreamEndpoints.Get(uid)
 	if len(nodes) == 0 {


@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"net"
 	"reflect"
+	"sync/atomic"
 	"time"

 	"github.com/hashicorp/go-hclog"
@@ -70,11 +71,21 @@ type state struct {
 	// in Watch.
 	cancel func()

+	// failedFlag is (atomically) set to 1 (by Close) when run exits because a data
+	// source is in an irrecoverable state. It can be read with failed.
+	failedFlag int32
+
 	ch     chan UpdateEvent
 	snapCh chan ConfigSnapshot
 	reqCh  chan chan *ConfigSnapshot
 }

+// failed returns whether run exited because a data source is in an
+// irrecoverable state.
+func (s *state) failed() bool {
+	return atomic.LoadInt32(&s.failedFlag) == 1
+}
+
 type DNSConfig struct {
 	Domain    string
 	AltDomain string
@@ -250,10 +261,13 @@ func (s *state) Watch() (<-chan ConfigSnapshot, error) {
 }

 // Close discards the state and stops any long-running watches.
-func (s *state) Close() error {
+func (s *state) Close(failed bool) error {
 	if s.cancel != nil {
 		s.cancel()
 	}
+	if failed {
+		atomic.StoreInt32(&s.failedFlag, 1)
+	}
 	return nil
 }
@@ -300,7 +314,13 @@ func (s *state) run(ctx context.Context, snap *ConfigSnapshot) {
 		case <-ctx.Done():
 			return
 		case u := <-s.ch:
-			s.logger.Trace("A blocking query returned; handling snapshot update", "correlationID", u.CorrelationID)
+			s.logger.Trace("Data source returned; handling snapshot update", "correlationID", u.CorrelationID)
+
+			if IsTerminalError(u.Err) {
+				s.logger.Error("Data source in an irrecoverable state; exiting", "error", u.Err, "correlationID", u.CorrelationID)
+				s.Close(true)
+				return
+			}

 			if err := s.handler.handleUpdate(ctx, u, snap); err != nil {
 				s.logger.Error("Failed to handle update from watch",


@@ -493,6 +493,11 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 						Mode: structs.MeshGatewayModeNone,
 					},
 				},
+				structs.Upstream{
+					DestinationType: structs.UpstreamDestTypeService,
+					DestinationName: "api-failover-to-peer",
+					LocalBindPort:   10007,
+				},
 				structs.Upstream{
 					DestinationType: structs.UpstreamDestTypeService,
 					DestinationName: "api-dc2",
@@ -552,6 +557,16 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 							Mode: structs.MeshGatewayModeNone,
 						},
 					}),
+					fmt.Sprintf("discovery-chain:%s-failover-to-peer", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{
+						Name:                 "api-failover-to-peer",
+						EvaluateInDatacenter: "dc1",
+						EvaluateInNamespace:  "default",
+						EvaluateInPartition:  "default",
+						Datacenter:           "dc1",
+						OverrideMeshGateway: structs.MeshGatewayConfig{
+							Mode: meshGatewayProxyConfigValue,
+						},
+					}),
 					fmt.Sprintf("discovery-chain:%s-dc2", apiUID.String()): genVerifyDiscoveryChainWatch(&structs.DiscoveryChainRequest{
 						Name:                 "api-dc2",
 						EvaluateInDatacenter: "dc1",
@@ -639,6 +654,26 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 						},
 						Err: nil,
 					},
+					{
+						CorrelationID: fmt.Sprintf("discovery-chain:%s-failover-to-peer", apiUID.String()),
+						Result: &structs.DiscoveryChainResponse{
+							Chain: discoverychain.TestCompileConfigEntries(t, "api-failover-to-peer", "default", "default", "dc1", "trustdomain.consul",
+								func(req *discoverychain.CompileRequest) {
+									req.OverrideMeshGateway.Mode = meshGatewayProxyConfigValue
+								}, &structs.ServiceResolverConfigEntry{
+									Kind: structs.ServiceResolver,
+									Name: "api-failover-to-peer",
+									Failover: map[string]structs.ServiceResolverFailover{
+										"*": {
+											Targets: []structs.ServiceResolverFailoverTarget{
+												{Peer: "cluster-01"},
+											},
+										},
+									},
+								}),
+						},
+						Err: nil,
+					},
 				},
 				verifySnapshot: func(t testing.TB, snap *ConfigSnapshot) {
 					require.True(t, snap.Valid())
@@ -646,15 +681,18 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 					require.Equal(t, indexedRoots, snap.Roots)
 					require.Equal(t, issuedCert, snap.ConnectProxy.Leaf)
-					require.Len(t, snap.ConnectProxy.DiscoveryChain, 5, "%+v", snap.ConnectProxy.DiscoveryChain)
-					require.Len(t, snap.ConnectProxy.WatchedUpstreams, 5, "%+v", snap.ConnectProxy.WatchedUpstreams)
-					require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 5, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
-					require.Len(t, snap.ConnectProxy.WatchedGateways, 5, "%+v", snap.ConnectProxy.WatchedGateways)
-					require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 5, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
+					require.Len(t, snap.ConnectProxy.DiscoveryChain, 6, "%+v", snap.ConnectProxy.DiscoveryChain)
+					require.Len(t, snap.ConnectProxy.WatchedUpstreams, 6, "%+v", snap.ConnectProxy.WatchedUpstreams)
+					require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 6, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
+					require.Len(t, snap.ConnectProxy.WatchedGateways, 6, "%+v", snap.ConnectProxy.WatchedGateways)
+					require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 6, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
 					require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks)
 					require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints)
+					require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len())
+					require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len())
 					require.True(t, snap.ConnectProxy.IntentionsSet)
 					require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions)
 					require.True(t, snap.ConnectProxy.MeshConfigSet)
@@ -667,6 +705,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 					fmt.Sprintf("upstream-target:api-failover-remote.default.default.dc2:%s-failover-remote?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-remote", "", "dc2", true),
 					fmt.Sprintf("upstream-target:api-failover-local.default.default.dc2:%s-failover-local?dc=dc2", apiUID.String()):   genVerifyServiceSpecificRequest("api-failover-local", "", "dc2", true),
 					fmt.Sprintf("upstream-target:api-failover-direct.default.default.dc2:%s-failover-direct?dc=dc2", apiUID.String()): genVerifyServiceSpecificRequest("api-failover-direct", "", "dc2", true),
+					upstreamPeerWatchIDPrefix + fmt.Sprintf("%s-failover-to-peer?peer=cluster-01", apiUID.String()):                   genVerifyServiceSpecificPeeredRequest("api-failover-to-peer", "", "", "cluster-01", true),
 					fmt.Sprintf("mesh-gateway:dc2:%s-failover-remote?dc=dc2", apiUID.String()):                                        genVerifyGatewayWatch("dc2"),
 					fmt.Sprintf("mesh-gateway:dc1:%s-failover-local?dc=dc2", apiUID.String()):                                         genVerifyGatewayWatch("dc1"),
 				},
@@ -676,15 +715,18 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 					require.Equal(t, indexedRoots, snap.Roots)
 					require.Equal(t, issuedCert, snap.ConnectProxy.Leaf)
-					require.Len(t, snap.ConnectProxy.DiscoveryChain, 5, "%+v", snap.ConnectProxy.DiscoveryChain)
-					require.Len(t, snap.ConnectProxy.WatchedUpstreams, 5, "%+v", snap.ConnectProxy.WatchedUpstreams)
-					require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 5, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
-					require.Len(t, snap.ConnectProxy.WatchedGateways, 5, "%+v", snap.ConnectProxy.WatchedGateways)
-					require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 5, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
+					require.Len(t, snap.ConnectProxy.DiscoveryChain, 6, "%+v", snap.ConnectProxy.DiscoveryChain)
+					require.Len(t, snap.ConnectProxy.WatchedUpstreams, 6, "%+v", snap.ConnectProxy.WatchedUpstreams)
+					require.Len(t, snap.ConnectProxy.WatchedUpstreamEndpoints, 6, "%+v", snap.ConnectProxy.WatchedUpstreamEndpoints)
+					require.Len(t, snap.ConnectProxy.WatchedGateways, 6, "%+v", snap.ConnectProxy.WatchedGateways)
+					require.Len(t, snap.ConnectProxy.WatchedGatewayEndpoints, 6, "%+v", snap.ConnectProxy.WatchedGatewayEndpoints)
 					require.Len(t, snap.ConnectProxy.WatchedServiceChecks, 0, "%+v", snap.ConnectProxy.WatchedServiceChecks)
 					require.Len(t, snap.ConnectProxy.PreparedQueryEndpoints, 0, "%+v", snap.ConnectProxy.PreparedQueryEndpoints)
+					require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.PeerUpstreamEndpoints.Len())
+					require.Equal(t, 1, snap.ConnectProxy.ConfigSnapshotUpstreams.UpstreamPeerTrustBundles.Len())
 					require.True(t, snap.ConnectProxy.IntentionsSet)
 					require.Equal(t, ixnMatch, snap.ConnectProxy.Intentions)
 				},


@@ -280,6 +280,31 @@ func TestUpstreamNodesDC2(t testing.T) structs.CheckServiceNodes {
 	}
 }

+func TestUpstreamNodesPeerCluster01(t testing.T) structs.CheckServiceNodes {
+	peer := "cluster-01"
+	service := structs.TestNodeServiceWithNameInPeer(t, "web", peer)
+	return structs.CheckServiceNodes{
+		structs.CheckServiceNode{
+			Node: &structs.Node{
+				ID:       "test1",
+				Node:     "test1",
+				Address:  "10.40.1.1",
+				PeerName: peer,
+			},
+			Service: service,
+		},
+		structs.CheckServiceNode{
+			Node: &structs.Node{
+				ID:       "test2",
+				Node:     "test2",
+				Address:  "10.40.1.2",
+				PeerName: peer,
+			},
+			Service: service,
+		},
+	}
+}
+
 func TestUpstreamNodesInStatusDC2(t testing.T, status string) structs.CheckServiceNodes {
 	return structs.CheckServiceNodes{
 		structs.CheckServiceNode{


@@ -8,6 +8,7 @@ import (
 	"github.com/hashicorp/consul/agent/connect"
 	"github.com/hashicorp/consul/agent/consul/discoverychain"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/proto/pbpeering"
 )

 func setupTestVariationConfigEntriesAndSnapshot(
@@ -72,6 +73,24 @@ func setupTestVariationConfigEntriesAndSnapshot(
 				Nodes: TestGatewayNodesDC2(t),
 			},
 		})
+	case "failover-to-cluster-peer":
+		events = append(events, UpdateEvent{
+			CorrelationID: "peer-trust-bundle:cluster-01",
+			Result: &pbpeering.TrustBundleReadResponse{
+				Bundle: &pbpeering.PeeringTrustBundle{
+					PeerName:          "peer1",
+					TrustDomain:       "peer1.domain",
+					ExportedPartition: "peer1ap",
+					RootPEMs:          []string{"peer1-root-1"},
+				},
+			},
+		})
+		events = append(events, UpdateEvent{
+			CorrelationID: "upstream-peer:db?peer=cluster-01",
+			Result: &structs.IndexedCheckServiceNodes{
+				Nodes: TestUpstreamNodesPeerCluster01(t),
+			},
+		})
 	case "failover-through-double-remote-gateway-triggered":
 		events = append(events, UpdateEvent{
 			CorrelationID: "upstream-target:db.default.default.dc1:" + dbUID.String(),
@@ -255,6 +274,21 @@ func setupTestVariationDiscoveryChain(
 				},
 			},
 		)
+	case "failover-to-cluster-peer":
+		entries = append(entries,
+			&structs.ServiceResolverConfigEntry{
+				Kind:           structs.ServiceResolver,
+				Name:           "db",
+				ConnectTimeout: 33 * time.Second,
+				Failover: map[string]structs.ServiceResolverFailover{
+					"*": {
+						Targets: []structs.ServiceResolverFailoverTarget{
+							{Peer: "cluster-01"},
+						},
+					},
+				},
+			},
+		)
 	case "failover-through-double-remote-gateway-triggered":
 		fallthrough
 	case "failover-through-double-remote-gateway":


@@ -9,7 +9,9 @@ import (
 	"github.com/mitchellh/mapstructure"

 	"github.com/hashicorp/consul/acl"
+	cachetype "github.com/hashicorp/consul/agent/cache-types"
 	"github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/proto/pbpeering"
 )

 type handlerUpstreams struct {
@@ -21,9 +23,10 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
 		return fmt.Errorf("error filling agent cache: %v", u.Err)
 	}

-	upstreamsSnapshot := &snap.ConnectProxy.ConfigSnapshotUpstreams
-	if snap.Kind == structs.ServiceKindIngressGateway {
-		upstreamsSnapshot = &snap.IngressGateway.ConfigSnapshotUpstreams
+	upstreamsSnapshot, err := snap.ToConfigSnapshotUpstreams()
+	if err != nil {
+		return err
 	}

 	switch {
@@ -98,19 +101,16 @@ func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEv
 		uid := UpstreamIDFromString(uidString)

-		filteredNodes := hostnameEndpoints(
-			s.logger,
-			GatewayKey{ /*empty so it never matches*/ },
-			resp.Nodes,
-		)
-		if len(filteredNodes) > 0 {
-			if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, filteredNodes); set {
-				upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{}
-			}
-		} else {
-			if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, resp.Nodes); set {
-				delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid)
-			}
-		}
+		s.setPeerEndpoints(upstreamsSnapshot, uid, resp.Nodes)
+
+	case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix):
+		resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse)
+		if !ok {
+			return fmt.Errorf("invalid type for response: %T", u.Result)
+		}
+		peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix)
+		if resp.Bundle != nil {
+			upstreamsSnapshot.UpstreamPeerTrustBundles.Set(peer, resp.Bundle)
+		}

 	case strings.HasPrefix(u.CorrelationID, "upstream-target:"):
@@ -216,6 +216,23 @@ func removeColonPrefix(s string) (string, string, bool) {
 	return s[0:idx], s[idx+1:], true
 }

+func (s *handlerUpstreams) setPeerEndpoints(upstreamsSnapshot *ConfigSnapshotUpstreams, uid UpstreamID, nodes structs.CheckServiceNodes) {
+	filteredNodes := hostnameEndpoints(
+		s.logger,
+		GatewayKey{ /*empty so it never matches*/ },
+		nodes,
+	)
+	if len(filteredNodes) > 0 {
+		if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, filteredNodes); set {
+			upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{}
+		}
+	} else {
+		if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, nodes); set {
+			delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid)
+		}
+	}
+}
+
 func (s *handlerUpstreams) resetWatchesFromChain(
 	ctx context.Context,
 	uid UpstreamID,
@@ -255,6 +272,12 @@ func (s *handlerUpstreams) resetWatchesFromChain(
 		delete(snap.WatchedUpstreams[uid], targetID)
 		delete(snap.WatchedUpstreamEndpoints[uid], targetID)
 		cancelFn()
+
+		targetUID := NewUpstreamIDFromTargetID(targetID)
+		if targetUID.Peer != "" {
+			snap.PeerUpstreamEndpoints.CancelWatch(targetUID)
+			snap.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
+		}
 	}

 	var (
@@ -274,6 +297,7 @@ func (s *handlerUpstreams) resetWatchesFromChain(
 			service:    target.Service,
 			filter:     target.Subset.Filter,
 			datacenter: target.Datacenter,
+			peer:       target.Peer,
 			entMeta:    target.GetEnterpriseMetadata(),
 		}
 		err := s.watchUpstreamTarget(ctx, snap, opts)
@@ -384,6 +408,7 @@ type targetWatchOpts struct {
 	service    string
 	filter     string
 	datacenter string
+	peer       string
 	entMeta    *acl.EnterpriseMeta
 }
@@ -397,11 +422,17 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config
 	var finalMeta acl.EnterpriseMeta
 	finalMeta.Merge(opts.entMeta)

-	correlationID := "upstream-target:" + opts.chainID + ":" + opts.upstreamID.String()
+	uid := opts.upstreamID
+	correlationID := "upstream-target:" + opts.chainID + ":" + uid.String()
+	if opts.peer != "" {
+		uid = NewUpstreamIDFromTargetID(opts.chainID)
+		correlationID = upstreamPeerWatchIDPrefix + uid.String()
+	}

 	ctx, cancel := context.WithCancel(ctx)
 	err := s.dataSources.Health.Notify(ctx, &structs.ServiceSpecificRequest{
-		PeerName:   opts.upstreamID.Peer,
+		PeerName:   opts.peer,
 		Datacenter: opts.datacenter,
 		QueryOptions: structs.QueryOptions{
 			Token: s.token,
@@ -422,6 +453,31 @@ func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *Config
 	}
 	snap.WatchedUpstreams[opts.upstreamID][opts.chainID] = cancel

+	if uid.Peer == "" {
+		return nil
+	}
+
+	if ok := snap.PeerUpstreamEndpoints.IsWatched(uid); !ok {
+		snap.PeerUpstreamEndpoints.InitWatch(uid, cancel)
+	}
+
+	// Check whether a watch for this peer exists to avoid duplicates.
+	if ok := snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok {
+		peerCtx, cancel := context.WithCancel(ctx)
+		if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{
+			Request: &pbpeering.TrustBundleReadRequest{
+				Name:      uid.Peer,
+				Partition: uid.PartitionOrDefault(),
+			},
+			QueryOptions: structs.QueryOptions{Token: s.token},
+		}, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil {
+			cancel()
+			return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err)
+		}
+
+		snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel)
+	}
+
 	return nil
 }
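For orientation, the correlation IDs these watches produce for a peered upstream look like the following (both formats appear verbatim in the tests elsewhere in this change):

```go
// Watch correlation IDs for upstream "db" resolved via peer "cluster-01":
//
//   "upstream-peer:db?peer=cluster-01"  // health endpoints of the peered target
//   "peer-trust-bundle:cluster-01"      // the peer's CA trust bundle
//
// Both watches are canceled from resetWatchesFromChain when the last peered
// target for that peer is removed from the discovery chain.
```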


@@ -8,7 +8,6 @@ import (
 	"time"

 	"github.com/armon/go-metrics"
-	"github.com/hashicorp/consul/proto/pbpeerstream"
 	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-multierror"
@@ -27,6 +26,7 @@ import (
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/lib"
 	"github.com/hashicorp/consul/proto/pbpeering"
+	"github.com/hashicorp/consul/proto/pbpeerstream"
 )

 var (
@@ -379,6 +379,7 @@ func (s *Server) Establish(
 	}

 	var id string
+	serverAddrs := tok.ServerAddresses
 	if existing == nil {
 		id, err = lib.GenerateUUID(s.Backend.CheckPeeringUUID)
 		if err != nil {
@@ -386,6 +387,11 @@ func (s *Server) Establish(
 		}
 	} else {
 		id = existing.ID
+		// If there is a connected stream, assume that the existing ServerAddresses
+		// are up to date and do not try to overwrite them with the token's addresses.
+		if status, ok := s.Tracker.StreamStatus(id); ok && status.Connected {
+			serverAddrs = existing.PeerServerAddresses
+		}
 	}

 	// validate that this peer name is not being used as an acceptor already
@@ -397,7 +403,7 @@ func (s *Server) Establish(
 		ID:                  id,
 		Name:                req.PeerName,
 		PeerCAPems:          tok.CA,
-		PeerServerAddresses: tok.ServerAddresses,
+		PeerServerAddresses: serverAddrs,
 		PeerServerName:      tok.ServerName,
 		PeerID:              tok.PeerID,
 		Meta:                req.Meta,
@@ -418,9 +424,9 @@ func (s *Server) Establish(
 	}

 	var exchangeResp *pbpeerstream.ExchangeSecretResponse
-	// Loop through the token's addresses once, attempting to fetch the long-lived stream secret.
+	// Loop through the known server addresses once, attempting to fetch the long-lived stream secret.
 	var dialErrors error
-	for _, addr := range peering.PeerServerAddresses {
+	for _, addr := range serverAddrs {
 		exchangeResp, err = exchangeSecret(ctx, addr, tlsOption, &exchangeReq)
 		if err != nil {
 			dialErrors = multierror.Append(dialErrors, fmt.Errorf("failed to exchange peering secret with %q: %w", addr, err))
@@ -720,11 +726,12 @@ func (s *Server) PeeringDelete(ctx context.Context, req *pbpeering.PeeringDelete
 		return nil, err
 	}

-	if existing == nil || !existing.IsActive() {
+	if existing == nil || existing.State == pbpeering.PeeringState_DELETING {
 		// Return early when the Peering doesn't exist or is already marked for deletion.
 		// We don't return nil because the pb will fail to marshal.
 		return &pbpeering.PeeringDeleteResponse{}, nil
 	}

 	// We are using a write request due to needing to perform a deferred deletion.
 	// The peering gets marked for deletion by setting the DeletedAt field,


@@ -621,38 +621,50 @@ func TestPeeringService_Read_ACLEnforcement(t *testing.T) {
 }

 func TestPeeringService_Delete(t *testing.T) {
-	// TODO(peering): see note on newTestServer, refactor to not use this
-	s := newTestServer(t, nil)
-
-	p := &pbpeering.Peering{
-		ID:                  testUUID(t),
-		Name:                "foo",
-		State:               pbpeering.PeeringState_ESTABLISHING,
-		PeerCAPems:          nil,
-		PeerServerName:      "test",
-		PeerServerAddresses: []string{"addr1"},
-	}
-	err := s.Server.FSM().State().PeeringWrite(10, &pbpeering.PeeringWriteRequest{Peering: p})
-	require.NoError(t, err)
-	require.Nil(t, p.DeletedAt)
-	require.True(t, p.IsActive())
-
-	client := pbpeering.NewPeeringServiceClient(s.ClientConn(t))
-
-	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-	t.Cleanup(cancel)
-
-	_, err = client.PeeringDelete(ctx, &pbpeering.PeeringDeleteRequest{Name: "foo"})
-	require.NoError(t, err)
-
-	retry.Run(t, func(r *retry.R) {
-		_, resp, err := s.Server.FSM().State().PeeringRead(nil, state.Query{Value: "foo"})
-		require.NoError(r, err)
-
-		// Initially the peering will be marked for deletion but eventually the leader
-		// routine will clean it up.
-		require.Nil(r, resp)
-	})
+	tt := map[string]pbpeering.PeeringState{
+		"active peering":     pbpeering.PeeringState_ACTIVE,
+		"terminated peering": pbpeering.PeeringState_TERMINATED,
+	}
+
+	for name, overrideState := range tt {
+		t.Run(name, func(t *testing.T) {
+			// TODO(peering): see note on newTestServer, refactor to not use this
+			s := newTestServer(t, nil)
+
+			// A pointer is kept for the following peering so that we can modify the object without another PeeringWrite.
+			p := &pbpeering.Peering{
+				ID:                  testUUID(t),
+				Name:                "foo",
+				PeerCAPems:          nil,
+				PeerServerName:      "test",
+				PeerServerAddresses: []string{"addr1"},
+			}
+			err := s.Server.FSM().State().PeeringWrite(10, &pbpeering.PeeringWriteRequest{Peering: p})
+			require.NoError(t, err)
+			require.Nil(t, p.DeletedAt)
+			require.True(t, p.IsActive())
+
+			// Overwrite the peering state to simulate deleting from a non-initial state.
+			p.State = overrideState
+
+			client := pbpeering.NewPeeringServiceClient(s.ClientConn(t))
+
+			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+			t.Cleanup(cancel)
+
+			_, err = client.PeeringDelete(ctx, &pbpeering.PeeringDeleteRequest{Name: "foo"})
+			require.NoError(t, err)
+
+			retry.Run(t, func(r *retry.R) {
+				_, resp, err := s.Server.FSM().State().PeeringRead(nil, state.Query{Value: "foo"})
+				require.NoError(r, err)
+
+				// Initially the peering will be marked for deletion but eventually the leader
+				// routine will clean it up.
+				require.Nil(r, resp)
+			})
+		})
+	}
 }

 func TestPeeringService_Delete_ACLEnforcement(t *testing.T) {


@@ -127,9 +127,20 @@ func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token str
 	if err != nil {
 		return nil, nil, "", err
 	}

-	// Setup default check if none given
+	// Setup default check if none given.
 	if len(checks) < 1 {
-		checks = sidecarDefaultChecks(ns.ID, sidecar.Proxy.LocalServiceAddress, sidecar.Port)
+		// The check should use the sidecar's address because it makes a request to the sidecar.
+		// If the sidecar's address is empty, we fall back to the address of the local service, as set in
+		// sidecar.Proxy.LocalServiceAddress, in the hope that the proxy is also accessible on that address
+		// (which in most cases it is because it's running as a sidecar in the same network).
+		// We could instead fall back to the address of the service as set by (ns.Address), but I've kept it using
+		// sidecar.Proxy.LocalServiceAddress so as to not change things too much in the
+		// process of fixing #14433.
+		checkAddress := sidecar.Address
+		if checkAddress == "" {
+			checkAddress = sidecar.Proxy.LocalServiceAddress
+		}
+		checks = sidecarDefaultChecks(ns.ID, checkAddress, sidecar.Port)
 	}

 	return sidecar, checks, token, nil
@@ -202,14 +213,11 @@ func (a *Agent) sidecarPortFromServiceID(sidecarCompoundServiceID structs.Servic
 	return sidecarPort, nil
 }

-func sidecarDefaultChecks(serviceID string, localServiceAddress string, port int) []*structs.CheckType {
-	// Setup default check if none given
+func sidecarDefaultChecks(serviceID string, address string, port int) []*structs.CheckType {
 	return []*structs.CheckType{
 		{
 			Name: "Connect Sidecar Listening",
-			// Default to localhost rather than agent/service public IP. The checks
-			// can always be overridden if a non-loopback IP is needed.
-			TCP:      ipaddr.FormatAddressPort(localServiceAddress, port),
+			TCP:      ipaddr.FormatAddressPort(address, port),
 			Interval: 10 * time.Second,
 		},
 		{


@@ -215,6 +215,141 @@ func TestAgent_sidecarServiceFromNodeService(t *testing.T) {
 			token:   "foo",
 			wantErr: "reserved for internal use",
 		},
+		{
+			name: "uses proxy address for check",
+			sd: &structs.ServiceDefinition{
+				ID:   "web1",
+				Name: "web",
+				Port: 1111,
+				Connect: &structs.ServiceConnect{
+					SidecarService: &structs.ServiceDefinition{
+						Address: "123.123.123.123",
+						Proxy: &structs.ConnectProxyConfig{
+							LocalServiceAddress: "255.255.255.255",
+						},
+					},
+				},
+				Address: "255.255.255.255",
+			},
+			token: "foo",
+			wantNS: &structs.NodeService{
+				EnterpriseMeta:             *structs.DefaultEnterpriseMetaInDefaultPartition(),
+				Kind:                       structs.ServiceKindConnectProxy,
+				ID:                         "web1-sidecar-proxy",
+				Service:                    "web-sidecar-proxy",
+				Port:                       2222,
+				Address:                    "123.123.123.123",
+				LocallyRegisteredAsSidecar: true,
+				Proxy: structs.ConnectProxyConfig{
+					DestinationServiceName: "web",
+					DestinationServiceID:   "web1",
+					LocalServiceAddress:    "255.255.255.255",
+					LocalServicePort:       1111,
+				},
+			},
+			wantChecks: []*structs.CheckType{
+				{
+					Name:     "Connect Sidecar Listening",
+					TCP:      "123.123.123.123:2222",
+					Interval: 10 * time.Second,
+				},
+				{
+					Name:         "Connect Sidecar Aliasing web1",
+					AliasService: "web1",
+				},
+			},
+			wantToken: "foo",
+		},
+		{
+			name: "uses proxy.local_service_address for check if proxy address is empty",
+			sd: &structs.ServiceDefinition{
+				ID:   "web1",
+				Name: "web",
+				Port: 1111,
+				Connect: &structs.ServiceConnect{
+					SidecarService: &structs.ServiceDefinition{
+						Address: "", // Proxy address empty.
+						Proxy: &structs.ConnectProxyConfig{
+							LocalServiceAddress: "1.2.3.4",
+						},
+					},
+				},
+				Address: "", // Service address empty.
+			},
+			token: "foo",
+			wantNS: &structs.NodeService{
+				EnterpriseMeta:             *structs.DefaultEnterpriseMetaInDefaultPartition(),
+				Kind:                       structs.ServiceKindConnectProxy,
+				ID:                         "web1-sidecar-proxy",
+				Service:                    "web-sidecar-proxy",
+				Port:                       2222,
+				Address:                    "",
+				LocallyRegisteredAsSidecar: true,
+				Proxy: structs.ConnectProxyConfig{
+					DestinationServiceName: "web",
+					DestinationServiceID:   "web1",
+					LocalServiceAddress:    "1.2.3.4",
+					LocalServicePort:       1111,
+				},
+			},
+			wantChecks: []*structs.CheckType{
+				{
+					Name:     "Connect Sidecar Listening",
+					TCP:      "1.2.3.4:2222",
+					Interval: 10 * time.Second,
+				},
+				{
+					Name:         "Connect Sidecar Aliasing web1",
+					AliasService: "web1",
+				},
+			},
+			wantToken: "foo",
+		},
+		{
+			name: "uses 127.0.0.1 for check if proxy and proxy.local_service_address are empty",
+			sd: &structs.ServiceDefinition{
+				ID:   "web1",
+				Name: "web",
+				Port: 1111,
+				Connect: &structs.ServiceConnect{
+					SidecarService: &structs.ServiceDefinition{
+						Address: "",
+						Proxy: &structs.ConnectProxyConfig{
+							LocalServiceAddress: "",
+						},
+					},
+				},
+				Address: "",
+			},
+			token: "foo",
+			wantNS: &structs.NodeService{
+				EnterpriseMeta:             *structs.DefaultEnterpriseMetaInDefaultPartition(),
+				Kind:                       structs.ServiceKindConnectProxy,
+				ID:                         "web1-sidecar-proxy",
+				Service:                    "web-sidecar-proxy",
+				Port:                       2222,
+				Address:                    "",
+				LocallyRegisteredAsSidecar: true,
+				Proxy: structs.ConnectProxyConfig{
+					DestinationServiceName: "web",
+					DestinationServiceID:   "web1",
+					LocalServiceAddress:    "127.0.0.1",
+					LocalServicePort:       1111,
+				},
+			},
+			wantChecks: []*structs.CheckType{
+				{
+					Name:     "Connect Sidecar Listening",
+					TCP:      "127.0.0.1:2222",
+					Interval: 10 * time.Second,
+				},
+				{
+					Name:         "Connect Sidecar Aliasing web1",
+					AliasService: "web1",
+				},
+			},
+			wantToken: "foo",
+		},
 	}

 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {


@@ -3,12 +3,13 @@ package structs

 import (
 	"errors"
 	"fmt"
-	"github.com/miekg/dns"
 	"net"
 	"strconv"
 	"strings"
 	"time"

+	"github.com/miekg/dns"
+
 	"github.com/hashicorp/go-multierror"
 	"github.com/mitchellh/hashstructure"
 	"github.com/mitchellh/mapstructure"
@@ -362,6 +363,13 @@ func (e *ProxyConfigEntry) Normalize() error {
 	}

 	e.Kind = ProxyDefaults

+	// proxy default config only accepts global configs
+	// this check is replicated in normalize() and validate(),
+	// since validate is not called by all the endpoints (e.g., delete)
+	if e.Name != "" && e.Name != ProxyConfigGlobal {
+		return fmt.Errorf("invalid name (%q), only %q is supported", e.Name, ProxyConfigGlobal)
+	}
 	e.Name = ProxyConfigGlobal

 	e.EnterpriseMeta.Normalize()
@@ -961,6 +969,11 @@ type PassiveHealthCheck struct {
 	// MaxFailures is the count of consecutive failures that results in a host
 	// being removed from the pool.
 	MaxFailures uint32 `json:",omitempty" alias:"max_failures"`
+
+	// EnforcingConsecutive5xx is the % chance that a host will be actually ejected
+	// when an outlier status is detected through consecutive 5xx.
+	// This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.
+	EnforcingConsecutive5xx *uint32 `json:",omitempty" alias:"enforcing_consecutive_5xx"`
 }
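As a usage illustration (not part of this change), the new knob rides along in an upstream's `passive_health_check` config block; the `uint32Ptr` helper and the values are hypothetical:

```go
// With EnforcingConsecutive5xx set to 80, a host that trips the
// consecutive-5xx outlier detector is ejected with 80% probability;
// 0 disables ejection, 100 (the default) always ejects.
func uint32Ptr(v uint32) *uint32 { return &v }

var upstreamConfig = map[string]interface{}{
	"passive_health_check": &PassiveHealthCheck{
		MaxFailures:             5,
		Interval:                10 * time.Second,
		EnforcingConsecutive5xx: uint32Ptr(80),
	},
}
```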
 func (chk *PassiveHealthCheck) Clone() *PassiveHealthCheck {


@@ -964,11 +964,18 @@ func (e *ServiceResolverConfigEntry) Validate() error {
 		// TODO(rb): prevent subsets and default subsets from being defined?

-		if r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Partition == "" && r.Datacenter == "" {
+		if r.isEmpty() {
 			return fmt.Errorf("Redirect is empty")
 		}

-		if r.Service == "" {
+		switch {
+		case r.Peer != "" && r.ServiceSubset != "":
+			return fmt.Errorf("Redirect.Peer cannot be set with Redirect.ServiceSubset")
+		case r.Peer != "" && r.Partition != "":
+			return fmt.Errorf("Redirect.Partition cannot be set with Redirect.Peer")
+		case r.Peer != "" && r.Datacenter != "":
+			return fmt.Errorf("Redirect.Peer cannot be set with Redirect.Datacenter")
+		case r.Service == "":
 			if r.ServiceSubset != "" {
 				return fmt.Errorf("Redirect.ServiceSubset defined without Redirect.Service")
 			}
@@ -978,9 +985,12 @@ func (e *ServiceResolverConfigEntry) Validate() error {
 			if r.Partition != "" {
 				return fmt.Errorf("Redirect.Partition defined without Redirect.Service")
 			}
-		} else if r.Service == e.Name {
-			if r.ServiceSubset != "" && !isSubset(r.ServiceSubset) {
-				return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, r.Service)
+			if r.Peer != "" {
+				return fmt.Errorf("Redirect.Peer defined without Redirect.Service")
+			}
+		case r.ServiceSubset != "" && (r.Service == "" || r.Service == e.Name):
+			if !isSubset(r.ServiceSubset) {
+				return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, e.Name)
 			}
 		}
 	}
@@ -1231,6 +1241,25 @@ type ServiceResolverRedirect struct {
 	// Datacenter is the datacenter to resolve the service from instead of the
 	// current one (optional).
 	Datacenter string `json:",omitempty"`
+
+	// Peer is the name of the cluster peer to resolve the service from instead
+	// of the current one (optional).
+	Peer string `json:",omitempty"`
+}
+
+func (r *ServiceResolverRedirect) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
+	return DiscoveryTargetOpts{
+		Service:       r.Service,
+		ServiceSubset: r.ServiceSubset,
+		Namespace:     r.Namespace,
+		Partition:     r.Partition,
+		Datacenter:    r.Datacenter,
+		Peer:          r.Peer,
+	}
+}
+
+func (r *ServiceResolverRedirect) isEmpty() bool {
+	return r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Partition == "" && r.Datacenter == "" && r.Peer == ""
 }

 // There are some restrictions on what is allowed in here:
@@ -1275,6 +1304,14 @@ type ServiceResolverFailover struct {
 	Targets []ServiceResolverFailoverTarget `json:",omitempty"`
 }

+func (t *ServiceResolverFailover) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
+	return DiscoveryTargetOpts{
+		Service:       t.Service,
+		ServiceSubset: t.ServiceSubset,
+		Namespace:     t.Namespace,
+	}
+}
+
 func (f *ServiceResolverFailover) isEmpty() bool {
 	return f.Service == "" && f.ServiceSubset == "" && f.Namespace == "" && len(f.Datacenters) == 0 && len(f.Targets) == 0
 }
@@ -1299,6 +1336,17 @@ type ServiceResolverFailoverTarget struct {
 	Peer string `json:",omitempty"`
 }

+func (t *ServiceResolverFailoverTarget) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
+	return DiscoveryTargetOpts{
+		Service:       t.Service,
+		ServiceSubset: t.ServiceSubset,
+		Namespace:     t.Namespace,
+		Partition:     t.Partition,
+		Datacenter:    t.Datacenter,
+		Peer:          t.Peer,
+	}
+}
 // LoadBalancer determines the load balancing policy and configuration for services
 // issuing requests to this upstream service.
 type LoadBalancer struct {


@@ -72,6 +72,28 @@ func TestServiceResolverConfigEntry_OSS(t *testing.T) {
 			},
 			validateErr: `Bad Failover["*"]: Setting Namespace requires Consul Enterprise`,
 		},
+		{
+			name: "setting redirect Namespace on OSS",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Redirect: &ServiceResolverRedirect{
+					Namespace: "ns1",
+				},
+			},
+			validateErr: `Redirect: Setting Namespace requires Consul Enterprise`,
+		},
+		{
+			name: "setting redirect Partition on OSS",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Redirect: &ServiceResolverRedirect{
+					Partition: "ap1",
+				},
+			},
+			validateErr: `Redirect: Setting Partition requires Consul Enterprise`,
+		},
 	}

 	// Bulk add a bunch of similar validation cases.


@@ -655,6 +655,41 @@ func TestServiceResolverConfigEntry(t *testing.T) {
 			},
 			validateErr: `Redirect.ServiceSubset "gone" is not a valid subset of "test"`,
 		},
+		{
+			name: "redirect with peer and subset",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Redirect: &ServiceResolverRedirect{
+					Peer:          "cluster-01",
+					ServiceSubset: "gone",
+				},
+			},
+			validateErr: `Redirect.Peer cannot be set with Redirect.ServiceSubset`,
+		},
+		{
+			name: "redirect with peer and datacenter",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Redirect: &ServiceResolverRedirect{
+					Peer:       "cluster-01",
+					Datacenter: "dc2",
+				},
+			},
+			validateErr: `Redirect.Peer cannot be set with Redirect.Datacenter`,
+		},
+		{
+			name: "redirect with peer but no service",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Redirect: &ServiceResolverRedirect{
+					Peer: "cluster-01",
+				},
+			},
+			validateErr: `Redirect.Peer defined without Redirect.Service`,
+		},
 		{
 			name: "self redirect with valid subset",
 			entry: &ServiceResolverConfigEntry{
@@ -669,6 +704,17 @@ func TestServiceResolverConfigEntry(t *testing.T) {
 				},
 			},
 		},
+		{
+			name: "redirect to peer",
+			entry: &ServiceResolverConfigEntry{
+				Kind: ServiceResolver,
+				Name: "test",
+				Redirect: &ServiceResolverRedirect{
+					Service: "other",
+					Peer:    "cluster-01",
+				},
+			},
+		},
 		{
 			name: "simple wildcard failover",
 			entry: &ServiceResolverConfigEntry{


@@ -2754,8 +2754,9 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
 					MaxConcurrentRequests: intPointer(12),
 				},
 				"passive_health_check": &PassiveHealthCheck{
-					MaxFailures: 13,
-					Interval:    14 * time.Second,
+					MaxFailures:             13,
+					Interval:                14 * time.Second,
+					EnforcingConsecutive5xx: uintPointer(80),
 				},
 				"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
 			},
@@ -2770,8 +2771,9 @@ func TestUpstreamConfig_MergeInto(t *testing.T) {
 					MaxConcurrentRequests: intPointer(12),
 				},
 				"passive_health_check": &PassiveHealthCheck{
-					MaxFailures: 13,
-					Interval:    14 * time.Second,
+					MaxFailures:             13,
+					Interval:                14 * time.Second,
+					EnforcingConsecutive5xx: uintPointer(80),
 				},
 				"mesh_gateway": MeshGatewayConfig{Mode: MeshGatewayModeLocal},
 			},
@@ -2944,6 +2946,28 @@ func TestParseUpstreamConfig(t *testing.T) {
 	}
 }

+func TestProxyConfigEntry(t *testing.T) {
+	cases := map[string]configEntryTestcase{
+		"proxy config name provided is not global": {
+			entry: &ProxyConfigEntry{
+				Name: "foo",
+			},
+			normalizeErr: `invalid name ("foo"), only "global" is supported`,
+		},
+		"proxy config has no name": {
+			entry: &ProxyConfigEntry{
+				Name: "",
+			},
+			expected: &ProxyConfigEntry{
+				Name:           ProxyConfigGlobal,
+				Kind:           ProxyDefaults,
+				EnterpriseMeta: *acl.DefaultEnterpriseMeta(),
+			},
+		},
+	}
+	testConfigEntryNormalizeAndValidate(t, cases)
+}
+
 func requireContainsLower(t *testing.T, haystack, needle string) {
 	t.Helper()
 	require.Contains(t, strings.ToLower(haystack), strings.ToLower(needle))
@@ -3046,3 +3070,7 @@ func testConfigEntryNormalizeAndValidate(t *testing.T, cases map[string]configEn
 		})
 	}
 }
+
+func uintPointer(v uint32) *uint32 {
+	return &v
+}


@@ -56,7 +56,12 @@ type CompiledDiscoveryChain struct {
 // ID returns an ID that encodes the service, namespace, partition, and datacenter.
 // This ID allows us to compare a discovery chain target to the chain upstream itself.
 func (c *CompiledDiscoveryChain) ID() string {
-	return chainID("", c.ServiceName, c.Namespace, c.Partition, c.Datacenter)
+	return chainID(DiscoveryTargetOpts{
+		Service:    c.ServiceName,
+		Namespace:  c.Namespace,
+		Partition:  c.Partition,
+		Datacenter: c.Datacenter,
+	})
 }

 func (c *CompiledDiscoveryChain) CompoundServiceName() ServiceName {
@@ -185,6 +190,7 @@ type DiscoveryTarget struct {
 	Namespace  string `json:",omitempty"`
 	Partition  string `json:",omitempty"`
 	Datacenter string `json:",omitempty"`
+	Peer       string `json:",omitempty"`

 	MeshGateway MeshGatewayConfig     `json:",omitempty"`
 	Subset      ServiceResolverSubset `json:",omitempty"`
@@ -240,28 +246,52 @@ func (t *DiscoveryTarget) UnmarshalJSON(data []byte) error {
 	return nil
 }

-func NewDiscoveryTarget(service, serviceSubset, namespace, partition, datacenter string) *DiscoveryTarget {
+type DiscoveryTargetOpts struct {
+	Service       string
+	ServiceSubset string
+	Namespace     string
+	Partition     string
+	Datacenter    string
+	Peer          string
+}
+
+func NewDiscoveryTarget(opts DiscoveryTargetOpts) *DiscoveryTarget {
 	t := &DiscoveryTarget{
-		Service:       service,
-		ServiceSubset: serviceSubset,
-		Namespace:     namespace,
-		Partition:     partition,
-		Datacenter:    datacenter,
+		Service:       opts.Service,
+		ServiceSubset: opts.ServiceSubset,
+		Namespace:     opts.Namespace,
+		Partition:     opts.Partition,
+		Datacenter:    opts.Datacenter,
+		Peer:          opts.Peer,
 	}
 	t.setID()
 	return t
 }

-func chainID(subset, service, namespace, partition, dc string) string {
-	// NOTE: this format is similar to the SNI syntax for simplicity
-	if subset == "" {
-		return fmt.Sprintf("%s.%s.%s.%s", service, namespace, partition, dc)
+func (t *DiscoveryTarget) ToDiscoveryTargetOpts() DiscoveryTargetOpts {
+	return DiscoveryTargetOpts{
+		Service:       t.Service,
+		ServiceSubset: t.ServiceSubset,
+		Namespace:     t.Namespace,
+		Partition:     t.Partition,
+		Datacenter:    t.Datacenter,
+		Peer:          t.Peer,
 	}
-	return fmt.Sprintf("%s.%s.%s.%s.%s", subset, service, namespace, partition, dc)
+}
+
+func chainID(opts DiscoveryTargetOpts) string {
+	// NOTE: this format is similar to the SNI syntax for simplicity
+	if opts.Peer != "" {
+		return fmt.Sprintf("%s.%s.default.external.%s", opts.Service, opts.Namespace, opts.Peer)
+	}
+	if opts.ServiceSubset == "" {
+		return fmt.Sprintf("%s.%s.%s.%s", opts.Service, opts.Namespace, opts.Partition, opts.Datacenter)
+	}
+	return fmt.Sprintf("%s.%s.%s.%s.%s", opts.ServiceSubset, opts.Service, opts.Namespace, opts.Partition, opts.Datacenter)
 }
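Spelled out, the ID formats produced above look like this (the field values are illustrative):

```go
// chainID output for a few hypothetical DiscoveryTargetOpts:
//
//   {Service: "db", Namespace: "default", Partition: "default", Datacenter: "dc2"}
//       -> "db.default.default.dc2"
//   {ServiceSubset: "v1", Service: "db", Namespace: "default", Partition: "default", Datacenter: "dc2"}
//       -> "v1.db.default.default.dc2"
//   {Service: "db", Namespace: "default", Peer: "cluster-01"}
//       -> "db.default.default.external.cluster-01"
//          (the partition slot is pinned to "default" and the datacenter slot to "external")
```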
 func (t *DiscoveryTarget) setID() {
-	t.ID = chainID(t.ServiceSubset, t.Service, t.Namespace, t.Partition, t.Datacenter)
+	t.ID = chainID(t.ToDiscoveryTargetOpts())
 }

 func (t *DiscoveryTarget) String() string {


@@ -53,6 +53,28 @@ func TestNodeServiceWithName(t testing.T, name string) *NodeService {
 	}
 }

+const peerTrustDomain = "1c053652-8512-4373-90cf-5a7f6263a994.consul"
+
+func TestNodeServiceWithNameInPeer(t testing.T, name string, peer string) *NodeService {
+	service := "payments"
+	return &NodeService{
+		Kind:    ServiceKindTypical,
+		Service: name,
+		Port:    8080,
+		Connect: ServiceConnect{
+			PeerMeta: &PeeringServiceMeta{
+				SNI: []string{
+					service + ".default.default." + peer + ".external." + peerTrustDomain,
+				},
+				SpiffeID: []string{
+					"spiffe://" + peerTrustDomain + "/ns/default/dc/" + peer + "-dc/svc/" + service,
+				},
+				Protocol: "tcp",
+			},
+		},
+	}
+}
+
 // TestNodeServiceProxy returns a *NodeService representing a valid
 // Connect proxy.
 func TestNodeServiceProxy(t testing.T) *NodeService {


@@ -26,7 +26,7 @@ func TestUpstreams(t testing.T) Upstreams {
 			Config: map[string]interface{}{
 				// Float because this is how it is decoded by JSON decoder so this
 				// enables the value returned to be compared directly to a decoded JSON
-				// response without spurios type loss.
+				// response without spurious type loss.
 				"connect_timeout_ms": float64(1000),
 			},
 		},


@@ -66,6 +66,10 @@ func (m *LocalMaterializer) Run(ctx context.Context) {
 		if ctx.Err() != nil {
 			return
 		}
+		if m.isTerminalError(err) {
+			return
+		}
+
 		m.mat.handleError(req, err)

 		if err := m.mat.retryWaiter.Wait(ctx); err != nil {
@@ -74,6 +78,14 @@ func (m *LocalMaterializer) Run(ctx context.Context) {
 	}
 }

+// isTerminalError determines whether the given error cannot be recovered from
+// and should cause the materializer to halt and be evicted from the view store.
+//
+// This roughly matches the logic in agent/proxycfg-glue.newUpdateEvent.
+func (m *LocalMaterializer) isTerminalError(err error) bool {
+	return acl.IsErrNotFound(err)
+}
+
 // subscribeOnce opens a new subscription to a local backend and runs
 // for its lifetime or until the view is closed.
 func (m *LocalMaterializer) subscribeOnce(ctx context.Context, req *pbsubscribe.SubscribeRequest) error {


@@ -47,6 +47,9 @@ type entry struct {
 	// requests is the count of active requests using this entry. This entry will
 	// remain in the store as long as this count remains > 0.
 	requests int
+
+	// evicting is used to mark an entry that will be evicted when the current in-
+	// flight requests finish.
+	evicting bool
 }

 // NewStore creates and returns a Store that is ready for use. The caller must
@@ -89,6 +92,7 @@ func (s *Store) Run(ctx context.Context) {
 			// Only stop the materializer if there are no active requests.
 			if e.requests == 0 {
+				s.logger.Trace("evicting item from store", "key", he.Key())
 				e.stop()
 				delete(s.byKey, he.Key())
 			}
@@ -187,13 +191,13 @@ func (s *Store) NotifyCallback(
 					"error", err,
 					"request-type", req.Type(),
 					"index", index)
+				continue
 			}

 			index = result.Index
 			cb(ctx, cache.UpdateEvent{
 				CorrelationID: correlationID,
 				Result:        result.Value,
-				Err:           err,
 				Meta:          cache.ResultMeta{Index: result.Index, Hit: result.Cached},
 			})
 		}
@@ -211,6 +215,9 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) {
 	defer s.lock.Unlock()
 	e, ok := s.byKey[key]
 	if ok {
+		if e.evicting {
+			return "", nil, errors.New("item is marked for eviction")
+		}
 		e.requests++
 		s.byKey[key] = e
 		return key, e.materializer, nil
@@ -222,7 +229,18 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) {
 	}

 	ctx, cancel := context.WithCancel(context.Background())
-	go mat.Run(ctx)
+	go func() {
+		mat.Run(ctx)
+
+		// Materializers run until they either reach their TTL and are evicted (which
+		// cancels the given context) or encounter an irrecoverable error.
+		//
+		// If the context hasn't been canceled, we know it's the error case so we
+		// trigger an immediate eviction.
+		if ctx.Err() == nil {
+			s.evictNow(key)
+		}
+	}()

 	e = entry{
 		materializer: mat,
@@ -233,6 +251,28 @@ func (s *Store) readEntry(req Request) (string, Materializer, error) {
 	return key, e.materializer, nil
 }

+// evictNow causes the item with the given key to be evicted immediately.
+//
+// If there are requests in-flight, the item is marked for eviction such that
+// once the requests have been served releaseEntry will move it to the top of
+// the expiry heap. If there are no requests in-flight, evictNow will move the
+// item to the top of the expiry heap itself.
+//
+// In either case, the entry's evicting flag prevents it from being served by
+// readEntry (and thereby gaining new in-flight requests).
+func (s *Store) evictNow(key string) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	e := s.byKey[key]
+	e.evicting = true
+	s.byKey[key] = e
+
+	if e.requests == 0 {
+		s.expireNowLocked(key)
+	}
+}
+
 // releaseEntry decrements the request count and starts an expiry timer if the
 // count has reached 0. Must be called once for every call to readEntry.
 func (s *Store) releaseEntry(key string) {
@@ -246,6 +286,11 @@ func (s *Store) releaseEntry(key string) {
 		return
 	}

+	if e.evicting {
+		s.expireNowLocked(key)
+		return
+	}
+
 	if e.expiry.Index() == ttlcache.NotIndexed {
 		e.expiry = s.expiryHeap.Add(key, s.idleTTL)
 		s.byKey[key] = e
@@ -255,6 +300,17 @@ func (s *Store) releaseEntry(key string) {
 	s.expiryHeap.Update(e.expiry.Index(), s.idleTTL)
 }

+// expireNowLocked moves the item with the given key to the top of the expiry
+// heap, causing it to be picked up by the expiry loop and evicted immediately.
+func (s *Store) expireNowLocked(key string) {
+	e := s.byKey[key]
+	if idx := e.expiry.Index(); idx != ttlcache.NotIndexed {
+		s.expiryHeap.Remove(idx)
+	}
+	e.expiry = s.expiryHeap.Add(key, time.Duration(0))
+	s.byKey[key] = e
+}
// makeEntryKey matches agent/cache.makeEntryKey, but may change in the future. // makeEntryKey matches agent/cache.makeEntryKey, but may change in the future.
func makeEntryKey(typ string, r cache.RequestInfo) string { func makeEntryKey(typ string, r cache.RequestInfo) string {
return fmt.Sprintf("%s/%s/%s/%s", typ, r.Datacenter, r.Token, r.Key) return fmt.Sprintf("%s/%s/%s/%s", typ, r.Datacenter, r.Token, r.Key)

View File

@@ -509,3 +509,75 @@ func TestStore_Run_ExpiresEntries(t *testing.T) {
 	require.Len(t, store.byKey, 0)
 	require.Equal(t, ttlcache.NotIndexed, e.expiry.Index())
 }
+
+func TestStore_Run_FailingMaterializer(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	t.Cleanup(cancel)
+
+	store := NewStore(hclog.NewNullLogger())
+	store.idleTTL = 24 * time.Hour
+	go store.Run(ctx)
+
+	t.Run("with an in-flight request", func(t *testing.T) {
+		req := &failingMaterializerRequest{
+			doneCh: make(chan struct{}),
+		}
+
+		ch := make(chan cache.UpdateEvent)
+		reqCtx, reqCancel := context.WithCancel(context.Background())
+		t.Cleanup(reqCancel)
+		require.NoError(t, store.Notify(reqCtx, req, "", ch))
+
+		assertRequestCount(t, store, req, 1)
+
+		// Cause the materializer to "fail" (exit before its context is canceled).
+		close(req.doneCh)
+
+		// End the in-flight request.
+		reqCancel()
+
+		// Check that the item was evicted.
+		retry.Run(t, func(r *retry.R) {
+			store.lock.Lock()
+			defer store.lock.Unlock()
+
+			require.Len(r, store.byKey, 0)
+		})
+	})
+
+	t.Run("with no in-flight requests", func(t *testing.T) {
+		req := &failingMaterializerRequest{
+			doneCh: make(chan struct{}),
+		}
+
+		// Cause the materializer to "fail" (exit before its context is canceled).
+		close(req.doneCh)
+
+		// Check that the item was evicted.
+		retry.Run(t, func(r *retry.R) {
+			store.lock.Lock()
+			defer store.lock.Unlock()
+
+			require.Len(r, store.byKey, 0)
+		})
+	})
+}
+
+type failingMaterializerRequest struct {
+	doneCh chan struct{}
+}
+
+func (failingMaterializerRequest) CacheInfo() cache.RequestInfo { return cache.RequestInfo{} }
+func (failingMaterializerRequest) Type() string                 { return "test.FailingMaterializerRequest" }
+
+func (r *failingMaterializerRequest) NewMaterializer() (Materializer, error) {
+	return &failingMaterializer{doneCh: r.doneCh}, nil
+}
+
+type failingMaterializer struct {
+	doneCh <-chan struct{}
+}
+
+func (failingMaterializer) Query(context.Context, uint64) (Result, error) { return Result{}, nil }
+
+func (m *failingMaterializer) Run(context.Context) { <-m.doneCh }

View File

@@ -185,6 +185,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
 				Address:         node.Address,
 				Datacenter:      node.Datacenter,
 				TaggedAddresses: node.TaggedAddresses,
+				PeerName:        node.PeerName,
 				Meta:            node.Meta,
 				RaftIndex: structs.RaftIndex{
 					ModifyIndex: node.ModifyIndex,
@@ -207,6 +208,7 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
 				Service: structs.NodeService{
 					ID:      svc.ID,
 					Service: svc.Service,
+					Kind:    structs.ServiceKind(svc.Kind),
 					Tags:    svc.Tags,
 					Address: svc.Address,
 					Meta:    svc.Meta,
@@ -226,6 +228,39 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
 					},
 				},
 			}
+
+			if svc.Proxy != nil {
+				out.Service.Service.Proxy = structs.ConnectProxyConfig{}
+				t := &out.Service.Service.Proxy
+				if svc.Proxy.DestinationServiceName != "" {
+					t.DestinationServiceName = svc.Proxy.DestinationServiceName
+				}
+				if svc.Proxy.DestinationServiceID != "" {
+					t.DestinationServiceID = svc.Proxy.DestinationServiceID
+				}
+				if svc.Proxy.LocalServiceAddress != "" {
+					t.LocalServiceAddress = svc.Proxy.LocalServiceAddress
+				}
+				if svc.Proxy.LocalServicePort != 0 {
+					t.LocalServicePort = svc.Proxy.LocalServicePort
+				}
+				if svc.Proxy.LocalServiceSocketPath != "" {
+					t.LocalServiceSocketPath = svc.Proxy.LocalServiceSocketPath
+				}
+				if svc.Proxy.MeshGateway.Mode != "" {
+					t.MeshGateway.Mode = structs.MeshGatewayMode(svc.Proxy.MeshGateway.Mode)
+				}
+
+				if svc.Proxy.TransparentProxy != nil {
+					if svc.Proxy.TransparentProxy.DialedDirectly {
+						t.TransparentProxy.DialedDirectly = svc.Proxy.TransparentProxy.DialedDirectly
+					}
+
+					if svc.Proxy.TransparentProxy.OutboundListenerPort != 0 {
+						t.TransparentProxy.OutboundListenerPort = svc.Proxy.TransparentProxy.OutboundListenerPort
+					}
+				}
+			}
+
 			opsRPC = append(opsRPC, out)

 		case in.Check != nil:
@@ -265,6 +300,8 @@ func (s *HTTPHandlers) convertOps(resp http.ResponseWriter, req *http.Request) (
 					ServiceID:   check.ServiceID,
 					ServiceName: check.ServiceName,
 					ServiceTags: check.ServiceTags,
+					PeerName:    check.PeerName,
+					ExposedPort: check.ExposedPort,
 					Definition: structs.HealthCheckDefinition{
 						HTTP:          check.Definition.HTTP,
 						TLSServerName: check.Definition.TLSServerName,
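For reference, a sketch of how a client could exercise the newly threaded-through `Kind` and `Proxy` fields via the Go API client, assuming the `api` package's transaction types (`TxnOps`, `ServiceTxnOp`) behave as in this release; the node and service names are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Register a sidecar proxy through /v1/txn; Kind and Proxy now survive the
	// conversion above instead of being silently dropped.
	ops := api.TxnOps{
		{
			Service: &api.ServiceTxnOp{
				Verb: api.ServiceSet,
				Node: "node1", // hypothetical node name
				Service: api.AgentService{
					ID:      "web-sidecar-proxy",
					Service: "web-sidecar-proxy",
					Port:    20000,
					Kind:    api.ServiceKindConnectProxy,
					Proxy: &api.AgentServiceConnectProxyConfig{
						DestinationServiceName: "web",
						DestinationServiceID:   "web",
						LocalServiceAddress:    "127.0.0.1",
						LocalServicePort:       8080,
					},
				},
			},
		},
	}

	ok, resp, _, err := client.Txn().Txn(ops, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ok, len(resp.Results))
}
```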

View File

@@ -585,6 +585,7 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
 			"Output": "success",
 			"ServiceID": "",
 			"ServiceName": "",
+			"ExposedPort": 5678,
 			"Definition": {
 				"IntervalDuration": "15s",
 				"TimeoutDuration": "15s",
@@ -600,12 +601,8 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
 	req, _ := http.NewRequest("PUT", "/v1/txn", buf)
 	resp := httptest.NewRecorder()
 	obj, err := a.srv.Txn(resp, req)
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	if resp.Code != 200 {
-		t.Fatalf("expected 200, got %d", resp.Code)
-	}
+	require.NoError(t, err)
+	require.Equal(t, 200, resp.Code, resp.Body)

 	txnResp, ok := obj.(structs.TxnResponse)
 	if !ok {
@@ -662,12 +659,13 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
 			},
 			&structs.TxnResult{
 				Check: &structs.HealthCheck{
-					Node:    a.config.NodeName,
-					CheckID: "nodecheck",
-					Name:    "Node http check",
-					Status:  api.HealthPassing,
-					Notes:   "Http based health check",
-					Output:  "success",
+					Node:        a.config.NodeName,
+					CheckID:     "nodecheck",
+					Name:        "Node http check",
+					Status:      api.HealthPassing,
+					Notes:       "Http based health check",
+					Output:      "success",
+					ExposedPort: 5678,
 					Definition: structs.HealthCheckDefinition{
 						Interval: 15 * time.Second,
 						Timeout:  15 * time.Second,
@@ -686,3 +684,117 @@ func TestTxnEndpoint_UpdateCheck(t *testing.T) {
 		}
 	}
 	assert.Equal(t, expected, txnResp)
 }
+
+func TestTxnEndpoint_NodeService(t *testing.T) {
+	if testing.Short() {
+		t.Skip("too slow for testing.Short")
+	}
+
+	t.Parallel()
+	a := NewTestAgent(t, "")
+	defer a.Shutdown()
+	testrpc.WaitForTestAgent(t, a.RPC, "dc1")
+
+	// Make sure the fields of a service are handled correctly, both for a plain
+	// service and for a connect proxy with a populated Proxy block.
+	buf := bytes.NewBuffer([]byte(fmt.Sprintf(`
+[
+	{
+		"Service": {
+			"Verb": "set",
+			"Node": "%s",
+			"Service": {
+				"Service": "test",
+				"Port": 4444
+			}
+		}
+	},
+	{
+		"Service": {
+			"Verb": "set",
+			"Node": "%s",
+			"Service": {
+				"Service": "test-sidecar-proxy",
+				"Port": 20000,
+				"Kind": "connect-proxy",
+				"Proxy": {
+					"DestinationServiceName": "test",
+					"DestinationServiceID": "test",
+					"LocalServiceAddress": "127.0.0.1",
+					"LocalServicePort": 4444,
+					"upstreams": [
+						{
+							"DestinationName": "fake-backend",
+							"LocalBindPort": 25001
+						}
+					]
+				}
+			}
+		}
+	}
+]
+`, a.config.NodeName, a.config.NodeName)))
+	req, _ := http.NewRequest("PUT", "/v1/txn", buf)
+	resp := httptest.NewRecorder()
+	obj, err := a.srv.Txn(resp, req)
+	require.NoError(t, err)
+	require.Equal(t, 200, resp.Code)
+
+	txnResp, ok := obj.(structs.TxnResponse)
+	if !ok {
+		t.Fatalf("bad type: %T", obj)
+	}
+	require.Equal(t, 2, len(txnResp.Results))
+
+	index := txnResp.Results[0].Service.ModifyIndex
+	expected := structs.TxnResponse{
+		Results: structs.TxnResults{
+			&structs.TxnResult{
+				Service: &structs.NodeService{
+					Service: "test",
+					ID:      "test",
+					Port:    4444,
+					Weights: &structs.Weights{
+						Passing: 1,
+						Warning: 1,
+					},
+					RaftIndex: structs.RaftIndex{
+						CreateIndex: index,
+						ModifyIndex: index,
+					},
+					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
+				},
+			},
+			&structs.TxnResult{
+				Service: &structs.NodeService{
+					Service: "test-sidecar-proxy",
+					ID:      "test-sidecar-proxy",
+					Port:    20000,
+					Kind:    "connect-proxy",
+					Weights: &structs.Weights{
+						Passing: 1,
+						Warning: 1,
+					},
+					Proxy: structs.ConnectProxyConfig{
+						DestinationServiceName: "test",
+						DestinationServiceID:   "test",
+						LocalServiceAddress:    "127.0.0.1",
+						LocalServicePort:       4444,
+					},
+					TaggedAddresses: map[string]structs.ServiceAddress{
+						"consul-virtual": {
+							Address: "240.0.0.1",
+							Port:    20000,
+						},
+					},
+					RaftIndex: structs.RaftIndex{
+						CreateIndex: index,
+						ModifyIndex: index,
+					},
+					EnterpriseMeta: *structs.DefaultEnterpriseMetaInDefaultPartition(),
+				},
+			},
+		},
+	}
+	assert.Equal(t, expected, txnResp)
+}

View File

@@ -211,7 +211,9 @@ func (s *HTTPHandlers) UIServices(resp http.ResponseWriter, req *http.Request) (
 	if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done {
 		return nil, nil
 	}
+	if peer := req.URL.Query().Get("peer"); peer != "" {
+		args.PeerName = peer
+	}
 	if err := s.parseEntMeta(req, &args.EnterpriseMeta); err != nil {
 		return nil, err
 	}
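A quick way to exercise the new `peer` filter against a local agent (the peer name here is hypothetical):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// /v1/internal/ui/services is the endpoint served by UIServices above;
	// "my-peer" is an assumed peer name.
	resp, err := http.Get("http://127.0.0.1:8500/v1/internal/ui/services?peer=my-peer")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```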

View File

@@ -88,29 +88,26 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C
 		clusters = append(clusters, passthroughs...)
 	}

-	// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in endpoints.go
-	// so that the sets of endpoints generated matches the sets of clusters.
-	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
+	getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) {
 		upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]

 		explicit := upstream.HasLocalPortOrSocket()
 		implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
-		if !implicit && !explicit {
-			// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
-			continue
-		}
+		return upstream, !implicit && !explicit
+	}

-		chainEndpoints, ok := cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid]
-		if !ok {
-			// this should not happen
-			return nil, fmt.Errorf("no endpoint map for upstream %q", uid)
+	// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in endpoints.go
+	// so that the sets of endpoints generated matches the sets of clusters.
+	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
+		upstream, skip := getUpstream(uid)
+		if skip {
+			continue
 		}

 		upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
 			uid,
 			upstream,
 			chain,
-			chainEndpoints,
 			cfgSnap,
 			false,
 		)
@@ -127,18 +124,15 @@ func (s *ResourceGenerator) clustersFromSnapshotConnectProxy(cfgSnap *proxycfg.C
 	// upstream in endpoints.go so that the sets of endpoints generated matches
 	// the sets of clusters.
 	for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
-		upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]
-
-		explicit := upstreamCfg.HasLocalPortOrSocket()
-		implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
-		if !implicit && !explicit {
-			// Not associated with a known explicit or implicit upstream so it is skipped.
+		upstream, skip := getUpstream(uid)
+		if skip {
 			continue
 		}

 		peerMeta := cfgSnap.ConnectProxy.UpstreamPeerMeta(uid)
+		cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta)

-		upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, upstreamCfg, peerMeta, cfgSnap)
+		upstreamCluster, err := s.makeUpstreamClusterForPeerService(uid, cfg, peerMeta, cfgSnap)
 		if err != nil {
 			return nil, err
 		}
@@ -652,17 +646,10 @@ func (s *ResourceGenerator) clustersFromSnapshotIngressGateway(cfgSnap *proxycfg
 			return nil, fmt.Errorf("no discovery chain for upstream %q", uid)
 		}

-		chainEndpoints, ok := cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid]
-		if !ok {
-			// this should not happen
-			return nil, fmt.Errorf("no endpoint map for upstream %q", uid)
-		}
-
 		upstreamClusters, err := s.makeUpstreamClustersForDiscoveryChain(
 			uid,
 			&u,
 			chain,
-			chainEndpoints,
 			cfgSnap,
 			false,
 		)
@@ -745,7 +732,7 @@ func (s *ResourceGenerator) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot, nam
 func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
 	uid proxycfg.UpstreamID,
-	upstream *structs.Upstream,
+	upstreamConfig structs.UpstreamConfig,
 	peerMeta structs.PeeringServiceMeta,
 	cfgSnap *proxycfg.ConfigSnapshot,
 ) (*envoy_cluster_v3.Cluster, error) {
@@ -754,16 +741,21 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
 		err error
 	)

-	cfg := s.getAndModifyUpstreamConfigForPeeredListener(uid, upstream, peerMeta)
-	if cfg.EnvoyClusterJSON != "" {
-		c, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
+	if upstreamConfig.EnvoyClusterJSON != "" {
+		c, err = makeClusterFromUserConfig(upstreamConfig.EnvoyClusterJSON)
 		if err != nil {
 			return c, err
 		}
 		// In the happy path don't return yet as we need to inject TLS config still.
 	}

-	tbs, ok := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer)
+	upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+	if err != nil {
+		return c, err
+	}
+
+	tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(uid.Peer)
 	if !ok {
 		// this should never happen since we loop through upstreams with
 		// set trust bundles
@@ -772,22 +764,29 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
 	clusterName := generatePeeredClusterName(uid, tbs)

+	outlierDetection := ToOutlierDetection(upstreamConfig.PassiveHealthCheck)
+	// We can't rely on health checks for services on cluster peers because they
+	// don't take into account service resolvers, splitters and routers. Setting
+	// MaxEjectionPercent to 100% gives outlier detection the power to eject the
+	// entire cluster.
+	outlierDetection.MaxEjectionPercent = &wrappers.UInt32Value{Value: 100}
+
 	s.Logger.Trace("generating cluster for", "cluster", clusterName)
 	if c == nil {
 		c = &envoy_cluster_v3.Cluster{
 			Name:           clusterName,
-			ConnectTimeout: durationpb.New(time.Duration(cfg.ConnectTimeoutMs) * time.Millisecond),
+			ConnectTimeout: durationpb.New(time.Duration(upstreamConfig.ConnectTimeoutMs) * time.Millisecond),
 			CommonLbConfig: &envoy_cluster_v3.Cluster_CommonLbConfig{
 				HealthyPanicThreshold: &envoy_type_v3.Percent{
 					Value: 0, // disable panic threshold
 				},
 			},
 			CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
-				Thresholds: makeThresholdsIfNeeded(cfg.Limits),
+				Thresholds: makeThresholdsIfNeeded(upstreamConfig.Limits),
 			},
-			OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck),
+			OutlierDetection: outlierDetection,
 		}
-		if cfg.Protocol == "http2" || cfg.Protocol == "grpc" {
+		if upstreamConfig.Protocol == "http2" || upstreamConfig.Protocol == "grpc" {
 			if err := s.setHttp2ProtocolOptions(c); err != nil {
 				return c, err
 			}
@@ -821,12 +820,11 @@ func (s *ResourceGenerator) makeUpstreamClusterForPeerService(
 			false, /*onlyPassing*/
 		)
 	}
-	}

 	rootPEMs := cfgSnap.RootPEMs()
 	if uid.Peer != "" {
-		tbs, _ := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer)
+		tbs, _ := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(uid.Peer)
 		rootPEMs = tbs.ConcatenatedRootPEMs()
 	}
@@ -961,7 +959,6 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 	uid proxycfg.UpstreamID,
 	upstream *structs.Upstream,
 	chain *structs.CompiledDiscoveryChain,
-	chainEndpoints map[string]structs.CheckServiceNodes,
 	cfgSnap *proxycfg.ConfigSnapshot,
 	forMeshGateway bool,
 ) ([]*envoy_cluster_v3.Cluster, error) {
@@ -978,7 +975,15 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 		upstreamConfigMap = upstream.Config
 	}

-	cfg, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
+	upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+
+	// Mesh gateways are exempt because upstreamsSnapshot is only used for
+	// cluster peering targets and transitive failover/redirects are unsupported.
+	if err != nil && !forMeshGateway {
+		return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", cfgSnap.Kind)
+	}
+
+	rawUpstreamConfig, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
 	if err != nil {
 		// Don't hard fail on a config typo, just warn. The parse func returns
 		// default config if there is an error so it's safe to continue.
@@ -986,13 +991,28 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 			"error", err)
 	}

+	finalizeUpstreamConfig := func(cfg structs.UpstreamConfig, connectTimeout time.Duration) structs.UpstreamConfig {
+		if cfg.Protocol == "" {
+			cfg.Protocol = chain.Protocol
+		}
+		if cfg.Protocol == "" {
+			cfg.Protocol = "tcp"
+		}
+		if cfg.ConnectTimeoutMs == 0 {
+			cfg.ConnectTimeoutMs = int(connectTimeout / time.Millisecond)
+		}
+		return cfg
+	}
+
 	var escapeHatchCluster *envoy_cluster_v3.Cluster
 	if !forMeshGateway {
-		if cfg.EnvoyClusterJSON != "" {
+		if rawUpstreamConfig.EnvoyClusterJSON != "" {
 			if chain.Default {
 				// If you haven't done anything to set up the discovery chain, then
 				// you can use the envoy_cluster_json escape hatch.
-				escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
+				escapeHatchCluster, err = makeClusterFromUserConfig(rawUpstreamConfig.EnvoyClusterJSON)
 				if err != nil {
 					return nil, err
 				}
@@ -1006,14 +1026,20 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 	var out []*envoy_cluster_v3.Cluster
 	for _, node := range chain.Nodes {
-		if node.Type != structs.DiscoveryGraphNodeTypeResolver {
+		switch {
+		case node == nil:
+			return nil, fmt.Errorf("impossible to process a nil node")
+		case node.Type != structs.DiscoveryGraphNodeTypeResolver:
 			continue
+		case node.Resolver == nil:
+			return nil, fmt.Errorf("impossible to process a non-resolver node")
 		}
 		failover := node.Resolver.Failover
 		// These variables are prefixed with primary to avoid shadowing bugs.
 		primaryTargetID := node.Resolver.Target
 		primaryTarget := chain.Targets[primaryTargetID]
 		primaryClusterName := CustomizeClusterName(primaryTarget.Name, chain)
+		upstreamConfig := finalizeUpstreamConfig(rawUpstreamConfig, node.Resolver.ConnectTimeout)
 		if forMeshGateway {
 			primaryClusterName = meshGatewayExportedClusterNamePrefix + primaryClusterName
 		}
@@ -1026,22 +1052,38 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 			continue
 		}

-		type targetClusterOptions struct {
+		type targetClusterOption struct {
 			targetID    string
 			clusterName string
 		}

 		// Construct the information required to make target clusters. When
 		// failover is configured, create the aggregate cluster.
-		var targetClustersOptions []targetClusterOptions
+		var targetClustersOptions []targetClusterOption
 		if failover != nil && !forMeshGateway {
 			var failoverClusterNames []string
 			for _, tid := range append([]string{primaryTargetID}, failover.Targets...) {
 				target := chain.Targets[tid]
-				clusterName := CustomizeClusterName(target.Name, chain)
+				clusterName := target.Name
+				targetUID := proxycfg.NewUpstreamIDFromTargetID(tid)
+				if targetUID.Peer != "" {
+					tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(targetUID.Peer)
+					// We can't generate a cluster for a peered target without its trust
+					// bundle. The trust bundle should be ready soon.
+					if !ok {
+						s.Logger.Debug("peer trust bundle not ready for discovery chain target",
+							"peer", targetUID.Peer,
+							"target", tid,
+						)
+						continue
+					}
+
+					clusterName = generatePeeredClusterName(targetUID, tbs)
+				}
+				clusterName = CustomizeClusterName(clusterName, chain)
 				clusterName = failoverClusterNamePrefix + clusterName
-				targetClustersOptions = append(targetClustersOptions, targetClusterOptions{
+				targetClustersOptions = append(targetClustersOptions, targetClusterOption{
 					targetID:    tid,
 					clusterName: clusterName,
 				})
@@ -1070,7 +1112,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 			out = append(out, c)
 		} else {
-			targetClustersOptions = append(targetClustersOptions, targetClusterOptions{
+			targetClustersOptions = append(targetClustersOptions, targetClusterOption{
 				targetID:    primaryTargetID,
 				clusterName: primaryClusterName,
 			})
@@ -1089,11 +1131,20 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 				Datacenter: target.Datacenter,
 				Service:    target.Service,
 			}.URI().String()
-			if uid.Peer != "" {
-				return nil, fmt.Errorf("impossible to get a peer discovery chain")
+
+			targetUID := proxycfg.NewUpstreamIDFromTargetID(targetInfo.targetID)
+			s.Logger.Debug("generating cluster for", "cluster", targetInfo.clusterName)
+			if targetUID.Peer != "" {
+				peerMeta := upstreamsSnapshot.UpstreamPeerMeta(targetUID)
+				upstreamCluster, err := s.makeUpstreamClusterForPeerService(targetUID, upstreamConfig, peerMeta, cfgSnap)
+				if err != nil {
+					continue
+				}
+				// Override the cluster name to include the failover-target~ prefix.
+				upstreamCluster.Name = targetInfo.clusterName
+				out = append(out, upstreamCluster)
+				continue
 			}
-			s.Logger.Trace("generating cluster for", "cluster", targetInfo.clusterName)
+
 			c := &envoy_cluster_v3.Cluster{
 				Name:        targetInfo.clusterName,
 				AltStatName: targetInfo.clusterName,
@@ -1114,9 +1165,9 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 				},
 				// TODO(peering): make circuit breakers or outlier detection work?
 				CircuitBreakers: &envoy_cluster_v3.CircuitBreakers{
-					Thresholds: makeThresholdsIfNeeded(cfg.Limits),
+					Thresholds: makeThresholdsIfNeeded(upstreamConfig.Limits),
 				},
-				OutlierDetection: ToOutlierDetection(cfg.PassiveHealthCheck),
+				OutlierDetection: ToOutlierDetection(upstreamConfig.PassiveHealthCheck),
 			}

 			var lb *structs.LoadBalancer
@@ -1127,19 +1178,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 				return nil, fmt.Errorf("failed to apply load balancer configuration to cluster %q: %v", targetInfo.clusterName, err)
 			}

-			var proto string
-			if !forMeshGateway {
-				proto = cfg.Protocol
-			}
-			if proto == "" {
-				proto = chain.Protocol
-			}
-
-			if proto == "" {
-				proto = "tcp"
-			}
-
-			if proto == "http2" || proto == "grpc" {
+			if upstreamConfig.Protocol == "http2" || upstreamConfig.Protocol == "grpc" {
 				if err := s.setHttp2ProtocolOptions(c); err != nil {
 					return nil, err
 				}
@@ -1148,7 +1187,7 @@ func (s *ResourceGenerator) makeUpstreamClustersForDiscoveryChain(
 			configureTLS := true
 			if forMeshGateway {
 				// We only initiate TLS if we're doing an L7 proxy.
-				configureTLS = structs.IsProtocolHTTPLike(proto)
+				configureTLS = structs.IsProtocolHTTPLike(upstreamConfig.Protocol)
 			}

 			if configureTLS {
@@ -1221,7 +1260,6 @@ func (s *ResourceGenerator) makeExportedUpstreamClustersForMeshGateway(cfgSnap *
 			proxycfg.NewUpstreamIDFromServiceName(svc),
 			nil,
 			chain,
-			nil,
 			cfgSnap,
 			true,
 		)
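One notable piece above is the `finalizeUpstreamConfig` closure, which centralizes protocol and connect-timeout defaulting that was previously inlined per target. A standalone sketch of the same precedence rules, using simplified stand-in types rather than Consul's:

```go
package main

import (
	"fmt"
	"time"
)

type upstreamConfig struct {
	Protocol         string
	ConnectTimeoutMs int
}

// finalize applies the same precedence as the closure above: an explicit
// upstream protocol wins, then the chain's protocol, then "tcp"; the
// resolver's connect timeout fills in a missing ConnectTimeoutMs.
func finalize(cfg upstreamConfig, chainProtocol string, connectTimeout time.Duration) upstreamConfig {
	if cfg.Protocol == "" {
		cfg.Protocol = chainProtocol
	}
	if cfg.Protocol == "" {
		cfg.Protocol = "tcp"
	}
	if cfg.ConnectTimeoutMs == 0 {
		cfg.ConnectTimeoutMs = int(connectTimeout / time.Millisecond)
	}
	return cfg
}

func main() {
	cfg := finalize(upstreamConfig{}, "grpc", 5*time.Second)
	fmt.Println(cfg.Protocol, cfg.ConnectTimeoutMs) // grpc 5000
}
```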

View File

@@ -169,6 +169,18 @@ func TestClustersFromSnapshot(t *testing.T) {
 				}, nil)
 			},
 		},
+		{
+			name: "custom-passive-healthcheck",
+			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+				return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
+					ns.Proxy.Upstreams[0].Config["passive_health_check"] = map[string]interface{}{
+						"enforcing_consecutive_5xx": float64(80),
+						"max_failures":              float64(5),
+						"interval":                  float64(10),
+					}
+				}, nil)
+			},
+		},
 		{
 			name: "custom-max-inbound-connections",
 			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@@ -257,6 +269,12 @@ func TestClustersFromSnapshot(t *testing.T) {
 				return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil)
 			},
 		},
+		{
+			name: "connect-proxy-with-chain-and-failover-to-cluster-peer",
+			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+				return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-to-cluster-peer", nil, nil)
+			},
+		},
 		{
 			name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway",
 			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@@ -495,6 +513,13 @@ func TestClustersFromSnapshot(t *testing.T) {
 					"failover", nil, nil, nil)
 			},
 		},
+		{
+			name: "ingress-with-chain-and-failover-to-cluster-peer",
+			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+				return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
+					"failover-to-cluster-peer", nil, nil, nil)
+			},
+		},
 		{
 			name: "ingress-with-tcp-chain-failover-through-remote-gateway",
 			create: func(t testinf.T) *proxycfg.ConfigSnapshot {

View File

@@ -27,6 +27,12 @@ type ProxyConfig struct {
 	// Note: This escape hatch is compatible with the discovery chain.
 	PublicListenerJSON string `mapstructure:"envoy_public_listener_json"`

+	// ListenerTracingJSON is a complete override ("escape hatch") for the
+	// listeners tracing configuration.
+	//
+	// Note: This escape hatch is compatible with the discovery chain.
+	ListenerTracingJSON string `mapstructure:"envoy_listener_tracing_json"`
+
 	// LocalClusterJSON is a complete override ("escape hatch") for the
 	// local application cluster.
 	//
@@ -168,5 +174,10 @@ func ToOutlierDetection(p *structs.PassiveHealthCheck) *envoy_cluster_v3.Outlier
 	if p.MaxFailures != 0 {
 		od.Consecutive_5Xx = &wrappers.UInt32Value{Value: p.MaxFailures}
 	}

+	if p.EnforcingConsecutive5xx != nil {
+		od.EnforcingConsecutive_5Xx = &wrappers.UInt32Value{Value: *p.EnforcingConsecutive5xx}
+	}
+
 	return od
 }
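With `EnforcingConsecutive5xx` now mapped through, the pointer type is what distinguishes "unset" from an explicit 0% enforcement. A rough sketch of the mapping with stand-in types (not the Envoy protos):

```go
package main

import "fmt"

// Stand-ins for structs.PassiveHealthCheck and Envoy's OutlierDetection.
type passiveHealthCheck struct {
	MaxFailures             uint32
	EnforcingConsecutive5xx *uint32 // pointer: zero is a valid enforcement percentage
}

type outlierDetection struct {
	Consecutive5xx          uint32
	EnforcingConsecutive5xx uint32 // percentage
}

func toOutlierDetection(p *passiveHealthCheck) *outlierDetection {
	od := &outlierDetection{}
	if p == nil {
		return od
	}
	if p.MaxFailures != 0 {
		od.Consecutive5xx = p.MaxFailures
	}
	// A nil pointer means "leave Envoy's default"; a non-nil pointer carries
	// the user's value, even if that value is 0.
	if p.EnforcingConsecutive5xx != nil {
		od.EnforcingConsecutive5xx = *p.EnforcingConsecutive5xx
	}
	return od
}

func main() {
	pct := uint32(80)
	od := toOutlierDetection(&passiveHealthCheck{MaxFailures: 5, EnforcingConsecutive5xx: &pct})
	fmt.Printf("%+v\n", od) // &{Consecutive5xx:5 EnforcingConsecutive5xx:80}
}
```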

View File

@@ -81,6 +81,11 @@ const (
 )

 func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discovery_v3.DeltaDiscoveryRequest) error {
+	// Handle invalid ACL tokens up-front.
+	if _, err := s.authenticate(stream.Context()); err != nil {
+		return err
+	}
+
 	// Loop state
 	var (
 		cfgSnap     *proxycfg.ConfigSnapshot
@@ -200,7 +205,18 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
 				}
 			}

-		case cfgSnap = <-stateCh:
+		case cs, ok := <-stateCh:
+			if !ok {
+				// stateCh is closed either when *we* cancel the watch (on-exit via defer)
+				// or by the proxycfg.Manager when an irrecoverable error is encountered
+				// such as the ACL token getting deleted.
+				//
+				// We know for sure that this is the latter case, because in the former we
+				// would've already exited this loop.
+				return status.Error(codes.Aborted, "xDS stream terminated due to an irrecoverable error, please try again")
+			}
+			cfgSnap = cs
+
 			newRes, err := generator.allResourcesFromSnapshot(cfgSnap)
 			if err != nil {
 				return status.Errorf(codes.Unavailable, "failed to generate all xDS resources from the snapshot: %v", err)
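The switch to `case cs, ok := <-stateCh` matters because a receive from a closed channel succeeds immediately with the zero value; without the `ok` check the loop would treat a closed channel as an endless stream of nil snapshots. A tiny illustration of the pattern:

```go
package main

import "fmt"

func main() {
	ch := make(chan *int)
	close(ch)

	// A plain receive returns the zero value (nil) forever once ch is closed.
	v, ok := <-ch
	fmt.Println(v, ok) // <nil> false

	// In a select loop, the ok flag is the only way to tell "closed" apart
	// from "sent nil", which is why the xDS loop above aborts the stream
	// when ok is false.
	select {
	case v, ok := <-ch:
		if !ok {
			fmt.Println("channel closed; terminate stream")
			return
		}
		_ = v
	}
}
```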

View File

@@ -50,14 +50,19 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
 		cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Len()+
 			len(cfgSnap.ConnectProxy.WatchedUpstreamEndpoints))

-	// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go
-	// so that the sets of endpoints generated matches the sets of clusters.
-	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
+	getUpstream := func(uid proxycfg.UpstreamID) (*structs.Upstream, bool) {
 		upstream := cfgSnap.ConnectProxy.UpstreamConfig[uid]

 		explicit := upstream.HasLocalPortOrSocket()
 		implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
-		if !implicit && !explicit {
+		return upstream, !implicit && !explicit
+	}
+
+	// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go
+	// so that the sets of endpoints generated matches the sets of clusters.
+	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
+		upstream, skip := getUpstream(uid)
+		if skip {
 			// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
 			continue
 		}
@@ -70,6 +75,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
 		es, err := s.endpointsFromDiscoveryChain(
 			uid,
 			chain,
+			cfgSnap,
 			cfgSnap.Locality,
 			upstreamConfigMap,
 			cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid],
@@ -86,12 +92,9 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
 	// upstream in clusters.go so that the sets of endpoints generated matches
 	// the sets of clusters.
 	for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
-		upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]
-
-		explicit := upstreamCfg.HasLocalPortOrSocket()
-		implicit := cfgSnap.ConnectProxy.IsImplicitUpstream(uid)
-		if !implicit && !explicit {
-			// Not associated with a known explicit or implicit upstream so it is skipped.
+		_, skip := getUpstream(uid)
+		if skip {
+			// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
 			continue
 		}
@@ -104,22 +107,14 @@ func (s *ResourceGenerator) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.
 		clusterName := generatePeeredClusterName(uid, tbs)

-		// Also skip peer instances with a hostname as their address. EDS
-		// cannot resolve hostnames, so we provide them through CDS instead.
-		if _, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpointsUseHostnames[uid]; ok {
-			continue
-		}
-
-		endpoints, ok := cfgSnap.ConnectProxy.PeerUpstreamEndpoints.Get(uid)
-		if ok {
-			la := makeLoadAssignment(
-				clusterName,
-				[]loadAssignmentEndpointGroup{
-					{Endpoints: endpoints},
-				},
-				proxycfg.GatewayKey{ /*empty so it never matches*/ },
-			)
-			resources = append(resources, la)
+		loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, clusterName, uid)
+		if err != nil {
+			return nil, err
+		}
+
+		if loadAssignment != nil {
+			resources = append(resources, loadAssignment)
 		}
 	}
@@ -375,6 +370,7 @@ func (s *ResourceGenerator) endpointsFromSnapshotIngressGateway(cfgSnap *proxycf
 		es, err := s.endpointsFromDiscoveryChain(
 			uid,
 			cfgSnap.IngressGateway.DiscoveryChain[uid],
+			cfgSnap,
 			proxycfg.GatewayKey{Datacenter: cfgSnap.Datacenter, Partition: u.DestinationPartition},
 			u.Config,
 			cfgSnap.IngressGateway.WatchedUpstreamEndpoints[uid],
@@ -412,9 +408,38 @@ func makePipeEndpoint(path string) *envoy_endpoint_v3.LbEndpoint {
 	}
 }

+func (s *ResourceGenerator) makeUpstreamLoadAssignmentForPeerService(cfgSnap *proxycfg.ConfigSnapshot, clusterName string, uid proxycfg.UpstreamID) (*envoy_endpoint_v3.ClusterLoadAssignment, error) {
+	var la *envoy_endpoint_v3.ClusterLoadAssignment
+
+	upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+	if err != nil {
+		return la, err
+	}
+
+	// Also skip peer instances with a hostname as their address. EDS
+	// cannot resolve hostnames, so we provide them through CDS instead.
+	if _, ok := upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid]; ok {
+		return la, nil
+	}
+
+	endpoints, ok := upstreamsSnapshot.PeerUpstreamEndpoints.Get(uid)
+	if !ok {
+		return nil, nil
+	}
+	la = makeLoadAssignment(
+		clusterName,
+		[]loadAssignmentEndpointGroup{
+			{Endpoints: endpoints},
+		},
+		proxycfg.GatewayKey{ /*empty so it never matches*/ },
+	)
+	return la, nil
+}
+
 func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 	uid proxycfg.UpstreamID,
 	chain *structs.CompiledDiscoveryChain,
+	cfgSnap *proxycfg.ConfigSnapshot,
 	gatewayKey proxycfg.GatewayKey,
 	upstreamConfigMap map[string]interface{},
 	upstreamEndpoints map[string]structs.CheckServiceNodes,
@@ -432,6 +457,14 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 		upstreamConfigMap = make(map[string]interface{}) // TODO:needed?
 	}

+	upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
+
+	// Mesh gateways are exempt because upstreamsSnapshot is only used for
+	// cluster peering targets and transitive failover/redirects are unsupported.
+	if err != nil && !forMeshGateway {
+		return nil, fmt.Errorf("No upstream snapshot for gateway mode %q", cfgSnap.Kind)
+	}
+
 	var resources []proto.Message

 	var escapeHatchCluster *envoy_cluster_v3.Cluster
@@ -465,8 +498,15 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 		if node.Type != structs.DiscoveryGraphNodeTypeResolver {
 			continue
 		}
+		primaryTargetID := node.Resolver.Target
 		failover := node.Resolver.Failover

+		type targetLoadAssignmentOption struct {
+			targetID    string
+			clusterName string
+		}
+		var targetLoadAssignmentOptions []targetLoadAssignmentOption
+
 		var numFailoverTargets int
 		if failover != nil {
 			numFailoverTargets = len(failover.Targets)
@@ -474,66 +514,84 @@ func (s *ResourceGenerator) endpointsFromDiscoveryChain(
 		clusterNamePrefix := ""
 		if numFailoverTargets > 0 && !forMeshGateway {
 			clusterNamePrefix = failoverClusterNamePrefix
-			for _, failTargetID := range failover.Targets {
-				target := chain.Targets[failTargetID]
-				endpointGroup, valid := makeLoadAssignmentEndpointGroup(
-					chain.Targets,
-					upstreamEndpoints,
-					gatewayEndpoints,
-					failTargetID,
-					gatewayKey,
-					forMeshGateway,
-				)
-				if !valid {
-					continue // skip the failover target if we're still populating the snapshot
-				}
-
-				clusterName := CustomizeClusterName(target.Name, chain)
+			for _, targetID := range append([]string{primaryTargetID}, failover.Targets...) {
+				target := chain.Targets[targetID]
+				clusterName := target.Name
+				targetUID := proxycfg.NewUpstreamIDFromTargetID(targetID)
+				if targetUID.Peer != "" {
+					tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(targetUID.Peer)
+					// We can't generate a cluster for a peered target without its trust
+					// bundle. The trust bundle should be ready soon.
+					if !ok {
+						s.Logger.Debug("peer trust bundle not ready for discovery chain target",
+							"peer", targetUID.Peer,
+							"target", targetID,
+						)
+						continue
+					}
+
+					clusterName = generatePeeredClusterName(targetUID, tbs)
+				}
+				clusterName = CustomizeClusterName(clusterName, chain)
 				clusterName = failoverClusterNamePrefix + clusterName
 				if escapeHatchCluster != nil {
 					clusterName = escapeHatchCluster.Name
 				}

-				s.Logger.Debug("generating endpoints for", "cluster", clusterName)
-
-				la := makeLoadAssignment(
-					clusterName,
-					[]loadAssignmentEndpointGroup{endpointGroup},
-					gatewayKey,
-				)
-				resources = append(resources, la)
+				targetLoadAssignmentOptions = append(targetLoadAssignmentOptions, targetLoadAssignmentOption{
+					targetID:    targetID,
+					clusterName: clusterName,
+				})
 			}
-		}
-
-		targetID := node.Resolver.Target
-
-		target := chain.Targets[targetID]
-		clusterName := CustomizeClusterName(target.Name, chain)
-		clusterName = clusterNamePrefix + clusterName
-		if escapeHatchCluster != nil {
-			clusterName = escapeHatchCluster.Name
-		}
-		if forMeshGateway {
-			clusterName = meshGatewayExportedClusterNamePrefix + clusterName
-		}
-		s.Logger.Debug("generating endpoints for", "cluster", clusterName)
-		endpointGroup, valid := makeLoadAssignmentEndpointGroup(
-			chain.Targets,
-			upstreamEndpoints,
-			gatewayEndpoints,
-			targetID,
-			gatewayKey,
-			forMeshGateway,
-		)
-		if !valid {
-			continue // skip the cluster if we're still populating the snapshot
-		}
+		} else {
+			target := chain.Targets[primaryTargetID]
+			clusterName := CustomizeClusterName(target.Name, chain)
+			clusterName = clusterNamePrefix + clusterName
+			if escapeHatchCluster != nil {
+				clusterName = escapeHatchCluster.Name
+			}
+			if forMeshGateway {
+				clusterName = meshGatewayExportedClusterNamePrefix + clusterName
+			}
+			targetLoadAssignmentOptions = append(targetLoadAssignmentOptions, targetLoadAssignmentOption{
+				targetID:    primaryTargetID,
+				clusterName: clusterName,
+			})
+		}

-		la := makeLoadAssignment(
-			clusterName,
-			[]loadAssignmentEndpointGroup{endpointGroup},
-			gatewayKey,
-		)
-		resources = append(resources, la)
+		for _, targetInfo := range targetLoadAssignmentOptions {
+			s.Logger.Debug("generating endpoints for", "cluster", targetInfo.clusterName)
+			targetUID := proxycfg.NewUpstreamIDFromTargetID(targetInfo.targetID)
+			if targetUID.Peer != "" {
+				loadAssignment, err := s.makeUpstreamLoadAssignmentForPeerService(cfgSnap, targetInfo.clusterName, targetUID)
+				if err != nil {
+					return nil, err
+				}
+				if loadAssignment != nil {
+					resources = append(resources, loadAssignment)
+				}
+				continue
+			}
+
+			endpointGroup, valid := makeLoadAssignmentEndpointGroup(
+				chain.Targets,
+				upstreamEndpoints,
+				gatewayEndpoints,
+				targetInfo.targetID,
+				gatewayKey,
+				forMeshGateway,
+			)
+			if !valid {
+				continue // skip the cluster if we're still populating the snapshot
+			}
+
+			la := makeLoadAssignment(
+				targetInfo.clusterName,
+				[]loadAssignmentEndpointGroup{endpointGroup},
+				gatewayKey,
+			)
+			resources = append(resources, la)
+		}
 	}

 	return resources, nil
@@ -586,6 +644,7 @@ func (s *ResourceGenerator) makeExportedUpstreamEndpointsForMeshGateway(cfgSnap
 		clusterEndpoints, err := s.endpointsFromDiscoveryChain(
 			proxycfg.NewUpstreamIDFromServiceName(svc),
 			chain,
+			cfgSnap,
 			cfgSnap.Locality,
 			nil,
 			chainEndpoints,
@@ -640,11 +699,12 @@ func makeLoadAssignment(clusterName string, endpointGroups []loadAssignmentEndpo
 			healthStatus = endpointGroup.OverrideHealth
 		}

+		endpoint := &envoy_endpoint_v3.Endpoint{
+			Address: makeAddress(addr, port),
+		}
 		es = append(es, &envoy_endpoint_v3.LbEndpoint{
 			HostIdentifier: &envoy_endpoint_v3.LbEndpoint_Endpoint{
-				Endpoint: &envoy_endpoint_v3.Endpoint{
-					Address: makeAddress(addr, port),
-				},
+				Endpoint: endpoint,
 			},
 			HealthStatus:        healthStatus,
 			LoadBalancingWeight: makeUint32Value(weight),
View File

@@ -284,6 +284,12 @@ func TestEndpointsFromSnapshot(t *testing.T) {
 				return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover", nil, nil)
 			},
 		},
+		{
+			name: "connect-proxy-with-chain-and-failover-to-cluster-peer",
+			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+				return proxycfg.TestConfigSnapshotDiscoveryChain(t, "failover-to-cluster-peer", nil, nil)
+			},
+		},
 		{
 			name: "connect-proxy-with-tcp-chain-failover-through-remote-gateway",
 			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
@@ -396,6 +402,13 @@ func TestEndpointsFromSnapshot(t *testing.T) {
 					"failover", nil, nil, nil)
 			},
 		},
+		{
+			name: "ingress-with-chain-and-failover-to-cluster-peer",
+			create: func(t testinf.T) *proxycfg.ConfigSnapshot {
+				return proxycfg.TestConfigSnapshotIngressGateway(t, true, "tcp",
+					"failover-to-cluster-peer", nil, nil, nil)
+			},
+		},
 		{
 			name: "ingress-with-tcp-chain-failover-through-remote-gateway",
 			create: func(t testinf.T) *proxycfg.ConfigSnapshot {

View File

@@ -15,15 +15,40 @@ func TestFirstHealthyTarget(t *testing.T) {
 	warning := proxycfg.TestUpstreamNodesInStatus(t, "warning")
 	critical := proxycfg.TestUpstreamNodesInStatus(t, "critical")

-	warnOnlyPassingTarget := structs.NewDiscoveryTarget("all-warn", "", "default", "default", "dc1")
+	warnOnlyPassingTarget := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+		Service:    "all-warn",
+		Namespace:  "default",
+		Partition:  "default",
+		Datacenter: "dc1",
+	})
 	warnOnlyPassingTarget.Subset.OnlyPassing = true
-	failOnlyPassingTarget := structs.NewDiscoveryTarget("all-fail", "", "default", "default", "dc1")
+	failOnlyPassingTarget := structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+		Service:    "all-fail",
+		Namespace:  "default",
+		Partition:  "default",
+		Datacenter: "dc1",
+	})
 	failOnlyPassingTarget.Subset.OnlyPassing = true

 	targets := map[string]*structs.DiscoveryTarget{
-		"all-ok.default.dc1":           structs.NewDiscoveryTarget("all-ok", "", "default", "default", "dc1"),
-		"all-warn.default.dc1":         structs.NewDiscoveryTarget("all-warn", "", "default", "default", "dc1"),
-		"all-fail.default.default.dc1": structs.NewDiscoveryTarget("all-fail", "", "default", "default", "dc1"),
+		"all-ok.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:    "all-ok",
+			Namespace:  "default",
+			Partition:  "default",
+			Datacenter: "dc1",
+		}),
+		"all-warn.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:    "all-warn",
+			Namespace:  "default",
+			Partition:  "default",
+			Datacenter: "dc1",
+		}),
+		"all-fail.default.default.dc1": structs.NewDiscoveryTarget(structs.DiscoveryTargetOpts{
+			Service:    "all-fail",
+			Namespace:  "default",
+			Partition:  "default",
+			Datacenter: "dc1",
+		}),
 		"all-warn-onlypassing.default.dc1": warnOnlyPassingTarget,
 		"all-fail-onlypassing.default.dc1": failOnlyPassingTarget,
 	}
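This refactor swaps a five-argument positional constructor for an options struct, so call sites are self-documenting and new fields (such as a peer) can be added without breaking callers. A generic sketch of the API shape, with hypothetical types:

```go
package main

import "fmt"

type DiscoveryTargetOpts struct {
	Service       string
	ServiceSubset string
	Namespace     string
	Partition     string
	Datacenter    string
}

type DiscoveryTarget struct {
	Service    string
	Subset     string
	Namespace  string
	Partition  string
	Datacenter string
}

// NewDiscoveryTarget takes an options struct: field names document each
// argument, and omitted fields fall back to their zero values.
func NewDiscoveryTarget(opts DiscoveryTargetOpts) *DiscoveryTarget {
	return &DiscoveryTarget{
		Service:    opts.Service,
		Subset:     opts.ServiceSubset,
		Namespace:  opts.Namespace,
		Partition:  opts.Partition,
		Datacenter: opts.Datacenter,
	}
}

func main() {
	t := NewDiscoveryTarget(DiscoveryTargetOpts{
		Service:    "all-ok",
		Namespace:  "default",
		Partition:  "default",
		Datacenter: "dc1",
	})
	fmt.Printf("%+v\n", t)
}
```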

View File

@ -3,7 +3,6 @@ package xds
import ( import (
"errors" "errors"
"fmt" "fmt"
envoy_extensions_filters_listener_http_inspector_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/http_inspector/v3"
"net" "net"
"net/url" "net/url"
"regexp" "regexp"
@ -12,6 +11,8 @@ import (
"strings" "strings"
"time" "time"
envoy_extensions_filters_listener_http_inspector_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/http_inspector/v3"
envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" envoy_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
@ -107,6 +108,19 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
} }
} }
proxyCfg, err := ParseProxyConfig(cfgSnap.Proxy.Config)
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err)
}
var tracing *envoy_http_v3.HttpConnectionManager_Tracing
if proxyCfg.ListenerTracingJSON != "" {
if tracing, err = makeTracingFromUserConfig(proxyCfg.ListenerTracingJSON); err != nil {
s.Logger.Warn("failed to parse ListenerTracingJSON config", "error", err)
}
}
for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain { for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid] upstreamCfg := cfgSnap.ConnectProxy.UpstreamConfig[uid]
@ -153,6 +167,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
filterName: filterName, filterName: filterName,
protocol: cfg.Protocol, protocol: cfg.Protocol,
useRDS: useRDS, useRDS: useRDS,
tracing: tracing,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@ -178,6 +193,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
filterName: filterName, filterName: filterName,
protocol: cfg.Protocol, protocol: cfg.Protocol,
useRDS: useRDS, useRDS: useRDS,
tracing: tracing,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@ -249,6 +265,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
filterName: routeName, filterName: routeName,
protocol: svcConfig.Protocol, protocol: svcConfig.Protocol,
useRDS: true, useRDS: true,
tracing: tracing,
}) })
if err != nil { if err != nil {
return err return err
@ -265,6 +282,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
clusterName: clusterName, clusterName: clusterName,
filterName: clusterName, filterName: clusterName,
protocol: svcConfig.Protocol, protocol: svcConfig.Protocol,
tracing: tracing,
}) })
if err != nil { if err != nil {
return err return err
@ -376,6 +394,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
protocol: cfg.Protocol, protocol: cfg.Protocol,
useRDS: false, useRDS: false,
statPrefix: "upstream_peered.", statPrefix: "upstream_peered.",
tracing: tracing,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@ -533,6 +552,7 @@ func (s *ResourceGenerator) listenersFromSnapshotConnectProxy(cfgSnap *proxycfg.
filterName: uid.EnvoyID(), filterName: uid.EnvoyID(),
routeName: uid.EnvoyID(), routeName: uid.EnvoyID(),
protocol: cfg.Protocol, protocol: cfg.Protocol,
tracing: tracing,
}) })
if err != nil { if err != nil {
return nil, err return nil, err
@ -1188,12 +1208,20 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot
l = makePortListener(name, addr, port, envoy_core_v3.TrafficDirection_INBOUND) l = makePortListener(name, addr, port, envoy_core_v3.TrafficDirection_INBOUND)
var tracing *envoy_http_v3.HttpConnectionManager_Tracing
if cfg.ListenerTracingJSON != "" {
if tracing, err = makeTracingFromUserConfig(cfg.ListenerTracingJSON); err != nil {
s.Logger.Warn("failed to parse ListenerTracingJSON config", "error", err)
}
}
filterOpts := listenerFilterOpts{ filterOpts := listenerFilterOpts{
protocol: cfg.Protocol, protocol: cfg.Protocol,
filterName: name, filterName: name,
routeName: name, routeName: name,
cluster: LocalAppClusterName, cluster: LocalAppClusterName,
requestTimeoutMs: cfg.LocalRequestTimeoutMs, requestTimeoutMs: cfg.LocalRequestTimeoutMs,
tracing: tracing,
} }
if useHTTPFilter { if useHTTPFilter {
filterOpts.httpAuthzFilter, err = makeRBACHTTPFilter( filterOpts.httpAuthzFilter, err = makeRBACHTTPFilter(
@ -1214,16 +1242,38 @@ func (s *ResourceGenerator) makeInboundListener(cfgSnap *proxycfg.ConfigSnapshot
filterOpts.forwardClientPolicy = envoy_http_v3.HttpConnectionManager_APPEND_FORWARD filterOpts.forwardClientPolicy = envoy_http_v3.HttpConnectionManager_APPEND_FORWARD
} }
} }
// If an inbound connect limit is set, inject a connection limit filter on each chain.
if cfg.MaxInboundConnections > 0 {
connectionLimitFilter, err := makeConnectionLimitFilter(cfg.MaxInboundConnections)
if err != nil {
return nil, err
}
l.FilterChains = []*envoy_listener_v3.FilterChain{
{
Filters: []*envoy_listener_v3.Filter{
connectionLimitFilter,
},
},
}
}
filter, err := makeListenerFilter(filterOpts) filter, err := makeListenerFilter(filterOpts)
if err != nil { if err != nil {
return nil, err return nil, err
} }
l.FilterChains = []*envoy_listener_v3.FilterChain{
{ if len(l.FilterChains) > 0 {
Filters: []*envoy_listener_v3.Filter{ // The list of FilterChains has already been initialized
filter, l.FilterChains[0].Filters = append(l.FilterChains[0].Filters, filter)
} else {
l.FilterChains = []*envoy_listener_v3.FilterChain{
{
Filters: []*envoy_listener_v3.Filter{
filter,
},
}, },
}, }
} }
err = s.finalizePublicListenerFromConfig(l, cfgSnap, cfg, useHTTPFilter) err = s.finalizePublicListenerFromConfig(l, cfgSnap, cfg, useHTTPFilter)
@ -1249,17 +1299,6 @@ func (s *ResourceGenerator) finalizePublicListenerFromConfig(l *envoy_listener_v
return nil return nil
} }
// If an inbound connect limit is set, inject a connection limit filter on each chain.
if proxyCfg.MaxInboundConnections > 0 {
filter, err := makeConnectionLimitFilter(proxyCfg.MaxInboundConnections)
if err != nil {
return nil
}
for idx := range l.FilterChains {
l.FilterChains[idx].Filters = append(l.FilterChains[idx].Filters, filter)
}
}
return nil return nil
} }
@ -1299,6 +1338,7 @@ func (s *ResourceGenerator) makeExposedCheckListener(cfgSnap *proxycfg.ConfigSna
statPrefix: "", statPrefix: "",
routePath: path.Path, routePath: path.Path,
httpAuthzFilter: nil, httpAuthzFilter: nil,
// in the exposed check listener we don't set the tracing configuration
} }
f, err := makeListenerFilter(opts) f, err := makeListenerFilter(opts)
if err != nil { if err != nil {
@ -1531,6 +1571,19 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg.
filterChain.Filters = append(filterChain.Filters, authFilter) filterChain.Filters = append(filterChain.Filters, authFilter)
} }
proxyCfg, err := ParseProxyConfig(cfgSnap.Proxy.Config)
if err != nil {
// Don't hard fail on a config typo, just warn. The parse func returns
// default config if there is an error so it's safe to continue.
s.Logger.Warn("failed to parse Connect.Proxy.Config", "error", err)
}
var tracing *envoy_http_v3.HttpConnectionManager_Tracing
if proxyCfg.ListenerTracingJSON != "" {
if tracing, err = makeTracingFromUserConfig(proxyCfg.ListenerTracingJSON); err != nil {
s.Logger.Warn("failed to parse ListenerTracingJSON config", "error", err)
}
}
// Lastly we setup the actual proxying component. For L4 this is a straight // Lastly we setup the actual proxying component. For L4 this is a straight
// tcp proxy. For L7 this is a very hands-off HTTP proxy just to inject an // tcp proxy. For L7 this is a very hands-off HTTP proxy just to inject an
// HTTP filter to do intention checks here instead. // HTTP filter to do intention checks here instead.
@ -1541,6 +1594,7 @@ func (s *ResourceGenerator) makeFilterChainTerminatingGateway(cfgSnap *proxycfg.
cluster: tgtwyOpts.cluster, cluster: tgtwyOpts.cluster,
statPrefix: "upstream.", statPrefix: "upstream.",
routePath: "", routePath: "",
tracing: tracing,
} }
if useHTTPFilter { if useHTTPFilter {
@ -1787,6 +1841,7 @@ type filterChainOpts struct {
statPrefix string statPrefix string
forwardClientDetails bool forwardClientDetails bool
forwardClientPolicy envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails forwardClientPolicy envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails
tracing *envoy_http_v3.HttpConnectionManager_Tracing
} }
func (s *ResourceGenerator) makeUpstreamFilterChain(opts filterChainOpts) (*envoy_listener_v3.FilterChain, error) { func (s *ResourceGenerator) makeUpstreamFilterChain(opts filterChainOpts) (*envoy_listener_v3.FilterChain, error) {
@ -1802,6 +1857,7 @@ func (s *ResourceGenerator) makeUpstreamFilterChain(opts filterChainOpts) (*envo
statPrefix: opts.statPrefix, statPrefix: opts.statPrefix,
forwardClientDetails: opts.forwardClientDetails, forwardClientDetails: opts.forwardClientDetails,
forwardClientPolicy: opts.forwardClientPolicy, forwardClientPolicy: opts.forwardClientPolicy,
tracing: opts.tracing,
})
if err != nil {
return nil, err
@@ -1944,6 +2000,7 @@ type listenerFilterOpts struct {
httpAuthzFilter *envoy_http_v3.HttpFilter
forwardClientDetails bool
forwardClientPolicy envoy_http_v3.HttpConnectionManager_ForwardClientCertDetails
tracing *envoy_http_v3.HttpConnectionManager_Tracing
}

func makeListenerFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) {
@@ -1990,6 +2047,7 @@ func makeTCPProxyFilter(filterName, cluster, statPrefix string) (*envoy_listener
func makeConnectionLimitFilter(limit int) (*envoy_listener_v3.Filter, error) {
cfg := &envoy_connection_limit_v3.ConnectionLimit{
StatPrefix: "inbound_connection_limit",
MaxConnections: wrapperspb.UInt64(uint64(limit)),
}
return makeFilter("envoy.filters.network.connection_limit", cfg)
@@ -2002,6 +2060,19 @@ func makeStatPrefix(prefix, filterName string) string {
return fmt.Sprintf("%s%s", prefix, strings.Replace(filterName, ":", "_", -1))
}
func makeTracingFromUserConfig(configJSON string) (*envoy_http_v3.HttpConnectionManager_Tracing, error) {
// Type field is present so decode it as an any.Any
var any any.Any
if err := jsonpb.UnmarshalString(configJSON, &any); err != nil {
return nil, err
}
var t envoy_http_v3.HttpConnectionManager_Tracing
if err := proto.Unmarshal(any.Value, &t); err != nil {
return nil, err
}
return &t, nil
}
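As a usage illustration of the two-step decode above (JSON carrying an `@type` field is first decoded as a protobuf `Any`, whose value bytes are then unmarshaled into the `Tracing` message), here is a self-contained sketch; the `random_sampling` payload is an invented example, not taken from this diff:

```go
package main

import (
	"fmt"

	envoy_http_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/any"
)

func main() {
	// The @type field lets jsonpb resolve and decode the document as an Any.
	const configJSON = `{
	  "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing",
	  "random_sampling": {"value": 10}
	}`
	var a any.Any
	if err := jsonpb.UnmarshalString(configJSON, &a); err != nil {
		panic(err)
	}
	// Any.Value now holds the serialized Tracing message itself.
	var t envoy_http_v3.HttpConnectionManager_Tracing
	if err := proto.Unmarshal(a.Value, &t); err != nil {
		panic(err)
	}
	fmt.Println(t.GetRandomSampling().GetValue()) // 10
}
```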
func makeHTTPFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error) {
router, err := makeEnvoyHTTPFilter("envoy.filters.http.router", &envoy_http_router_v3.Router{})
if err != nil {
@@ -2022,6 +2093,10 @@ func makeHTTPFilter(opts listenerFilterOpts) (*envoy_listener_v3.Filter, error)
},
}
if opts.tracing != nil {
cfg.Tracing = opts.tracing
}
if opts.useRDS {
if opts.cluster != "" {
return nil, fmt.Errorf("cannot specify cluster name when using RDS")

View File

@@ -772,6 +772,15 @@ func TestListenersFromSnapshot(t *testing.T) {
name: "transparent-proxy-terminating-gateway", name: "transparent-proxy-terminating-gateway",
create: proxycfg.TestConfigSnapshotTransparentProxyTerminatingGatewayCatalogDestinationsOnly, create: proxycfg.TestConfigSnapshotTransparentProxyTerminatingGatewayCatalogDestinationsOnly,
}, },
{
name: "custom-trace-listener",
create: func(t testinf.T) *proxycfg.ConfigSnapshot {
return proxycfg.TestConfigSnapshot(t, func(ns *structs.NodeService) {
ns.Proxy.Config["protocol"] = "http"
ns.Proxy.Config["envoy_listener_tracing_json"] = customTraceJSON(t)
}, nil)
},
},
}

latestEnvoyVersion := proxysupport.EnvoyVersions[0]
@@ -947,6 +956,40 @@ func customHTTPListenerJSON(t testinf.T, opts customHTTPListenerJSONOptions) str
return buf.String()
}
func customTraceJSON(t testinf.T) string {
t.Helper()
return `
{
"@type" : "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing",
"provider" : {
"name" : "envoy.tracers.zipkin",
"typed_config" : {
"@type" : "type.googleapis.com/envoy.config.trace.v3.ZipkinConfig",
"collector_cluster" : "otelcolector",
"collector_endpoint" : "/api/v2/spans",
"collector_endpoint_version" : "HTTP_JSON",
"shared_span_context" : false
}
},
"custom_tags" : [
{
"tag" : "custom_header",
"request_header" : {
"name" : "x-custom-traceid",
"default_value" : ""
}
},
{
"tag" : "alloc_id",
"environment" : {
"name" : "NOMAD_ALLOC_ID"
}
}
]
}
`
}
type configFetcherFunc func() string

var _ ConfigFetcher = (configFetcherFunc)(nil)

View File

@@ -186,6 +186,18 @@ func (s *Server) Register(srv *grpc.Server) {
envoy_discovery_v3.RegisterAggregatedDiscoveryServiceServer(srv, s)
}
func (s *Server) authenticate(ctx context.Context) (acl.Authorizer, error) {
authz, err := s.ResolveToken(external.TokenFromContext(ctx))
if acl.IsErrNotFound(err) {
return nil, status.Errorf(codes.Unauthenticated, "unauthenticated: %v", err)
} else if acl.IsErrPermissionDenied(err) {
return nil, status.Error(codes.PermissionDenied, err.Error())
} else if err != nil {
return nil, status.Errorf(codes.Internal, "error resolving acl token: %v", err)
}
return authz, nil
}
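To make the error mapping concrete (unknown token → `Unauthenticated`, denied → `PermissionDenied`, anything else → `Internal`), here is a small standalone sketch using stand-in sentinel errors rather than Consul's real `acl` package predicates:

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Stand-ins for acl.IsErrNotFound / acl.IsErrPermissionDenied, purely
// for illustration.
var (
	errNotFound         = errors.New("ACL not found")
	errPermissionDenied = errors.New("Permission denied")
)

// grpcStatusFromACLError mirrors the branching in Server.authenticate.
func grpcStatusFromACLError(err error) error {
	switch {
	case errors.Is(err, errNotFound):
		return status.Errorf(codes.Unauthenticated, "unauthenticated: %v", err)
	case errors.Is(err, errPermissionDenied):
		return status.Error(codes.PermissionDenied, err.Error())
	case err != nil:
		return status.Errorf(codes.Internal, "error resolving acl token: %v", err)
	}
	return nil
}

func main() {
	fmt.Println(grpcStatusFromACLError(errNotFound))         // code = Unauthenticated
	fmt.Println(grpcStatusFromACLError(errPermissionDenied)) // code = PermissionDenied
}
```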
// authorize the xDS request using the token stored in ctx. This authorization is
// a bit different from most interfaces. Instead of explicitly authorizing or
// filtering each piece of data in the response, the request is authorized
@@ -201,13 +213,9 @@ func (s *Server) authorize(ctx context.Context, cfgSnap *proxycfg.ConfigSnapshot
return status.Errorf(codes.Unauthenticated, "unauthenticated: no config snapshot")
}
authz, err := s.ResolveToken(external.TokenFromContext(ctx))
if acl.IsErrNotFound(err) {
return status.Errorf(codes.Unauthenticated, "unauthenticated: %v", err)
} else if acl.IsErrPermissionDenied(err) {
return status.Error(codes.PermissionDenied, err.Error())
} else if err != nil {
return status.Errorf(codes.Internal, "error resolving acl token: %v", err)
authz, err := s.authenticate(ctx)
if err != nil {
return err
}
var authzContext acl.AuthorizerContext

Some files were not shown because too many files have changed in this diff.