commit 38b1a515f1 (parent eea2f652ed)
boruszak 2022-10-11 10:10:00 -05:00
444 changed files with 18356 additions and 7040 deletions

.changelog/12890.txt Normal file

@@ -0,0 +1,3 @@
```release-note:improvement
connect: service-router destinations have gained a `RetryOn` field for specifying the conditions under which Envoy should retry requests, in addition to the existing support for retrying on specific status codes and generic connection failures.
```
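For reference, a minimal service-router sketch using the new field might look like the following. It is not part of this commit; the surrounding schema follows the existing service-router config entry, and the condition names mirror Envoy's retry policies, so treat the exact values as illustrative:

```hcl
Kind = "service-router"
Name = "web"

Routes = [
  {
    Match {
      HTTP {
        PathPrefix = "/"
      }
    }

    Destination {
      Service    = "web"
      NumRetries = 3
      # New field: retry conditions beyond status codes and
      # generic connection failures.
      RetryOn = ["reset", "retriable-4xx"]
    }
  }
]
```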

.changelog/14356.txt Normal file

@@ -0,0 +1 @@
xds: configure Envoy `alpn_protocols` for connect-proxy and ingress-gateway based on service protocol.

.changelog/14527.txt Normal file

@@ -0,0 +1,3 @@
```release-note:improvement
ui: Improve guidance around topology visualisation
```

.changelog/14616.txt Normal file

@@ -0,0 +1,3 @@
```release-note:feature
connect: Add Envoy connection balancing configuration fields.
```
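A hedged sketch of the new balancing configuration in a service-defaults entry, based on the `BalanceInboundConnections` handling added to `ComputeResolvedServiceConfig` later in this commit; `exact_balance` is the value exercised by the new test:

```hcl
Kind     = "service-defaults"
Name     = "billing"
Protocol = "http"

# Flows into the proxy config map as "balance_inbound_connections"
# and instructs Envoy to balance inbound connections exactly across
# worker threads.
BalanceInboundConnections = "exact_balance"
```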

.changelog/14723.txt Normal file

@@ -0,0 +1,3 @@
```release-note:improvement
agent/hcp: add initial HashiCorp Cloud Platform integration
```

.changelog/14724.txt Normal file

@@ -0,0 +1,3 @@
```release-note:feature
peering: Add support for stale queries for trust bundle lookups
```

.changelog/14747.txt Normal file

@@ -0,0 +1,3 @@
```release-note:improvement
peering: return information about the health of the peering when the leader is queried to read a peering.
```

.changelog/14749.txt Normal file

@@ -0,0 +1,3 @@
```release-note:feature
config-entry(ingress-gateway): Added support for `max_connections` for upstream clusters
```
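A sketch of how the new limit might be expressed in an ingress-gateway config entry; the placement under a `Defaults` block is an assumption carried over from later Consul releases, not taken from this diff:

```hcl
Kind = "ingress-gateway"
Name = "ingress-gateway"

# Assumed placement: default limits applied to all upstream clusters.
Defaults {
  MaxConnections = 4096
}

Listeners = [
  {
    Port     = 8080
    Protocol = "http"
    Services = [{ Name = "web" }]
  }
]
```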

.changelog/14751.txt Normal file

@@ -0,0 +1,3 @@
```release-note:bug
connect: Fixed a bug where transparent proxy did not correctly spawn listeners for upstreams that target service-resolvers.
```

.changelog/14796.txt Normal file

@@ -0,0 +1,3 @@
```release-note:improvement
peering: require TLS for peering connections, using a server certificate signed by the Connect CA
```

.changelog/14797.txt Normal file

@@ -0,0 +1,3 @@
```release-note:feature
peering: Ensure un-exported services get deleted even if the un-export happens while cluster peering replication is down.
```

.changelog/14811.txt Normal file

@@ -0,0 +1,3 @@
```release-note:feature
DNS-proxy support via gRPC request.
```

.changelog/14817.txt Normal file

@@ -0,0 +1,3 @@
```release-note:feature
peering: Add mesh gateway local mode support for cluster peering.
```
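For context, a proxy-defaults sketch of local mode; the config-entry shape is standard, and applying it to traffic bound for cluster-peered upstreams is what this change enables:

```hcl
Kind = "proxy-defaults"
Name = "global"

MeshGateway {
  # With this change, "local" mode also routes traffic to
  # cluster-peered upstreams through a mesh gateway in the
  # local datacenter.
  Mode = "local"
}
```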

.changelog/14831.txt Normal file

@@ -0,0 +1,3 @@
```release-note:improvement
connect: Bump Envoy 1.20 to 1.20.7, 1.21 to 1.21.5 and 1.22 to 1.22.5
```

.changelog/14854.txt Normal file

@@ -0,0 +1,3 @@
```release-note:breaking-change
peering: Rename `PeerName` to `Peer` on prepared queries and exported services.
```

.changelog/14869.txt Normal file

@@ -0,0 +1,3 @@
```release-note:bug
grpc: Merge proxy-defaults and service-defaults in GetEnvoyBootstrapParams response.
```

.changelog/14873.txt Normal file

@@ -0,0 +1,3 @@
```release-note:feature
telemetry: emit memberlist size metrics and broadcast queue depth metric.
```

.changelog/14885.txt Normal file

@@ -0,0 +1,4 @@
```release-note:bug
checks: Fixed a bug that prevented registration of UDP health checks from agent configuration files, such as service definition files with embedded health check definitions.
```
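The new builder test later in this commit registers a UDP check with `UDP: "localhost:53"` and a 5s interval; in a service definition file that corresponds roughly to the following sketch (the `timeout` field is an assumption, mirroring TCP checks):

```hcl
service {
  name = "dns"
  port = 53

  check {
    id       = "dns-udp"
    udp      = "localhost:53"
    interval = "5s"
    timeout  = "2s" # assumed; mirrors TCP check timeouts
  }
}
```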

.changelog/14903.txt Normal file

@@ -0,0 +1,3 @@
```release-note:feature
ui: Removed reference to node name on service instance page when using agentless
```

View File

@@ -24,9 +24,9 @@ references:
     VAULT_BINARY_VERSION: 1.9.4
     GO_VERSION: 1.18.1
     envoy-versions: &supported_envoy_versions
-      - &default_envoy_version "1.20.6"
-      - "1.21.4"
-      - "1.22.2"
+      - &default_envoy_version "1.20.7"
+      - "1.21.5"
+      - "1.22.5"
       - "1.23.1"
     nomad-versions: &supported_nomad_versions
       - &default_nomad_version "1.3.3"

View File

@@ -6,7 +6,7 @@ set -uo pipefail
 ### It is still up to the reviewer to make sure that any tests added are needed and meaningful.

 # search for any "new" or modified metric emissions
-metrics_modified=$(git --no-pager diff origin/main...HEAD | grep -i "SetGauge\|EmitKey\|IncrCounter\|AddSample\|MeasureSince\|UpdateFilter")
+metrics_modified=$(git --no-pager diff origin/main...HEAD | grep -i "SetGauge\|EmitKey\|IncrCounter\|AddSample\|MeasureSince\|UpdateFilter" | grep "^[+-]")
 # search for PR body or title metric references
 metrics_in_pr_body=$(echo "${PR_BODY-""}" | grep -i "metric")
 metrics_in_pr_title=$(echo "${PR_TITLE-""}" | grep -i "metric")
@@ -15,7 +15,7 @@ metrics_in_pr_title=$(echo "${PR_TITLE-""}" | grep -i "metric")
 if [ "$metrics_modified" ] || [ "$metrics_in_pr_body" ] || [ "$metrics_in_pr_title" ]; then
     # need to check if there are modifications to metrics_test
     test_files_regex="*_test.go"
-    modified_metrics_test_files=$(git --no-pager diff HEAD "$(git merge-base HEAD "origin/main")" -- "$test_files_regex" | grep -i "metric")
+    modified_metrics_test_files=$(git --no-pager diff HEAD "$(git merge-base HEAD "origin/main")" -- "$test_files_regex" | grep -i "metric" | grep "^[+-]")
     if [ "$modified_metrics_test_files" ]; then
         # 1 happy path: metrics_test has been modified bc we modified metrics behavior
         echo "PR seems to modify metrics behavior. It seems it may have added tests to the metrics as well."

View File

@@ -25,11 +25,9 @@ jobs:
     fetch-depth: 0 # by default the checkout action doesn't checkout all branches
 - name: Check for changelog entry in diff
   run: |
-    pull_request_base_main=$(expr "${{ github.event.pull_request.base.ref }}" = "main")
     # check if there is a diff in the .changelog directory
     # for PRs against the main branch, the changelog file name should match the PR number
-    if [ pull_request_base_main ]; then
+    if [ "${{ github.event.pull_request.base.ref }}" = "${{ github.event.repository.default_branch }}" ]; then
       enforce_matching_pull_request_number="matching this PR number "
       changelog_file_path=".changelog/${{ github.event.pull_request.number }}.txt"
     else

View File

@@ -275,3 +275,16 @@ event "post-publish-website" {
   notification {
     on = "always"
   }
 }
+
+event "update-ironbank" {
+  depends = ["post-publish-website"]
+  action "update-ironbank" {
+    organization = "hashicorp"
+    repository   = "crt-workflows-common"
+    workflow     = "update-ironbank"
+  }
+
+  notification {
+    on = "fail"
+  }
+}

View File

@@ -6,6 +6,10 @@ BUG FIXES:
 ## 1.13.2 (September 20, 2022)

+BREAKING CHANGES:
+
+* ca: If using Vault as the service mesh CA provider, the Vault policy used by Consul now requires the `update` capability on the intermediate PKI's tune mount configuration endpoint, such as `/sys/mounts/connect_inter/tune`. The breaking nature of this change will be resolved in an upcoming 1.13 patch release. Refer to [upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#modify-vault-policy-for-vault-ca-provider) for more information.
+
 SECURITY:

 * auto-config: Added input validation for auto-config JWT authorization checks. Prior to this change, it was possible for malicious actors to construct requests which incorrectly pass custom JWT claim validation for the `AutoConfig.InitialConfiguration` endpoint. Now, only a subset of characters are allowed for the input before evaluating the bexpr. [[GH-14577](https://github.com/hashicorp/consul/issues/14577)]
@@ -48,6 +52,10 @@ BUG FIXES:
 ## 1.12.5 (September 20, 2022)

+BREAKING CHANGES:
+
+* ca: If using Vault as the service mesh CA provider, the Vault policy used by Consul now requires the `update` capability on the intermediate PKI's tune mount configuration endpoint, such as `/sys/mounts/connect_inter/tune`. The breaking nature of this change will be resolved in an upcoming 1.12 patch release. Refer to [upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#modify-vault-policy-for-vault-ca-provider) for more information.
+
 SECURITY:

 * auto-config: Added input validation for auto-config JWT authorization checks. Prior to this change, it was possible for malicious actors to construct requests which incorrectly pass custom JWT claim validation for the `AutoConfig.InitialConfiguration` endpoint. Now, only a subset of characters are allowed for the input before evaluating the bexpr. [[GH-14577](https://github.com/hashicorp/consul/issues/14577)]
@@ -72,6 +80,10 @@ BUG FIXES:
 ## 1.11.9 (September 20, 2022)

+BREAKING CHANGES:
+
+* ca: If using Vault as the service mesh CA provider, the Vault policy used by Consul now requires the `update` capability on the intermediate PKI's tune mount configuration endpoint, such as `/sys/mounts/connect_inter/tune`. The breaking nature of this change will be resolved in an upcoming 1.11 patch release. Refer to [upgrade guidance](https://www.consul.io/docs/upgrading/upgrade-specific#modify-vault-policy-for-vault-ca-provider) for more information.
+
 SECURITY:

 * auto-config: Added input validation for auto-config JWT authorization checks. Prior to this change, it was possible for malicious actors to construct requests which incorrectly pass custom JWT claim validation for the `AutoConfig.InitialConfiguration` endpoint. Now, only a subset of characters are allowed for the input before evaluating the bexpr. [[GH-14577](https://github.com/hashicorp/consul/issues/14577)]

View File

@@ -14,6 +14,8 @@ PROTOC_GEN_GO_GRPC_VERSION="v1.2.0"
 MOG_VERSION='v0.3.0'
 PROTOC_GO_INJECT_TAG_VERSION='v1.3.0'

+MOCKED_PB_DIRS= pbdns
+
 GOTAGS ?=
 GOPATH=$(shell go env GOPATH)
 GOARCH?=$(shell go env GOARCH)
@@ -401,9 +403,20 @@ else
 endif

 .PHONY: proto
-proto: proto-tools
-	@$(SHELL) $(CURDIR)/build-support/scripts/protobuf.sh
+proto: proto-tools proto-gen proto-mocks
+
+.PHONY: proto-gen
+proto-gen: proto-tools
+	@$(SHELL) $(CURDIR)/build-support/scripts/protobuf.sh
+
+.PHONY: proto-mocks
+proto-mocks:
+	for dir in $(MOCKED_PB_DIRS) ; do \
+		cd proto-public && \
+		rm -f $$dir/mock*.go && \
+		mockery --dir $$dir --inpackage --all --recursive --log-level trace ; \
+	done

 .PHONY: proto-format
 proto-format: proto-tools
 	@buf format -w

View File

@@ -17,10 +17,10 @@ Consul provides several key features:
 * **Multi-Datacenter** - Consul is built to be datacenter aware, and can
   support any number of regions without complex configuration.

-* **Service Mesh/Service Segmentation** - Consul Connect enables secure service-to-service
+* **Service Mesh** - Consul Service Mesh enables secure service-to-service
   communication with automatic TLS encryption and identity-based authorization. Applications
   can use sidecar proxies in a service mesh configuration to establish TLS
-  connections for inbound and outbound connections without being aware of Connect at all.
+  connections for inbound and outbound connections with Transparent Proxy.

 * **Service Discovery** - Consul makes it simple for services to register
   themselves and to discover other services via a DNS or HTTP interface.
@@ -37,7 +37,7 @@ Consul provides several key features:
 Consul runs on Linux, macOS, FreeBSD, Solaris, and Windows and includes an
 optional [browser based UI](https://demo.consul.io). A commercial version
-called [Consul Enterprise](https://www.hashicorp.com/products/consul) is also
+called [Consul Enterprise](https://www.consul.io/docs/enterprise) is also
 available.

 **Please note**: We take Consul's security and our users' trust very seriously. If you
@@ -52,12 +52,11 @@ A few quick start guides are available on the Consul website:
 * **Minikube install:** https://learn.hashicorp.com/tutorials/consul/kubernetes-minikube
 * **Kind install:** https://learn.hashicorp.com/tutorials/consul/kubernetes-kind
 * **Kubernetes install:** https://learn.hashicorp.com/tutorials/consul/kubernetes-deployment-guide
+* **Deploy HCP Consul:** https://learn.hashicorp.com/tutorials/consul/hcp-gs-deploy

 ## Documentation

-Full, comprehensive documentation is available on the Consul website:
-https://www.consul.io/docs
+Full, comprehensive documentation is available on the Consul website: https://consul.io/docs

 ## Contributing

View File

@@ -24,9 +24,11 @@ import (
 	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/hcp-scada-provider/capability"
 	"github.com/hashicorp/raft"
 	"github.com/hashicorp/serf/serf"
 	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/h2c"
 	"google.golang.org/grpc"

 	"github.com/hashicorp/consul/acl"
@@ -40,6 +42,9 @@ import (
 	"github.com/hashicorp/consul/agent/consul/servercert"
 	"github.com/hashicorp/consul/agent/dns"
 	external "github.com/hashicorp/consul/agent/grpc-external"
+	grpcDNS "github.com/hashicorp/consul/agent/grpc-external/services/dns"
+	"github.com/hashicorp/consul/agent/hcp/scada"
+	libscada "github.com/hashicorp/consul/agent/hcp/scada"
 	"github.com/hashicorp/consul/agent/local"
 	"github.com/hashicorp/consul/agent/proxycfg"
 	proxycfgglue "github.com/hashicorp/consul/agent/proxycfg-glue"
@@ -382,6 +387,10 @@ type Agent struct {
 	// xdsServer serves the XDS protocol for configuring Envoy proxies.
 	xdsServer *xds.Server

+	// scadaProvider is set when HashiCorp Cloud Platform integration is configured and exposes the agent's API over
+	// an encrypted session to HCP
+	scadaProvider scada.Provider
+
 	// enterpriseAgent embeds fields that we only access in consul-enterprise builds
 	enterpriseAgent
 }
@@ -428,6 +437,7 @@ func New(bd BaseDeps) (*Agent, error) {
 		config:         bd.RuntimeConfig,
 		cache:          bd.Cache,
 		routineManager: routine.NewManager(bd.Logger),
+		scadaProvider:  bd.HCP.Provider,
 	}

 	// TODO: create rpcClientHealth in BaseDeps once NetRPC is available without Agent
@@ -769,6 +779,17 @@ func (a *Agent) Start(ctx context.Context) error {
 		}()
 	}

+	if a.scadaProvider != nil {
+		a.scadaProvider.UpdateMeta(map[string]string{
+			"consul_server_id": string(a.config.NodeID),
+		})
+
+		if err = a.scadaProvider.Start(); err != nil {
+			a.baseDeps.Logger.Error("scada provider failed to start, some HashiCorp Cloud Platform functionality has been disabled",
+				"error", err, "resource_id", a.config.Cloud.ResourceID)
+		}
+	}
+
 	return nil
 }
@@ -900,6 +921,15 @@ func (a *Agent) listenAndServeDNS() error {
 		}(addr)
 	}

+	s, _ := NewDNSServer(a)
+
+	grpcDNS.NewServer(grpcDNS.Config{
+		Logger:      a.logger.Named("grpc-api.dns"),
+		DNSServeMux: s.mux,
+		LocalAddr:   grpcDNS.LocalAddr{IP: net.IPv4(127, 0, 0, 1), Port: a.config.GRPCPort},
+	}).Register(a.externalGRPCServer)
+
+	a.dnsServers = append(a.dnsServers, s)
+
 	// wait for servers to be up
 	timeout := time.After(time.Second)
@@ -954,6 +984,12 @@ func (a *Agent) startListeners(addrs []net.Addr) ([]net.Listener, error) {
 			}
 			l = &tcpKeepAliveListener{l.(*net.TCPListener)}

+		case *capability.Addr:
+			l, err = a.scadaProvider.Listen(x.Capability())
+			if err != nil {
+				return nil, err
+			}
+
 		default:
 			closeAll()
 			return nil, fmt.Errorf("unsupported address type %T", addr)
@@ -1011,6 +1047,11 @@ func (a *Agent) listenHTTP() ([]apiServer, error) {
 			MaxHeaderBytes: a.config.HTTPMaxHeaderBytes,
 		}

+		if libscada.IsCapability(l.Addr()) {
+			// wrap in http2 server handler
+			httpServer.Handler = h2c.NewHandler(srv.handler(a.config.EnableDebug), &http2.Server{})
+		}
+
 		// Load the connlimit helper into the server
 		connLimitFn := a.httpConnLimiter.HTTPConnStateFuncWithDefault429Handler(10 * time.Millisecond)
@@ -1027,7 +1068,12 @@
 		return nil
 	}

-	if err := start("http", a.config.HTTPAddrs); err != nil {
+	httpAddrs := a.config.HTTPAddrs
+	if a.config.IsCloudEnabled() {
+		httpAddrs = append(httpAddrs, scada.CAPCoreAPI)
+	}
+
+	if err := start("http", httpAddrs); err != nil {
 		closeListeners(ln)
 		return nil, err
 	}
@@ -1582,6 +1628,11 @@ func (a *Agent) ShutdownAgent() error {
 	a.rpcClientHealth.Close()

+	// Shutdown SCADA provider
+	if a.scadaProvider != nil {
+		a.scadaProvider.Stop()
+	}
+
 	var err error
 	if a.delegate != nil {
 		err = a.delegate.Shutdown()
@@ -4187,6 +4238,7 @@ func (a *Agent) registerCache() {
 	a.cache.RegisterType(cachetype.CompiledDiscoveryChainName, &cachetype.CompiledDiscoveryChain{RPC: a})
 	a.cache.RegisterType(cachetype.GatewayServicesName, &cachetype.GatewayServices{RPC: a})
 	a.cache.RegisterType(cachetype.ServiceGatewaysName, &cachetype.ServiceGateways{RPC: a})
 	a.cache.RegisterType(cachetype.ConfigEntryListName, &cachetype.ConfigEntryList{RPC: a})
@@ -4206,6 +4258,8 @@ func (a *Agent) registerCache() {
 	a.cache.RegisterType(cachetype.PeeredUpstreamsName, &cachetype.PeeredUpstreams{RPC: a})

+	a.cache.RegisterType(cachetype.PeeringListName, &cachetype.Peerings{Client: a.rpcClientPeering})
+
 	a.registerEntCache()
 }
@@ -4320,6 +4374,7 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources {
 		InternalServiceDump:   proxycfgglue.CacheInternalServiceDump(a.cache),
 		LeafCertificate:       proxycfgglue.CacheLeafCertificate(a.cache),
 		PeeredUpstreams:       proxycfgglue.CachePeeredUpstreams(a.cache),
+		PeeringList:           proxycfgglue.CachePeeringList(a.cache),
 		PreparedQuery:         proxycfgglue.CachePrepraredQuery(a.cache),
 		ResolvedServiceConfig: proxycfgglue.CacheResolvedServiceConfig(a.cache),
 		ServiceList:           proxycfgglue.CacheServiceList(a.cache),
@@ -4348,6 +4403,7 @@ func (a *Agent) proxyDataSources() proxycfg.DataSources {
 	sources.IntentionUpstreams = proxycfgglue.ServerIntentionUpstreams(deps)
 	sources.IntentionUpstreamsDestination = proxycfgglue.ServerIntentionUpstreamsDestination(deps)
 	sources.InternalServiceDump = proxycfgglue.ServerInternalServiceDump(deps, proxycfgglue.CacheInternalServiceDump(a.cache))
+	sources.PeeringList = proxycfgglue.ServerPeeringList(deps)
 	sources.PeeredUpstreams = proxycfgglue.ServerPeeredUpstreams(deps)
 	sources.ResolvedServiceConfig = proxycfgglue.ServerResolvedServiceConfig(deps, proxycfgglue.CacheResolvedServiceConfig(a.cache))
 	sources.ServiceList = proxycfgglue.ServerServiceList(deps, proxycfgglue.CacheServiceList(a.cache))

View File

@@ -1443,8 +1443,8 @@
 			}
 			ports = {
 				grpc = -1
-			}
-			`,
+				grpc_tls = -1
+			}`,
 			expectXDS: false,
 			grpcTLS:   false,
 		},
@@ -1453,7 +1453,9 @@
 			node_meta {
 				somekey = "somevalue"
 			}
-			`,
+			ports = {
+				grpc_tls = -1
+			}`,
 			expectXDS: true,
 			grpcTLS:   false,
 		},
@@ -1461,8 +1463,7 @@
 			hcl: `
 			node_meta {
 				somekey = "somevalue"
-			}
-			`,
+			}`,
 			expectXDS: true,
 			grpcTLS:   true,
 		},

View File

@@ -29,9 +29,11 @@ import (
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"github.com/google/tcpproxy"
 	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/hcp-scada-provider/capability"
 	"github.com/hashicorp/serf/coordinate"
 	"github.com/hashicorp/serf/serf"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/sync/errgroup"
 	"google.golang.org/grpc"
@@ -43,6 +45,8 @@ import (
 	"github.com/hashicorp/consul/agent/config"
 	"github.com/hashicorp/consul/agent/connect"
 	"github.com/hashicorp/consul/agent/consul"
+	"github.com/hashicorp/consul/agent/hcp"
+	"github.com/hashicorp/consul/agent/hcp/scada"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/agent/token"
 	"github.com/hashicorp/consul/api"
@@ -6049,6 +6053,65 @@ peering {
 	})
 }

+func TestAgent_startListeners_scada(t *testing.T) {
+	t.Parallel()
+	pvd := scada.NewMockProvider(t)
+	c := capability.NewAddr("testcap")
+	pvd.EXPECT().Listen(c.Capability()).Return(nil, nil).Once()
+	bd := BaseDeps{
+		Deps: consul.Deps{
+			Logger:       hclog.NewInterceptLogger(nil),
+			Tokens:       new(token.Store),
+			GRPCConnPool: &fakeGRPCConnPool{},
+			HCP: hcp.Deps{
+				Provider: pvd,
+			},
+		},
+		RuntimeConfig: &config.RuntimeConfig{},
+		Cache:         cache.New(cache.Options{}),
+	}
+
+	bd, err := initEnterpriseBaseDeps(bd, nil)
+	require.NoError(t, err)
+
+	agent, err := New(bd)
+	require.NoError(t, err)
+
+	_, err = agent.startListeners([]net.Addr{c})
+	require.NoError(t, err)
+}
+
+func TestAgent_scadaProvider(t *testing.T) {
+	pvd := scada.NewMockProvider(t)
+
+	// this listener is used when mocking out the scada provider
+	l, err := net.Listen("tcp4", fmt.Sprintf("127.0.0.1:%d", freeport.GetOne(t)))
+	require.NoError(t, err)
+	defer require.NoError(t, l.Close())
+
+	pvd.EXPECT().UpdateMeta(mock.Anything).Once()
+	pvd.EXPECT().Start().Return(nil).Once()
+	pvd.EXPECT().Listen(scada.CAPCoreAPI.Capability()).Return(l, nil).Once()
+	pvd.EXPECT().Stop().Return(nil).Once()
+	pvd.EXPECT().SessionStatus().Return("test")
+	a := TestAgent{
+		OverrideDeps: func(deps *BaseDeps) {
+			deps.HCP.Provider = pvd
+		},
+		Overrides: `
+cloud {
+  resource_id = "organization/0b9de9a3-8403-4ca6-aba8-fca752f42100/project/0b9de9a3-8403-4ca6-aba8-fca752f42100/consul.cluster/0b9de9a3-8403-4ca6-aba8-fca752f42100"
+  client_id = "test"
+  client_secret = "test"
+}`,
+	}
+	defer a.Shutdown()
+	require.NoError(t, a.Start(t))
+
+	_, err = api.NewClient(&api.Config{Address: l.Addr().String()})
+	require.NoError(t, err)
+}
+
 func getExpectedCaPoolByFile(t *testing.T) *x509.CertPool {
 	pool := x509.NewCertPool()
 	data, err := ioutil.ReadFile("../test/ca/root.cer")

View File

@@ -0,0 +1,63 @@
// Code generated by mockery v2.14.0. DO NOT EDIT.

package cachetype

import (
	context "context"

	grpc "google.golang.org/grpc"

	mock "github.com/stretchr/testify/mock"

	pbpeering "github.com/hashicorp/consul/proto/pbpeering"
)

// MockPeeringLister is an autogenerated mock type for the PeeringLister type
type MockPeeringLister struct {
	mock.Mock
}

// PeeringList provides a mock function with given fields: ctx, in, opts
func (_m *MockPeeringLister) PeeringList(ctx context.Context, in *pbpeering.PeeringListRequest, opts ...grpc.CallOption) (*pbpeering.PeeringListResponse, error) {
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, in)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	var r0 *pbpeering.PeeringListResponse
	if rf, ok := ret.Get(0).(func(context.Context, *pbpeering.PeeringListRequest, ...grpc.CallOption) *pbpeering.PeeringListResponse); ok {
		r0 = rf(ctx, in, opts...)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*pbpeering.PeeringListResponse)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *pbpeering.PeeringListRequest, ...grpc.CallOption) error); ok {
		r1 = rf(ctx, in, opts...)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

type mockConstructorTestingTNewMockPeeringLister interface {
	mock.TestingT
	Cleanup(func())
}

// NewMockPeeringLister creates a new instance of MockPeeringLister. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewMockPeeringLister(t mockConstructorTestingTNewMockPeeringLister) *MockPeeringLister {
	mock := &MockPeeringLister{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}

View File

@@ -0,0 +1,107 @@
package cachetype

import (
	"context"
	"fmt"
	"strconv"
	"time"

	external "github.com/hashicorp/consul/agent/grpc-external"
	"github.com/hashicorp/consul/proto/pbpeering"
	"github.com/mitchellh/hashstructure"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/hashicorp/consul/agent/cache"
	"github.com/hashicorp/consul/agent/structs"
)

// PeeringListName is the recommended name for registration.
const PeeringListName = "peers"

type PeeringListRequest struct {
	Request *pbpeering.PeeringListRequest
	structs.QueryOptions
}

func (r *PeeringListRequest) CacheInfo() cache.RequestInfo {
	info := cache.RequestInfo{
		Token:          r.Token,
		Datacenter:     "",
		MinIndex:       0,
		Timeout:        0,
		MustRevalidate: false,

		// OPTIMIZE(peering): Cache.notifyPollingQuery polls at this interval. We need to revisit how that polling works.
		// Using an exponential backoff when the result hasn't changed may be preferable.
		MaxAge: 1 * time.Second,
	}

	v, err := hashstructure.Hash([]interface{}{
		r.Request.Partition,
	}, nil)
	if err == nil {
		// If there is an error, we don't set the key. A blank key forces
		// no cache for this request so the request is forwarded directly
		// to the server.
		info.Key = strconv.FormatUint(v, 10)
	}

	return info
}

// Peerings supports fetching the list of peers for a given partition or wildcard-specifier.
type Peerings struct {
	RegisterOptionsNoRefresh
	Client PeeringLister
}

//go:generate mockery --name PeeringLister --inpackage --filename mock_PeeringLister_test.go
type PeeringLister interface {
	PeeringList(
		ctx context.Context, in *pbpeering.PeeringListRequest, opts ...grpc.CallOption,
	) (*pbpeering.PeeringListResponse, error)
}

func (t *Peerings) Fetch(_ cache.FetchOptions, req cache.Request) (cache.FetchResult, error) {
	var result cache.FetchResult

	// The request should be a PeeringListRequest.
	// We do not need to make a copy of this request type like in other cache types
	// because the RequestInfo is synthetic.
	reqReal, ok := req.(*PeeringListRequest)
	if !ok {
		return result, fmt.Errorf(
			"Internal cache failure: request wrong type: %T", req)
	}

	// Always allow stale - there's no point in hitting leader if the request is
	// going to be served from cache and end up arbitrarily stale anyway. This
	// allows cached service-discover to automatically read scale across all
	// servers too.
	reqReal.QueryOptions.SetAllowStale(true)

	ctx, err := external.ContextWithQueryOptions(context.Background(), reqReal.QueryOptions)
	if err != nil {
		return result, err
	}

	// Fetch
	reply, err := t.Client.PeeringList(ctx, reqReal.Request)
	if err != nil {
		// Return an empty result if the error is due to peering being disabled.
		// This allows mesh gateways to receive an update and confirm that the watch is set.
		if e, ok := status.FromError(err); ok && e.Code() == codes.FailedPrecondition {
			result.Index = 1
			result.Value = &pbpeering.PeeringListResponse{}
			return result, nil
		}
		return result, err
	}

	result.Value = reply
	result.Index = reply.Index

	return result, nil
}

View File

@@ -0,0 +1,131 @@
package cachetype

import (
	"context"
	"testing"
	"time"

	"github.com/mitchellh/copystructure"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	grpcstatus "google.golang.org/grpc/status"

	"github.com/hashicorp/consul/agent/cache"
	"github.com/hashicorp/consul/proto/pbpeering"
)

func TestPeerings(t *testing.T) {
	client := NewMockPeeringLister(t)

	typ := &Peerings{Client: client}

	resp := &pbpeering.PeeringListResponse{
		Index: 48,
		Peerings: []*pbpeering.Peering{
			{
				Name:                "peer1",
				ID:                  "8ac403cf-6834-412f-9dfe-0ac6e69bd89f",
				PeerServerAddresses: []string{"1.2.3.4"},
				State:               pbpeering.PeeringState_ACTIVE,
			},
		},
	}

	// Expect the proper call.
	// This also returns the canned response above.
	client.On("PeeringList", mock.Anything, mock.Anything).
		Return(resp, nil)

	// Fetch and assert against the result.
	result, err := typ.Fetch(cache.FetchOptions{}, &PeeringListRequest{
		Request: &pbpeering.PeeringListRequest{},
	})
	require.NoError(t, err)
	require.Equal(t, cache.FetchResult{
		Value: resp,
		Index: 48,
	}, result)
}

func TestPeerings_PeeringDisabled(t *testing.T) {
	client := NewMockPeeringLister(t)

	typ := &Peerings{Client: client}

	var resp *pbpeering.PeeringListResponse

	// Expect the proper call, but return the peering disabled error
	client.On("PeeringList", mock.Anything, mock.Anything).
		Return(resp, grpcstatus.Error(codes.FailedPrecondition, "peering must be enabled to use this endpoint"))

	// Fetch and assert against the result.
	result, err := typ.Fetch(cache.FetchOptions{}, &PeeringListRequest{
		Request: &pbpeering.PeeringListRequest{},
	})
	require.NoError(t, err)
	require.NotNil(t, result)
	require.EqualValues(t, 1, result.Index)
	require.NotNil(t, result.Value)
}

func TestPeerings_badReqType(t *testing.T) {
	client := pbpeering.NewPeeringServiceClient(nil)

	typ := &Peerings{Client: client}

	// Fetch
	_, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest(
		t, cache.RequestInfo{Key: "foo", MinIndex: 64}))
	require.Error(t, err)
	require.Contains(t, err.Error(), "wrong type")
}

// This test asserts that we can continuously poll this cache type, given that it doesn't support blocking.
func TestPeerings_MultipleUpdates(t *testing.T) {
	c := cache.New(cache.Options{})

	client := NewMockPeeringLister(t)

	// On each mock client call to PeeringList we will increment the index by 1
	// to simulate new data arriving.
	resp := &pbpeering.PeeringListResponse{
		Index: uint64(0),
	}

	client.On("PeeringList", mock.Anything, mock.Anything).
		Return(func(ctx context.Context, in *pbpeering.PeeringListRequest, opts ...grpc.CallOption) *pbpeering.PeeringListResponse {
			resp.Index++

			// Avoids triggering the race detection by copying the output
			copyResp, err := copystructure.Copy(resp)
			require.NoError(t, err)
			output := copyResp.(*pbpeering.PeeringListResponse)
			return output
		}, nil)

	c.RegisterType(PeeringListName, &Peerings{Client: client})

	ch := make(chan cache.UpdateEvent)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	t.Cleanup(cancel)

	require.NoError(t, c.Notify(ctx, PeeringListName, &PeeringListRequest{
		Request: &pbpeering.PeeringListRequest{},
	}, "updates", ch))

	i := uint64(1)
	for {
		select {
		case <-ctx.Done():
			t.Fatal("context deadline exceeded")
			return
		case update := <-ch:
			// Expect to receive updates for increasing indexes serially.
			actual := update.Result.(*pbpeering.PeeringListResponse)
			require.Equal(t, i, actual.Index)
			i++
			if i > 3 {
				return
			}
		}
	}
}

View File

@@ -83,7 +83,12 @@ func (t *TrustBundle) Fetch(_ cache.FetchOptions, req cache.Request) (cache.Fetc
 	reqReal.QueryOptions.SetAllowStale(true)

 	// Fetch
-	reply, err := t.Client.TrustBundleRead(external.ContextWithToken(context.Background(), reqReal.Token), reqReal.Request)
+	ctx, err := external.ContextWithQueryOptions(context.Background(), reqReal.QueryOptions)
+	if err != nil {
+		return result, err
+	}
+
+	reply, err := t.Client.TrustBundleRead(ctx, reqReal.Request)
 	if err != nil {
 		return result, err
 	}

View File

@@ -5,10 +5,11 @@ import (
 	"testing"
 	"time"

-	"github.com/hashicorp/consul/agent/cache"
-	"github.com/hashicorp/consul/proto/pbpeering"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
+
+	"github.com/hashicorp/consul/agent/cache"
+	"github.com/hashicorp/consul/proto/pbpeering"
 )

 func TestTrustBundle(t *testing.T) {
@@ -93,11 +94,12 @@ func TestTrustBundle_MultipleUpdates(t *testing.T) {
 	for {
 		select {
 		case <-ctx.Done():
+			t.Fatal("context deadline exceeded")
 			return
 		case update := <-ch:
 			// Expect to receive updates for increasing indexes serially.
-			resp := update.Result.(*pbpeering.TrustBundleReadResponse)
-			require.Equal(t, i, resp.Index)
+			actual := update.Result.(*pbpeering.TrustBundleReadResponse)
+			require.Equal(t, i, actual.Index)
 			i++
 			if i > 3 {

View File

@@ -87,7 +87,12 @@ func (t *TrustBundles) Fetch(_ cache.FetchOptions, req cache.Request) (cache.Fet
 	reqReal.QueryOptions.SetAllowStale(true)

 	// Fetch
-	reply, err := t.Client.TrustBundleListByService(external.ContextWithToken(context.Background(), reqReal.Token), reqReal.Request)
+	ctx, err := external.ContextWithQueryOptions(context.Background(), reqReal.QueryOptions)
+	if err != nil {
+		return result, err
+	}
+
+	reply, err := t.Client.TrustBundleListByService(ctx, reqReal.Request)
 	if err != nil {
 		// Return an empty result if the error is due to peering being disabled.
 		// This allows mesh gateways to receive an update and confirm that the watch is set.

View File

@@ -121,6 +121,7 @@ func TestTrustBundles_MultipleUpdates(t *testing.T) {
 	for {
 		select {
 		case <-ctx.Done():
+			t.Fatal("context deadline exceeded")
 			return
 		case update := <-ch:
 			// Expect to receive updates for increasing indexes serially.

View File

@@ -19,6 +19,7 @@ import (
 	"time"

 	"github.com/armon/go-metrics/prometheus"
+	hcpconfig "github.com/hashicorp/consul/agent/hcp/config"
 	"github.com/hashicorp/go-bexpr"
 	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-multierror"
@@ -959,6 +960,7 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
 		AutoEncryptIPSAN:    autoEncryptIPSAN,
 		AutoEncryptAllowTLS: autoEncryptAllowTLS,
 		AutoConfig:          autoConfig,
+		Cloud:               b.cloudConfigVal(c.Cloud),
 		ConnectEnabled:      connectEnabled,
 		ConnectCAProvider:   connectCAProvider,
 		ConnectCAConfig:     connectCAConfig,
@@ -1560,6 +1562,7 @@ func (b *builder) checkVal(v *CheckDefinition) *structs.CheckDefinition {
 		Body:              stringVal(v.Body),
 		DisableRedirects:  boolVal(v.DisableRedirects),
 		TCP:               stringVal(v.TCP),
+		UDP:               stringVal(v.UDP),
 		Interval:          b.durationVal(fmt.Sprintf("check[%s].interval", id), v.Interval),
 		DockerContainerID: stringVal(v.DockerContainerID),
 		Shell:             stringVal(v.Shell),
@@ -2446,6 +2449,20 @@ func validateAutoConfigAuthorizer(rt RuntimeConfig) error {
 	return nil
 }

+func (b *builder) cloudConfigVal(v *CloudConfigRaw) (val hcpconfig.CloudConfig) {
+	if v == nil {
+		return val
+	}
+
+	val.ResourceID = stringVal(v.ResourceID)
+	val.ClientID = stringVal(v.ClientID)
+	val.ClientSecret = stringVal(v.ClientSecret)
+	val.AuthURL = stringVal(v.AuthURL)
+	val.Hostname = stringVal(v.Hostname)
+	return val
+}
+
 // decodeBytes returns the encryption key decoded.
 func decodeBytes(key string) ([]byte, error) {
 	return base64.StdEncoding.DecodeString(key)

View File

@@ -326,6 +326,24 @@ func TestBuilder_ServiceVal_MultiError(t *testing.T) {
 	require.Contains(t, b.err.Error(), "cannot have both socket path")
 }

+func TestBuilder_ServiceVal_with_Check(t *testing.T) {
+	b := builder{}
+	svc := b.serviceVal(&ServiceDefinition{
+		Name: strPtr("unbound"),
+		ID:   strPtr("unbound"),
+		Port: intPtr(12345),
+		Checks: []CheckDefinition{
+			{
+				Interval: strPtr("5s"),
+				UDP:      strPtr("localhost:53"),
+			},
+		},
+	})
+	require.NoError(t, b.err)
+	require.Equal(t, 1, len(svc.Checks))
+	require.Equal(t, "localhost:53", svc.Checks[0].UDP)
+}
+
 func intPtr(v int) *int {
 	return &v
 }

View File

@@ -153,6 +153,7 @@ type Config struct {
 	CheckUpdateInterval *string           `mapstructure:"check_update_interval"`
 	Checks              []CheckDefinition `mapstructure:"checks"`
 	ClientAddr          *string           `mapstructure:"client_addr"`
+	Cloud               *CloudConfigRaw   `mapstructure:"cloud"`
 	ConfigEntries       ConfigEntries     `mapstructure:"config_entries"`
 	AutoEncrypt         AutoEncrypt       `mapstructure:"auto_encrypt"`
 	Connect             Connect           `mapstructure:"connect"`
@@ -859,6 +860,14 @@ type RPC struct {
 	EnableStreaming *bool `mapstructure:"enable_streaming"`
 }

+type CloudConfigRaw struct {
+	ResourceID   *string `mapstructure:"resource_id"`
+	ClientID     *string `mapstructure:"client_id"`
+	ClientSecret *string `mapstructure:"client_secret"`
+	Hostname     *string `mapstructure:"hostname"`
+	AuthURL      *string `mapstructure:"auth_url"`
+}
+
 type TLSProtocolConfig struct {
 	CAFile *string `mapstructure:"ca_file"`
 	CAPath *string `mapstructure:"ca_path"`

View File

@@ -13,6 +13,7 @@ import (
 	"github.com/hashicorp/consul/agent/cache"
 	"github.com/hashicorp/consul/agent/consul"
 	"github.com/hashicorp/consul/agent/dns"
+	hcpconfig "github.com/hashicorp/consul/agent/hcp/config"
 	"github.com/hashicorp/consul/agent/structs"
 	"github.com/hashicorp/consul/agent/token"
 	"github.com/hashicorp/consul/api"
@@ -157,6 +158,11 @@ type RuntimeConfig struct {
 	// hcl: autopilot { upgrade_version_tag = string }
 	AutopilotUpgradeVersionTag string

+	// Cloud contains configuration for agents to connect to HCP.
+	//
+	// hcl: cloud { ... }
+	Cloud hcpconfig.CloudConfig
+
 	// DNSAllowStale is used to enable lookups with stale
 	// data. This gives horizontal read scalability since
 	// any Consul server can service the query instead of
@@ -1679,6 +1685,11 @@ func (c *RuntimeConfig) Sanitized() map[string]interface{} {
 	return sanitize("rt", reflect.ValueOf(c)).Interface().(map[string]interface{})
 }

+// IsCloudEnabled returns true if a cloud.resource_id is set and the server mode is enabled
+func (c *RuntimeConfig) IsCloudEnabled() bool {
+	return c.ServerMode && c.Cloud.ResourceID != ""
+}
+
 // isSecret determines whether a field name represents a field which
 // may contain a secret.
 func isSecret(name string) bool {

View File

@@ -19,6 +19,7 @@ import (
 	"github.com/armon/go-metrics/prometheus"
 	"github.com/google/go-cmp/cmp/cmpopts"
+	hcpconfig "github.com/hashicorp/consul/agent/hcp/config"
 	"github.com/stretchr/testify/require"

 	"github.com/hashicorp/consul/acl"
@@ -5989,44 +5990,51 @@ func TestLoad_FullConfig(t *testing.T) {
 		},
 		ConnectMeshGatewayWANFederationEnabled: false,
 		ConnectServerlessPluginEnabled:         true,
+		Cloud: hcpconfig.CloudConfig{
+			ResourceID:   "N43DsscE",
+			ClientID:     "6WvsDZCP",
+			ClientSecret: "lCSMHOpB",
+			Hostname:     "DH4bh7aC",
+			AuthURL:      "332nCdR2",
+		},
 		DNSAddrs:              []net.Addr{tcpAddr("93.95.95.81:7001"), udpAddr("93.95.95.81:7001")},
 		DNSARecordLimit:       29907,
 		DNSAllowStale:         true,
 		DNSDisableCompression: true,
 		DNSDomain:             "7W1xXSqd",
 		DNSAltDomain:          "1789hsd",
 		DNSEnableTruncate:     true,
 		DNSMaxStale:           29685 * time.Second,
 		DNSNodeTTL:            7084 * time.Second,
 		DNSOnlyPassing:        true,
 		DNSPort:               7001,
 		DNSRecursorStrategy:   "sequential",
 		DNSRecursorTimeout:    4427 * time.Second,
 		DNSRecursors:          []string{"63.38.39.58", "92.49.18.18"},
 		DNSSOA:                RuntimeSOAConfig{Refresh: 3600, Retry: 600, Expire: 86400, Minttl: 0},
 		DNSServiceTTL:         map[string]time.Duration{"*": 32030 * time.Second},
 		DNSUDPAnswerLimit:     29909,
 		DNSNodeMetaTXT:        true,
 		DNSUseCache:           true,
 		DNSCacheMaxAge:        5 * time.Minute,
 		DataDir:               dataDir,
 		Datacenter:            "rzo029wg",
 		DefaultQueryTime:      16743 * time.Second,
 		DisableAnonymousSignature:        true,
 		DisableCoordinates:               true,
 		DisableHostNodeID:                true,
 		DisableHTTPUnprintableCharFilter: true,
 		DisableKeyringFile:               true,
 		DisableRemoteExec:                true,
 		DisableUpdateCheck:               true,
 		DiscardCheckOutput:               true,
 		DiscoveryMaxStale:                5 * time.Second,
 		EnableAgentTLSForChecks:          true,
 		EnableCentralServiceConfig:       false,
 		EnableDebug:                      true,
 		EnableRemoteScriptChecks:         true,
 		EnableLocalScriptChecks:          true,
 		EncryptKey:                       "A4wELWqH",
 		StaticRuntimeConfig: StaticRuntimeConfig{
 			EncryptVerifyIncoming: true,
 			EncryptVerifyOutgoing: true,
@@ -6771,6 +6779,11 @@ func TestRuntimeConfig_Sanitize(t *testing.T) {
 			EntryFetchMaxBurst: 42,
 			EntryFetchRate:     0.334,
 		},
+		Cloud: hcpconfig.CloudConfig{
+			ResourceID:   "cluster1",
+			ClientID:     "id",
+			ClientSecret: "secret",
+		},
 		ConsulCoordinateUpdatePeriod: 15 * time.Second,
 		RaftProtocol:                 3,
 		RetryJoinLAN: []string{

View File

@@ -124,6 +124,13 @@
 		}
 	],
 	"ClientAddrs": [],
+	"Cloud": {
+		"AuthURL": "",
+		"ClientID": "id",
+		"ClientSecret": "hidden",
+		"Hostname": "",
+		"ResourceID": "cluster1"
+	},
 	"ConfigEntryBootstrap": [],
 	"ConnectCAConfig": {},
 	"ConnectCAProvider": "",

View File

@@ -201,6 +201,13 @@ auto_encrypt = {
 	ip_san = ["192.168.4.139", "192.168.4.140"]
 	allow_tls = true
 }
+cloud {
+	resource_id = "N43DsscE"
+	client_id = "6WvsDZCP"
+	client_secret = "lCSMHOpB"
+	hostname = "DH4bh7aC"
+	auth_url = "332nCdR2"
+}
 connect {
 	ca_provider = "consul"
 	ca_config {

View File

@@ -203,6 +203,13 @@
 	"ip_san": ["192.168.4.139", "192.168.4.140"],
 	"allow_tls": true
 },
+"cloud": {
+	"resource_id": "N43DsscE",
+	"client_id": "6WvsDZCP",
+	"client_secret": "lCSMHOpB",
+	"hostname": "DH4bh7aC",
+	"auth_url": "332nCdR2"
+},
 "connect": {
 	"ca_provider": "consul",
 	"ca_config": {

View File

@@ -1,4 +1,4 @@
-package consul
+package configentry

 import (
 	"fmt"
@@ -8,18 +8,21 @@ import (
 	"github.com/imdario/mergo"
 	"github.com/mitchellh/copystructure"

-	"github.com/hashicorp/consul/agent/configentry"
-	"github.com/hashicorp/consul/agent/consul/state"
+	"github.com/hashicorp/consul/acl"
 	"github.com/hashicorp/consul/agent/structs"
 )

+type StateStore interface {
+	ReadResolvedServiceConfigEntries(memdb.WatchSet, string, *acl.EnterpriseMeta, []structs.ServiceID, structs.ProxyMode) (uint64, *ResolvedServiceConfigSet, error)
+}
+
-// mergeNodeServiceWithCentralConfig merges a service instance (NodeService) with the
+// MergeNodeServiceWithCentralConfig merges a service instance (NodeService) with the
 // proxy-defaults/global and service-defaults/:service config entries.
 // This common helper is used by the blocking query function of different RPC endpoints
 // that need to return a fully resolved service defintion.
-func mergeNodeServiceWithCentralConfig(
+func MergeNodeServiceWithCentralConfig(
 	ws memdb.WatchSet,
-	state *state.Store,
+	state StateStore,
 	args *structs.ServiceSpecificRequest,
 	ns *structs.NodeService,
 	logger hclog.Logger) (uint64, *structs.NodeService, error) {
@@ -67,7 +70,7 @@ func mergeNodeServiceWithCentralConfig(
 			ns.ID, err)
 	}

-	defaults, err := configentry.ComputeResolvedServiceConfig(
+	defaults, err := ComputeResolvedServiceConfig(
 		configReq,
 		upstreams,
 		false,

View File

@@ -1,4 +1,4 @@
-package consul
+package configentry

 import (
 	"testing"

View File

@@ -53,6 +53,7 @@ func ComputeResolvedServiceConfig(
 		structs.NewServiceID(args.Name, &args.EnterpriseMeta),
 	)
 	if serviceConf != nil {
 		if serviceConf.Expose.Checks {
 			thisReply.Expose.Checks = true
 		}
@@ -62,12 +63,6 @@
 		if serviceConf.MeshGateway.Mode != structs.MeshGatewayModeDefault {
 			thisReply.MeshGateway.Mode = serviceConf.MeshGateway.Mode
 		}
-		if serviceConf.Protocol != "" {
-			if thisReply.ProxyConfig == nil {
-				thisReply.ProxyConfig = make(map[string]interface{})
-			}
-			thisReply.ProxyConfig["protocol"] = serviceConf.Protocol
-		}
 		if serviceConf.TransparentProxy.OutboundListenerPort != 0 {
 			thisReply.TransparentProxy.OutboundListenerPort = serviceConf.TransparentProxy.OutboundListenerPort
 		}
@@ -81,25 +76,29 @@
 			thisReply.Destination = *serviceConf.Destination
 		}

+		// Populate values for the proxy config map
+		proxyConf := thisReply.ProxyConfig
+		if proxyConf == nil {
+			proxyConf = make(map[string]interface{})
+		}
+		if serviceConf.Protocol != "" {
+			proxyConf["protocol"] = serviceConf.Protocol
+		}
+		if serviceConf.BalanceInboundConnections != "" {
+			proxyConf["balance_inbound_connections"] = serviceConf.BalanceInboundConnections
+		}
 		if serviceConf.MaxInboundConnections > 0 {
-			if thisReply.ProxyConfig == nil {
-				thisReply.ProxyConfig = map[string]interface{}{}
-			}
-			thisReply.ProxyConfig["max_inbound_connections"] = serviceConf.MaxInboundConnections
+			proxyConf["max_inbound_connections"] = serviceConf.MaxInboundConnections
 		}
 		if serviceConf.LocalConnectTimeoutMs > 0 {
-			if thisReply.ProxyConfig == nil {
-				thisReply.ProxyConfig = map[string]interface{}{}
-			}
-			thisReply.ProxyConfig["local_connect_timeout_ms"] = serviceConf.LocalConnectTimeoutMs
+			proxyConf["local_connect_timeout_ms"] = serviceConf.LocalConnectTimeoutMs
 		}
 		if serviceConf.LocalRequestTimeoutMs > 0 {
-			if thisReply.ProxyConfig == nil {
-				thisReply.ProxyConfig = map[string]interface{}{}
-			}
-			thisReply.ProxyConfig["local_request_timeout_ms"] = serviceConf.LocalRequestTimeoutMs
+			proxyConf["local_request_timeout_ms"] = serviceConf.LocalRequestTimeoutMs
+		}
+		// Add the proxy conf to the response if any fields were populated
+		if len(proxyConf) > 0 {
+			thisReply.ProxyConfig = proxyConf
 		}

 		thisReply.Meta = serviceConf.Meta
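Taken together, the fields merged into the proxy config map above correspond to a service-defaults entry along these lines; the field names come from the code in this diff, while the values are illustrative:

```hcl
Kind = "service-defaults"
Name = "api"

Protocol                  = "http"          # -> "protocol"
BalanceInboundConnections = "exact_balance" # -> "balance_inbound_connections"
MaxInboundConnections     = 1024            # -> "max_inbound_connections"
LocalConnectTimeoutMs     = 5000            # -> "local_connect_timeout_ms"
LocalRequestTimeoutMs     = 15000           # -> "local_request_timeout_ms"
```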

View File

@@ -24,6 +24,26 @@ func Test_ComputeResolvedServiceConfig(t *testing.T) {
 		args args
 		want *structs.ServiceConfigResponse
 	}{
+		{
+			name: "proxy with balanceinboundconnections",
+			args: args{
+				scReq: &structs.ServiceConfigRequest{
+					Name: "sid",
+				},
+				entries: &ResolvedServiceConfigSet{
+					ServiceDefaults: map[structs.ServiceID]*structs.ServiceConfigEntry{
+						sid: {
+							BalanceInboundConnections: "exact_balance",
+						},
+					},
+				},
+			},
+			want: &structs.ServiceConfigResponse{
+				ProxyConfig: map[string]interface{}{
+					"balance_inbound_connections": "exact_balance",
+				},
+			},
+		},
 		{
 			name: "proxy with maxinboundsconnections",
 			args: args{

View File

@@ -183,8 +183,7 @@ func TestCAWithKeyType(t testing.T, xc *structs.CARoot, keyType string, keyBits
    return testCA(t, xc, keyType, keyBits, 0)
}

-func testLeafWithID(t testing.T, spiffeId CertURI, root *structs.CARoot, keyType string, keyBits int, expiration time.Duration) (string, string, error) {
+func testLeafWithID(t testing.T, spiffeId CertURI, dnsSAN string, root *structs.CARoot, keyType string, keyBits int, expiration time.Duration) (string, string, error) {
    if expiration == 0 {
        // this is 10 years
        expiration = 10 * 365 * 24 * time.Hour
@@ -238,6 +237,7 @@ func testLeafWithID(t testing.T, spiffeId CertURI, root *structs.CARoot, keyType
        NotBefore:      time.Now(),
        AuthorityKeyId: testKeyID(t, caSigner.Public()),
        SubjectKeyId:   testKeyID(t, pkSigner.Public()),
+       DNSNames:       []string{dnsSAN},
    }

    // Create the certificate, PEM encode it and return that value.
@@ -263,7 +263,7 @@ func TestAgentLeaf(t testing.T, node string, datacenter string, root *structs.CA
        Agent: node,
    }

-   return testLeafWithID(t, spiffeId, root, DefaultPrivateKeyType, DefaultPrivateKeyBits, expiration)
+   return testLeafWithID(t, spiffeId, "", root, DefaultPrivateKeyType, DefaultPrivateKeyBits, expiration)
}

func testLeaf(t testing.T, service string, namespace string, root *structs.CARoot, keyType string, keyBits int) (string, string, error) {
@@ -275,7 +275,7 @@ func testLeaf(t testing.T, service string, namespace string, root *structs.CARoo
        Service: service,
    }

-   return testLeafWithID(t, spiffeId, root, keyType, keyBits, 0)
+   return testLeafWithID(t, spiffeId, "", root, keyType, keyBits, 0)
}

// TestLeaf returns a valid leaf certificate and its private key for the named
@@ -305,7 +305,23 @@ func TestMeshGatewayLeaf(t testing.T, partition string, root *structs.CARoot) (s
        Datacenter: "dc1",
    }

-   certPEM, keyPEM, err := testLeafWithID(t, spiffeId, root, DefaultPrivateKeyType, DefaultPrivateKeyBits, 0)
+   certPEM, keyPEM, err := testLeafWithID(t, spiffeId, "", root, DefaultPrivateKeyType, DefaultPrivateKeyBits, 0)
+   if err != nil {
+       t.Fatalf(err.Error())
+   }
+   return certPEM, keyPEM
+}
+
+func TestServerLeaf(t testing.T, dc string, root *structs.CARoot) (string, string) {
+   t.Helper()
+   spiffeID := &SpiffeIDServer{
+       Datacenter: dc,
+       Host:       fmt.Sprintf("%s.consul", TestClusterID),
+   }
+   san := PeeringServerSAN(dc, TestTrustDomain)
+   certPEM, keyPEM, err := testLeafWithID(t, spiffeID, san, root, DefaultPrivateKeyType, DefaultPrivateKeyBits, 0)
    if err != nil {
        t.Fatalf(err.Error())
    }
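The `dnsSAN` threaded through `testLeafWithID` is what lets a peering dialer validate the acceptor's certificate by name. Judging from the TLS-error expectation updated later in this commit, `PeeringServerSAN` produces names of the following shape; the sketch is illustrative, not code from the diff.

```go
// Inferred from the updated test expectation
// "server.dc1.peering.11111111-2222-3333-4444-555555555555.consul":
san := PeeringServerSAN("dc1", TestTrustDomain) // "server.<datacenter>.peering.<trust-domain>"
_ = san
```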
View File
@@ -16,6 +16,7 @@ import (
    "github.com/hashicorp/consul/acl"
    "github.com/hashicorp/consul/acl/resolver"
+   "github.com/hashicorp/consul/agent/configentry"
    "github.com/hashicorp/consul/agent/consul/state"
    "github.com/hashicorp/consul/agent/structs"
    "github.com/hashicorp/consul/ipaddr"
@@ -752,7 +753,7 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru
            mergedsn := sn
            ns := sn.ToNodeService()
            if ns.IsSidecarProxy() || ns.IsGateway() {
-               cfgIndex, mergedns, err := mergeNodeServiceWithCentralConfig(ws, state, args, ns, c.logger)
+               cfgIndex, mergedns, err := configentry.MergeNodeServiceWithCentralConfig(ws, state, args, ns, c.logger)
                if err != nil {
                    return err
                }
@@ -960,7 +961,7 @@ func (c *Catalog) NodeServiceList(args *structs.NodeSpecificRequest, reply *stru
                Datacenter:   args.Datacenter,
                QueryOptions: args.QueryOptions,
            }
-           cfgIndex, mergedns, err = mergeNodeServiceWithCentralConfig(ws, state, &serviceSpecificReq, ns, c.logger)
+           cfgIndex, mergedns, err = configentry.MergeNodeServiceWithCentralConfig(ws, state, &serviceSpecificReq, ns, c.logger)
            if err != nil {
                return err
            }
View File
@@ -37,11 +37,11 @@ func (c *Client) setupSerf(conf *serf.Config, ch chan serf.Event, path string) (
    serfLogger := c.logger.
        NamedIntercept(logging.Serf).
        NamedIntercept(logging.LAN).
-       StandardLoggerIntercept(&hclog.StandardLoggerOptions{InferLevels: true})
+       StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true})
    memberlistLogger := c.logger.
        NamedIntercept(logging.Memberlist).
        NamedIntercept(logging.LAN).
-       StandardLoggerIntercept(&hclog.StandardLoggerOptions{InferLevels: true})
+       StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true})

    conf.MemberlistConfig.Logger = memberlistLogger
    conf.Logger = serfLogger
View File
@@ -39,6 +39,7 @@ func TestCloneSerfLANConfig(t *testing.T) {
        "Ping",
        "ProtocolVersion",
        "PushPullInterval",
+       "QueueCheckInterval",
        "RequireNodeNames",
        "SkipInboundLabelCheck",
        "SuspicionMaxTimeoutMult",
View File
@@ -59,7 +59,9 @@ func TestGRPCIntegration_ConnectCA_Sign(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    t.Cleanup(cancel)

-   ctx = external.ContextWithToken(ctx, TestDefaultInitialManagementToken)
+   options := structs.QueryOptions{Token: TestDefaultInitialManagementToken}
+   ctx, err := external.ContextWithQueryOptions(ctx, options)
+   require.NoError(t, err)

    // This would fail if it wasn't forwarded to the leader.
    rsp, err := client.Sign(ctx, &pbconnectca.SignRequest{
@@ -96,7 +98,9 @@ func TestGRPCIntegration_ServerDiscovery_WatchServers(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    t.Cleanup(cancel)

-   ctx = external.ContextWithToken(ctx, TestDefaultInitialManagementToken)
+   options := structs.QueryOptions{Token: TestDefaultInitialManagementToken}
+   ctx, err := external.ContextWithQueryOptions(ctx, options)
+   require.NoError(t, err)

    serverStream, err := client.WatchServers(ctx, &pbserverdiscovery.WatchServersRequest{Wan: false})
    require.NoError(t, err)
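Both tests switch from attaching a bare ACL token to attaching full query options to the outgoing gRPC context. A minimal sketch of the new calling pattern, with an illustrative token value:

```go
options := structs.QueryOptions{Token: "my-acl-token"} // illustrative token
ctx, err := external.ContextWithQueryOptions(context.Background(), options)
if err != nil {
	return err // ContextWithQueryOptions can fail, unlike the old ContextWithToken
}
_ = ctx // ctx now carries the query options on the outgoing request
```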
View File
@@ -11,6 +11,7 @@ import (
    hashstructure_v2 "github.com/mitchellh/hashstructure/v2"

    "github.com/hashicorp/consul/acl"
+   "github.com/hashicorp/consul/agent/configentry"
    "github.com/hashicorp/consul/agent/consul/state"
    "github.com/hashicorp/consul/agent/structs"
)
@@ -256,7 +257,7 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc
    for _, node := range resolvedNodes {
        ns := node.Service
        if ns.IsSidecarProxy() || ns.IsGateway() {
-           cfgIndex, mergedns, err := mergeNodeServiceWithCentralConfig(ws, state, args, ns, h.logger)
+           cfgIndex, mergedns, err := configentry.MergeNodeServiceWithCentralConfig(ws, state, args, ns, h.logger)
            if err != nil {
                return err
            }
View File
@@ -3334,19 +3334,19 @@ func TestInternal_ExportedPeeredServices_ACLEnforcement(t *testing.T) {
            {
                Name: "web",
                Consumers: []structs.ServiceConsumer{
-                   {PeerName: "peer-1"},
+                   {Peer: "peer-1"},
                },
            },
            {
                Name: "db",
                Consumers: []structs.ServiceConsumer{
-                   {PeerName: "peer-2"},
+                   {Peer: "peer-2"},
                },
            },
            {
                Name: "api",
                Consumers: []structs.ServiceConsumer{
-                   {PeerName: "peer-1"},
+                   {Peer: "peer-1"},
                },
            },
        },
@@ -3405,7 +3405,7 @@ func TestInternal_ExportedPeeredServices_ACLEnforcement(t *testing.T) {
            `
            service "web" { policy = "read" }
            service "api" { policy = "read" }
            service "db" { policy = "deny" }
            `),
            expect: map[string]structs.ServiceList{
                "peer-1": {
@@ -3514,19 +3514,19 @@ func TestInternal_ExportedServicesForPeer_ACLEnforcement(t *testing.T) {
            {
                Name: "web",
                Consumers: []structs.ServiceConsumer{
-                   {PeerName: "peer-1"},
+                   {Peer: "peer-1"},
                },
            },
            {
                Name: "db",
                Consumers: []structs.ServiceConsumer{
-                   {PeerName: "peer-2"},
+                   {Peer: "peer-2"},
                },
            },
            {
                Name: "api",
                Consumers: []structs.ServiceConsumer{
-                   {PeerName: "peer-1"},
+                   {Peer: "peer-1"},
                },
            },
        },
View File
@@ -12,6 +12,7 @@ import (
    "time"

    "github.com/armon/go-metrics"
+   msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
    "github.com/hashicorp/go-hclog"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
@@ -21,6 +22,7 @@ import (
    "google.golang.org/protobuf/proto"

    "github.com/hashicorp/consul/acl"
+   "github.com/hashicorp/consul/agent/connect"
    "github.com/hashicorp/consul/agent/consul/state"
    "github.com/hashicorp/consul/agent/structs"
    "github.com/hashicorp/consul/api"
@@ -33,27 +35,23 @@ import (
)
func TestLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T) {
-   t.Run("without-tls", func(t *testing.T) {
-       testLeader_PeeringSync_Lifecycle_ClientDeletion(t, false)
-   })
-   t.Run("with-tls", func(t *testing.T) {
-       testLeader_PeeringSync_Lifecycle_ClientDeletion(t, true)
-   })
-}
-
-func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS bool) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

+   ca := connect.TestCA(t, nil)
    _, acceptor := testServerWithConfig(t, func(c *Config) {
        c.NodeName = "acceptor"
        c.Datacenter = "dc1"
        c.TLSConfig.Domain = "consul"
-       if enableTLS {
-           c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
-           c.TLSConfig.GRPC.CertFile = "../../test/hostname/Bob.crt"
-           c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key"
-       }
+       c.GRPCTLSPort = freeport.GetOne(t)
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
    })
    testrpc.WaitForLeader(t, acceptor.RPC, "dc1")
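This substitution repeats throughout the file: the old file-based gRPC TLS fixtures (CertAuth.crt with the Bob and Betty certificates) are replaced by an in-memory Connect CA (connect.TestCA) plus a dedicated TLS-enabled gRPC port (GRPCTLSPort), so each test now peers over a server certificate signed by the cluster's own CA.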
@@ -93,11 +91,6 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
        c.NodeName = "dialer"
        c.Datacenter = "dc2"
        c.PrimaryDatacenter = "dc2"
-       if enableTLS {
-           c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
-           c.TLSConfig.GRPC.CertFile = "../../test/hostname/Betty.crt"
-           c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key"
-       }
    })
    testrpc.WaitForLeader(t, dialer.RPC, "dc2")
@@ -162,28 +155,214 @@ func testLeader_PeeringSync_Lifecycle_ClientDeletion(t *testing.T, enableTLS boo
    })
}

-func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
-   t.Run("without-tls", func(t *testing.T) {
-       testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t, false)
-   })
-   t.Run("with-tls", func(t *testing.T) {
-       testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t, true)
-   })
-}
-
-func testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t *testing.T, enableTLS bool) {
+func TestLeader_PeeringSync_Lifecycle_UnexportWhileDown(t *testing.T) {
    if testing.Short() {
        t.Skip("too slow for testing.Short")
    }

+   // Reserve a gRPC port so we can restart the dialing server with the same port.
+   dialingServerPort := freeport.GetOne(t)
+
+   ca := connect.TestCA(t, nil)
    _, acceptor := testServerWithConfig(t, func(c *Config) {
        c.NodeName = "acceptor"
        c.Datacenter = "dc1"
        c.TLSConfig.Domain = "consul"
-       if enableTLS {
-           c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
-           c.TLSConfig.GRPC.CertFile = "../../test/hostname/Bob.crt"
-           c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key"
-       }
+       c.GRPCTLSPort = freeport.GetOne(t)
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
    })
    testrpc.WaitForLeader(t, acceptor.RPC, "dc1")

+   // Create a peering by generating a token
+   ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+   t.Cleanup(cancel)
+
+   conn, err := grpc.DialContext(ctx, acceptor.config.RPCAddr.String(),
+       grpc.WithContextDialer(newServerDialer(acceptor.config.RPCAddr.String())),
+       grpc.WithInsecure(),
+       grpc.WithBlock())
+   require.NoError(t, err)
+   defer conn.Close()
+
+   acceptorClient := pbpeering.NewPeeringServiceClient(conn)
+
+   req := pbpeering.GenerateTokenRequest{
+       PeerName: "my-peer-dialer",
+   }
+   resp, err := acceptorClient.GenerateToken(ctx, &req)
+   require.NoError(t, err)
+
+   tokenJSON, err := base64.StdEncoding.DecodeString(resp.PeeringToken)
+   require.NoError(t, err)
+
+   var token structs.PeeringToken
+   require.NoError(t, json.Unmarshal(tokenJSON, &token))
+
+   // Bring up dialer and establish a peering with acceptor's token so that it attempts to dial.
+   _, dialer := testServerWithConfig(t, func(c *Config) {
+       c.NodeName = "dialer"
+       c.Datacenter = "dc2"
+       c.PrimaryDatacenter = "dc2"
+       c.GRPCPort = dialingServerPort
+   })
+   testrpc.WaitForLeader(t, dialer.RPC, "dc2")
+
+   // Create a peering at dialer by establishing a peering with acceptor's token
+   ctx, cancel = context.WithTimeout(context.Background(), 3*time.Second)
+   t.Cleanup(cancel)
+
+   conn, err = grpc.DialContext(ctx, dialer.config.RPCAddr.String(),
+       grpc.WithContextDialer(newServerDialer(dialer.config.RPCAddr.String())),
+       grpc.WithInsecure(),
+       grpc.WithBlock())
+   require.NoError(t, err)
+   defer conn.Close()
+
+   dialerClient := pbpeering.NewPeeringServiceClient(conn)
+
+   establishReq := pbpeering.EstablishRequest{
+       PeerName:     "my-peer-acceptor",
+       PeeringToken: resp.PeeringToken,
+   }
+   _, err = dialerClient.Establish(ctx, &establishReq)
+   require.NoError(t, err)
+
+   p, err := dialerClient.PeeringRead(ctx, &pbpeering.PeeringReadRequest{Name: "my-peer-acceptor"})
+   require.NoError(t, err)
+
+   retry.Run(t, func(r *retry.R) {
+       status, found := dialer.peerStreamServer.StreamStatus(p.Peering.ID)
+       require.True(r, found)
+       require.True(r, status.Connected)
+   })
+
+   retry.Run(t, func(r *retry.R) {
+       status, found := acceptor.peerStreamServer.StreamStatus(p.Peering.PeerID)
+       require.True(r, found)
+       require.True(r, status.Connected)
+   })
+
+   acceptorCodec := rpcClient(t, acceptor)
+   {
+       exportedServices := structs.ConfigEntryRequest{
+           Op:         structs.ConfigEntryUpsert,
+           Datacenter: "dc1",
+           Entry: &structs.ExportedServicesConfigEntry{
+               Name: "default",
+               Services: []structs.ExportedService{
+                   {
+                       Name:      "foo",
+                       Consumers: []structs.ServiceConsumer{{Peer: "my-peer-dialer"}},
+                   },
+               },
+           },
+       }
+       var configOutput bool
+       require.NoError(t, msgpackrpc.CallWithCodec(acceptorCodec, "ConfigEntry.Apply", &exportedServices, &configOutput))
+       require.True(t, configOutput)
+   }
+
+   insertNode := func(i int) {
+       req := structs.RegisterRequest{
+           Datacenter: "dc1",
+           Node:       fmt.Sprintf("node%d", i+1),
+           Address:    fmt.Sprintf("127.0.0.%d", i+1),
+           NodeMeta: map[string]string{
+               "group":         fmt.Sprintf("%d", i/5),
+               "instance_type": "t2.micro",
+           },
+           Service: &structs.NodeService{
+               Service: "foo",
+               Port:    8000,
+           },
+           WriteRequest: structs.WriteRequest{Token: "root"},
+       }
+
+       var reply struct{}
+       if err := msgpackrpc.CallWithCodec(acceptorCodec, "Catalog.Register", &req, &reply); err != nil {
+           t.Fatalf("err: %v", err)
+       }
+   }
+
+   for i := 0; i < 5; i++ {
+       insertNode(i)
+   }
+
+   retry.Run(t, func(r *retry.R) {
+       _, nodes, err := dialer.fsm.State().CheckServiceNodes(nil, "foo", nil, "my-peer-acceptor")
+       require.NoError(r, err)
+       require.Len(r, nodes, 5)
+   })
+
+   // Shutdown the dialing server.
+   require.NoError(t, dialer.Shutdown())
+
+   // Have to manually shut down the gRPC server otherwise it stays bound to the port.
+   dialer.externalGRPCServer.Stop()
+
+   {
+       exportedServices := structs.ConfigEntryRequest{
+           Op:         structs.ConfigEntryUpsert,
+           Datacenter: "dc1",
+           Entry: &structs.ExportedServicesConfigEntry{
+               Name:     "default",
+               Services: []structs.ExportedService{},
+           },
+       }
+       var configOutput bool
+       require.NoError(t, msgpackrpc.CallWithCodec(acceptorCodec, "ConfigEntry.Apply", &exportedServices, &configOutput))
+       require.True(t, configOutput)
+   }
+
+   // Restart the dialing server by re-using the previous dialer's data directory and node id.
+   _, dialerRestart := testServerWithConfig(t, func(c *Config) {
+       c.NodeName = "dialer"
+       c.Datacenter = "dc1"
+       c.TLSConfig.Domain = "consul"
+       c.GRPCPort = dialingServerPort
+       c.DataDir = dialer.config.DataDir
+       c.NodeID = dialer.config.NodeID
+   })
+
+   // The dialing peer should eventually reconnect.
+   retry.Run(t, func(r *retry.R) {
+       connStreams := dialerRestart.peerStreamServer.ConnectedStreams()
+       require.Contains(r, connStreams, p.Peering.ID)
+   })
+
+   // The un-export results in the foo nodes being deleted.
+   retry.Run(t, func(r *retry.R) {
+       _, nodes, err := dialerRestart.fsm.State().CheckServiceNodes(nil, "foo", nil, "my-peer-acceptor")
+       require.NoError(r, err)
+       require.Len(r, nodes, 0)
+   })
+}
+
+func TestLeader_PeeringSync_Lifecycle_ServerDeletion(t *testing.T) {
+   if testing.Short() {
+       t.Skip("too slow for testing.Short")
+   }
+
+   ca := connect.TestCA(t, nil)
+   _, acceptor := testServerWithConfig(t, func(c *Config) {
+       c.NodeName = "acceptor"
+       c.Datacenter = "dc1"
+       c.TLSConfig.Domain = "consul"
+       c.GRPCTLSPort = freeport.GetOne(t)
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
    })
    testrpc.WaitForLeader(t, acceptor.RPC, "dc1")
@@ -218,11 +397,6 @@ func testLeader_PeeringSync_Lifecycle_AcceptorDeletion(t *testing.T, enableTLS b
        c.NodeName = "dialer"
        c.Datacenter = "dc2"
        c.PrimaryDatacenter = "dc2"
-       if enableTLS {
-           c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
-           c.TLSConfig.GRPC.CertFile = "../../test/hostname/Betty.crt"
-           c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key"
-       }
    })
    testrpc.WaitForLeader(t, dialer.RPC, "dc2")
@@ -295,7 +469,7 @@ func TestLeader_PeeringSync_FailsForTLSError(t *testing.T) {
    t.Run("server-name-validation", func(t *testing.T) {
        testLeader_PeeringSync_failsForTLSError(t, func(token *structs.PeeringToken) {
            token.ServerName = "wrong.name"
-       }, `transport: authentication handshake failed: x509: certificate is valid for server.dc1.consul, bob.server.dc1.consul, not wrong.name`)
+       }, `transport: authentication handshake failed: x509: certificate is valid for server.dc1.peering.11111111-2222-3333-4444-555555555555.consul, not wrong.name`)
    })
    t.Run("bad-ca-roots", func(t *testing.T) {
        wrongRoot, err := ioutil.ReadFile("../../test/client_certs/rootca.crt")
@@ -310,14 +484,20 @@ func TestLeader_PeeringSync_FailsForTLSError(t *testing.T) {
func testLeader_PeeringSync_failsForTLSError(t *testing.T, tokenMutateFn func(token *structs.PeeringToken), expectErr string) {
    require.NotNil(t, tokenMutateFn)

+   ca := connect.TestCA(t, nil)
    _, s1 := testServerWithConfig(t, func(c *Config) {
        c.NodeName = "bob"
        c.Datacenter = "dc1"
        c.TLSConfig.Domain = "consul"
-       c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
-       c.TLSConfig.GRPC.CertFile = "../../test/hostname/Bob.crt"
-       c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Bob.key"
+       c.GRPCTLSPort = freeport.GetOne(t)
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
    })
    testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -360,10 +540,6 @@ func testLeader_PeeringSync_failsForTLSError(t *testing.T, tokenMutateFn func(to
        c.NodeName = "betty"
        c.Datacenter = "dc2"
        c.PrimaryDatacenter = "dc2"
-       c.TLSConfig.GRPC.CAFile = "../../test/hostname/CertAuth.crt"
-       c.TLSConfig.GRPC.CertFile = "../../test/hostname/Betty.crt"
-       c.TLSConfig.GRPC.KeyFile = "../../test/hostname/Betty.key"
    })
    testrpc.WaitForLeader(t, s2.RPC, "dc2")
@@ -402,11 +578,11 @@ func TestLeader_Peering_DeferredDeletion(t *testing.T) {
        t.Skip("too slow for testing.Short")
    }

-   // TODO(peering): Configure with TLS
    _, s1 := testServerWithConfig(t, func(c *Config) {
        c.NodeName = "s1.dc1"
        c.Datacenter = "dc1"
        c.TLSConfig.Domain = "consul"
+       c.GRPCTLSPort = freeport.GetOne(t)
    })
    testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -481,15 +657,21 @@ func TestLeader_Peering_DialerReestablishesConnectionOnError(t *testing.T) {
    }

    // Reserve a gRPC port so we can restart the accepting server with the same port.
-   ports := freeport.GetN(t, 1)
-   acceptingServerPort := ports[0]
+   acceptingServerPort := freeport.GetOne(t)

+   ca := connect.TestCA(t, nil)
    _, acceptingServer := testServerWithConfig(t, func(c *Config) {
        c.NodeName = "acceptingServer.dc1"
        c.Datacenter = "dc1"
-       c.TLSConfig.Domain = "consul"
-       c.GRPCPort = acceptingServerPort
-       c.PeeringEnabled = true
+       c.GRPCTLSPort = acceptingServerPort
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
    })
    testrpc.WaitForLeader(t, acceptingServer.RPC, "dc1")
@@ -592,9 +774,17 @@ func TestLeader_Peering_DialerReestablishesConnectionOnError(t *testing.T) {
        c.NodeName = "acceptingServer.dc1"
        c.Datacenter = "dc1"
        c.TLSConfig.Domain = "consul"
-       c.GRPCPort = acceptingServerPort
        c.DataDir = acceptingServer.config.DataDir
        c.NodeID = acceptingServer.config.NodeID
+       c.GRPCTLSPort = acceptingServerPort
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
    })
    testrpc.WaitForLeader(t, acceptingServerRestart.RPC, "dc1")
@@ -689,11 +879,19 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) {
        t.Skip("too slow for testing.Short")
    }

+   ca := connect.TestCA(t, nil)
    _, s1 := testServerWithConfig(t, func(c *Config) {
        c.NodeName = "s1.dc1"
        c.Datacenter = "dc1"
-       c.TLSConfig.Domain = "consul"
-       c.PeeringEnabled = true
+       c.GRPCTLSPort = freeport.GetOne(t)
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
    })
    testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -818,8 +1016,8 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) {
        name                       string
        description                string
        exportedService            structs.ExportedServicesConfigEntry
-       expectedImportedServsCount uint64
-       expectedExportedServsCount uint64
+       expectedImportedServsCount int
+       expectedExportedServsCount int
    }

    testCases := []testCase{
@@ -833,7 +1031,7 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) {
                    Name: structs.WildcardSpecifier,
                    Consumers: []structs.ServiceConsumer{
                        {
-                           PeerName: "my-peer-s2",
+                           Peer: "my-peer-s2",
                        },
                    },
                },
@@ -861,7 +1059,7 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) {
                    Name: "a-service",
                    Consumers: []structs.ServiceConsumer{
                        {
-                           PeerName: "my-peer-s2",
+                           Peer: "my-peer-s2",
                        },
                    },
                },
@@ -869,7 +1067,7 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) {
                    Name: "b-service",
                    Consumers: []structs.ServiceConsumer{
                        {
-                           PeerName: "my-peer-s2",
+                           Peer: "my-peer-s2",
                        },
                    },
                },
@@ -888,7 +1086,7 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) {
                    Name: "a-service",
                    Consumers: []structs.ServiceConsumer{
                        {
-                           PeerName: "my-peer-s2",
+                           Peer: "my-peer-s2",
                        },
                    },
                },
@@ -907,7 +1105,7 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) {
                    Name: "a-service",
                    Consumers: []structs.ServiceConsumer{
                        {
-                           PeerName: "my-peer-s2",
+                           Peer: "my-peer-s2",
                        },
                    },
                },
@@ -915,7 +1113,7 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) {
                    Name: "c-service",
                    Consumers: []structs.ServiceConsumer{
                        {
-                           PeerName: "my-peer-s2",
+                           Peer: "my-peer-s2",
                        },
                    },
                },
@@ -946,13 +1144,13 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) {
                resp, err := peeringClient2.PeeringRead(context.Background(), &pbpeering.PeeringReadRequest{Name: "my-peer-s1"})
                require.NoError(r, err)
                require.NotNil(r, resp.Peering)
-               require.Equal(r, tc.expectedImportedServsCount, resp.Peering.ImportedServiceCount)
+               require.Equal(r, tc.expectedImportedServsCount, len(resp.Peering.StreamStatus.ImportedServices))

                // on List
                resp2, err2 := peeringClient2.PeeringList(context.Background(), &pbpeering.PeeringListRequest{})
                require.NoError(r, err2)
                require.NotEmpty(r, resp2.Peerings)
-               require.Equal(r, tc.expectedExportedServsCount, resp2.Peerings[0].ImportedServiceCount)
+               require.Equal(r, tc.expectedExportedServsCount, len(resp2.Peerings[0].StreamStatus.ImportedServices))
            })

            // Check that exported services count on S1 are what we expect
@@ -961,13 +1159,13 @@ func TestLeader_Peering_ImportedExportedServicesCount(t *testing.T) {
                resp, err := peeringClient.PeeringRead(context.Background(), &pbpeering.PeeringReadRequest{Name: "my-peer-s2"})
                require.NoError(r, err)
                require.NotNil(r, resp.Peering)
-               require.Equal(r, tc.expectedImportedServsCount, resp.Peering.ExportedServiceCount)
+               require.Equal(r, tc.expectedImportedServsCount, len(resp.Peering.StreamStatus.ExportedServices))

                // on List
                resp2, err2 := peeringClient.PeeringList(context.Background(), &pbpeering.PeeringListRequest{})
                require.NoError(r, err2)
                require.NotEmpty(r, resp2.Peerings)
-               require.Equal(r, tc.expectedExportedServsCount, resp2.Peerings[0].ExportedServiceCount)
+               require.Equal(r, tc.expectedExportedServsCount, len(resp2.Peerings[0].StreamStatus.ExportedServices))
            })
        })
    }
@@ -987,11 +1185,19 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
        lastIdx = uint64(0)
    )

-   // TODO(peering): Configure with TLS
+   ca := connect.TestCA(t, nil)
    _, s1 := testServerWithConfig(t, func(c *Config) {
        c.NodeName = "s1.dc1"
        c.Datacenter = "dc1"
-       c.TLSConfig.Domain = "consul"
+       c.GRPCTLSPort = freeport.GetOne(t)
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
    })
    testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -1061,17 +1267,21 @@ func TestLeader_PeeringMetrics_emitPeeringMetrics(t *testing.T) {
    require.NoError(t, err)

    // mimic tracking exported services
-   mst1.TrackExportedService(structs.ServiceName{Name: "a-service"})
-   mst1.TrackExportedService(structs.ServiceName{Name: "b-service"})
-   mst1.TrackExportedService(structs.ServiceName{Name: "c-service"})
+   mst1.SetExportedServices([]structs.ServiceName{
+       {Name: "a-service"},
+       {Name: "b-service"},
+       {Name: "c-service"},
+   })

    // connect the stream
    mst2, err := s2.peeringServer.Tracker.Connected(s2PeerID2)
    require.NoError(t, err)

    // mimic tracking exported services
-   mst2.TrackExportedService(structs.ServiceName{Name: "d-service"})
-   mst2.TrackExportedService(structs.ServiceName{Name: "e-service"})
+   mst2.SetExportedServices([]structs.ServiceName{
+       {Name: "d-service"},
+       {Name: "e-service"},
+   })

    // pretend that the heartbeat happened
    mst2.TrackRecvHeartbeat()
@@ -1394,10 +1604,20 @@ func Test_Leader_PeeringSync_ServerAddressUpdates(t *testing.T) {
    maxRetryBackoff = 1
    t.Cleanup(func() { maxRetryBackoff = orig })

+   ca := connect.TestCA(t, nil)
    _, acceptor := testServerWithConfig(t, func(c *Config) {
        c.NodeName = "acceptor"
        c.Datacenter = "dc1"
        c.TLSConfig.Domain = "consul"
+       c.GRPCTLSPort = freeport.GetOne(t)
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
    })
    testrpc.WaitForLeader(t, acceptor.RPC, "dc1")
View File
@@ -2,6 +2,7 @@ package consul

import (
    "bufio"
+   "encoding/json"
    "fmt"
    "io"
    "os"
@@ -1457,7 +1458,7 @@ func TestLeader_ConfigEntryBootstrap_Fail(t *testing.T) {
                },
            },
        },
-       expectMessage: `Failed to apply configuration entry "service-splitter" / "web": discovery chain "web" uses a protocol "tcp" that does not permit advanced routing or splitting behavior"`,
+       expectMessage: `Failed to apply configuration entry "service-splitter" / "web": discovery chain "web" uses a protocol "tcp" that does not permit advanced routing or splitting behavior`,
    },
    {
        name: "service-intentions without migration",
@@ -1497,7 +1498,7 @@ func TestLeader_ConfigEntryBootstrap_Fail(t *testing.T) {
        serverCB: func(c *Config) {
            c.ConnectEnabled = false
        },
-       expectMessage: `Refusing to apply configuration entry "service-intentions" / "web" because Connect must be enabled to bootstrap intentions"`,
+       expectMessage: `Refusing to apply configuration entry "service-intentions" / "web" because Connect must be enabled to bootstrap intentions`,
    },
}
@@ -1516,9 +1517,11 @@ func TestLeader_ConfigEntryBootstrap_Fail(t *testing.T) {
            scan := bufio.NewScanner(pr)
            for scan.Scan() {
                line := scan.Text()
+               lineJson := map[string]interface{}{}
+               json.Unmarshal([]byte(line), &lineJson)

                if strings.Contains(line, "failed to establish leadership") {
-                   applyErrorLine = line
+                   applyErrorLine = lineJson["error"].(string)
                    ch <- ""
                    return
                }
@@ -1543,9 +1546,10 @@ func TestLeader_ConfigEntryBootstrap_Fail(t *testing.T) {
            }
            logger := hclog.NewInterceptLogger(&hclog.LoggerOptions{
                Name:       config.NodeName,
                Level:      testutil.TestLogLevel,
                Output:     io.MultiWriter(pw, testutil.NewLogBuffer(t)),
+               JSONFormat: true,
            })

            deps := newDefaultDeps(t, config)
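With `JSONFormat` enabled, each hclog line is a JSON object, which is what lets the scanner above pull the structured `error` field instead of matching raw text. A standalone sketch of that parsing; the sample line is only illustrative of hclog's JSON output:

```go
line := `{"@level":"error","@message":"failed to establish leadership","error":"boom"}`
fields := map[string]interface{}{}
if err := json.Unmarshal([]byte(line), &fields); err == nil {
	if msg, ok := fields["error"].(string); ok {
		fmt.Println(msg) // prints "boom"
	}
}
```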
View File
@@ -1,13 +1,14 @@
package consul

import (
-   "github.com/hashicorp/go-hclog"
    "google.golang.org/grpc"

    "github.com/hashicorp/consul-net-rpc/net/rpc"
+   "github.com/hashicorp/go-hclog"

    "github.com/hashicorp/consul/agent/consul/stream"
    "github.com/hashicorp/consul/agent/grpc-external/limiter"
+   "github.com/hashicorp/consul/agent/hcp"
    "github.com/hashicorp/consul/agent/pool"
    "github.com/hashicorp/consul/agent/router"
    "github.com/hashicorp/consul/agent/rpc/middleware"
@@ -31,6 +32,10 @@ type Deps struct {
    GetNetRPCInterceptorFunc func(recorder *middleware.RequestRecorder) rpc.ServerServiceCallInterceptor
    // NewRequestRecorderFunc provides a middleware.RequestRecorder for the server to use; it cannot be nil
    NewRequestRecorderFunc func(logger hclog.Logger, isLeader func() bool, localDC string) *middleware.RequestRecorder
+   // HCP contains the dependencies required when integrating with the HashiCorp Cloud Platform
+   HCP hcp.Deps
    EnterpriseDeps
}
View File
@@ -9,10 +9,14 @@ import (
    "github.com/hashicorp/consul/acl"
    "github.com/hashicorp/consul/acl/resolver"
+   "github.com/hashicorp/consul/agent/connect"
+   "github.com/hashicorp/consul/agent/consul/state"
    "github.com/hashicorp/consul/agent/consul/stream"
    "github.com/hashicorp/consul/agent/grpc-external/services/peerstream"
    "github.com/hashicorp/consul/agent/rpc/peering"
    "github.com/hashicorp/consul/agent/structs"
+   "github.com/hashicorp/consul/ipaddr"
+   "github.com/hashicorp/consul/lib"
    "github.com/hashicorp/consul/proto/pbpeering"
)
@@ -51,15 +55,68 @@ func (b *PeeringBackend) GetLeaderAddress() string {
    return b.leaderAddr
}

-// GetAgentCACertificates gets the server's raw CA data from its TLS Configurator.
-func (b *PeeringBackend) GetAgentCACertificates() ([]string, error) {
-   // TODO(peering): handle empty CA pems
-   return b.srv.tlsConfigurator.GRPCManualCAPems(), nil
+// GetTLSMaterials returns the TLS materials for the dialer to dial the acceptor using TLS.
+// It returns the server name to validate, and the CA certificate to validate with.
+func (b *PeeringBackend) GetTLSMaterials(generatingToken bool) (string, []string, error) {
+   if generatingToken {
+       if !b.srv.config.ConnectEnabled {
+           return "", nil, fmt.Errorf("connect.enabled must be set to true in the server's configuration when generating peering tokens")
+       }
+       if b.srv.config.GRPCTLSPort <= 0 && !b.srv.tlsConfigurator.GRPCServerUseTLS() {
+           return "", nil, fmt.Errorf("TLS for gRPC must be enabled when generating peering tokens")
+       }
+   }
+
+   roots, err := b.srv.getCARoots(nil, b.srv.fsm.State())
+   if err != nil {
+       return "", nil, fmt.Errorf("failed to fetch roots: %w", err)
+   }
+   if len(roots.Roots) == 0 || roots.TrustDomain == "" {
+       return "", nil, fmt.Errorf("CA has not finished initializing")
+   }
+
+   serverName := connect.PeeringServerSAN(b.srv.config.Datacenter, roots.TrustDomain)
+
+   var caPems []string
+   for _, r := range roots.Roots {
+       caPems = append(caPems, lib.EnsureTrailingNewline(r.RootCert))
+   }
+
+   return serverName, caPems, nil
}
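A hypothetical dialer-side sketch of how the returned materials would be consumed; apart from `GetTLSMaterials` itself, the names and the tls/x509 wiring here are assumptions, not code from this commit:

```go
serverName, caPems, err := backend.GetTLSMaterials(false)
if err != nil {
	return err
}
pool := x509.NewCertPool()
for _, pem := range caPems {
	pool.AppendCertsFromPEM([]byte(pem)) // trust the cluster's Connect CA roots
}
cfg := &tls.Config{
	RootCAs:    pool,
	ServerName: serverName, // e.g. "server.dc1.peering.<trust-domain>"
}
_ = cfg
```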
-// GetServerAddresses looks up server node addresses from the state store.
+// GetServerAddresses looks up server or mesh gateway addresses from the state store.
func (b *PeeringBackend) GetServerAddresses() ([]string, error) {
-   state := b.srv.fsm.State()
+   _, rawEntry, err := b.srv.fsm.State().ConfigEntry(nil, structs.MeshConfig, structs.MeshConfigMesh, acl.DefaultEnterpriseMeta())
+   if err != nil {
+       return nil, fmt.Errorf("failed to read mesh config entry: %w", err)
+   }
+
+   meshConfig, ok := rawEntry.(*structs.MeshConfigEntry)
+   if ok && meshConfig.Peering != nil && meshConfig.Peering.PeerThroughMeshGateways {
+       return meshGatewayAdresses(b.srv.fsm.State())
+   }
+   return serverAddresses(b.srv.fsm.State())
+}
+
+func meshGatewayAdresses(state *state.Store) ([]string, error) {
+   _, nodes, err := state.ServiceDump(nil, structs.ServiceKindMeshGateway, true, acl.DefaultEnterpriseMeta(), structs.DefaultPeerKeyword)
+   if err != nil {
+       return nil, fmt.Errorf("failed to dump gateway addresses: %w", err)
+   }
+
+   var addrs []string
+   for _, node := range nodes {
+       _, addr, port := node.BestAddress(true)
+       addrs = append(addrs, ipaddr.FormatAddressPort(addr, port))
+   }
+   if len(addrs) == 0 {
+       return nil, fmt.Errorf("servers are configured to PeerThroughMeshGateways, but no mesh gateway instances are registered")
+   }
+   return addrs, nil
+}
+
+func serverAddresses(state *state.Store) ([]string, error) {
    _, nodes, err := state.ServiceNodes(nil, "consul", structs.DefaultEnterpriseMetaInDefaultPartition(), structs.DefaultPeerKeyword)
    if err != nil {
        return nil, err
@@ -86,12 +143,6 @@ func (b *PeeringBackend) GetServerAddresses() ([]string, error) {
    return addrs, nil
}

-// GetServerName returns the SNI to be returned in the peering token data which
-// will be used by peers when establishing peering connections over TLS.
-func (b *PeeringBackend) GetServerName() string {
-   return b.srv.tlsConfigurator.ServerSNI(b.srv.config.Datacenter, "")
-}
-
// EncodeToken encodes a peering token as a base64-encoded representation of JSON (for now).
func (b *PeeringBackend) EncodeToken(tok *structs.PeeringToken) ([]byte, error) {
    jsonToken, err := json.Marshal(tok)
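The gateway branch in `GetServerAddresses` keys off the mesh config entry. A minimal sketch of the entry that flips address selection to mesh gateways, mirroring the test later in this commit:

```go
mesh := &structs.MeshConfigEntry{
	Peering: &structs.PeeringMeshConfig{PeerThroughMeshGateways: true},
}
// Once this entry is written, GetServerAddresses returns mesh gateway
// addresses instead of server addresses.
_ = mesh
```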
View File
@@ -11,7 +11,10 @@ import (
    "github.com/stretchr/testify/require"
    gogrpc "google.golang.org/grpc"

+   "github.com/hashicorp/consul/agent/connect"
+   "github.com/hashicorp/consul/agent/structs"
    "github.com/hashicorp/consul/proto/pbpeering"
+   "github.com/hashicorp/consul/sdk/freeport"
    "github.com/hashicorp/consul/testrpc"
)
@@ -21,9 +24,18 @@ func TestPeeringBackend_RejectsPartition(t *testing.T) {
    }
    t.Parallel()

+   ca := connect.TestCA(t, nil)
    _, s1 := testServerWithConfig(t, func(c *Config) {
-       c.Datacenter = "dc1"
-       c.Bootstrap = true
+       c.GRPCTLSPort = freeport.GetOne(t)
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
    })
    testrpc.WaitForLeader(t, s1.RPC, "dc1")
@@ -55,9 +67,17 @@ func TestPeeringBackend_IgnoresDefaultPartition(t *testing.T) {
    }
    t.Parallel()

+   ca := connect.TestCA(t, nil)
    _, s1 := testServerWithConfig(t, func(c *Config) {
-       c.Datacenter = "dc1"
-       c.Bootstrap = true
+       c.GRPCTLSPort = freeport.GetOne(t)
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
    })
    testrpc.WaitForLeader(t, s1.RPC, "dc1")
View File
@@ -2,34 +2,50 @@ package consul

import (
    "context"
+   "fmt"
    "net"
    "testing"
    "time"

-   "github.com/stretchr/testify/require"
    gogrpc "google.golang.org/grpc"

+   "github.com/hashicorp/consul/agent/connect"
    "github.com/hashicorp/consul/agent/pool"
+   "github.com/hashicorp/consul/agent/structs"
    "github.com/hashicorp/consul/proto/pbpeering"
    "github.com/hashicorp/consul/proto/pbpeerstream"
+   "github.com/hashicorp/consul/sdk/freeport"
    "github.com/hashicorp/consul/sdk/testutil"
    "github.com/hashicorp/consul/testrpc"
+   "github.com/hashicorp/consul/types"
+   "github.com/stretchr/testify/require"
)

func TestPeeringBackend_ForwardToLeader(t *testing.T) {
-   t.Parallel()
+   if testing.Short() {
+       t.Skip("too slow for testing.Short")
+   }

-   _, conf1 := testServerConfig(t)
-   server1, err := newServer(t, conf1)
-   require.NoError(t, err)
-
-   _, conf2 := testServerConfig(t)
-   conf2.Bootstrap = false
-   server2, err := newServer(t, conf2)
-   require.NoError(t, err)
+   ca := connect.TestCA(t, nil)
+   _, server1 := testServerWithConfig(t, func(c *Config) {
+       c.GRPCTLSPort = freeport.GetOne(t)
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
+   })
+   _, server2 := testServerWithConfig(t, func(c *Config) {
+       c.Bootstrap = false
+   })

    // Join a 2nd server (not the leader)
    testrpc.WaitForLeader(t, server1.RPC, "dc1")
+   testrpc.WaitForActiveCARoot(t, server1.RPC, "dc1", nil)
    joinLAN(t, server2, server1)
    testrpc.WaitForLeader(t, server2.RPC, "dc1")
@@ -60,6 +76,83 @@ func TestPeeringBackend_ForwardToLeader(t *testing.T) {
    })
}
+func TestPeeringBackend_GetServerAddresses(t *testing.T) {
+   if testing.Short() {
+       t.Skip("too slow for testing.Short")
+   }
+
+   _, cfg := testServerConfig(t)
+   cfg.GRPCTLSPort = freeport.GetOne(t)
+
+   srv, err := newServer(t, cfg)
+   require.NoError(t, err)
+   testrpc.WaitForLeader(t, srv.RPC, "dc1")
+
+   backend := NewPeeringBackend(srv)
+
+   testutil.RunStep(t, "peer to servers", func(t *testing.T) {
+       addrs, err := backend.GetServerAddresses()
+       require.NoError(t, err)
+
+       expect := fmt.Sprintf("127.0.0.1:%d", srv.config.GRPCTLSPort)
+       require.Equal(t, []string{expect}, addrs)
+   })
+
+   testutil.RunStep(t, "existence of mesh config entry is not enough to peer through gateways", func(t *testing.T) {
+       mesh := structs.MeshConfigEntry{
+           // Enable unrelated config.
+           TransparentProxy: structs.TransparentProxyMeshConfig{
+               MeshDestinationsOnly: true,
+           },
+       }
+
+       require.NoError(t, srv.fsm.State().EnsureConfigEntry(1, &mesh))
+       addrs, err := backend.GetServerAddresses()
+       require.NoError(t, err)
+
+       // Still expect server address because PeerThroughMeshGateways was not enabled.
+       expect := fmt.Sprintf("127.0.0.1:%d", srv.config.GRPCTLSPort)
+       require.Equal(t, []string{expect}, addrs)
+   })
+
+   testutil.RunStep(t, "cannot peer through gateways without registered gateways", func(t *testing.T) {
+       mesh := structs.MeshConfigEntry{
+           Peering: &structs.PeeringMeshConfig{PeerThroughMeshGateways: true},
+       }
+       require.NoError(t, srv.fsm.State().EnsureConfigEntry(1, &mesh))
+
+       addrs, err := backend.GetServerAddresses()
+       require.Nil(t, addrs)
+       testutil.RequireErrorContains(t, err,
+           "servers are configured to PeerThroughMeshGateways, but no mesh gateway instances are registered")
+   })
+
+   testutil.RunStep(t, "peer through mesh gateways", func(t *testing.T) {
+       reg := structs.RegisterRequest{
+           ID:      types.NodeID("b5489ca9-f5e9-4dba-a779-61fec4e8e364"),
+           Node:    "gw-node",
+           Address: "1.2.3.4",
+           TaggedAddresses: map[string]string{
+               structs.TaggedAddressWAN: "172.217.22.14",
+           },
+           Service: &structs.NodeService{
+               ID:      "mesh-gateway",
+               Service: "mesh-gateway",
+               Kind:    structs.ServiceKindMeshGateway,
+               Port:    443,
+               TaggedAddresses: map[string]structs.ServiceAddress{
+                   structs.TaggedAddressWAN: {Address: "154.238.12.252", Port: 8443},
+               },
+           },
+       }
+       require.NoError(t, srv.fsm.State().EnsureRegistration(2, &reg))
+
+       addrs, err := backend.GetServerAddresses()
+       require.NoError(t, err)
+       require.Equal(t, []string{"154.238.12.252:8443"}, addrs)
+   })
+}
func newServerDialer(serverAddr string) func(context.Context, string) (net.Conn, error) {
    return func(ctx context.Context, addr string) (net.Conn, error) {
        d := net.Dialer{}
@@ -79,19 +172,30 @@ func newServerDialer(serverAddr string) func(context.Context, string) (net.Conn,
    }
}

func TestPeerStreamService_ForwardToLeader(t *testing.T) {
-   t.Parallel()
+   if testing.Short() {
+       t.Skip("too slow for testing.Short")
+   }

-   _, conf1 := testServerConfig(t)
-   server1, err := newServer(t, conf1)
-   require.NoError(t, err)
-
-   _, conf2 := testServerConfig(t)
-   conf2.Bootstrap = false
-   server2, err := newServer(t, conf2)
-   require.NoError(t, err)
+   ca := connect.TestCA(t, nil)
+   _, server1 := testServerWithConfig(t, func(c *Config) {
+       c.GRPCTLSPort = freeport.GetOne(t)
+       c.CAConfig = &structs.CAConfiguration{
+           ClusterID: connect.TestClusterID,
+           Provider:  structs.ConsulCAProvider,
+           Config: map[string]interface{}{
+               "PrivateKey": ca.SigningKey,
+               "RootCert":   ca.RootCert,
+           },
+       }
+   })
+   _, server2 := testServerWithConfig(t, func(c *Config) {
+       c.Bootstrap = false
+   })

    // server1 is leader, server2 follower
    testrpc.WaitForLeader(t, server1.RPC, "dc1")
+   testrpc.WaitForActiveCARoot(t, server1.RPC, "dc1", nil)
    joinLAN(t, server2, server1)
    testrpc.WaitForLeader(t, server2.RPC, "dc1")
View File
@@ -42,7 +42,7 @@ func TestWalk_ServiceQuery(t *testing.T) {
        ".Tags[0]:tag1",
        ".Tags[1]:tag2",
        ".Tags[2]:tag3",
-       ".PeerName:",
+       ".Peer:",
    }
    expected = append(expected, entMetaWalkFields...)
    sort.Strings(expected)
View File
@@ -540,7 +540,7 @@ func (p *PreparedQuery) execute(query *structs.PreparedQuery,
        f = state.CheckConnectServiceNodes
    }

-   _, nodes, err := f(nil, query.Service.Service, &query.Service.EnterpriseMeta, query.Service.PeerName)
+   _, nodes, err := f(nil, query.Service.Service, &query.Service.EnterpriseMeta, query.Service.Peer)
    if err != nil {
        return err
    }
@@ -571,7 +571,7 @@ func (p *PreparedQuery) execute(query *structs.PreparedQuery,
    reply.DNS = query.DNS

    // Stamp the result with its datacenter or peer.
-   if peerName := query.Service.PeerName; peerName != "" {
+   if peerName := query.Service.Peer; peerName != "" {
        reply.PeerName = peerName
        reply.Datacenter = ""
    } else {
@@ -756,7 +756,7 @@ func queryFailover(q queryServer, query *structs.PreparedQuery,
            }
        }

-       if target.PeerName != "" {
+       if target.Peer != "" {
            targets = append(targets, target)
        }
    }
@@ -777,9 +777,9 @@ func queryFailover(q queryServer, query *structs.PreparedQuery,
        // Reset PeerName because it may have been set by a previous failover
        // target.
-       query.Service.PeerName = target.PeerName
+       query.Service.Peer = target.Peer
        dc := target.Datacenter
-       if target.PeerName != "" {
+       if target.Peer != "" {
            dc = q.GetLocalDC()
        }
@@ -798,7 +798,7 @@ func queryFailover(q queryServer, query *structs.PreparedQuery,
        if err = q.ExecuteRemote(remote, reply); err != nil {
            q.GetLogger().Warn("Failed querying for service in datacenter",
                "service", query.Service.Service,
-               "peerName", query.Service.PeerName,
+               "peerName", query.Service.Peer,
                "datacenter", dc,
                "error", err,
            )
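The rename is mechanical but broad: every struct that references a peer now uses `Peer` instead of `PeerName`. Illustrative values:

```go
consumer := structs.ServiceConsumer{Peer: "peer-1"}       // exported-services consumer
target := structs.QueryFailoverTarget{Peer: "cluster-01"} // prepared-query failover target
_, _ = consumer, target
```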
View File
@ -21,12 +21,14 @@ import (
"github.com/hashicorp/consul-net-rpc/net/rpc" "github.com/hashicorp/consul-net-rpc/net/rpc"
"github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/connect"
grpcexternal "github.com/hashicorp/consul/agent/grpc-external" grpcexternal "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/structs/aclfilter" "github.com/hashicorp/consul/agent/structs/aclfilter"
tokenStore "github.com/hashicorp/consul/agent/token" tokenStore "github.com/hashicorp/consul/agent/token"
"github.com/hashicorp/consul/api" "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto/pbpeering" "github.com/hashicorp/consul/proto/pbpeering"
"github.com/hashicorp/consul/sdk/freeport"
"github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/types" "github.com/hashicorp/consul/types"
@ -88,7 +90,7 @@ func TestPreparedQuery_Apply(t *testing.T) {
// Fix that and ensure Targets and NearestN cannot be set at the same time. // Fix that and ensure Targets and NearestN cannot be set at the same time.
query.Query.Service.Failover.NearestN = 1 query.Query.Service.Failover.NearestN = 1
query.Query.Service.Failover.Targets = []structs.QueryFailoverTarget{{PeerName: "peer"}} query.Query.Service.Failover.Targets = []structs.QueryFailoverTarget{{Peer: "peer"}}
err = msgpackrpc.CallWithCodec(codec, "PreparedQuery.Apply", &query, &reply) err = msgpackrpc.CallWithCodec(codec, "PreparedQuery.Apply", &query, &reply)
if err == nil || !strings.Contains(err.Error(), "Targets cannot be populated with") { if err == nil || !strings.Contains(err.Error(), "Targets cannot be populated with") {
t.Fatalf("bad: %v", err) t.Fatalf("bad: %v", err)
@ -97,7 +99,7 @@ func TestPreparedQuery_Apply(t *testing.T) {
// Fix that and ensure Targets and Datacenters cannot be set at the same time. // Fix that and ensure Targets and Datacenters cannot be set at the same time.
query.Query.Service.Failover.NearestN = 0 query.Query.Service.Failover.NearestN = 0
query.Query.Service.Failover.Datacenters = []string{"dc2"} query.Query.Service.Failover.Datacenters = []string{"dc2"}
query.Query.Service.Failover.Targets = []structs.QueryFailoverTarget{{PeerName: "peer"}} query.Query.Service.Failover.Targets = []structs.QueryFailoverTarget{{Peer: "peer"}}
err = msgpackrpc.CallWithCodec(codec, "PreparedQuery.Apply", &query, &reply) err = msgpackrpc.CallWithCodec(codec, "PreparedQuery.Apply", &query, &reply)
if err == nil || !strings.Contains(err.Error(), "Targets cannot be populated with") { if err == nil || !strings.Contains(err.Error(), "Targets cannot be populated with") {
t.Fatalf("bad: %v", err) t.Fatalf("bad: %v", err)
@ -1463,10 +1465,20 @@ func TestPreparedQuery_Execute(t *testing.T) {
s2.tokens.UpdateReplicationToken("root", tokenStore.TokenSourceConfig) s2.tokens.UpdateReplicationToken("root", tokenStore.TokenSourceConfig)
ca := connect.TestCA(t, nil)
dir3, s3 := testServerWithConfig(t, func(c *Config) { dir3, s3 := testServerWithConfig(t, func(c *Config) {
c.Datacenter = "dc3" c.Datacenter = "dc3"
c.PrimaryDatacenter = "dc3" c.PrimaryDatacenter = "dc3"
c.NodeName = "acceptingServer.dc3" c.NodeName = "acceptingServer.dc3"
c.GRPCTLSPort = freeport.GetOne(t)
c.CAConfig = &structs.CAConfiguration{
ClusterID: connect.TestClusterID,
Provider: structs.ConsulCAProvider,
Config: map[string]interface{}{
"PrivateKey": ca.SigningKey,
"RootCert": ca.RootCert,
},
}
}) })
defer os.RemoveAll(dir3) defer os.RemoveAll(dir3)
defer s3.Shutdown() defer s3.Shutdown()
@ -1493,13 +1505,15 @@ func TestPreparedQuery_Execute(t *testing.T) {
acceptingPeerName := "my-peer-accepting-server" acceptingPeerName := "my-peer-accepting-server"
dialingPeerName := "my-peer-dialing-server" dialingPeerName := "my-peer-dialing-server"
// Set up peering between dc1 (dailing) and dc3 (accepting) and export the foo service // Set up peering between dc1 (dialing) and dc3 (accepting) and export the foo service
{ {
// Create a peering by generating a token. // Create a peering by generating a token.
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
t.Cleanup(cancel) t.Cleanup(cancel)
ctx = grpcexternal.ContextWithToken(ctx, "root") options := structs.QueryOptions{Token: "root"}
ctx, err := grpcexternal.ContextWithQueryOptions(ctx, options)
require.NoError(t, err)
conn, err := grpc.DialContext(ctx, s3.config.RPCAddr.String(), conn, err := grpc.DialContext(ctx, s3.config.RPCAddr.String(),
grpc.WithContextDialer(newServerDialer(s3.config.RPCAddr.String())), grpc.WithContextDialer(newServerDialer(s3.config.RPCAddr.String())),
@ -1550,7 +1564,7 @@ func TestPreparedQuery_Execute(t *testing.T) {
Services: []structs.ExportedService{ Services: []structs.ExportedService{
{ {
Name: "foo", Name: "foo",
Consumers: []structs.ServiceConsumer{{PeerName: dialingPeerName}}, Consumers: []structs.ServiceConsumer{{Peer: dialingPeerName}},
}, },
}, },
}, },
@@ -2427,7 +2441,7 @@ func TestPreparedQuery_Execute(t *testing.T) {
	query.Query.Service.Failover = structs.QueryFailoverOptions{
		Targets: []structs.QueryFailoverTarget{
			{Datacenter: "dc2"},
-			{PeerName: acceptingPeerName},
+			{Peer: acceptingPeerName},
		},
	}
	require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID))
@@ -2948,7 +2962,7 @@ func (m *mockQueryServer) GetOtherDatacentersByDistance() ([]string, error) {
}

func (m *mockQueryServer) ExecuteRemote(args *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error {
-	peerName := args.Query.Service.PeerName
+	peerName := args.Query.Service.Peer
	dc := args.Datacenter
	if peerName != "" {
		m.QueryLog = append(m.QueryLog, fmt.Sprintf("peer:%s", peerName))
@@ -3300,15 +3314,15 @@ func TestPreparedQuery_queryFailover(t *testing.T) {
	// Failover returns data from the first cluster peer with data.
	query.Service.Failover.Datacenters = nil
	query.Service.Failover.Targets = []structs.QueryFailoverTarget{
-		{PeerName: "cluster-01"},
+		{Peer: "cluster-01"},
		{Datacenter: "dc44"},
-		{PeerName: "cluster-02"},
+		{Peer: "cluster-02"},
	}
	{
		mock := &mockQueryServer{
			Datacenters: []string{"dc44"},
			QueryFn: func(args *structs.PreparedQueryExecuteRemoteRequest, reply *structs.PreparedQueryExecuteResponse) error {
-				if args.Query.Service.PeerName == "cluster-02" {
+				if args.Query.Service.Peer == "cluster-02" {
					reply.Nodes = nodes()
				}
				return nil

View File

@@ -2,6 +2,7 @@ package consul

import (
	"context"
+	"crypto/x509"
	"errors"
	"fmt"
	"io"
@@ -17,6 +18,7 @@ import (
	"time"

	"github.com/armon/go-metrics"
+	"github.com/hashicorp/consul/agent/hcp"
	connlimit "github.com/hashicorp/go-connlimit"
	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-memdb"
@@ -60,6 +62,7 @@ import (
	"github.com/hashicorp/consul/proto/pbsubscribe"
	"github.com/hashicorp/consul/tlsutil"
	"github.com/hashicorp/consul/types"
+	cslversion "github.com/hashicorp/consul/version"
)

// NOTE The "consul.client.rpc" and "consul.client.rpc.exceeded" counters are defined in consul/client.go
@@ -379,6 +382,9 @@ type Server struct {
	// server is able to handle.
	xdsCapacityController *xdscapacity.Controller

+	// hcpManager handles pushing server status updates to the HashiCorp Cloud Platform when enabled
+	hcpManager *hcp.Manager
+
	// embedded struct to hold all the enterprise specific data
	EnterpriseServer
}
@@ -448,6 +454,12 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Server, error) {
		publisher: flat.EventPublisher,
	}

+	s.hcpManager = hcp.NewManager(hcp.ManagerConfig{
+		Client:   flat.HCP.Client,
+		StatusFn: s.hcpServerStatus(flat),
+		Logger:   logger.Named("hcp_manager"),
+	})
+
	var recorder *middleware.RequestRecorder
	if flat.NewRequestRecorderFunc != nil {
		recorder = flat.NewRequestRecorderFunc(serverLogger, s.IsLeader, s.config.Datacenter)
@@ -789,6 +801,9 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server) (*Server, error) {
	// Start the metrics handlers.
	go s.updateMetrics()

+	// Now that we are set up, configure the HCP manager
+	go s.hcpManager.Run(&lib.StopChannelContext{StopCh: shutdownCh})
+
	return s, nil
}
@@ -1712,6 +1727,9 @@ func (s *Server) trackLeaderChanges() {
			s.grpcLeaderForwarder.UpdateLeaderAddr(s.config.Datacenter, string(leaderObs.LeaderAddr))
			s.peeringBackend.SetLeaderAddress(string(leaderObs.LeaderAddr))

+			// Trigger sending an update to HCP status
+			s.hcpManager.SendUpdate()
		case <-s.shutdownCh:
			s.raft.DeregisterObserver(observer)
			return
@@ -1719,6 +1737,61 @@ func (s *Server) trackLeaderChanges() {
	}
}

+// hcpServerStatus is the callback used by the HCP manager to emit status updates to the HashiCorp Cloud Platform when
+// enabled.
+func (s *Server) hcpServerStatus(deps Deps) hcp.StatusCallback {
+	return func(ctx context.Context) (status hcp.ServerStatus, err error) {
+		status.Name = s.config.NodeName
+		status.ID = string(s.config.NodeID)
+		status.Version = cslversion.GetHumanVersion()
+		status.LanAddress = s.config.RPCAdvertise.IP.String()
+		status.GossipPort = s.config.SerfLANConfig.MemberlistConfig.AdvertisePort
+		status.RPCPort = s.config.RPCAddr.Port
+
+		tlsCert := s.tlsConfigurator.Cert()
+		if tlsCert != nil {
+			status.TLS.Enabled = true
+			leaf := tlsCert.Leaf
+			if leaf == nil {
+				// Parse the leaf cert
+				leaf, err = x509.ParseCertificate(tlsCert.Certificate[0])
+				if err != nil {
+					// Shouldn't be possible
+					return
+				}
+			}
+			status.TLS.CertName = leaf.Subject.CommonName
+			status.TLS.CertSerial = leaf.SerialNumber.String()
+			status.TLS.CertExpiry = leaf.NotAfter
+			status.TLS.VerifyIncoming = s.tlsConfigurator.VerifyIncomingRPC()
+			status.TLS.VerifyOutgoing = s.tlsConfigurator.Base().InternalRPC.VerifyOutgoing
+			status.TLS.VerifyServerHostname = s.tlsConfigurator.VerifyServerHostname()
+		}
+
+		status.Raft.IsLeader = s.raft.State() == raft.Leader
+		_, leaderID := s.raft.LeaderWithID()
+		status.Raft.KnownLeader = leaderID != ""
+		status.Raft.AppliedIndex = s.raft.AppliedIndex()
+		if !status.Raft.IsLeader {
+			status.Raft.TimeSinceLastContact = time.Since(s.raft.LastContact())
+		}
+
+		apState := s.autopilot.GetState()
+		status.Autopilot.Healthy = apState.Healthy
+		status.Autopilot.FailureTolerance = apState.FailureTolerance
+		status.Autopilot.NumServers = len(apState.Servers)
+		status.Autopilot.NumVoters = len(apState.Voters)
+		status.Autopilot.MinQuorum = int(s.getAutopilotConfigOrDefault().MinQuorum)
+
+		status.ScadaStatus = "unknown"
+		if deps.HCP.Provider != nil {
+			status.ScadaStatus = deps.HCP.Provider.SessionStatus()
+		}
+
+		return status, nil
+	}
+}

// peersInfoContent is used to help operators understand what happened to the
// peers.json file. This is written to a file called peers.info in the same
// location.
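The status callback above is handed to the HCP manager as a closure. As a minimal illustration of that pattern (not the actual `hcp.Manager`, whose implementation is outside this diff), a consumer of such a callback might look like the sketch below; `ServerStatus` and `StatusCallback` here are simplified stand-ins for the `hcp` types:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// ServerStatus is a stand-in for hcp.ServerStatus; only one field is shown.
type ServerStatus struct{ Name string }

// StatusCallback mirrors the shape of the callback returned by hcpServerStatus.
type StatusCallback func(context.Context) (ServerStatus, error)

// runStatusLoop invokes the callback on an interval until ctx is cancelled,
// roughly how a manager might periodically push server status upstream.
func runStatusLoop(ctx context.Context, interval time.Duration, fn StatusCallback) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			status, err := fn(ctx)
			if err != nil {
				fmt.Println("status callback failed:", err)
				continue
			}
			fmt.Println("would push status for", status.Name)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	runStatusLoop(ctx, time.Second, func(context.Context) (ServerStatus, error) {
		return ServerStatus{Name: "server-1"}, nil
	})
}
```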

View File

@@ -153,11 +153,11 @@ func (s *Server) setupSerfConfig(opts setupSerfOptions) (*serf.Config, error) {
	serfLogger := s.logger.
		NamedIntercept(logging.Serf).
		NamedIntercept(subLoggerName).
-		StandardLoggerIntercept(&hclog.StandardLoggerOptions{InferLevels: true})
+		StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true})
	memberlistLogger := s.logger.
		NamedIntercept(logging.Memberlist).
		NamedIntercept(subLoggerName).
-		StandardLoggerIntercept(&hclog.StandardLoggerOptions{InferLevels: true})
+		StandardLogger(&hclog.StandardLoggerOptions{InferLevels: true})

	conf.MemberlistConfig.Logger = memberlistLogger
	conf.Logger = serfLogger

View File

@@ -1,6 +1,7 @@
package consul

import (
+	"context"
	"crypto/tls"
	"crypto/x509"
	"fmt"
@@ -15,10 +16,12 @@ import (
	"github.com/armon/go-metrics"
	"github.com/google/tcpproxy"
+	"github.com/hashicorp/consul/agent/hcp"
	"github.com/hashicorp/go-hclog"
-	"github.com/hashicorp/go-uuid"
+	uuid "github.com/hashicorp/go-uuid"
	"github.com/hashicorp/memberlist"
	"github.com/hashicorp/raft"
+	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"golang.org/x/time/rate"
	"google.golang.org/grpc"
@@ -229,7 +232,7 @@ func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *Server) {
	}

	// Apply config to copied fields because many tests only set the old
-	//values.
+	// values.
	config.ACLResolverSettings.ACLsEnabled = config.ACLsEnabled
	config.ACLResolverSettings.NodeName = config.NodeName
	config.ACLResolverSettings.Datacenter = config.Datacenter
@@ -244,15 +247,32 @@ func testServerWithConfig(t *testing.T, configOpts ...func(*Config)) (string, *Server) {
	})
	t.Cleanup(func() { srv.Shutdown() })

-	if srv.config.GRPCPort > 0 {
+	for _, grpcPort := range []int{srv.config.GRPCPort, srv.config.GRPCTLSPort} {
+		if grpcPort == 0 {
+			continue
+		}

		// Normally the gRPC server listener is created at the agent level and
		// passed down into the Server creation.
-		externalGRPCAddr := fmt.Sprintf("127.0.0.1:%d", srv.config.GRPCPort)
-		ln, err := net.Listen("tcp", externalGRPCAddr)
+		ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", grpcPort))
		require.NoError(t, err)

-		// Wrap the listener with TLS
-		if deps.TLSConfigurator.GRPCServerUseTLS() {
+		if grpcPort == srv.config.GRPCTLSPort || deps.TLSConfigurator.GRPCServerUseTLS() {
+			// Set the internally managed server certificate. The cert manager is hooked to the Agent, so we need to bypass that here.
+			if srv.config.PeeringEnabled && srv.config.ConnectEnabled {
+				key, _ := srv.config.CAConfig.Config["PrivateKey"].(string)
+				cert, _ := srv.config.CAConfig.Config["RootCert"].(string)
+				if key != "" && cert != "" {
+					ca := &structs.CARoot{
+						SigningKey: key,
+						RootCert:   cert,
+					}
+					require.NoError(t, deps.TLSConfigurator.UpdateAutoTLSCert(connect.TestServerLeaf(t, srv.config.Datacenter, ca)))
+					deps.TLSConfigurator.UpdateAutoTLSPeeringServerName(connect.PeeringServerSAN("dc1", connect.TestTrustDomain))
+				}
+			}
+
+			// Wrap the listener with TLS.
			ln = tls.NewListener(ln, deps.TLSConfigurator.IncomingGRPCConfig())
		}
@@ -2012,3 +2032,27 @@ func TestServer_Peering_LeadershipCheck(t *testing.T) {
	// test corollary by transitivity to future-proof against any setup bugs
	require.NotEqual(t, s2.config.RPCAddr.String(), peeringLeaderAddr)
}

+func TestServer_hcpManager(t *testing.T) {
+	_, conf1 := testServerConfig(t)
+	conf1.BootstrapExpect = 1
+	conf1.RPCAdvertise = &net.TCPAddr{IP: []byte{127, 0, 0, 2}, Port: conf1.RPCAddr.Port}
+	hcp1 := hcp.NewMockClient(t)
+	hcp1.EXPECT().PushServerStatus(mock.Anything, mock.MatchedBy(func(status *hcp.ServerStatus) bool {
+		return status.ID == string(conf1.NodeID)
+	})).Run(func(ctx context.Context, status *hcp.ServerStatus) {
+		require.Equal(t, status.LanAddress, "127.0.0.2")
+	}).Call.Return(nil)
+
+	deps1 := newDefaultDeps(t, conf1)
+	deps1.HCP.Client = hcp1
+	s1, err := newServerWithDeps(t, conf1, deps1)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer s1.Shutdown()
+	require.NotNil(t, s1.hcpManager)
+	waitForLeaderEstablishment(t, s1)
+	hcp1.AssertExpectations(t)
+}

View File

@@ -150,7 +150,6 @@ func (m *CertManager) watchServerToken(ctx context.Context) {
			// Cancel the existing leaf cert watch and spin up a new one any time the server token changes.
			// The watch needs the current token as set by the leader since certificate signing requests go to the leader.
-			fmt.Println("canceling and resetting")
			cancel()
			notifyCtx, cancel = context.WithCancel(ctx)

View File

@@ -63,7 +63,7 @@ func TestStore_peersForService(t *testing.T) {
				Name: "not-" + queryName,
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "zip",
+						Peer: "zip",
					},
				},
			},
@@ -80,7 +80,7 @@ func TestStore_peersForService(t *testing.T) {
				Name: "not-" + queryName,
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "zip",
+						Peer: "zip",
					},
				},
			},
@@ -88,10 +88,10 @@ func TestStore_peersForService(t *testing.T) {
				Name: structs.WildcardSpecifier,
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "bar",
+						Peer: "bar",
					},
					{
-						PeerName: "baz",
+						Peer: "baz",
					},
				},
			},
@@ -108,7 +108,7 @@ func TestStore_peersForService(t *testing.T) {
				Name: queryName,
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "baz",
+						Peer: "baz",
					},
				},
			},
@@ -116,7 +116,7 @@ func TestStore_peersForService(t *testing.T) {
				Name: structs.WildcardSpecifier,
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "zip",
+						Peer: "zip",
					},
				},
			},

View File

@@ -1569,7 +1569,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
				Name: "default",
				Services: []structs.ExportedService{{
					Name: "main",
-					Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
+					Consumers: []structs.ServiceConsumer{{Peer: "my-peer"}},
				}},
			},
			expectErr: `contains cross-datacenter resolver redirect`,
@@ -1588,7 +1588,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
				Name: "default",
				Services: []structs.ExportedService{{
					Name: "*",
-					Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
+					Consumers: []structs.ServiceConsumer{{Peer: "my-peer"}},
				}},
			},
			expectErr: `contains cross-datacenter resolver redirect`,
@@ -1609,7 +1609,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
				Name: "default",
				Services: []structs.ExportedService{{
					Name: "main",
-					Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
+					Consumers: []structs.ServiceConsumer{{Peer: "my-peer"}},
				}},
			},
			expectErr: `contains cross-datacenter failover`,
@@ -1630,7 +1630,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
				Name: "default",
				Services: []structs.ExportedService{{
					Name: "*",
-					Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
+					Consumers: []structs.ServiceConsumer{{Peer: "my-peer"}},
				}},
			},
			expectErr: `contains cross-datacenter failover`,
@@ -1641,7 +1641,7 @@ func TestStore_ConfigEntry_GraphValidation(t *testing.T) {
				Name: "default",
				Services: []structs.ExportedService{{
					Name: "main",
-					Consumers: []structs.ServiceConsumer{{PeerName: "my-peer"}},
+					Consumers: []structs.ServiceConsumer{{Peer: "my-peer"}},
				}},
			},
		},

View File

@@ -584,10 +584,7 @@ func (s *Store) PeeringWrite(idx uint64, req *pbpeering.PeeringWriteRequest) error {
		if req.Peering.State == pbpeering.PeeringState_UNDEFINED {
			req.Peering.State = existing.State
		}
-		// TODO(peering): Confirm behavior when /peering/token is called more than once.
-		// We may need to avoid clobbering existing values.
-		req.Peering.ImportedServiceCount = existing.ImportedServiceCount
-		req.Peering.ExportedServiceCount = existing.ExportedServiceCount
+		req.Peering.StreamStatus = nil
		req.Peering.CreateIndex = existing.CreateIndex
		req.Peering.ModifyIndex = idx
	} else {
@@ -792,7 +789,7 @@ func exportedServicesForPeerTxn(
			// Service was covered by a wildcard that was already accounted for
			continue
		}
-		if consumer.PeerName != peering.Name {
+		if consumer.Peer != peering.Name {
			continue
		}
		sawPeer = true
@@ -938,7 +935,7 @@ func listServicesExportedToAnyPeerByConfigEntry(
		sawPeer := false
		for _, consumer := range svc.Consumers {
-			if consumer.PeerName == "" {
+			if consumer.Peer == "" {
				continue
			}
			sawPeer = true
@@ -1310,8 +1307,8 @@ func peersForServiceTxn(
	}

	for _, c := range entry.Services[targetIdx].Consumers {
-		if c.PeerName != "" {
-			results = append(results, c.PeerName)
+		if c.Peer != "" {
+			results = append(results, c.Peer)
		}
	}
	return idx, results, nil

View File

@@ -1686,19 +1686,19 @@ func TestStateStore_ExportedServicesForPeer(t *testing.T) {
			{
				Name: "mysql",
				Consumers: []structs.ServiceConsumer{
-					{PeerName: "my-peering"},
+					{Peer: "my-peering"},
				},
			},
			{
				Name: "redis",
				Consumers: []structs.ServiceConsumer{
-					{PeerName: "my-peering"},
+					{Peer: "my-peering"},
				},
			},
			{
				Name: "mongo",
				Consumers: []structs.ServiceConsumer{
-					{PeerName: "my-other-peering"},
+					{Peer: "my-other-peering"},
				},
			},
		},
@@ -1758,7 +1758,7 @@ func TestStateStore_ExportedServicesForPeer(t *testing.T) {
			{
				Name: "*",
				Consumers: []structs.ServiceConsumer{
-					{PeerName: "my-peering"},
+					{Peer: "my-peering"},
				},
			},
		},
@@ -2046,10 +2046,10 @@ func TestStateStore_PeeringsForService(t *testing.T) {
				Name: "foo",
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "peer1",
+						Peer: "peer1",
					},
					{
-						PeerName: "peer2",
+						Peer: "peer2",
					},
				},
			},
@@ -2090,7 +2090,7 @@ func TestStateStore_PeeringsForService(t *testing.T) {
				Name: "foo",
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "peer1",
+						Peer: "peer1",
					},
				},
			},
@@ -2098,7 +2098,7 @@ func TestStateStore_PeeringsForService(t *testing.T) {
				Name: "bar",
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "peer2",
+						Peer: "peer2",
					},
				},
			},
@@ -2148,10 +2148,10 @@ func TestStateStore_PeeringsForService(t *testing.T) {
				Name: "*",
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "peer1",
+						Peer: "peer1",
					},
					{
-						PeerName: "peer2",
+						Peer: "peer2",
					},
				},
			},
@@ -2159,7 +2159,7 @@ func TestStateStore_PeeringsForService(t *testing.T) {
				Name: "bar",
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "peer3",
+						Peer: "peer3",
					},
				},
			},
@@ -2261,7 +2261,7 @@ func TestStore_TrustBundleListByService(t *testing.T) {
				Name: "foo",
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "peer1",
+						Peer: "peer1",
					},
				},
			},
@@ -2318,7 +2318,7 @@ func TestStore_TrustBundleListByService(t *testing.T) {
				Name: "foo",
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "peer1",
+						Peer: "peer1",
					},
				},
			},
@@ -2371,10 +2371,10 @@ func TestStore_TrustBundleListByService(t *testing.T) {
				Name: "foo",
				Consumers: []structs.ServiceConsumer{
					{
-						PeerName: "peer1",
+						Peer: "peer1",
					},
					{
-						PeerName: "peer2",
+						Peer: "peer2",
					},
				},
			},

View File

@@ -5,16 +5,16 @@ import (
	"encoding/hex"
	"errors"
	"fmt"
+	"math"
	"net"
	"regexp"
	"strings"
	"sync/atomic"
	"time"

-	"github.com/armon/go-metrics"
	"github.com/armon/go-metrics/prometheus"
-	"github.com/armon/go-radix"
+	metrics "github.com/armon/go-metrics"
+	radix "github.com/armon/go-radix"
	"github.com/coredns/coredns/plugin/pkg/dnsutil"
	"github.com/hashicorp/go-hclog"
	"github.com/miekg/dns"
@@ -61,6 +61,13 @@ const (
	staleCounterThreshold = 5 * time.Second

	defaultMaxUDPSize = 512

+	// If a consumer sets a buffer size greater than this amount we will default it down
+	// to this amount to ensure that consul does respond. Previously, if a consumer had a
+	// buffer size larger than 65535 - 60 bytes (a maximum of 60 bytes for the IP header;
+	// the 8-byte UDP header is subtracted later, in the trimUDP call), consul would fail
+	// to respond and the consumer would time out the request.
+	maxUDPDatagramSize = math.MaxUint16 - 68
)

type dnsSOAConfig struct {
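A short, illustrative sketch of the clamping behavior the new `maxUDPDatagramSize` constant enables. `effectiveMaxSize` is a hypothetical helper that mirrors the intent of `trimUDPResponse`, not the actual implementation:

```go
package main

import (
	"fmt"
	"math"
)

const (
	defaultMaxUDPSize  = 512
	maxUDPDatagramSize = math.MaxUint16 - 68 // leave room for IP + UDP headers
)

// effectiveMaxSize derives a response size limit from the client's advertised
// EDNS0 buffer, then clamps it so the datagram can actually be delivered.
func effectiveMaxSize(ednsBuf int) int {
	maxSize := defaultMaxUDPSize
	if ednsBuf > maxSize {
		maxSize = ednsBuf
	}
	if maxSize > maxUDPDatagramSize {
		maxSize = maxUDPDatagramSize
	}
	return maxSize
}

func main() {
	fmt.Println(effectiveMaxSize(4096))           // 4096: honored as-is
	fmt.Println(effectiveMaxSize(math.MaxUint16)) // clamped to 65467
}
```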
@@ -139,13 +146,13 @@ func NewDNSServer(a *Agent) (*DNSServer, error) {
	// Make sure domains are FQDN, make them case insensitive for ServeMux
	domain := dns.Fqdn(strings.ToLower(a.config.DNSDomain))
	altDomain := dns.Fqdn(strings.ToLower(a.config.DNSAltDomain))

	srv := &DNSServer{
		agent:                 a,
		domain:                domain,
		altDomain:             altDomain,
		logger:                a.logger.Named(logging.DNS),
		defaultEnterpriseMeta: *a.AgentEnterpriseMeta(),
+		mux:                   dns.NewServeMux(),
	}
	cfg, err := GetDNSConfig(a.config)
	if err != nil {
@@ -153,6 +160,19 @@ func NewDNSServer(a *Agent) (*DNSServer, error) {
	}
	srv.config.Store(cfg)

+	srv.mux.HandleFunc("arpa.", srv.handlePtr)
+	srv.mux.HandleFunc(srv.domain, srv.handleQuery)
+	// this is not an empty string check because NewDNSServer will have
+	// converted the configured alt domain into an FQDN which will ensure that
+	// the value ends with a ".". Therefore "." is the empty string equivalent
+	// for originally having no alternate domain set. If there is a reason
+	// why consul should be configured to handle the root zone I have yet
+	// to think of it.
+	if srv.altDomain != "." {
+		srv.mux.HandleFunc(srv.altDomain, srv.handleQuery)
+	}
+	srv.toggleRecursorHandlerFromConfig(cfg)
+
	return srv, nil
}
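The `"."` sentinel check above relies on `dns.Fqdn` turning an empty alt domain into the root zone. A small, illustrative demonstration:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	altDomain := "" // no alternate domain configured
	// NewDNSServer normalizes the configured value the same way:
	fqdn := dns.Fqdn(strings.ToLower(altDomain))
	// An unset alt domain becomes ".", so "." is the empty-string equivalent
	// and no handler should be registered for it.
	fmt.Println(fqdn == ".") // true
}
```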
@@ -227,22 +247,6 @@ func (cfg *dnsConfig) GetTTLForService(service string) (time.Duration, bool) {
}

func (d *DNSServer) ListenAndServe(network, addr string, notif func()) error {
-	cfg := d.config.Load().(*dnsConfig)
-
-	d.mux = dns.NewServeMux()
-	d.mux.HandleFunc("arpa.", d.handlePtr)
-	d.mux.HandleFunc(d.domain, d.handleQuery)
-	// this is not an empty string check because NewDNSServer will have
-	// converted the configured alt domain into an FQDN which will ensure that
-	// the value ends with a ".". Therefore "." is the empty string equivalent
-	// for originally having no alternate domain set. If there is a reason
-	// why consul should be configured to handle the root zone I have yet
-	// to think of it.
-	if d.altDomain != "." {
-		d.mux.HandleFunc(d.altDomain, d.handleQuery)
-	}
-	d.toggleRecursorHandlerFromConfig(cfg)
-
	d.Server = &dns.Server{
		Addr: addr,
		Net:  network,
@@ -1258,6 +1262,11 @@ func trimUDPResponse(req, resp *dns.Msg, udpAnswerLimit int) (trimmed bool) {
			maxSize = int(size)
		}
	}

+	// Override maxSize if it exceeds the maximum UDP datagram size;
+	// reliability guarantees disappear beyond that amount.
+	if maxSize > maxUDPDatagramSize {
+		maxSize = maxUDPDatagramSize
+	}

	// We avoid some function calls and allocations by only handling the
	// extra data when necessary.
@@ -1286,8 +1295,9 @@ func trimUDPResponse(req, resp *dns.Msg, udpAnswerLimit int) (trimmed bool) {
	// will allow our responses to be compliant even if some downstream server
	// uncompresses them.
	// Even when size is too big for one single record, try to send it anyway
-	// (useful for 512 bytes messages)
-	for len(resp.Answer) > 1 && resp.Len() > maxSize-7 {
+	// (useful for 512 bytes messages). 8 is removed from maxSize to ensure that we account
+	// for the udp header (8 bytes).
+	for len(resp.Answer) > 1 && resp.Len() > maxSize-8 {
		// first try to remove the NS section, maybe it will truncate enough
		if len(resp.Ns) != 0 {
			resp.Ns = []dns.RR{}

View File

@@ -3,6 +3,7 @@ package agent

import (
	"errors"
	"fmt"
+	"math"
	"math/rand"
	"net"
	"reflect"
@@ -7563,6 +7564,55 @@ func TestDNS_trimUDPResponse_TrimSizeEDNS(t *testing.T) {
	}
}

+func TestDNS_trimUDPResponse_TrimSizeMaxSize(t *testing.T) {
+	t.Parallel()
+	cfg := loadRuntimeConfig(t, `node_name = "test" data_dir = "a" bind_addr = "127.0.0.1" node_name = "dummy"`)
+
+	resp := &dns.Msg{}
+
+	for i := 0; i < 600; i++ {
+		target := fmt.Sprintf("ip-10-0-1-%d.node.dc1.consul.", 150+i)
+		srv := &dns.SRV{
+			Hdr: dns.RR_Header{
+				Name:   "redis-cache-redis.service.consul.",
+				Rrtype: dns.TypeSRV,
+				Class:  dns.ClassINET,
+			},
+			Target: target,
+		}
+		a := &dns.A{
+			Hdr: dns.RR_Header{
+				Name:   target,
+				Rrtype: dns.TypeA,
+				Class:  dns.ClassINET,
+			},
+			A: net.ParseIP(fmt.Sprintf("10.0.1.%d", 150+i)),
+		}
+
+		resp.Answer = append(resp.Answer, srv)
+		resp.Extra = append(resp.Extra, a)
+	}
+
+	reqEDNS, respEDNS := &dns.Msg{}, &dns.Msg{}
+	reqEDNS.SetEdns0(math.MaxUint16, true)
+	respEDNS.Answer = append(respEDNS.Answer, resp.Answer...)
+	respEDNS.Extra = append(respEDNS.Extra, resp.Extra...)
+	require.Greater(t, respEDNS.Len(), math.MaxUint16)
+	t.Logf("length is: %v", respEDNS.Len())
+
+	if trimmed := trimUDPResponse(reqEDNS, respEDNS, cfg.DNSUDPAnswerLimit); !trimmed {
+		t.Errorf("expected edns to be trimmed: %#v", resp)
+	}
+	require.Greater(t, math.MaxUint16, respEDNS.Len())
+	t.Logf("length is: %v", respEDNS.Len())
+	if len(respEDNS.Answer) == 0 || len(respEDNS.Answer) != len(respEDNS.Extra) {
+		t.Errorf("bad edns answer length: %#v", resp)
+	}
+}

func TestDNS_syncExtra(t *testing.T) {
	t.Parallel()
	resp := &dns.Msg{

View File

@@ -0,0 +1,58 @@
package external
import (
"context"
"fmt"
"github.com/hashicorp/consul/agent/structs"
"github.com/mitchellh/mapstructure"
"google.golang.org/grpc/metadata"
)
// QueryOptionsFromContext returns the query options in the gRPC metadata attached to the
// given context.
func QueryOptionsFromContext(ctx context.Context) (structs.QueryOptions, error) {
options := structs.QueryOptions{}
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return options, nil
}
m := map[string]string{}
for k, v := range md {
m[k] = v[0]
}
config := &mapstructure.DecoderConfig{
Metadata: nil,
Result: &options,
WeaklyTypedInput: true,
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
}
decoder, err := mapstructure.NewDecoder(config)
if err != nil {
return structs.QueryOptions{}, err
}
err = decoder.Decode(m)
if err != nil {
return structs.QueryOptions{}, err
}
return options, nil
}
// ContextWithQueryOptions returns a context with the given query options attached.
func ContextWithQueryOptions(ctx context.Context, options structs.QueryOptions) (context.Context, error) {
md := metadata.MD{}
m := map[string]interface{}{}
err := mapstructure.Decode(options, &m)
if err != nil {
return nil, err
}
for k, v := range m {
md.Set(k, fmt.Sprintf("%v", v))
}
return metadata.NewOutgoingContext(ctx, md), nil
}

View File

@@ -0,0 +1,39 @@
package external
import (
"context"
"testing"
"time"
"github.com/hashicorp/consul/agent/structs"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/metadata"
)
func TestQueryOptionsFromContextRoundTrip(t *testing.T) {
expected := structs.QueryOptions{
Token: "123",
AllowStale: true,
MinQueryIndex: uint64(10),
MaxAge: 1 * time.Hour,
}
ctx, err := ContextWithQueryOptions(context.Background(), expected)
if err != nil {
t.Fatal(err)
}
out, ok := metadata.FromOutgoingContext(ctx)
if !ok {
t.Fatalf("cannot get metadata from context")
}
ctx = metadata.NewIncomingContext(ctx, out)
actual, err := QueryOptionsFromContext(ctx)
if err != nil {
t.Fatal(err)
}
require.Equal(t, expected, actual)
}
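A detail the round trip above depends on, shown as an illustrative sketch: gRPC metadata lowercases keys (`Token` becomes `token`) and values travel as strings, so `QueryOptionsFromContext` relies on mapstructure's case-insensitive field matching plus `WeaklyTypedInput` to recover the typed fields:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// opts is a simplified stand-in for structs.QueryOptions.
type opts struct {
	Token         string
	MinQueryIndex uint64
}

func main() {
	// What the server sees after the gRPC metadata round trip: lowercased
	// keys and stringified values.
	in := map[string]string{"token": "123", "minqueryindex": "10"}

	var out opts
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:           &out,
		WeaklyTypedInput: true, // converts "10" back into a uint64
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(in); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Token:123 MinQueryIndex:10}
}
```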

View File

@@ -25,7 +25,10 @@ func (s *Server) Sign(ctx context.Context, req *pbconnectca.SignRequest) (*pbconnectca.SignResponse, error) {
	logger := s.Logger.Named("sign").With("request_id", external.TraceID())
	logger.Trace("request received")

-	token := external.TokenFromContext(ctx)
+	options, err := external.QueryOptionsFromContext(ctx)
+	if err != nil {
+		return nil, err
+	}

	if req.Csr == "" {
		return nil, status.Error(codes.InvalidArgument, "CSR is required")
@@ -43,7 +46,7 @@ func (s *Server) Sign(ctx context.Context, req *pbconnectca.SignRequest) (*pbconnectca.SignResponse, error) {
		structs.WriteRequest
		structs.DCSpecificRequest
	}
-	rpcInfo.Token = token
+	rpcInfo.Token = options.Token

	var rsp *pbconnectca.SignResponse
	handled, err := s.ForwardRPC(&rpcInfo, func(conn *grpc.ClientConn) error {
@@ -62,7 +65,7 @@ func (s *Server) Sign(ctx context.Context, req *pbconnectca.SignRequest) (*pbconnectca.SignResponse, error) {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}

-	authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(token, nil, nil)
+	authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(options.Token, nil, nil)
	if err != nil {
		return nil, status.Error(codes.Unauthenticated, err.Error())
	}

View File

@@ -32,7 +32,10 @@ func (s *Server) WatchRoots(_ *pbconnectca.WatchRootsRequest, serverStream pbconnectca.ConnectCAService_WatchRootsServer) error {
	logger.Trace("starting stream")
	defer logger.Trace("stream closed")

-	token := external.TokenFromContext(serverStream.Context())
+	options, err := external.QueryOptionsFromContext(serverStream.Context())
+	if err != nil {
+		return err
+	}

	// Serve the roots from an EventPublisher subscription. If the subscription is
	// closed due to an ACL change, we'll attempt to re-authorize and resume it to
@@ -40,7 +43,7 @@ func (s *Server) WatchRoots(_ *pbconnectca.WatchRootsRequest, serverStream pbconnectca.ConnectCAService_WatchRootsServer) error {
	var idx uint64
	for {
		var err error
-		idx, err = s.serveRoots(token, idx, serverStream, logger)
+		idx, err = s.serveRoots(options.Token, idx, serverStream, logger)
		if errors.Is(err, stream.ErrSubForceClosed) {
			logger.Trace("subscription force-closed due to an ACL change or snapshot restore, will attempt to re-auth and resume")
		} else {

View File

@@ -56,7 +56,9 @@ func TestWatchRoots_Success(t *testing.T) {
	aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything).
		Return(testutils.TestAuthorizerServiceWriteAny(t), nil)

-	ctx := external.ContextWithToken(context.Background(), testACLToken)
+	options := structs.QueryOptions{Token: testACLToken}
+	ctx, err := external.ContextWithQueryOptions(context.Background(), options)
+	require.NoError(t, err)

	server := NewServer(Config{
		Publisher: publisher,
@@ -104,7 +106,9 @@ func TestWatchRoots_InvalidACLToken(t *testing.T) {
	aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything).
		Return(resolver.Result{}, acl.ErrNotFound)

-	ctx := external.ContextWithToken(context.Background(), testACLToken)
+	options := structs.QueryOptions{Token: testACLToken}
+	ctx, err := external.ContextWithQueryOptions(context.Background(), options)
+	require.NoError(t, err)

	server := NewServer(Config{
		Publisher: publisher,
@@ -142,7 +146,9 @@ func TestWatchRoots_ACLTokenInvalidated(t *testing.T) {
	aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything).
		Return(testutils.TestAuthorizerServiceWriteAny(t), nil).Twice()

-	ctx := external.ContextWithToken(context.Background(), testACLToken)
+	options := structs.QueryOptions{Token: testACLToken}
+	ctx, err := external.ContextWithQueryOptions(context.Background(), options)
+	require.NoError(t, err)

	server := NewServer(Config{
		Publisher: publisher,
@@ -210,7 +216,9 @@ func TestWatchRoots_StateStoreAbandoned(t *testing.T) {
	aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything).
		Return(testutils.TestAuthorizerServiceWriteAny(t), nil)

-	ctx := external.ContextWithToken(context.Background(), testACLToken)
+	options := structs.QueryOptions{Token: testACLToken}
+	ctx, err := external.ContextWithQueryOptions(context.Background(), options)
+	require.NoError(t, err)

	server := NewServer(Config{
		Publisher: publisher,

View File

@@ -9,10 +9,11 @@ import (
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/structpb"

-	acl "github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/agent/configentry"
	"github.com/hashicorp/consul/agent/consul/state"
	external "github.com/hashicorp/consul/agent/grpc-external"
-	structs "github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/proto-public/pbdataplane"
)
@@ -22,10 +23,14 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.GetEnvoyBootstrapParamsRequest) (*pbdataplane.GetEnvoyBootstrapParamsResponse, error) {
	logger.Trace("Started processing request")
	defer logger.Trace("Finished processing request")

-	token := external.TokenFromContext(ctx)
+	options, err := external.QueryOptionsFromContext(ctx)
+	if err != nil {
+		return nil, err
+	}

	var authzContext acl.AuthorizerContext
	entMeta := acl.NewEnterpriseMetaWithPartition(req.GetPartition(), req.GetNamespace())
-	authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(token, &entMeta, &authzContext)
+	authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(options.Token, &entMeta, &authzContext)
	if err != nil {
		return nil, status.Error(codes.Unauthenticated, err.Error())
	}
@@ -69,7 +74,24 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.GetEnvoyBootstrapParamsRequest) (*pbdataplane.GetEnvoyBootstrapParamsResponse, error) {
		NodeId:   string(svc.ID),
	}

-	bootstrapConfig, err := structpb.NewStruct(svc.ServiceProxy.Config)
+	// This is awkward because it's designed for different requests, but
+	// this fakes the ServiceSpecificRequest so that we can reuse code.
+	_, ns, err := configentry.MergeNodeServiceWithCentralConfig(
+		nil,
+		store,
+		&structs.ServiceSpecificRequest{
+			Datacenter:   s.Datacenter,
+			QueryOptions: options,
+		},
+		svc.ToNodeService(),
+		logger,
+	)
+	if err != nil {
+		logger.Error("Error merging with central config", "error", err)
+		return nil, status.Errorf(codes.Unknown, "Error merging central config: %v", err)
+	}
+
+	bootstrapConfig, err := structpb.NewStruct(ns.Proxy.Config)
	if err != nil {
		logger.Error("Error creating the envoy bootstrap params config", "error", err)
		return nil, status.Error(codes.Unknown, "Error creating the envoy bootstrap params config")
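The merge above delegates the precedence rules to `configentry.MergeNodeServiceWithCentralConfig`. As an illustrative approximation only (not Consul's actual merge logic), the layering asserted by the test changes later in this diff behaves like a last-write-wins map merge, with service-defaults overriding proxy-defaults and the instance's own proxy config applied last:

```go
package main

import "fmt"

// mergeProxyConfig is a hypothetical helper: each layer overwrites the keys
// of the layers before it.
func mergeProxyConfig(proxyDefaults, serviceDefaults, instance map[string]interface{}) map[string]interface{} {
	out := map[string]interface{}{}
	// Lowest precedence first; later writes win.
	for _, layer := range []map[string]interface{}{proxyDefaults, serviceDefaults, instance} {
		for k, v := range layer {
			out[k] = v
		}
	}
	return out
}

func main() {
	merged := mergeProxyConfig(
		map[string]interface{}{"protocol": "http", "local_request_timeout_ms": 1111}, // proxy-defaults
		map[string]interface{}{"protocol": "tcp", "local_connect_timeout_ms": 4444},  // service-defaults
		map[string]interface{}{"envoy_dogstatsd_url": "udp://127.0.0.1:8125"},        // instance config
	)
	fmt.Println(merged["protocol"]) // "tcp": service-defaults win over proxy-defaults
}
```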

View File

@@ -5,29 +5,39 @@ import (
	"testing"

	"github.com/hashicorp/go-hclog"
-	mock "github.com/stretchr/testify/mock"
+	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/structpb"

-	acl "github.com/hashicorp/consul/acl"
-	resolver "github.com/hashicorp/consul/acl/resolver"
+	"github.com/hashicorp/consul/acl"
+	"github.com/hashicorp/consul/acl/resolver"
	external "github.com/hashicorp/consul/agent/grpc-external"
	"github.com/hashicorp/consul/agent/grpc-external/testutils"
-	structs "github.com/hashicorp/consul/agent/structs"
+	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/proto-public/pbdataplane"
	"github.com/hashicorp/consul/types"
)

const (
	testToken        = "acl-token-get-envoy-bootstrap-params"
+	testServiceName  = "web"
	proxyServiceID   = "web-proxy"
	nodeName         = "foo"
	nodeID           = "2980b72b-bd9d-9d7b-d4f9-951bf7508d95"
	proxyConfigKey   = "envoy_dogstatsd_url"
	proxyConfigValue = "udp://127.0.0.1:8125"
	serverDC         = "dc1"
+
+	protocolKey       = "protocol"
+	connectTimeoutKey = "local_connect_timeout_ms"
+	requestTimeoutKey = "local_request_timeout_ms"
+
+	proxyDefaultsProtocol         = "http"
+	proxyDefaultsRequestTimeout   = 1111
+	serviceDefaultsProtocol       = "tcp"
+	serviceDefaultsConnectTimeout = 4444
)

func testRegisterRequestProxy(t *testing.T) *structs.RegisterRequest {
@@ -43,7 +53,7 @@ func testRegisterRequestProxy(t *testing.T) *structs.RegisterRequest {
			Address: "127.0.0.2",
			Port:    2222,
			Proxy: structs.ConnectProxyConfig{
-				DestinationServiceName: "web",
+				DestinationServiceName: testServiceName,
				Config: map[string]interface{}{
					proxyConfigKey: proxyConfigValue,
				},
@@ -63,22 +73,64 @@ func testRegisterIngressGateway(t *testing.T) *structs.RegisterRequest {
	return registerReq
}

+func testProxyDefaults(t *testing.T) structs.ConfigEntry {
+	return &structs.ProxyConfigEntry{
+		Kind: structs.ProxyDefaults,
+		Name: structs.ProxyConfigGlobal,
+		Config: map[string]interface{}{
+			protocolKey:       proxyDefaultsProtocol,
+			requestTimeoutKey: proxyDefaultsRequestTimeout,
+		},
+	}
+}
+
+func testServiceDefaults(t *testing.T) structs.ConfigEntry {
+	return &structs.ServiceConfigEntry{
+		Kind:                  structs.ServiceDefaults,
+		Name:                  testServiceName,
+		Protocol:              serviceDefaultsProtocol,
+		LocalConnectTimeoutMs: serviceDefaultsConnectTimeout,
+	}
+}
+
+func requireConfigField(t *testing.T, resp *pbdataplane.GetEnvoyBootstrapParamsResponse, key string, value interface{}) {
+	require.Contains(t, resp.Config.Fields, key)
+	require.Equal(t, value, resp.Config.Fields[key])
+}

func TestGetEnvoyBootstrapParams_Success(t *testing.T) {
	type testCase struct {
		name        string
		registerReq *structs.RegisterRequest
		nodeID      bool
+		proxyDefaults   structs.ConfigEntry
+		serviceDefaults structs.ConfigEntry
	}

	run := func(t *testing.T, tc testCase) {
		store := testutils.TestStateStore(t, nil)
-		err := store.EnsureRegistration(1, tc.registerReq)
+		idx := uint64(1)
+		err := store.EnsureRegistration(idx, tc.registerReq)
		require.NoError(t, err)

+		if tc.proxyDefaults != nil {
+			idx++
+			err := store.EnsureConfigEntry(idx, tc.proxyDefaults)
+			require.NoError(t, err)
+		}
+		if tc.serviceDefaults != nil {
+			idx++
+			err := store.EnsureConfigEntry(idx, tc.serviceDefaults)
+			require.NoError(t, err)
+		}

		aclResolver := &MockACLResolver{}
		aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything).
			Return(testutils.TestAuthorizerServiceRead(t, tc.registerReq.Service.ID), nil)
-		ctx := external.ContextWithToken(context.Background(), testToken)
+
+		options := structs.QueryOptions{Token: testToken}
+		ctx, err := external.ContextWithQueryOptions(context.Background(), options)
+		require.NoError(t, err)

		server := NewServer(Config{
			GetStore: func() StateStore { return store },
@@ -106,20 +158,33 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) {
		require.Equal(t, serverDC, resp.Datacenter)
		require.Equal(t, tc.registerReq.EnterpriseMeta.PartitionOrDefault(), resp.Partition)
		require.Equal(t, tc.registerReq.EnterpriseMeta.NamespaceOrDefault(), resp.Namespace)
-		require.Contains(t, resp.Config.Fields, proxyConfigKey)
-		require.Equal(t, structpb.NewStringValue(proxyConfigValue), resp.Config.Fields[proxyConfigKey])
+		requireConfigField(t, resp, proxyConfigKey, structpb.NewStringValue(proxyConfigValue))
		require.Equal(t, convertToResponseServiceKind(tc.registerReq.Service.Kind), resp.ServiceKind)
		require.Equal(t, tc.registerReq.Node, resp.NodeName)
		require.Equal(t, string(tc.registerReq.ID), resp.NodeId)

+		if tc.serviceDefaults != nil && tc.proxyDefaults != nil {
+			// service-defaults take precedence over proxy-defaults
+			requireConfigField(t, resp, protocolKey, structpb.NewStringValue(serviceDefaultsProtocol))
+			requireConfigField(t, resp, connectTimeoutKey, structpb.NewNumberValue(serviceDefaultsConnectTimeout))
+			requireConfigField(t, resp, requestTimeoutKey, structpb.NewNumberValue(proxyDefaultsRequestTimeout))
+		} else if tc.serviceDefaults != nil {
+			requireConfigField(t, resp, protocolKey, structpb.NewStringValue(serviceDefaultsProtocol))
+			requireConfigField(t, resp, connectTimeoutKey, structpb.NewNumberValue(serviceDefaultsConnectTimeout))
+		} else if tc.proxyDefaults != nil {
+			requireConfigField(t, resp, protocolKey, structpb.NewStringValue(proxyDefaultsProtocol))
+			requireConfigField(t, resp, requestTimeoutKey, structpb.NewNumberValue(proxyDefaultsRequestTimeout))
+		}
	}

	testCases := []testCase{
		{
-			name:        "lookup service side car proxy by node name",
+			name:        "lookup service sidecar proxy by node name",
			registerReq: testRegisterRequestProxy(t),
		},
		{
-			name:        "lookup service side car proxy by node ID",
+			name:        "lookup service sidecar proxy by node ID",
			registerReq: testRegisterRequestProxy(t),
			nodeID:      true,
		},
@@ -132,6 +197,21 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) {
			registerReq: testRegisterIngressGateway(t),
			nodeID:      true,
		},
+		{
+			name:          "merge proxy defaults for sidecar proxy",
+			registerReq:   testRegisterRequestProxy(t),
+			proxyDefaults: testProxyDefaults(t),
+		},
+		{
+			name:            "merge service defaults for sidecar proxy",
+			registerReq:     testRegisterRequestProxy(t),
+			serviceDefaults: testServiceDefaults(t),
+		},
+		{
+			name:            "merge proxy defaults and service defaults for sidecar proxy",
+			registerReq:     testRegisterRequestProxy(t),
+			proxyDefaults:   testProxyDefaults(t),
+			serviceDefaults: testServiceDefaults(t),
+		},
	}

	for _, tc := range testCases {
@@ -154,11 +234,14 @@ func TestGetEnvoyBootstrapParams_Error(t *testing.T) {
	aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything).
		Return(testutils.TestAuthorizerServiceRead(t, proxyServiceID), nil)

-	ctx := external.ContextWithToken(context.Background(), testToken)
+	options := structs.QueryOptions{Token: testToken}
+	ctx, err := external.ContextWithQueryOptions(context.Background(), options)
+	require.NoError(t, err)

	store := testutils.TestStateStore(t, nil)
	registerReq := testRegisterRequestProxy(t)
-	err := store.EnsureRegistration(1, registerReq)
+	err = store.EnsureRegistration(1, registerReq)
	require.NoError(t, err)

	server := NewServer(Config{
@@ -224,8 +307,12 @@ func TestGetEnvoyBootstrapParams_Unauthenticated(t *testing.T) {
	aclResolver := &MockACLResolver{}
	aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything).
		Return(resolver.Result{}, acl.ErrNotFound)
-	ctx := external.ContextWithToken(context.Background(), testToken)
+
+	options := structs.QueryOptions{Token: testToken}
+	ctx, err := external.ContextWithQueryOptions(context.Background(), options)
+	require.NoError(t, err)
+
	store := testutils.TestStateStore(t, nil)
	server := NewServer(Config{
		GetStore: func() StateStore { return store },
		Logger:   hclog.NewNullLogger(),
@@ -243,12 +330,16 @@ func TestGetEnvoyBootstrapParams_PermissionDenied(t *testing.T) {
	aclResolver := &MockACLResolver{}
	aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything).
		Return(testutils.TestAuthorizerDenyAll(t), nil)
-	ctx := external.ContextWithToken(context.Background(), testToken)
+
+	options := structs.QueryOptions{Token: testToken}
+	ctx, err := external.ContextWithQueryOptions(context.Background(), options)
+	require.NoError(t, err)
+
	store := testutils.TestStateStore(t, nil)
	registerReq := structs.TestRegisterRequestProxy(t)
	proxyServiceID := "web-sidecar-proxy"
	registerReq.Service.ID = proxyServiceID
-	err := store.EnsureRegistration(1, registerReq)
+	err = store.EnsureRegistration(1, registerReq)
	require.NoError(t, err)

	server := NewServer(Config{

View File

@@ -19,10 +19,14 @@ func (s *Server) GetSupportedDataplaneFeatures(ctx context.Context, req *pbdataplane.GetSupportedDataplaneFeaturesRequest) (*pbdataplane.GetSupportedDataplaneFeaturesResponse, error) {
	defer logger.Trace("Finished processing request")

	// Require the given ACL token to have `service:write` on any service
-	token := external.TokenFromContext(ctx)
+	options, err := external.QueryOptionsFromContext(ctx)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}

	var authzContext acl.AuthorizerContext
	entMeta := structs.WildcardEnterpriseMetaInPartition(structs.WildcardSpecifier)
-	authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(token, entMeta, &authzContext)
+	authz, err := s.ACLResolver.ResolveTokenAndDefaultMeta(options.Token, entMeta, &authzContext)
	if err != nil {
		return nil, status.Error(codes.Unauthenticated, err.Error())
	}

View File

@@ -14,6 +14,7 @@ import (
	resolver "github.com/hashicorp/consul/acl/resolver"
	external "github.com/hashicorp/consul/agent/grpc-external"
	"github.com/hashicorp/consul/agent/grpc-external/testutils"
+	structs "github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/proto-public/pbdataplane"
)
@@ -24,7 +25,11 @@ func TestSupportedDataplaneFeatures_Success(t *testing.T) {
	aclResolver := &MockACLResolver{}
	aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything).
		Return(testutils.TestAuthorizerServiceWriteAny(t), nil)
-	ctx := external.ContextWithToken(context.Background(), testACLToken)
+
+	options := structs.QueryOptions{Token: testACLToken}
+	ctx, err := external.ContextWithQueryOptions(context.Background(), options)
+	require.NoError(t, err)
+
	server := NewServer(Config{
		Logger:      hclog.NewNullLogger(),
		ACLResolver: aclResolver,
@@ -53,7 +58,11 @@ func TestSupportedDataplaneFeatures_Unauthenticated(t *testing.T) {
	aclResolver := &MockACLResolver{}
	aclResolver.On("ResolveTokenAndDefaultMeta", mock.Anything, mock.Anything, mock.Anything).
		Return(resolver.Result{}, acl.ErrNotFound)
-	ctx := external.ContextWithToken(context.Background(), testACLToken)
+
+	options := structs.QueryOptions{Token: testACLToken}
+	ctx, err := external.ContextWithQueryOptions(context.Background(), options)
+	require.NoError(t, err)
+
	server := NewServer(Config{
		Logger:      hclog.NewNullLogger(),
		ACLResolver: aclResolver,
@@ -70,7 +79,11 @@ func TestSupportedDataplaneFeatures_PermissionDenied(t *testing.T) {
	aclResolver := &MockACLResolver{}
	aclResolver.On("ResolveTokenAndDefaultMeta", testACLToken, mock.Anything, mock.Anything).
		Return(testutils.TestAuthorizerDenyAll(t), nil)
-	ctx := external.ContextWithToken(context.Background(), testACLToken)
+
+	options := structs.QueryOptions{Token: testACLToken}
+	ctx, err := external.ContextWithQueryOptions(context.Background(), options)
+	require.NoError(t, err)
+
	server := NewServer(Config{
		Logger:      hclog.NewNullLogger(),
		ACLResolver: aclResolver,

View File

@@ -4,9 +4,11 @@ import (
	"google.golang.org/grpc"

	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-memdb"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/acl/resolver"
+	"github.com/hashicorp/consul/agent/configentry"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/proto-public/pbdataplane"
)
@@ -25,6 +27,7 @@ type Config struct {

type StateStore interface {
	ServiceNode(string, string, string, *acl.EnterpriseMeta, string) (uint64, *structs.ServiceNode, error)
+	ReadResolvedServiceConfigEntries(memdb.WatchSet, string, *acl.EnterpriseMeta, []structs.ServiceID, structs.ProxyMode) (uint64, *configentry.ResolvedServiceConfigSet, error)
}

//go:generate mockery --name ACLResolver --inpackage

View File

@@ -0,0 +1,138 @@
package dns
import (
"context"
"fmt"
"net"
"github.com/hashicorp/go-hclog"
"github.com/miekg/dns"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
"github.com/hashicorp/consul/proto-public/pbdns"
)
type LocalAddr struct {
IP net.IP
Port int
}
type Config struct {
Logger hclog.Logger
DNSServeMux *dns.ServeMux
LocalAddr LocalAddr
}
type Server struct {
Config
}
func NewServer(cfg Config) *Server {
return &Server{cfg}
}
func (s *Server) Register(grpcServer *grpc.Server) {
pbdns.RegisterDNSServiceServer(grpcServer, s)
}
// BufferResponseWriter writes a DNS response to a byte buffer.
type BufferResponseWriter struct {
responseBuffer []byte
LocalAddress net.Addr
RemoteAddress net.Addr
Logger hclog.Logger
}
// LocalAddr returns the net.Addr of the server
func (b *BufferResponseWriter) LocalAddr() net.Addr {
return b.LocalAddress
}
// RemoteAddr returns the net.Addr of the client that sent the current request.
func (b *BufferResponseWriter) RemoteAddr() net.Addr {
return b.RemoteAddress
}
// WriteMsg writes a reply back to the client.
func (b *BufferResponseWriter) WriteMsg(m *dns.Msg) error {
// Pack message to bytes first.
msgBytes, err := m.Pack()
if err != nil {
b.Logger.Error("error packing message", "err", err)
return err
}
b.responseBuffer = msgBytes
return nil
}
// Write writes a raw buffer back to the client.
func (b *BufferResponseWriter) Write(m []byte) (int, error) {
b.Logger.Debug("Write was called")
return copy(b.responseBuffer, m), nil
}
// Close closes the connection.
func (b *BufferResponseWriter) Close() error {
// There's nothing for us to do here as we don't handle the connection.
return nil
}
// TsigStatus returns the status of the Tsig.
func (b *BufferResponseWriter) TsigStatus() error {
// TSIG doesn't apply to this response writer.
return nil
}
// TsigTimersOnly sets the tsig timers only boolean.
func (b *BufferResponseWriter) TsigTimersOnly(bool) {}
// Hijack lets the caller take over the connection.
// After a call to Hijack(), the DNS package will not do anything with the connection.
func (b *BufferResponseWriter) Hijack() {}
// Query is a gRPC endpoint that serves DNS requests. It is consumed primarily by
// Consul Dataplane to proxy DNS requests to Consul.
func (s *Server) Query(ctx context.Context, req *pbdns.QueryRequest) (*pbdns.QueryResponse, error) {
pr, ok := peer.FromContext(ctx)
if !ok {
return nil, fmt.Errorf("error retrieving peer information from context")
}
var local net.Addr
var remote net.Addr
// Fake the transport protocol when handling the request: the query is proxied
// to Consul over gRPC, but the DNS message must still be trimmed according to
// whether the original client spoke UDP or TCP.
switch req.GetProtocol() {
case pbdns.Protocol_PROTOCOL_TCP:
remote = pr.Addr
local = &net.TCPAddr{IP: s.LocalAddr.IP, Port: s.LocalAddr.Port}
case pbdns.Protocol_PROTOCOL_UDP:
remoteAddr := pr.Addr.(*net.TCPAddr)
remote = &net.UDPAddr{IP: remoteAddr.IP, Port: remoteAddr.Port}
local = &net.UDPAddr{IP: s.LocalAddr.IP, Port: s.LocalAddr.Port}
default:
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("error protocol type not set: %v", req.GetProtocol()))
}
respWriter := &BufferResponseWriter{
LocalAddress: local,
RemoteAddress: remote,
Logger: s.Logger,
}
msg := &dns.Msg{}
err := msg.Unpack(req.Msg)
if err != nil {
s.Logger.Error("error unpacking message", "err", err)
return nil, status.Error(codes.Internal, fmt.Sprintf("failure decoding dns request: %s", err.Error()))
}
s.DNSServeMux.ServeDNS(respWriter, msg)
queryResponse := &pbdns.QueryResponse{Msg: respWriter.responseBuffer}
return queryResponse, nil
}
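For orientation, here is a minimal sketch of wiring this DNS proxy into a gRPC server. The import path for the new package and the catch-all handler are illustrative assumptions, not part of this diff:

```go
package main

import (
	"net"

	"github.com/hashicorp/go-hclog"
	"github.com/miekg/dns"
	"google.golang.org/grpc"

	// Assumed import path for the package added in this file.
	dnsproxy "github.com/hashicorp/consul/agent/grpc-external/services/dns"
)

func main() {
	mux := dns.NewServeMux()
	// Hypothetical handler; in Consul the agent's existing DNS mux is passed in.
	mux.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		_ = w.WriteMsg(m)
	})

	srv := dnsproxy.NewServer(dnsproxy.Config{
		Logger:      hclog.Default(),
		DNSServeMux: mux,
		LocalAddr:   dnsproxy.LocalAddr{IP: net.IPv4(127, 0, 0, 1), Port: 53},
	})

	g := grpc.NewServer()
	srv.Register(g) // exposes the pbdns DNSService Query endpoint

	lis, err := net.Listen("tcp", "127.0.0.1:8502")
	if err != nil {
		panic(err)
	}
	_ = g.Serve(lis)
}
```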

View File

@ -0,0 +1,127 @@
package dns
import (
"context"
"errors"
"net"
"testing"
"github.com/hashicorp/go-hclog"
"github.com/miekg/dns"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"google.golang.org/grpc"
"github.com/hashicorp/consul/agent/grpc-external/testutils"
"github.com/hashicorp/consul/proto-public/pbdns"
)
var txtRR = []string{"Hello world"}
func helloServer(w dns.ResponseWriter, req *dns.Msg) {
m := new(dns.Msg)
m.SetReply(req)
m.Extra = make([]dns.RR, 1)
m.Extra[0] = &dns.TXT{
Hdr: dns.RR_Header{Name: m.Question[0].Name, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 0},
Txt: txtRR,
}
w.WriteMsg(m)
}
func testClient(t *testing.T, server *Server) pbdns.DNSServiceClient {
t.Helper()
addr := testutils.RunTestServer(t, server)
conn, err := grpc.DialContext(context.Background(), addr.String(), grpc.WithInsecure())
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, conn.Close())
})
return pbdns.NewDNSServiceClient(conn)
}
type DNSTestSuite struct {
suite.Suite
}
func TestDNS_suite(t *testing.T) {
suite.Run(t, new(DNSTestSuite))
}
func (s *DNSTestSuite) TestProxy_Success() {
mux := dns.NewServeMux()
mux.Handle(".", dns.HandlerFunc(helloServer))
server := NewServer(Config{
Logger: hclog.Default(),
DNSServeMux: mux,
LocalAddr: LocalAddr{
net.IPv4(127, 0, 0, 1),
0,
},
})
client := testClient(s.T(), server)
testCases := map[string]struct {
question string
clientQuery func(qR *pbdns.QueryRequest)
expectedErr error
}{
"happy path udp": {
question: "abc.com.",
clientQuery: func(qR *pbdns.QueryRequest) {
qR.Protocol = pbdns.Protocol_PROTOCOL_UDP
},
},
"happy path tcp": {
question: "abc.com.",
clientQuery: func(qR *pbdns.QueryRequest) {
qR.Protocol = pbdns.Protocol_PROTOCOL_TCP
},
},
"No protocol set": {
question: "abc.com.",
clientQuery: func(qR *pbdns.QueryRequest) {},
expectedErr: errors.New("error protocol type not set: PROTOCOL_UNSET_UNSPECIFIED"),
},
"Invalid question": {
question: "notvalid",
clientQuery: func(qR *pbdns.QueryRequest) {
qR.Protocol = pbdns.Protocol_PROTOCOL_UDP
},
expectedErr: errors.New("failure decoding dns request"),
},
}
for name, tc := range testCases {
s.Run(name, func() {
req := dns.Msg{}
req.SetQuestion(tc.question, dns.TypeA)
bytes, _ := req.Pack()
clientReq := &pbdns.QueryRequest{Msg: bytes}
tc.clientQuery(clientReq)
clientResp, err := client.Query(context.Background(), clientReq)
if tc.expectedErr != nil {
s.Require().Error(err, "expected an error when calling the gRPC endpoint")
s.Require().ErrorContains(err, tc.expectedErr.Error())
} else {
s.Require().NoError(err, "error calling gRPC endpoint")
resp := clientResp.GetMsg()
var dnsResp dns.Msg
err = dnsResp.Unpack(resp)
s.Require().NoError(err, "error unpacking dns response")
rr := dnsResp.Extra[0].(*dns.TXT)
s.Require().EqualValues(rr.Txt, txtRR)
}
})
}
}

View File

@ -23,15 +23,45 @@ import (
/* /*
TODO(peering): TODO(peering):
At the start of each peering stream establishment (not initiation, but the
thing that reconnects) we need to do a little bit of light differential
snapshot correction to initially synchronize the local state store.
Then if we ever fail to apply a replication message we should either tear Then if we ever fail to apply a replication message we should either tear
down the entire connection (and thus force a resync on reconnect) or down the entire connection (and thus force a resync on reconnect) or
request a resync operation. request a resync operation.
*/ */
// makeExportedServiceListResponse handles preparing exported service list updates to the peer cluster.
// Each cache.UpdateEvent will contain all exported services.
func makeExportedServiceListResponse(
mst *MutableStatus,
update cache.UpdateEvent,
) (*pbpeerstream.ReplicationMessage_Response, error) {
exportedService, ok := update.Result.(*pbpeerstream.ExportedServiceList)
if !ok {
return nil, fmt.Errorf("invalid type for exported service list response: %T", update.Result)
}
any, _, err := marshalToProtoAny[*pbpeerstream.ExportedServiceList](exportedService)
if err != nil {
return nil, fmt.Errorf("failed to marshal: %w", err)
}
var serviceNames []structs.ServiceName
for _, serviceName := range exportedService.Services {
sn := structs.ServiceNameFromString(serviceName)
serviceNames = append(serviceNames, sn)
}
mst.SetExportedServices(serviceNames)
return &pbpeerstream.ReplicationMessage_Response{
ResourceURL: pbpeerstream.TypeURLExportedServiceList,
// TODO(peering): Nonce management
Nonce: "",
ResourceID: subExportedServiceList,
Operation: pbpeerstream.Operation_OPERATION_UPSERT,
Resource: any,
}, nil
}
// makeServiceResponse handles preparing exported service instance updates to the peer cluster. // makeServiceResponse handles preparing exported service instance updates to the peer cluster.
// Each cache.UpdateEvent will contain all instances for a service name. // Each cache.UpdateEvent will contain all instances for a service name.
// If there are no instances in the event, we consider that to be a de-registration. // If there are no instances in the event, we consider that to be a de-registration.
@ -40,7 +70,6 @@ func makeServiceResponse(
update cache.UpdateEvent, update cache.UpdateEvent,
) (*pbpeerstream.ReplicationMessage_Response, error) { ) (*pbpeerstream.ReplicationMessage_Response, error) {
serviceName := strings.TrimPrefix(update.CorrelationID, subExportedService) serviceName := strings.TrimPrefix(update.CorrelationID, subExportedService)
sn := structs.ServiceNameFromString(serviceName)
csn, ok := update.Result.(*pbservice.IndexedCheckServiceNodes) csn, ok := update.Result.(*pbservice.IndexedCheckServiceNodes)
if !ok { if !ok {
return nil, fmt.Errorf("invalid type for service response: %T", update.Result) return nil, fmt.Errorf("invalid type for service response: %T", update.Result)
@ -54,28 +83,7 @@ func makeServiceResponse(
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to marshal: %w", err) return nil, fmt.Errorf("failed to marshal: %w", err)
} }
// If no nodes are present then it's due to one of:
// 1. The service is newly registered or exported and yielded a transient empty update.
// 2. All instances of the service were de-registered.
// 3. The service was un-exported.
//
// We don't distinguish when these three things occurred, but it's safe to send a DELETE Op in all cases, so we do that.
// Case #1 is a no-op for the importing peer.
if len(csn.Nodes) == 0 {
mst.RemoveExportedService(sn)
return &pbpeerstream.ReplicationMessage_Response{
ResourceURL: pbpeerstream.TypeURLExportedService,
// TODO(peering): Nonce management
Nonce: "",
ResourceID: serviceName,
Operation: pbpeerstream.Operation_OPERATION_DELETE,
}, nil
}
mst.TrackExportedService(sn)
// If there are nodes in the response, we push them as an UPSERT operation.
return &pbpeerstream.ReplicationMessage_Response{ return &pbpeerstream.ReplicationMessage_Response{
ResourceURL: pbpeerstream.TypeURLExportedService, ResourceURL: pbpeerstream.TypeURLExportedService,
// TODO(peering): Nonce management // TODO(peering): Nonce management
@ -178,17 +186,6 @@ func (s *Server) processResponse(
return makeACKReply(resp.ResourceURL, resp.Nonce), nil return makeACKReply(resp.ResourceURL, resp.Nonce), nil
case pbpeerstream.Operation_OPERATION_DELETE:
if err := s.handleDelete(peerName, partition, mutableStatus, resp.ResourceURL, resp.ResourceID); err != nil {
return makeNACKReply(
resp.ResourceURL,
resp.Nonce,
code.Code_INTERNAL,
fmt.Sprintf("delete error, ResourceURL: %q, ResourceID: %q: %v", resp.ResourceURL, resp.ResourceID, err),
), fmt.Errorf("delete error: %w", err)
}
return makeACKReply(resp.ResourceURL, resp.Nonce), nil
default: default:
var errMsg string var errMsg string
if op := pbpeerstream.Operation_name[int32(resp.Operation)]; op != "" { if op := pbpeerstream.Operation_name[int32(resp.Operation)]; op != "" {
@ -218,6 +215,18 @@ func (s *Server) handleUpsert(
} }
switch resourceURL { switch resourceURL {
case pbpeerstream.TypeURLExportedServiceList:
export := &pbpeerstream.ExportedServiceList{}
if err := resource.UnmarshalTo(export); err != nil {
return fmt.Errorf("failed to unmarshal resource: %w", err)
}
err := s.handleUpsertExportedServiceList(mutableStatus, peerName, partition, export)
if err != nil {
return fmt.Errorf("did not update imported services based on the exported service list event: %w", err)
}
return nil
case pbpeerstream.TypeURLExportedService: case pbpeerstream.TypeURLExportedService:
sn := structs.ServiceNameFromString(resourceID) sn := structs.ServiceNameFromString(resourceID)
sn.OverridePartition(partition) sn.OverridePartition(partition)
@ -232,8 +241,6 @@ func (s *Server) handleUpsert(
return fmt.Errorf("did not increment imported services count for service=%q: %w", sn.String(), err) return fmt.Errorf("did not increment imported services count for service=%q: %w", sn.String(), err)
} }
mutableStatus.TrackImportedService(sn)
return nil return nil
case pbpeerstream.TypeURLPeeringTrustBundle: case pbpeerstream.TypeURLPeeringTrustBundle:
@ -256,6 +263,48 @@ func (s *Server) handleUpsert(
} }
} }
func (s *Server) handleUpsertExportedServiceList(
mutableStatus *MutableStatus,
peerName string,
partition string,
export *pbpeerstream.ExportedServiceList,
) error {
exportedServices := make(map[structs.ServiceName]struct{})
var serviceNames []structs.ServiceName
for _, service := range export.Services {
sn := structs.ServiceNameFromString(service)
sn.OverridePartition(partition)
// This ensures that we don't delete an exported service's sidecars below.
snSidecarProxy := structs.ServiceNameFromString(service + syntheticProxyNameSuffix)
snSidecarProxy.OverridePartition(partition)
exportedServices[sn] = struct{}{}
exportedServices[snSidecarProxy] = struct{}{}
serviceNames = append(serviceNames, sn)
}
entMeta := structs.NodeEnterpriseMetaInPartition(partition)
_, serviceList, err := s.GetStore().ServiceList(nil, entMeta, peerName)
if err != nil {
return err
}
for _, sn := range serviceList {
if _, ok := exportedServices[sn]; !ok {
err := s.handleUpdateService(peerName, partition, sn, nil)
if err != nil {
return fmt.Errorf("failed to delete unexported service: %w", err)
}
}
}
mutableStatus.SetImportedServices(serviceNames)
return nil
}
// handleUpdateService handles both deletion and upsert events for a service. // handleUpdateService handles both deletion and upsert events for a service.
// //
// On an UPSERT event: // On an UPSERT event:
@ -499,32 +548,6 @@ func (s *Server) handleUpsertServerAddrs(
return s.Backend.PeeringWrite(req) return s.Backend.PeeringWrite(req)
} }
func (s *Server) handleDelete(
peerName string,
partition string,
mutableStatus *MutableStatus,
resourceURL string,
resourceID string,
) error {
switch resourceURL {
case pbpeerstream.TypeURLExportedService:
sn := structs.ServiceNameFromString(resourceID)
sn.OverridePartition(partition)
err := s.handleUpdateService(peerName, partition, sn, nil)
if err != nil {
return err
}
mutableStatus.RemoveImportedService(sn)
return nil
default:
return fmt.Errorf("unexpected resourceURL: %s", resourceURL)
}
}
func makeACKReply(resourceURL, nonce string) *pbpeerstream.ReplicationMessage { func makeACKReply(resourceURL, nonce string) *pbpeerstream.ReplicationMessage {
return makeReplicationRequest(&pbpeerstream.ReplicationMessage_Request{ return makeReplicationRequest(&pbpeerstream.ReplicationMessage_Request{
ResourceURL: resourceURL, ResourceURL: resourceURL,
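The per-service DELETE handling removed above is subsumed by list-level reconciliation: handleUpsertExportedServiceList computes a set difference, and anything stored for the peer that is absent from the incoming exported list is deleted via handleUpdateService with a nil payload. A standalone sketch of that set difference (names are illustrative):

```go
package main

import "fmt"

// reconcile returns the names that are currently stored for the peer but are
// absent from the newly exported list, i.e. the services to delete.
func reconcile(stored, exported []string) []string {
	keep := make(map[string]struct{}, len(exported))
	for _, name := range exported {
		keep[name] = struct{}{}
	}
	var toDelete []string
	for _, name := range stored {
		if _, ok := keep[name]; !ok {
			toDelete = append(toDelete, name)
		}
	}
	return toDelete
}

func main() {
	fmt.Println(reconcile([]string{"mysql", "mongo"}, []string{"mysql"})) // [mongo]
}
```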

View File

@ -122,5 +122,7 @@ type StateStore interface {
NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeServices, error) NodeServices(ws memdb.WatchSet, nodeNameOrID string, entMeta *acl.EnterpriseMeta, peerName string) (uint64, *structs.NodeServices, error)
CAConfig(ws memdb.WatchSet) (uint64, *structs.CAConfiguration, error) CAConfig(ws memdb.WatchSet) (uint64, *structs.CAConfiguration, error)
TrustBundleListByService(ws memdb.WatchSet, service, dc string, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error) TrustBundleListByService(ws memdb.WatchSet, service, dc string, entMeta acl.EnterpriseMeta) (uint64, []*pbpeering.PeeringTrustBundle, error)
ServiceList(ws memdb.WatchSet, entMeta *acl.EnterpriseMeta, peerName string) (uint64, structs.ServiceList, error)
ConfigEntry(ws memdb.WatchSet, kind, name string, entMeta *acl.EnterpriseMeta) (uint64, structs.ConfigEntry, error)
AbandonCh() <-chan struct{} AbandonCh() <-chan struct{}
} }

View File

@ -351,8 +351,14 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
err := streamReq.Stream.Send(msg) err := streamReq.Stream.Send(msg)
sendMutex.Unlock() sendMutex.Unlock()
if err != nil { // We only track send successes and errors for response types because this is meant to track
status.TrackSendError(err.Error()) // resources, not request/ack messages.
if msg.GetResponse() != nil {
if err != nil {
status.TrackSendError(err.Error())
} else {
status.TrackSendSuccess()
}
} }
return err return err
} }
@ -360,6 +366,7 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
// Subscribe to all relevant resource types. // Subscribe to all relevant resource types.
for _, resourceURL := range []string{ for _, resourceURL := range []string{
pbpeerstream.TypeURLExportedService, pbpeerstream.TypeURLExportedService,
pbpeerstream.TypeURLExportedServiceList,
pbpeerstream.TypeURLPeeringTrustBundle, pbpeerstream.TypeURLPeeringTrustBundle,
pbpeerstream.TypeURLPeeringServerAddresses, pbpeerstream.TypeURLPeeringServerAddresses,
} { } {
@ -624,6 +631,13 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
case update := <-subCh: case update := <-subCh:
var resp *pbpeerstream.ReplicationMessage_Response var resp *pbpeerstream.ReplicationMessage_Response
switch { switch {
case strings.HasPrefix(update.CorrelationID, subExportedServiceList):
resp, err = makeExportedServiceListResponse(status, update)
if err != nil {
// Log the error and skip this response to avoid locking up peering due to a bad update event.
logger.Error("failed to create exported service list response", "error", err)
continue
}
case strings.HasPrefix(update.CorrelationID, subExportedService): case strings.HasPrefix(update.CorrelationID, subExportedService):
resp, err = makeServiceResponse(status, update) resp, err = makeServiceResponse(status, update)
if err != nil { if err != nil {
@ -632,9 +646,6 @@ func (s *Server) realHandleStream(streamReq HandleStreamRequest) error {
continue continue
} }
case strings.HasPrefix(update.CorrelationID, subMeshGateway):
// TODO(Peering): figure out how to sync this separately
case update.CorrelationID == subCARoot: case update.CorrelationID == subCARoot:
resp, err = makeCARootsResponse(update) resp, err = makeCARootsResponse(update)
if err != nil { if err != nil {
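The stream handler fans updates out by CorrelationID prefix, so the new exported-service-list case slots in ahead of the per-service case. A minimal sketch of this dispatch style; the prefix constants shown are illustrative stand-ins for the real ones:

```go
package main

import (
	"fmt"
	"strings"
)

// Illustrative stand-ins for the real correlation ID prefixes.
const (
	subExportedServiceList = "exported-service-list"
	subExportedService     = "exported-service:"
)

// dispatch routes an update's correlation ID to a handler, checking the more
// specific list prefix before the per-service prefix.
func dispatch(correlationID string) string {
	switch {
	case strings.HasPrefix(correlationID, subExportedServiceList):
		return "makeExportedServiceListResponse"
	case strings.HasPrefix(correlationID, subExportedService):
		return "makeServiceResponse for " + strings.TrimPrefix(correlationID, subExportedService)
	default:
		return "unhandled"
	}
}

func main() {
	fmt.Println(dispatch("exported-service-list"))  // list-level handler
	fmt.Println(dispatch("exported-service:mysql")) // per-service handler
}
```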

File diff suppressed because it is too large

View File

@ -214,6 +214,9 @@ type Status struct {
// LastSendErrorMessage tracks the last error message when sending into the stream. // LastSendErrorMessage tracks the last error message when sending into the stream.
LastSendErrorMessage string LastSendErrorMessage string
// LastSendSuccess tracks the time we last successfully sent a resource TO the peer.
LastSendSuccess time.Time
// LastRecvHeartbeat tracks when we last received a heartbeat from our peer. // LastRecvHeartbeat tracks when we last received a heartbeat from our peer.
LastRecvHeartbeat time.Time LastRecvHeartbeat time.Time
@ -230,9 +233,9 @@ type Status struct {
// TODO(peering): consider keeping track of imported and exported services thru raft // TODO(peering): consider keeping track of imported and exported services thru raft
// ImportedServices keeps track of which service names are imported for the peer // ImportedServices keeps track of which service names are imported for the peer
ImportedServices map[string]struct{} ImportedServices []string
// ExportedServices keeps track of which service names a peer asks to export // ExportedServices keeps track of which service names a peer asks to export
ExportedServices map[string]struct{} ExportedServices []string
} }
func (s *Status) GetImportedServicesCount() uint64 { func (s *Status) GetImportedServicesCount() uint64 {
@ -271,6 +274,12 @@ func (s *MutableStatus) TrackSendError(error string) {
s.mu.Unlock() s.mu.Unlock()
} }
func (s *MutableStatus) TrackSendSuccess() {
s.mu.Lock()
s.LastSendSuccess = s.timeNow().UTC()
s.mu.Unlock()
}
// TrackRecvResourceSuccess tracks receiving a replicated resource. // TrackRecvResourceSuccess tracks receiving a replicated resource.
func (s *MutableStatus) TrackRecvResourceSuccess() { func (s *MutableStatus) TrackRecvResourceSuccess() {
s.mu.Lock() s.mu.Lock()
@ -345,22 +354,15 @@ func (s *MutableStatus) GetStatus() Status {
return copy return copy
} }
func (s *MutableStatus) RemoveImportedService(sn structs.ServiceName) { func (s *MutableStatus) SetImportedServices(serviceNames []structs.ServiceName) {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
delete(s.ImportedServices, sn.String()) s.ImportedServices = make([]string, len(serviceNames))
}
func (s *MutableStatus) TrackImportedService(sn structs.ServiceName) { for i, sn := range serviceNames {
s.mu.Lock() s.ImportedServices[i] = sn.Name
defer s.mu.Unlock()
if s.ImportedServices == nil {
s.ImportedServices = make(map[string]struct{})
} }
s.ImportedServices[sn.String()] = struct{}{}
} }
func (s *MutableStatus) GetImportedServicesCount() int { func (s *MutableStatus) GetImportedServicesCount() int {
@ -370,22 +372,15 @@ func (s *MutableStatus) GetImportedServicesCount() int {
return len(s.ImportedServices) return len(s.ImportedServices)
} }
func (s *MutableStatus) RemoveExportedService(sn structs.ServiceName) { func (s *MutableStatus) SetExportedServices(serviceNames []structs.ServiceName) {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
delete(s.ExportedServices, sn.String()) s.ExportedServices = make([]string, len(serviceNames))
}
func (s *MutableStatus) TrackExportedService(sn structs.ServiceName) { for i, sn := range serviceNames {
s.mu.Lock() s.ExportedServices[i] = sn.Name
defer s.mu.Unlock()
if s.ExportedServices == nil {
s.ExportedServices = make(map[string]struct{})
} }
s.ExportedServices[sn.String()] = struct{}{}
} }
func (s *MutableStatus) GetExportedServicesCount() int { func (s *MutableStatus) GetExportedServicesCount() int {

View File

@ -98,25 +98,25 @@ func (m *subscriptionManager) syncViaBlockingQuery(
ws.Add(store.AbandonCh()) ws.Add(store.AbandonCh())
ws.Add(ctx.Done()) ws.Add(ctx.Done())
if result, err := queryFn(ctx, store, ws); err != nil { if result, err := queryFn(ctx, store, ws); err != nil && ctx.Err() == nil {
logger.Error("failed to sync from query", "error", err) logger.Error("failed to sync from query", "error", err)
} else { } else {
// Block for any changes to the state store. select {
updateCh <- cache.UpdateEvent{ case <-ctx.Done():
CorrelationID: correlationID, return
Result: result, case updateCh <- cache.UpdateEvent{CorrelationID: correlationID, Result: result}:
} }
// Block for any changes to the state store.
ws.WatchCtx(ctx) ws.WatchCtx(ctx)
} }
if err := waiter.Wait(ctx); err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { err := waiter.Wait(ctx)
logger.Error("failed to wait before re-trying sync", "error", err) if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
}
select {
case <-ctx.Done():
return return
default: } else if err != nil {
logger.Error("failed to wait before re-trying sync", "error", err)
} }
} }
} }
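The rewritten loop replaces an unguarded channel send with the standard context-aware send, so a cancelled subscription can no longer wedge the sync goroutine on updateCh. The pattern, distilled into a self-contained sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// trySend delivers v or gives up when the context is cancelled, instead of
// blocking forever on a channel nobody is reading anymore.
func trySend(ctx context.Context, ch chan<- string, v string) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case ch <- v:
		return nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	ch := make(chan string)                 // unbuffered and never read
	fmt.Println(trySend(ctx, ch, "update")) // context deadline exceeded
}
```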

View File

@ -6,9 +6,13 @@ import (
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
"time"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/hashicorp/consul/ipaddr"
"github.com/hashicorp/consul/lib/retry"
"github.com/hashicorp/go-hclog" "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/cache"
@ -124,8 +128,6 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti
return fmt.Errorf("received error event: %w", u.Err) return fmt.Errorf("received error event: %w", u.Err)
} }
// TODO(peering): on initial stream setup, transmit the list of exported
// services for use in differential DELETE/UPSERT. Akin to streaming's snapshot start/end.
switch { switch {
case u.CorrelationID == subExportedServiceList: case u.CorrelationID == subExportedServiceList:
// Everything starts with the exported service list coming from // Everything starts with the exported service list coming from
@ -138,10 +140,20 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti
state.exportList = evt state.exportList = evt
pending := &pendingPayload{} pending := &pendingPayload{}
m.syncNormalServices(ctx, state, pending, evt.Services) m.syncNormalServices(ctx, state, evt.Services)
if m.config.ConnectEnabled { if m.config.ConnectEnabled {
m.syncDiscoveryChains(ctx, state, pending, evt.ListAllDiscoveryChains()) m.syncDiscoveryChains(ctx, state, pending, evt.ListAllDiscoveryChains())
} }
err := pending.Add(
exportedServiceListID,
subExportedServiceList,
pbpeerstream.ExportedServiceListFromStruct(evt),
)
if err != nil {
return err
}
state.sendPendingEvents(ctx, m.logger, pending) state.sendPendingEvents(ctx, m.logger, pending)
// cleanup event versions too // cleanup event versions too
@ -239,16 +251,10 @@ func (m *subscriptionManager) handleEvent(ctx context.Context, state *subscripti
pending := &pendingPayload{} pending := &pendingPayload{}
// Directly replicate information about our mesh gateways to the consuming side.
// TODO(peering): should we scrub anything before replicating this?
if err := pending.Add(meshGatewayPayloadID, u.CorrelationID, csn); err != nil {
return err
}
if state.exportList != nil { if state.exportList != nil {
// Trigger public events for all synthetic discovery chain replies. // Trigger public events for all synthetic discovery chain replies.
for chainName, info := range state.connectServices { for chainName, info := range state.connectServices {
m.emitEventForDiscoveryChain(ctx, state, pending, chainName, info) m.collectPendingEventForDiscoveryChain(ctx, state, pending, chainName, info)
} }
} }
@ -435,7 +441,6 @@ func (m *subscriptionManager) subscribeCARoots(
func (m *subscriptionManager) syncNormalServices( func (m *subscriptionManager) syncNormalServices(
ctx context.Context, ctx context.Context,
state *subscriptionState, state *subscriptionState,
pending *pendingPayload,
services []structs.ServiceName, services []structs.ServiceName,
) { ) {
// seen contains the set of exported service names and is used to reconcile the list of watched services. // seen contains the set of exported service names and is used to reconcile the list of watched services.
@ -464,20 +469,7 @@ func (m *subscriptionManager) syncNormalServices(
for svc, cancel := range state.watchedServices { for svc, cancel := range state.watchedServices {
if _, ok := seen[svc]; !ok { if _, ok := seen[svc]; !ok {
cancel() cancel()
delete(state.watchedServices, svc) delete(state.watchedServices, svc)
// Send an empty event to the stream handler to trigger sending a DELETE message.
// Cancelling the subscription context above is necessary, but does not yield a useful signal on its own.
err := pending.Add(
servicePayloadIDPrefix+svc.String(),
subExportedService+svc.String(),
&pbservice.IndexedCheckServiceNodes{},
)
if err != nil {
m.logger.Error("failed to send event for service", "service", svc.String(), "error", err)
continue
}
} }
} }
} }
@ -496,7 +488,7 @@ func (m *subscriptionManager) syncDiscoveryChains(
state.connectServices[chainName] = info state.connectServices[chainName] = info
m.emitEventForDiscoveryChain(ctx, state, pending, chainName, info) m.collectPendingEventForDiscoveryChain(ctx, state, pending, chainName, info)
} }
// if it was dropped, try to emit a DELETE event
@ -523,7 +515,7 @@ func (m *subscriptionManager) syncDiscoveryChains(
} }
} }
func (m *subscriptionManager) emitEventForDiscoveryChain( func (m *subscriptionManager) collectPendingEventForDiscoveryChain(
ctx context.Context, ctx context.Context,
state *subscriptionState, state *subscriptionState,
pending *pendingPayload, pending *pendingPayload,
@ -744,32 +736,118 @@ func (m *subscriptionManager) notifyServerAddrUpdates(
ctx context.Context, ctx context.Context,
updateCh chan<- cache.UpdateEvent, updateCh chan<- cache.UpdateEvent,
) { ) {
// Wait until this is subscribed-to. // Wait until server address updates are subscribed-to.
select { select {
case <-m.serverAddrsSubReady: case <-m.serverAddrsSubReady:
case <-ctx.Done(): case <-ctx.Done():
return return
} }
var idx uint64 configNotifyCh := m.notifyMeshConfigUpdates(ctx)
// TODO(peering): retry logic; fail past a threshold
for {
var err error
// Typically, this function will block inside `m.subscribeServerAddrs` and only return on error.
// Errors are logged and the watch is retried.
idx, err = m.subscribeServerAddrs(ctx, idx, updateCh)
if errors.Is(err, stream.ErrSubForceClosed) {
m.logger.Trace("subscription force-closed due to an ACL change or snapshot restore, will attempt resume")
} else if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
m.logger.Warn("failed to subscribe to server addresses, will attempt resume", "error", err.Error())
} else {
m.logger.Trace(err.Error())
}
// Intentionally initialized to empty values.
// These are set after the first mesh config entry update arrives.
var queryCtx context.Context
cancel := func() {}
useGateways := false
for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
cancel()
return
case event := <-configNotifyCh:
entry, ok := event.Result.(*structs.MeshConfigEntry)
if event.Result != nil && !ok {
m.logger.Error(fmt.Sprintf("saw unexpected type %T for mesh config entry: falling back to pushing direct server addresses", event.Result))
}
if entry != nil && entry.Peering != nil && entry.Peering.PeerThroughMeshGateways {
useGateways = true
} else {
useGateways = false
}
// Cancel and re-set watches based on the updated config entry.
cancel()
queryCtx, cancel = context.WithCancel(ctx)
if useGateways {
go m.notifyServerMeshGatewayAddresses(queryCtx, updateCh)
} else {
go m.ensureServerAddrSubscription(queryCtx, updateCh)
}
}
}
}
func (m *subscriptionManager) notifyMeshConfigUpdates(ctx context.Context) <-chan cache.UpdateEvent {
const meshConfigWatch = "mesh-config-entry"
notifyCh := make(chan cache.UpdateEvent, 1)
go m.syncViaBlockingQuery(ctx, meshConfigWatch, func(ctx_ context.Context, store StateStore, ws memdb.WatchSet) (interface{}, error) {
_, rawEntry, err := store.ConfigEntry(ws, structs.MeshConfig, structs.MeshConfigMesh, acl.DefaultEnterpriseMeta())
if err != nil {
return nil, fmt.Errorf("failed to get mesh config entry: %w", err)
}
return rawEntry, nil
}, meshConfigWatch, notifyCh)
return notifyCh
}
func (m *subscriptionManager) notifyServerMeshGatewayAddresses(ctx context.Context, updateCh chan<- cache.UpdateEvent) {
m.syncViaBlockingQuery(ctx, "mesh-gateways", func(ctx context.Context, store StateStore, ws memdb.WatchSet) (interface{}, error) {
_, nodes, err := store.ServiceDump(ws, structs.ServiceKindMeshGateway, true, acl.DefaultEnterpriseMeta(), structs.DefaultPeerKeyword)
if err != nil {
return nil, fmt.Errorf("failed to watch mesh gateways services for servers: %w", err)
}
var gatewayAddrs []string
for _, csn := range nodes {
_, addr, port := csn.BestAddress(true)
gatewayAddrs = append(gatewayAddrs, ipaddr.FormatAddressPort(addr, port))
}
if len(gatewayAddrs) == 0 {
return nil, errors.New("configured to peer through mesh gateways but no mesh gateways are registered")
}
// Otherwise return the gateway addresses so the peer can dial the mesh gateways.
return &pbpeering.PeeringServerAddresses{
Addresses: gatewayAddrs,
}, nil
}, subServerAddrs, updateCh)
}
func (m *subscriptionManager) ensureServerAddrSubscription(ctx context.Context, updateCh chan<- cache.UpdateEvent) {
waiter := &retry.Waiter{
MinFailures: 1,
Factor: 500 * time.Millisecond,
MaxWait: 60 * time.Second,
Jitter: retry.NewJitter(100),
}
logger := m.logger.With("queryType", "server-addresses")
var idx uint64
for {
var err error
idx, err = m.subscribeServerAddrs(ctx, idx, updateCh)
if errors.Is(err, stream.ErrSubForceClosed) {
logger.Trace("subscription force-closed due to an ACL change or snapshot restore, will attempt resume")
} else if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
logger.Warn("failed to subscribe to server addresses, will attempt resume", "error", err.Error())
} else if err != nil {
logger.Trace(err.Error())
return
}
if err := waiter.Wait(ctx); err != nil {
return return
default:
} }
} }
} }
@ -832,17 +910,22 @@ func (m *subscriptionManager) subscribeServerAddrs(
grpcAddr := srv.Address + ":" + strconv.Itoa(srv.ExtGRPCPort) grpcAddr := srv.Address + ":" + strconv.Itoa(srv.ExtGRPCPort)
serverAddrs = append(serverAddrs, grpcAddr) serverAddrs = append(serverAddrs, grpcAddr)
} }
if len(serverAddrs) == 0 { if len(serverAddrs) == 0 {
m.logger.Warn("did not find any server addresses with external gRPC ports to publish") m.logger.Warn("did not find any server addresses with external gRPC ports to publish")
continue continue
} }
updateCh <- cache.UpdateEvent{ u := cache.UpdateEvent{
CorrelationID: subServerAddrs, CorrelationID: subServerAddrs,
Result: &pbpeering.PeeringServerAddresses{ Result: &pbpeering.PeeringServerAddresses{
Addresses: serverAddrs, Addresses: serverAddrs,
}, },
} }
select {
case <-ctx.Done():
return 0, ctx.Err()
case updateCh <- u:
}
} }
} }
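notifyServerAddrUpdates now follows a cancel-and-restart pattern: each mesh config event cancels the previous watcher's context and spawns whichever watcher matches the new PeerThroughMeshGateways setting. A toy version of the same control flow, with stand-in watchers in place of the real subscriptions:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// watch is a stand-in for notifyServerMeshGatewayAddresses /
// ensureServerAddrSubscription: it runs until its context is cancelled.
func watch(ctx context.Context, name string) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(25 * time.Millisecond):
			fmt.Println(name, "tick")
		}
	}
}

func main() {
	ctx, stop := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer stop()

	events := make(chan bool) // toy config events: true = peer through gateways
	go func() {
		events <- true
		time.Sleep(100 * time.Millisecond)
		events <- false
	}()

	cancel := func() {}
	for {
		select {
		case <-ctx.Done():
			cancel()
			return
		case useGateways := <-events:
			cancel() // tear down the previous watcher before starting the next
			var queryCtx context.Context
			queryCtx, cancel = context.WithCancel(ctx)
			if useGateways {
				go watch(queryCtx, "mesh-gateway-addrs")
			} else {
				go watch(queryCtx, "server-addrs")
			}
		}
	}
}
```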

View File

@ -7,6 +7,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/hashicorp/consul/types"
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@ -49,17 +50,15 @@ func TestSubscriptionManager_RegisterDeregister(t *testing.T) {
subCh := mgr.subscribe(ctx, id, "my-peering", partition) subCh := mgr.subscribe(ctx, id, "my-peering", partition)
var ( var (
gatewayCorrID = subMeshGateway + partition mysqlCorrID = subExportedService + structs.NewServiceName("mysql", nil).String()
mysqlCorrID = subExportedService + structs.NewServiceName("mysql", nil).String()
mysqlProxyCorrID = subExportedService + structs.NewServiceName("mysql-sidecar-proxy", nil).String() mysqlProxyCorrID = subExportedService + structs.NewServiceName("mysql-sidecar-proxy", nil).String()
) )
// Expect just the empty mesh gateway event to replicate. // Expect just the empty exported service list event to replicate.
expectEvents(t, subCh, func(t *testing.T, got cache.UpdateEvent) { expectEvents(t, subCh,
checkEvent(t, got, gatewayCorrID, 0) func(t *testing.T, got cache.UpdateEvent) {
}) checkExportedServices(t, got, []string{})
})
// Initially add in L4 failover so that later we can test removing it. We // Initially add in L4 failover so that later we can test removing it. We
// cannot do the other way around because it would fail validation to // cannot do the other way around because it would fail validation to
@ -81,19 +80,22 @@ func TestSubscriptionManager_RegisterDeregister(t *testing.T) {
{ {
Name: "mysql", Name: "mysql",
Consumers: []structs.ServiceConsumer{ Consumers: []structs.ServiceConsumer{
{PeerName: "my-peering"}, {Peer: "my-peering"},
}, },
}, },
{ {
Name: "mongo", Name: "mongo",
Consumers: []structs.ServiceConsumer{ Consumers: []structs.ServiceConsumer{
{PeerName: "my-other-peering"}, {Peer: "my-other-peering"},
}, },
}, },
}, },
}) })
expectEvents(t, subCh, expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
checkExportedServices(t, got, []string{"mysql"})
},
func(t *testing.T, got cache.UpdateEvent) { func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlCorrID, 0) checkEvent(t, got, mysqlCorrID, 0)
}, },
@ -292,17 +294,6 @@ func TestSubscriptionManager_RegisterDeregister(t *testing.T) {
}, },
}, res.Nodes[0]) }, res.Nodes[0])
}, },
func(t *testing.T, got cache.UpdateEvent) {
require.Equal(t, gatewayCorrID, got.CorrelationID)
res := got.Result.(*pbservice.IndexedCheckServiceNodes)
require.Equal(t, uint64(0), res.Index)
require.Len(t, res.Nodes, 1)
prototest.AssertDeepEqual(t, &pbservice.CheckServiceNode{
Node: pbNode("mgw", "10.1.1.1", partition),
Service: pbService("mesh-gateway", "gateway-1", "gateway", 8443, nil),
}, res.Nodes[0])
},
) )
}) })
@ -428,12 +419,25 @@ func TestSubscriptionManager_RegisterDeregister(t *testing.T) {
require.Len(t, res.Nodes, 0) require.Len(t, res.Nodes, 0)
}, },
func(t *testing.T, got cache.UpdateEvent) { )
require.Equal(t, gatewayCorrID, got.CorrelationID) })
res := got.Result.(*pbservice.IndexedCheckServiceNodes)
require.Equal(t, uint64(0), res.Index)
require.Len(t, res.Nodes, 0) testutil.RunStep(t, "unexporting a service sends an event", func(t *testing.T) {
backend.ensureConfigEntry(t, &structs.ExportedServicesConfigEntry{
Name: "default",
Services: []structs.ExportedService{
{
Name: "mongo",
Consumers: []structs.ServiceConsumer{
{Peer: "my-other-peering"},
},
},
},
})
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
checkExportedServices(t, got, []string{})
}, },
) )
}) })
@ -478,8 +482,6 @@ func TestSubscriptionManager_InitialSnapshot(t *testing.T) {
backend.ensureService(t, "zip", mongo.Service) backend.ensureService(t, "zip", mongo.Service)
var ( var (
gatewayCorrID = subMeshGateway + partition
mysqlCorrID = subExportedService + structs.NewServiceName("mysql", nil).String() mysqlCorrID = subExportedService + structs.NewServiceName("mysql", nil).String()
mongoCorrID = subExportedService + structs.NewServiceName("mongo", nil).String() mongoCorrID = subExportedService + structs.NewServiceName("mongo", nil).String()
chainCorrID = subExportedService + structs.NewServiceName("chain", nil).String() chainCorrID = subExportedService + structs.NewServiceName("chain", nil).String()
@ -490,9 +492,10 @@ func TestSubscriptionManager_InitialSnapshot(t *testing.T) {
) )
// Expect just the empty mesh gateway event to replicate. // Expect just the empty exported service list event to replicate.
expectEvents(t, subCh, func(t *testing.T, got cache.UpdateEvent) { expectEvents(t, subCh,
checkEvent(t, got, gatewayCorrID, 0) func(t *testing.T, got cache.UpdateEvent) {
}) checkExportedServices(t, got, []string{})
})
// At this point in time we'll have a mesh-gateway notification with no // At this point in time we'll have a mesh-gateway notification with no
// content stored and handled. // content stored and handled.
@ -503,25 +506,28 @@ func TestSubscriptionManager_InitialSnapshot(t *testing.T) {
{ {
Name: "mysql", Name: "mysql",
Consumers: []structs.ServiceConsumer{ Consumers: []structs.ServiceConsumer{
{PeerName: "my-peering"}, {Peer: "my-peering"},
}, },
}, },
{ {
Name: "mongo", Name: "mongo",
Consumers: []structs.ServiceConsumer{ Consumers: []structs.ServiceConsumer{
{PeerName: "my-peering"}, {Peer: "my-peering"},
}, },
}, },
{ {
Name: "chain", Name: "chain",
Consumers: []structs.ServiceConsumer{ Consumers: []structs.ServiceConsumer{
{PeerName: "my-peering"}, {Peer: "my-peering"},
}, },
}, },
}, },
}) })
expectEvents(t, subCh, expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
checkExportedServices(t, got, []string{"mysql", "chain", "mongo"})
},
func(t *testing.T, got cache.UpdateEvent) { func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, chainCorrID, 0) checkEvent(t, got, chainCorrID, 0)
}, },
@ -562,9 +568,6 @@ func TestSubscriptionManager_InitialSnapshot(t *testing.T) {
func(t *testing.T, got cache.UpdateEvent) { func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, mysqlProxyCorrID, 1, "mysql-sidecar-proxy", string(structs.ServiceKindConnectProxy)) checkEvent(t, got, mysqlProxyCorrID, 1, "mysql-sidecar-proxy", string(structs.ServiceKindConnectProxy))
}, },
func(t *testing.T, got cache.UpdateEvent) {
checkEvent(t, got, gatewayCorrID, 1, "gateway", string(structs.ServiceKindMeshGateway))
},
) )
}) })
} }
@ -706,6 +709,102 @@ func TestSubscriptionManager_ServerAddrs(t *testing.T) {
}, },
) )
}) })
testutil.RunStep(t, "flipped to peering through mesh gateways", func(t *testing.T) {
require.NoError(t, backend.store.EnsureConfigEntry(1, &structs.MeshConfigEntry{
Peering: &structs.PeeringMeshConfig{
PeerThroughMeshGateways: true,
},
}))
select {
case <-time.After(100 * time.Millisecond):
case <-subCh:
t.Fatal("expected to time out: no mesh gateways are registered")
}
})
testutil.RunStep(t, "registered and received a mesh gateway", func(t *testing.T) {
reg := structs.RegisterRequest{
ID: types.NodeID("b5489ca9-f5e9-4dba-a779-61fec4e8e364"),
Node: "gw-node",
Address: "1.2.3.4",
TaggedAddresses: map[string]string{
structs.TaggedAddressWAN: "172.217.22.14",
},
Service: &structs.NodeService{
ID: "mesh-gateway",
Service: "mesh-gateway",
Kind: structs.ServiceKindMeshGateway,
Port: 443,
TaggedAddresses: map[string]structs.ServiceAddress{
structs.TaggedAddressWAN: {Address: "154.238.12.252", Port: 8443},
},
},
}
require.NoError(t, backend.store.EnsureRegistration(2, &reg))
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
require.Equal(t, subServerAddrs, got.CorrelationID)
addrs, ok := got.Result.(*pbpeering.PeeringServerAddresses)
require.True(t, ok)
require.Equal(t, []string{"154.238.12.252:8443"}, addrs.GetAddresses())
},
)
})
testutil.RunStep(t, "registered and received a second mesh gateway", func(t *testing.T) {
reg := structs.RegisterRequest{
ID: types.NodeID("e4cc0af3-5c09-4ddf-94a9-5840e427bc45"),
Node: "gw-node-2",
Address: "1.2.3.5",
TaggedAddresses: map[string]string{
structs.TaggedAddressWAN: "172.217.22.15",
},
Service: &structs.NodeService{
ID: "mesh-gateway",
Service: "mesh-gateway",
Kind: structs.ServiceKindMeshGateway,
Port: 443,
},
}
require.NoError(t, backend.store.EnsureRegistration(3, &reg))
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
require.Equal(t, subServerAddrs, got.CorrelationID)
addrs, ok := got.Result.(*pbpeering.PeeringServerAddresses)
require.True(t, ok)
require.Equal(t, []string{"154.238.12.252:8443", "172.217.22.15:443"}, addrs.GetAddresses())
},
)
})
testutil.RunStep(t, "disabled peering through gateways and received server addresses", func(t *testing.T) {
require.NoError(t, backend.store.EnsureConfigEntry(4, &structs.MeshConfigEntry{
Peering: &structs.PeeringMeshConfig{
PeerThroughMeshGateways: false,
},
}))
expectEvents(t, subCh,
func(t *testing.T, got cache.UpdateEvent) {
require.Equal(t, subServerAddrs, got.CorrelationID)
addrs, ok := got.Result.(*pbpeering.PeeringServerAddresses)
require.True(t, ok)
// New subscriptions receive a snapshot from the event publisher.
// At the start of the test the handler registered a mock that only returns a single address.
require.Equal(t, []string{"198.18.0.1:8502"}, addrs.GetAddresses())
},
)
})
} }
type testSubscriptionBackend struct { type testSubscriptionBackend struct {
@ -933,6 +1032,23 @@ func checkEvent(
} }
} }
func checkExportedServices(
t *testing.T,
got cache.UpdateEvent,
expectedServices []string,
) {
t.Helper()
var qualifiedServices []string
for _, s := range expectedServices {
qualifiedServices = append(qualifiedServices, structs.ServiceName{Name: s}.String())
}
require.Equal(t, subExportedServiceList, got.CorrelationID)
evt := got.Result.(*pbpeerstream.ExportedServiceList)
require.ElementsMatch(t, qualifiedServices, evt.Services)
}
func pbNode(node, addr, partition string) *pbservice.Node { func pbNode(node, addr, partition string) *pbservice.Node {
return &pbservice.Node{Node: node, Partition: partition, Address: addr} return &pbservice.Node{Node: node, Partition: partition, Address: addr}
} }

View File

@ -96,6 +96,9 @@ func (s *subscriptionState) cleanupEventVersions(logger hclog.Logger) {
case id == serverAddrsPayloadID: case id == serverAddrsPayloadID:
keep = true keep = true
case id == exportedServiceListID:
keep = true
case strings.HasPrefix(id, servicePayloadIDPrefix): case strings.HasPrefix(id, servicePayloadIDPrefix):
name := strings.TrimPrefix(id, servicePayloadIDPrefix) name := strings.TrimPrefix(id, servicePayloadIDPrefix)
sn := structs.ServiceNameFromString(name) sn := structs.ServiceNameFromString(name)
@ -135,6 +138,7 @@ const (
serverAddrsPayloadID = "server-addrs" serverAddrsPayloadID = "server-addrs"
caRootsPayloadID = "roots" caRootsPayloadID = "roots"
meshGatewayPayloadID = "mesh-gateway" meshGatewayPayloadID = "mesh-gateway"
exportedServiceListID = "exported-service-list"
servicePayloadIDPrefix = "service:" servicePayloadIDPrefix = "service:"
discoveryChainPayloadIDPrefix = "chain:" discoveryChainPayloadIDPrefix = "chain:"
) )

View File

@ -5,8 +5,10 @@ import (
"fmt" "fmt"
"io" "io"
"sync" "sync"
"testing"
"time" "time"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/metadata" "google.golang.org/grpc/metadata"
"github.com/hashicorp/consul/proto/pbpeerstream" "github.com/hashicorp/consul/proto/pbpeerstream"
@ -49,6 +51,24 @@ func NewMockClient(ctx context.Context) *MockClient {
} }
} }
// DrainStream reads messages from the stream until both the exported service list and
// trust bundle messages have been read. We do this because their ordering is nondeterministic.
func (c *MockClient) DrainStream(t *testing.T) {
seen := make(map[string]struct{})
for len(seen) < 2 {
msg, err := c.Recv()
require.NoError(t, err)
if r := msg.GetResponse(); r != nil && r.ResourceURL == pbpeerstream.TypeURLExportedServiceList {
seen[pbpeerstream.TypeURLExportedServiceList] = struct{}{}
}
if r := msg.GetResponse(); r != nil && r.ResourceURL == pbpeerstream.TypeURLPeeringTrustBundle {
seen[pbpeerstream.TypeURLPeeringTrustBundle] = struct{}{}
}
}
}
// MockStream mocks peering.PeeringService_StreamResourcesServer // MockStream mocks peering.PeeringService_StreamResourcesServer
type MockStream struct { type MockStream struct {
sendCh chan *pbpeerstream.ReplicationMessage sendCh chan *pbpeerstream.ReplicationMessage

View File

@ -26,15 +26,17 @@ func (s *Server) WatchServers(req *pbserverdiscovery.WatchServersRequest, server
logger.Debug("starting stream") logger.Debug("starting stream")
defer logger.Trace("stream closed") defer logger.Trace("stream closed")
token := external.TokenFromContext(serverStream.Context()) options, err := external.QueryOptionsFromContext(serverStream.Context())
if err != nil {
return err
}
// Serve the ready servers from an EventPublisher subscription. If the subscription is // Serve the ready servers from an EventPublisher subscription. If the subscription is
// closed due to an ACL change, we'll attempt to re-authorize and resume it to // closed due to an ACL change, we'll attempt to re-authorize and resume it to
// prevent unnecessarily terminating the stream. // prevent unnecessarily terminating the stream.
var idx uint64 var idx uint64
for { for {
var err error var err error
idx, err = s.serveReadyServers(token, idx, req, serverStream, logger) idx, err = s.serveReadyServers(options.Token, idx, req, serverStream, logger)
if errors.Is(err, stream.ErrSubForceClosed) { if errors.Is(err, stream.ErrSubForceClosed) {
logger.Trace("subscription force-closed due to an ACL change or snapshot restore, will attempt to re-auth and resume") logger.Trace("subscription force-closed due to an ACL change or snapshot restore, will attempt to re-auth and resume")
} else { } else {

View File

@ -18,6 +18,7 @@ import (
"github.com/hashicorp/consul/agent/consul/stream" "github.com/hashicorp/consul/agent/consul/stream"
external "github.com/hashicorp/consul/agent/grpc-external" external "github.com/hashicorp/consul/agent/grpc-external"
"github.com/hashicorp/consul/agent/grpc-external/testutils" "github.com/hashicorp/consul/agent/grpc-external/testutils"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/proto-public/pbserverdiscovery" "github.com/hashicorp/consul/proto-public/pbserverdiscovery"
"github.com/hashicorp/consul/proto/prototest" "github.com/hashicorp/consul/proto/prototest"
"github.com/hashicorp/consul/sdk/testutil" "github.com/hashicorp/consul/sdk/testutil"
@ -125,7 +126,9 @@ func TestWatchServers_StreamLifeCycle(t *testing.T) {
Return(testutils.TestAuthorizerServiceWriteAny(t), nil).Twice() Return(testutils.TestAuthorizerServiceWriteAny(t), nil).Twice()
// add the token to the requests context // add the token to the requests context
ctx := external.ContextWithToken(context.Background(), testACLToken) options := structs.QueryOptions{Token: testACLToken}
ctx, err := external.ContextWithQueryOptions(context.Background(), options)
require.NoError(t, err)
// setup the server // setup the server
server := NewServer(Config{ server := NewServer(Config{
@ -198,7 +201,9 @@ func TestWatchServers_ACLToken_PermissionDenied(t *testing.T) {
Return(testutils.TestAuthorizerDenyAll(t), nil).Once() Return(testutils.TestAuthorizerDenyAll(t), nil).Once()
// add the token to the requests context // add the token to the requests context
ctx := external.ContextWithToken(context.Background(), testACLToken) options := structs.QueryOptions{Token: testACLToken}
ctx, err := external.ContextWithQueryOptions(context.Background(), options)
require.NoError(t, err)
// setup the server // setup the server
server := NewServer(Config{ server := NewServer(Config{
@ -229,7 +234,9 @@ func TestWatchServers_ACLToken_Unauthenticated(t *testing.T) {
Return(resolver.Result{}, acl.ErrNotFound).Once() Return(resolver.Result{}, acl.ErrNotFound).Once()
// add the token to the requests context // add the token to the requests context
ctx := external.ContextWithToken(context.Background(), testACLToken) options := structs.QueryOptions{Token: testACLToken}
ctx, err := external.ContextWithQueryOptions(context.Background(), options)
require.NoError(t, err)
// setup the server // setup the server
server := NewServer(Config{ server := NewServer(Config{

View File

@ -1,28 +0,0 @@
package external
import (
"context"
"google.golang.org/grpc/metadata"
)
const metadataKeyToken = "x-consul-token"
// TokenFromContext returns the ACL token in the gRPC metadata attached to the
// given context.
func TokenFromContext(ctx context.Context) string {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
return ""
}
toks, ok := md[metadataKeyToken]
if ok && len(toks) > 0 {
return toks[0]
}
return ""
}
// ContextWithToken returns a context with the given ACL token attached.
func ContextWithToken(ctx context.Context, token string) context.Context {
return metadata.AppendToOutgoingContext(ctx, metadataKeyToken, token)
}

View File

@ -0,0 +1,305 @@
// Package bootstrap handles bootstrapping an agent's config from HCP. It must be a
// separate package from other HCP components because it depends on agent/config,
// while the other components need to be imported and run within the server process
// in agent/consul; combining the two would create a dependency cycle.
package bootstrap
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/hcp"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/lib/retry"
)
const (
caFileName = "server-tls-cas.pem"
certFileName = "server-tls-cert.pem"
keyFileName = "server-tls-key.pem"
configFileName = "server-config.json"
subDir = "hcp-config"
)
type ConfigLoader func(source config.Source) (config.LoadResult, error)
// UI is a shim that allows the agent command to pass in its mitchellh/cli.UI so
// we can output useful messages to the user during bootstrapping. For example,
// if we have to retry several times to bootstrap, we don't want the agent to
// stall with no output, which would be the case if we only surfaced the
// intermediate warnings or errors at the end.
type UI interface {
Output(string)
Warn(string)
Info(string)
Error(string)
}
// MaybeBootstrap will use the passed ConfigLoader to read the existing
// configuration, and if required attempt to bootstrap from HCP. It will retry
// until successful or a terminal error condition is found (e.g. permission
// denied). It must be passed a (CLI) UI implementation so it can deliver progress
// updates to the user, for example if it is waiting to retry for a long period.
func MaybeBootstrap(ctx context.Context, loader ConfigLoader, ui UI) (bool, ConfigLoader, error) {
loader = wrapConfigLoader(loader)
res, err := loader(nil)
if err != nil {
return false, nil, err
}
// Check to see if this is a server and HCP is configured
if !res.RuntimeConfig.IsCloudEnabled() {
// Not a server, let agent continue unmodified
return false, loader, nil
}
ui.Output("Bootstrapping configuration from HCP")
// See if we have existing config on disk
cfgJSON, ok := loadPersistedBootstrapConfig(res.RuntimeConfig, ui)
if !ok {
// Fetch from HCP
ui.Info("Fetching configuration from HCP")
cfgJSON, err = doHCPBootstrap(ctx, res.RuntimeConfig, ui)
if err != nil {
return false, nil, fmt.Errorf("failed to bootstrap from HCP: %w", err)
}
ui.Info("Configuration fetched from HCP and saved on local disk")
} else {
ui.Info("Loaded configuration from local disk")
}
// Create a new loader func to return
newLoader := func(source config.Source) (config.LoadResult, error) {
// Don't allow any further attempts to provide a DefaultSource. This should
// only ever be needed later in client agent AutoConfig code but that should
// be mutually exclusive from this bootstrapping mechanism since this is
// only for servers. If we ever try to change that, this clear failure
// should alert future developers that the assumptions are changing rather
// than quietly not applying the config they expect!
if source != nil {
return config.LoadResult{},
fmt.Errorf("non-nil config source provided to a loader after HCP bootstrap already provided a DefaultSource")
}
// Otherwise, just call to the loader we were passed with our own additional
// JSON as the source.
s := config.FileSource{
Name: "HCP Bootstrap",
Format: "json",
Data: cfgJSON,
}
return loader(s)
}
return true, newLoader, nil
}
func wrapConfigLoader(loader ConfigLoader) ConfigLoader {
return func(source config.Source) (config.LoadResult, error) {
res, err := loader(source)
if err != nil {
return res, err
}
if res.RuntimeConfig.Cloud.ResourceID == "" {
res.RuntimeConfig.Cloud.ResourceID = os.Getenv("HCP_RESOURCE_ID")
}
return res, nil
}
}
func doHCPBootstrap(ctx context.Context, rc *config.RuntimeConfig, ui UI) (string, error) {
w := retry.Waiter{
MinWait: 1 * time.Second,
MaxWait: 5 * time.Minute,
Jitter: retry.NewJitter(50),
}
var bsCfg *hcp.BootstrapConfig
client, err := hcp.NewClient(rc.Cloud)
if err != nil {
return "", err
}
for {
// Note we don't want to shadow `ctx` here since we need that for the Wait
// below.
reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
resp, err := client.FetchBootstrap(reqCtx)
if err != nil {
ui.Error(fmt.Sprintf("failed to fetch bootstrap config from HCP, will retry in %s: %s",
w.NextWait().Round(time.Second), err))
if err := w.Wait(ctx); err != nil {
return "", err
}
// Finished waiting, restart loop
continue
}
bsCfg = resp
break
}
dataDir := rc.DataDir
shouldPersist := true
if dataDir == "" {
// The agent is in dev mode. We still need somewhere to persist the certs
// temporarily so it can start up at all, since inline certs aren't
// supported yet. Use a temp dir.
tmp, err := os.MkdirTemp(os.TempDir(), "consul-dev-")
if err != nil {
return "", fmt.Errorf("failed to create temp dir for certificates: %w", err)
}
dataDir = tmp
shouldPersist = false
}
// Persist the TLS cert files from the response since we need to refer to them
// as disk files either way.
if err := persistTLSCerts(dataDir, bsCfg); err != nil {
return "", fmt.Errorf("failed to persist TLS certificates to dir %q: %w", dataDir, err)
}
// Update the config JSON to include those TLS cert files
cfgJSON, err := injectTLSCerts(dataDir, bsCfg.ConsulConfig)
if err != nil {
return "", fmt.Errorf("failed to inject TLS Certs into bootstrap config: %w", err)
}
// Persist the final config we need to add for restarts. Assuming this wasn't
// a tmp dir to start with.
if shouldPersist {
if err := persistBootstrapConfig(dataDir, cfgJSON); err != nil {
return "", fmt.Errorf("failed to persist bootstrap config to dir %q: %w", dataDir, err)
}
}
return cfgJSON, nil
}
func persistTLSCerts(dataDir string, bsCfg *hcp.BootstrapConfig) error {
dir := filepath.Join(dataDir, subDir)
if bsCfg.TLSCert == "" || bsCfg.TLSCertKey == "" {
return fmt.Errorf("unexpected bootstrap response from HCP: missing TLS information")
}
// Create a subdir if it's not already there
if err := lib.EnsurePath(dir, true); err != nil {
return err
}
// Write out CA cert(s). We write them all to one file because Go's x509
// machinery will read as many certs as it finds from each PEM file provided
// and add them separately to the CertPool for validation.
f, err := os.OpenFile(filepath.Join(dir, caFileName), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
bf := bufio.NewWriter(f)
for _, caPEM := range bsCfg.TLSCAs {
bf.WriteString(caPEM + "\n")
}
if err := bf.Flush(); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
if err := ioutil.WriteFile(filepath.Join(dir, certFileName), []byte(bsCfg.TLSCert), 0600); err != nil {
return err
}
if err := ioutil.WriteFile(filepath.Join(dir, keyFileName), []byte(bsCfg.TLSCertKey), 0600); err != nil {
return err
}
return nil
}
func injectTLSCerts(dataDir string, bootstrapJSON string) (string, error) {
// Parse to a plain map for now: we only need to inject values in a specific
// place, and parsing the whole Config struct is complicated.
var cfg map[string]interface{}
if err := json.Unmarshal([]byte(bootstrapJSON), &cfg); err != nil {
return "", err
}
// Inject TLS cert files
cfg["ca_file"] = filepath.Join(dataDir, subDir, caFileName)
cfg["cert_file"] = filepath.Join(dataDir, subDir, certFileName)
cfg["key_file"] = filepath.Join(dataDir, subDir, keyFileName)
jsonBs, err := json.Marshal(cfg)
if err != nil {
return "", err
}
return string(jsonBs), nil
}
func persistBootstrapConfig(dataDir, cfgJSON string) error {
// Persist the important bits we got from bootstrapping. The TLS certs are
// already persisted, just need to persist the config we are going to add.
name := filepath.Join(dataDir, subDir, configFileName)
return ioutil.WriteFile(name, []byte(cfgJSON), 0600)
}
func loadPersistedBootstrapConfig(rc *config.RuntimeConfig, ui UI) (string, bool) {
// Check if the files all exist
files := []string{
filepath.Join(rc.DataDir, subDir, configFileName),
filepath.Join(rc.DataDir, subDir, caFileName),
filepath.Join(rc.DataDir, subDir, certFileName),
filepath.Join(rc.DataDir, subDir, keyFileName),
}
hasSome := false
for _, name := range files {
if _, err := os.Stat(name); errors.Is(err, os.ErrNotExist) {
// At least one required file doesn't exist, so loading failed. This is
// not an error, though.
if hasSome {
ui.Warn("ignoring incomplete local bootstrap config files")
}
return "", false
}
hasSome = true
}
name := filepath.Join(rc.DataDir, subDir, configFileName)
jsonBs, err := ioutil.ReadFile(name)
if err != nil {
ui.Warn(fmt.Sprintf("failed to read local bootstrap config file, ignoring local files: %s", err))
return "", false
}
// Check this looks non-empty at least
jsonStr := strings.TrimSpace(string(jsonBs))
// 50 is arbitrary, but a config containing the required secrets will always
// be larger than this in JSON form, so it's a reasonable check that the file
// wasn't empty or just an empty JSON object.
if len(jsonStr) < 50 {
ui.Warn("ignoring incomplete local bootstrap config files")
return "", false
}
// TODO: we could parse the certificates here, check they are still valid, and
// force a re-fetch if not. We could also attempt to parse the config and
// validate it, in case the local config is very old and contains deprecated
// fields.
return jsonStr, true
}
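A hedged sketch of how an agent command might drive MaybeBootstrap; the base loader built on config.Load, the config file path, and the UI stub are illustrative stand-ins for the real agent command wiring:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/hashicorp/consul/agent/config"
	"github.com/hashicorp/consul/agent/hcp/bootstrap" // assumed package path
)

type stdoutUI struct{}

func (stdoutUI) Output(s string) { fmt.Println(s) }
func (stdoutUI) Warn(s string)   { fmt.Println("WARN: " + s) }
func (stdoutUI) Info(s string)   { fmt.Println(s) }
func (stdoutUI) Error(s string)  { fmt.Fprintln(os.Stderr, s) }

func main() {
	// Base loader: the source argument becomes the default config source, which
	// is how MaybeBootstrap injects the JSON it fetched from HCP.
	base := func(source config.Source) (config.LoadResult, error) {
		return config.Load(config.LoadOpts{
			ConfigFiles:   []string{"/etc/consul.d/server.hcl"}, // illustrative
			DefaultConfig: source,
		})
	}

	bootstrapped, loader, err := bootstrap.MaybeBootstrap(context.Background(), base, stdoutUI{})
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
	if bootstrapped {
		fmt.Println("configuration augmented with HCP bootstrap data")
	}

	// Final load used to construct the agent; the returned loader rejects any
	// further non-nil source after bootstrap, so pass nil here.
	res, err := loader(nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
	_ = res.RuntimeConfig
}
```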

Some files were not shown because too many files have changed in this diff.