test: general cleanup and fixes for the container integration test suite (#15959)

- remove dep on consul main module
- use 'consul tls' subcommands instead of tlsutil
- use direct json config construction instead of agent/config structs (sketched below)
- merge libcluster and libagent packages together
- more widely use BuildContext
- get the OSS/ENT runner stuff working properly
- reduce some flakiness
- fix some correctness issues related to the http/https API
R.B. Boyer 2023-01-11 15:34:27 -06:00 committed by GitHub
parent 6d2880e894
commit d59efd390c
40 changed files with 2732 additions and 2319 deletions
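
The "direct json config construction" and "BuildContext" bullets above come together roughly as in the hedged sketch below. It is assembled only from the APIs added in this diff (NewBuildContext, NewConfigBuilder, NewN); the package alias, the import path of the merged package, the test name, and the option values are illustrative assumptions, not code from this commit.

package upgrade_test // hypothetical test package name

import (
	"testing"

	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster" // assumed path of the merged package
)

func TestThreeServerCluster(t *testing.T) {
	// Shared settings for every agent in the cluster.
	buildCtx := libcluster.NewBuildContext(t, libcluster.BuildOptions{
		Datacenter:             "dc1",
		InjectAutoEncryption:   true,
		InjectGossipEncryption: true,
	})

	// The builder renders straight to the agent's JSON config file,
	// replacing the old dependency on agent/config structs.
	conf := libcluster.NewConfigBuilder(buildCtx).
		Bootstrap(3).
		ToAgentConfig(t)

	// Start three servers and join them; cleanup is registered on t.
	cluster, err := libcluster.NewN(t, *conf, 3)
	if err != nil {
		t.Fatal(err)
	}

	// Use the generated API client for assertions.
	members, err := cluster.Agents[0].GetClient().Agent().Members(false)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("cluster has %d members", len(members))
}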


@@ -221,6 +221,10 @@ jobs:
name: lint sdk
working_directory: sdk
command: *lintcmd
- run:
name: lint container tests
working_directory: test/integration/consul-container
command: *lintcmd
- run: *notify-slack-failure
check-go-mod:
@@ -864,7 +868,9 @@ jobs:
-- \
-timeout=30m \
./... \
--target-image consul \
--target-version local \
--latest-image consul \
--latest-version latest
ls -lrt
environment:
@@ -874,9 +880,6 @@ jobs:
COMPOSE_INTERACTIVE_NO_CLI: 1
# tput complains if this isn't set to something.
TERM: ansi
- store_artifacts:
path: ./test/integration/consul-container/test/upgrade/workdir/logs
destination: container-logs
- store_test_results:
path: *TEST_RESULTS_DIR
- store_artifacts:


@@ -34,6 +34,8 @@ DATE_FORMAT="%Y-%m-%dT%H:%M:%SZ" # it's tricky to do an RFC3339 format in a cros
GIT_DATE=$(shell $(CURDIR)/build-support/scripts/build-date.sh) # we're using this for build date because it's stable across platform builds
GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY) -X $(GIT_IMPORT).BuildDate=$(GIT_DATE)
GOTESTSUM_PATH?=$(shell command -v gotestsum)
ifeq ($(FORCE_REBUILD),1)
NOCACHE=--no-cache
else
@@ -47,6 +49,12 @@ else
QUIET=
endif
ifeq ("$(GOTAGS)","")
CONSUL_COMPAT_TEST_IMAGE=consul
else
CONSUL_COMPAT_TEST_IMAGE=hashicorp/consul-enterprise
endif
CONSUL_DEV_IMAGE?=consul-dev
GO_BUILD_TAG?=consul-build-go
UI_BUILD_TAG?=consul-build-ui
@@ -311,6 +319,8 @@ lint: lint-tools
@golangci-lint run --build-tags '$(GOTAGS)' && \
(cd api && golangci-lint run --build-tags '$(GOTAGS)') && \
(cd sdk && golangci-lint run --build-tags '$(GOTAGS)')
@echo "--> Running golangci-lint (container tests)"
cd test/integration/consul-container && golangci-lint run --build-tags '$(GOTAGS)'
@echo "--> Running lint-consul-retry"
@lint-consul-retry
@echo "--> Running enumcover"
@@ -391,25 +401,48 @@ test-envoy-integ: $(ENVOY_INTEG_DEPS)
@go test -v -timeout=30m -tags integration $(GO_TEST_FLAGS) ./test/integration/connect/envoy
.PHONY: test-compat-integ
test-compat-integ: dev-docker
ifeq ("$(GOTAGS)","")
@docker tag consul-dev:latest consul:local
@docker run --rm -t consul:local consul version
test-compat-integ: test-compat-integ-setup
ifeq ("$(GOTESTSUM_PATH)","")
@cd ./test/integration/consul-container && \
go test -v -timeout=30m ./... --target-version local --latest-version latest
go test \
-v \
-timeout=30m \
./... \
--tags $(GOTAGS) \
--target-image $(CONSUL_COMPAT_TEST_IMAGE) \
--target-version local \
--latest-image $(CONSUL_COMPAT_TEST_IMAGE) \
--latest-version latest
else
@docker tag consul-dev:latest hashicorp/consul-enterprise:local
@docker run --rm -t hashicorp/consul-enterprise:local consul version
@cd ./test/integration/consul-container && \
go test -v -timeout=30m ./... --tags $(GOTAGS) --target-version local --latest-version latest
gotestsum \
--format=short-verbose \
--debug \
--rerun-fails=3 \
--packages="./..." \
-- \
--tags $(GOTAGS) \
-timeout=30m \
./... \
--target-image $(CONSUL_COMPAT_TEST_IMAGE) \
--target-version local \
--latest-image $(CONSUL_COMPAT_TEST_IMAGE) \
--latest-version latest
endif
.PHONY: test-compat-integ-setup
test-compat-integ-setup: dev-docker
@docker tag consul-dev:latest $(CONSUL_COMPAT_TEST_IMAGE):local
@docker run --rm -t $(CONSUL_COMPAT_TEST_IMAGE):local consul version
.PHONY: test-metrics-integ
test-metrics-integ: dev-docker
@docker tag consul-dev:latest consul:local
@docker run --rm -t consul:local consul version
test-metrics-integ: test-compat-integ-setup
@cd ./test/integration/consul-container && \
go test -v -timeout=7m ./metrics --target-version local
go test -v -timeout=7m ./test/metrics \
--target-image $(CONSUL_COMPAT_TEST_IMAGE) \
--target-version local \
--latest-image $(CONSUL_COMPAT_TEST_IMAGE) \
--latest-version latest
test-connect-ca-providers:
ifeq ("$(CIRCLECI)","true")


@@ -3,182 +3,79 @@ module github.com/hashicorp/consul/test/integration/consul-container
go 1.19
require (
github.com/docker/docker v20.10.11+incompatible
github.com/docker/docker v20.10.22+incompatible
github.com/docker/go-connections v0.4.0
github.com/hashicorp/consul v1.14.1
github.com/hashicorp/consul/api v1.18.0
github.com/hashicorp/consul/sdk v0.13.0
github.com/hashicorp/go-cleanhttp v0.5.2
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/go-uuid v1.0.2
github.com/hashicorp/go-version v1.2.1
github.com/hashicorp/serf v0.10.1
github.com/itchyny/gojq v0.12.9
github.com/mitchellh/copystructure v1.2.0
github.com/pkg/errors v0.9.1
github.com/stretchr/testify v1.8.0
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569
github.com/testcontainers/testcontainers-go v0.13.0
github.com/testcontainers/testcontainers-go v0.15.0
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
)
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/DataDog/datadog-go v3.2.0+incompatible // indirect
github.com/Microsoft/go-winio v0.4.17 // indirect
github.com/Microsoft/hcsshim v0.8.24 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Microsoft/hcsshim v0.9.4 // indirect
github.com/armon/go-metrics v0.3.10 // indirect
github.com/armon/go-radix v1.0.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
github.com/aws/aws-sdk-go v1.42.34 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/cenkalti/backoff/v4 v4.1.2 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible // indirect
github.com/circonus-labs/circonusllhist v0.1.3 // indirect
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 // indirect
github.com/containerd/cgroups v1.0.3 // indirect
github.com/containerd/containerd v1.5.13 // indirect
github.com/coreos/go-oidc v2.1.0+incompatible // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 // indirect
github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/containerd/cgroups v1.0.4 // indirect
github.com/containerd/containerd v1.6.8 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/go-logr/logr v0.2.0 // indirect
github.com/go-openapi/analysis v0.21.2 // indirect
github.com/go-openapi/errors v0.20.2 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/loads v0.21.1 // indirect
github.com/go-openapi/runtime v0.24.1 // indirect
github.com/go-openapi/spec v0.20.4 // indirect
github.com/go-openapi/strfmt v0.21.3 // indirect
github.com/go-openapi/swag v0.21.1 // indirect
github.com/go-openapi/validate v0.21.0 // indirect
github.com/go-ozzo/ozzo-validation v3.6.0+incompatible // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.1 // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gnostic v0.4.1 // indirect
github.com/gorilla/mux v1.7.3 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706 // indirect
github.com/hashicorp/consul-net-rpc v0.0.0-20221205195236-156cfab66a69 // indirect
github.com/hashicorp/consul/proto-public v0.2.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.2 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-connlimit v0.3.0 // indirect
github.com/hashicorp/go-hclog v1.2.1 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-memdb v1.3.4 // indirect
github.com/hashicorp/go-msgpack v1.1.5 // indirect
github.com/hashicorp/go-msgpack/v2 v2.0.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-raftchunking v0.7.0 // indirect
github.com/hashicorp/go-retryablehttp v0.6.7 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/go-syslog v1.0.0 // indirect
github.com/hashicorp/go-uuid v1.0.2 // indirect
github.com/hashicorp/go-version v1.2.1 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/hcp-scada-provider v0.2.0 // indirect
github.com/hashicorp/hcp-sdk-go v0.23.1-0.20220921131124-49168300a7dc // indirect
github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 // indirect
github.com/hashicorp/memberlist v0.5.0 // indirect
github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 // indirect
github.com/hashicorp/raft v1.3.11 // indirect
github.com/hashicorp/raft-autopilot v0.1.6 // indirect
github.com/hashicorp/raft-boltdb/v2 v2.2.2 // indirect
github.com/hashicorp/vault/api v1.0.5-0.20200717191844-f687267c8086 // indirect
github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 // indirect
github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/itchyny/timefmt-go v0.1.4 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.10 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/magiconair/properties v1.8.5 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/miekg/dns v1.1.41 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.0 // indirect
github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect
github.com/mitchellh/pointerstructure v1.2.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/sys/mount v0.2.0 // indirect
github.com/moby/sys/mountinfo v0.5.0 // indirect
github.com/moby/sys/mount v0.3.3 // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
github.com/opencontainers/runc v1.1.4 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pierrec/lz4 v2.5.2+incompatible // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
github.com/prometheus/client_golang v1.7.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.10.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/stretchr/objx v0.4.0 // indirect
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect
go.etcd.io/bbolt v1.3.5 // indirect
go.mongodb.org/mongo-driver v1.10.0 // indirect
go.opencensus.io v0.22.4 // indirect
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect
go.opencensus.io v0.23.0 // indirect
golang.org/x/net v0.4.0 // indirect
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 // indirect
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/term v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/api v0.20.6 // indirect
k8s.io/apimachinery v0.20.6 // indirect
k8s.io/client-go v0.20.6 // indirect
k8s.io/klog/v2 v2.4.0 // indirect
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.0.3 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)
replace github.com/hashicorp/consul/api => ../../../api
replace github.com/hashicorp/consul/sdk => ../../../sdk
replace github.com/hashicorp/consul/proto-public => ../../../proto-public
replace github.com/hashicorp/consul => ../../..

File diff suppressed because it is too large


@@ -1,31 +0,0 @@
package agent
import (
"context"
"github.com/hashicorp/consul/api"
)
// Agent represent a Consul agent abstraction
type Agent interface {
GetAddr() (string, int)
GetClient() *api.Client
GetName() string
GetConfig() Config
GetDatacenter() string
IsServer() bool
RegisterTermination(func() error)
Terminate() error
Upgrade(ctx context.Context, config Config) error
Exec(ctx context.Context, cmd []string) (int, error)
DataDir() string
}
// Config is a set of configurations required to create a Agent
type Config struct {
JSON string
Certs map[string]string
Image string
Version string
Cmd []string
}


@@ -1,283 +0,0 @@
package agent
import (
"encoding/json"
"path/filepath"
"github.com/pkg/errors"
"golang.org/x/mod/semver"
agentconfig "github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/tlsutil"
)
const (
remoteCertDirectory = "/consul/config/certs"
)
// BuildContext provides a reusable object meant to share common configuration settings
// between agent configuration builders.
type BuildContext struct {
datacenter string
encryptKey string
caCert string
caKey string
index int // keeps track of the certificates issued for naming purposes
injectAutoEncryption bool // initialize the built-in CA and set up agents to use auto-encrpt
injectCerts bool // initializes the built-in CA and distributes client certificates to agents
injectGossipEncryption bool // setup the agents to use a gossip encryption key
consulVersion string
}
// BuildOptions define the desired automated test setup overrides that are
// applied across agents in the cluster
type BuildOptions struct {
Datacenter string // Override datacenter for agents
InjectCerts bool // Provides a CA for all agents and (future) agent certs.
InjectAutoEncryption bool // Configures auto-encrypt for TLS and sets up certs. Overrides InjectCerts.
InjectGossipEncryption bool // Provides a gossip encryption key for all agents.
ConsulVersion string // The default Consul version for agents in the cluster when none is specified.
}
func NewBuildContext(opts BuildOptions) (*BuildContext, error) {
ctx := &BuildContext{
datacenter: opts.Datacenter,
injectAutoEncryption: opts.InjectAutoEncryption,
injectCerts: opts.InjectCerts,
injectGossipEncryption: opts.InjectGossipEncryption,
consulVersion: opts.ConsulVersion,
}
if opts.ConsulVersion == "" {
ctx.consulVersion = *utils.TargetVersion
}
if opts.InjectGossipEncryption {
serfKey, err := newSerfEncryptionKey()
if err != nil {
return nil, errors.Wrap(err, "could not generate serf encryption key")
}
ctx.encryptKey = serfKey
}
if opts.InjectAutoEncryption || opts.InjectCerts {
// This is the same call that 'consul tls ca create` will run
caCert, caKey, err := tlsutil.GenerateCA(tlsutil.CAOpts{Domain: "consul", PermittedDNSDomains: []string{"consul", "localhost"}})
if err != nil {
return nil, errors.Wrap(err, "could not generate built-in CA root pair")
}
ctx.caCert = caCert
ctx.caKey = caKey
}
return ctx, nil
}
func (c *BuildContext) GetCerts() (cert string, key string) {
return c.caCert, c.caKey
}
type Builder struct {
conf *agentconfig.Config
certs map[string]string
context *BuildContext
}
// NewConfigBuilder instantiates a builder object with sensible defaults for a single consul instance
// This includes the following:
// * default ports with no plaintext options
// * debug logging
// * single server with bootstrap
// * bind to all interfaces, advertise on 'eth0'
// * connect enabled
func NewConfigBuilder(ctx *BuildContext) *Builder {
b := &Builder{
certs: map[string]string{},
conf: &agentconfig.Config{
AdvertiseAddrLAN: utils.StringToPointer(`{{ GetInterfaceIP "eth0" }}`),
BindAddr: utils.StringToPointer("0.0.0.0"),
Bootstrap: utils.BoolToPointer(true),
ClientAddr: utils.StringToPointer("0.0.0.0"),
Connect: agentconfig.Connect{
Enabled: utils.BoolToPointer(true),
},
LogLevel: utils.StringToPointer("DEBUG"),
ServerMode: utils.BoolToPointer(true),
},
context: ctx,
}
// These are the default ports, disabling plaintext transport
b.conf.Ports = agentconfig.Ports{
DNS: utils.IntToPointer(8600),
HTTP: nil,
HTTPS: utils.IntToPointer(8501),
GRPC: utils.IntToPointer(8502),
SerfLAN: utils.IntToPointer(8301),
SerfWAN: utils.IntToPointer(8302),
Server: utils.IntToPointer(8300),
}
if ctx != nil && (ctx.consulVersion == "local" || semver.Compare("v"+ctx.consulVersion, "v1.14.0") >= 0) {
// Enable GRPCTLS for version after v1.14.0
b.conf.Ports.GRPCTLS = utils.IntToPointer(8503)
}
return b
}
func (b *Builder) Bootstrap(servers int) *Builder {
if servers < 1 {
b.conf.Bootstrap = nil
b.conf.BootstrapExpect = nil
} else if servers == 1 {
b.conf.Bootstrap = utils.BoolToPointer(true)
b.conf.BootstrapExpect = nil
} else {
b.conf.Bootstrap = nil
b.conf.BootstrapExpect = utils.IntToPointer(servers)
}
return b
}
func (b *Builder) Client() *Builder {
b.conf.Ports.Server = nil
b.conf.ServerMode = nil
b.conf.Bootstrap = nil
b.conf.BootstrapExpect = nil
return b
}
func (b *Builder) Datacenter(name string) *Builder {
b.conf.Datacenter = utils.StringToPointer(name)
return b
}
func (b *Builder) Peering(enable bool) *Builder {
b.conf.Peering = agentconfig.Peering{
Enabled: utils.BoolToPointer(enable),
}
return b
}
func (b *Builder) RetryJoin(names ...string) *Builder {
b.conf.RetryJoinLAN = names
return b
}
func (b *Builder) Telemetry(statSite string) *Builder {
b.conf.Telemetry = agentconfig.Telemetry{
StatsiteAddr: utils.StringToPointer(statSite),
}
return b
}
// ToAgentConfig renders the builders configuration into a string
// representation of the json config file for agents.
// DANGER! Some fields may not have json tags in the Agent Config.
// You may need to add these yourself.
func (b *Builder) ToAgentConfig() (*Config, error) {
b.injectContextOptions()
out, err := json.MarshalIndent(b.conf, "", " ")
if err != nil {
return nil, errors.Wrap(err, "could not marshall builder")
}
conf := &Config{
Certs: b.certs,
Cmd: []string{"agent"},
Image: *utils.TargetImage,
JSON: string(out),
Version: *utils.TargetVersion,
}
// Override the default version
if b.context != nil && b.context.consulVersion != "" {
conf.Version = b.context.consulVersion
}
return conf, nil
}
func (b *Builder) injectContextOptions() {
if b.context == nil {
return
}
var dc string
if b.context.datacenter != "" {
b.conf.Datacenter = utils.StringToPointer(b.context.datacenter)
dc = b.context.datacenter
}
if b.conf.Datacenter == nil || *b.conf.Datacenter == "" {
dc = "dc1"
}
server := b.conf.ServerMode != nil && *b.conf.ServerMode
if b.context.encryptKey != "" {
b.conf.EncryptKey = utils.StringToPointer(b.context.encryptKey)
}
// For any TLS setup, we add the CA to agent conf
if b.context.caCert != "" {
// Add the ca file to the list of certs that will be mounted to consul
filename := filepath.Join(remoteCertDirectory, "consul-agent-ca.pem")
b.certs[filename] = b.context.caCert
b.conf.TLS = agentconfig.TLS{
Defaults: agentconfig.TLSProtocolConfig{
CAFile: utils.StringToPointer(filename),
VerifyOutgoing: utils.BoolToPointer(true), // Secure settings
},
InternalRPC: agentconfig.TLSProtocolConfig{
VerifyServerHostname: utils.BoolToPointer(true),
},
}
}
// Also for any TLS setup, generate server key pairs from the CA
if b.context.caCert != "" && server {
keyFileName, priv, certFileName, pub := newServerTLSKeyPair(dc, b.context)
// Add the key pair to the list that will be mounted to consul
certFileName = filepath.Join(remoteCertDirectory, certFileName)
keyFileName = filepath.Join(remoteCertDirectory, keyFileName)
b.certs[certFileName] = pub
b.certs[keyFileName] = priv
b.conf.TLS.Defaults.CertFile = utils.StringToPointer(certFileName)
b.conf.TLS.Defaults.KeyFile = utils.StringToPointer(keyFileName)
b.conf.TLS.Defaults.VerifyIncoming = utils.BoolToPointer(true) // Only applies to servers for auto-encrypt
}
// This assumes we've already gone through the CA/Cert setup in the previous conditional
if b.context.injectAutoEncryption && server {
b.conf.AutoEncrypt = agentconfig.AutoEncrypt{
AllowTLS: utils.BoolToPointer(true), // This setting is different between client and servers
}
b.conf.TLS.GRPC = agentconfig.TLSProtocolConfig{
UseAutoCert: utils.BoolToPointer(true), // This is required for peering to work over the non-GRPC_TLS port
}
// VerifyIncoming does not apply to client agents for auto-encrypt
}
if b.context.injectAutoEncryption && !server {
b.conf.AutoEncrypt = agentconfig.AutoEncrypt{
TLS: utils.BoolToPointer(true), // This setting is different between client and servers
}
b.conf.TLS.GRPC = agentconfig.TLSProtocolConfig{
UseAutoCert: utils.BoolToPointer(true), // This is required for peering to work over the non-GRPC_TLS port
}
}
if b.context.injectCerts && !b.context.injectAutoEncryption {
panic("client certificate distribution not implemented")
}
b.context.index++
}


@@ -1,443 +0,0 @@
package agent
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"time"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/pkg/errors"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
const bootLogLine = "Consul agent running"
const disableRYUKEnv = "TESTCONTAINERS_RYUK_DISABLED"
// consulContainerNode implements the Agent interface by running a Consul agent
// in a container.
type consulContainerNode struct {
ctx context.Context
client *api.Client
pod testcontainers.Container
container testcontainers.Container
serverMode bool
ip string
port int
datacenter string
config Config
podReq testcontainers.ContainerRequest
consulReq testcontainers.ContainerRequest
certDir string
dataDir string
network string
id int
name string
terminateFuncs []func() error
}
// NewConsulContainer starts a Consul agent in a container with the given config.
func NewConsulContainer(ctx context.Context, config Config, network string, index int) (Agent, error) {
license, err := readLicense()
if err != nil {
return nil, err
}
pc, err := readSomeConfigFileFields(config.JSON)
if err != nil {
return nil, err
}
consulType := "client"
if pc.Server {
consulType = "server"
}
name := utils.RandName(fmt.Sprintf("%s-consul-%s-%d", pc.Datacenter, consulType, index))
// Inject new Agent name
config.Cmd = append(config.Cmd, "-node", name)
tmpDirData, err := os.MkdirTemp("", name)
if err != nil {
return nil, err
}
err = os.Chmod(tmpDirData, 0777)
if err != nil {
return nil, err
}
configFile, err := createConfigFile(config.JSON)
if err != nil {
return nil, err
}
tmpCertData, err := os.MkdirTemp("", fmt.Sprintf("%s-certs", name))
if err != nil {
return nil, err
}
err = os.Chmod(tmpCertData, 0777)
if err != nil {
return nil, err
}
for filename, cert := range config.Certs {
err := createCertFile(tmpCertData, filename, cert)
if err != nil {
return nil, errors.Wrapf(err, "failed to write file %s", filename)
}
}
opts := containerOpts{
name: name,
certDir: tmpCertData,
configFile: configFile,
dataDir: tmpDirData,
license: license,
addtionalNetworks: []string{"bridge", network},
hostname: fmt.Sprintf("agent-%d", index),
}
podReq, consulReq := newContainerRequest(config, opts)
podContainer, err := startContainer(ctx, podReq)
if err != nil {
return nil, err
}
mappedPort, err := podContainer.MappedPort(ctx, "8500")
if err != nil {
return nil, err
}
ip, err := podContainer.ContainerIP(ctx)
if err != nil {
return nil, err
}
consulContainer, err := startContainer(ctx, consulReq)
if err != nil {
return nil, err
}
if *utils.FollowLog {
if err := consulContainer.StartLogProducer(ctx); err != nil {
return nil, err
}
consulContainer.FollowOutput(&LogConsumer{
Prefix: name,
})
}
uri, err := podContainer.Endpoint(ctx, "http")
if err != nil {
return nil, err
}
apiConfig := api.DefaultConfig()
apiConfig.Address = uri
apiClient, err := api.NewClient(apiConfig)
if err != nil {
return nil, err
}
return &consulContainerNode{
config: config,
pod: podContainer,
container: consulContainer,
serverMode: pc.Server,
ip: ip,
port: mappedPort.Int(),
datacenter: pc.Datacenter,
client: apiClient,
ctx: ctx,
podReq: podReq,
consulReq: consulReq,
dataDir: tmpDirData,
certDir: tmpCertData,
network: network,
id: index,
name: name,
}, nil
}
func (c *consulContainerNode) GetName() string {
name, err := c.container.Name(c.ctx)
if err != nil {
return ""
}
return name
}
func (c *consulContainerNode) GetConfig() Config {
return c.config
}
func (c *consulContainerNode) GetDatacenter() string {
return c.datacenter
}
func (c *consulContainerNode) IsServer() bool {
return c.serverMode
}
// GetClient returns an API client that can be used to communicate with the Agent.
func (c *consulContainerNode) GetClient() *api.Client {
return c.client
}
// GetAddr return the network address associated with the Agent.
func (c *consulContainerNode) GetAddr() (string, int) {
return c.ip, c.port
}
func (c *consulContainerNode) RegisterTermination(f func() error) {
c.terminateFuncs = append(c.terminateFuncs, f)
}
func (c *consulContainerNode) Exec(ctx context.Context, cmd []string) (int, error) {
return c.container.Exec(ctx, cmd)
}
// Upgrade terminates a running container and create a new one using the provided config.
// The upgraded node will
// - use the same node name and the data dir as the old version node
func (c *consulContainerNode) Upgrade(ctx context.Context, config Config) error {
// Reuse the node name since we assume upgrade on the same node
config.Cmd = append(config.Cmd, "-node", c.name)
file, err := createConfigFile(config.JSON)
if err != nil {
return err
}
for filename, cert := range config.Certs {
err := createCertFile(c.certDir, filename, cert)
if err != nil {
return errors.Wrapf(err, "failed to write file %s", filename)
}
}
// We'll keep the same pod.
opts := containerOpts{
name: c.consulReq.Name,
certDir: c.certDir,
configFile: file,
dataDir: c.dataDir,
license: "",
addtionalNetworks: []string{"bridge", c.network},
hostname: fmt.Sprintf("agent-%d", c.id),
}
_, consulReq2 := newContainerRequest(config, opts)
consulReq2.Env = c.consulReq.Env // copy license
fmt.Printf("Upgraded node %s config:%s\n", c.name, file)
if c.container != nil && *utils.FollowLog {
err = c.container.StopLogProducer()
time.Sleep(2 * time.Second)
if err != nil {
fmt.Printf("WARN: error stop log producer: %v", err)
}
}
if err = c.container.Terminate(c.ctx); err != nil {
return fmt.Errorf("error terminate running container: %v", err)
}
c.consulReq = consulReq2
time.Sleep(5 * time.Second)
container, err := startContainer(ctx, c.consulReq)
c.container = container
if err != nil {
return err
}
c.ctx = ctx
if *utils.FollowLog {
if err := container.StartLogProducer(ctx); err != nil {
return err
}
container.FollowOutput(&LogConsumer{
Prefix: c.name,
})
}
return nil
}
// Terminate attempts to terminate the agent container.
// This might also include running termination functions for containers associated with the agent.
// On failure, an error will be returned and the reaper process (RYUK) will handle cleanup.
func (c *consulContainerNode) Terminate() error {
// Services might register a termination function that should also fire
// when the "agent" is cleaned up
for _, f := range c.terminateFuncs {
err := f()
if err != nil {
continue
}
}
if c.container == nil {
return nil
}
state, err := c.container.State(context.Background())
if err == nil && state.Running && *utils.FollowLog {
// StopLogProducer can only be called on running containers
err = c.container.StopLogProducer()
if err1 := c.container.Terminate(c.ctx); err == nil {
err = err1
}
} else {
if err1 := c.container.Terminate(c.ctx); err == nil {
err = err1
}
}
c.container = nil
return err
}
func (c *consulContainerNode) DataDir() string {
return c.dataDir
}
func startContainer(ctx context.Context, req testcontainers.ContainerRequest) (testcontainers.Container, error) {
ctx, cancel := context.WithTimeout(ctx, time.Second*40)
defer cancel()
return testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
})
}
const pauseImage = "k8s.gcr.io/pause:3.3"
type containerOpts struct {
certDir string
configFile string
dataDir string
hostname string
index int
license string
name string
addtionalNetworks []string
}
func newContainerRequest(config Config, opts containerOpts) (podRequest, consulRequest testcontainers.ContainerRequest) {
skipReaper := isRYUKDisabled()
httpPort := "8500"
pod := testcontainers.ContainerRequest{
Image: pauseImage,
AutoRemove: false,
Name: opts.name + "-pod",
SkipReaper: skipReaper,
ExposedPorts: []string{httpPort + "/tcp"},
Hostname: opts.hostname,
Networks: opts.addtionalNetworks,
}
// For handshakes like auto-encrypt, it can take 10's of seconds for the agent to become "ready".
// If we only wait until the log stream starts, subsequent commands to agents will fail.
// TODO: optimize the wait strategy
app := testcontainers.ContainerRequest{
NetworkMode: dockercontainer.NetworkMode("container:" + opts.name + "-pod"),
Image: config.Image + ":" + config.Version,
WaitingFor: wait.ForLog(bootLogLine).WithStartupTimeout(60 * time.Second), // See note above
AutoRemove: false,
Name: opts.name,
Mounts: []testcontainers.ContainerMount{
{Source: testcontainers.DockerBindMountSource{HostPath: opts.certDir}, Target: "/consul/config/certs"},
{Source: testcontainers.DockerBindMountSource{HostPath: opts.configFile}, Target: "/consul/config/config.json"},
{Source: testcontainers.DockerBindMountSource{HostPath: opts.dataDir}, Target: "/consul/data"},
},
Cmd: config.Cmd,
SkipReaper: skipReaper,
Env: map[string]string{"CONSUL_LICENSE": opts.license},
}
return pod, app
}
// isRYUKDisabled returns whether the reaper process (RYUK) has been disabled
// by an environment variable.
//
// https://github.com/testcontainers/moby-ryuk
func isRYUKDisabled() bool {
skipReaperStr := os.Getenv(disableRYUKEnv)
skipReaper, err := strconv.ParseBool(skipReaperStr)
if err != nil {
return false
}
return skipReaper
}
func readLicense() (string, error) {
license := os.Getenv("CONSUL_LICENSE")
if license == "" {
licensePath := os.Getenv("CONSUL_LICENSE_PATH")
if licensePath != "" {
licenseBytes, err := os.ReadFile(licensePath)
if err != nil {
return "", err
}
license = string(licenseBytes)
}
}
return license, nil
}
func createConfigFile(JSON string) (string, error) {
tmpDir, err := os.MkdirTemp("", "consul-container-test-config")
if err != nil {
return "", err
}
err = os.Chmod(tmpDir, 0777)
if err != nil {
return "", err
}
err = os.Mkdir(tmpDir+"/config", 0777)
if err != nil {
return "", err
}
configFile := tmpDir + "/config/config.hcl"
err = os.WriteFile(configFile, []byte(JSON), 0644)
if err != nil {
return "", err
}
return configFile, nil
}
func createCertFile(dir, filename, cert string) error {
filename = filepath.Base(filename)
path := filepath.Join(dir, filename)
err := os.WriteFile(path, []byte(cert), 0644)
if err != nil {
return errors.Wrap(err, "could not write cert file")
}
return nil
}
type parsedConfig struct {
Datacenter string `json:"datacenter"`
Server bool `json:"server"`
}
func readSomeConfigFileFields(JSON string) (parsedConfig, error) {
var pc parsedConfig
if err := json.Unmarshal([]byte(JSON), &pc); err != nil {
return pc, errors.Wrap(err, "failed to parse config file")
}
if pc.Datacenter == "" {
pc.Datacenter = "dc1"
}
return pc, nil
}


@@ -1,58 +0,0 @@
package agent
import (
"crypto/rand"
"crypto/x509"
"encoding/base64"
"fmt"
"net"
"github.com/pkg/errors"
"github.com/hashicorp/consul/tlsutil"
)
func newSerfEncryptionKey() (string, error) {
key := make([]byte, 32)
n, err := rand.Reader.Read(key)
if err != nil {
return "", errors.Wrap(err, "error reading random data")
}
if n != 32 {
return "", errors.Wrap(err, "couldn't read enough entropy. Generate more entropy!")
}
return base64.StdEncoding.EncodeToString(key), nil
}
func newServerTLSKeyPair(dc string, ctx *BuildContext) (string, string, string, string) {
// Generate agent-specific key pair. Borrowed from 'consul tls cert create -server -dc <dc_name>'
name := fmt.Sprintf("server.%s.%s", dc, "consul")
dnsNames := []string{
name,
"localhost",
}
ipAddresses := []net.IP{net.ParseIP("127.0.0.1")}
extKeyUsage := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}
signer, err := tlsutil.ParseSigner(ctx.caKey)
if err != nil {
panic("could not parse signer from CA key")
}
pub, priv, err := tlsutil.GenerateCert(tlsutil.CertOpts{
Signer: signer, CA: ctx.caCert, Name: name, Days: 365,
DNSNames: dnsNames, IPAddresses: ipAddresses, ExtKeyUsage: extKeyUsage,
})
prefix := fmt.Sprintf("%s-server-%s", dc, "consul")
certFileName := fmt.Sprintf("%s-%d.pem", prefix, ctx.index)
keyFileName := fmt.Sprintf("%s-%d-key.pem", prefix, ctx.index)
if err = tlsutil.Verify(ctx.caCert, pub, name); err != nil {
panic(fmt.Sprintf("could not verify keypair for %s and %s", certFileName, keyFileName))
}
return keyFileName, priv, certFileName, pub
}


@@ -3,13 +3,13 @@ package assert
import (
"fmt"
"io"
"net/http"
"strings"
"testing"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/go-cleanhttp"
)
const (
@@ -20,12 +20,13 @@ const (
// HTTPServiceEchoes verifies that a post to the given ip/port combination returns the data
// in the response body
func HTTPServiceEchoes(t *testing.T, ip string, port int) {
phrase := "hello"
const phrase = "hello"
failer := func() *retry.Timer {
return &retry.Timer{Timeout: defaultHTTPTimeout, Wait: defaultHTTPWait}
}
client := http.DefaultClient
client := cleanhttp.DefaultClient()
url := fmt.Sprintf("http://%s:%d", ip, port)
retry.RunWith(failer(), t, func(r *retry.R) {


@@ -0,0 +1,71 @@
package cluster
import (
"context"
"github.com/hashicorp/consul/api"
"github.com/testcontainers/testcontainers-go"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
// Agent represent a Consul agent abstraction
type Agent interface {
GetIP() string
GetClient() *api.Client
GetName() string
GetPod() testcontainers.Container
ClaimAdminPort() int
GetConfig() Config
GetInfo() AgentInfo
GetDatacenter() string
IsServer() bool
RegisterTermination(func() error)
Terminate() error
TerminateAndRetainPod() error
Upgrade(ctx context.Context, config Config) error
Exec(ctx context.Context, cmd []string) (int, error)
DataDir() string
}
// Config is a set of configurations required to create a Agent
//
// Constructed by (Builder).ToAgentConfig()
type Config struct {
ScratchDir string
CertVolume string
CACert string
JSON string
ConfigBuilder *ConfigBuilder
Image string
Version string
Cmd []string
// service defaults
UseAPIWithTLS bool // TODO
UseGRPCWithTLS bool
}
func (c *Config) DockerImage() string {
return utils.DockerImage(c.Image, c.Version)
}
// Clone copies everything. It is the caller's job to replace fields that
// should be unique.
func (c Config) Clone() Config {
c2 := c
if c.Cmd != nil {
c2.Cmd = make([]string, len(c.Cmd))
for i, v := range c.Cmd {
c2.Cmd[i] = v
}
}
return c2
}
// TODO: refactor away
type AgentInfo struct {
CACertFile string
UseTLSForAPI bool
UseTLSForGRPC bool
}
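
Clone's contract above (copy everything; the caller replaces what must be unique) lends itself to a pattern like this hedged sketch. The helper name is an assumption; the -node override mirrors how the container code injects agent names elsewhere in this change.

package cluster_test // hypothetical

import (
	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster" // assumed path of the merged package
)

// cloneWithNodeName reuses an existing agent Config for another node,
// overriding only the per-node command line. Clone deep-copies Cmd, so
// appending here does not mutate the original Config.
func cloneWithNodeName(base libcluster.Config, name string) libcluster.Config {
	c := base.Clone()
	c.Cmd = append(c.Cmd, "-node", name)
	return c
}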


@@ -0,0 +1,138 @@
package cluster
import (
"context"
"fmt"
"time"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/docker/go-connections/nat"
"github.com/hashicorp/go-multierror"
"github.com/testcontainers/testcontainers-go"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
// LaunchInfo is the result of LaunchContainerOnNode.
type LaunchInfo struct {
Container testcontainers.Container
IP string
MappedPorts map[string]nat.Port
}
// LaunchContainerOnNode will run a new container attached to the same network
// namespace as the provided agent, in the same manner as in Kubernetes, where
// you'd run two containers in the same pod so they can share localhost.
//
// This is supposed to mimic more accurately how consul/CLI/envoy/etc all are
// co-located on localhost with the consul client agent in typical deployment
// topologies.
func LaunchContainerOnNode(
ctx context.Context,
node Agent,
req testcontainers.ContainerRequest,
mapPorts []string,
) (*LaunchInfo, error) {
if req.Name == "" {
return nil, fmt.Errorf("ContainerRequest requires the Name field")
}
if req.NetworkMode != "" {
return nil, fmt.Errorf("caller should not configure ContainerRequest.NetworkMode")
}
req.NetworkMode = dockercontainer.NetworkMode("container:" + node.GetName() + "-pod")
pod := node.GetPod()
if pod == nil {
return nil, fmt.Errorf("node Pod is required")
}
// Do some trickery to ensure that partial completion is correctly torn
// down, but successful execution is not.
var deferClean utils.ResettableDefer
defer deferClean.Execute()
launchCtx, cancel := context.WithTimeout(ctx, time.Second*40)
defer cancel()
container, err := testcontainers.GenericContainer(launchCtx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
})
if err != nil {
return nil, err
}
deferClean.Add(func() {
_ = container.Terminate(ctx)
})
ip, err := container.ContainerIP(ctx)
if err != nil {
return nil, err
}
if utils.FollowLog {
if err := container.StartLogProducer(ctx); err != nil {
return nil, err
}
container.FollowOutput(&LogConsumer{
Prefix: req.Name,
})
deferClean.Add(func() {
_ = container.StopLogProducer()
})
}
ports := make(map[string]nat.Port)
for _, portStr := range mapPorts {
mapped, err := pod.MappedPort(ctx, nat.Port(portStr))
if err != nil {
return nil, err
}
ports[portStr] = mapped
}
info := &LaunchInfo{
Container: container,
IP: ip,
MappedPorts: ports,
}
node.RegisterTermination(func() error {
return TerminateContainer(ctx, container, true)
})
// disable cleanup functions now that we have an object with a Terminate() function
deferClean.Reset()
return info, nil
}
// TerminateContainer attempts to terminate the container. On failure, an error
// will be returned and the reaper process (RYUK) will handle cleanup.
func TerminateContainer(ctx context.Context, c testcontainers.Container, stopLogs bool) error {
if c == nil {
return nil
}
var merr error
if utils.FollowLog && stopLogs {
if state, err := c.State(ctx); err == nil && state.Running {
// StopLogProducer can only be called on running containers
if err := c.StopLogProducer(); err != nil {
merr = multierror.Append(merr, err)
}
}
}
if err := c.Stop(ctx, nil); err != nil {
merr = multierror.Append(merr, err)
}
if err := c.Terminate(ctx); err != nil {
merr = multierror.Append(merr, err)
}
return merr
}
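
To make the pod-style co-location described above concrete, here is a hedged sketch of launching a sidecar next to an agent. The helper, container name, and image are assumptions; the contract (Name required, NetworkMode left empty, ports looked up on the pod) comes from LaunchContainerOnNode above.

package cluster_test // hypothetical

import (
	"context"
	"testing"

	"github.com/testcontainers/testcontainers-go"

	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster" // assumed path of the merged package
)

// launchEchoSidecar runs an extra container inside the agent's pod so it
// shares localhost with the consul agent, much like a second container in
// a Kubernetes pod.
func launchEchoSidecar(t *testing.T, node libcluster.Agent) *libcluster.LaunchInfo {
	req := testcontainers.ContainerRequest{
		// Name is required by LaunchContainerOnNode; NetworkMode stays
		// empty because the helper attaches the container to the agent's
		// pod network namespace itself.
		Name:  "echo-sidecar-1",               // any unique container name
		Image: "hashicorp/http-echo:latest",   // illustrative image
		Cmd:   []string{"-text", "hello"},
	}

	// No pod ports are requested here; the sidecar is reached through the
	// shared pod network namespace (i.e. via the agent's address).
	info, err := libcluster.LaunchContainerOnNode(context.Background(), node, req, nil)
	if err != nil {
		t.Fatal(err)
	}
	return info
}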


@@ -0,0 +1,323 @@
package cluster
import (
"encoding/json"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"golang.org/x/mod/semver"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
// TODO: switch from semver to go-version
const (
remoteCertDirectory = "/consul/config/certs"
ConsulCACertPEM = "consul-agent-ca.pem"
ConsulCACertKey = "consul-agent-ca-key.pem"
)
// BuildContext provides a reusable object meant to share common configuration settings
// between agent configuration builders.
type BuildContext struct {
datacenter string
consulImageName string
consulVersion string
injectGossipEncryption bool // setup the agents to use a gossip encryption key
encryptKey string
injectCerts bool // initializes the built-in CA and distributes client certificates to agents
injectAutoEncryption bool // initialize the built-in CA and set up agents to use auto-encrypt
allowHTTPAnyway bool
useAPIWithTLS bool
useGRPCWithTLS bool
certVolume string
caCert string
tlsCertIndex int // keeps track of the certificates issued for naming purposes
}
func (c *BuildContext) DockerImage() string {
return utils.DockerImage(c.consulImageName, c.consulVersion)
}
// BuildOptions define the desired automated test setup overrides that are
// applied across agents in the cluster
type BuildOptions struct {
// Datacenter is the override datacenter for agents.
Datacenter string
// ConsulImageName is the default Consul image name for agents in the
// cluster when none is specified.
ConsulImageName string
// ConsulVersion is the default Consul version for agents in the cluster
// when none is specified.
ConsulVersion string
// InjectGossipEncryption provides a gossip encryption key for all agents.
InjectGossipEncryption bool
// InjectCerts provides a CA for all agents and (future) agent certs.
//
// It also disables the HTTP API unless AllowHTTPAnyway is enabled.
InjectCerts bool
// InjectAutoEncryption configures auto-encrypt for TLS and sets up certs.
// Overrides InjectCerts.
//
// It also disables the HTTP API unless AllowHTTPAnyway is enabled.
InjectAutoEncryption bool
// AllowHTTPAnyway ensures that the HTTP API is enabled even when
// InjectCerts or InjectAutoEncryption are enabled.
AllowHTTPAnyway bool
// UseAPIWithTLS ensures that any accesses for the JSON API use the https
// port. By default it will not.
UseAPIWithTLS bool
// UseGRPCWithTLS ensures that any accesses for external gRPC use the
// grpc_tls port. By default it will not.
UseGRPCWithTLS bool
}
func NewBuildContext(t *testing.T, opts BuildOptions) *BuildContext {
ctx := &BuildContext{
datacenter: opts.Datacenter,
consulImageName: opts.ConsulImageName,
consulVersion: opts.ConsulVersion,
injectGossipEncryption: opts.InjectGossipEncryption,
injectCerts: opts.InjectCerts,
injectAutoEncryption: opts.InjectAutoEncryption,
allowHTTPAnyway: opts.AllowHTTPAnyway,
useAPIWithTLS: opts.UseAPIWithTLS,
useGRPCWithTLS: opts.UseGRPCWithTLS,
}
if ctx.consulImageName == "" {
ctx.consulImageName = utils.TargetImageName
}
if ctx.consulVersion == "" {
ctx.consulVersion = utils.TargetVersion
}
if opts.InjectGossipEncryption {
serfKey, err := newSerfEncryptionKey()
require.NoError(t, err, "could not generate serf encryption key")
ctx.encryptKey = serfKey
}
if opts.InjectAutoEncryption {
if opts.UseAPIWithTLS {
// TODO: we should improve this
t.Fatalf("Cannot use TLS with the API in conjunction with Auto Encrypt because you would need to use the Connect CA Cert for verification")
}
if opts.UseGRPCWithTLS {
// TODO: we should improve this
t.Fatalf("Cannot use TLS with gRPC in conjunction with Auto Encrypt because you would need to use the Connect CA Cert for verification")
}
}
if opts.InjectAutoEncryption || opts.InjectCerts {
ctx.createTLSCAFiles(t)
} else {
if opts.UseAPIWithTLS {
t.Fatalf("UseAPIWithTLS requires one of InjectAutoEncryption or InjectCerts to be set")
}
if opts.UseGRPCWithTLS {
t.Fatalf("UseGRPCWithTLS requires one of InjectAutoEncryption or InjectCerts to be set")
}
}
return ctx
}
type Builder struct {
context *BuildContext // this is non-nil
conf *ConfigBuilder
}
// NewConfigBuilder instantiates a builder object with sensible defaults for a single consul instance
// This includes the following:
// * default ports with no plaintext options
// * debug logging
// * single server with bootstrap
// * bind to all interfaces, advertise on 'eth0'
// * connect enabled
func NewConfigBuilder(ctx *BuildContext) *Builder {
if ctx == nil {
panic("BuildContext is a required argument")
}
b := &Builder{
conf: &ConfigBuilder{},
context: ctx,
}
b.conf.Set("advertise_addr", `{{ GetInterfaceIP "eth0" }}`)
b.conf.Set("bind_addr", "0.0.0.0")
b.conf.Set("data_dir", "/consul/data")
b.conf.Set("bootstrap", true)
b.conf.Set("client_addr", "0.0.0.0")
b.conf.Set("connect.enabled", true)
b.conf.Set("log_level", "debug")
b.conf.Set("server", true)
// These are the default ports, disabling plaintext transport
b.conf.Set("ports.dns", 8600)
//nolint:staticcheck
if ctx.certVolume == "" {
b.conf.Set("ports.http", 8500)
b.conf.Set("ports.https", -1)
} else {
b.conf.Set("ports.http", -1)
b.conf.Set("ports.https", 8501)
}
b.conf.Set("ports.grpc", 8502)
b.conf.Set("ports.serf_lan", 8301)
b.conf.Set("ports.serf_wan", 8302)
b.conf.Set("ports.server", 8300)
if ctx.allowHTTPAnyway {
b.conf.Set("ports.http", 8500)
}
if ctx.consulVersion == "local" || semver.Compare("v"+ctx.consulVersion, "v1.14.0") >= 0 {
// Enable GRPCTLS for version after v1.14.0
b.conf.Set("ports.grpc_tls", 8503)
}
return b
}
// Advanced lets you directly manipulate specific config settings.
func (b *Builder) Advanced(fn func(*ConfigBuilder)) *Builder {
if fn != nil {
fn(b.conf)
}
return b
}
func (b *Builder) Bootstrap(servers int) *Builder {
if servers < 1 {
b.conf.Unset("bootstrap")
b.conf.Unset("bootstrap_expect")
} else if servers == 1 {
b.conf.Set("bootstrap", true)
b.conf.Unset("bootstrap_expect")
} else {
b.conf.Unset("bootstrap")
b.conf.Set("bootstrap_expect", servers)
}
return b
}
func (b *Builder) Client() *Builder {
b.conf.Unset("ports.server")
b.conf.Unset("server")
b.conf.Unset("bootstrap")
b.conf.Unset("bootstrap_expect")
return b
}
func (b *Builder) Datacenter(name string) *Builder {
b.conf.Set("datacenter", name)
return b
}
func (b *Builder) Peering(enable bool) *Builder {
b.conf.Set("peering.enabled", enable)
return b
}
func (b *Builder) RetryJoin(names ...string) *Builder {
b.conf.Set("retry_join", names)
return b
}
func (b *Builder) Telemetry(statSite string) *Builder {
b.conf.Set("telemetry.statsite_address", statSite)
return b
}
// ToAgentConfig renders the builders configuration into a string
// representation of the json config file for agents.
func (b *Builder) ToAgentConfig(t *testing.T) *Config {
b.injectContextOptions(t)
out, err := json.MarshalIndent(b.conf, "", " ")
require.NoError(t, err, "could not generate json config")
confCopy, err := b.conf.Clone()
require.NoError(t, err)
return &Config{
JSON: string(out),
ConfigBuilder: confCopy,
Cmd: []string{"agent"},
Image: b.context.consulImageName,
Version: b.context.consulVersion,
CertVolume: b.context.certVolume,
CACert: b.context.caCert,
UseAPIWithTLS: b.context.useAPIWithTLS,
UseGRPCWithTLS: b.context.useGRPCWithTLS,
}
}
func (b *Builder) injectContextOptions(t *testing.T) {
var dc string
if b.context.datacenter != "" {
b.conf.Set("datacenter", b.context.datacenter)
dc = b.context.datacenter
}
if val, _ := b.conf.GetString("datacenter"); val == "" {
dc = "dc1"
}
b.conf.Set("datacenter", dc)
server, _ := b.conf.GetBool("server")
if b.context.encryptKey != "" {
b.conf.Set("encrypt", b.context.encryptKey)
}
// For any TLS setup, we add the CA to agent conf
if b.context.certVolume != "" {
b.conf.Set("tls.defaults.ca_file", filepath.Join(remoteCertDirectory, ConsulCACertPEM))
b.conf.Set("tls.defaults.verify_outgoing", true) // Secure settings
b.conf.Set("tls.internal_rpc.verify_server_hostname", true)
}
// Also for any TLS setup, generate server key pairs from the CA
if b.context.certVolume != "" && server {
keyFileName, certFileName := b.context.createTLSCertFiles(t, dc)
b.context.tlsCertIndex++
b.conf.Set("tls.defaults.cert_file", filepath.Join(remoteCertDirectory, certFileName))
b.conf.Set("tls.defaults.key_file", filepath.Join(remoteCertDirectory, keyFileName))
b.conf.Set("tls.internal_rpc.verify_incoming", true) // Only applies to servers for auto-encrypt
}
// This assumes we've already gone through the CA/Cert setup in the previous conditional
if b.context.injectAutoEncryption {
if server {
b.conf.Set("auto_encrypt.allow_tls", true) // This setting is different between client and servers
b.conf.Set("tls.grpc.use_auto_cert", true) // This is required for peering to work over the non-GRPC_TLS port
// VerifyIncoming does not apply to client agents for auto-encrypt
} else {
b.conf.Set("auto_encrypt.tls", true) // This setting is different between client and servers
b.conf.Set("tls.grpc.use_auto_cert", true) // This is required for peering to work over the non-GRPC_TLS port
}
}
if b.context.injectCerts && !b.context.injectAutoEncryption {
panic("client certificate distribution not implemented")
}
}
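
Because the builder now writes dotted JSON keys directly (ports.grpc_tls, tls.defaults.ca_file, and so on), tests can reach past the typed helpers through Advanced. A hedged sketch follows; the helper name and the two keys it sets are illustrative assumptions.

package cluster_test // hypothetical

import (
	"testing"

	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster" // assumed path of the merged package
)

// clientConfig builds a client agent config: the fluent methods cover the
// common knobs, and Advanced drops down to raw dotted keys on the
// underlying ConfigBuilder.
func clientConfig(t *testing.T, ctx *libcluster.BuildContext) *libcluster.Config {
	return libcluster.NewConfigBuilder(ctx).
		Client().
		RetryJoin("agent-0").
		Advanced(func(c *libcluster.ConfigBuilder) {
			c.Set("telemetry.disable_hostname", true)
			c.Set("limits.http_max_conns_per_client", 100)
		}).
		ToAgentConfig(t)
}

ToAgentConfig marshals that same ConfigBuilder into the JSON shipped to the container and keeps a clone on Config.ConfigBuilder, which is what lets StandardUpgrade (further down) tweak a key or two and re-render the config for the upgraded node.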


@@ -2,22 +2,22 @@ package cluster
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/serf/serf"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/teris-io/shortid"
"github.com/testcontainers/testcontainers-go"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
libagent "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
)
// Cluster provides an interface for creating and controlling a Consul cluster
@@ -25,75 +25,158 @@ import (
// These fields are public in the event someone might want to surgically
// craft a test case.
type Cluster struct {
Agents []libagent.Agent
BuildContext *libagent.BuildContext
CACert string
CAKey string
ID string
Index int
Network testcontainers.Network
NetworkName string
Agents []Agent
// BuildContext *BuildContext // TODO
CACert string
CAKey string
ID string
Index int
Network testcontainers.Network
NetworkName string
ScratchDir string
}
type TestingT interface {
Cleanup(f func())
}
func NewN(t TestingT, conf Config, count int) (*Cluster, error) {
var configs []Config
for i := 0; i < count; i++ {
configs = append(configs, conf)
}
return New(t, configs)
}
// New creates a Consul cluster. An agent will be started for each of the given
// configs and joined to the cluster.
//
// A cluster has its own docker network for DNS connectivity, but is also joined
func New(configs []libagent.Config) (*Cluster, error) {
// A cluster has its own docker network for DNS connectivity, but is also
// joined
//
// The provided TestingT is used to register a cleanup function to terminate
// the cluster.
func New(t TestingT, configs []Config) (*Cluster, error) {
id, err := shortid.Generate()
if err != nil {
return nil, errors.Wrap(err, "could not cluster id")
return nil, fmt.Errorf("could not cluster id: %w", err)
}
name := fmt.Sprintf("consul-int-cluster-%s", id)
network, err := createNetwork(name)
network, err := createNetwork(t, name)
if err != nil {
return nil, errors.Wrap(err, "could not create cluster container network")
return nil, fmt.Errorf("could not create cluster container network: %w", err)
}
cluster := Cluster{
// Rig up one scratch dir for the cluster with auto-cleanup on test exit.
scratchDir, err := os.MkdirTemp("", name)
if err != nil {
return nil, err
}
t.Cleanup(func() {
_ = os.RemoveAll(scratchDir)
})
err = os.Chmod(scratchDir, 0777)
if err != nil {
return nil, err
}
cluster := &Cluster{
ID: id,
Network: network,
NetworkName: name,
ScratchDir: scratchDir,
}
t.Cleanup(func() {
_ = cluster.Terminate()
})
if err := cluster.Add(configs, true); err != nil {
return nil, fmt.Errorf("could not start or join all agents: %w", err)
}
if err := cluster.Add(configs); err != nil {
return nil, errors.Wrap(err, "could not start or join all agents")
return cluster, nil
}
func (c *Cluster) AddN(conf Config, count int, join bool) error {
var configs []Config
for i := 0; i < count; i++ {
configs = append(configs, conf)
}
return &cluster, nil
return c.Add(configs, join)
}
// Add starts an agent with the given configuration and joins it with the existing cluster
func (c *Cluster) Add(configs []libagent.Config) error {
func (c *Cluster) Add(configs []Config, serfJoin bool) (xe error) {
if c.Index == 0 && !serfJoin {
return fmt.Errorf("The first call to Cluster.Add must have serfJoin=true")
}
agents := make([]libagent.Agent, len(configs))
var agents []Agent
for idx, conf := range configs {
n, err := libagent.NewConsulContainer(context.Background(), conf, c.NetworkName, c.Index)
if err != nil {
return errors.Wrapf(err, "could not add container index %d", idx)
// Each agent gets its own area in the cluster scratch.
conf.ScratchDir = filepath.Join(c.ScratchDir, strconv.Itoa(c.Index))
if err := os.MkdirAll(conf.ScratchDir, 0777); err != nil {
return err
}
agents[idx] = n
if err := os.Chmod(conf.ScratchDir, 0777); err != nil {
return err
}
n, err := NewConsulContainer(
context.Background(),
conf,
c.NetworkName,
c.Index,
)
if err != nil {
return fmt.Errorf("could not add container index %d: %w", idx, err)
}
agents = append(agents, n)
c.Index++
}
if err := c.Join(agents); err != nil {
return errors.Wrapf(err, "could not join agent")
if serfJoin {
if err := c.Join(agents); err != nil {
return fmt.Errorf("could not join agents to cluster: %w", err)
}
} else {
if err := c.JoinExternally(agents); err != nil {
return fmt.Errorf("could not join agents to cluster: %w", err)
}
}
return nil
}
// Join joins the given agent to the cluster.
func (c *Cluster) Join(agents []libagent.Agent) error {
var joinAddr string
if len(c.Agents) >= 1 {
joinAddr, _ = c.Agents[0].GetAddr()
} else if len(agents) >= 1 {
joinAddr, _ = agents[0].GetAddr()
func (c *Cluster) Join(agents []Agent) error {
return c.join(agents, false)
}
func (c *Cluster) JoinExternally(agents []Agent) error {
return c.join(agents, true)
}
func (c *Cluster) join(agents []Agent, skipSerfJoin bool) error {
if len(agents) == 0 {
return nil // no change
}
if len(c.Agents) == 0 {
// Join the rest to the first.
c.Agents = append(c.Agents, agents[0])
return c.join(agents[1:], skipSerfJoin)
}
// Always join to the original server.
joinAddr := c.Agents[0].GetIP()
for _, n := range agents {
err := n.GetClient().Agent().Join(joinAddr, false)
if err != nil {
return errors.Wrapf(err, "could not join agent %s to %s", n.GetName(), joinAddr)
if !skipSerfJoin {
err := n.GetClient().Agent().Join(joinAddr, false)
if err != nil {
return fmt.Errorf("could not join agent %s to %s: %w", n.GetName(), joinAddr, err)
}
}
c.Agents = append(c.Agents, n)
}
@@ -102,10 +185,10 @@ func (c *Cluster) Join(agents []libagent.Agent) error {
// Remove instructs the agent to leave the cluster then removes it
// from the cluster Agent list.
func (c *Cluster) Remove(n libagent.Agent) error {
func (c *Cluster) Remove(n Agent) error {
err := n.GetClient().Agent().Leave()
if err != nil {
return errors.Wrapf(err, "could not remove agent %s", n.GetName())
return fmt.Errorf("could not remove agent %s: %w", n.GetName(), err)
}
foundIdx := -1
@ -128,74 +211,100 @@ func (c *Cluster) Remove(n libagent.Agent) error {
//
// https://developer.hashicorp.com/consul/docs/upgrading#standard-upgrades
//
// - takes a snapshot
// - terminate and rejoin the pod of new version of consul
// - takes a snapshot (which is discarded)
// - terminate and rejoin the pod of a new version of consul
//
// NOTE: we pass in a *testing.T, but this method also returns an error. Within
// this method, when in doubt, return an error rather than asserting. Testing
// assertions should be reserved for t.Cleanup and the few retry-until-working
// helpers below.
//
// This lets us have tests that assert that an upgrade will fail.
func (c *Cluster) StandardUpgrade(t *testing.T, ctx context.Context, targetVersion string) error {
retry.RunWith(&retry.Timer{Timeout: 30 * time.Second, Wait: 1 * time.Second}, t, func(r *retry.R) {
// NOTE: to suppress flakiness
execCode, err := c.Agents[0].Exec(context.Background(), []string{"consul", "snapshot", "save", "backup.snap"})
require.Equal(r, 0, execCode)
require.NoError(r, err)
})
// verify only the leader can take a snapshot
snapshotCount := 0
for _, agent := range c.Agents {
if !agent.IsServer() {
continue
}
files, err := ioutil.ReadDir(filepath.Join(agent.DataDir(), "raft", "snapshots"))
if err != nil {
return err
}
if len(files) >= 1 {
snapshotCount++
}
// We take a snapshot, but note that we currently do nothing with it.
execCode, err := c.Agents[0].Exec(context.Background(), []string{"consul", "snapshot", "save", "backup.snap"})
if execCode != 0 {
return fmt.Errorf("error taking snapshot of the cluster, returned code %d", execCode)
}
if err != nil {
return err
}
require.Equalf(t, 1, snapshotCount, "only leader agent can have a snapshot file")
// Upgrade the individual agents to the target version in the following order:
// 1. followers
// 2. leader
// 3. clients (TODO)
// Grab a client connected to the leader, which we will upgrade last so our
// connection remains ok.
leader, err := c.Leader()
client := leader.GetClient()
require.NoError(t, err)
t.Log("Leader name:", leader.GetName())
if err != nil {
return err
}
t.Logf("Leader name: %s", leader.GetName())
followers, err := c.Followers()
require.NoError(t, err)
t.Log("The number of followers", len(followers))
for _, agent := range followers {
t.Log("Upgrade follower", agent.GetName())
if err != nil {
return err
}
t.Logf("The number of followers = %d", len(followers))
upgradeFn := func(agent Agent, clientFactory func() *api.Client) error {
config := agent.GetConfig()
config.Version = targetVersion
if agent.IsServer() {
// You only ever need bootstrap settings the FIRST time, so we do not need
// them again.
config.ConfigBuilder.Unset("bootstrap")
} else {
// If we upgrade the clients fast enough,
// membership might not be gossiped to all of
// the clients in time to persist into their
// serf snapshots, so force them to rejoin the
// normal way on restart.
config.ConfigBuilder.Set("retry_join", []string{"agent-0"})
}
newJSON, err := json.MarshalIndent(config.ConfigBuilder, "", " ")
if err != nil {
return fmt.Errorf("could not re-generate json config: %w", err)
}
config.JSON = string(newJSON)
t.Logf("Upgraded cluster config for %q:\n%s", agent.GetName(), config.JSON)
err = agent.Upgrade(context.Background(), config)
if err != nil {
return err
}
client := clientFactory()
// wait until the agents rejoin
WaitForMembers(t, client, len(c.Agents))
break
return nil
}
if len(followers) > 0 {
client = followers[0].GetClient()
for _, agent := range followers {
t.Logf("Upgrade follower: %s", agent.GetName())
if err := upgradeFn(agent, leader.GetClient); err != nil {
return fmt.Errorf("error upgrading follower %q: %w", agent.GetName(), err)
}
}
t.Log("Upgrade leader:", leader.GetName())
config := leader.GetConfig()
config.Version = targetVersion
err = leader.Upgrade(context.Background(), config)
t.Logf("Upgrade leader: %s", leader.GetName())
err = upgradeFn(leader, func() *api.Client {
if len(followers) > 0 {
return followers[0].GetClient()
}
return c.APIClient(0)
})
if err != nil {
return err
return fmt.Errorf("error upgrading leader %q: %w", leader.GetName(), err)
}
WaitForMembers(t, client, len(c.Agents))
return nil
}
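// Hedged usage sketch (not part of this change): how a test might drive the
// rolling upgrade above. It assumes the helpers of this package (APIClient,
// WaitForLeader, WaitForMembers) and the standard "context"/"testing" imports.
func exampleStandardUpgrade(t *testing.T, cluster *Cluster, targetVersion string) {
	// StandardUpgrade returns an error instead of asserting so callers can
	// also write tests that expect an upgrade to fail.
	if err := cluster.StandardUpgrade(t, context.Background(), targetVersion); err != nil {
		t.Fatalf("upgrade failed: %v", err)
	}
	// After the rolling upgrade the cluster should still have a leader and
	// the same member count.
	client := cluster.APIClient(0)
	WaitForLeader(t, cluster, client)
	WaitForMembers(t, client, len(cluster.Agents))
}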
@ -211,15 +320,15 @@ func (c *Cluster) Terminate() error {
// Testcontainers seems to clean up the network itself.
// Triggering removal now would throw an error while the containers are still shutting down
//if err := c.Network.Remove(context.Background()); err != nil {
// return errors.Wrapf(err, "could not terminate cluster network %s", c.ID)
//}
// if err := c.Network.Remove(context.Background()); err != nil {
// return fmt.Errorf("could not terminate cluster network %s: %w", c.ID, err)
// }
return nil
}
// Leader returns the cluster leader agent, or an error if no leader is
// available.
func (c *Cluster) Leader() (libagent.Agent, error) {
func (c *Cluster) Leader() (Agent, error) {
if len(c.Agents) < 1 {
return nil, fmt.Errorf("no agent available")
}
@ -231,7 +340,7 @@ func (c *Cluster) Leader() (libagent.Agent, error) {
}
for _, n := range c.Agents {
addr, _ := n.GetAddr()
addr := n.GetIP()
if strings.Contains(leaderAdd, addr) {
return n, nil
}
@ -239,35 +348,10 @@ func (c *Cluster) Leader() (libagent.Agent, error) {
return nil, fmt.Errorf("leader not found")
}
// GetClient returns a consul API client to the node if node is provided.
// Otherwise, GetClient returns the API client to the first node of either
// server or client agent.
func (c *Cluster) GetClient(node libagent.Agent, isServer bool) (*api.Client, error) {
var err error
if node != nil {
return node.GetClient(), err
}
nodes, err := c.Clients()
if isServer {
nodes, err = c.Servers()
}
if err != nil {
return nil, fmt.Errorf("unable to get the api client: %s", err)
}
if len(nodes) <= 0 {
return nil, fmt.Errorf("not enough node: %d", len(nodes))
}
return nodes[0].GetClient(), err
}
func getLeader(client *api.Client) (string, error) {
leaderAdd, err := client.Status().Leader()
if err != nil {
return "", errors.Wrap(err, "could not query leader")
return "", fmt.Errorf("could not query leader: %w", err)
}
if leaderAdd == "" {
return "", errors.New("no leader available")
@ -276,8 +360,8 @@ func getLeader(client *api.Client) (string, error) {
}
// Followers returns the cluster following servers.
func (c *Cluster) Followers() ([]libagent.Agent, error) {
var followers []libagent.Agent
func (c *Cluster) Followers() ([]Agent, error) {
var followers []Agent
leader, err := c.Leader()
if err != nil {
@ -293,40 +377,72 @@ func (c *Cluster) Followers() ([]libagent.Agent, error) {
}
// Servers returns the handle to server agents
func (c *Cluster) Servers() ([]libagent.Agent, error) {
var servers []libagent.Agent
func (c *Cluster) Servers() []Agent {
var servers []Agent
for _, n := range c.Agents {
if n.IsServer() {
servers = append(servers, n)
}
}
return servers, nil
return servers
}
// Clients returns the handle to client agents
func (c *Cluster) Clients() ([]libagent.Agent, error) {
var clients []libagent.Agent
func (c *Cluster) Clients() []Agent {
var clients []Agent
for _, n := range c.Agents {
if !n.IsServer() {
clients = append(clients, n)
}
}
return clients, nil
return clients
}
func (c *Cluster) APIClient(index int) *api.Client {
nodes := c.Clients()
if len(nodes) == 0 {
nodes = c.Servers()
if len(nodes) == 0 {
return nil
}
}
return nodes[0].GetClient()
}
// GetClient returns a consul API client to the node if node is provided.
// Otherwise, GetClient returns the API client to the first node of either
// server or client agent.
//
// TODO: see about switching to just APIClient() calls instead?
func (c *Cluster) GetClient(node Agent, isServer bool) (*api.Client, error) {
if node != nil {
return node.GetClient(), nil
}
nodes := c.Clients()
if isServer {
nodes = c.Servers()
}
if len(nodes) <= 0 {
return nil, errors.New("no nodes")
}
return nodes[0].GetClient(), nil
}
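// Hedged usage sketch (not part of this change): APIClient above already
// prefers a client agent and falls back to a server, so callers that only
// need "any working API client" can use it instead of GetClient(nil, ...).
func exampleAnyAPIClient(c *Cluster) (*api.Client, error) {
	if client := c.APIClient(0); client != nil {
		return client, nil
	}
	// No client agents and no servers are registered yet; ask explicitly so
	// the caller gets a descriptive error instead of a nil client.
	return c.GetClient(nil, true)
}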
// PeerWithCluster establishes peering with the acceptor cluster
func (c *Cluster) PeerWithCluster(acceptingClient *api.Client, acceptingPeerName string, dialingPeerName string) error {
node := c.Agents[0]
dialingClient := node.GetClient()
dialingClient := c.APIClient(0)
generateReq := api.PeeringGenerateTokenRequest{
PeerName: acceptingPeerName,
}
generateRes, _, err := acceptingClient.Peerings().GenerateToken(context.Background(), generateReq, &api.WriteOptions{})
if err != nil {
return fmt.Errorf("error generate token: %v", err)
return fmt.Errorf("error generate token: %w", err)
}
establishReq := api.PeeringEstablishRequest{
@ -335,7 +451,7 @@ func (c *Cluster) PeerWithCluster(acceptingClient *api.Client, acceptingPeerName
}
_, _, err = dialingClient.Peerings().Establish(context.Background(), establishReq, &api.WriteOptions{})
if err != nil {
return fmt.Errorf("error establish peering: %v", err)
return fmt.Errorf("error establish peering: %w", err)
}
return nil

View File

@ -0,0 +1,184 @@
package cluster
import (
"encoding/json"
"fmt"
"strings"
"github.com/mitchellh/copystructure"
)
// TODO: rename file
type ConfigBuilder struct {
nodes map[string]any
}
var _ json.Marshaler = (*ConfigBuilder)(nil)
func (b *ConfigBuilder) Clone() (*ConfigBuilder, error) {
if b.nodes == nil {
return &ConfigBuilder{}, nil
}
raw, err := copystructure.Copy(b.nodes)
if err != nil {
return nil, err
}
return &ConfigBuilder{
nodes: raw.(map[string]any),
}, nil
}
func (b *ConfigBuilder) MarshalJSON() ([]byte, error) {
if b == nil || len(b.nodes) == 0 {
return []byte("{}"), nil
}
return json.Marshal(b.nodes)
}
func (b *ConfigBuilder) String() string {
d, err := json.MarshalIndent(b, "", " ")
if err != nil {
return "<ERR: " + err.Error() + ">"
}
return string(d)
}
func (b *ConfigBuilder) GetString(k string) (string, bool) {
raw, ok := b.Get(k)
if !ok {
return "", false
}
return raw.(string), true
}
func (b *ConfigBuilder) GetBool(k string) (bool, bool) {
raw, ok := b.Get(k)
if !ok {
return false, false
}
return raw.(bool), true
}
func (b *ConfigBuilder) Get(k string) (any, bool) {
if b.nodes == nil {
return nil, false
}
parts := strings.Split(k, ".")
switch len(parts) {
case 0:
return nil, false
case 1:
v, ok := b.nodes[k]
return v, ok
}
parents, child := parts[0:len(parts)-1], parts[len(parts)-1]
curr := b.nodes
for _, parent := range parents {
next, ok := curr[parent]
if !ok {
return nil, false
}
curr = next.(map[string]any)
}
v, ok := curr[child]
return v, ok
}
func (b *ConfigBuilder) Set(k string, v any) {
if b.nodes == nil {
b.nodes = make(map[string]any)
}
validateValueType(v)
parts := strings.Split(k, ".")
switch len(parts) {
case 0:
return
case 1:
b.nodes[k] = v
return
}
parents, child := parts[0:len(parts)-1], parts[len(parts)-1]
curr := b.nodes
for _, parent := range parents {
next, ok := curr[parent]
if ok {
curr = next.(map[string]any)
} else {
next := make(map[string]any)
curr[parent] = next
curr = next
}
}
curr[child] = v
}
func validateValueType(v any) {
switch x := v.(type) {
case string:
case int:
case bool:
case []string:
case []any:
for _, item := range x {
validateSliceValueType(item)
}
default:
panic(fmt.Sprintf("unexpected type %T", v))
}
}
func validateSliceValueType(v any) {
switch v.(type) {
case string:
case int:
case bool:
case *ConfigBuilder:
default:
panic(fmt.Sprintf("unexpected type %T", v))
}
}
func (b *ConfigBuilder) Unset(k string) {
if b.nodes == nil {
return
}
parts := strings.Split(k, ".")
switch len(parts) {
case 0:
return
case 1:
delete(b.nodes, k)
return
}
parents, child := parts[0:len(parts)-1], parts[len(parts)-1]
curr := b.nodes
for _, parent := range parents {
next, ok := curr[parent]
if !ok {
return
}
curr = next.(map[string]any)
}
delete(curr, child)
}
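// Hedged usage sketch (not part of this change): the dotted-key helpers above
// treat "a.b.c" as nested JSON objects, which is how callers elsewhere in this
// change unset "bootstrap" or set "retry_join".
func exampleConfigBuilder() string {
	var b ConfigBuilder
	b.Set("datacenter", "dc1")
	b.Set("ports.https", 8501)
	b.Set("retry_join", []string{"agent-0"})
	if dc, ok := b.GetString("datacenter"); ok && dc == "dc1" {
		b.Unset("ports.https") // removes only the nested key, not "ports" itself
	}
	// String() renders the nested structure as indented JSON.
	return b.String()
}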

View File

@ -0,0 +1,562 @@
package cluster
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"time"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
const bootLogLine = "Consul agent running"
const disableRYUKEnv = "TESTCONTAINERS_RYUK_DISABLED"
// consulContainerNode implements the Agent interface by running a Consul agent
// in a container.
type consulContainerNode struct {
ctx context.Context
pod testcontainers.Container
container testcontainers.Container
serverMode bool
datacenter string
config Config
podReq testcontainers.ContainerRequest
consulReq testcontainers.ContainerRequest
dataDir string
network string
id int
name string
terminateFuncs []func() error
client *api.Client
clientAddr string
clientCACertFile string
ip string
nextAdminPortOffset int
info AgentInfo
}
func (c *consulContainerNode) GetPod() testcontainers.Container {
return c.pod
}
func (c *consulContainerNode) ClaimAdminPort() int {
p := 19000 + c.nextAdminPortOffset
c.nextAdminPortOffset++
return p
}
// NewConsulContainer starts a Consul agent in a container with the given config.
func NewConsulContainer(ctx context.Context, config Config, network string, index int) (Agent, error) {
if config.ScratchDir == "" {
return nil, fmt.Errorf("ScratchDir is required")
}
license, err := readLicense()
if err != nil {
return nil, err
}
pc, err := readSomeConfigFileFields(config.JSON)
if err != nil {
return nil, err
}
consulType := "client"
if pc.Server {
consulType = "server"
}
name := utils.RandName(fmt.Sprintf("%s-consul-%s-%d", pc.Datacenter, consulType, index))
// Inject new Agent name
config.Cmd = append(config.Cmd, "-node", name)
tmpDirData := filepath.Join(config.ScratchDir, "data")
if err := os.MkdirAll(tmpDirData, 0777); err != nil {
return nil, fmt.Errorf("error creating data directory %s: %w", tmpDirData, err)
}
if err := os.Chmod(tmpDirData, 0777); err != nil {
return nil, fmt.Errorf("error chowning data directory %s: %w", tmpDirData, err)
}
var caCertFileForAPI string
if config.CACert != "" {
caCertFileForAPI = filepath.Join(config.ScratchDir, "ca.pem")
if err := os.WriteFile(caCertFileForAPI, []byte(config.CACert), 0644); err != nil {
return nil, fmt.Errorf("error writing out CA cert %s: %w", caCertFileForAPI, err)
}
}
configFile, err := createConfigFile(config.ScratchDir, config.JSON)
if err != nil {
return nil, fmt.Errorf("error writing out config file %s: %w", configFile, err)
}
opts := containerOpts{
name: name,
configFile: configFile,
dataDir: tmpDirData,
license: license,
addtionalNetworks: []string{"bridge", network},
hostname: fmt.Sprintf("agent-%d", index),
}
podReq, consulReq := newContainerRequest(config, opts)
// Do some trickery to ensure that partial completion is correctly torn
// down, but successful execution is not.
var deferClean utils.ResettableDefer
defer deferClean.Execute()
podContainer, err := startContainer(ctx, podReq)
if err != nil {
return nil, fmt.Errorf("error starting pod with image %q: %w", podReq.Image, err)
}
deferClean.Add(func() {
_ = podContainer.Terminate(ctx)
})
var (
httpPort = pc.Ports.HTTP
httpsPort = pc.Ports.HTTPS
clientAddr string
clientCACertFile string
info AgentInfo
)
if httpPort > 0 {
uri, err := podContainer.PortEndpoint(ctx, "8500", "http")
if err != nil {
return nil, err
}
clientAddr = uri
} else if httpsPort > 0 {
uri, err := podContainer.PortEndpoint(ctx, "8501", "https")
if err != nil {
return nil, err
}
clientAddr = uri
clientCACertFile = caCertFileForAPI
} else {
if pc.Server {
return nil, fmt.Errorf("server container does not expose HTTP or HTTPS")
}
}
if caCertFileForAPI != "" {
if config.UseAPIWithTLS {
if pc.Ports.HTTPS > 0 {
info.UseTLSForAPI = true
} else {
return nil, fmt.Errorf("UseAPIWithTLS is set but ports.https is not for this agent")
}
}
if config.UseGRPCWithTLS {
if pc.Ports.GRPCTLS > 0 {
info.UseTLSForGRPC = true
} else {
return nil, fmt.Errorf("UseGRPCWithTLS is set but ports.grpc_tls is not for this agent")
}
}
info.CACertFile = clientCACertFile
}
ip, err := podContainer.ContainerIP(ctx)
if err != nil {
return nil, err
}
consulContainer, err := startContainer(ctx, consulReq)
if err != nil {
return nil, fmt.Errorf("error starting main with image %q: %w", consulReq.Image, err)
}
deferClean.Add(func() {
_ = consulContainer.Terminate(ctx)
})
if utils.FollowLog {
if err := consulContainer.StartLogProducer(ctx); err != nil {
return nil, err
}
deferClean.Add(func() {
_ = consulContainer.StopLogProducer()
})
consulContainer.FollowOutput(&LogConsumer{
Prefix: opts.name,
})
}
node := &consulContainerNode{
config: config,
pod: podContainer,
container: consulContainer,
serverMode: pc.Server,
datacenter: pc.Datacenter,
ctx: ctx,
podReq: podReq,
consulReq: consulReq,
dataDir: tmpDirData,
network: network,
id: index,
name: name,
ip: ip,
info: info,
}
if httpPort > 0 || httpsPort > 0 {
apiConfig := api.DefaultConfig()
apiConfig.Address = clientAddr
if clientCACertFile != "" {
apiConfig.TLSConfig.CAFile = clientCACertFile
}
apiClient, err := api.NewClient(apiConfig)
if err != nil {
return nil, err
}
node.client = apiClient
node.clientAddr = clientAddr
node.clientCACertFile = clientCACertFile
}
// disable cleanup functions now that we have an object with a Terminate() function
deferClean.Reset()
return node, nil
}
func (c *consulContainerNode) GetName() string {
if c.container == nil {
return c.consulReq.Name // TODO: is this safe to do all the time?
}
name, err := c.container.Name(c.ctx)
if err != nil {
return ""
}
return name
}
func (c *consulContainerNode) GetConfig() Config {
return c.config.Clone()
}
func (c *consulContainerNode) GetDatacenter() string {
return c.datacenter
}
func (c *consulContainerNode) IsServer() bool {
return c.serverMode
}
// GetClient returns an API client that can be used to communicate with the Agent.
func (c *consulContainerNode) GetClient() *api.Client {
return c.client
}
func (c *consulContainerNode) GetAPIAddrInfo() (addr, caCert string) {
return c.clientAddr, c.clientCACertFile
}
func (c *consulContainerNode) GetInfo() AgentInfo {
return c.info
}
func (c *consulContainerNode) GetIP() string {
return c.ip
}
func (c *consulContainerNode) RegisterTermination(f func() error) {
c.terminateFuncs = append(c.terminateFuncs, f)
}
func (c *consulContainerNode) Exec(ctx context.Context, cmd []string) (int, error) {
exit, _, err := c.container.Exec(ctx, cmd)
return exit, err
}
func (c *consulContainerNode) Upgrade(ctx context.Context, config Config) error {
if config.ScratchDir == "" {
return fmt.Errorf("ScratchDir is required")
}
newConfigFile, err := createConfigFile(config.ScratchDir, config.JSON)
if err != nil {
return err
}
// We'll keep the same pod.
opts := containerOpts{
name: c.consulReq.Name,
configFile: newConfigFile,
dataDir: c.dataDir,
license: "",
addtionalNetworks: []string{"bridge", c.network},
hostname: c.consulReq.Hostname,
}
_, consulReq2 := newContainerRequest(config, opts)
consulReq2.Env = c.consulReq.Env // copy license
// sanity check two fields
if consulReq2.Name != c.consulReq.Name {
return fmt.Errorf("new name %q should match old name %q", consulReq2.Name, c.consulReq.Name)
}
if consulReq2.Hostname != c.consulReq.Hostname {
return fmt.Errorf("new hostname %q should match old hostname %q", consulReq2.Hostname, c.consulReq.Hostname)
}
if err := c.TerminateAndRetainPod(); err != nil {
return fmt.Errorf("error terminating running container during upgrade: %w", err)
}
c.consulReq = consulReq2
container, err := startContainer(ctx, c.consulReq)
c.ctx = ctx
c.container = container
if err != nil {
return err
}
if utils.FollowLog {
if err := container.StartLogProducer(ctx); err != nil {
return err
}
container.FollowOutput(&LogConsumer{
Prefix: opts.name,
})
}
return nil
}
// Terminate attempts to terminate the agent container.
// This might also include running termination functions for containers associated with the agent.
// On failure, an error will be returned and the reaper process (RYUK) will handle cleanup.
func (c *consulContainerNode) Terminate() error {
return c.terminate(false)
}
func (c *consulContainerNode) TerminateAndRetainPod() error {
return c.terminate(true)
}
func (c *consulContainerNode) terminate(retainPod bool) error {
// Services might register a termination function that should also fire
// when the "agent" is cleaned up
for _, f := range c.terminateFuncs {
err := f()
if err != nil {
continue
}
}
var merr error
if c.container != nil {
if err := TerminateContainer(c.ctx, c.container, true); err != nil {
merr = multierror.Append(merr, err)
}
c.container = nil
}
if !retainPod && c.pod != nil {
if err := TerminateContainer(c.ctx, c.pod, false); err != nil {
merr = multierror.Append(merr, err)
}
c.pod = nil
}
return merr
}
func (c *consulContainerNode) DataDir() string {
return c.dataDir
}
func startContainer(ctx context.Context, req testcontainers.ContainerRequest) (testcontainers.Container, error) {
ctx, cancel := context.WithTimeout(ctx, time.Second*40)
defer cancel()
return testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
})
}
const pauseImage = "k8s.gcr.io/pause:3.3"
type containerOpts struct {
configFile string
dataDir string
hostname string
index int
license string
name string
addtionalNetworks []string
}
func newContainerRequest(config Config, opts containerOpts) (podRequest, consulRequest testcontainers.ContainerRequest) {
skipReaper := isRYUKDisabled()
pod := testcontainers.ContainerRequest{
Image: pauseImage,
AutoRemove: false,
Name: opts.name + "-pod",
SkipReaper: skipReaper,
ExposedPorts: []string{
"8500/tcp",
"8501/tcp",
"8443/tcp", // Envoy Gateway Listener
"5000/tcp", // Envoy Connect Listener
"8079/tcp", // Envoy Connect Listener
"8080/tcp", // Envoy Connect Listener
"9998/tcp", // Envoy Connect Listener
"9999/tcp", // Envoy Connect Listener
"19000/tcp", // Envoy Admin Port
"19001/tcp", // Envoy Admin Port
"19002/tcp", // Envoy Admin Port
"19003/tcp", // Envoy Admin Port
"19004/tcp", // Envoy Admin Port
"19005/tcp", // Envoy Admin Port
"19006/tcp", // Envoy Admin Port
"19007/tcp", // Envoy Admin Port
"19008/tcp", // Envoy Admin Port
"19009/tcp", // Envoy Admin Port
},
Hostname: opts.hostname,
Networks: opts.addtionalNetworks,
}
// For handshakes like auto-encrypt, it can take tens of seconds for the agent to become "ready".
// If we only wait until the log stream starts, subsequent commands to agents will fail.
// TODO: optimize the wait strategy
app := testcontainers.ContainerRequest{
NetworkMode: dockercontainer.NetworkMode("container:" + opts.name + "-pod"),
Image: config.DockerImage(),
WaitingFor: wait.ForLog(bootLogLine).WithStartupTimeout(60 * time.Second), // See note above
AutoRemove: false,
Name: opts.name,
Mounts: []testcontainers.ContainerMount{
{
Source: testcontainers.DockerBindMountSource{HostPath: opts.configFile},
Target: "/consul/config/config.json",
ReadOnly: true,
},
{
Source: testcontainers.DockerBindMountSource{HostPath: opts.dataDir},
Target: "/consul/data",
},
},
Cmd: config.Cmd,
SkipReaper: skipReaper,
Env: map[string]string{"CONSUL_LICENSE": opts.license},
}
if config.CertVolume != "" {
app.Mounts = append(app.Mounts, testcontainers.ContainerMount{
Source: testcontainers.DockerVolumeMountSource{
Name: config.CertVolume,
},
Target: "/consul/config/certs",
ReadOnly: true,
})
}
// fmt.Printf("app: %s\n", utils.Dump(app))
return pod, app
}
// isRYUKDisabled returns whether the reaper process (RYUK) has been disabled
// by an environment variable.
//
// https://github.com/testcontainers/moby-ryuk
func isRYUKDisabled() bool {
skipReaperStr := os.Getenv(disableRYUKEnv)
skipReaper, err := strconv.ParseBool(skipReaperStr)
if err != nil {
return false
}
return skipReaper
}
func readLicense() (string, error) {
if license := os.Getenv("CONSUL_LICENSE"); license != "" {
return license, nil
}
licensePath := os.Getenv("CONSUL_LICENSE_PATH")
if licensePath == "" {
return "", nil
}
licenseBytes, err := os.ReadFile(licensePath)
if err != nil {
return "", err
}
return string(licenseBytes), nil
}
func createConfigFile(scratchDir string, JSON string) (string, error) {
configDir := filepath.Join(scratchDir, "config")
if err := os.MkdirAll(configDir, 0777); err != nil {
return "", err
}
if err := os.Chmod(configDir, 0777); err != nil {
return "", err
}
configFile := filepath.Join(configDir, "config.hcl")
if err := os.WriteFile(configFile, []byte(JSON), 0644); err != nil {
return "", err
}
return configFile, nil
}
type parsedConfig struct {
Datacenter string `json:"datacenter"`
Server bool `json:"server"`
Ports parsedPorts `json:"ports"`
}
type parsedPorts struct {
DNS int `json:"dns"`
HTTP int `json:"http"`
HTTPS int `json:"https"`
GRPC int `json:"grpc"`
GRPCTLS int `json:"grpc_tls"`
SerfLAN int `json:"serf_lan"`
SerfWAN int `json:"serf_wan"`
Server int `json:"server"`
}
func readSomeConfigFileFields(JSON string) (parsedConfig, error) {
var pc parsedConfig
if err := json.Unmarshal([]byte(JSON), &pc); err != nil {
return pc, errors.Wrap(err, "failed to parse config file")
}
if pc.Datacenter == "" {
pc.Datacenter = "dc1"
}
return pc, nil
}
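// Hedged usage sketch (not part of this change): readSomeConfigFileFields only
// cares about the handful of fields above, so a minimal JSON config is enough
// for NewConsulContainer to detect server mode and the exposed ports.
func exampleParseConfig() (parsedConfig, error) {
	const minimalJSON = `{
	  "server": true,
	  "ports": { "http": 8500, "grpc": 8502 }
	}`
	// Server and Ports.HTTP/GRPC are read from the JSON; Datacenter is
	// omitted here and therefore defaults to "dc1".
	return readSomeConfigFileFields(minimalJSON)
}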

View File

@ -0,0 +1,132 @@
package cluster
import (
"bytes"
"crypto/rand"
"encoding/base64"
"fmt"
"io"
"path/filepath"
"testing"
"github.com/hashicorp/go-uuid"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
const (
certVolumePrefix = "test-container-certs-"
consulUID = "100"
consulGID = "1000"
consulUserArg = consulUID + ":" + consulGID
)
func newSerfEncryptionKey() (string, error) {
key := make([]byte, 32)
n, err := rand.Reader.Read(key)
if err != nil {
return "", errors.Wrap(err, "error reading random data")
}
if n != 32 {
return "", errors.Wrap(err, "couldn't read enough entropy. Generate more entropy!")
}
return base64.StdEncoding.EncodeToString(key), nil
}
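// Hedged usage sketch (not part of this change): the generated key is a
// base64-encoded 32-byte value, which is the shape Consul expects for its
// gossip encryption key.
func exampleSerfKey() error {
	key, err := newSerfEncryptionKey()
	if err != nil {
		return err
	}
	raw, err := base64.StdEncoding.DecodeString(key)
	if err != nil {
		return err
	}
	if len(raw) != 32 {
		return errors.New("expected a 32-byte gossip key")
	}
	return nil
}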
func (c *BuildContext) createTLSCAFiles(t *testing.T) {
id, err := uuid.GenerateUUID()
require.NoError(t, err, "could not create cert volume UUID")
c.certVolume = certVolumePrefix + id
// TODO: cleanup anything with the prefix?
// Create a volume to hold the data.
err = utils.DockerExec([]string{"volume", "create", c.certVolume}, io.Discard)
require.NoError(t, err, "could not create docker volume to hold cert data: %s", c.certVolume)
t.Cleanup(func() {
_ = utils.DockerExec([]string{"volume", "rm", c.certVolume}, io.Discard)
})
err = utils.DockerExec([]string{"run",
"--rm",
"-i",
"--net=none",
"-v", c.certVolume + ":/data",
"busybox:latest",
"sh", "-c",
// Need this so the permissions stick; docker seems to treat unused volumes differently.
`touch /data/VOLUME_PLACEHOLDER && chown -R ` + consulUserArg + ` /data`,
}, io.Discard)
require.NoError(t, err, "could not initialize docker volume for cert data: %s", c.certVolume)
err = utils.DockerExec([]string{"run",
"--rm",
"-i",
"--net=none",
"-u", consulUserArg,
"-v", c.certVolume + ":/data",
"-w", "/data",
"--entrypoint", "",
c.DockerImage(),
"consul", "tls", "ca", "create",
}, io.Discard)
require.NoError(t, err, "could not create TLS certificate authority in docker volume: %s", c.certVolume)
var w bytes.Buffer
err = utils.DockerExec([]string{"run",
"--rm",
"-i",
"--net=none",
"-u", consulUserArg,
"-v", c.certVolume + ":/data",
"-w", "/data",
"--entrypoint", "",
c.DockerImage(),
"cat", filepath.Join("/data", ConsulCACertPEM),
}, &w)
require.NoError(t, err, "could not extract TLS CA certificate authority public key from docker volume: %s", c.certVolume)
c.caCert = w.String()
}
func (c *BuildContext) createTLSCertFiles(t *testing.T, dc string) (keyFileName, certFileName string) {
require.NotEmpty(t, "the CA has not been initialized yet")
err := utils.DockerExec([]string{"run",
"--rm",
"-i",
"--net=none",
"-u", consulUserArg,
"-v", c.certVolume + ":/data",
"-w", "/data",
"--entrypoint", "",
c.DockerImage(),
"consul", "tls", "cert", "create", "-server", "-dc", dc,
}, io.Discard)
require.NoError(t, err, "could not create TLS server certificate dc=%q in docker volume: %s", dc, c.certVolume)
prefix := fmt.Sprintf("%s-server-%s", dc, "consul")
certFileName = fmt.Sprintf("%s-%d.pem", prefix, c.tlsCertIndex)
keyFileName = fmt.Sprintf("%s-%d-key.pem", prefix, c.tlsCertIndex)
for _, fn := range []string{certFileName, keyFileName} {
err = utils.DockerExec([]string{"run",
"--rm",
"-i",
"--net=none",
"-u", consulUserArg,
"-v", c.certVolume + ":/data:ro",
"-w", "/data",
"--entrypoint", "",
c.DockerImage(),
"stat", filepath.Join("/data", fn),
}, io.Discard)
require.NoError(t, err, "Generated TLS cert file %q does not exist in volume", fn)
}
return keyFileName, certFileName
}

View File

@ -1,82 +0,0 @@
package cluster
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
libagent "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
type Options struct {
Datacenter string
NumServer int
NumClient int
Version string
}
// CreatingPeeringClusterAndSetup creates a cluster with peering enabled
// It also creates and registers a mesh-gateway at the client agent.
// The API client returned is pointed at the client agent.
func CreatingPeeringClusterAndSetup(t *testing.T, clusterOpts *Options) (*Cluster, *api.Client) {
var configs []libagent.Config
opts := libagent.BuildOptions{
Datacenter: clusterOpts.Datacenter,
InjectAutoEncryption: true,
InjectGossipEncryption: true,
ConsulVersion: clusterOpts.Version,
}
ctx, err := libagent.NewBuildContext(opts)
require.NoError(t, err)
numServer := clusterOpts.NumServer
for i := 0; i < numServer; i++ {
serverConf, err := libagent.NewConfigBuilder(ctx).
Bootstrap(numServer).
Peering(true).
RetryJoin(fmt.Sprintf("agent-%d", (i+1)%3)). // Round-robin join the servers
ToAgentConfig()
require.NoError(t, err)
t.Logf("%s server config %d: \n%s", clusterOpts.Datacenter, i, serverConf.JSON)
configs = append(configs, *serverConf)
}
// Add a stable client to register the service
clientConf, err := libagent.NewConfigBuilder(ctx).
Client().
Peering(true).
RetryJoin("agent-0", "agent-1", "agent-2").
ToAgentConfig()
require.NoError(t, err)
t.Logf("%s client config: \n%s", clusterOpts.Datacenter, clientConf.JSON)
configs = append(configs, *clientConf)
cluster, err := New(configs)
require.NoError(t, err)
cluster.BuildContext = ctx
client, err := cluster.GetClient(nil, false)
require.NoError(t, err)
WaitForLeader(t, cluster, client)
WaitForMembers(t, client, numServer+1)
// Default Proxy Settings
ok, err := utils.ApplyDefaultProxySettings(client)
require.NoError(t, err)
require.True(t, ok)
// Create the mesh gateway for dataplane traffic
clientNodes, _ := cluster.Clients()
_, err = libservice.NewGatewayService(context.Background(), "mesh", "mesh", clientNodes[0])
require.NoError(t, err)
return cluster, client
}

View File

@ -1,4 +1,4 @@
package agent
package cluster
import (
"fmt"

View File

@ -7,7 +7,7 @@ import (
"github.com/testcontainers/testcontainers-go"
)
func createNetwork(name string) (testcontainers.Network, error) {
func createNetwork(t TestingT, name string) (testcontainers.Network, error) {
req := testcontainers.GenericNetworkRequest{
NetworkRequest: testcontainers.NetworkRequest{
Name: name,
@ -19,5 +19,8 @@ func createNetwork(name string) (testcontainers.Network, error) {
if err != nil {
return nil, errors.Wrap(err, "could not create network")
}
t.Cleanup(func() {
_ = network.Remove(context.Background())
})
return network, nil
}

View File

@ -3,14 +3,17 @@ package service
import (
"context"
"fmt"
"path/filepath"
"strconv"
"time"
"github.com/docker/go-connections/nat"
"github.com/hashicorp/consul/api"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
libnode "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
@ -24,6 +27,8 @@ type ConnectContainer struct {
serviceName string
}
var _ Service = (*ConnectContainer)(nil)
func (g ConnectContainer) GetName() string {
name, err := g.container.Name(g.ctx)
if err != nil {
@ -47,26 +52,8 @@ func (g ConnectContainer) GetAdminAddr() (string, int) {
return "localhost", g.adminPort
}
// Terminate attempts to terminate the container. On failure, an error will be
// returned and the reaper process (RYUK) will handle cleanup.
func (c ConnectContainer) Terminate() error {
if c.container == nil {
return nil
}
var err error
if *utils.FollowLog {
err := c.container.StopLogProducer()
if err1 := c.container.Terminate(c.ctx); err == nil {
err = err1
}
} else {
err = c.container.Terminate(c.ctx)
}
c.container = nil
return err
return cluster.TerminateContainer(c.ctx, c.container, true)
}
func (g ConnectContainer) Export(partition, peer string, client *api.Client) error {
@ -79,10 +66,15 @@ func (g ConnectContainer) GetServiceName() string {
// NewConnectService returns a container that runs envoy sidecar, launched by
// "consul connect envoy", for service name (serviceName) on the specified
// node. The container exposes port serviceBindPort and envoy admin port (19000)
// by mapping them onto host ports. The container's name has a prefix
// node. The container exposes port serviceBindPort and envoy admin port
// (19000) by mapping them onto host ports. The container's name has a prefix
// combining datacenter and name.
func NewConnectService(ctx context.Context, name string, serviceName string, serviceBindPort int, node libnode.Agent) (*ConnectContainer, error) {
func NewConnectService(ctx context.Context, name string, serviceName string, serviceBindPort int, node libcluster.Agent) (*ConnectContainer, error) {
nodeConfig := node.GetConfig()
if nodeConfig.ScratchDir == "" {
return nil, fmt.Errorf("node ScratchDir is required")
}
namePrefix := fmt.Sprintf("%s-service-connect-%s", node.GetDatacenter(), name)
containerName := utils.RandName(namePrefix)
@ -97,7 +89,7 @@ func NewConnectService(ctx context.Context, name string, serviceName string, ser
}
dockerfileCtx.BuildArgs = buildargs
nodeIP, _ := node.GetAddr()
adminPort := node.ClaimAdminPort()
req := testcontainers.ContainerRequest{
FromDockerfile: dockerfileCtx,
@ -107,61 +99,61 @@ func NewConnectService(ctx context.Context, name string, serviceName string, ser
Cmd: []string{
"consul", "connect", "envoy",
"-sidecar-for", serviceName,
"-admin-bind", "0.0.0.0:19000",
"-grpc-addr", fmt.Sprintf("%s:8502", nodeIP),
"-http-addr", fmt.Sprintf("%s:8500", nodeIP),
"-admin-bind", fmt.Sprintf("0.0.0.0:%d", adminPort),
"--",
"--log-level", envoyLogLevel},
ExposedPorts: []string{
fmt.Sprintf("%d/tcp", serviceBindPort), // Envoy Listener
"19000/tcp", // Envoy Admin Port
"--log-level", envoyLogLevel,
},
}
container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
})
if err != nil {
return nil, err
Env: make(map[string]string),
}
ip, err := container.ContainerIP(ctx)
if err != nil {
return nil, err
}
mappedAppPort, err := container.MappedPort(ctx, nat.Port(fmt.Sprintf("%d", serviceBindPort)))
if err != nil {
return nil, err
}
mappedAdminPort, err := container.MappedPort(ctx, nat.Port(fmt.Sprintf("%d", 19000)))
if err != nil {
return nil, err
}
if *utils.FollowLog {
if err := container.StartLogProducer(ctx); err != nil {
return nil, err
}
container.FollowOutput(&LogConsumer{
Prefix: containerName,
nodeInfo := node.GetInfo()
if nodeInfo.UseTLSForAPI || nodeInfo.UseTLSForGRPC {
req.Mounts = append(req.Mounts, testcontainers.ContainerMount{
Source: testcontainers.DockerBindMountSource{
// See cluster.NewConsulContainer for this info
HostPath: filepath.Join(nodeConfig.ScratchDir, "ca.pem"),
},
Target: "/ca.pem",
ReadOnly: true,
})
}
// Register the termination function the agent so the containers can stop together
terminate := func() error {
return container.Terminate(context.Background())
if nodeInfo.UseTLSForAPI {
req.Env["CONSUL_HTTP_ADDR"] = fmt.Sprintf("https://127.0.0.1:%d", 8501)
req.Env["CONSUL_HTTP_SSL"] = "1"
req.Env["CONSUL_CACERT"] = "/ca.pem"
} else {
req.Env["CONSUL_HTTP_ADDR"] = fmt.Sprintf("http://127.0.0.1:%d", 8500)
}
if nodeInfo.UseTLSForGRPC {
req.Env["CONSUL_GRPC_ADDR"] = fmt.Sprintf("https://127.0.0.1:%d", 8503)
req.Env["CONSUL_GRPC_CACERT"] = "/ca.pem"
} else {
req.Env["CONSUL_GRPC_ADDR"] = fmt.Sprintf("http://127.0.0.1:%d", 8502)
}
var (
appPortStr = strconv.Itoa(serviceBindPort)
adminPortStr = strconv.Itoa(adminPort)
)
info, err := cluster.LaunchContainerOnNode(ctx, node, req, []string{appPortStr, adminPortStr})
if err != nil {
return nil, err
}
out := &ConnectContainer{
ctx: ctx,
container: info.Container,
ip: info.IP,
appPort: info.MappedPorts[appPortStr].Int(),
adminPort: info.MappedPorts[adminPortStr].Int(),
serviceName: name,
}
node.RegisterTermination(terminate)
fmt.Printf("NewConnectService: name %s, mappedAppPort %d, bind port %d\n",
serviceName, mappedAppPort.Int(), serviceBindPort)
serviceName, out.appPort, serviceBindPort)
return &ConnectContainer{
container: container,
ip: ip,
appPort: mappedAppPort.Int(),
adminPort: mappedAdminPort.Int(),
serviceName: name,
}, nil
return out, nil
}

View File

@ -3,14 +3,16 @@ package service
import (
"context"
"fmt"
"strconv"
"time"
"github.com/docker/go-connections/nat"
"github.com/hashicorp/consul/api"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
libnode "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
@ -24,6 +26,8 @@ type exampleContainer struct {
serviceName string
}
var _ Service = (*exampleContainer)(nil)
func (g exampleContainer) GetName() string {
name, err := g.container.Name(g.ctx)
if err != nil {
@ -43,40 +47,20 @@ func (g exampleContainer) Start() error {
return g.container.Start(context.Background())
}
// Terminate attempts to terminate the container. On failure, an error will be
// returned and the reaper process (RYUK) will handle cleanup.
func (c exampleContainer) Terminate() error {
if c.container == nil {
return nil
}
var err error
if *utils.FollowLog {
err = c.container.StopLogProducer()
if err1 := c.container.Terminate(c.ctx); err1 == nil {
err = err1
}
} else {
err = c.container.Terminate(c.ctx)
}
c.container = nil
return err
return cluster.TerminateContainer(c.ctx, c.container, true)
}
func (g exampleContainer) Export(partition, peerName string, client *api.Client) error {
config := &api.ExportedServicesConfigEntry{
Name: partition,
Services: []api.ExportedService{
{
Name: g.GetServiceName(),
Consumers: []api.ServiceConsumer{
// TODO: need to handle the changed field name in 1.13
{Peer: peerName},
},
Services: []api.ExportedService{{
Name: g.GetServiceName(),
Consumers: []api.ServiceConsumer{
// TODO: need to handle the changed field name in 1.13
{Peer: peerName},
},
},
}},
}
_, _, err := client.ConfigEntries().Set(config, &api.WriteOptions{})
@ -87,57 +71,49 @@ func (g exampleContainer) GetServiceName() string {
return g.serviceName
}
func NewExampleService(ctx context.Context, name string, httpPort int, grpcPort int, node libnode.Agent) (Service, error) {
func NewExampleService(ctx context.Context, name string, httpPort int, grpcPort int, node libcluster.Agent) (Service, error) {
namePrefix := fmt.Sprintf("%s-service-example-%s", node.GetDatacenter(), name)
containerName := utils.RandName(namePrefix)
pod := node.GetPod()
if pod == nil {
return nil, fmt.Errorf("node Pod is required")
}
var (
httpPortStr = strconv.Itoa(httpPort)
grpcPortStr = strconv.Itoa(grpcPort)
)
req := testcontainers.ContainerRequest{
Image: hashicorpDockerProxy + "/fortio/fortio",
WaitingFor: wait.ForLog("").WithStartupTimeout(10 * time.Second),
AutoRemove: false,
Name: containerName,
Cmd: []string{"server", "-http-port", fmt.Sprintf("%d", httpPort), "-grpc-port", fmt.Sprintf("%d", grpcPort), "-redirect-port", "-disabled"},
Env: map[string]string{"FORTIO_NAME": name},
ExposedPorts: []string{
fmt.Sprintf("%d/tcp", httpPort), // HTTP Listener
fmt.Sprintf("%d/tcp", grpcPort), // GRPC Listener
Cmd: []string{
"server",
"-http-port", httpPortStr,
"-grpc-port", grpcPortStr,
"-redirect-port", "-disabled",
},
Env: map[string]string{"FORTIO_NAME": name},
}
container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
})
if err != nil {
return nil, err
}
ip, err := container.ContainerIP(ctx)
if err != nil {
return nil, err
}
mappedHTPPPort, err := container.MappedPort(ctx, nat.Port(fmt.Sprintf("%d", httpPort)))
info, err := cluster.LaunchContainerOnNode(ctx, node, req, []string{httpPortStr, grpcPortStr})
if err != nil {
return nil, err
}
mappedGRPCPort, err := container.MappedPort(ctx, nat.Port(fmt.Sprintf("%d", grpcPort)))
if err != nil {
return nil, err
out := &exampleContainer{
ctx: ctx,
container: info.Container,
ip: info.IP,
httpPort: info.MappedPorts[httpPortStr].Int(),
grpcPort: info.MappedPorts[grpcPortStr].Int(),
serviceName: name,
}
if *utils.FollowLog {
if err := container.StartLogProducer(ctx); err != nil {
return nil, err
}
container.FollowOutput(&LogConsumer{
Prefix: containerName,
})
}
fmt.Printf("Example service exposed http port %d, gRPC port %d\n", out.httpPort, out.grpcPort)
terminate := func() error {
return container.Terminate(context.Background())
}
node.RegisterTermination(terminate)
fmt.Printf("Example service exposed http port %d, gRPC port %d\n", mappedHTPPPort.Int(), mappedGRPCPort.Int())
return &exampleContainer{container: container, ip: ip, httpPort: mappedHTPPPort.Int(), grpcPort: mappedGRPCPort.Int(), serviceName: name}, nil
return out, nil
}

View File

@ -3,13 +3,17 @@ package service
import (
"context"
"fmt"
"path/filepath"
"strconv"
"time"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
"github.com/hashicorp/consul/api"
libnode "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
"github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
@ -19,10 +23,12 @@ type gatewayContainer struct {
container testcontainers.Container
ip string
port int
req testcontainers.ContainerRequest
adminPort int
serviceName string
}
var _ Service = (*gatewayContainer)(nil)
func (g gatewayContainer) GetName() string {
name, err := g.container.Name(g.ctx)
if err != nil {
@ -35,6 +41,10 @@ func (g gatewayContainer) GetAddr() (string, int) {
return g.ip, g.port
}
func (g gatewayContainer) GetAdminAddr() (string, int) {
return "localhost", g.adminPort
}
func (g gatewayContainer) Start() error {
if g.container == nil {
return fmt.Errorf("container has not been initialized")
@ -42,26 +52,8 @@ func (g gatewayContainer) Start() error {
return g.container.Start(context.Background())
}
// Terminate attempts to terminate the container. On failure, an error will be
// returned and the reaper process (RYUK) will handle cleanup.
func (c gatewayContainer) Terminate() error {
if c.container == nil {
return nil
}
var err error
if *utils.FollowLog {
err = c.container.StopLogProducer()
if err1 := c.container.Terminate(c.ctx); err == nil {
err = err1
}
} else {
err = c.container.Terminate(c.ctx)
}
c.container = nil
return err
return cluster.TerminateContainer(c.ctx, c.container, true)
}
func (g gatewayContainer) Export(partition, peer string, client *api.Client) error {
@ -72,7 +64,12 @@ func (g gatewayContainer) GetServiceName() string {
return g.serviceName
}
func NewGatewayService(ctx context.Context, name string, kind string, node libnode.Agent) (Service, error) {
func NewGatewayService(ctx context.Context, name string, kind string, node libcluster.Agent) (Service, error) {
nodeConfig := node.GetConfig()
if nodeConfig.ScratchDir == "" {
return nil, fmt.Errorf("node ScratchDir is required")
}
namePrefix := fmt.Sprintf("%s-service-gateway-%s", node.GetDatacenter(), name)
containerName := utils.RandName(namePrefix)
@ -87,7 +84,7 @@ func NewGatewayService(ctx context.Context, name string, kind string, node libno
}
dockerfileCtx.BuildArgs = buildargs
nodeIP, _ := node.GetAddr()
adminPort := node.ClaimAdminPort()
req := testcontainers.ContainerRequest{
FromDockerfile: dockerfileCtx,
@ -100,45 +97,61 @@ func NewGatewayService(ctx context.Context, name string, kind string, node libno
"-register",
"-service", name,
"-address", "{{ GetInterfaceIP \"eth0\" }}:8443",
fmt.Sprintf("-grpc-addr=%s:%d", nodeIP, 8502),
"-admin-bind", "0.0.0.0:19000",
"-admin-bind", fmt.Sprintf("0.0.0.0:%d", adminPort),
"--",
"--log-level", envoyLogLevel},
Env: map[string]string{"CONSUL_HTTP_ADDR": fmt.Sprintf("%s:%d", nodeIP, 8500)},
ExposedPorts: []string{
"8443/tcp", // Envoy Gateway Listener
"19000/tcp", // Envoy Admin Port
"--log-level", envoyLogLevel,
},
Env: make(map[string]string),
}
container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
nodeInfo := node.GetInfo()
if nodeInfo.UseTLSForAPI || nodeInfo.UseTLSForGRPC {
req.Mounts = append(req.Mounts, testcontainers.ContainerMount{
Source: testcontainers.DockerBindMountSource{
// See cluster.NewConsulContainer for this info
HostPath: filepath.Join(nodeConfig.ScratchDir, "ca.pem"),
},
Target: "/ca.pem",
ReadOnly: true,
})
}
if nodeInfo.UseTLSForAPI {
req.Env["CONSUL_HTTP_ADDR"] = fmt.Sprintf("https://127.0.0.1:%d", 8501)
req.Env["CONSUL_HTTP_SSL"] = "1"
req.Env["CONSUL_CACERT"] = "/ca.pem"
} else {
req.Env["CONSUL_HTTP_ADDR"] = fmt.Sprintf("http://127.0.0.1:%d", 8500)
}
if nodeInfo.UseTLSForGRPC {
req.Env["CONSUL_GRPC_ADDR"] = fmt.Sprintf("https://127.0.0.1:%d", 8503)
req.Env["CONSUL_GRPC_CACERT"] = "/ca.pem"
} else {
req.Env["CONSUL_GRPC_ADDR"] = fmt.Sprintf("http://127.0.0.1:%d", 8502)
}
var (
portStr = "8443"
adminPortStr = strconv.Itoa(adminPort)
)
info, err := cluster.LaunchContainerOnNode(ctx, node, req, []string{
portStr,
adminPortStr,
})
if err != nil {
return nil, err
}
ip, err := container.ContainerIP(ctx)
if err != nil {
return nil, err
}
mappedPort, err := container.MappedPort(ctx, "8443")
if err != nil {
return nil, err
out := &gatewayContainer{
ctx: ctx,
container: info.Container,
ip: info.IP,
port: info.MappedPorts[portStr].Int(),
adminPort: info.MappedPorts[adminPortStr].Int(),
serviceName: name,
}
if *utils.FollowLog {
if err := container.StartLogProducer(ctx); err != nil {
return nil, err
}
container.FollowOutput(&LogConsumer{
Prefix: containerName,
})
}
terminate := func() error {
return container.Terminate(context.Background())
}
node.RegisterTermination(terminate)
return &gatewayContainer{container: container, ip: ip, port: mappedPort.Int(), serviceName: name}, nil
return out, nil
}

View File

@ -4,61 +4,49 @@ import (
"context"
"fmt"
"io"
"net/http"
"github.com/hashicorp/consul/api"
libnode "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
"github.com/hashicorp/go-cleanhttp"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
func CreateAndRegisterStaticServerAndSidecar(node libnode.Agent) (Service, Service, error) {
func CreateAndRegisterStaticServerAndSidecar(node libcluster.Agent) (Service, Service, error) {
// Do some trickery to ensure that partial completion is correctly torn
// down, but successful execution is not.
var deferClean utils.ResettableDefer
defer deferClean.Execute()
// Create a service and proxy instance
serverService, err := NewExampleService(context.Background(), "static-server", 8080, 8079, node)
if err != nil {
return nil, nil, err
}
deferClean.Add(func() {
_ = serverService.Terminate()
})
serverConnectProxy, err := NewConnectService(context.Background(), "static-server-sidecar", "static-server", 8080, node) // bindPort not used
if err != nil {
return nil, nil, err
}
serverServiceIP, _ := serverService.GetAddr()
serverConnectProxyIP, _ := serverConnectProxy.GetAddr()
deferClean.Add(func() {
_ = serverConnectProxy.Terminate()
})
// Register the static-server service and sidecar
req := &api.AgentServiceRegistration{
Name: "static-server",
Port: 8080,
Address: serverServiceIP,
Name: "static-server",
Port: 8080,
Connect: &api.AgentServiceConnect{
SidecarService: &api.AgentServiceRegistration{
Name: "static-server-sidecar-proxy",
Port: 20000,
Address: serverConnectProxyIP,
Kind: api.ServiceKindConnectProxy,
Checks: api.AgentServiceChecks{
&api.AgentServiceCheck{
Name: "Connect Sidecar Listening",
TCP: fmt.Sprintf("%s:%d", serverConnectProxyIP, 20000),
Interval: "10s",
Status: api.HealthPassing,
},
&api.AgentServiceCheck{
Name: "Connect Sidecar Aliasing Static Server",
AliasService: "static-server",
Status: api.HealthPassing,
},
},
Proxy: &api.AgentServiceConnectProxyConfig{
DestinationServiceName: "static-server",
LocalServiceAddress: serverServiceIP,
LocalServicePort: 8080,
},
Proxy: &api.AgentServiceConnectProxyConfig{},
},
},
Check: &api.AgentServiceCheck{
Name: "Static Server Listening",
TCP: fmt.Sprintf("%s:%d", serverServiceIP, 8080),
TCP: fmt.Sprintf("127.0.0.1:%d", 8080),
Interval: "10s",
Status: api.HealthPassing,
},
@ -69,17 +57,30 @@ func CreateAndRegisterStaticServerAndSidecar(node libnode.Agent) (Service, Servi
return serverService, serverConnectProxy, err
}
// disable cleanup functions now that we have an object with a Terminate() function
deferClean.Reset()
return serverService, serverConnectProxy, nil
}
func CreateAndRegisterStaticClientSidecar(node libnode.Agent, peerName string, localMeshGateway bool) (*ConnectContainer, error) {
func CreateAndRegisterStaticClientSidecar(
node libcluster.Agent,
peerName string,
localMeshGateway bool,
) (*ConnectContainer, error) {
// Do some trickery to ensure that partial completion is correctly torn
// down, but successful execution is not.
var deferClean utils.ResettableDefer
defer deferClean.Execute()
// Create a service and proxy instance
clientConnectProxy, err := NewConnectService(context.Background(), "static-client-sidecar", "static-client", 5000, node)
if err != nil {
return nil, err
}
clientConnectProxyIP, _ := clientConnectProxy.GetAddr()
deferClean.Add(func() {
_ = clientConnectProxy.Terminate()
})
mgwMode := api.MeshGatewayModeRemote
if localMeshGateway {
@ -92,33 +93,19 @@ func CreateAndRegisterStaticClientSidecar(node libnode.Agent, peerName string, l
Port: 8080,
Connect: &api.AgentServiceConnect{
SidecarService: &api.AgentServiceRegistration{
Name: "static-client-sidecar-proxy",
Port: 20000,
Kind: api.ServiceKindConnectProxy,
Checks: api.AgentServiceChecks{
&api.AgentServiceCheck{
Name: "Connect Sidecar Listening",
TCP: fmt.Sprintf("%s:%d", clientConnectProxyIP, 20000),
Interval: "10s",
Status: api.HealthPassing,
},
},
Proxy: &api.AgentServiceConnectProxyConfig{
Upstreams: []api.Upstream{
{
DestinationName: "static-server",
DestinationPeer: peerName,
LocalBindAddress: "0.0.0.0",
LocalBindPort: 5000,
MeshGateway: api.MeshGatewayConfig{
Mode: mgwMode,
},
Upstreams: []api.Upstream{{
DestinationName: "static-server",
DestinationPeer: peerName,
LocalBindAddress: "0.0.0.0",
LocalBindPort: 5000,
MeshGateway: api.MeshGatewayConfig{
Mode: mgwMode,
},
},
}},
},
},
},
Checks: api.AgentServiceChecks{},
}
err = node.GetClient().Agent().ServiceRegister(req)
@ -126,11 +113,14 @@ func CreateAndRegisterStaticClientSidecar(node libnode.Agent, peerName string, l
return clientConnectProxy, err
}
// disable cleanup functions now that we have an object with a Terminate() function
deferClean.Reset()
return clientConnectProxy, nil
}
func GetEnvoyConfigDump(port int) (string, error) {
client := http.DefaultClient
client := cleanhttp.DefaultClient()
url := fmt.Sprintf("http://localhost:%d/config_dump?include_eds", port)
res, err := client.Get(url)

View File

@ -1,8 +1,6 @@
package service
import (
"github.com/hashicorp/consul/api"
)
import "github.com/hashicorp/consul/api"
// Service represents a process that will be registered with the
// Consul catalog, including Consul components such as sidecars and gateways

View File

@ -0,0 +1,210 @@
package topology
import (
"context"
"fmt"
"testing"
"github.com/hashicorp/consul/api"
"github.com/stretchr/testify/require"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
const (
AcceptingPeerName = "accepting-to-dialer"
DialingPeerName = "dialing-to-acceptor"
)
type BuiltCluster struct {
Cluster *libcluster.Cluster
Context *libcluster.BuildContext
Service libservice.Service
Container *libservice.ConnectContainer
}
// BasicPeeringTwoClustersSetup sets up a scenario for testing peering, which consists of
//
// - an accepting cluster with 3 servers and 1 client agent. The client should be used to
// host a service for export: staticServerSvc.
// - a dialing cluster with 1 server and 1 client. The client should be used to host a
//   service connecting to staticServerSvc.
// - Create the peering, export the service from the accepting cluster, and verify service
// connectivity.
//
// It returns the accepting cluster, the dialing cluster, staticServerSvc, and staticClientSvcSidecar.
func BasicPeeringTwoClustersSetup(
t *testing.T,
consulVersion string,
) (*BuiltCluster, *BuiltCluster) {
acceptingCluster, acceptingCtx, acceptingClient := NewPeeringCluster(t, "dc1", 3, consulVersion)
dialingCluster, dialingCtx, dialingClient := NewPeeringCluster(t, "dc2", 1, consulVersion)
require.NoError(t, dialingCluster.PeerWithCluster(acceptingClient, AcceptingPeerName, DialingPeerName))
libassert.PeeringStatus(t, acceptingClient, AcceptingPeerName, api.PeeringStateActive)
// libassert.PeeringExports(t, acceptingClient, acceptingPeerName, 1)
// Register a static-server service in acceptingCluster and export it to the dialing cluster
var serverSidecarService libservice.Service
{
clientNode := acceptingCluster.Clients()[0]
// Create a service and proxy instance
var err error
serverSidecarService, _, err = libservice.CreateAndRegisterStaticServerAndSidecar(clientNode)
require.NoError(t, err)
libassert.CatalogServiceExists(t, acceptingClient, "static-server")
libassert.CatalogServiceExists(t, acceptingClient, "static-server-sidecar-proxy")
require.NoError(t, serverSidecarService.Export("default", AcceptingPeerName, acceptingClient))
}
// Register a static-client service in the dialing cluster and set its upstream to the static-server service
var clientSidecarService *libservice.ConnectContainer
{
clientNode := dialingCluster.Clients()[0]
// Create a service and proxy instance
var err error
clientSidecarService, err = libservice.CreateAndRegisterStaticClientSidecar(clientNode, DialingPeerName, true)
require.NoError(t, err)
libassert.CatalogServiceExists(t, dialingClient, "static-client-sidecar-proxy")
}
_, port := clientSidecarService.GetAddr()
libassert.HTTPServiceEchoes(t, "localhost", port)
return &BuiltCluster{
Cluster: acceptingCluster,
Context: acceptingCtx,
Service: serverSidecarService,
Container: nil,
},
&BuiltCluster{
Cluster: dialingCluster,
Context: dialingCtx,
Service: nil,
Container: clientSidecarService,
}
}
// NewDialingCluster creates a cluster for peering with a single dev agent
// TODO: note: formerly called CreatingPeeringClusterAndSetup
//
// Deprecated: use NewPeeringCluster mostly
func NewDialingCluster(
t *testing.T,
version string,
dialingPeerName string,
) (*libcluster.Cluster, *api.Client, libservice.Service) {
t.Helper()
t.Logf("creating the dialing cluster")
opts := libcluster.BuildOptions{
Datacenter: "dc2",
InjectAutoEncryption: true,
InjectGossipEncryption: true,
AllowHTTPAnyway: true,
ConsulVersion: version,
}
ctx := libcluster.NewBuildContext(t, opts)
conf := libcluster.NewConfigBuilder(ctx).
Peering(true).
ToAgentConfig(t)
t.Logf("dc2 server config: \n%s", conf.JSON)
cluster, err := libcluster.NewN(t, *conf, 1)
require.NoError(t, err)
node := cluster.Agents[0]
client := node.GetClient()
libcluster.WaitForLeader(t, cluster, client)
libcluster.WaitForMembers(t, client, 1)
// Default Proxy Settings
ok, err := utils.ApplyDefaultProxySettings(client)
require.NoError(t, err)
require.True(t, ok)
// Create the mesh gateway for dataplane traffic
_, err = libservice.NewGatewayService(context.Background(), "mesh", "mesh", node)
require.NoError(t, err)
// Create a service and proxy instance
clientProxyService, err := libservice.CreateAndRegisterStaticClientSidecar(node, dialingPeerName, true)
require.NoError(t, err)
libassert.CatalogServiceExists(t, client, "static-client-sidecar-proxy")
return cluster, client, clientProxyService
}
// NewPeeringCluster creates a cluster with peering enabled. It also creates
// and registers a mesh-gateway at the client agent. The API client returned is
// pointed at the client agent.
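//
// A minimal caller sketch (utils.TargetVersion is the version flag defined in
// libs/utils):
//
//	cluster, ctx, client := NewPeeringCluster(t, "dc1", 3, utils.TargetVersion)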
func NewPeeringCluster(
t *testing.T,
datacenter string,
numServers int,
version string,
) (*libcluster.Cluster, *libcluster.BuildContext, *api.Client) {
require.NotEmpty(t, datacenter)
require.True(t, numServers > 0)
opts := libcluster.BuildOptions{
Datacenter: datacenter,
InjectAutoEncryption: true,
InjectGossipEncryption: true,
AllowHTTPAnyway: true,
ConsulVersion: version,
}
ctx := libcluster.NewBuildContext(t, opts)
serverConf := libcluster.NewConfigBuilder(ctx).
Bootstrap(numServers).
Peering(true).
ToAgentConfig(t)
t.Logf("%s server config: \n%s", datacenter, serverConf.JSON)
cluster, err := libcluster.NewN(t, *serverConf, numServers)
require.NoError(t, err)
var retryJoin []string
for i := 0; i < numServers; i++ {
retryJoin = append(retryJoin, fmt.Sprintf("agent-%d", i))
}
// Add a stable client to register the service
clientConf := libcluster.NewConfigBuilder(ctx).
Client().
Peering(true).
RetryJoin(retryJoin...).
ToAgentConfig(t)
t.Logf("%s server config: \n%s", datacenter, clientConf.JSON)
require.NoError(t, cluster.AddN(*clientConf, 1, true))
// Use the client agent as the HTTP endpoint since, unlike the servers, it is not rotated in most tests.
clientNode := cluster.Agents[numServers]
client := clientNode.GetClient()
libcluster.WaitForLeader(t, cluster, client)
libcluster.WaitForMembers(t, client, numServers+1)
// Default Proxy Settings
ok, err := utils.ApplyDefaultProxySettings(client)
require.NoError(t, err)
require.True(t, ok)
// Create the mesh gateway for dataplane traffic
_, err = libservice.NewGatewayService(context.Background(), "mesh", "mesh", clientNode)
require.NoError(t, err)
return cluster, ctx, client
}

View File

@ -0,0 +1,12 @@
package utils
import "encoding/json"
// Dump pretty prints the provided arg as json.
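// For example (illustrative; any JSON-marshalable value works):
//
//	t.Logf("agent config:\n%s", Dump(conf))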
func Dump(v any) string {
b, err := json.MarshalIndent(v, "", " ")
if err != nil {
return "<ERR: " + err.Error() + ">"
}
return string(b)
}

View File

@ -0,0 +1,28 @@
package utils
// ResettableDefer captures a series of cleanup functions so they can be run
// (or cancelled) in bulk. It is ideal for use in a long constructor function,
// before the overall Close/Stop/Terminate method is ready to tear everything
// down properly.
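//
// A typical usage sketch (startContainer and its Terminate method stand in
// for whatever fallible setup the constructor performs):
//
//	var deferClean ResettableDefer
//	defer deferClean.Execute()
//
//	c, err := startContainer(ctx)
//	if err != nil {
//		return nil, err
//	}
//	deferClean.Add(func() { _ = c.Terminate(ctx) })
//
//	// ... more fallible setup ...
//
//	// success: the caller's own Terminate method now owns cleanup
//	deferClean.Reset()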
type ResettableDefer struct {
cleanupFns []func()
}
// Add registers another function to call at Execute time.
func (d *ResettableDefer) Add(f func()) {
d.cleanupFns = append(d.cleanupFns, f)
}
// Reset clears the pending defer work.
func (d *ResettableDefer) Reset() {
d.cleanupFns = nil
}
// Execute actually executes the functions registered by Add in the reverse
// order of their call order (like normal defer blocks).
func (d *ResettableDefer) Execute() {
// Run these in reverse order, like defer blocks.
for i := len(d.cleanupFns) - 1; i >= 0; i-- {
d.cleanupFns[i]()
}
}

View File

@ -0,0 +1,38 @@
package utils
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
)
// DockerExec simply shells out to the docker CLI binary on your host.
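// For example (illustrative), to capture the names of running containers:
//
//	var out bytes.Buffer
//	if err := DockerExec([]string{"ps", "--format", "{{.Names}}"}, &out); err != nil {
//		t.Fatal(err)
//	}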
func DockerExec(args []string, stdout io.Writer) error {
return cmdExec("docker", "docker", args, stdout, "")
}
func cmdExec(name, binary string, args []string, stdout io.Writer, dir string) error {
if binary == "" {
panic("binary named " + name + " was not detected")
}
var errWriter bytes.Buffer
if stdout == nil {
stdout = os.Stdout
}
cmd := exec.Command(binary, args...)
if dir != "" {
cmd.Dir = dir
}
cmd.Stdout = stdout
cmd.Stderr = &errWriter
cmd.Stdin = nil
if err := cmd.Run(); err != nil {
return fmt.Errorf("could not invoke %q: %v : %s", name, err, errWriter.String())
}
return nil
}

View File

@ -10,6 +10,9 @@ import (
func RandName(name string) string {
shortID, err := shortid.New(1, shortid.DefaultABC, 6666)
if err != nil {
return ""
}
id, err := shortID.Generate()
if err != nil {
return ""

View File

@ -0,0 +1,47 @@
package utils
import (
"flag"
"github.com/hashicorp/go-version"
)
var (
TargetImageName string
TargetVersion string
LatestImageName string
LatestVersion string
FollowLog bool
)
const (
DefaultImageNameOSS = "consul"
DefaultImageNameENT = "hashicorp/consul-enterprise"
ImageVersionSuffixENT = "-ent"
)
func init() {
flag.StringVar(&TargetImageName, "target-image", defaultImageName, "docker image name to be used under test (Default: "+defaultImageName+")")
flag.StringVar(&TargetVersion, "target-version", "local", "docker image version to be used as UUT (unit under test)")
flag.StringVar(&LatestImageName, "latest-image", defaultImageName, "docker image name to be used as the latest/previous version (Default: "+defaultImageName+")")
flag.StringVar(&LatestVersion, "latest-version", "latest", "docker image version to be used as latest")
flag.BoolVar(&FollowLog, "follow-log", true, "follow container log in output (Default: true)")
}
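// DockerImage returns the full image reference ("name:version") for the given
// image name and version, appending the enterprise suffix when an enterprise
// image is paired with a semantic version. For example (illustrative):
//
//	DockerImage(DefaultImageNameOSS, "local")  // "consul:local"
//	DockerImage(DefaultImageNameENT, "1.14.3") // "hashicorp/consul-enterprise:1.14.3-ent"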
func DockerImage(image, version string) string {
v := image + ":" + version
if image == DefaultImageNameENT && isSemVer(version) {
// Enterprise versions get a suffix.
v += ImageVersionSuffixENT
}
return v
}
func isSemVer(ver string) bool {
_, err := version.NewVersion(ver)
return err == nil
}

View File

@ -3,13 +3,7 @@
package utils
import "flag"
// TODO: need a better way to abstract the container creation and configuration;
// please refer to the discussion in github PR
var TargetImage = flag.String("target-image", "consul", "docker image name to be used under test (Default: consul)")
var TargetVersion = flag.String("target-version", "local", "docker image version to be used as UUT (unit under test)")
var LatestImage = flag.String("latest-image", "consul", "docker image name to be used under test (Default: consul)")
var LatestVersion = flag.String("latest-version", "1.11", "docker image to be used as latest")
var FollowLog = flag.Bool("follow-log", true, "follow container log in output (Default: true)")
const (
defaultImageName = DefaultImageNameOSS
ImageVersionSuffix = ""
)

View File

@ -5,7 +5,6 @@ import (
"github.com/stretchr/testify/require"
libagent "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
@ -23,7 +22,6 @@ import (
// - Make sure a call to the client sidecar local bind port returns a response from the upstream, static-server
func TestBasicConnectService(t *testing.T) {
cluster := createCluster(t)
defer terminate(t, cluster)
clientService := createServices(t, cluster)
_, port := clientService.GetAddr()
@ -31,31 +29,27 @@ func TestBasicConnectService(t *testing.T) {
libassert.HTTPServiceEchoes(t, "localhost", port)
}
func terminate(t *testing.T, cluster *libcluster.Cluster) {
err := cluster.Terminate()
require.NoError(t, err)
}
// createCluster
func createCluster(t *testing.T) *libcluster.Cluster {
opts := libagent.BuildOptions{
opts := libcluster.BuildOptions{
InjectAutoEncryption: true,
InjectGossipEncryption: true,
// TODO: fix the test to not need the service/envoy stack to use :8500
AllowHTTPAnyway: true,
}
ctx, err := libagent.NewBuildContext(opts)
require.NoError(t, err)
ctx := libcluster.NewBuildContext(t, opts)
conf, err := libagent.NewConfigBuilder(ctx).ToAgentConfig()
require.NoError(t, err)
conf := libcluster.NewConfigBuilder(ctx).
ToAgentConfig(t)
t.Logf("Cluster config:\n%s", conf.JSON)
configs := []libagent.Config{*conf}
configs := []libcluster.Config{*conf}
cluster, err := libcluster.New(configs)
cluster, err := libcluster.New(t, configs)
require.NoError(t, err)
client, err := cluster.GetClient(nil, true)
require.NoError(t, err)
node := cluster.Agents[0]
client := node.GetClient()
libcluster.WaitForLeader(t, cluster, client)
libcluster.WaitForMembers(t, client, 1)

View File

@ -1,100 +1,77 @@
package metrics
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"testing"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
libagent "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
// Given a 3-server cluster, when the leader is elected, the leader's isLeader metric is 1 and each non-leader's is 0.
func TestLeadershipMetrics(t *testing.T) {
var configs []agent.Config
opts := libcluster.BuildOptions{
InjectAutoEncryption: true,
InjectGossipEncryption: true,
}
ctx := libcluster.NewBuildContext(t, opts)
var configs []libcluster.Config
statsConf := libcluster.NewConfigBuilder(ctx).
Telemetry("127.0.0.0:2180").
ToAgentConfig(t)
statsConf, err := libagent.NewConfigBuilder(nil).Telemetry("127.0.0.0:2180").ToAgentConfig()
require.NoError(t, err)
configs = append(configs, *statsConf)
conf, err := libagent.NewConfigBuilder(nil).Bootstrap(3).ToAgentConfig()
require.NoError(t, err)
conf := libcluster.NewConfigBuilder(ctx).
Bootstrap(3).
ToAgentConfig(t)
numServer := 3
for i := 1; i < numServer; i++ {
configs = append(configs, *conf)
}
cluster, err := libcluster.New(configs)
cluster, err := libcluster.New(t, configs)
require.NoError(t, err)
defer terminate(t, cluster)
svrCli := cluster.Agents[0].GetClient()
libcluster.WaitForLeader(t, cluster, svrCli)
libcluster.WaitForMembers(t, svrCli, 3)
retryWithBackoff := func(agent agent.Agent, expectedStr string) error {
waiter := &utils.Waiter{
MaxWait: 5 * time.Minute,
}
_, port := agent.GetAddr()
ctx := context.Background()
for {
if waiter.Failures() > 5 {
return fmt.Errorf("reach max failure: %d", waiter.Failures())
}
metricsStr, err := getMetrics(t, "127.0.0.1", port, "/v1/agent/metrics")
if err != nil {
return fmt.Errorf("error get metrics: %v", err)
}
if strings.Contains(metricsStr, expectedStr) {
return nil
}
waiter.Wait(ctx)
}
}
leader, err := cluster.Leader()
require.NoError(t, err)
leadAddr, leaderPort := leader.GetAddr()
leadAddr := leader.GetIP()
for i, n := range cluster.Agents {
addr, port := n.GetAddr()
if addr == leadAddr && port == leaderPort {
err = retryWithBackoff(leader, ".server.isLeader\",\"Value\":1,")
require.NoError(t, err, "%dth node(leader): could not find the metric %q in the /v1/agent/metrics response", i, ".server.isLeader\",\"Value\":1,")
} else {
err = retryWithBackoff(n, ".server.isLeader\",\"Value\":0,")
require.NoError(t, err, "%dth node(non-leader): could not find the metric %q in the /v1/agent/metrics response", i, ".server.isLeader\",\"Value\":0,")
}
for _, agent := range cluster.Agents {
client := agent.GetClient().Agent()
retry.RunWith(libcluster.LongFailer(), t, func(r *retry.R) {
info, err := client.Metrics()
require.NoError(r, err)
var (
leaderGauge api.GaugeValue
found bool
)
for _, g := range info.Gauges {
if strings.HasSuffix(g.Name, ".server.isLeader") {
leaderGauge = g
found = true
}
}
require.True(r, found, "did not find isLeader gauge metric")
addr := agent.GetIP()
if addr == leadAddr {
require.Equal(r, float32(1), leaderGauge.Value)
} else {
require.Equal(r, float32(0), leaderGauge.Value)
}
})
}
}
func getMetrics(t *testing.T, addr string, port int, path string) (string, error) {
u, err := url.Parse(fmt.Sprintf("http://%s:%d", addr, port))
require.NoError(t, err)
u.Path = path
resp, err := http.Get(u.String())
if err != nil {
return "", fmt.Errorf("error get metrics: %v", err)
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return "nil", fmt.Errorf("error read metrics: %v", err)
}
return string(body), nil
}
func terminate(t *testing.T, cluster *libcluster.Cluster) {
err := cluster.Terminate()
require.NoError(t, err)
}

View File

@ -6,16 +6,16 @@ import (
"testing"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
libagent "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/test/integration/consul-container/test/topology"
)
// TestPeering_RotateServerAndCAThenFail_
@ -46,48 +46,55 @@ import (
// - Terminate the server nodes in the exporting cluster
// - Make sure there is still service connectivity from the importing cluster
func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
acceptingCluster, dialingCluster, _, staticClientSvcSidecar := topology.BasicPeeringTwoClustersSetup(t, *utils.TargetVersion)
defer func() {
err := acceptingCluster.Terminate()
require.NoErrorf(t, err, "termining accepting cluster")
dialingCluster.Terminate()
require.NoErrorf(t, err, "termining dialing cluster")
}()
accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, utils.TargetVersion)
var (
acceptingCluster = accepting.Cluster
dialingCluster = dialing.Cluster
acceptingCtx = accepting.Context
clientSidecarService = dialing.Container
)
dialingClient, err := dialingCluster.GetClient(nil, false)
require.NoError(t, err)
_, port := staticClientSvcSidecar.GetAddr()
acceptingClient, err := acceptingCluster.GetClient(nil, false)
require.NoError(t, err)
t.Run("test rotating servers", func(t *testing.T) {
t.Logf("test rotating servers")
{
var (
peerName = libtopology.AcceptingPeerName
cluster = acceptingCluster
client = acceptingClient
ctx = acceptingCtx
)
// Start by replacing the Followers
leader, err := acceptingCluster.Leader()
leader, err := cluster.Leader()
require.NoError(t, err)
followers, err := acceptingCluster.Followers()
followers, err := cluster.Followers()
require.NoError(t, err)
require.Len(t, followers, 2)
for idx, follower := range followers {
t.Log("Removing follower", idx)
rotateServer(t, acceptingCluster, acceptingClient, acceptingCluster.BuildContext, follower)
rotateServer(t, cluster, client, ctx, follower)
}
t.Log("Removing leader")
rotateServer(t, acceptingCluster, acceptingClient, acceptingCluster.BuildContext, leader)
rotateServer(t, cluster, client, ctx, leader)
libassert.PeeringStatus(t, acceptingClient, topology.AcceptingPeerName, api.PeeringStateActive)
libassert.PeeringExports(t, acceptingClient, topology.AcceptingPeerName, 1)
libassert.PeeringStatus(t, client, peerName, api.PeeringStateActive)
libassert.PeeringExports(t, client, peerName, 1)
_, port := clientSidecarService.GetAddr()
libassert.HTTPServiceEchoes(t, "localhost", port)
})
}
t.Run("rotate exporting cluster's root CA", func(t *testing.T) {
testutil.RunStep(t, "rotate exporting cluster's root CA", func(t *testing.T) {
// we will verify that the peering on the dialing side persists the updated CAs
peeringBefore, peerMeta, err := dialingClient.Peerings().Read(context.Background(), topology.DialingPeerName, &api.QueryOptions{})
peeringBefore, peerMeta, err := dialingClient.Peerings().Read(context.Background(), libtopology.DialingPeerName, &api.QueryOptions{})
require.NoError(t, err)
_, caMeta, err := acceptingClient.Connect().CAGetConfig(&api.QueryOptions{})
@ -116,7 +123,7 @@ func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
require.NoError(t, err)
// The peering object should reflect the update
peeringAfter, _, err := dialingClient.Peerings().Read(context.Background(), topology.DialingPeerName, &api.QueryOptions{
peeringAfter, _, err := dialingClient.Peerings().Read(context.Background(), libtopology.DialingPeerName, &api.QueryOptions{
WaitIndex: peerMeta.LastIndex,
WaitTime: 30 * time.Second,
})
@ -130,19 +137,17 @@ func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
require.Len(t, rootList.Roots, 2)
// Connectivity should still be maintained
_, port := staticClientSvcSidecar.GetAddr()
_, port := clientSidecarService.GetAddr()
libassert.HTTPServiceEchoes(t, "localhost", port)
verifySidecarHasTwoRootCAs(t, staticClientSvcSidecar)
verifySidecarHasTwoRootCAs(t, clientSidecarService)
})
t.Run("terminate exporting clusters servers and ensure imported services are still reachable", func(t *testing.T) {
testutil.RunStep(t, "terminate exporting clusters servers and ensure imported services are still reachable", func(t *testing.T) {
// Keep this list for later
newNodes, err := acceptingCluster.Clients()
require.NoError(t, err)
newNodes := acceptingCluster.Clients()
serverNodes, err := acceptingCluster.Servers()
require.NoError(t, err)
serverNodes := acceptingCluster.Servers()
for _, node := range serverNodes {
require.NoError(t, node.Terminate())
}
@ -153,26 +158,20 @@ func TestPeering_RotateServerAndCAThenFail_(t *testing.T) {
// ensure any transitory actions like replication cleanup would not affect the next verifications
time.Sleep(30 * time.Second)
_, port := staticClientSvcSidecar.GetAddr()
_, port := clientSidecarService.GetAddr()
libassert.HTTPServiceEchoes(t, "localhost", port)
})
}
func terminate(t *testing.T, cluster *libcluster.Cluster) {
err := cluster.Terminate()
require.NoError(t, err)
}
// rotateServer adds a new server agent to the cluster, then forces the prior agent to leave.
func rotateServer(t *testing.T, cluster *libcluster.Cluster, client *api.Client, ctx *libagent.BuildContext, node libagent.Agent) {
conf, err := libagent.NewConfigBuilder(cluster.BuildContext).
func rotateServer(t *testing.T, cluster *libcluster.Cluster, client *api.Client, ctx *libcluster.BuildContext, node libcluster.Agent) {
conf := libcluster.NewConfigBuilder(ctx).
Bootstrap(0).
Peering(true).
RetryJoin("agent-3"). // Always use the client agent since it never leaves the cluster
ToAgentConfig()
require.NoError(t, err)
ToAgentConfig(t)
err = cluster.Add([]libagent.Config{*conf})
err := cluster.AddN(*conf, 1, false)
require.NoError(t, err, "could not start new node")
libcluster.WaitForMembers(t, client, 5)
@ -185,6 +184,7 @@ func rotateServer(t *testing.T, cluster *libcluster.Cluster, client *api.Client,
func verifySidecarHasTwoRootCAs(t *testing.T, sidecar libservice.Service) {
connectContainer, ok := sidecar.(*libservice.ConnectContainer)
require.True(t, ok)
_, adminPort := connectContainer.GetAdminAddr()
failer := func() *retry.Timer {
@ -193,19 +193,13 @@ func verifySidecarHasTwoRootCAs(t *testing.T, sidecar libservice.Service) {
retry.RunWith(failer(), t, func(r *retry.R) {
dump, err := libservice.GetEnvoyConfigDump(adminPort)
if err != nil {
r.Fatal("could not curl envoy configuration")
}
require.NoError(r, err, "could not curl envoy configuration")
// Make sure there are two certs in the sidecar
filter := `.configs[] | select(.["@type"] | contains("type.googleapis.com/envoy.admin.v3.ClustersConfigDump")).dynamic_active_clusters[] | select(.cluster.name | contains("static-server.default.dialing-to-acceptor.external")).cluster.transport_socket.typed_config.common_tls_context.validation_context.trusted_ca.inline_string`
results, err := utils.JQFilter(dump, filter)
if err != nil {
r.Fatal("could not parse envoy configuration")
}
if len(results) != 1 {
r.Fatal("could not find certificates in cluster TLS context")
}
require.NoError(r, err, "could not parse envoy configuration")
require.Len(r, results, 1, "could not find certificates in cluster TLS context")
rest := []byte(results[0])
var count int
@ -218,8 +212,6 @@ func verifySidecarHasTwoRootCAs(t *testing.T, sidecar libservice.Service) {
count++
}
if count != 2 {
r.Fatalf("expected 2 TLS certificates and %d present", count)
}
require.Equal(r, 2, count, "expected 2 TLS certificates but found %d", count)
})
}

View File

@ -1,87 +0,0 @@
package topology
import (
"sync"
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
)
const (
AcceptingPeerName = "accepting-to-dialer"
DialingPeerName = "dialing-to-acceptor"
)
// BasicPeeringTwoClustersSetup sets up a scenario for testing peering, which consists of
// - an accepting cluster with 3 servers and 1 client agnet. The client should be used to
// host a service for export: staticServerSvc.
// - an dialing cluster with 1 server and 1 client. The client should be used to host a
// service connecting to staticServerSvc.
// - Create the peering, export the service from accepting cluster, and verify service
// connectivity.
//
// It returns objects of the accepting cluster, dialing cluster, staticServerSvc, and staticClientSvcSidecar
func BasicPeeringTwoClustersSetup(t *testing.T, consulVersion string) (*libcluster.Cluster, *libcluster.Cluster, *libservice.Service, *libservice.ConnectContainer) {
var wg sync.WaitGroup
var acceptingCluster, dialingCluster *libcluster.Cluster
var acceptingClient *api.Client
wg.Add(1)
go func() {
opts := &libcluster.Options{
Datacenter: "dc1",
NumServer: 3,
NumClient: 1,
Version: consulVersion,
}
acceptingCluster, acceptingClient = libcluster.CreatingPeeringClusterAndSetup(t, opts)
wg.Done()
}()
wg.Add(1)
go func() {
opts := &libcluster.Options{
Datacenter: "dc2",
NumServer: 1,
NumClient: 1,
Version: consulVersion,
}
dialingCluster, _ = libcluster.CreatingPeeringClusterAndSetup(t, opts)
wg.Done()
}()
wg.Wait()
err := dialingCluster.PeerWithCluster(acceptingClient, AcceptingPeerName, DialingPeerName)
require.NoError(t, err)
libassert.PeeringStatus(t, acceptingClient, AcceptingPeerName, api.PeeringStateActive)
// Register an static-server service in acceptingCluster and export to dialing cluster
clientNodes, err := acceptingCluster.Clients()
require.NoError(t, err)
require.True(t, len(clientNodes) > 0)
staticServerSvc, _, err := libservice.CreateAndRegisterStaticServerAndSidecar(clientNodes[0])
require.NoError(t, err)
libassert.CatalogServiceExists(t, acceptingClient, "static-server")
libassert.CatalogServiceExists(t, acceptingClient, "static-server-sidecar-proxy")
staticServerSvc.Export("default", AcceptingPeerName, acceptingClient)
libassert.PeeringExports(t, acceptingClient, AcceptingPeerName, 1)
// Register an static-client service in dialing cluster and set upstream to static-server service
clientNodesDialing, err := dialingCluster.Clients()
require.NoError(t, err)
require.True(t, len(clientNodesDialing) > 0)
staticClientSvcSidecar, err := libservice.CreateAndRegisterStaticClientSidecar(clientNodesDialing[0], DialingPeerName, true)
require.NoError(t, err)
_, port := staticClientSvcSidecar.GetAddr()
libassert.HTTPServiceEchoes(t, "localhost", port)
return acceptingCluster, dialingCluster, &staticServerSvc, staticClientSvcSidecar
}

View File

@ -1,11 +1,17 @@
# Consul Upgrade Integration tests
## Local run
- run `make dev-docker`
- run the tests, e.g., `go test -run ^TestBasicConnectService$ ./test/basic -v`
To specify targets and latest image pass `target-version` and `latest-version` to the tests. By default, it uses the `consul` docker image with respectively `local` and `latest` tags.
To specify the target and latest versions, pass `target-version` and `latest-version`
to the tests. By default, the tests use the `consul` docker image with the
`local` and `latest` tags respectively.
To use dev consul image, pass `target-image` and `target-version`, `-target-image hashicorppreview/consul -target-version 1.14-dev`.
To use a dev consul image, pass `target-image` and `target-version`:
By default, all container's logs are written to either `stdout`, or `stderr`; this makes it hard to debug, when the test case creates many
containers. To disable following container logs, run the test with `-follow-log false`.
-target-image hashicorppreview/consul -target-version 1.14-dev
By default, each container's logs are written to either `stdout` or `stderr`;
this makes it hard to debug when a test case creates many containers. To
disable following container logs, run the test with `-follow-log false`.
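For example, combining the flags above (with the same illustrative values), a single
test can be run locally against a dev image with
`go test -run ^TestBasicConnectService$ ./test/basic -v -target-image hashicorppreview/consul -target-version 1.14-dev -follow-log false`.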

View File

@ -6,12 +6,10 @@ import (
"testing"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
libagent "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
@ -33,62 +31,48 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
},
{
oldversion: "1.13",
targetVersion: *utils.TargetVersion,
targetVersion: utils.TargetVersion,
},
{
oldversion: "1.14",
targetVersion: *utils.TargetVersion,
targetVersion: utils.TargetVersion,
},
}
run := func(t *testing.T, tc testcase) {
var configs []libagent.Config
configCtx, err := libagent.NewBuildContext(libagent.BuildOptions{
ConsulVersion: tc.oldversion,
configCtx := libcluster.NewBuildContext(t, libcluster.BuildOptions{
ConsulImageName: utils.TargetImageName,
ConsulVersion: tc.oldversion,
})
require.NoError(t, err)
numServers := 1
leaderConf, err := libagent.NewConfigBuilder(configCtx).
const (
numServers = 1
)
serverConf := libcluster.NewConfigBuilder(configCtx).
Bootstrap(numServers).
ToAgentConfig()
require.NoError(t, err)
t.Logf("Cluster config:\n%s", leaderConf.JSON)
leaderConf.Version = tc.oldversion
for i := 0; i < numServers; i++ {
configs = append(configs, *leaderConf)
}
ToAgentConfig(t)
t.Logf("Cluster config:\n%s", serverConf.JSON)
require.Equal(t, tc.oldversion, serverConf.Version) // TODO: remove
cluster, err := libcluster.New(configs)
cluster, err := libcluster.NewN(t, *serverConf, numServers)
require.NoError(t, err)
defer terminate(t, cluster)
server := cluster.Agents[0]
client := server.GetClient()
client := cluster.APIClient(0)
libcluster.WaitForLeader(t, cluster, client)
libcluster.WaitForMembers(t, client, numServers)
// Create a service to be stored in the snapshot
serviceName := "api"
const serviceName = "api"
index := serviceCreate(t, client, serviceName)
ch := make(chan []*api.ServiceEntry)
errCh := make(chan error)
go func() {
service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index})
if err == nil && q.QueryBackend != api.QueryBackendStreaming {
err = fmt.Errorf("invalid backend for this test %s", q.QueryBackend)
}
if err != nil {
errCh <- err
} else {
ch <- service
}
}()
ch, errCh := serviceHealthBlockingQuery(client, serviceName, index)
require.NoError(t, client.Agent().ServiceRegister(
&api.AgentServiceRegistration{Name: serviceName, Port: 9998},
))
timer := time.NewTimer(1 * time.Second)
timer := time.NewTimer(3 * time.Second)
select {
case err := <-errCh:
require.NoError(t, err)
@ -101,6 +85,7 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
}
// upgrade the cluster to the Target version
t.Logf("initiating standard upgrade to version=%q", tc.targetVersion)
err = cluster.StandardUpgrade(t, context.Background(), tc.targetVersion)
if !tc.expectErr {
require.NoError(t, err)
@ -124,6 +109,6 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
func(t *testing.T) {
run(t, tc)
})
time.Sleep(5 * time.Second)
// time.Sleep(5 * time.Second)
}
}

View File

@ -1,16 +1,12 @@
package upgrade
import (
"context"
"fmt"
"testing"
"time"
"github.com/hashicorp/consul/api"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
libagent "github.com/hashicorp/consul/test/integration/consul-container/libs/agent"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
@ -22,194 +18,19 @@ func TestTargetServersWithLatestGAClients(t *testing.T) {
numClients = 1
)
cluster := serversCluster(t, numServers, *utils.TargetVersion, *utils.TargetImage)
defer terminate(t, cluster)
cluster := serversCluster(t, numServers, utils.TargetImageName, utils.TargetVersion)
clients := clientsCreate(t, numClients, *utils.LatestImage, *utils.LatestVersion, cluster)
clientsCreate(t, numClients, utils.LatestImageName, utils.LatestVersion, cluster)
require.NoError(t, cluster.Join(clients))
client := cluster.Agents[0].GetClient()
client := cluster.APIClient(0)
libcluster.WaitForLeader(t, cluster, client)
libcluster.WaitForMembers(t, client, 4)
serviceName := "api"
const serviceName = "api"
index := serviceCreate(t, client, serviceName)
ch := make(chan []*api.ServiceEntry)
errCh := make(chan error)
go func() {
service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index})
if err == nil && q.QueryBackend != api.QueryBackendStreaming {
err = fmt.Errorf("invalid backend for this test %s", q.QueryBackend)
}
if err != nil {
errCh <- err
} else {
ch <- service
}
}()
require.NoError(t, client.Agent().ServiceRegister(
&api.AgentServiceRegistration{Name: serviceName, Port: 9998},
))
timer := time.NewTimer(1 * time.Second)
select {
case err := <-errCh:
require.NoError(t, err)
case service := <-ch:
require.Len(t, service, 1)
require.Equal(t, serviceName, service[0].Service.Service)
require.Equal(t, 9998, service[0].Service.Port)
case <-timer.C:
t.Fatalf("test timeout")
}
}
// Test health check GRPC call using Mixed (majority latest) Servers and Latest GA Clients
func TestMixedServersMajorityLatestGAClient(t *testing.T) {
var configs []libagent.Config
leaderConf, err := libagent.NewConfigBuilder(nil).ToAgentConfig()
require.NoError(t, err)
configs = append(configs, *leaderConf)
// This needs a specialized config since it is using an older version of the agent.
// That is missing fields like GRPC_TLS and PEERING, which are passed as defaults
serverConf := `{
"advertise_addr": "{{ GetInterfaceIP \"eth0\" }}",
"bind_addr": "0.0.0.0",
"client_addr": "0.0.0.0",
"log_level": "DEBUG",
"server": true,
"bootstrap_expect": 3
}`
for i := 1; i < 3; i++ {
configs = append(configs,
libagent.Config{
JSON: serverConf,
Cmd: []string{"agent"},
Version: *utils.LatestVersion,
Image: *utils.LatestImage,
})
}
cluster, err := libcluster.New(configs)
require.NoError(t, err)
defer terminate(t, cluster)
const (
numClients = 1
)
clients := clientsCreate(t, numClients, *utils.LatestImage, *utils.LatestVersion, cluster)
require.NoError(t, cluster.Join(clients))
client := clients[0].GetClient()
libcluster.WaitForLeader(t, cluster, client)
libcluster.WaitForMembers(t, client, 4)
serviceName := "api"
index := serviceCreate(t, client, serviceName)
ch := make(chan []*api.ServiceEntry)
errCh := make(chan error)
go func() {
service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index})
if err == nil && q.QueryBackend != api.QueryBackendStreaming {
err = fmt.Errorf("invalid backend for this test %s", q.QueryBackend)
}
if err != nil {
errCh <- err
} else {
ch <- service
}
}()
require.NoError(t, client.Agent().ServiceRegister(
&api.AgentServiceRegistration{Name: serviceName, Port: 9998},
))
timer := time.NewTimer(1 * time.Second)
select {
case err := <-errCh:
require.NoError(t, err)
case service := <-ch:
require.Len(t, service, 1)
require.Equal(t, serviceName, service[0].Service.Service)
require.Equal(t, 9998, service[0].Service.Port)
case <-timer.C:
t.Fatalf("test timeout")
}
}
// Test health check GRPC call using Mixed (majority target) Servers and Latest GA Clients
func TestMixedServersMajorityTargetGAClient(t *testing.T) {
var configs []libagent.Config
for i := 0; i < 2; i++ {
serverConf, err := libagent.NewConfigBuilder(nil).Bootstrap(3).ToAgentConfig()
require.NoError(t, err)
configs = append(configs, *serverConf)
}
leaderConf := `{
"advertise_addr": "{{ GetInterfaceIP \"eth0\" }}",
"bind_addr": "0.0.0.0",
"client_addr": "0.0.0.0",
"log_level": "DEBUG",
"server": true
}`
configs = append(configs,
libagent.Config{
JSON: leaderConf,
Cmd: []string{"agent"},
Version: *utils.LatestVersion,
Image: *utils.LatestImage,
})
cluster, err := libcluster.New(configs)
require.NoError(t, err)
defer terminate(t, cluster)
const (
numClients = 1
)
clients := clientsCreate(t, numClients, *utils.LatestImage, *utils.LatestVersion, cluster)
require.NoError(t, cluster.Join(clients))
client := clients[0].GetClient()
libcluster.WaitForLeader(t, cluster, client)
libcluster.WaitForMembers(t, client, 4)
serviceName := "api"
index := serviceCreate(t, client, serviceName)
ch := make(chan []*api.ServiceEntry)
errCh := make(chan error)
go func() {
service, q, err := client.Health().Service(serviceName, "", false, &api.QueryOptions{WaitIndex: index})
if err == nil && q.QueryBackend != api.QueryBackendStreaming {
err = fmt.Errorf("invalid backend for this test %s", q.QueryBackend)
}
if err != nil {
errCh <- err
} else {
ch <- service
}
}()
ch, errCh := serviceHealthBlockingQuery(client, serviceName, index)
require.NoError(t, client.Agent().ServiceRegister(
&api.AgentServiceRegistration{Name: serviceName, Port: 9998},
))
@ -227,76 +48,96 @@ func TestMixedServersMajorityTargetGAClient(t *testing.T) {
}
}
func clientsCreate(t *testing.T, numClients int, image string, version string, cluster *libcluster.Cluster) []libagent.Agent {
clients := make([]libagent.Agent, numClients)
// Test health check GRPC call using Mixed (majority latest) Servers and Latest GA Clients
func TestMixedServersMajorityLatestGAClient(t *testing.T) {
testMixedServersGAClient(t, false)
}
// This needs a specialized config since it is using an older version of the agent.
// That is missing fields like GRPC_TLS and PEERING, which are passed as defaults
conf := `{
"advertise_addr": "{{ GetInterfaceIP \"eth0\" }}",
"bind_addr": "0.0.0.0",
"client_addr": "0.0.0.0",
"log_level": "DEBUG"
}`
// Test health check GRPC call using Mixed (majority target) Servers and Latest GA Clients
func TestMixedServersMajorityTargetGAClient(t *testing.T) {
testMixedServersGAClient(t, true)
}
for i := 0; i < numClients; i++ {
var err error
clients[i], err = libagent.NewConsulContainer(context.Background(),
libagent.Config{
JSON: conf,
Cmd: []string{"agent"},
Version: version,
Image: image,
},
cluster.NetworkName,
cluster.Index)
require.NoError(t, err)
// testMixedServersGAClient performs a health check gRPC call against a cluster of
// mixed-version servers (the majority runs the target version when majorityIsTarget
// is true) and latest GA clients.
func testMixedServersGAClient(t *testing.T, majorityIsTarget bool) {
var (
latestOpts = libcluster.BuildOptions{
ConsulImageName: utils.LatestImageName,
ConsulVersion: utils.LatestVersion,
}
targetOpts = libcluster.BuildOptions{
ConsulImageName: utils.TargetImageName,
ConsulVersion: utils.TargetVersion,
}
majorityOpts libcluster.BuildOptions
minorityOpts libcluster.BuildOptions
)
if majorityIsTarget {
majorityOpts = targetOpts
minorityOpts = latestOpts
} else {
majorityOpts = latestOpts
minorityOpts = targetOpts
}
return clients
}
func serviceCreate(t *testing.T, client *api.Client, serviceName string) uint64 {
err := client.Agent().ServiceRegister(&api.AgentServiceRegistration{
Name: serviceName,
Port: 9999,
Connect: &api.AgentServiceConnect{
SidecarService: &api.AgentServiceRegistration{
Port: 22005,
},
},
})
require.NoError(t, err)
const (
numServers = 3
numClients = 1
)
service, meta, err := client.Catalog().Service(serviceName, "", &api.QueryOptions{})
require.NoError(t, err)
require.Len(t, service, 1)
require.Equal(t, serviceName, service[0].ServiceName)
require.Equal(t, 9999, service[0].ServicePort)
var configs []libcluster.Config
{
ctx := libcluster.NewBuildContext(t, minorityOpts)
return meta.LastIndex
}
conf := libcluster.NewConfigBuilder(ctx).
ToAgentConfig(t)
t.Logf("Cluster server (leader) config:\n%s", conf.JSON)
func serversCluster(t *testing.T, numServers int, version string, image string) *libcluster.Cluster {
var configs []libagent.Config
conf, err := libagent.NewConfigBuilder(nil).
Bootstrap(numServers).
ToAgentConfig()
require.NoError(t, err)
for i := 0; i < numServers; i++ {
configs = append(configs, *conf)
}
cluster, err := libcluster.New(configs)
{
ctx := libcluster.NewBuildContext(t, majorityOpts)
conf := libcluster.NewConfigBuilder(ctx).
Bootstrap(numServers).
ToAgentConfig(t)
t.Logf("Cluster server config:\n%s", conf.JSON)
for i := 1; i < numServers; i++ {
configs = append(configs, *conf)
}
}
cluster, err := libcluster.New(t, configs)
require.NoError(t, err)
libcluster.WaitForLeader(t, cluster, nil)
libcluster.WaitForMembers(t, cluster.Agents[0].GetClient(), numServers)
clientsCreate(t, numClients, utils.LatestImageName, utils.LatestVersion, cluster)
return cluster
}
client := cluster.APIClient(0)
func terminate(t *testing.T, cluster *libcluster.Cluster) {
err := cluster.Terminate()
require.NoError(t, err)
libcluster.WaitForLeader(t, cluster, client)
libcluster.WaitForMembers(t, client, 4) // TODO(rb): why 4?
const serviceName = "api"
index := serviceCreate(t, client, serviceName)
ch, errCh := serviceHealthBlockingQuery(client, serviceName, index)
require.NoError(t, client.Agent().ServiceRegister(
&api.AgentServiceRegistration{Name: serviceName, Port: 9998},
))
timer := time.NewTimer(3 * time.Second)
select {
case err := <-errCh:
require.NoError(t, err)
case service := <-ch:
require.Len(t, service, 1)
require.Equal(t, serviceName, service[0].Service.Service)
require.Equal(t, 9998, service[0].Service.Port)
case <-timer.C:
t.Fatalf("test timeout")
}
}

View File

@ -0,0 +1,92 @@
package upgrade
import (
"fmt"
"testing"
"github.com/hashicorp/consul/api"
"github.com/stretchr/testify/require"
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
)
func serversCluster(t *testing.T, numServers int, image, version string) *libcluster.Cluster {
t.Helper()
opts := libcluster.BuildOptions{
ConsulImageName: image,
ConsulVersion: version,
}
ctx := libcluster.NewBuildContext(t, opts)
conf := libcluster.NewConfigBuilder(ctx).
Bootstrap(numServers).
ToAgentConfig(t)
t.Logf("Cluster server config:\n%s", conf.JSON)
cluster, err := libcluster.NewN(t, *conf, numServers)
require.NoError(t, err)
libcluster.WaitForLeader(t, cluster, nil)
libcluster.WaitForMembers(t, cluster.APIClient(0), numServers)
return cluster
}
func clientsCreate(t *testing.T, numClients int, image, version string, cluster *libcluster.Cluster) {
t.Helper()
opts := libcluster.BuildOptions{
ConsulImageName: image,
ConsulVersion: version,
}
ctx := libcluster.NewBuildContext(t, opts)
conf := libcluster.NewConfigBuilder(ctx).
Client().
ToAgentConfig(t)
t.Logf("Cluster client config:\n%s", conf.JSON)
require.NoError(t, cluster.AddN(*conf, numClients, true))
}
func serviceCreate(t *testing.T, client *api.Client, serviceName string) uint64 {
require.NoError(t, client.Agent().ServiceRegister(&api.AgentServiceRegistration{
Name: serviceName,
Port: 9999,
Connect: &api.AgentServiceConnect{
SidecarService: &api.AgentServiceRegistration{
Port: 22005,
},
},
}))
service, meta, err := client.Catalog().Service(serviceName, "", &api.QueryOptions{})
require.NoError(t, err)
require.Len(t, service, 1)
require.Equal(t, serviceName, service[0].ServiceName)
require.Equal(t, 9999, service[0].ServicePort)
return meta.LastIndex
}
func serviceHealthBlockingQuery(client *api.Client, serviceName string, waitIndex uint64) (chan []*api.ServiceEntry, chan error) {
var (
ch = make(chan []*api.ServiceEntry, 1)
errCh = make(chan error, 1)
)
go func() {
opts := &api.QueryOptions{WaitIndex: waitIndex}
service, q, err := client.Health().Service(serviceName, "", false, opts)
if err == nil && q.QueryBackend != api.QueryBackendStreaming {
err = fmt.Errorf("invalid backend for this test %s", q.QueryBackend)
}
if err != nil {
errCh <- err
} else {
ch <- service
}
}()
return ch, errCh
}

View File

@ -4,14 +4,13 @@ import (
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
"github.com/stretchr/testify/require"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/test/integration/consul-container/test/topology"
)
// TestPeering_UpgradeToTarget_fromLatest checks peering status after dialing cluster
@ -30,35 +29,28 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
// },
{
oldversion: "1.14",
targetVersion: *utils.TargetVersion,
targetVersion: utils.TargetVersion,
},
}
run := func(t *testing.T, tc testcase) {
acceptingCluster, dialingCluster, _, staticClientSvcSidecar := topology.BasicPeeringTwoClustersSetup(t, tc.oldversion)
// move to teardown
defer func() {
err := acceptingCluster.Terminate()
require.NoErrorf(t, err, "termining accepting cluster")
dialingCluster.Terminate()
require.NoErrorf(t, err, "termining dialing cluster")
}()
accepting, dialing := libtopology.BasicPeeringTwoClustersSetup(t, tc.oldversion)
var (
acceptingCluster = accepting.Cluster
dialingCluster = dialing.Cluster
)
dialingClient, err := dialingCluster.GetClient(nil, false)
require.NoError(t, err)
_, port := staticClientSvcSidecar.GetAddr()
// Upgrade the dialingCluster cluster and assert peering is still ACTIVE
err = dialingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion)
acceptingClient, err := acceptingCluster.GetClient(nil, false)
require.NoError(t, err)
libassert.PeeringStatus(t, dialingClient, topology.DialingPeerName, api.PeeringStateActive)
libassert.HTTPServiceEchoes(t, "localhost", port)
// Upgrade the accepting cluster and assert peering is still ACTIVE
err = acceptingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion)
require.NoError(t, err)
require.NoError(t, acceptingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
libassert.PeeringStatus(t, dialingClient, topology.DialingPeerName, api.PeeringStateActive)
libassert.PeeringStatus(t, acceptingClient, libtopology.AcceptingPeerName, api.PeeringStateActive)
libassert.PeeringStatus(t, dialingClient, libtopology.DialingPeerName, api.PeeringStateActive)
}
for _, tc := range tcs {
@ -66,6 +58,6 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
func(t *testing.T) {
run(t, tc)
})
time.Sleep(3 * time.Second)
// time.Sleep(3 * time.Second)
}
}