testing/deployer: update deployer to use v2 catalog constructs when requested (#19046)

This updates the testing/deployer (aka "topology test") framework to conditionally 
configure and launch catalog constructs using v2 resources. This is controlled via a 
Version field on the Node construct in a topology.Config. It only functions for nodes 
of the dataplane kind, and it carries the other restrictions that apply to the rest of 
v2 (no peering, no wanfed, no mesh gateways).
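
As a rough sketch (using only field names that appear in the diffs below; the node 
name is hypothetical), opting a node into v2 looks like:

node := &topology.Node{
	Kind:    topology.NodeKindDataplane, // v2 only functions for dataplane nodes
	Version: topology.NodeVersionV2,     // launch this node's constructs as v2 resources
	Name:    "dc1-box1",
}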

Like config entries, you can statically provide a set of initial resources to be synced 
when bringing up the cluster (beyond those that are generated for you, such as 
workloads, services, etc.).
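
A minimal sketch of that, modeled on the InitialResources usage in the new test below 
(inside a test, with cluster being the *topology.Cluster; the resource and identity 
names here are hypothetical):

res := sprawltest.MustSetResourceData(t, &pbresource.Resource{
	Id: &pbresource.ID{
		Type: pbauth.TrafficPermissionsType,
		Name: "example-perms",
		// Tenancy omitted; the test below sets partition/namespace explicitly.
	},
}, &pbauth.TrafficPermissions{
	Destination: &pbauth.Destination{IdentityName: "example-server"},
	Action:      pbauth.Action_ACTION_ALLOW,
})
cluster.InitialResources = append(cluster.InitialResources, res)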

It is possible to author a test that can be freely converted between v1 and v2. 
However, switching to the multi-port definition on a topology.Service (aka 
"workload/instance") makes the topology ineligible for v1.

This also adds a starter set of "on every PR" integration tests for single-port and 
multi-port services under test-integ/catalogv2.
R.B. Boyer 2023-11-02 14:25:48 -05:00 committed by GitHub
parent 8f4c43727d
commit a72f868218
53 changed files with 2209 additions and 351 deletions


@@ -496,6 +496,8 @@ jobs:
contents: read
strategy:
fail-fast: false
env:
DEPLOYER_CONSUL_DATAPLANE_IMAGE: "docker.mirror.hashicorp.services/hashicorppreview/consul-dataplane:1.3-dev"
steps:
- name: Checkout code
uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
@@ -513,20 +515,22 @@
run: |
mkdir -p "${{ env.TEST_RESULTS_DIR }}"
export NOLOGBUFFER=1
cd ./test-integ/connect
cd ./test-integ
go run gotest.tools/gotestsum@v${{env.GOTESTSUM_VERSION}} \
--raw-command \
--format=standard-verbose \
--debug \
--packages="./..." \
-- \
go test \
-tags "${{ env.GOTAGS }}" \
-timeout=20m \
-parallel=2 \
-json . \
-json \
`go list -tags "${{ env.GOTAGS }}" ./... | grep -v peering_commontopo` \
--target-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \
--target-version local
--target-version local \
--latest-image ${{ env.CONSUL_LATEST_IMAGE_NAME }} \
--latest-version latest
env:
# this is needed because of incompatibility between RYUK container and GHA
GOTESTSUM_JUNITFILE: ${{ env.TEST_RESULTS_DIR }}/results.xml

test-integ/Makefile (new file)

@@ -0,0 +1,42 @@
SHELL := /bin/bash

.PHONY: noop
noop:

##@ Build

.PHONY: tidy
tidy: ## Run go mod tidy.
	go mod tidy

##@ Checks

.PHONY: format
format: ## Format the go files.
	@for f in $$(find . -name '*.go' -print); do \
		gofmt -s -w $$f ; \
	done

.PHONY: lint
lint: ## Run the full linting rules.
	golangci-lint run -v

.PHONY: vet
vet: ## Run go vet.
	go vet ./...

##@ Help

# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-formatting the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php
.PHONY: help
help: ## Display this help.
	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)


@@ -0,0 +1,284 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package catalogv2
import (
"fmt"
"strings"
"testing"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/test-integ/topoutil"
)
// TestBasicL4ExplicitDestination sets up the following:
//
// - 1 cluster (no peering / no wanfed)
// - 3 servers in that cluster
// - v2 arch is activated
// - for each tenancy, only using v2 constructs:
// - a client with one explicit destination to a single port service
// - a client with multiple explicit destinations to multiple ports of the
// same multiport service
//
// When this test is executed in CE it will only use the default/default
// tenancy.
//
// When this test is executed in Enterprise it will additionally test the same
// things within these tenancies:
//
// - part1/default
// - default/nsa
// - part1/nsa
func TestBasicL4ExplicitDestination(t *testing.T) {
cfg := testBasicL4ExplicitDestinationCreator{}.NewConfig(t)
sp := sprawltest.Launch(t, cfg)
var (
asserter = topoutil.NewAsserter(sp)
topo = sp.Topology()
cluster = topo.Clusters["dc1"]
ships = topo.ComputeRelationships()
)
clientV2 := sp.ResourceServiceClientForCluster(cluster.Name)
t.Log(topology.RenderRelationships(ships))
// Make sure things are in v2.
for _, name := range []string{
"single-server",
"single-client",
"multi-server",
"multi-client",
} {
libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, name, nil, 1)
}
// Check relationships
for _, ship := range ships {
t.Run("relationship: "+ship.String(), func(t *testing.T) {
var (
svc = ship.Caller
u = ship.Upstream
clusterPrefix string
)
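// Derive the expected Envoy cluster name prefix: local upstreams use the
// port-qualified internal name (additionally partition-qualified outside
// the default partition), while peered upstreams use the external name.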
if u.Peer == "" {
if u.ID.PartitionOrDefault() == "default" {
clusterPrefix = strings.Join([]string{u.PortName, u.ID.Name, u.ID.Namespace, u.Cluster, "internal"}, ".")
} else {
clusterPrefix = strings.Join([]string{u.PortName, u.ID.Name, u.ID.Namespace, u.ID.Partition, u.Cluster, "internal-v1"}, ".")
}
} else {
clusterPrefix = strings.Join([]string{u.ID.Name, u.ID.Namespace, u.Peer, "external"}, ".")
}
asserter.UpstreamEndpointStatus(t, svc, clusterPrefix+".", "HEALTHY", 1)
asserter.HTTPServiceEchoes(t, svc, u.LocalPort, "")
asserter.FortioFetch2FortioName(t, svc, u, cluster.Name, u.ID)
})
}
}
type testBasicL4ExplicitDestinationCreator struct{}
func (c testBasicL4ExplicitDestinationCreator) NewConfig(t *testing.T) *topology.Config {
const clusterName = "dc1"
servers := topoutil.NewTopologyServerSet(clusterName+"-server", 3, []string{clusterName, "wan"}, nil)
cluster := &topology.Cluster{
Enterprise: utils.IsEnterprise(),
Name: clusterName,
Nodes: servers,
}
lastNode := 0
nodeName := func() string {
lastNode++
return fmt.Sprintf("%s-box%d", clusterName, lastNode)
}
c.topologyConfigAddNodes(t, cluster, nodeName, "default", "default")
if cluster.Enterprise {
c.topologyConfigAddNodes(t, cluster, nodeName, "part1", "default")
c.topologyConfigAddNodes(t, cluster, nodeName, "part1", "nsa")
c.topologyConfigAddNodes(t, cluster, nodeName, "default", "nsa")
}
return &topology.Config{
Images: topoutil.TargetImages(),
Networks: []*topology.Network{
{Name: clusterName},
{Name: "wan", Type: "wan"},
},
Clusters: []*topology.Cluster{
cluster,
},
}
}
func (c testBasicL4ExplicitDestinationCreator) topologyConfigAddNodes(
t *testing.T,
cluster *topology.Cluster,
nodeName func() string,
partition,
namespace string,
) {
clusterName := cluster.Name
newServiceID := func(name string) topology.ServiceID {
return topology.ServiceID{
Partition: partition,
Namespace: namespace,
Name: name,
}
}
tenancy := &pbresource.Tenancy{
Partition: partition,
Namespace: namespace,
PeerName: "local",
}
singleportServerNode := &topology.Node{
Kind: topology.NodeKindDataplane,
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewFortioServiceWithDefaults(
clusterName,
newServiceID("single-server"),
topology.NodeVersionV2,
nil,
),
},
}
singleportClientNode := &topology.Node{
Kind: topology.NodeKindDataplane,
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewFortioServiceWithDefaults(
clusterName,
newServiceID("single-client"),
topology.NodeVersionV2,
func(svc *topology.Service) {
delete(svc.Ports, "grpc") // v2 mode turns this on, so turn it off
delete(svc.Ports, "http-alt") // v2 mode turns this on, so turn it off
svc.Upstreams = []*topology.Upstream{{
ID: newServiceID("single-server"),
PortName: "http",
LocalAddress: "0.0.0.0", // needed for an assertion
LocalPort: 5000,
}}
},
),
},
}
singleportTrafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
Id: &pbresource.ID{
Type: pbauth.TrafficPermissionsType,
Name: "single-server-perms",
Tenancy: tenancy,
},
}, &pbauth.TrafficPermissions{
Destination: &pbauth.Destination{
IdentityName: "single-server",
},
Action: pbauth.Action_ACTION_ALLOW,
Permissions: []*pbauth.Permission{{
Sources: []*pbauth.Source{{
IdentityName: "single-client",
Namespace: namespace,
}},
}},
})
multiportServerNode := &topology.Node{
Kind: topology.NodeKindDataplane,
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewFortioServiceWithDefaults(
clusterName,
newServiceID("multi-server"),
topology.NodeVersionV2,
nil,
),
},
}
multiportClientNode := &topology.Node{
Kind: topology.NodeKindDataplane,
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewFortioServiceWithDefaults(
clusterName,
newServiceID("multi-client"),
topology.NodeVersionV2,
func(svc *topology.Service) {
svc.Upstreams = []*topology.Upstream{
{
ID: newServiceID("multi-server"),
PortName: "http",
LocalAddress: "0.0.0.0", // needed for an assertion
LocalPort: 5000,
},
{
ID: newServiceID("multi-server"),
PortName: "http-alt",
LocalAddress: "0.0.0.0", // needed for an assertion
LocalPort: 5001,
},
}
},
),
},
}
multiportTrafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
Id: &pbresource.ID{
Type: pbauth.TrafficPermissionsType,
Name: "multi-server-perms",
Tenancy: tenancy,
},
}, &pbauth.TrafficPermissions{
Destination: &pbauth.Destination{
IdentityName: "multi-server",
},
Action: pbauth.Action_ACTION_ALLOW,
Permissions: []*pbauth.Permission{{
Sources: []*pbauth.Source{{
IdentityName: "multi-client",
Namespace: namespace,
}},
}},
})
cluster.Nodes = append(cluster.Nodes,
singleportClientNode,
singleportServerNode,
multiportClientNode,
multiportServerNode,
)
cluster.InitialResources = append(cluster.InitialResources,
singleportTrafficPerms,
multiportTrafficPerms,
)
}
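
For reference, the relationship table logged by the test above can be reproduced in 
any test that has a launched sprawl sp; a short sketch:

ships := sp.Topology().ComputeRelationships()
t.Log(topology.RenderRelationships(ships))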


@@ -4,6 +4,7 @@ go 1.20
require (
github.com/hashicorp/consul/api v1.24.0
github.com/hashicorp/consul/proto-public v0.4.1
github.com/hashicorp/consul/sdk v0.14.1
github.com/hashicorp/consul/test/integration/consul-container v0.0.0-20230628201853-bdf4fad7c5a5
github.com/hashicorp/consul/testing/deployer v0.0.0-20230811171106-4a0afb5d1373
@@ -93,8 +94,8 @@ require (
github.com/hashicorp/consul v1.16.1 // indirect
github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706 // indirect
github.com/hashicorp/consul-net-rpc v0.0.0-20221205195236-156cfab66a69 // indirect
github.com/hashicorp/consul-server-connection-manager v0.1.4 // indirect
github.com/hashicorp/consul/envoyextensions v0.4.1 // indirect
github.com/hashicorp/consul/proto-public v0.4.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.2 // indirect
github.com/hashicorp/go-connlimit v0.3.0 // indirect
@@ -104,6 +105,7 @@
github.com/hashicorp/go-msgpack v1.1.5 // indirect
github.com/hashicorp/go-msgpack/v2 v2.0.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-netaddrs v0.1.0 // indirect
github.com/hashicorp/go-plugin v1.4.5 // indirect
github.com/hashicorp/go-raftchunking v0.7.0 // indirect
github.com/hashicorp/go-retryablehttp v0.6.7 // indirect


@@ -395,6 +395,8 @@ github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706 h1:1ZEjnv
github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706/go.mod h1:1Cs8FlmD1BfSQXJGcFLSV5FuIx1AbJP+EJGdxosoS2g=
github.com/hashicorp/consul-net-rpc v0.0.0-20221205195236-156cfab66a69 h1:wzWurXrxfSyG1PHskIZlfuXlTSCj1Tsyatp9DtaasuY=
github.com/hashicorp/consul-net-rpc v0.0.0-20221205195236-156cfab66a69/go.mod h1:svUZZDvotY8zTODknUePc6mZ9pX8nN0ViGwWcUSOBEA=
github.com/hashicorp/consul-server-connection-manager v0.1.4 h1:wrcSRV6WGXFBNpNbN6XsdoGgBOyso7ZbN5VaWPEX1jY=
github.com/hashicorp/consul-server-connection-manager v0.1.4/go.mod h1:LMqHkALoLP0HUQKOG21xXYr0YPUayIQIHNTlmxG100E=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -429,6 +431,8 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-netaddrs v0.1.0 h1:TnlYvODD4C/wO+j7cX1z69kV5gOzI87u3OcUinANaW8=
github.com/hashicorp/go-netaddrs v0.1.0/go.mod h1:33+a/emi5R5dqRspOuZKO0E+Tuz5WV1F84eRWALkedA=
github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo=
github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s=
@@ -1266,8 +1270,6 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/grpc v1.57.2 h1:uw37EN34aMFFXB2QPW7Tq6tdTbind1GpRxw5aOX3a5k=
google.golang.org/grpc v1.57.2/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=


@@ -8,10 +8,11 @@ import (
"strings"
"testing"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/test-integ/topoutil"
)
// TestRotateGW ensures that peered services continue to be able to talk to their
@@ -145,7 +146,7 @@ func (s *suiteRotateGW) setup(t *testing.T, ct *commonTopo) {
if clu.Datacenter == agentlessDC {
nodeKind = topology.NodeKindDataplane
}
clu.Nodes = append(clu.Nodes, newTopologyMeshGatewaySet(
clu.Nodes = append(clu.Nodes, topoutil.NewTopologyMeshGatewaySet(
nodeKind,
"default",
s.newMGWNodeName,


@@ -4,23 +4,21 @@
package peering
import (
"bytes"
"context"
"fmt"
"strconv"
"testing"
"text/tabwriter"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/sprawl"
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/test-integ/topoutil"
)
// commonTopo helps create a shareable topology configured to represent
@@ -46,7 +44,7 @@ type commonTopo struct {
// set after Launch. Should be considered read-only
Sprawl *sprawl.Sprawl
Assert *asserter
Assert *topoutil.Asserter
// track per-DC services to prevent duplicates
services map[string]map[topology.ServiceID]struct{}
@@ -122,14 +120,14 @@ func (ct *commonTopo) Launch(t *testing.T) {
}
ct.Sprawl = sprawltest.Launch(t, ct.Cfg)
ct.Assert = newAsserter(ct.Sprawl)
ct.Assert = topoutil.NewAsserter(ct.Sprawl)
ct.postLaunchChecks(t)
}
// tests that use Relaunch might want to call this again afterwards
func (ct *commonTopo) postLaunchChecks(t *testing.T) {
t.Logf("TESTING RELATIONSHIPS: \n%s",
renderRelationships(computeRelationships(ct.Sprawl.Topology())),
topology.RenderRelationships(ct.Sprawl.Topology().ComputeRelationships()),
)
// check that exports line up as expected
@@ -317,41 +315,23 @@ func ConfigEntryPartition(p string) string {
return p
}
// disableNode is a no-op if the node is already disabled.
// DisableNode is a no-op if the node is already disabled.
func DisableNode(t *testing.T, cfg *topology.Config, clusterName string, nid topology.NodeID) *topology.Config {
nodes := cfg.Cluster(clusterName).Nodes
var found bool
for _, n := range nodes {
if n.ID() == nid {
found = true
if n.Disabled {
return cfg
}
t.Logf("disabling node %s in cluster %s", nid.String(), clusterName)
n.Disabled = true
break
}
changed, err := cfg.DisableNode(clusterName, nid)
require.NoError(t, err)
if changed {
t.Logf("disabling node %s in cluster %s", nid.String(), clusterName)
}
require.True(t, found, "expected to find nodeID %q in cluster %q", nid.String(), clusterName)
return cfg
}
// enableNode is a no-op if the node is already enabled.
// EnableNode is a no-op if the node is already enabled.
func EnableNode(t *testing.T, cfg *topology.Config, clusterName string, nid topology.NodeID) *topology.Config {
nodes := cfg.Cluster(clusterName).Nodes
var found bool
for _, n := range nodes {
if n.ID() == nid {
found = true
if !n.Disabled {
return cfg
}
t.Logf("enabling node %s in cluster %s", nid.String(), clusterName)
n.Disabled = false
break
}
changed, err := cfg.EnableNode(clusterName, nid)
require.NoError(t, err)
if changed {
t.Logf("enabling node %s in cluster %s", nid.String(), clusterName)
}
require.True(t, found, "expected to find nodeID %q in cluster %q", nid.String(), clusterName)
return cfg
}
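
Both helpers now delegate to the topology.Config methods. A sketch of the relaunch 
flow they support (the Sprawl.Relaunch call is assumed from the "tests that use 
Relaunch" comment above and may differ):

cfg := DisableNode(t, ct.Cfg, "dc1", nid)
require.NoError(t, ct.Sprawl.Relaunch(cfg)) // signature assumed
ct.postLaunchChecks(t)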
@@ -386,7 +366,7 @@ func addMeshGateways(c *topology.Cluster) {
nodeKind = topology.NodeKindDataplane
}
for _, p := range c.Partitions {
c.Nodes = topology.MergeSlices(c.Nodes, newTopologyMeshGatewaySet(
c.Nodes = topology.MergeSlices(c.Nodes, topoutil.NewTopologyMeshGatewaySet(
nodeKind,
p.Name,
fmt.Sprintf("%s-%s-mgw", c.Name, p.Name),
@@ -402,7 +382,7 @@ func clusterWithJustServers(name string, numServers int) *topology.Cluster {
Enterprise: utils.IsEnterprise(),
Name: name,
Datacenter: name,
Nodes: newTopologyServerSet(
Nodes: topoutil.NewTopologyServerSet(
name+"-server",
numServers,
[]string{name},
@@ -458,168 +438,11 @@ func injectTenancies(clu *topology.Cluster) {
}
}
func newTopologyServerSet(
namePrefix string,
num int,
networks []string,
mutateFn func(i int, node *topology.Node),
) []*topology.Node {
var out []*topology.Node
for i := 1; i <= num; i++ {
name := namePrefix + strconv.Itoa(i)
node := &topology.Node{
Kind: topology.NodeKindServer,
Name: name,
}
for _, net := range networks {
node.Addresses = append(node.Addresses, &topology.Address{Network: net})
}
if mutateFn != nil {
mutateFn(i, node)
}
out = append(out, node)
}
return out
}
func newTopologyMeshGatewaySet(
nodeKind topology.NodeKind,
partition string,
namePrefix string,
num int,
networks []string,
mutateFn func(i int, node *topology.Node),
) []*topology.Node {
var out []*topology.Node
for i := 1; i <= num; i++ {
name := namePrefix + strconv.Itoa(i)
node := &topology.Node{
Kind: nodeKind,
Partition: partition,
Name: name,
Services: []*topology.Service{{
ID: topology.ServiceID{Name: "mesh-gateway"},
Port: 8443,
EnvoyAdminPort: 19000,
IsMeshGateway: true,
}},
}
for _, net := range networks {
node.Addresses = append(node.Addresses, &topology.Address{Network: net})
}
if mutateFn != nil {
mutateFn(i, node)
}
out = append(out, node)
}
return out
}
const HashicorpDockerProxy = "docker.mirror.hashicorp.services"
// Deprecated: topoutil.NewFortioServiceWithDefaults
func NewFortioServiceWithDefaults(
cluster string,
sid topology.ServiceID,
mut func(s *topology.Service),
) *topology.Service {
const (
httpPort = 8080
grpcPort = 8079
adminPort = 19000
)
sid.Normalize()
svc := &topology.Service{
ID: sid,
Image: HashicorpDockerProxy + "/fortio/fortio",
Port: httpPort,
EnvoyAdminPort: adminPort,
CheckTCP: "127.0.0.1:" + strconv.Itoa(httpPort),
Env: []string{
"FORTIO_NAME=" + cluster + "::" + sid.String(),
},
Command: []string{
"server",
"-http-port", strconv.Itoa(httpPort),
"-grpc-port", strconv.Itoa(grpcPort),
"-redirect-port", "-disabled",
},
}
if mut != nil {
mut(svc)
}
return svc
}
// computeRelationships will analyze a full topology and generate all of the
// downstream/upstream information for all of them.
func computeRelationships(topo *topology.Topology) []Relationship {
var out []Relationship
for _, cluster := range topo.Clusters {
for _, n := range cluster.Nodes {
for _, s := range n.Services {
for _, u := range s.Upstreams {
out = append(out, Relationship{
Caller: s,
Upstream: u,
})
}
}
}
}
return out
}
// renderRelationships will take the output of ComputeRelationships and display
// it in tabular form.
func renderRelationships(ships []Relationship) string {
var buf bytes.Buffer
w := tabwriter.NewWriter(&buf, 0, 0, 3, ' ', tabwriter.Debug)
fmt.Fprintf(w, "DOWN\tnode\tservice\tport\tUP\tservice\t\n")
for _, r := range ships {
fmt.Fprintf(w,
"%s\t%s\t%s\t%d\t%s\t%s\t\n",
r.downCluster(),
r.Caller.Node.ID().String(),
r.Caller.ID.String(),
r.Upstream.LocalPort,
r.upCluster(),
r.Upstream.ID.String(),
)
}
fmt.Fprintf(w, "\t\t\t\t\t\t\n")
w.Flush()
return buf.String()
}
type Relationship struct {
Caller *topology.Service
Upstream *topology.Upstream
}
func (r Relationship) String() string {
return fmt.Sprintf(
"%s on %s in %s via :%d => %s in %s",
r.Caller.ID.String(),
r.Caller.Node.ID().String(),
r.downCluster(),
r.Upstream.LocalPort,
r.Upstream.ID.String(),
r.upCluster(),
)
}
func (r Relationship) downCluster() string {
return r.Caller.Node.Cluster
}
func (r Relationship) upCluster() string {
return r.Upstream.Cluster
return topoutil.NewFortioServiceWithDefaults(cluster, sid, topology.NodeVersionV1, mut)
}


@@ -1,7 +1,7 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package peering
package topoutil
import (
"fmt"
@@ -12,18 +12,17 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil/retry"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// asserter is a utility to help in reducing boilerplate in invoking test
// Asserter is a utility to help in reducing boilerplate in invoking test
// assertions against consul-topology Sprawl components.
//
// The methods should largely take in *topology.Service instances in lieu of
@@ -33,32 +32,33 @@ import (
// If it's up to the test (like picking an upstream) leave port as an argument
// but still take the service and use that to grab the local ip from the
// topology.Node.
type asserter struct {
sp sprawlLite
type Asserter struct {
sp SprawlLite
}
// *sprawl.Sprawl satisfies this. We don't need anything else.
type sprawlLite interface {
type SprawlLite interface {
HTTPClientForCluster(clusterName string) (*http.Client, error)
APIClientForNode(clusterName string, nid topology.NodeID, token string) (*api.Client, error)
APIClientForCluster(clusterName string, token string) (*api.Client, error)
ResourceServiceClientForCluster(clusterName string) pbresource.ResourceServiceClient
Topology() *topology.Topology
}
// newAsserter creates a new assertion helper for the provided sprawl.
func newAsserter(sp sprawlLite) *asserter {
return &asserter{
// NewAsserter creates a new assertion helper for the provided sprawl.
func NewAsserter(sp SprawlLite) *Asserter {
return &Asserter{
sp: sp,
}
}
func (a *asserter) mustGetHTTPClient(t *testing.T, cluster string) *http.Client {
func (a *Asserter) mustGetHTTPClient(t *testing.T, cluster string) *http.Client {
client, err := a.httpClientFor(cluster)
require.NoError(t, err)
return client
}
func (a *asserter) mustGetAPIClient(t *testing.T, cluster string) *api.Client {
func (a *Asserter) mustGetAPIClient(t *testing.T, cluster string) *api.Client {
clu := a.sp.Topology().Clusters[cluster]
cl, err := a.sp.APIClientForCluster(clu.Name, "")
require.NoError(t, err)
@@ -70,7 +70,7 @@ func (a *asserter) mustGetAPIClient(t *testing.T, cluster string) *api.Client {
//
// Use this in methods below to magically pick the right proxied http client
// given the home of each node being checked.
func (a *asserter) httpClientFor(cluster string) (*http.Client, error) {
func (a *Asserter) httpClientFor(cluster string) (*http.Client, error) {
client, err := a.sp.HTTPClientForCluster(cluster)
if err != nil {
return nil, err
@@ -83,7 +83,7 @@ func (a *asserter) httpClientFor(cluster string) (*http.Client, error) {
// Exposes libassert.UpstreamEndpointStatus for use against a Sprawl.
//
// NOTE: this doesn't take a port b/c you always want to use the envoy admin port.
func (a *asserter) UpstreamEndpointStatus(
func (a *Asserter) UpstreamEndpointStatus(
t *testing.T,
service *topology.Service,
clusterName string,
@@ -107,7 +107,7 @@ func (a *asserter) UpstreamEndpointStatus(
// Exposes libassert.HTTPServiceEchoes for use against a Sprawl.
//
// NOTE: this takes a port b/c you may want to reach this via your choice of upstream.
func (a *asserter) HTTPServiceEchoes(
func (a *Asserter) HTTPServiceEchoes(
t *testing.T,
service *topology.Service,
port int,
@@ -131,7 +131,7 @@ func (a *asserter) HTTPServiceEchoes(
// Exposes libassert.HTTPServiceEchoes for use against a Sprawl.
//
// NOTE: this takes a port b/c you may want to reach this via your choice of upstream.
func (a *asserter) HTTPServiceEchoesResHeader(
func (a *Asserter) HTTPServiceEchoesResHeader(
t *testing.T,
service *topology.Service,
port int,
@@ -149,7 +149,7 @@ func (a *asserter) HTTPServiceEchoesResHeader(
libassert.HTTPServiceEchoesResHeaderWithClient(t, client, addr, path, expectedResHeader)
}
func (a *asserter) HTTPStatus(
func (a *Asserter) HTTPStatus(
t *testing.T,
service *topology.Service,
port int,
@@ -179,7 +179,7 @@ func (a *asserter) HTTPStatus(
}
// HealthyWithPeer asserts that the service sid in cluster, as exported by peer peerName, is passing health checks.
func (a *asserter) HealthyWithPeer(t *testing.T, cluster string, sid topology.ServiceID, peerName string) {
func (a *Asserter) HealthyWithPeer(t *testing.T, cluster string, sid topology.ServiceID, peerName string) {
t.Helper()
cl := a.mustGetAPIClient(t, cluster)
retry.RunWith(&retry.Timer{Timeout: time.Minute * 1, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
@@ -198,7 +198,7 @@ func (a *asserter) HealthyWithPeer(t *testing.T, cluster string, sid topology.Se
})
}
func (a *asserter) UpstreamEndpointHealthy(t *testing.T, svc *topology.Service, upstream *topology.Upstream) {
func (a *Asserter) UpstreamEndpointHealthy(t *testing.T, svc *topology.Service, upstream *topology.Upstream) {
t.Helper()
node := svc.Node
ip := node.LocalAddress()
@@ -216,68 +216,86 @@ func (a *asserter) UpstreamEndpointHealthy(t *testing.T, svc *topology.Service,
)
}
type testingT interface {
require.TestingT
Helper()
}
// does a fortio /fetch2 to the given fortio service, targeting the given upstream. Returns
// the body, and response with response.Body already Closed.
//
// We treat 400, 503, and 504s as retryable errors
func (a *asserter) fortioFetch2Upstream(t *testing.T, fortioSvc *topology.Service, upstream *topology.Upstream, path string) (body []byte, res *http.Response) {
func (a *Asserter) fortioFetch2Upstream(
t testingT,
client *http.Client,
addr string,
upstream *topology.Upstream,
path string,
) (body []byte, res *http.Response) {
t.Helper()
// TODO: fortioSvc.ID.Normalize()? or should that be up to the caller?
node := fortioSvc.Node
client := a.mustGetHTTPClient(t, node.Cluster)
urlbase := fmt.Sprintf("%s:%d", node.LocalAddress(), fortioSvc.Port)
url := fmt.Sprintf("http://%s/fortio/fetch2?url=%s", urlbase,
url := fmt.Sprintf("http://%s/fortio/fetch2?url=%s", addr,
url.QueryEscape(fmt.Sprintf("http://localhost:%d/%s", upstream.LocalPort, path)),
)
req, err := http.NewRequest(http.MethodPost, url, nil)
require.NoError(t, err)
retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
res, err = client.Do(req)
require.NoError(r, err)
defer res.Body.Close()
// not sure when these happen, suspect it's when the mesh gateway in the peer is not yet ready
require.NotEqual(r, http.StatusServiceUnavailable, res.StatusCode)
require.NotEqual(r, http.StatusGatewayTimeout, res.StatusCode)
// not sure when this happens, suspect it's when envoy hasn't configured the local upstream yet
require.NotEqual(r, http.StatusBadRequest, res.StatusCode)
body, err = io.ReadAll(res.Body)
require.NoError(r, err)
})
res, err = client.Do(req)
require.NoError(t, err)
defer res.Body.Close()
// not sure when these happen, suspect it's when the mesh gateway in the peer is not yet ready
require.NotEqual(t, http.StatusServiceUnavailable, res.StatusCode)
require.NotEqual(t, http.StatusGatewayTimeout, res.StatusCode)
// not sure when this happens, suspect it's when envoy hasn't configured the local upstream yet
require.NotEqual(t, http.StatusBadRequest, res.StatusCode)
body, err = io.ReadAll(res.Body)
require.NoError(t, err)
return body, res
}
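// Note: retries now live in the callers below (FortioFetch2HeaderEcho and
// FortioFetch2FortioName), which pass a *retry.R through the testingT
// parameter so that each retry attempt re-issues the fetch.
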
// uses the /fortio/fetch2 endpoint to do a header echo check against an
// upstream fortio
func (a *asserter) FortioFetch2HeaderEcho(t *testing.T, fortioSvc *topology.Service, upstream *topology.Upstream) {
func (a *Asserter) FortioFetch2HeaderEcho(t *testing.T, fortioSvc *topology.Service, upstream *topology.Upstream) {
const kPassphrase = "x-passphrase"
const passphrase = "hello"
path := (fmt.Sprintf("/?header=%s:%s", kPassphrase, passphrase))
var (
node = fortioSvc.Node
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioSvc.PortOrDefault("http"))
client = a.mustGetHTTPClient(t, node.Cluster)
)
retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
_, res := a.fortioFetch2Upstream(t, fortioSvc, upstream, path)
require.Equal(t, http.StatusOK, res.StatusCode)
_, res := a.fortioFetch2Upstream(r, client, addr, upstream, path)
require.Equal(r, http.StatusOK, res.StatusCode)
v := res.Header.Get(kPassphrase)
require.Equal(t, passphrase, v)
require.Equal(r, passphrase, v)
})
}
// similar to libassert.AssertFortioName,
// uses the /fortio/fetch2 endpoint to hit the debug endpoint on the upstream,
// and assert that the FORTIO_NAME == name
func (a *asserter) FortioFetch2FortioName(t *testing.T, fortioSvc *topology.Service, upstream *topology.Upstream, clusterName string, sid topology.ServiceID) {
func (a *Asserter) FortioFetch2FortioName(t *testing.T, fortioSvc *topology.Service, upstream *topology.Upstream, clusterName string, sid topology.ServiceID) {
t.Helper()
var (
node = fortioSvc.Node
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioSvc.PortOrDefault("http"))
client = a.mustGetHTTPClient(t, node.Cluster)
)
var fortioNameRE = regexp.MustCompile(("\nFORTIO_NAME=(.+)\n"))
path := "/debug?env=dump"
retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
body, res := a.fortioFetch2Upstream(t, fortioSvc, upstream, path)
require.Equal(t, http.StatusOK, res.StatusCode)
body, res := a.fortioFetch2Upstream(r, client, addr, upstream, path)
require.Equal(r, http.StatusOK, res.StatusCode)
// TODO: not sure we should retry these?
m := fortioNameRE.FindStringSubmatch(string(body))
@@ -289,7 +307,7 @@ func (a *asserter) FortioFetch2FortioName(t *testing.T, fortioSvc *topology.Serv
// CatalogServiceExists is the same as libassert.CatalogServiceExists, except that it uses
// a proxied API client
func (a *asserter) CatalogServiceExists(t *testing.T, cluster string, svc string, opts *api.QueryOptions) {
func (a *Asserter) CatalogServiceExists(t *testing.T, cluster string, svc string, opts *api.QueryOptions) {
t.Helper()
cl := a.mustGetAPIClient(t, cluster)
libassert.CatalogServiceExists(t, cl, svc, opts)


@@ -0,0 +1,120 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package topoutil
import (
"strconv"
"github.com/hashicorp/consul/testing/deployer/topology"
)
const HashicorpDockerProxy = "docker.mirror.hashicorp.services"
func NewFortioServiceWithDefaults(
cluster string,
sid topology.ServiceID,
nodeVersion topology.NodeVersion,
mut func(s *topology.Service),
) *topology.Service {
const (
httpPort = 8080
grpcPort = 8079
adminPort = 19000
)
sid.Normalize()
svc := &topology.Service{
ID: sid,
Image: HashicorpDockerProxy + "/fortio/fortio",
EnvoyAdminPort: adminPort,
CheckTCP: "127.0.0.1:" + strconv.Itoa(httpPort),
Env: []string{
"FORTIO_NAME=" + cluster + "::" + sid.String(),
},
Command: []string{
"server",
"-http-port", strconv.Itoa(httpPort),
"-grpc-port", strconv.Itoa(grpcPort),
"-redirect-port", "-disabled",
},
}
if nodeVersion == topology.NodeVersionV2 {
svc.Ports = map[string]int{
"http": httpPort,
"http-alt": httpPort,
"grpc": grpcPort,
}
} else {
svc.Port = httpPort
}
if mut != nil {
mut(svc)
}
return svc
}
func NewTopologyServerSet(
namePrefix string,
num int,
networks []string,
mutateFn func(i int, node *topology.Node),
) []*topology.Node {
var out []*topology.Node
for i := 1; i <= num; i++ {
name := namePrefix + strconv.Itoa(i)
node := &topology.Node{
Kind: topology.NodeKindServer,
Name: name,
}
for _, net := range networks {
node.Addresses = append(node.Addresses, &topology.Address{Network: net})
}
if mutateFn != nil {
mutateFn(i, node)
}
out = append(out, node)
}
return out
}
func NewTopologyMeshGatewaySet(
nodeKind topology.NodeKind,
partition string,
namePrefix string,
num int,
networks []string,
mutateFn func(i int, node *topology.Node),
) []*topology.Node {
var out []*topology.Node
for i := 1; i <= num; i++ {
name := namePrefix + strconv.Itoa(i)
node := &topology.Node{
Kind: nodeKind,
Partition: partition,
Name: name,
Services: []*topology.Service{{
ID: topology.ServiceID{Name: "mesh-gateway"},
Port: 8443,
EnvoyAdminPort: 19000,
IsMeshGateway: true,
}},
}
for _, net := range networks {
node.Addresses = append(node.Addresses, &topology.Address{Network: net})
}
if mutateFn != nil {
mutateFn(i, node)
}
out = append(out, node)
}
return out
}
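
These helpers are straight moves out of the peering package; a usage sketch matching 
the call sites updated elsewhere in this change (the name prefix is hypothetical):

servers := topoutil.NewTopologyServerSet("dc1-server", 3, []string{"dc1", "wan"}, nil)
mgws := topoutil.NewTopologyMeshGatewaySet(
	topology.NodeKindDataplane, // node kind, chosen by the caller
	"default",                  // partition
	"dc1-default-mgw",          // name prefix
	1,                          // number of gateway nodes
	[]string{"dc1", "wan"},     // networks to attach
	nil,                        // optional per-node mutate hook
)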


@@ -0,0 +1,36 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package topoutil
import (
"os"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/topology"
)
func TargetImages() topology.Images {
// Start with no preferences.
var images topology.Images
if !runningInCI() {
// Until 1.17 GAs, we want the pre-release versions for these tests,
// run outside of CI for convenience.
images = topology.Images{
ConsulCE: HashicorpDockerProxy + "/hashicorppreview/consul:1.17-dev",
ConsulEnterprise: HashicorpDockerProxy + "/hashicorppreview/consul-enterprise:1.17-dev",
Dataplane: HashicorpDockerProxy + "/hashicorppreview/consul-dataplane:1.3-dev",
}
}
// We want the image overridden by the local build produced by
// 'make test-compat-integ-setup' or 'make dev-docker'.
testImages := utils.TargetImages()
images = images.OverrideWith(testImages)
return images
}
func runningInCI() bool {
return os.Getenv("GITHUB_ACTIONS") != "" || os.Getenv("CI") != ""
}
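
A sketch of how these defaults are expected to compose: OverrideWith lets the flag- 
and env-driven images from utils.TargetImages(), including any 
DEPLOYER_CONSUL_DATAPLANE_IMAGE override as set in the workflow change above, win 
over the preview defaults.

cfg := &topology.Config{
	// Preview defaults outside CI, overridden by --target-image/--target-version
	// and DEPLOYER_CONSUL_DATAPLANE_IMAGE where provided.
	Images: topoutil.TargetImages(),
}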


@@ -31,7 +31,6 @@ require (
github.com/testcontainers/testcontainers-go v0.22.0
golang.org/x/mod v0.12.0
google.golang.org/grpc v1.57.2
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5
)
require (
@@ -105,6 +104,7 @@
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706 // indirect
github.com/hashicorp/consul-net-rpc v0.0.0-20221205195236-156cfab66a69 // indirect
github.com/hashicorp/consul-server-connection-manager v0.1.4 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-bexpr v0.1.2 // indirect
github.com/hashicorp/go-connlimit v0.3.0 // indirect
@@ -113,6 +113,7 @@
github.com/hashicorp/go-memdb v1.3.4 // indirect
github.com/hashicorp/go-msgpack v1.1.5 // indirect
github.com/hashicorp/go-msgpack/v2 v2.0.0 // indirect
github.com/hashicorp/go-netaddrs v0.1.0 // indirect
github.com/hashicorp/go-plugin v1.4.5 // indirect
github.com/hashicorp/go-raftchunking v0.7.0 // indirect
github.com/hashicorp/go-retryablehttp v0.6.7 // indirect
@@ -225,6 +226,7 @@
k8s.io/client-go v0.26.2 // indirect
k8s.io/klog/v2 v2.90.1 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect


@@ -391,6 +391,8 @@ github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706 h1:1ZEjnv
github.com/hashicorp/consul-awsauth v0.0.0-20220713182709-05ac1c5c2706/go.mod h1:1Cs8FlmD1BfSQXJGcFLSV5FuIx1AbJP+EJGdxosoS2g=
github.com/hashicorp/consul-net-rpc v0.0.0-20221205195236-156cfab66a69 h1:wzWurXrxfSyG1PHskIZlfuXlTSCj1Tsyatp9DtaasuY=
github.com/hashicorp/consul-net-rpc v0.0.0-20221205195236-156cfab66a69/go.mod h1:svUZZDvotY8zTODknUePc6mZ9pX8nN0ViGwWcUSOBEA=
github.com/hashicorp/consul-server-connection-manager v0.1.4 h1:wrcSRV6WGXFBNpNbN6XsdoGgBOyso7ZbN5VaWPEX1jY=
github.com/hashicorp/consul-server-connection-manager v0.1.4/go.mod h1:LMqHkALoLP0HUQKOG21xXYr0YPUayIQIHNTlmxG100E=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -425,6 +427,8 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-netaddrs v0.1.0 h1:TnlYvODD4C/wO+j7cX1z69kV5gOzI87u3OcUinANaW8=
github.com/hashicorp/go-netaddrs v0.1.0/go.mod h1:33+a/emi5R5dqRspOuZKO0E+Tuz5WV1F84eRWALkedA=
github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ=
github.com/hashicorp/go-plugin v1.4.5 h1:oTE/oQR4eghggRg8VY7PAz3dr++VwDNBGCcOfIvHpBo=
github.com/hashicorp/go-plugin v1.4.5/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s=


@@ -75,6 +75,7 @@ func GetEnvoyListenerTCPFiltersWithClient(
// AssertUpstreamEndpointStatus validates that the proxy was configured with the provided clusterName in the given healthStatus
func AssertUpstreamEndpointStatus(t *testing.T, adminPort int, clusterName, healthStatus string, count int) {
t.Helper()
require.True(t, adminPort > 0)
AssertUpstreamEndpointStatusWithClient(
t,
@@ -94,6 +95,7 @@
healthStatus string,
count int,
) {
t.Helper()
require.NotNil(t, client)
require.NotEmpty(t, addr)
failer := func() *retry.Timer {
@@ -126,6 +128,7 @@
// AssertEnvoyMetricAtMost asserts that the metric filtered by prefix and metric name is <= count
func AssertEnvoyMetricAtMost(t *testing.T, adminPort int, prefix, metric string, count int) {
t.Helper()
var (
stats string
err error


@@ -12,12 +12,16 @@ import (
"testing"
"time"
"github.com/hashicorp/consul/api"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testing/deployer/util"
"github.com/hashicorp/go-cleanhttp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
)
@@ -26,6 +30,65 @@
defaultHTTPWait = defaultWait
)
// CatalogV2ServiceExists verifies the service name exists in the Consul catalog
func CatalogV2ServiceExists(t *testing.T, client pbresource.ResourceServiceClient, svc string, tenancy *pbresource.Tenancy) {
t.Helper()
CatalogV2ServiceHasEndpointCount(t, client, svc, tenancy, -1)
}
// CatalogV2ServiceDoesNotExist verifies the service name does not exist in the Consul catalog
func CatalogV2ServiceDoesNotExist(t *testing.T, client pbresource.ResourceServiceClient, svc string, tenancy *pbresource.Tenancy) {
t.Helper()
ctx := testutil.TestContext(t)
retry.Run(t, func(r *retry.R) {
got, err := util.GetDecodedResource[*pbcatalog.Service](ctx, client, &pbresource.ID{
Type: pbcatalog.ServiceType,
Name: svc,
Tenancy: tenancy,
})
require.NoError(r, err, "error reading service data")
require.Nil(r, got, "unexpectedly found Service resource for %q", svc)
got2, err := util.GetDecodedResource[*pbcatalog.ServiceEndpoints](ctx, client, &pbresource.ID{
Type: pbcatalog.ServiceEndpointsType,
Name: svc,
Tenancy: tenancy,
})
require.NotNil(r, err, "error reading service data")
require.Nil(r, got2, "unexpectedly found ServiceEndpoints resource for %q", svc)
})
}
// CatalogV2ServiceHasEndpointCount verifies the service name exists in the Consul catalog and has the specified
// number of workload endpoints.
func CatalogV2ServiceHasEndpointCount(t *testing.T, client pbresource.ResourceServiceClient, svc string, tenancy *pbresource.Tenancy, count int) {
t.Helper()
require.False(t, count == 0)
ctx := testutil.TestContext(t)
retry.Run(t, func(r *retry.R) {
got, err := util.GetDecodedResource[*pbcatalog.Service](ctx, client, &pbresource.ID{
Type: pbcatalog.ServiceType,
Name: svc,
Tenancy: tenancy,
})
require.NoError(r, err, "error reading service data")
require.NotNil(r, got, "did not find Service resource for %q", svc)
got2, err := util.GetDecodedResource[*pbcatalog.ServiceEndpoints](ctx, client, &pbresource.ID{
Type: pbcatalog.ServiceEndpointsType,
Name: svc,
Tenancy: tenancy,
})
require.NoError(r, err, "error reading service data")
require.NotNil(r, got2, "did not find ServiceEndpoints resource for %q", svc)
require.NotEmpty(r, got2.Data.Endpoints, "did not find any workload data in the ServiceEndpoints resource for %q", svc)
if count > 0 {
require.Len(r, got2.Data.Endpoints, count)
}
})
}
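
A usage sketch of the new v2 assertions (CatalogV2ServiceExists passes -1 internally 
so only existence is checked; a positive count requires exactly that many endpoints; 
sp is a launched sprawl and the absent service name below is hypothetical):

clientV2 := sp.ResourceServiceClientForCluster("dc1")
libassert.CatalogV2ServiceExists(t, clientV2, "single-server", nil)
libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, "single-server", nil, 1)
libassert.CatalogV2ServiceDoesNotExist(t, clientV2, "absent-svc", nil)
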
// CatalogServiceExists verifies the service name exists in the Consul catalog
func CatalogServiceExists(t *testing.T, c *api.Client, svc string, opts *api.QueryOptions) {
retry.Run(t, func(r *retry.R) {
@@ -39,6 +102,15 @@ func CatalogServiceExists(t *testing.T, c *api.Client, svc string, opts *api.Que
})
}
// CatalogServiceDoesNotExist verifies the service name does not exist in the Consul catalog
func CatalogServiceDoesNotExist(t *testing.T, c *api.Client, svc string, opts *api.QueryOptions) {
retry.Run(t, func(r *retry.R) {
services, _, err := c.Catalog().Service(svc, "", opts)
require.NoError(r, err, "error reading service data")
require.Empty(r, services)
})
}
// CatalogServiceHasInstanceCount verifies the service name exists in the Consul catalog and has the specified
// number of instances.
func CatalogServiceHasInstanceCount(t *testing.T, c *api.Client, svc string, count int, opts *api.QueryOptions) {
@ -66,6 +138,17 @@ func CatalogNodeExists(t *testing.T, c *api.Client, nodeName string) {
})
}
// CatalogNodeDoesNotExist verifies the node name does not exist in the Consul catalog
func CatalogNodeDoesNotExist(t *testing.T, c *api.Client, nodeName string) {
retry.Run(t, func(r *retry.R) {
node, _, err := c.Catalog().Node(nodeName, nil)
if err != nil {
r.Fatal("error reading node data")
}
require.Nil(r, node)
})
}
// CatalogServiceIsHealthy verifies the service name exists and all instances pass healthchecks
func CatalogServiceIsHealthy(t *testing.T, c *api.Client, svc string, opts *api.QueryOptions) {
CatalogServiceExists(t, c, svc, opts)
@@ -105,6 +188,7 @@ func HTTPServiceEchoesWithClient(t *testing.T, client *http.Client, addr string,
func HTTPServiceEchoesResHeader(t *testing.T, ip string, port int, path string, expectedResHeader map[string]string) {
doHTTPServiceEchoes(t, ip, port, path, nil, expectedResHeader)
}
func HTTPServiceEchoesResHeaderWithClient(t *testing.T, client *http.Client, addr string, path string, expectedResHeader map[string]string) {
doHTTPServiceEchoesWithClient(t, client, addr, path, nil, expectedResHeader)
}
@@ -142,7 +226,7 @@ func doHTTPServiceEchoesWithClient(
reader := strings.NewReader(phrase)
req, err := http.NewRequest("POST", url, reader)
require.NoError(t, err, "could not construct request")
require.NoError(r, err, "could not construct request")
for k, v := range requestHeaders {
req.Header.Add(k, v)


@@ -5,6 +5,7 @@ package utils
import (
"flag"
"os"
"strings"
"github.com/hashicorp/consul/testing/deployer/topology"
@@ -59,15 +60,18 @@ func GetLatestImageName() string {
func TargetImages() topology.Images {
img := DockerImage(targetImageName, TargetVersion)
var set topology.Images
if IsEnterprise() {
return topology.Images{
ConsulEnterprise: img,
}
set.ConsulEnterprise = img
} else {
return topology.Images{
ConsulCE: img,
}
set.ConsulCE = img
}
if cdp := os.Getenv("DEPLOYER_CONSUL_DATAPLANE_IMAGE"); cdp != "" {
set.Dataplane = cdp
}
return set
}
func IsEnterprise() bool { return isInEnterpriseRepo }


@@ -4,45 +4,64 @@ go 1.20
require (
github.com/google/go-cmp v0.5.9
github.com/hashicorp/consul-server-connection-manager v0.1.4
github.com/hashicorp/consul/api v1.24.0
github.com/hashicorp/consul/proto-public v0.1.0
github.com/hashicorp/consul/sdk v0.14.1
github.com/hashicorp/go-cleanhttp v0.5.2
github.com/hashicorp/go-hclog v1.5.0
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/go-rootcerts v1.0.2
github.com/hashicorp/hcl/v2 v2.16.2
github.com/mitchellh/copystructure v1.2.0
github.com/rboyer/safeio v0.2.2
github.com/stretchr/testify v1.8.3
golang.org/x/crypto v0.14.0
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
google.golang.org/grpc v1.56.3
google.golang.org/protobuf v1.31.0
)
require (
github.com/agext/levenshtein v1.2.1 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/fatih/color v1.14.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-netaddrs v0.1.0 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/go-version v1.2.1 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.11.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.26.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/zclconf/go-cty v1.12.1 // indirect
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace github.com/hashicorp/consul/sdk => ../../sdk
replace (
github.com/hashicorp/consul/api => ../../api
github.com/hashicorp/consul/proto-public => ../../proto-public
github.com/hashicorp/consul/sdk => ../../sdk
)


@@ -1,3 +1,4 @@
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
@@ -5,6 +6,7 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -15,9 +17,14 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -31,23 +38,38 @@ github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/hashicorp/consul/api v1.24.0 h1:u2XyStA2j0jnCiVUU7Qyrt8idjRn4ORhK6DlvZ3bWhA=
github.com/hashicorp/consul/api v1.24.0/go.mod h1:NZJGRFYruc/80wYowkPFCp1LbGmJC9L8izrwfyVx/Wg=
github.com/hashicorp/consul-server-connection-manager v0.1.4 h1:wrcSRV6WGXFBNpNbN6XsdoGgBOyso7ZbN5VaWPEX1jY=
github.com/hashicorp/consul-server-connection-manager v0.1.4/go.mod h1:LMqHkALoLP0HUQKOG21xXYr0YPUayIQIHNTlmxG100E=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -65,6 +87,8 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-netaddrs v0.1.0 h1:TnlYvODD4C/wO+j7cX1z69kV5gOzI87u3OcUinANaW8=
github.com/hashicorp/go-netaddrs v0.1.0/go.mod h1:33+a/emi5R5dqRspOuZKO0E+Tuz5WV1F84eRWALkedA=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
@@ -88,10 +112,15 @@ github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
@ -114,6 +143,7 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
@ -135,6 +165,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@ -150,14 +181,24 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rboyer/safeio v0.2.2 h1:XhtqyUTRleMYGyBt3ni4j2BtEh669U2ry2INnnd+B4k=
github.com/rboyer/safeio v0.2.2/go.mod h1:pSnr2LFXyn/c/fotxotyOdYy7pP/XSh6MpBmzXPjiNc=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@ -167,6 +208,7 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
@ -182,39 +224,52 @@ github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeW
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -233,6 +288,21 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc=
google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -241,5 +311,6 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -93,6 +93,13 @@ func tokenForService(svc *topology.Service, overridePolicy *api.ACLPolicy, enter
}
if overridePolicy != nil {
token.Policies = []*api.ACLTokenPolicyLink{{ID: overridePolicy.ID}}
} else if svc.IsV2() {
token.TemplatedPolicies = []*api.ACLTemplatedPolicy{{
TemplateName: api.ACLTemplatedPolicyWorkloadIdentityName,
TemplateVariables: &api.ACLTemplatedPolicyVariables{
Name: svc.ID.Name,
},
}}
} else {
token.ServiceIdentities = []*api.ACLServiceIdentity{{
ServiceName: svc.ID.Name,

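As context for the hunk above, a minimal sketch (hypothetical helper, not part of this change) of the token shape produced for a v2 workload, using the same api types shown in the diff:

package main

import "github.com/hashicorp/consul/api"

// tokenForV2Workload sketches the ACL token this branch builds for a v2
// workload: a workload-identity templated policy in place of the v1
// service identity. The helper name and Description are hypothetical.
func tokenForV2Workload(workloadName string) *api.ACLToken {
	return &api.ACLToken{
		Description: "token for workload " + workloadName,
		TemplatedPolicies: []*api.ACLTemplatedPolicy{{
			TemplateName: api.ACLTemplatedPolicyWorkloadIdentityName,
			TemplateVariables: &api.ACLTemplatedPolicyVariables{
				Name: workloadName, // corresponds to svc.ID.Name above
			},
		}},
	}
}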
View File

@ -32,9 +32,11 @@ const (
func (s *Sprawl) launch() error {
return s.launchType(true)
}
func (s *Sprawl) relaunch() error {
return s.launchType(false)
}
func (s *Sprawl) launchType(firstTime bool) (launchErr error) {
if err := build.DockerImages(s.logger, s.runner, s.topology); err != nil {
return fmt.Errorf("build.DockerImages: %w", err)
@ -235,6 +237,14 @@ func (s *Sprawl) initConsulServers() error {
return fmt.Errorf("error creating final client for cluster=%s: %v", cluster.Name, err)
}
// Connect to gRPC as well.
if cluster.EnableV2 {
s.grpcConns[cluster.Name], s.grpcConnCancel[cluster.Name], err = s.dialServerGRPC(cluster, node, mgmtToken)
if err != nil {
return fmt.Errorf("error creating gRPC client conn for cluster=%s: %w", cluster.Name, err)
}
}
// For some reason the grpc resolver stuff for partitions takes some
// time to get ready.
s.waitForLocalWrites(cluster, mgmtToken)
@ -250,6 +260,10 @@ func (s *Sprawl) initConsulServers() error {
return fmt.Errorf("populateInitialConfigEntries[%s]: %w", cluster.Name, err)
}
if err := s.populateInitialResources(cluster); err != nil {
return fmt.Errorf("populateInitialResources[%s]: %w", cluster.Name, err)
}
if err := s.createAnonymousToken(cluster); err != nil {
return fmt.Errorf("createAnonymousToken[%s]: %w", cluster.Name, err)
}
@ -457,6 +471,9 @@ func (s *Sprawl) waitForLocalWrites(cluster *topology.Cluster, token string) {
}
func (s *Sprawl) waitForClientAntiEntropyOnce(cluster *topology.Cluster) error {
if cluster.EnableV2 {
return nil // v1 catalog is disabled when v2 catalog is enabled
}
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)

View File

@ -4,11 +4,18 @@
package sprawl
import (
"context"
"fmt"
"net/http"
"time"
"github.com/hashicorp/consul/api"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/testing/deployer/util"
@ -41,6 +48,9 @@ func (s *Sprawl) registerServicesToAgents(cluster *topology.Cluster) error {
if !node.IsAgent() {
continue
}
if node.IsV2() {
panic("don't call this")
}
agentClient, err := util.ProxyAPIClient(
node.LocalProxyPort(),
@ -71,6 +81,9 @@ func (s *Sprawl) registerAgentService(
if !node.IsAgent() {
panic("called wrong method type")
}
if node.IsV2() {
panic("don't call this")
}
if svc.IsMeshGateway {
return nil // handled at startup time for agent-full, but won't be for agent-less
@ -164,6 +177,8 @@ RETRY:
}
func (s *Sprawl) registerServicesForDataplaneInstances(cluster *topology.Cluster) error {
identityInfo := make(map[topology.ServiceID]*Resource[*pbauth.WorkloadIdentity])
for _, node := range cluster.Nodes {
if !node.RunsWorkloads() || len(node.Services) == 0 || node.Disabled {
continue
@ -178,13 +193,87 @@ func (s *Sprawl) registerServicesForDataplaneInstances(cluster *topology.Cluster
}
for _, svc := range node.Services {
if err := s.registerCatalogService(cluster, node, svc); err != nil {
return fmt.Errorf("error registering service: %w", err)
}
if !svc.DisableServiceMesh {
if err := s.registerCatalogSidecarService(cluster, node, svc); err != nil {
return fmt.Errorf("error registering sidecar service: %w", err)
if node.IsV2() {
pending := serviceInstanceToResources(node, svc)
if _, ok := identityInfo[svc.ID]; !ok {
identityInfo[svc.ID] = pending.WorkloadIdentity
}
// Write workload
res, err := pending.Workload.Build()
if err != nil {
return fmt.Errorf("error serializing resource %s: %w", util.IDToString(pending.Workload.Resource.Id), err)
}
workload, err := s.writeResource(cluster, res)
if err != nil {
return err
}
// Write check linked to workload
for _, check := range pending.HealthStatuses {
check.Resource.Owner = workload.Id
res, err := check.Build()
if err != nil {
return fmt.Errorf("error serializing resource %s: %w", util.IDToString(check.Resource.Id), err)
}
if _, err := s.writeResource(cluster, res); err != nil {
return err
}
}
// Maybe write destinations
if pending.Destinations != nil {
res, err := pending.Destinations.Build()
if err != nil {
return fmt.Errorf("error serializing resource %s: %w", util.IDToString(pending.Destinations.Resource.Id), err)
}
if _, err := s.writeResource(cluster, res); err != nil {
return err
}
}
} else {
if err := s.registerCatalogServiceV1(cluster, node, svc); err != nil {
return fmt.Errorf("error registering service: %w", err)
}
if !svc.DisableServiceMesh {
if err := s.registerCatalogSidecarServiceV1(cluster, node, svc); err != nil {
return fmt.Errorf("error registering sidecar service: %w", err)
}
}
}
}
}
for _, identity := range identityInfo {
res, err := identity.Build()
if err != nil {
return fmt.Errorf("error serializing resource %s: %w", util.IDToString(identity.Resource.Id), err)
}
if _, err := s.writeResource(cluster, res); err != nil {
return err
}
}
if cluster.EnableV2 {
for id, svcData := range cluster.Services {
svcInfo := &Resource[*pbcatalog.Service]{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbcatalog.ServiceType,
Name: id.Name,
Tenancy: &pbresource.Tenancy{
Partition: id.Partition,
Namespace: id.Namespace,
},
},
},
Data: svcData,
}
res, err := svcInfo.Build()
if err != nil {
return fmt.Errorf("error serializing resource %s: %w", util.IDToString(svcInfo.Resource.Id), err)
}
if _, err := s.writeResource(cluster, res); err != nil {
return err
}
}
}
@ -195,6 +284,77 @@ func (s *Sprawl) registerServicesForDataplaneInstances(cluster *topology.Cluster
func (s *Sprawl) registerCatalogNode(
cluster *topology.Cluster,
node *topology.Node,
) error {
if node.IsV2() {
return s.registerCatalogNodeV2(cluster, node)
}
return s.registerCatalogNodeV1(cluster, node)
}
func (s *Sprawl) registerCatalogNodeV2(
cluster *topology.Cluster,
node *topology.Node,
) error {
if !node.IsDataplane() {
panic("called wrong method type")
}
nodeRes := &Resource[*pbcatalog.Node]{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbcatalog.NodeType,
Name: node.PodName(),
Tenancy: &pbresource.Tenancy{
Partition: node.Partition,
Namespace: "default", // temporary requirement
},
},
Metadata: map[string]string{
"dataplane-faux": "1",
},
},
Data: &pbcatalog.Node{
Addresses: []*pbcatalog.NodeAddress{
{Host: node.LocalAddress()},
},
},
}
res, err := nodeRes.Build()
if err != nil {
return err
}
_, err = s.writeResource(cluster, res)
return err
}
func (s *Sprawl) writeResource(cluster *topology.Cluster, res *pbresource.Resource) (*pbresource.Resource, error) {
var (
client = s.getResourceClient(cluster.Name)
logger = s.logger.With("cluster", cluster.Name)
)
ctx := s.getManagementTokenContext(context.Background(), cluster.Name)
RETRY:
wrote, err := client.Write(ctx, &pbresource.WriteRequest{
Resource: res,
})
if err != nil {
if isACLNotFound(err) { // TODO: is this right for v2?
time.Sleep(50 * time.Millisecond)
goto RETRY
}
return nil, fmt.Errorf("error creating resource %s: %w", util.IDToString(res.Id), err)
}
logger.Info("resource upserted", "id", util.IDToString(res.Id))
return wrote.Resource, nil
}
func (s *Sprawl) registerCatalogNodeV1(
cluster *topology.Cluster,
node *topology.Node,
) error {
if !node.IsDataplane() {
panic("called wrong method type")
@ -233,7 +393,7 @@ RETRY:
return nil
}
func (s *Sprawl) registerCatalogService(
func (s *Sprawl) registerCatalogServiceV1(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
@ -241,6 +401,9 @@ func (s *Sprawl) registerCatalogService(
if !node.IsDataplane() {
panic("called wrong method type")
}
if node.IsV2() {
panic("don't call this")
}
var (
client = s.clients[cluster.Name]
@ -266,7 +429,7 @@ RETRY:
return nil
}
func (s *Sprawl) registerCatalogSidecarService(
func (s *Sprawl) registerCatalogSidecarServiceV1(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
@ -277,6 +440,9 @@ func (s *Sprawl) registerCatalogSidecarService(
if svc.DisableServiceMesh {
panic("not valid")
}
if node.IsV2() {
panic("don't call this")
}
var (
client = s.clients[cluster.Name]
@ -301,11 +467,172 @@ RETRY:
return nil
}
type Resource[V proto.Message] struct {
Resource *pbresource.Resource
Data V
}
func (r *Resource[V]) Build() (*pbresource.Resource, error) {
anyData, err := anypb.New(r.Data)
if err != nil {
return nil, err
}
r.Resource.Data = anyData
return r.Resource, nil
}
type ServiceResources struct {
Workload *Resource[*pbcatalog.Workload]
HealthStatuses []*Resource[*pbcatalog.HealthStatus]
Destinations *Resource[*pbmesh.Destinations]
WorkloadIdentity *Resource[*pbauth.WorkloadIdentity]
}
func serviceInstanceToResources(
node *topology.Node,
svc *topology.Service,
) *ServiceResources {
if svc.IsMeshGateway {
panic("v2 does not yet support mesh gateways")
}
tenancy := &pbresource.Tenancy{
Partition: svc.ID.Partition,
Namespace: svc.ID.Namespace,
}
var (
wlPorts = map[string]*pbcatalog.WorkloadPort{}
)
for name, port := range svc.Ports {
wlPorts[name] = &pbcatalog.WorkloadPort{
Port: uint32(port),
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
}
}
var (
selector = &pbcatalog.WorkloadSelector{
Names: []string{svc.Workload},
}
workloadRes = &Resource[*pbcatalog.Workload]{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbcatalog.WorkloadType,
Name: svc.Workload,
Tenancy: tenancy,
},
Metadata: svc.Meta,
},
Data: &pbcatalog.Workload{
NodeName: node.PodName(),
Identity: svc.ID.Name,
Ports: wlPorts,
Addresses: []*pbcatalog.WorkloadAddress{
{Host: node.LocalAddress()},
},
},
}
workloadIdentityRes = &Resource[*pbauth.WorkloadIdentity]{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbauth.WorkloadIdentityType,
Name: svc.ID.Name,
Tenancy: tenancy,
},
Metadata: svc.Meta,
},
Data: &pbauth.WorkloadIdentity{},
}
healthResList []*Resource[*pbcatalog.HealthStatus]
destinationsRes *Resource[*pbmesh.Destinations]
)
if svc.HasCheck() {
// TODO: needs ownerId
checkRes := &Resource[*pbcatalog.HealthStatus]{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbcatalog.HealthStatusType,
Name: svc.Workload + "-check-0",
Tenancy: tenancy,
},
},
Data: &pbcatalog.HealthStatus{
Type: "external-sync",
Status: pbcatalog.Health_HEALTH_PASSING,
},
}
healthResList = []*Resource[*pbcatalog.HealthStatus]{checkRes}
}
if node.HasPublicAddress() {
workloadRes.Data.Addresses = append(workloadRes.Data.Addresses,
&pbcatalog.WorkloadAddress{Host: node.PublicAddress(), External: true},
)
}
if !svc.DisableServiceMesh {
workloadRes.Data.Ports["mesh"] = &pbcatalog.WorkloadPort{
Port: uint32(svc.EnvoyPublicListenerPort),
Protocol: pbcatalog.Protocol_PROTOCOL_MESH,
}
destinationsRes = &Resource[*pbmesh.Destinations]{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbmesh.DestinationsType,
Name: svc.Workload,
Tenancy: tenancy,
},
},
Data: &pbmesh.Destinations{
Workloads: selector,
},
}
for _, u := range svc.Upstreams {
dest := &pbmesh.Destination{
DestinationRef: &pbresource.Reference{
Type: pbcatalog.ServiceType,
Name: u.ID.Name,
Tenancy: &pbresource.Tenancy{
Partition: u.ID.Partition,
Namespace: u.ID.Namespace,
},
},
DestinationPort: u.PortName,
ListenAddr: &pbmesh.Destination_IpPort{
IpPort: &pbmesh.IPPortAddress{
Ip: u.LocalAddress,
Port: uint32(u.LocalPort),
},
},
}
destinationsRes.Data.Destinations = append(destinationsRes.Data.Destinations, dest)
}
}
return &ServiceResources{
Workload: workloadRes,
HealthStatuses: healthResList,
Destinations: destinationsRes,
WorkloadIdentity: workloadIdentityRes,
}
}
func serviceToCatalogRegistration(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
) *api.CatalogRegistration {
if node.IsV2() {
panic("don't call this")
}
reg := &api.CatalogRegistration{
Node: node.PodName(),
SkipNodeUpdate: true,
@ -394,6 +721,9 @@ func serviceToSidecarCatalogRegistration(
node *topology.Node,
svc *topology.Service,
) (topology.ServiceID, *api.CatalogRegistration) {
if node.IsV2() {
panic("don't call this")
}
pid := svc.ID
pid.Name += "-sidecar-proxy"
reg := &api.CatalogRegistration{

View File

@ -4,17 +4,29 @@
package sprawl
import (
"context"
"errors"
"fmt"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/testing/deployer/util"
)
func (s *Sprawl) getResourceClient(clusterName string) pbresource.ResourceServiceClient {
return pbresource.NewResourceServiceClient(s.grpcConns[clusterName])
}
func (s *Sprawl) getManagementTokenContext(ctx context.Context, clusterName string) context.Context {
mgmtToken := s.secrets.ReadGeneric(clusterName, secrets.BootstrapToken)
//nolint:staticcheck
return context.WithValue(ctx, "x-consul-token", mgmtToken)
}
func getLeader(client *api.Client) (string, error) {
leaderAdd, err := client.Status().Leader()
if err != nil {

View File

@ -70,10 +70,15 @@ func (s *Sprawl) PrintDetails() error {
Service: svc.ID.String(),
})
} else {
ports := make(map[string]int)
for name, port := range svc.Ports {
ports[name] = node.ExposedPort(port)
}
cd.Apps = append(cd.Apps, appDetail{
Type: "app",
Container: node.DockerName(),
ExposedPort: node.ExposedPort(svc.Port),
ExposedPorts: ports,
ExposedEnvoyAdminPort: node.ExposedPort(svc.EnvoyAdminPort),
Addresses: addrs,
Service: svc.ID.String(),
@ -115,19 +120,23 @@ func (s *Sprawl) PrintDetails() error {
return false
}
if a.Service < b.Service {
return true
} else if a.Service > b.Service {
return false
}
return a.ExposedPort < b.ExposedPort
return a.Service < b.Service
})
for _, d := range cluster.Apps {
if d.Type == "server" && d.Container == cluster.Leader {
d.Type = "leader"
}
portStr := "app=" + strconv.Itoa(d.ExposedPort)
var portStr string
if len(d.ExposedPorts) > 0 {
var out []string
for name, exposed := range d.ExposedPorts {
out = append(out, fmt.Sprintf("app:%s=%d", name, exposed))
}
sort.Strings(out)
portStr = strings.Join(out, " ")
} else {
portStr = "app=" + strconv.Itoa(d.ExposedPort)
}
if d.ExposedEnvoyAdminPort > 0 {
portStr += " envoy=" + strconv.Itoa(d.ExposedEnvoyAdminPort)
}
@ -166,8 +175,9 @@ type appDetail struct {
Type string // server|mesh-gateway|app
Container string
Addresses []string
ExposedPort int `json:",omitempty"`
ExposedEnvoyAdminPort int `json:",omitempty"`
ExposedPort int `json:",omitempty"`
ExposedPorts map[string]int `json:",omitempty"`
ExposedEnvoyAdminPort int `json:",omitempty"`
// just services
Service string `json:",omitempty"`
}

View File

@ -0,0 +1,42 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sprawl
import (
"context"
"crypto/tls"
"fmt"
"github.com/hashicorp/go-rootcerts"
"google.golang.org/grpc"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/testing/deployer/util"
)
func (s *Sprawl) dialServerGRPC(cluster *topology.Cluster, node *topology.Node, token string) (*grpc.ClientConn, func(), error) {
var (
logger = s.logger.With("cluster", cluster.Name)
)
tls := &tls.Config{
ServerName: fmt.Sprintf("server.%s.consul", cluster.Datacenter),
}
rootConfig := &rootcerts.Config{
CACertificate: []byte(s.secrets.ReadGeneric(cluster.Name, secrets.CAPEM)),
}
if err := rootcerts.ConfigureTLS(tls, rootConfig); err != nil {
return nil, nil, err
}
return util.DialExposedGRPCConn(
context.Background(),
logger,
node.ExposedPort(8503),
token,
tls,
)
}

View File

@ -18,6 +18,7 @@ const (
GossipKey = "gossip"
BootstrapToken = "bootstrap-token"
AgentRecovery = "agent-recovery"
CAPEM = "ca-pem"
)
func (s *Store) SaveGeneric(cluster, name, value string) {

View File

@ -13,7 +13,7 @@ import (
"github.com/hashicorp/consul/testing/deployer/topology"
)
func (g *Generator) generateAgentHCL(node *topology.Node) string {
func (g *Generator) generateAgentHCL(node *topology.Node, enableV2 bool) string {
if !node.IsAgent() {
panic("generateAgentHCL only applies to agents")
}
@ -35,6 +35,10 @@ func (g *Generator) generateAgentHCL(node *topology.Node) string {
b.add("enable_debug", true)
b.add("use_streaming_backend", true)
if enableV2 {
b.addSlice("experiments", []string{"resource-apis"})
}
// speed up leaves
b.addBlock("performance", func() {
b.add("leave_drain_time", "50ms")

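For reference, when enableV2 is true the builder call above should emit an extra line like the following in the generated server agent HCL (a sketch of the output, not a new input):

experiments = ["resource-apis"]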
View File

@ -12,7 +12,6 @@ import (
func TestDockerImageResourceName(t *testing.T) {
fn := DockerImageResourceName
assert.Equal(t, "", fn(""))
assert.Equal(t, "abcdefghijklmnopqrstuvwxyz0123456789-", fn("abcdefghijklmnopqrstuvwxyz0123456789-"))
assert.Equal(t, "hashicorp-consul-1-15-0", fn("hashicorp/consul:1.15.0"))
}

View File

@ -67,7 +67,7 @@ func (g *Generator) generateNodeContainers(
}{
terraformPod: pod,
ImageResource: DockerImageResourceName(node.Images.Consul),
HCL: g.generateAgentHCL(node),
HCL: g.generateAgentHCL(node, cluster.EnableV2 && node.IsServer()),
EnterpriseLicense: g.license,
}))
}

View File

@ -1,7 +1,7 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.ImageResource}}.latest
image = docker_image.{{.ImageResource}}.image_id
restart = "on-failure"
{{- range $k, $v := .Labels }}
@ -18,24 +18,34 @@ resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidec
}
env = [
"DP_CONSUL_ADDRESSES=server.{{.Node.Cluster}}-consulcluster.lan",
"DP_SERVICE_NODE_NAME={{.Node.PodName}}",
"DP_PROXY_SERVICE_ID={{.Service.ID.Name}}-sidecar-proxy",
{{ if .Enterprise }}
"DP_SERVICE_NAMESPACE={{.Service.ID.Namespace}}",
"DP_SERVICE_PARTITION={{.Service.ID.Partition}}",
{{ end }}
{{ if .Token }}
"DP_CREDENTIAL_TYPE=static",
"DP_CREDENTIAL_STATIC_TOKEN={{.Token}}",
{{ end }}
// for demo purposes
"DP_ENVOY_ADMIN_BIND_ADDRESS=0.0.0.0",
"DP_ENVOY_ADMIN_BIND_PORT=19000",
"DP_LOG_LEVEL=trace",
"DP_CA_CERTS=/consul/config/certs/consul-agent-ca.pem",
"DP_CONSUL_GRPC_PORT=8503",
"DP_TLS_SERVER_NAME=server.{{.Node.Datacenter}}.consul",
"DP_CONSUL_ADDRESSES=server.{{.Node.Cluster}}-consulcluster.lan",
{{ if .Node.IsV2 }}
"DP_PROXY_ID={{.Service.Workload}}",
{{ if .Enterprise }}
"DP_PROXY_NAMESPACE={{.Service.ID.Namespace}}",
"DP_PROXY_PARTITION={{.Service.ID.Partition}}",
{{ end }}
{{ else }}
"DP_SERVICE_NODE_NAME={{.Node.PodName}}",
"DP_PROXY_SERVICE_ID={{.Service.ID.Name}}-sidecar-proxy",
{{ if .Enterprise }}
"DP_SERVICE_NAMESPACE={{.Service.ID.Namespace}}",
"DP_SERVICE_PARTITION={{.Service.ID.Partition}}",
{{ end }}
{{ end }}
{{ if .Token }}
"DP_CREDENTIAL_TYPE=static",
"DP_CREDENTIAL_STATIC_TOKEN={{.Token}}",
{{ end }}
// for demo purposes
"DP_ENVOY_ADMIN_BIND_ADDRESS=0.0.0.0",
"DP_ENVOY_ADMIN_BIND_PORT=19000",
"DP_LOG_LEVEL=trace",
"DP_CA_CERTS=/consul/config/certs/consul-agent-ca.pem",
"DP_CONSUL_GRPC_PORT=8503",
"DP_TLS_SERVER_NAME=server.{{.Node.Datacenter}}.consul",
]
command = [

View File

@ -1,7 +1,7 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.ImageResource}}.latest
image = docker_image.{{.ImageResource}}.image_id
restart = "on-failure"
{{- range $k, $v := .Labels }}

View File

@ -1,7 +1,7 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.ImageResource}}.latest
image = docker_image.{{.ImageResource}}.image_id
restart = "on-failure"
{{- range $k, $v := .Labels }}

View File

@ -1,7 +1,7 @@
resource "docker_container" "{{.Node.DockerName}}" {
name = "{{.Node.DockerName}}"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.ImageResource}}.latest
image = docker_image.{{.ImageResource}}.image_id
restart = "always"
env = [

View File

@ -1,6 +1,6 @@
resource "docker_container" "{{.DockerNetworkName}}-coredns" {
name = "{{.DockerNetworkName}}-coredns"
image = docker_image.coredns.latest
image = docker_image.coredns.image_id
restart = "always"
dns = ["8.8.8.8"]

View File

@ -1,7 +1,7 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.ImageResource}}.latest
image = docker_image.{{.ImageResource}}.image_id
restart = "on-failure"
{{- range $k, $v := .Labels }}

View File

@ -1,7 +1,7 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.ImageResource}}.latest
image = docker_image.{{.ImageResource}}.image_id
restart = "on-failure"
{{- range $k, $v := .Labels }}

View File

@ -1,6 +1,6 @@
resource "docker_container" "{{.PodName}}" {
name = "{{.PodName}}"
image = docker_image.pause.latest
image = docker_image.pause.image_id
hostname = "{{.PodName}}"
restart = "always"
dns = ["{{.DNSAddress}}"]

View File

@ -1,6 +1,6 @@
resource "docker_container" "{{.DockerNetworkName}}-forwardproxy" {
name = "{{.DockerNetworkName}}-forwardproxy"
image = docker_image.nginx.latest
image = docker_image.nginx.image_id
restart = "always"
dns = ["8.8.8.8"]

View File

@ -0,0 +1,22 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package sprawl
import (
"github.com/hashicorp/consul/testing/deployer/topology"
)
func (s *Sprawl) populateInitialResources(cluster *topology.Cluster) error {
if len(cluster.InitialResources) == 0 {
return nil
}
for _, res := range cluster.InitialResources {
if _, err := s.writeResource(cluster, res); err != nil {
return err
}
}
return nil
}

View File

@ -16,9 +16,11 @@ import (
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
"github.com/mitchellh/copystructure"
"google.golang.org/grpc"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/runner"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets"
@ -43,7 +45,9 @@ type Sprawl struct {
topology *topology.Topology
generator *tfgen.Generator
clients map[string]*api.Client // one per cluster
clients map[string]*api.Client // one per cluster
grpcConns map[string]*grpc.ClientConn // one per cluster (when v2 enabled)
grpcConnCancel map[string]func() // one per cluster (when v2 enabled)
}
// Topology allows access to the topology that defines the resources. Do not
@ -60,6 +64,12 @@ func (s *Sprawl) Config() *topology.Config {
return c2
}
// ResourceServiceClientForCluster returns a shared common client that defaults
// to using the management token for this cluster.
func (s *Sprawl) ResourceServiceClientForCluster(clusterName string) pbresource.ResourceServiceClient {
return pbresource.NewResourceServiceClient(s.grpcConns[clusterName])
}
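A usage sketch for the new accessor (hypothetical test helper; the workload name and tenancy values are illustrative):

package main

import (
	"context"

	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"

	"github.com/hashicorp/consul/testing/deployer/sprawl"
)

// readWorkload reads one generated workload back through the cluster's
// shared resource service client.
func readWorkload(sp *sprawl.Sprawl, cluster, workload string) (*pbresource.Resource, error) {
	client := sp.ResourceServiceClientForCluster(cluster)
	resp, err := client.Read(context.Background(), &pbresource.ReadRequest{
		Id: &pbresource.ID{
			Type: pbcatalog.WorkloadType,
			Name: workload,
			Tenancy: &pbresource.Tenancy{
				Partition: "default",
				Namespace: "default",
			},
		},
	})
	if err != nil {
		return nil, err
	}
	return resp.Resource, nil
}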
func (s *Sprawl) HTTPClientForCluster(clusterName string) (*http.Client, error) {
cluster, ok := s.topology.Clusters[clusterName]
if !ok {
@ -167,10 +177,12 @@ func Launch(
}
s := &Sprawl{
logger: logger,
runner: runner,
workdir: workdir,
clients: make(map[string]*api.Client),
logger: logger,
runner: runner,
workdir: workdir,
clients: make(map[string]*api.Client),
grpcConns: make(map[string]*grpc.ClientConn),
grpcConnCancel: make(map[string]func()),
}
if err := s.ensureLicense(); err != nil {

View File

@ -13,10 +13,13 @@ import (
"sync"
"testing"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"github.com/hashicorp/consul/testing/deployer/sprawl"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/runner"
@ -206,3 +209,10 @@ func SkipIfTerraformNotPresent(t *testing.T) {
t.Skipf("%q not found on $PATH - download and install to run this test", terraformBinaryName)
}
}
func MustSetResourceData(t *testing.T, res *pbresource.Resource, data proto.Message) *pbresource.Resource {
anyData, err := anypb.New(data)
require.NoError(t, err)
res.Data = anyData
return res
}

View File

@ -8,17 +8,160 @@ import (
"testing"
"github.com/hashicorp/consul/api"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
"github.com/hashicorp/consul/testing/deployer/topology"
)
func TestSprawl_CatalogV2(t *testing.T) {
serversDC1 := newTopologyServerSet("dc1-server", 3, []string{"dc1", "wan"}, nil)
cfg := &topology.Config{
Images: topology.Images{
ConsulCE: "hashicorppreview/consul:1.17-dev",
ConsulEnterprise: "hashicorppreview/consul-enterprise:1.17-dev",
Dataplane: "hashicorppreview/consul-dataplane:1.3-dev",
},
Networks: []*topology.Network{
{Name: "dc1"},
{Name: "wan", Type: "wan"},
},
Clusters: []*topology.Cluster{
{
Enterprise: true,
Name: "dc1",
Nodes: topology.MergeSlices(serversDC1, []*topology.Node{
{
Kind: topology.NodeKindDataplane,
Version: topology.NodeVersionV2,
Name: "dc1-client1",
Services: []*topology.Service{
{
ID: topology.ServiceID{Name: "ping"},
Image: "rboyer/pingpong:latest",
Port: 8080,
EnvoyAdminPort: 19000,
Command: []string{
"-bind", "0.0.0.0:8080",
"-dial", "127.0.0.1:9090",
"-pong-chaos",
"-dialfreq", "250ms",
"-name", "ping",
},
Upstreams: []*topology.Upstream{{
ID: topology.ServiceID{Name: "pong"},
LocalPort: 9090,
}},
},
},
},
{
Kind: topology.NodeKindDataplane,
Version: topology.NodeVersionV2,
Name: "dc1-client2",
Services: []*topology.Service{
{
ID: topology.ServiceID{Name: "pong"},
Image: "rboyer/pingpong:latest",
Port: 8080,
EnvoyAdminPort: 19000,
Command: []string{
"-bind", "0.0.0.0:8080",
"-dial", "127.0.0.1:9090",
"-pong-chaos",
"-dialfreq", "250ms",
"-name", "pong",
},
Upstreams: []*topology.Upstream{{
ID: topology.ServiceID{Name: "ping"},
LocalPort: 9090,
}},
},
},
},
}),
InitialResources: []*pbresource.Resource{
sprawltest.MustSetResourceData(t, &pbresource.Resource{
Id: &pbresource.ID{
Type: pbmesh.HTTPRouteType,
Name: "test-http-route",
},
}, &pbmesh.HTTPRoute{
ParentRefs: []*pbmesh.ParentReference{{
Ref: &pbresource.Reference{
Type: pbcatalog.ServiceType,
Name: "test",
},
}},
}),
sprawltest.MustSetResourceData(t, &pbresource.Resource{
Id: &pbresource.ID{
Type: pbauth.TrafficPermissionsType,
Name: "ping-perms",
},
}, &pbauth.TrafficPermissions{
Destination: &pbauth.Destination{
IdentityName: "ping",
},
Action: pbauth.Action_ACTION_ALLOW,
Permissions: []*pbauth.Permission{{
Sources: []*pbauth.Source{{
IdentityName: "pong",
}},
}},
}),
sprawltest.MustSetResourceData(t, &pbresource.Resource{
Id: &pbresource.ID{
Type: pbauth.TrafficPermissionsType,
Name: "pong-perms",
},
}, &pbauth.TrafficPermissions{
Destination: &pbauth.Destination{
IdentityName: "pong",
},
Action: pbauth.Action_ACTION_ALLOW,
Permissions: []*pbauth.Permission{{
Sources: []*pbauth.Source{{
IdentityName: "ping",
}},
}},
}),
},
},
},
}
sp := sprawltest.Launch(t, cfg)
for _, cluster := range sp.Topology().Clusters {
leader, err := sp.Leader(cluster.Name)
require.NoError(t, err)
t.Logf("%s: leader = %s", cluster.Name, leader.ID())
followers, err := sp.Followers(cluster.Name)
require.NoError(t, err)
for _, f := range followers {
t.Logf("%s: follower = %s", cluster.Name, f.ID())
}
}
}
func TestSprawl(t *testing.T) {
serversDC1 := newTopologyServerSet("dc1-server", 3, []string{"dc1", "wan"}, nil)
serversDC2 := newTopologyServerSet("dc2-server", 3, []string{"dc2", "wan"}, nil)
cfg := &topology.Config{
Images: topology.Images{
// ConsulEnterprise: "consul-dev:latest",
ConsulCE: "hashicorppreview/consul:1.17-dev",
ConsulEnterprise: "hashicorppreview/consul-enterprise:1.17-dev",
Dataplane: "hashicorppreview/consul-dataplane:1.3-dev",
},
Networks: []*topology.Network{
{Name: "dc1"},
{Name: "dc2"},
@ -116,6 +259,31 @@ func TestSprawl(t *testing.T) {
},
},
},
{
Kind: topology.NodeKindDataplane,
Version: topology.NodeVersionV2,
Name: "dc2-client3",
Services: []*topology.Service{
{
ID: topology.ServiceID{Name: "pong"},
Image: "rboyer/pingpong:latest",
Port: 8080,
EnvoyAdminPort: 19000,
Command: []string{
"-bind", "0.0.0.0:8080",
"-dial", "127.0.0.1:9090",
"-pong-chaos",
"-dialfreq", "250ms",
"-name", "pong",
},
Upstreams: []*topology.Upstream{{
ID: topology.ServiceID{Name: "ping"},
LocalPort: 9090,
Peer: "peer-dc1-default",
}},
},
},
},
}),
InitialConfigEntries: []api.ConfigEntry{
&api.ExportedServicesConfigEntry{

View File

@ -9,6 +9,7 @@ import (
"fmt"
"io"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets"
"github.com/hashicorp/consul/testing/deployer/topology"
)
@ -113,6 +114,29 @@ fi
if err != nil {
return fmt.Errorf("could not create all necessary TLS certificates in docker volume: %v", err)
}
var capture bytes.Buffer
err = s.runner.DockerExec(ctx, []string{"run",
"--rm",
"-i",
"--net=none",
"-u", consulUserArg,
"-v", cluster.TLSVolumeName + ":/data",
"-w", "/data",
"busybox:1.34",
"cat",
"/data/consul-agent-ca.pem",
}, &capture, nil)
if err != nil {
return fmt.Errorf("could not read CA PEM from docker volume: %v", err)
}
caPEM := capture.String()
if caPEM == "" {
return fmt.Errorf("found empty CA PEM")
}
s.secrets.SaveGeneric(cluster.Name, secrets.CAPEM, caPEM)
}
return nil

View File

@ -13,10 +13,17 @@ import (
"sort"
"github.com/google/go-cmp/cmp"
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/go-hclog"
"golang.org/x/exp/maps"
"github.com/hashicorp/consul/testing/deployer/util"
)
const DockerPrefix = "consulcluster"
const DockerPrefix = "cslc" // ConSuLCluster
func Compile(logger hclog.Logger, raw *Config) (*Topology, error) {
return compile(logger, raw, nil)
@ -122,6 +129,22 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
return nil, fmt.Errorf("cluster %q has no nodes", c.Name)
}
if len(c.Services) == 0 { // always initialize this regardless of v2-ness, because we might late-enable it below
c.Services = make(map[ServiceID]*pbcatalog.Service)
}
var implicitV2Services bool
if len(c.Services) > 0 {
c.EnableV2 = true
for name, svc := range c.Services {
if svc.Workloads != nil {
return nil, fmt.Errorf("the workloads field for v2 service %q is not user settable", name)
}
}
} else {
implicitV2Services = true
}
if c.TLSVolumeName != "" {
return nil, fmt.Errorf("user cannot specify the TLSVolumeName field")
}
@ -149,6 +172,39 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
addTenancy(ce.GetPartition(), ce.GetNamespace())
}
if len(c.InitialResources) > 0 {
c.EnableV2 = true
}
for _, res := range c.InitialResources {
if res.Id.Tenancy == nil {
res.Id.Tenancy = &pbresource.Tenancy{}
}
switch res.Id.Tenancy.PeerName {
case "", "local":
default:
return nil, fmt.Errorf("resources cannot target non-local peers")
}
res.Id.Tenancy.Partition = PartitionOrDefault(res.Id.Tenancy.Partition)
res.Id.Tenancy.Namespace = NamespaceOrDefault(res.Id.Tenancy.Namespace)
switch {
case util.EqualType(pbauth.ComputedTrafficPermissionsType, res.Id.GetType()),
util.EqualType(pbauth.WorkloadIdentityType, res.Id.GetType()):
fallthrough
case util.EqualType(pbmesh.ComputedRoutesType, res.Id.GetType()),
util.EqualType(pbmesh.ProxyStateTemplateType, res.Id.GetType()):
fallthrough
case util.EqualType(pbcatalog.HealthChecksType, res.Id.GetType()),
util.EqualType(pbcatalog.HealthStatusType, res.Id.GetType()),
util.EqualType(pbcatalog.NodeType, res.Id.GetType()),
util.EqualType(pbcatalog.ServiceEndpointsType, res.Id.GetType()),
util.EqualType(pbcatalog.WorkloadType, res.Id.GetType()):
return nil, fmt.Errorf("you should not create a resource of type %q this way", util.TypeToString(res.Id.Type))
}
addTenancy(res.Id.Tenancy.Partition, res.Id.Tenancy.Namespace)
}
seenNodes := make(map[NodeID]struct{})
for _, n := range c.Nodes {
if n.Name == "" {
@ -164,6 +220,20 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
return nil, fmt.Errorf("cluster %q node %q has invalid kind: %s", c.Name, n.Name, n.Kind)
}
if n.Version == NodeVersionUnknown {
n.Version = NodeVersionV1
}
switch n.Version {
case NodeVersionV1:
case NodeVersionV2:
if n.Kind == NodeKindClient {
return nil, fmt.Errorf("v2 does not support client agents at this time")
}
c.EnableV2 = true
default:
return nil, fmt.Errorf("cluster %q node %q has invalid version: %s", c.Name, n.Name, n.Version)
}
n.Partition = PartitionOrDefault(n.Partition)
if !IsValidLabel(n.Partition) {
return nil, fmt.Errorf("node partition is not valid: %s", n.Partition)
@ -257,6 +327,10 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
// Denormalize
svc.Node = n
svc.NodeVersion = n.Version
if n.IsV2() {
svc.Workload = svc.ID.Name + "-" + n.PodName()
}
if !IsValidLabel(svc.ID.Partition) {
return nil, fmt.Errorf("service partition is not valid: %s", svc.ID.Partition)
@ -330,15 +404,79 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
foundPeerNames[c.Name][u.Peer] = struct{}{}
}
if u.ID.Name == "" {
return nil, fmt.Errorf("upstream service name is required")
}
addTenancy(u.ID.Partition, u.ID.Namespace)
if u.LocalAddress == "" {
// v1 defaults to 127.0.0.1 but v2 does not. Safe to do this generally though.
u.LocalAddress = "127.0.0.1"
}
if u.PortName != "" && n.IsV1() {
return nil, fmt.Errorf("explicit upstreams cannot use port names in v1")
}
if u.PortName == "" && n.IsV2() {
// Assume this is a v1->v2 conversion and name it.
u.PortName = "legacy"
}
}
if err := svc.Validate(); err != nil {
return nil, fmt.Errorf("cluster %q node %q service %q is not valid: %w", c.Name, n.Name, svc.ID.String(), err)
}
if n.IsV2() {
if implicitV2Services {
svc.V2Services = []string{svc.ID.Name}
var svcPorts []*pbcatalog.ServicePort
for name := range svc.Ports {
svcPorts = append(svcPorts, &pbcatalog.ServicePort{
TargetPort: name,
Protocol: pbcatalog.Protocol_PROTOCOL_TCP, // TODO
})
}
if !svc.DisableServiceMesh {
svcPorts = append(svcPorts, &pbcatalog.ServicePort{
TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH,
})
}
v2svc := &pbcatalog.Service{
Workloads: &pbcatalog.WorkloadSelector{
Names: []string{svc.Workload},
},
Ports: svcPorts,
}
c.Services[svc.ID] = v2svc
} else {
for _, name := range svc.V2Services {
v2ID := NewServiceID(name, svc.ID.Namespace, svc.ID.Partition)
v2svc, ok := c.Services[v2ID]
if !ok {
return nil, fmt.Errorf("cluster %q node %q service %q has a v2 service reference that does not exist %q",
c.Name, n.Name, svc.ID.String(), name)
}
if v2svc.Workloads == nil {
v2svc.Workloads = &pbcatalog.WorkloadSelector{}
}
v2svc.Workloads.Names = append(v2svc.Workloads.Names, svc.Workload)
}
}
if len(svc.WorkloadIdentities) == 0 {
svc.WorkloadIdentities = []string{svc.ID.Name}
}
} else {
if len(svc.V2Services) > 0 {
return nil, fmt.Errorf("cannot specify v2 services for v1")
}
if len(svc.WorkloadIdentities) > 0 {
return nil, fmt.Errorf("cannot specify workload identities for v1")
}
}
}
}
@ -519,6 +657,9 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
if len(newCluster.InitialConfigEntries) > 0 {
logger.Warn("initial config entries were provided, but are skipped on recompile")
}
if len(newCluster.InitialResources) > 0 {
logger.Warn("initial resources were provided, but are skipped on recompile")
}
// Check NODES
if err := inheritAndValidateNodes(oldCluster.Nodes, newCluster.Nodes); err != nil {
@ -553,6 +694,7 @@ func inheritAndValidateNodes(
}
if currNode.Node.Kind != node.Kind ||
currNode.Node.Version != node.Version ||
currNode.Node.Partition != node.Partition ||
currNode.Node.Name != node.Name ||
currNode.Node.Index != node.Index ||
@ -589,6 +731,7 @@ func inheritAndValidateNodes(
if currSvc.ID != svc.ID ||
currSvc.Port != svc.Port ||
!maps.Equal(currSvc.Ports, svc.Ports) ||
currSvc.EnvoyAdminPort != svc.EnvoyAdminPort ||
currSvc.EnvoyPublicListenerPort != svc.EnvoyPublicListenerPort ||
isSame(currSvc.Command, svc.Command) != nil ||

View File

@ -1,6 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package topology
const DefaultDataplaneImage = "hashicorp/consul-dataplane:1.2.1"

View File

@ -1,7 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package topology
const DefaultConsulImage = "hashicorp/consul:1.16.2"
const DefaultConsulEnterpriseImage = "hashicorp/consul-enterprise:1.16.2-ent"

View File

@ -0,0 +1,13 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
// Code generated by update-latest-versions.sh. DO NOT EDIT.
package topology
const (
DefaultConsulImage = "hashicorp/consul:1.16.2"
DefaultConsulEnterpriseImage = "hashicorp/consul-enterprise:1.16.2-ent"
DefaultEnvoyImage = "envoyproxy/envoy:v1.26.4"
DefaultDataplaneImage = "hashicorp/consul-dataplane:1.2.2"
)

View File

@ -3,4 +3,4 @@
package topology
const DefaultEnvoyImage = "envoyproxy/envoy:v1.25.1"
//go:generate ../update-latest-versions.sh

View File

@ -69,6 +69,7 @@ func (id NodeID) String() string {
func (id NodeID) ACLString() string {
return fmt.Sprintf("%s--%s", id.Partition, id.Name)
}
func (id NodeID) TFString() string {
return id.ACLString()
}
@ -111,16 +112,26 @@ func (id ServiceID) String() string {
func (id ServiceID) ACLString() string {
return fmt.Sprintf("%s--%s--%s", id.Partition, id.Namespace, id.Name)
}
func (id ServiceID) TFString() string {
return id.ACLString()
}
func (id ServiceID) PartitionOrDefault() string {
return PartitionOrDefault(id.Partition)
}
func (id ServiceID) NamespaceOrDefault() string {
return NamespaceOrDefault(id.Namespace)
}
func PartitionOrDefault(name string) string {
if name == "" {
return "default"
}
return name
}
func NamespaceOrDefault(name string) string {
if name == "" {
return "default"

View File

@ -0,0 +1,77 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package topology
import (
"bytes"
"fmt"
"text/tabwriter"
)
// ComputeRelationships will analyze a full topology and generate all of the
// downstream/upstream relationship information for the services it contains.
func (t *Topology) ComputeRelationships() []Relationship {
var out []Relationship
for _, cluster := range t.Clusters {
for _, n := range cluster.Nodes {
for _, s := range n.Services {
for _, u := range s.Upstreams {
out = append(out, Relationship{
Caller: s,
Upstream: u,
})
}
}
}
}
return out
}
// RenderRelationships will take the output of ComputeRelationships and display
// it in tabular form.
func RenderRelationships(ships []Relationship) string {
var buf bytes.Buffer
w := tabwriter.NewWriter(&buf, 0, 0, 3, ' ', tabwriter.Debug)
fmt.Fprintf(w, "DOWN\tnode\tservice\tport\tUP\tservice\t\n")
for _, r := range ships {
fmt.Fprintf(w,
"%s\t%s\t%s\t%d\t%s\t%s\t\n",
r.downCluster(),
r.Caller.Node.ID().String(),
r.Caller.ID.String(),
r.Upstream.LocalPort,
r.upCluster(),
r.Upstream.ID.String(),
)
}
fmt.Fprintf(w, "\t\t\t\t\t\t\n")
w.Flush()
return buf.String()
}
type Relationship struct {
Caller *Service
Upstream *Upstream
}
func (r Relationship) String() string {
return fmt.Sprintf(
"%s on %s in %s via :%d => %s in %s",
r.Caller.ID.String(),
r.Caller.Node.ID().String(),
r.downCluster(),
r.Upstream.LocalPort,
r.Upstream.ID.String(),
r.upCluster(),
)
}
func (r Relationship) downCluster() string {
return r.Caller.Node.Cluster
}
func (r Relationship) upCluster() string {
return r.Upstream.Cluster
}

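A usage sketch for the two helpers above (assumes a compiled *Topology in hand, e.g. via Sprawl.Topology()):

package main

import (
	"fmt"

	"github.com/hashicorp/consul/testing/deployer/topology"
)

// dumpRelationships prints the upstream/downstream table for a topology.
func dumpRelationships(topo *topology.Topology) {
	ships := topo.ComputeRelationships()
	fmt.Print(topology.RenderRelationships(ships))
}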
View File

@ -12,6 +12,8 @@ import (
"sort"
"github.com/hashicorp/consul/api"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)
type Topology struct {
@ -108,6 +110,45 @@ func (c *Config) Cluster(name string) *Cluster {
return nil
}
// DisableNode is a no-op if the node is already disabled.
func (c *Config) DisableNode(clusterName string, nid NodeID) (bool, error) {
cluster := c.Cluster(clusterName)
if cluster == nil {
return false, fmt.Errorf("no such cluster: %q", clusterName)
}
for _, n := range cluster.Nodes {
if n.ID() == nid {
if n.Disabled {
return false, nil
}
n.Disabled = true
return true, nil
}
}
return false, fmt.Errorf("expected to find nodeID %q in cluster %q", nid.String(), clusterName)
}
// EnableNode is a no-op if the node is already enabled.
func (c *Config) EnableNode(clusterName string, nid NodeID) (bool, error) {
cluster := c.Cluster(clusterName)
if cluster == nil {
return false, fmt.Errorf("no such cluster: %q", clusterName)
}
for _, n := range cluster.Nodes {
if n.ID() == nid {
if !n.Disabled {
return false, nil
}
n.Disabled = false
return true, nil
}
}
return false, fmt.Errorf("expected to find nodeID %q in cluster %q", nid.String(), clusterName)
}
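A sketch of the intended flip-and-relaunch flow; the Relaunch entry point is an assumption here, so adjust it to whatever relaunch method the sprawl exposes:

package main

import (
	"testing"

	"github.com/hashicorp/consul/testing/deployer/sprawl"
	"github.com/hashicorp/consul/testing/deployer/topology"
)

// disableAndRelaunch turns a node off and relaunches the topology.
func disableAndRelaunch(t *testing.T, sp *sprawl.Sprawl, cluster string, nid topology.NodeID) {
	cfg := sp.Config() // deep copy of the active topology config
	changed, err := cfg.DisableNode(cluster, nid)
	if err != nil {
		t.Fatal(err)
	}
	if !changed {
		return // already disabled
	}
	// Assumption: Sprawl exposes a Relaunch method taking the edited config.
	if err := sp.Relaunch(cfg); err != nil {
		t.Fatal(err)
	}
}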
type Network struct {
Type string // lan/wan ; empty means lan
Name string // logical name
@ -198,6 +239,14 @@ type Cluster struct {
// components.
Enterprise bool `json:",omitempty"`
// Services is a forward declaration of V2 services. This goes hand in hand
// the V2Services field on the Service (instance) struct.
//
// Use of this is optional. If you elect not to use it, then v2 Services
// definitions are inferred from the list of service instances defined on
// the nodes in this cluster.
Services map[ServiceID]*pbcatalog.Service `json:",omitempty"`
// Nodes is the definition of the nodes (agent-less and agent-ful).
Nodes []*Node
@ -212,11 +261,16 @@ type Cluster struct {
// create multiple peer clusters with identical datacenter names.
Datacenter string
// InitialConfigEntries is a convenience mechanism to have some config
// entries created after the servers start up but before the rest of the
// topology comes up.
InitialConfigEntries []api.ConfigEntry `json:",omitempty"`
// InitialResources is a convenience mechanism to have some resources
// created after the servers start up but before the rest of the topology
// comes up.
InitialResources []*pbresource.Resource `json:",omitempty"`
// TLSVolumeName is the docker volume name containing the various certs
// generated by 'consul tls cert create'
//
@ -227,6 +281,10 @@ type Cluster struct {
//
// Denormalized during compile.
Peerings map[string]*PeerCluster `json:",omitempty"`
// EnableV2 activates V2 on the servers. If any node in the cluster needs
// V2 this will be turned on automatically.
EnableV2 bool `json:",omitempty"`
}
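A hedged sketch of a cluster that opts into v2 and forward-declares a multi-port Service (the Name field, NewServiceID constructor, and pbcatalog field values are assumptions based on the v2beta1 schema):

	cluster := &topology.Cluster{
		Name:     "dc1",
		EnableV2: true,
		Services: map[topology.ServiceID]*pbcatalog.Service{
			topology.NewServiceID("static-server", "default", "default"): {
				Workloads: &pbcatalog.WorkloadSelector{Names: []string{"static-server"}},
				Ports: []*pbcatalog.ServicePort{
					{TargetPort: "http", Protocol: pbcatalog.Protocol_PROTOCOL_HTTP},
					{TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
				},
			},
		},
	}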
func (c *Cluster) inheritFromExisting(existing *Cluster) {
@ -422,9 +480,18 @@ const (
NodeKindDataplane NodeKind = "dataplane"
)
type NodeVersion string
const (
NodeVersionUnknown NodeVersion = ""
NodeVersionV1 NodeVersion = "v1"
NodeVersionV2 NodeVersion = "v2"
)
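For example, a dataplane node opts into v2 catalog constructs along these lines (a sketch; other required fields such as addresses are omitted, and the node name is hypothetical):

	node := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: "default",
		Name:      "dc1-client1",
	}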
// TODO: rename pod
type Node struct {
Kind NodeKind
Version NodeVersion
Partition string // will be not empty
Name string // logical name
@ -462,6 +529,9 @@ func (n *Node) DockerName() string {
}
func (n *Node) ExposedPort(internalPort int) int {
if internalPort == 0 {
return 0
}
return n.usedPorts[internalPort]
}
@ -568,6 +638,14 @@ func (n *Node) PublicProxyPort() int {
panic("node has no public network")
}
func (n *Node) IsV2() bool {
return n.Version == NodeVersionV2
}
func (n *Node) IsV1() bool {
return !n.IsV2()
}
func (n *Node) IsServer() bool {
return n.Kind == NodeKindServer
}
@ -639,12 +717,46 @@ type ServiceAndNode struct {
Node *Node
}
// TODO(rb): really this should now be called "workload" or "instance"
type Service struct {
ID ServiceID
Image string
// Port is the v1 single-port of this service.
Port int `json:",omitempty"`
// Ports is the v2 multi-port list for this service.
//
// This only applies for multi-port (v2).
Ports map[string]int `json:",omitempty"`
// ExposedPort is the exposed docker port corresponding to 'Port'.
ExposedPort int `json:",omitempty"`
// ExposedPorts are the exposed docker ports corresponding to 'Ports'.
//
// This only applies for multi-port (v2).
ExposedPorts map[string]int `json:",omitempty"`
// V2Services contains service names (which are merged with the tenancy
// info from ID) to resolve services in the Services map in the Cluster
// definition.
//
// If omitted it is inferred that the ID.Name field is the singular service
// for this workload.
//
// This only applies for multi-port (v2).
V2Services []string `json:",omitempty"`
// WorkloadIdentities contains named WorkloadIdentities to assign to this
// workload.
//
// If omitted it is inferred that the ID.Name field is the singular
// identity for this workload.
//
// This only applies for multi-port (v2).
WorkloadIdentities []string `json:",omitempty"`
Disabled bool `json:",omitempty"` // TODO
// TODO: expose extra port here?
@ -667,17 +779,44 @@ type Service struct {
Upstreams []*Upstream
// denormalized at topology compile
Node *Node `json:"-"`
NodeVersion NodeVersion `json:"-"`
Workload string `json:"-"`
}
func (s *Service) PortOrDefault(name string) int {
if len(s.Ports) > 0 {
return s.Ports[name]
}
return s.Port
}
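The lookup behavior, as a quick sketch (values are illustrative):

	v2 := &topology.Service{Ports: map[string]int{"http": 8080, "grpc": 9090}}
	_ = v2.PortOrDefault("grpc") // 9090, from the multi-port map

	v1 := &topology.Service{Port: 8080}
	_ = v1.PortOrDefault("grpc") // 8080; the name is ignored for single-port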
func (s *Service) IsV2() bool {
return s.NodeVersion == NodeVersionV2
}
func (s *Service) IsV1() bool {
return !s.IsV2()
}
func (s *Service) inheritFromExisting(existing *Service) {
s.ExposedPort = existing.ExposedPort
s.ExposedPorts = existing.ExposedPorts
s.ExposedEnvoyAdminPort = existing.ExposedEnvoyAdminPort
}
func (s *Service) ports() []int {
var out []int
if len(s.Ports) > 0 {
seen := make(map[int]struct{})
for _, port := range s.Ports {
if _, ok := seen[port]; !ok {
// A workload may expose the same port number under multiple names, so only add it once.
seen[port] = struct{}{}
out = append(out, port)
}
}
} else if s.Port > 0 {
out = append(out, s.Port)
}
if s.EnvoyAdminPort > 0 {
@ -714,8 +853,27 @@ func (s *Service) Validate() error {
if s.Image == "" && !s.IsMeshGateway {
return fmt.Errorf("service image is required")
}
if s.IsV2() {
if len(s.Ports) > 0 && s.Port > 0 {
return fmt.Errorf("cannot specify both singleport and multiport on service in v2")
}
if s.Port > 0 {
s.Ports = map[string]int{"legacy": s.Port}
s.Port = 0
}
for name, port := range s.Ports {
if port <= 0 {
return fmt.Errorf("service has invalid port %q", name)
}
}
} else {
if len(s.Ports) > 0 {
return fmt.Errorf("cannot specify mulitport on service in v1")
}
if s.Port <= 0 {
return fmt.Errorf("service has invalid port")
}
}
if s.DisableServiceMesh && s.IsMeshGateway {
return fmt.Errorf("cannot disable service mesh and still run a mesh gateway")
@ -758,6 +916,11 @@ type Upstream struct {
LocalAddress string `json:",omitempty"` // defaults to 127.0.0.1
LocalPort int
Peer string `json:",omitempty"`
// PortName is the named port of this Upstream to route traffic to.
//
// This only applies for multi-port (v2).
PortName string `json:",omitempty"`
// TODO: what about mesh gateway mode overrides?
// computed at topology compile
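Putting the v2 fields together, a hedged sketch of a multi-port workload with a named-port upstream (the image, ports, and NewServiceID constructor are illustrative assumptions; Upstream.ID is the upstream's ServiceID):

	svc := &topology.Service{
		ID:    topology.NewServiceID("static-client", "default", "default"),
		Image: "fortio/fortio", // hypothetical
		Ports: map[string]int{"http": 8080},
		Upstreams: []*topology.Upstream{{
			ID:        topology.NewServiceID("static-server", "default", "default"),
			LocalPort: 5000,
			PortName:  "http", // route to the named port on the v2 service
		}},
	}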

View File

@ -0,0 +1,57 @@
#!/bin/bash
set -euo pipefail
unset CDPATH
cd "$(dirname "$0")"
###
# This script will update the default image names to the latest released versions of
# Consul, Consul Enterprise, and Consul Dataplane.
#
# For Envoy, it will interrogate the latest version of Consul for its maximum supported
# Envoy version and use that.
###
readonly consul_latest="hashicorp/consul:latest"
readonly dataplane_latest="hashicorp/consul-dataplane:latest"
# First pull current versions of some images.
docker pull "$consul_latest" || true
docker pull "$dataplane_latest" || true
# Read the version from the easy ones directly out of their image labels.
consul_version="$(docker image inspect "$consul_latest" | jq -r '.[0].Config.Labels."org.opencontainers.image.version"')"
dataplane_version="$(docker image inspect "$dataplane_latest" | jq -r '.[0].Config.Labels.version')"
# Check to see what version of Envoy consul wants.
docker rm -f consul-envoy-check &>/dev/null || true
docker run -d --name consul-envoy-check "$consul_latest"
envoy_version=""
while true; do
# We have to retry in case consul doesn't fully start up before we get here.
envoy_version="$(docker exec consul-envoy-check sh -c 'wget -q localhost:8500/v1/agent/self -O -' | jq -r '.xDS.SupportedProxies.envoy[0]')"
if [[ -n "$envoy_version" ]]; then
break
fi
done
docker rm -f consul-envoy-check &>/dev/null || true
cat > topology/default_versions.go <<EOF
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
// Code generated by update-latest-versions.sh. DO NOT EDIT.
package topology
const (
DefaultConsulImage = "hashicorp/consul:${consul_version}"
DefaultConsulEnterpriseImage = "hashicorp/consul-enterprise:${consul_version}-ent"
DefaultEnvoyImage = "envoyproxy/envoy:v${envoy_version}"
DefaultDataplaneImage = "hashicorp/consul-dataplane:${dataplane_version}"
)
EOF
# gofmt -s -w topology/default_versions.go

View File

@ -4,15 +4,63 @@
package util
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"net/url"
"strconv"
"github.com/hashicorp/consul-server-connection-manager/discovery"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-hclog"
"google.golang.org/grpc"
)
func DialExposedGRPCConn(
ctx context.Context, logger hclog.Logger,
exposedServerGRPCPort int, token string,
tlsConfig *tls.Config,
) (*grpc.ClientConn, func(), error) {
if exposedServerGRPCPort <= 0 {
return nil, nil, fmt.Errorf("cannot dial server grpc on port %d", exposedServerGRPCPort)
}
cfg := discovery.Config{
Addresses: "127.0.0.1",
GRPCPort: exposedServerGRPCPort,
// Disable server watch because we only need to get server IPs once.
ServerWatchDisabled: true,
TLS: tlsConfig,
Credentials: discovery.Credentials{
Type: discovery.CredentialsTypeStatic,
Static: discovery.StaticTokenCredential{
Token: token,
},
},
}
watcher, err := discovery.NewWatcher(ctx, cfg, logger.Named("consul-server-connection-manager"))
if err != nil {
return nil, nil, err
}
go watcher.Run()
// We recycle the gRPC connection from the discovery client because it
// should have all the necessary dial options, including the resolver
// that continuously updates Consul server addresses. Otherwise a lot
// of code from consul-server-connection-manager would need to be
// duplicated.
state, err := watcher.State()
if err != nil {
watcher.Stop()
return nil, nil, fmt.Errorf("unable to get connection manager state: %w", err)
}
return state.GRPCConn, func() { watcher.Stop() }, nil
}
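A hedged usage sketch (the exposed port, token, and nil TLS config are assumptions; pbresource.NewResourceServiceClient is the generated gRPC client constructor):

	conn, done, err := util.DialExposedGRPCConn(ctx, logger, exposedGRPCPort, token, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer done()
	client := pbresource.NewResourceServiceClient(conn)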
func ProxyNotPooledAPIClient(proxyPort int, containerIP string, containerPort int, token string) (*api.Client, error) {
return proxyAPIClient(cleanhttp.DefaultTransport(), proxyPort, containerIP, containerPort, token)
}

View File

@ -0,0 +1,81 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package util
import (
"fmt"
"github.com/hashicorp/consul/proto-public/pbresource"
)
// functions copied out of consul:internal/resource/*.go
// IDToString returns a string representation of pbresource.ID. This should not
// be relied upon nor parsed and is provided just for debugging and logging
// reasons.
//
// This format should be aligned with ReferenceToString and
// (ReferenceKey).String.
func IDToString(id *pbresource.ID) string {
s := fmt.Sprintf("%s/%s/%s",
TypeToString(id.Type),
TenancyToString(id.Tenancy),
id.Name,
)
if id.Uid != "" {
return s + "?uid=" + id.Uid
}
return s
}
// ReferenceToString returns a string representation of pbresource.Reference.
// This should not be relied upon nor parsed and is provided just for debugging
// and logging reasons.
//
// This format should be aligned with IDToString and (ReferenceKey).String.
func ReferenceToString(ref *pbresource.Reference) string {
s := fmt.Sprintf("%s/%s/%s",
TypeToString(ref.Type),
TenancyToString(ref.Tenancy),
ref.Name,
)
if ref.Section != "" {
return s + "?section=" + ref.Section
}
return s
}
// TenancyToString returns a string representation of pbresource.Tenancy. This
// should not be relied upon nor parsed and is provided just for debugging and
// logging reasons.
func TenancyToString(tenancy *pbresource.Tenancy) string {
return fmt.Sprintf("%s.%s.%s", tenancy.Partition, tenancy.PeerName, tenancy.Namespace)
}
// TypeToString returns a string representation of pbresource.Type. This should
// not be relied upon nor parsed and is provided just for debugging and logging
// reasons.
func TypeToString(typ *pbresource.Type) string {
return ToGVK(typ)
}
func ToGVK(resourceType *pbresource.Type) string {
return fmt.Sprintf("%s.%s.%s", resourceType.Group, resourceType.GroupVersion, resourceType.Kind)
}
// EqualType compares two resource types for equality without reflection.
func EqualType(a, b *pbresource.Type) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
return a.Group == b.Group &&
a.GroupVersion == b.GroupVersion &&
a.Kind == b.Kind
}
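For orientation, a sketch of the rendered format (the type and tenancy values are illustrative):

	id := &pbresource.ID{
		Type:    &pbresource.Type{Group: "catalog", GroupVersion: "v2beta1", Kind: "Service"},
		Tenancy: &pbresource.Tenancy{Partition: "default", PeerName: "local", Namespace: "default"},
		Name:    "static-server",
	}
	fmt.Println(util.IDToString(id))
	// Output: catalog.v2beta1.Service/default.local.default/static-server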

View File

@ -0,0 +1,91 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package util
import (
"context"
"fmt"
"github.com/hashicorp/consul/proto-public/pbresource"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
)
// DecodedResource is a generic holder to contain an original Resource and its
// decoded contents.
type DecodedResource[T proto.Message] struct {
Resource *pbresource.Resource
Data T
}
func (d *DecodedResource[T]) GetResource() *pbresource.Resource {
if d == nil {
return nil
}
return d.Resource
}
func (d *DecodedResource[T]) GetData() T {
if d == nil {
var zero T
return zero
}
return d.Data
}
// Decode will generically decode the provided resource into a 2-field
// structure that holds onto the original Resource and the decoded contents.
//
// Returns an ErrDataParse on unmarshalling errors.
func Decode[T proto.Message](res *pbresource.Resource) (*DecodedResource[T], error) {
var zero T
data := zero.ProtoReflect().New().Interface().(T)
// Check that there is data to unmarshal.
if res.Data != nil {
if err := res.Data.UnmarshalTo(data); err != nil {
return nil, NewErrDataParse(data, err)
}
}
return &DecodedResource[T]{
Resource: res,
Data: data,
}, nil
}
// GetDecodedResource will generically read the requested resource using the
// client and either return nil on a NotFound or decode the response value.
func GetDecodedResource[T proto.Message](ctx context.Context, client pbresource.ResourceServiceClient, id *pbresource.ID) (*DecodedResource[T], error) {
rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: id})
switch {
case status.Code(err) == codes.NotFound:
return nil, nil
case err != nil:
return nil, err
}
return Decode[T](rsp.Resource)
}
type ErrDataParse struct {
TypeName string
Wrapped error
}
func NewErrDataParse(msg protoreflect.ProtoMessage, err error) ErrDataParse {
return ErrDataParse{
TypeName: string(msg.ProtoReflect().Descriptor().FullName()),
Wrapped: err,
}
}
func (err ErrDataParse) Error() string {
return fmt.Sprintf("error parsing resource data as type %q: %s", err.TypeName, err.Wrapped.Error())
}
func (err ErrDataParse) Unwrap() error {
return err.Wrapped
}
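Tying the helpers together, a hedged sketch of reading and decoding a v2 Service (client and id as in the sketches above; the concrete type parameter is an assumption):

	dec, err := util.GetDecodedResource[*pbcatalog.Service](ctx, client, id)
	if err != nil {
		t.Fatal(err)
	}
	if dec == nil {
		t.Fatal("resource not found")
	}
	t.Logf("service has %d ports", len(dec.GetData().Ports))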