mirror of https://github.com/status-im/consul.git
test: remove v2 integration tests (#21056)
This removes any references to v2 integration tests from: - envoy integration tests (test/integration/connect) - container tests (test/integration/consul-container) - deployer tests (test-integ)
This commit is contained in:
parent
b5b3a63183
commit
502346029d
|
@ -1,504 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package catalogv2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
|
||||
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
||||
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
|
||||
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||
|
||||
"github.com/hashicorp/consul/test-integ/topoutil"
|
||||
)
|
||||
|
||||
// TestSplitterFeaturesL7ExplicitDestinations launches a one-cluster topology
// in which a static-client workload declares explicit destinations to a
// "static-server" service whose traffic is split between static-server-v1
// and static-server-v2, then asserts both legs of the split are reachable
// and that the observed traffic distribution matches the expected 10/90
// weighting on every port (tcp, grpc, http, http2).
//
// In CE only the default/default tenancy is exercised; in Enterprise the
// part1/default, part1/nsa, and default/nsa tenancies are added.
func TestSplitterFeaturesL7ExplicitDestinations(t *testing.T) {
	// The default tenancy is always tested; Enterprise adds the rest below.
	tenancies := []*pbresource.Tenancy{
		{
			Partition: "default",
			Namespace: "default",
		},
	}
	if utils.IsEnterprise() {
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "part1",
			Namespace: "default",
		})
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "part1",
			Namespace: "nsa",
		})
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "default",
			Namespace: "nsa",
		})
	}
	cfg := testSplitterFeaturesL7ExplicitDestinationsCreator{
		tenancies: tenancies,
	}.NewConfig(t)

	// Launch the whole topology; sprawltest registers cleanup on t.
	sp := sprawltest.Launch(t, cfg)

	var (
		asserter = topoutil.NewAsserter(sp)

		topo    = sp.Topology()
		cluster = topo.Clusters["dc1"]

		// ships enumerates every caller->destination pair in the topology.
		ships = topo.ComputeRelationships()
	)

	clientV2 := sp.ResourceServiceClientForCluster(cluster.Name)

	t.Log(topology.RenderRelationships(ships))

	for _, tenancy := range tenancies {
		// Make sure things are in v2.
		libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, "static-client", tenancy, 1)
		libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, "static-server-v1", tenancy, 1)
		libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, "static-server-v2", tenancy, 1)
		// The parent "static-server" service has no workloads of its own;
		// it only exists as the split's parent ref.
		libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, "static-server", tenancy, 0)
	}

	// Check relationships
	for _, ship := range ships {
		t.Run("relationship: "+ship.String(), func(t *testing.T) {
			var (
				wrk  = ship.Caller
				dest = ship.Destination
			)

			// Derive the envoy cluster-name prefixes for each leg of the
			// split by substituting the versioned service names into the
			// destination's ID (topology.ID is a value type, so these are
			// independent copies).
			v1ID := dest.ID
			v1ID.Name = "static-server-v1"
			v1ClusterPrefix := clusterPrefix(dest.PortName, v1ID, dest.Cluster)

			v2ID := dest.ID
			v2ID.Name = "static-server-v2"
			v2ClusterPrefix := clusterPrefix(dest.PortName, v2ID, dest.Cluster)

			// we expect 2 clusters, one for each leg of the split
			asserter.DestinationEndpointStatus(t, wrk, v1ClusterPrefix+".", "HEALTHY", 1)
			asserter.DestinationEndpointStatus(t, wrk, v2ClusterPrefix+".", "HEALTHY", 1)

			// Both should be possible.
			v1Expect := fmt.Sprintf("%s::%s", cluster.Name, v1ID.String())
			v2Expect := fmt.Sprintf("%s::%s", cluster.Name, v2ID.String())

			// Verify the observed traffic distribution per protocol; the
			// expected 10/90 weights mirror the route weights configured in
			// topologyConfigAddNodes.
			switch dest.PortName {
			case "tcp":
				asserter.CheckBlankspaceNameTrafficSplitViaTCP(t, wrk, dest,
					map[string]int{v1Expect: 10, v2Expect: 90})
			case "grpc":
				asserter.CheckBlankspaceNameTrafficSplitViaGRPC(t, wrk, dest,
					map[string]int{v1Expect: 10, v2Expect: 90})
			case "http":
				asserter.CheckBlankspaceNameTrafficSplitViaHTTP(t, wrk, dest, false, "/",
					map[string]int{v1Expect: 10, v2Expect: 90})
			case "http2":
				asserter.CheckBlankspaceNameTrafficSplitViaHTTP(t, wrk, dest, true, "/",
					map[string]int{v1Expect: 10, v2Expect: 90})
			default:
				t.Fatalf("unexpected port name: %s", dest.PortName)
			}
		})
	}
}
|
||||
|
||||
// testSplitterFeaturesL7ExplicitDestinationsCreator builds the topology
// config for TestSplitterFeaturesL7ExplicitDestinations, adding one full set
// of nodes and resources per tenancy.
type testSplitterFeaturesL7ExplicitDestinationsCreator struct {
	// tenancies lists the partition/namespace pairs to provision.
	tenancies []*pbresource.Tenancy
}
|
||||
|
||||
func (c testSplitterFeaturesL7ExplicitDestinationsCreator) NewConfig(t *testing.T) *topology.Config {
|
||||
const clusterName = "dc1"
|
||||
|
||||
servers := topoutil.NewTopologyServerSet(clusterName+"-server", 3, []string{clusterName, "wan"}, nil)
|
||||
|
||||
cluster := &topology.Cluster{
|
||||
Enterprise: utils.IsEnterprise(),
|
||||
Name: clusterName,
|
||||
Nodes: servers,
|
||||
Services: make(map[topology.ID]*pbcatalog.Service),
|
||||
}
|
||||
|
||||
lastNode := 0
|
||||
nodeName := func() string {
|
||||
lastNode++
|
||||
return fmt.Sprintf("%s-box%d", clusterName, lastNode)
|
||||
}
|
||||
|
||||
for _, ten := range c.tenancies {
|
||||
c.topologyConfigAddNodes(t, cluster, nodeName, ten)
|
||||
}
|
||||
|
||||
return &topology.Config{
|
||||
Images: utils.TargetImages(),
|
||||
Networks: []*topology.Network{
|
||||
{Name: clusterName},
|
||||
{Name: "wan", Type: "wan"},
|
||||
},
|
||||
Clusters: []*topology.Cluster{
|
||||
cluster,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// topologyConfigAddNodes adds, for a single tenancy, everything the splitter
// test needs in the cluster: the static-server-v1/-v2 dataplane nodes, the
// static-client node (with four explicit destinations per tenancy), traffic
// permissions for both server identities, explicit Service definitions with
// fixed virtual ports, and HTTP/GRPC/TCP routes splitting "static-server"
// traffic 10% to v1 and 90% to v2.
func (c testSplitterFeaturesL7ExplicitDestinationsCreator) topologyConfigAddNodes(
	t *testing.T,
	cluster *topology.Cluster,
	nodeName func() string,
	currentTenancy *pbresource.Tenancy,
) {
	clusterName := cluster.Name

	// newID scopes a service/workload name to the given tenancy.
	newID := func(name string, tenancy *pbresource.Tenancy) topology.ID {
		return topology.ID{
			Partition: tenancy.Partition,
			Namespace: tenancy.Namespace,
			Name:      name,
		}
	}

	// Work on a fresh copy of the tenancy for the resources created below.
	tenancy := &pbresource.Tenancy{
		Partition: currentTenancy.Partition,
		Namespace: currentTenancy.Namespace,
	}

	// v1 backend: serves both its own service and the parent "static-server".
	v1ServerNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: currentTenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewBlankspaceWorkloadWithDefaults(
				clusterName,
				newID("static-server-v1", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.V2Services = []string{"static-server-v1", "static-server"}
					wrk.Meta = map[string]string{
						"version": "v1",
					}
					wrk.WorkloadIdentity = "static-server-v1"
				},
			),
		},
	}
	// v2 backend: mirror of the v1 node with the other version label.
	v2ServerNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: currentTenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewBlankspaceWorkloadWithDefaults(
				clusterName,
				newID("static-server-v2", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.V2Services = []string{"static-server-v2", "static-server"}
					wrk.Meta = map[string]string{
						"version": "v2",
					}
					wrk.WorkloadIdentity = "static-server-v2"
				},
			),
		},
	}
	// Client node: one explicit destination per protocol (http/http2/grpc/tcp)
	// to the parent "static-server" service in every tenancy, each on its own
	// local port (4 ports per tenancy, hence the i*4 stride).
	clientNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: currentTenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewBlankspaceWorkloadWithDefaults(
				clusterName,
				newID("static-client", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.V2Services = []string{"static-client"}
					for i, tenancy := range c.tenancies {
						wrk.Destinations = append(wrk.Destinations, &topology.Destination{

							ID:           newID("static-server", tenancy),
							PortName:     "http",
							LocalAddress: "0.0.0.0", // needed for an assertion
							LocalPort:    5000 + (i * 4),
						},
							&topology.Destination{

								ID:           newID("static-server", tenancy),
								PortName:     "http2",
								LocalAddress: "0.0.0.0", // needed for an assertion
								LocalPort:    5001 + (i * 4),
							},
							&topology.Destination{

								ID:           newID("static-server", tenancy),
								PortName:     "grpc",
								LocalAddress: "0.0.0.0", // needed for an assertion
								LocalPort:    5002 + (i * 4),
							},
							&topology.Destination{

								ID:           newID("static-server", tenancy),
								PortName:     "tcp",
								LocalAddress: "0.0.0.0", // needed for an assertion
								LocalPort:    5003 + (i * 4),
							},
						)
					}
				},
			),
		},
	}

	// Allow the static-client identity from every tenancy to reach both
	// server versions.
	var sources []*pbauth.Source
	for _, ten := range c.tenancies {
		sources = append(sources, &pbauth.Source{
			IdentityName: "static-client",
			Namespace:    ten.Namespace,
			Partition:    ten.Partition,
		})
	}

	v1TrafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbauth.TrafficPermissionsType,
			Name:    "static-server-v1-perms",
			Tenancy: tenancy,
		},
	}, &pbauth.TrafficPermissions{
		Destination: &pbauth.Destination{
			IdentityName: "static-server-v1",
		},
		Action: pbauth.Action_ACTION_ALLOW,
		Permissions: []*pbauth.Permission{{
			Sources: sources,
		}},
	})

	v2TrafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbauth.TrafficPermissionsType,
			Name:    "static-server-v2-perms",
			Tenancy: tenancy,
		},
	}, &pbauth.TrafficPermissions{
		Destination: &pbauth.Destination{
			IdentityName: "static-server-v2",
		},
		Action: pbauth.Action_ACTION_ALLOW,
		Permissions: []*pbauth.Permission{{
			Sources: sources,
		}},
	})

	// portsFunc builds the standard port set; offset shifts every virtual
	// port so parent and backend services get distinct virtual ports.
	portsFunc := func(offset uint32) []*pbcatalog.ServicePort {
		return []*pbcatalog.ServicePort{
			{
				TargetPort:  "http",
				VirtualPort: 8005 + offset,
				Protocol:    pbcatalog.Protocol_PROTOCOL_HTTP,
			},
			{
				TargetPort:  "http2",
				VirtualPort: 8006 + offset,
				Protocol:    pbcatalog.Protocol_PROTOCOL_HTTP2,
			},
			{
				TargetPort:  "grpc",
				VirtualPort: 9005 + offset,
				Protocol:    pbcatalog.Protocol_PROTOCOL_GRPC,
			},
			{
				TargetPort:  "tcp",
				VirtualPort: 10005 + offset,
				Protocol:    pbcatalog.Protocol_PROTOCOL_TCP,
			},
			{
				TargetPort: "mesh",
				Protocol:   pbcatalog.Protocol_PROTOCOL_MESH,
			},
		}
	}

	// Differ parent and backend virtual ports to verify we route to each correctly.
	parentServicePorts := portsFunc(0)
	backendServicePorts := portsFunc(100)

	// Explicitly define backend services s.t. they are not inferred from workload,
	// which would assign random virtual ports.
	cluster.Services[newID("static-client", tenancy)] = &pbcatalog.Service{
		Ports: []*pbcatalog.ServicePort{
			{
				TargetPort: "mesh",
				Protocol:   pbcatalog.Protocol_PROTOCOL_MESH,
			},
		},
	}
	cluster.Services[newID("static-server", tenancy)] = &pbcatalog.Service{
		Ports: parentServicePorts,
	}
	cluster.Services[newID("static-server-v1", tenancy)] = &pbcatalog.Service{
		Ports: backendServicePorts,
	}
	cluster.Services[newID("static-server-v2", tenancy)] = &pbcatalog.Service{
		Ports: backendServicePorts,
	}

	// HTTP route: parent refs cover both http (by virtual port) and http2
	// (by target port); 10/90 split to v1/v2.
	httpServerRoute := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbmesh.HTTPRouteType,
			Name:    "static-server-http-route",
			Tenancy: tenancy,
		},
	}, &pbmesh.HTTPRoute{
		ParentRefs: []*pbmesh.ParentReference{
			{
				Ref: &pbresource.Reference{
					Type:    pbcatalog.ServiceType,
					Name:    "static-server",
					Tenancy: tenancy,
				},
				Port: "8005", // use mix of target and virtual parent ports
			},
			{
				Ref: &pbresource.Reference{
					Type:    pbcatalog.ServiceType,
					Name:    "static-server",
					Tenancy: tenancy,
				},
				Port: "http2",
			},
		},
		Rules: []*pbmesh.HTTPRouteRule{{
			BackendRefs: []*pbmesh.HTTPBackendRef{
				{
					BackendRef: &pbmesh.BackendReference{
						Ref: &pbresource.Reference{
							Type:    pbcatalog.ServiceType,
							Name:    "static-server-v1",
							Tenancy: tenancy,
						},
					},
					Weight: 10,
				},
				{
					BackendRef: &pbmesh.BackendReference{
						Ref: &pbresource.Reference{
							Type:    pbcatalog.ServiceType,
							Name:    "static-server-v2",
							Tenancy: tenancy,
						},
					},
					Weight: 90,
				},
			},
		}},
	})

	// GRPC route: same 10/90 split.
	grpcServerRoute := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbmesh.GRPCRouteType,
			Name:    "static-server-grpc-route",
			Tenancy: tenancy,
		},
	}, &pbmesh.GRPCRoute{
		ParentRefs: []*pbmesh.ParentReference{{
			Ref: &pbresource.Reference{
				Type:    pbcatalog.ServiceType,
				Name:    "static-server",
				Tenancy: tenancy,
			},
			Port: "grpc",
		}},
		Rules: []*pbmesh.GRPCRouteRule{{
			BackendRefs: []*pbmesh.GRPCBackendRef{
				{
					BackendRef: &pbmesh.BackendReference{
						Ref: &pbresource.Reference{
							Type:    pbcatalog.ServiceType,
							Name:    "static-server-v1",
							Tenancy: tenancy,
						},
						Port: "9105", // use mix of virtual and target (inferred from parent) ports
					},
					Weight: 10,
				},
				{
					BackendRef: &pbmesh.BackendReference{
						Ref: &pbresource.Reference{
							Type:    pbcatalog.ServiceType,
							Name:    "static-server-v2",
							Tenancy: tenancy,
						},
					},
					Weight: 90,
				},
			},
		}},
	})

	// TCP route: same 10/90 split, exercising virtual and target port refs.
	tcpServerRoute := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbmesh.TCPRouteType,
			Name:    "static-server-tcp-route",
			Tenancy: tenancy,
		},
	}, &pbmesh.TCPRoute{
		ParentRefs: []*pbmesh.ParentReference{{
			Ref: &pbresource.Reference{
				Type:    pbcatalog.ServiceType,
				Name:    "static-server",
				Tenancy: tenancy,
			},
			Port: "10005", // use virtual parent port
		}},
		Rules: []*pbmesh.TCPRouteRule{{
			BackendRefs: []*pbmesh.TCPBackendRef{
				{
					BackendRef: &pbmesh.BackendReference{
						Ref: &pbresource.Reference{
							Type:    pbcatalog.ServiceType,
							Name:    "static-server-v1",
							Tenancy: tenancy,
						},
						Port: "10105", // use explicit virtual port
					},
					Weight: 10,
				},
				{
					BackendRef: &pbmesh.BackendReference{
						Ref: &pbresource.Reference{
							Type:    pbcatalog.ServiceType,
							Name:    "static-server-v2",
							Tenancy: tenancy,
						},
						Port: "tcp", // use explicit target port
					},
					Weight: 90,
				},
			},
		}},
	})

	cluster.Nodes = append(cluster.Nodes,
		clientNode,
		v1ServerNode,
		v2ServerNode,
	)

	cluster.InitialResources = append(cluster.InitialResources,
		v1TrafficPerms,
		v2TrafficPerms,
		httpServerRoute,
		grpcServerRoute,
		tcpServerRoute,
	)
}
|
|
@ -1,315 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package catalogv2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
|
||||
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
||||
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
|
||||
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||
|
||||
"github.com/hashicorp/consul/test-integ/topoutil"
|
||||
)
|
||||
|
||||
// TestBasicL4ExplicitDestinations sets up the following:
//
// - 1 cluster (no peering / no wanfed)
// - 3 servers in that cluster
// - v2 arch is activated
// - for each tenancy, only using v2 constructs:
//   - a client with one explicit destination to a single port service
//   - a client with multiple explicit destinations to multiple ports of the
//     same multiport service
//
// When this test is executed in CE it will only use the default/default
// tenancy.
//
// When this test is executed in Enterprise it will additionally test the same
// things within these tenancies:
//
// - part1/default
// - default/nsa
// - part1/nsa
func TestBasicL4ExplicitDestinations(t *testing.T) {

	// The default tenancy is always tested; Enterprise adds the rest below.
	tenancies := []*pbresource.Tenancy{
		{
			Partition: "default",
			Namespace: "default",
		},
	}
	if utils.IsEnterprise() {
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "part1",
			Namespace: "default",
		})
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "part1",
			Namespace: "nsa",
		})
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "default",
			Namespace: "nsa",
		})
	}

	cfg := testBasicL4ExplicitDestinationsCreator{
		tenancies: tenancies,
	}.NewConfig(t)

	// Launch the topology; sprawltest registers cleanup on t.
	sp := sprawltest.Launch(t, cfg)

	var (
		asserter = topoutil.NewAsserter(sp)

		topo    = sp.Topology()
		cluster = topo.Clusters["dc1"]

		// ships enumerates every caller->destination pair in the topology.
		ships = topo.ComputeRelationships()
	)

	clientV2 := sp.ResourceServiceClientForCluster(cluster.Name)

	t.Log(topology.RenderRelationships(ships))

	// Make sure things are in v2.
	for _, ten := range tenancies {
		for _, name := range []string{
			"single-server",
			"single-client",
			"multi-server",
			"multi-client",
		} {
			libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, name, ten, 1)
		}
	}

	// Check relationships
	for _, ship := range ships {
		t.Run("relationship: "+ship.String(), func(t *testing.T) {
			var (
				wrk  = ship.Caller
				dest = ship.Destination
			)

			clusterPrefix := clusterPrefixForDestination(dest)

			// The destination's envoy cluster must be healthy, the local
			// listener must echo, and traffic must land on the right fortio.
			asserter.DestinationEndpointStatus(t, wrk, clusterPrefix+".", "HEALTHY", 1)
			asserter.HTTPServiceEchoes(t, wrk, dest.LocalPort, "")
			asserter.FortioFetch2FortioName(t, wrk, dest, cluster.Name, dest.ID)
		})
	}
}
|
||||
|
||||
// testBasicL4ExplicitDestinationsCreator builds the topology config for
// TestBasicL4ExplicitDestinations, adding one full set of nodes and
// resources per tenancy.
type testBasicL4ExplicitDestinationsCreator struct {
	// tenancies lists the partition/namespace pairs to provision.
	tenancies []*pbresource.Tenancy
}
|
||||
|
||||
func (c testBasicL4ExplicitDestinationsCreator) NewConfig(t *testing.T) *topology.Config {
|
||||
const clusterName = "dc1"
|
||||
|
||||
servers := topoutil.NewTopologyServerSet(clusterName+"-server", 3, []string{clusterName, "wan"}, nil)
|
||||
|
||||
cluster := &topology.Cluster{
|
||||
Enterprise: utils.IsEnterprise(),
|
||||
Name: clusterName,
|
||||
Nodes: servers,
|
||||
}
|
||||
|
||||
lastNode := 0
|
||||
nodeName := func() string {
|
||||
lastNode++
|
||||
return fmt.Sprintf("%s-box%d", clusterName, lastNode)
|
||||
}
|
||||
|
||||
for _, ten := range c.tenancies {
|
||||
c.topologyConfigAddNodes(t, cluster, nodeName, ten)
|
||||
}
|
||||
|
||||
return &topology.Config{
|
||||
Images: utils.TargetImages(),
|
||||
Networks: []*topology.Network{
|
||||
{Name: clusterName},
|
||||
{Name: "wan", Type: "wan"},
|
||||
},
|
||||
Clusters: []*topology.Cluster{
|
||||
cluster,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// topologyConfigAddNodes adds, for a single tenancy, the four workload nodes
// used by TestBasicL4ExplicitDestinations — a single-port server/client pair
// and a multi-port server/client pair — plus traffic permissions allowing
// each client identity (from every tenancy) to reach its server.
func (c testBasicL4ExplicitDestinationsCreator) topologyConfigAddNodes(
	t *testing.T,
	cluster *topology.Cluster,
	nodeName func() string,
	tenancy *pbresource.Tenancy,
) {
	clusterName := cluster.Name

	// newID scopes a service/workload name to the given tenancy.
	newID := func(name string, tenancy *pbresource.Tenancy) topology.ID {
		return topology.ID{
			Partition: tenancy.Partition,
			Namespace: tenancy.Namespace,
			Name:      name,
		}
	}

	singleportServerNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: tenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("single-server", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.WorkloadIdentity = "single-server-identity"
				},
			),
		},
	}
	// One explicit http destination per tenancy, each on its own local port.
	var singleportDestinations []*topology.Destination
	for i, ten := range c.tenancies {
		singleportDestinations = append(singleportDestinations, &topology.Destination{
			ID:           newID("single-server", ten),
			PortName:     "http",
			LocalAddress: "0.0.0.0", // needed for an assertion
			LocalPort:    5000 + i,
		})
	}
	singleportClientNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: tenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("single-client", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					delete(wrk.Ports, "grpc")  // v2 mode turns this on, so turn it off
					delete(wrk.Ports, "http2") // v2 mode turns this on, so turn it off
					wrk.WorkloadIdentity = "single-client-identity"
					wrk.Destinations = singleportDestinations
				},
			),
		},
	}
	// Allow the single-client identity from every tenancy to reach the
	// single-server identity in this tenancy.
	var sources []*pbauth.Source
	for _, ten := range c.tenancies {
		sources = append(sources, &pbauth.Source{
			IdentityName: "single-client-identity",
			Namespace:    ten.Namespace,
			Partition:    ten.Partition,
		})
	}
	singleportTrafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbauth.TrafficPermissionsType,
			Name:    "single-server-perms",
			Tenancy: tenancy,
		},
	}, &pbauth.TrafficPermissions{
		Destination: &pbauth.Destination{
			IdentityName: "single-server-identity",
		},
		Action: pbauth.Action_ACTION_ALLOW,
		Permissions: []*pbauth.Permission{{
			Sources: sources,
		}},
	})

	multiportServerNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: tenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("multi-server", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.WorkloadIdentity = "multi-server-identity"
				},
			),
		},
	}
	// Two explicit destinations per tenancy (http and http2), so the local
	// port stride is 2 per tenancy.
	var multiportDestinations []*topology.Destination
	for i, ten := range c.tenancies {
		multiportDestinations = append(multiportDestinations, &topology.Destination{
			ID:           newID("multi-server", ten),
			PortName:     "http",
			LocalAddress: "0.0.0.0", // needed for an assertion
			LocalPort:    5000 + 2*i,
		})
		multiportDestinations = append(multiportDestinations, &topology.Destination{
			ID:           newID("multi-server", ten),
			PortName:     "http2",
			LocalAddress: "0.0.0.0", // needed for an assertion
			LocalPort:    5000 + 2*i + 1,
		})
	}
	multiportClientNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: tenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("multi-client", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.WorkloadIdentity = "multi-client-identity"
					wrk.Destinations = multiportDestinations
				},
			),
		},
	}

	// Allow the multi-client identity from every tenancy to reach the
	// multi-server identity in this tenancy.
	var multiportSources []*pbauth.Source
	for _, ten := range c.tenancies {
		multiportSources = append(multiportSources, &pbauth.Source{
			IdentityName: "multi-client-identity",
			Namespace:    ten.Namespace,
			Partition:    ten.Partition,
		})
	}
	multiportTrafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbauth.TrafficPermissionsType,
			Name:    "multi-server-perms",
			Tenancy: tenancy,
		},
	}, &pbauth.TrafficPermissions{
		Destination: &pbauth.Destination{
			IdentityName: "multi-server-identity",
		},
		Action: pbauth.Action_ACTION_ALLOW,
		Permissions: []*pbauth.Permission{{
			Sources: multiportSources,
		}},
	})

	cluster.Nodes = append(cluster.Nodes,
		singleportClientNode,
		singleportServerNode,
		multiportClientNode,
		multiportServerNode,
	)

	cluster.InitialResources = append(cluster.InitialResources,
		singleportTrafficPerms,
		multiportTrafficPerms,
	)
}
|
|
@ -1,31 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package catalogv2
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||
)
|
||||
|
||||
// clusterPrefixForUpstream returns the envoy cluster-name prefix for the
// given destination.
//
// Deprecated: use clusterPrefixForDestination instead.
func clusterPrefixForUpstream(dest *topology.Destination) string {
	return clusterPrefixForDestination(dest)
}
|
||||
|
||||
func clusterPrefixForDestination(dest *topology.Destination) string {
|
||||
if dest.Peer == "" {
|
||||
return clusterPrefix(dest.PortName, dest.ID, dest.Cluster)
|
||||
} else {
|
||||
return strings.Join([]string{dest.ID.Name, dest.ID.Namespace, dest.Peer, "external"}, ".")
|
||||
}
|
||||
}
|
||||
|
||||
func clusterPrefix(port string, svcID topology.ID, cluster string) string {
|
||||
if svcID.PartitionOrDefault() == "default" {
|
||||
return strings.Join([]string{port, svcID.Name, svcID.Namespace, cluster, "internal"}, ".")
|
||||
} else {
|
||||
return strings.Join([]string{port, svcID.Name, svcID.Namespace, svcID.Partition, cluster, "internal-v1"}, ".")
|
||||
}
|
||||
}
|
|
@ -1,244 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package catalogv2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
|
||||
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
||||
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
|
||||
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||
|
||||
"github.com/hashicorp/consul/test-integ/topoutil"
|
||||
)
|
||||
|
||||
// TestBasicL4ImplicitDestinations sets up the following:
|
||||
//
|
||||
// - 1 cluster (no peering / no wanfed)
|
||||
// - 3 servers in that cluster
|
||||
// - v2 arch is activated
|
||||
// - for each tenancy, only using v2 constructs:
|
||||
// - a server exposing 2 tcp ports
|
||||
// - a client with transparent proxy enabled and no explicit upstreams
|
||||
// - a traffic permission granting the client access to the service on all ports
|
||||
//
|
||||
// When this test is executed in CE it will only use the default/default
|
||||
// tenancy.
|
||||
//
|
||||
// When this test is executed in Enterprise it will additionally test the same
|
||||
// things within these tenancies:
|
||||
//
|
||||
// - part1/default
|
||||
// - default/nsa
|
||||
// - part1/nsa
|
||||
func TestBasicL4ImplicitDestinations(t *testing.T) {
|
||||
tenancies := []*pbresource.Tenancy{{
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
}}
|
||||
if utils.IsEnterprise() {
|
||||
tenancies = append(tenancies, &pbresource.Tenancy{
|
||||
Namespace: "default",
|
||||
Partition: "nsa",
|
||||
})
|
||||
tenancies = append(tenancies, &pbresource.Tenancy{
|
||||
Namespace: "part1",
|
||||
Partition: "default",
|
||||
})
|
||||
tenancies = append(tenancies, &pbresource.Tenancy{
|
||||
Namespace: "part1",
|
||||
Partition: "nsa",
|
||||
})
|
||||
}
|
||||
|
||||
cfg := testBasicL4ImplicitDestinationsCreator{
|
||||
tenancies: tenancies,
|
||||
}.NewConfig(t)
|
||||
|
||||
sp := sprawltest.Launch(t, cfg)
|
||||
|
||||
var (
|
||||
asserter = topoutil.NewAsserter(sp)
|
||||
|
||||
topo = sp.Topology()
|
||||
cluster = topo.Clusters["dc1"]
|
||||
|
||||
ships = topo.ComputeRelationships()
|
||||
)
|
||||
|
||||
clientV2 := sp.ResourceServiceClientForCluster(cluster.Name)
|
||||
|
||||
t.Log(topology.RenderRelationships(ships))
|
||||
|
||||
// Make sure things are truly in v2 not v1.
|
||||
for _, tenancy := range tenancies {
|
||||
for _, name := range []string{
|
||||
"static-server",
|
||||
"static-client",
|
||||
} {
|
||||
libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, name, tenancy, 1)
|
||||
}
|
||||
}
|
||||
|
||||
// Check relationships
|
||||
for _, ship := range ships {
|
||||
t.Run("relationship: "+ship.String(), func(t *testing.T) {
|
||||
var (
|
||||
wrk = ship.Caller
|
||||
dest = ship.Destination
|
||||
)
|
||||
|
||||
clusterPrefix := clusterPrefixForDestination(dest)
|
||||
|
||||
asserter.DestinationEndpointStatus(t, wrk, clusterPrefix+".", "HEALTHY", 1)
|
||||
if dest.LocalPort > 0 {
|
||||
asserter.HTTPServiceEchoes(t, wrk, dest.LocalPort, "")
|
||||
}
|
||||
asserter.FortioFetch2FortioName(t, wrk, dest, cluster.Name, dest.ID)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testBasicL4ImplicitDestinationsCreator builds the topology config for
// TestBasicL4ImplicitDestinations, adding one full set of nodes and
// resources per tenancy.
type testBasicL4ImplicitDestinationsCreator struct {
	// tenancies lists the partition/namespace pairs to provision.
	tenancies []*pbresource.Tenancy
}
|
||||
|
||||
func (c testBasicL4ImplicitDestinationsCreator) NewConfig(t *testing.T) *topology.Config {
|
||||
const clusterName = "dc1"
|
||||
|
||||
servers := topoutil.NewTopologyServerSet(clusterName+"-server", 3, []string{clusterName, "wan"}, nil)
|
||||
|
||||
cluster := &topology.Cluster{
|
||||
Enterprise: utils.IsEnterprise(),
|
||||
Name: clusterName,
|
||||
Nodes: servers,
|
||||
}
|
||||
|
||||
lastNode := 0
|
||||
nodeName := func() string {
|
||||
lastNode++
|
||||
return fmt.Sprintf("%s-box%d", clusterName, lastNode)
|
||||
}
|
||||
|
||||
for i := range c.tenancies {
|
||||
c.topologyConfigAddNodes(t, cluster, nodeName, c.tenancies[i])
|
||||
}
|
||||
|
||||
return &topology.Config{
|
||||
Images: utils.TargetImages(),
|
||||
Networks: []*topology.Network{
|
||||
{Name: clusterName},
|
||||
{Name: "wan", Type: "wan"},
|
||||
},
|
||||
Clusters: []*topology.Cluster{
|
||||
cluster,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// topologyConfigAddNodes adds one static-server node and one static-client
// node (both v2 dataplane nodes in the given tenancy's partition) to the
// cluster, wires the client to every same-partition static-server via implied
// (transparent-proxy) destinations, and registers a traffic permission
// allowing static-client from every tenancy to reach static-server.
func (c testBasicL4ImplicitDestinationsCreator) topologyConfigAddNodes(
	t *testing.T,
	cluster *topology.Cluster,
	nodeName func() string,
	tenancy *pbresource.Tenancy,
) {
	clusterName := cluster.Name

	// newID builds a topology ID in the supplied tenancy.
	newID := func(name string, tenancy *pbresource.Tenancy) topology.ID {
		return topology.ID{
			Partition: tenancy.Partition,
			Namespace: tenancy.Namespace,
			Name:      name,
		}
	}

	// Destination workload: a fortio "static-server" running in tproxy mode.
	serverNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: tenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("static-server", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.EnableTransparentProxy = true
				},
			),
		},
	}

	// Build the implied upstream list: every static-server port in every
	// same-partition tenancy.
	var impliedDestinations []*topology.Destination
	for _, ten := range c.tenancies {
		// For now we include all services in the same partition as implicit upstreams.
		if tenancy.Partition != ten.Partition {
			continue
		}
		impliedDestinations = append(impliedDestinations, &topology.Destination{
			ID:       newID("static-server", ten),
			PortName: "http",
		})
		impliedDestinations = append(impliedDestinations, &topology.Destination{
			ID:       newID("static-server", ten),
			PortName: "http2",
		})
	}

	// Source workload: a fortio "static-client" that reaches the servers only
	// through the implied destinations above.
	clientNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: tenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("static-client", tenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.EnableTransparentProxy = true
					wrk.ImpliedDestinations = impliedDestinations
				},
			),
		},
	}

	// Allow static-client from every configured tenancy to call static-server.
	var sources []*pbauth.Source
	for _, ten := range c.tenancies {
		sources = append(sources, &pbauth.Source{
			IdentityName: "static-client",
			Namespace:    ten.Namespace,
			Partition:    ten.Partition,
		})
	}

	trafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
		Id: &pbresource.ID{
			Type:    pbauth.TrafficPermissionsType,
			Name:    "static-server-perms",
			Tenancy: tenancy,
		},
	}, &pbauth.TrafficPermissions{
		Destination: &pbauth.Destination{
			IdentityName: "static-server",
		},
		Action: pbauth.Action_ACTION_ALLOW,
		Permissions: []*pbauth.Permission{{
			Sources: sources,
		}},
	})

	cluster.Nodes = append(cluster.Nodes,
		clientNode,
		serverNode,
	)

	cluster.InitialResources = append(cluster.InitialResources,
		trafficPerms,
	)
}
|
|
@ -1,459 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
package catalogv2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/test-integ/topoutil"
|
||||
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
|
||||
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
||||
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
|
||||
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||
)
|
||||
|
||||
// testCase pairs a set of traffic permissions with the request outcomes they
// are expected to produce.
type testCase struct {
	permissions []*permission // traffic permissions to install for the case
	result      []*testResult // expected request outcomes under those permissions
}
|
||||
|
||||
// permission declaratively describes one TrafficPermissions resource to
// generate via newTrafficPermissions.
type permission struct {
	allow                bool // ALLOW action when true, DENY otherwise
	excludeSource        bool // carve static-client out via an ExcludeSource
	includeSourceTenancy bool // match any identity in the source tenancy (empty IdentityName)
	excludeSourceTenancy bool // NOTE(review): not read by newTrafficPermissions in this chunk — confirm intent
	destRules            []*destRules // optional L4/L7 destination rules
}
|
||||
|
||||
// destRules describes one DestinationRule: the values the rule matches plus
// any exclusion rules carved out of it.
type destRules struct {
	values   *ruleValues   // match criteria for the rule itself
	excludes []*ruleValues // criteria excluded from the rule
}
|
||||
|
||||
// ruleValues holds the raw match criteria used to build destination rules and
// exclude-permission rules.
type ruleValues struct {
	portNames []string // workload port names to match
	path      string   // exact path match
	pathPref  string   // path prefix match
	pathReg   string   // path regex match
	headers   []string // header names that must be present
	methods   []string // HTTP methods to match
}
|
||||
|
||||
// testResult is one expected request outcome: the request shape to send and
// whether it should be rejected (403) or succeed (200).
type testResult struct {
	fail    bool              // expect http.StatusForbidden when true, http.StatusOK otherwise
	port    string            // restrict the check to this destination port ("" = all ports)
	path    string            // request path to fetch
	headers map[string]string // request headers to send
}
|
||||
|
||||
func newTrafficPermissions(p *permission, srcTenancy *pbresource.Tenancy) *pbauth.TrafficPermissions {
|
||||
sources := []*pbauth.Source{{
|
||||
IdentityName: "static-client",
|
||||
Namespace: srcTenancy.Namespace,
|
||||
Partition: srcTenancy.Partition,
|
||||
}}
|
||||
destinationRules := []*pbauth.DestinationRule{}
|
||||
if p != nil {
|
||||
srcId := "static-client"
|
||||
if p.includeSourceTenancy {
|
||||
srcId = ""
|
||||
}
|
||||
if p.excludeSource {
|
||||
sources = []*pbauth.Source{{
|
||||
IdentityName: srcId,
|
||||
Namespace: srcTenancy.Namespace,
|
||||
Partition: srcTenancy.Partition,
|
||||
Exclude: []*pbauth.ExcludeSource{{
|
||||
IdentityName: "static-client",
|
||||
Namespace: srcTenancy.Namespace,
|
||||
Partition: srcTenancy.Partition,
|
||||
}},
|
||||
}}
|
||||
} else {
|
||||
sources = []*pbauth.Source{{
|
||||
IdentityName: srcId,
|
||||
Namespace: srcTenancy.Namespace,
|
||||
Partition: srcTenancy.Partition,
|
||||
}}
|
||||
}
|
||||
for _, dr := range p.destRules {
|
||||
destRule := &pbauth.DestinationRule{}
|
||||
if dr.values != nil {
|
||||
destRule.PathExact = dr.values.path
|
||||
destRule.PathPrefix = dr.values.pathPref
|
||||
destRule.PathRegex = dr.values.pathReg
|
||||
destRule.Methods = dr.values.methods
|
||||
destRule.PortNames = dr.values.portNames
|
||||
destRule.Headers = []*pbauth.DestinationRuleHeader{}
|
||||
for _, h := range dr.values.headers {
|
||||
destRule.Headers = append(destRule.Headers, &pbauth.DestinationRuleHeader{
|
||||
Name: h,
|
||||
Present: true,
|
||||
})
|
||||
}
|
||||
}
|
||||
var excludePermissions []*pbauth.ExcludePermissionRule
|
||||
for _, e := range dr.excludes {
|
||||
eRule := &pbauth.ExcludePermissionRule{
|
||||
PathExact: e.path,
|
||||
PathPrefix: e.pathPref,
|
||||
PathRegex: e.pathReg,
|
||||
Methods: e.methods,
|
||||
PortNames: e.portNames,
|
||||
}
|
||||
eRule.Headers = []*pbauth.DestinationRuleHeader{}
|
||||
for _, h := range e.headers {
|
||||
eRule.Headers = append(eRule.Headers, &pbauth.DestinationRuleHeader{
|
||||
Name: h,
|
||||
Present: true,
|
||||
})
|
||||
}
|
||||
excludePermissions = append(excludePermissions, eRule)
|
||||
}
|
||||
destRule.Exclude = excludePermissions
|
||||
destinationRules = append(destinationRules, destRule)
|
||||
}
|
||||
}
|
||||
action := pbauth.Action_ACTION_ALLOW
|
||||
if !p.allow {
|
||||
action = pbauth.Action_ACTION_DENY
|
||||
}
|
||||
return &pbauth.TrafficPermissions{
|
||||
Destination: &pbauth.Destination{
|
||||
IdentityName: "static-server",
|
||||
},
|
||||
Action: action,
|
||||
Permissions: []*pbauth.Permission{{
|
||||
Sources: sources,
|
||||
DestinationRules: destinationRules,
|
||||
}},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// This test runs a gauntlet of traffic permissions updates and validates that
// the request status codes match the intended rules.
func TestL7TrafficPermissions(t *testing.T) {
	// Each case describes the permissions to install and the expected outcome
	// (200 vs 403) for a set of request shapes.
	testcases := map[string]testCase{
		// L4 permissions
		"basic": {permissions: []*permission{{allow: true}}, result: []*testResult{{fail: false}}},
		"client-exclude": {permissions: []*permission{{allow: true, includeSourceTenancy: true, excludeSource: true}}, result: []*testResult{{fail: true}}},
		"allow-all-client-in-tenancy": {permissions: []*permission{{allow: true, includeSourceTenancy: true}}, result: []*testResult{{fail: false}}},
		"only-one-port": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{portNames: []string{"http"}}}}}}, result: []*testResult{{fail: true, port: "http2"}}},
		"exclude-port": {permissions: []*permission{{allow: true, destRules: []*destRules{{excludes: []*ruleValues{{portNames: []string{"http"}}}}}}}, result: []*testResult{{fail: true, port: "http"}}},
		// L7 permissions
		"methods": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{methods: []string{"POST", "PUT", "PATCH", "DELETE", "CONNECT", "HEAD", "OPTIONS", "TRACE"}, pathPref: "/"}}}}},
			// fortio fetch2 is configured to GET
			result: []*testResult{{fail: true}}},
		"headers": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{headers: []string{"a", "b"}, pathPref: "/"}}}}},
			result: []*testResult{{fail: true}, {fail: true, headers: map[string]string{"a": "1"}}, {fail: false, headers: map[string]string{"a": "1", "b": "2"}}}},
		"path-prefix-all": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/", methods: []string{"GET"}}}}}}, result: []*testResult{{fail: false}}},
		"method-exclude": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/"}, excludes: []*ruleValues{{methods: []string{"GET"}}}}}}},
			// fortio fetch2 is configured to GET
			result: []*testResult{{fail: true}}},
		"exclude-paths-and-headers": {permissions: []*permission{{allow: true, destRules: []*destRules{
			{
				values:   &ruleValues{pathPref: "/f", headers: []string{"a"}},
				excludes: []*ruleValues{{headers: []string{"b"}, path: "/foobar"}},
			}}}},
			result: []*testResult{
				{fail: false, path: "foobar", headers: map[string]string{"a": "1"}},
				{fail: false, path: "foo", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: true, path: "foobar", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: false, path: "foo", headers: map[string]string{"a": "1"}},
				{fail: true, path: "foo", headers: map[string]string{"b": "2"}},
				{fail: true, path: "baz", headers: map[string]string{"a": "1"}},
			}},
		"exclude-paths-or-headers": {permissions: []*permission{{allow: true, destRules: []*destRules{
			{values: &ruleValues{pathPref: "/f", headers: []string{"a"}}, excludes: []*ruleValues{{headers: []string{"b"}}, {path: "/foobar"}}}}}},
			result: []*testResult{
				{fail: true, path: "foobar", headers: map[string]string{"a": "1"}},
				{fail: true, path: "foo", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: true, path: "foobar", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: false, path: "foo", headers: map[string]string{"a": "1"}},
				{fail: false, path: "foo", headers: map[string]string{"a": "1"}},
				{fail: true, path: "baz", port: "http", headers: map[string]string{"a": "1"}},
			}},
		"path-or-header": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/bar"}}, {values: &ruleValues{headers: []string{"b"}}}}}},
			result: []*testResult{
				{fail: false, path: "bar"},
				{fail: false, path: "foo", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: false, path: "bar", headers: map[string]string{"b": "2"}},
				{fail: true, path: "foo", headers: map[string]string{"a": "1"}},
			}},
		"path-and-header": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/bar", headers: []string{"b"}}}}}},
			result: []*testResult{
				{fail: true, path: "bar"},
				{fail: true, path: "foo", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: false, path: "bar", headers: map[string]string{"b": "2"}},
				{fail: true, path: "foo", headers: map[string]string{"a": "1"}},
			}},
		"path-regex-exclude": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/"}, excludes: []*ruleValues{{pathReg: ".*dns.*"}}}}}},
			result: []*testResult{{fail: true, path: "fortio/rest/dns"}, {fail: false, path: "fortio/rest/status"}}},
		"header-include-exclude-by-port": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/", headers: []string{"experiment1", "experiment2"}}, excludes: []*ruleValues{{portNames: []string{"http2"}, headers: []string{"experiment1"}}}}}}},
			result: []*testResult{{fail: true, port: "http2", headers: map[string]string{"experiment1": "a", "experiment2": "b"}},
				{fail: false, port: "http", headers: map[string]string{"experiment1": "a", "experiment2": "b"}},
				{fail: true, port: "http2", headers: map[string]string{"experiment2": "b"}},
				{fail: true, port: "http", headers: map[string]string{"experiment3": "c"}},
			}},
		"two-tp-or": {permissions: []*permission{{allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/bar"}}}}, {allow: true, destRules: []*destRules{{values: &ruleValues{headers: []string{"b"}}}}}},
			result: []*testResult{
				{fail: false, path: "bar"},
				{fail: false, path: "foo", headers: map[string]string{"a": "1", "b": "2"}},
				{fail: false, path: "bar", headers: map[string]string{"b": "2"}},
				{fail: true, path: "foo", headers: map[string]string{"a": "1"}},
			}},
	}
	if utils.IsEnterprise() {
		// DENY and ALLOW permissions
		testcases["deny-cancel-allow"] = testCase{permissions: []*permission{{allow: true}, {allow: false}}, result: []*testResult{{fail: true}}}
		testcases["l4-deny-l7-allow"] = testCase{permissions: []*permission{{allow: false}, {allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/"}}}}}, result: []*testResult{{fail: true}, {fail: true, path: "test"}}}
		testcases["l7-deny-l4-allow"] = testCase{permissions: []*permission{{allow: true}, {allow: true, destRules: []*destRules{{values: &ruleValues{pathPref: "/"}}}}, {allow: false, destRules: []*destRules{{values: &ruleValues{pathPref: "/foo"}}}}},
			result: []*testResult{{fail: false}, {fail: false, path: "test"}, {fail: true, path: "foo-bar"}}}
	}

	// Always test the default tenancy; enterprise also exercises ap1/ns1.
	tenancies := []*pbresource.Tenancy{
		{
			Partition: "default",
			Namespace: "default",
		},
	}
	if utils.IsEnterprise() {
		tenancies = append(tenancies, &pbresource.Tenancy{
			Partition: "ap1",
			Namespace: "ns1",
		})
	}
	cfg := testL7TrafficPermissionsCreator{tenancies}.NewConfig(t)
	targetImage := utils.TargetImages()
	imageName := targetImage.Consul
	if utils.IsEnterprise() {
		imageName = targetImage.ConsulEnterprise
	}
	t.Log("running with target image: " + imageName)

	sp := sprawltest.Launch(t, cfg)

	asserter := topoutil.NewAsserter(sp)

	topo := sp.Topology()
	cluster := topo.Clusters["dc1"]
	ships := topo.ComputeRelationships()

	clientV2 := sp.ResourceServiceClientForCluster(cluster.Name)

	// Make sure services exist
	for _, tenancy := range tenancies {
		for _, name := range []string{
			"static-server",
			"static-client",
		} {
			libassert.CatalogV2ServiceHasEndpointCount(t, clientV2, name, tenancy, len(tenancies))
		}
	}
	var initialTrafficPerms []*pbresource.Resource
	for testName, tc := range testcases {
		// Delete old TP and write new one for a new test case
		mustDeleteTestResources(t, clientV2, initialTrafficPerms)
		initialTrafficPerms = []*pbresource.Resource{}
		// Install the case's permissions for every source/destination tenancy
		// combination.
		for _, st := range tenancies {
			for _, dt := range tenancies {
				for i, p := range tc.permissions {
					newTrafficPerms := sprawltest.MustSetResourceData(t, &pbresource.Resource{
						Id: &pbresource.ID{
							Type:    pbauth.TrafficPermissionsType,
							Name:    "static-server-perms" + strconv.Itoa(i) + "-" + st.Namespace + "-" + st.Partition,
							Tenancy: dt,
						},
					}, newTrafficPermissions(p, st))
					mustWriteTestResource(t, clientV2, newTrafficPerms)
					initialTrafficPerms = append(initialTrafficPerms, newTrafficPerms)
				}
			}
		}
		t.Log(initialTrafficPerms)
		// Wait for the resource updates to go through and Envoy to be ready
		time.Sleep(1 * time.Second)
		// Check the default server workload envoy config for RBAC filters matching testcase criteria
		serverWorkload := cluster.WorkloadsByID(topology.ID{
			Partition: "default",
			Namespace: "default",
			Name:      "static-server",
		})
		asserter.AssertEnvoyHTTPrbacFiltersContainIntentions(t, serverWorkload[0])
		// Check relationships
		for _, ship := range ships {
			t.Run("case: "+testName+":"+ship.Destination.PortName+":("+ship.Caller.ID.Partition+"/"+ship.Caller.ID.Namespace+
				")("+ship.Destination.ID.Partition+"/"+ship.Destination.ID.Namespace+")", func(t *testing.T) {
				var (
					wrk  = ship.Caller
					dest = ship.Destination
				)
				for _, res := range tc.result {
					// A result pinned to one port only applies to that port.
					if res.port != "" && res.port != ship.Destination.PortName {
						continue
					}
					dest.ID.Name = "static-server"
					destClusterPrefix := clusterPrefix(dest.PortName, dest.ID, dest.Cluster)
					asserter.DestinationEndpointStatus(t, wrk, destClusterPrefix+".", "HEALTHY", len(tenancies))
					status := http.StatusForbidden
					if res.fail == false {
						status = http.StatusOK
					}
					t.Log("Test request:"+res.path, res.headers, status)
					asserter.FortioFetch2ServiceStatusCodes(t, wrk, dest, res.path, res.headers, []int{status})
				}
			})
		}
	}
}
|
||||
|
||||
func mustWriteTestResource(t *testing.T, client pbresource.ResourceServiceClient, res *pbresource.Resource) {
|
||||
retryer := &retry.Timer{Timeout: time.Minute, Wait: time.Second}
|
||||
rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: res})
|
||||
require.NoError(t, err)
|
||||
retry.RunWith(retryer, t, func(r *retry.R) {
|
||||
readRsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: rsp.Resource.Id})
|
||||
require.NoError(r, err, "error reading %s", rsp.Resource.Id.Name)
|
||||
require.NotNil(r, readRsp)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func mustDeleteTestResources(t *testing.T, client pbresource.ResourceServiceClient, resources []*pbresource.Resource) {
|
||||
if len(resources) == 0 {
|
||||
return
|
||||
}
|
||||
retryer := &retry.Timer{Timeout: time.Minute, Wait: time.Second}
|
||||
for _, res := range resources {
|
||||
retry.RunWith(retryer, t, func(r *retry.R) {
|
||||
_, err := client.Delete(context.Background(), &pbresource.DeleteRequest{Id: res.Id})
|
||||
if status.Code(err) == codes.NotFound {
|
||||
return
|
||||
}
|
||||
if err != nil && status.Code(err) != codes.Aborted {
|
||||
r.Stop(fmt.Errorf("failed to delete the resource: %w", err))
|
||||
return
|
||||
}
|
||||
require.NoError(r, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testL7TrafficPermissionsCreator builds the deployer topology used by the L7
// traffic permissions test.
type testL7TrafficPermissionsCreator struct {
	// tenancies is the set of partition/namespace pairs to provision.
	tenancies []*pbresource.Tenancy
}
|
||||
|
||||
func (c testL7TrafficPermissionsCreator) NewConfig(t *testing.T) *topology.Config {
|
||||
const clusterName = "dc1"
|
||||
|
||||
servers := topoutil.NewTopologyServerSet(clusterName+"-server", 1, []string{clusterName, "wan"}, nil)
|
||||
|
||||
cluster := &topology.Cluster{
|
||||
Enterprise: utils.IsEnterprise(),
|
||||
Name: clusterName,
|
||||
Nodes: servers,
|
||||
}
|
||||
|
||||
lastNode := 0
|
||||
nodeName := func() string {
|
||||
lastNode++
|
||||
return fmt.Sprintf("%s-box%d", clusterName, lastNode)
|
||||
}
|
||||
|
||||
for _, st := range c.tenancies {
|
||||
for _, dt := range c.tenancies {
|
||||
c.topologyConfigAddNodes(cluster, nodeName, st, dt)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return &topology.Config{
|
||||
Images: utils.TargetImages(),
|
||||
Networks: []*topology.Network{
|
||||
{Name: clusterName},
|
||||
{Name: "wan", Type: "wan"},
|
||||
},
|
||||
Clusters: []*topology.Cluster{
|
||||
cluster,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// topologyConfigAddNodes adds a static-server node (in destinationTenancy's
// partition) and a static-client node (in sourceTenancy's partition) to the
// cluster. The client declares explicit destinations to the server on both
// the "http" and "http2" ports so requests can be asserted per port.
func (c testL7TrafficPermissionsCreator) topologyConfigAddNodes(
	cluster *topology.Cluster,
	nodeName func() string,
	sourceTenancy *pbresource.Tenancy,
	destinationTenancy *pbresource.Tenancy,
) {
	clusterName := cluster.Name

	// newID builds a topology ID in the supplied tenancy.
	newID := func(name string, tenancy *pbresource.Tenancy) topology.ID {
		return topology.ID{
			Partition: tenancy.Partition,
			Namespace: tenancy.Namespace,
			Name:      name,
		}
	}

	// Destination workload: a fortio "static-server" with default settings.
	serverNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: destinationTenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("static-server", destinationTenancy),
				topology.NodeVersionV2,
				nil,
			),
		},
	}

	// Source workload: a fortio "static-client" with explicit destinations to
	// the server's http and http2 ports on fixed local ports.
	clientNode := &topology.Node{
		Kind:      topology.NodeKindDataplane,
		Version:   topology.NodeVersionV2,
		Partition: sourceTenancy.Partition,
		Name:      nodeName(),
		Workloads: []*topology.Workload{
			topoutil.NewFortioWorkloadWithDefaults(
				clusterName,
				newID("static-client", sourceTenancy),
				topology.NodeVersionV2,
				func(wrk *topology.Workload) {
					wrk.Destinations = append(wrk.Destinations, &topology.Destination{
						ID:           newID("static-server", destinationTenancy),
						PortName:     "http",
						LocalAddress: "0.0.0.0", // needed for an assertion
						LocalPort:    5000,
					},
						&topology.Destination{
							ID:           newID("static-server", destinationTenancy),
							PortName:     "http2",
							LocalAddress: "0.0.0.0", // needed for an assertion
							LocalPort:    5001,
						},
					)
					wrk.WorkloadIdentity = "static-client"
				},
			),
		},
	}

	cluster.Nodes = append(cluster.Nodes,
		clientNode,
		serverNode,
	)
}
|
|
@ -1,154 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package tenancy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
)
|
||||
|
||||
// This duplicates a subset of internal/resource/resourcetest/client.go so
|
||||
// we're not importing consul internals integration tests.
|
||||
//
|
||||
// TODO: Move to a general package if used more widely.
|
||||
|
||||
// ClientOption configures a Client during NewClient.
type ClientOption func(*Client)
|
||||
// WithACLToken returns a ClientOption that attaches the given ACL token to
// every request made through the client (see Client.Context).
func WithACLToken(token string) ClientOption {
	return func(c *Client) {
		c.token = token
	}
}
|
||||
|
||||
// Client decorates a resource service client with helper functions to assist
// with integration testing.
type Client struct {
	pbresource.ResourceServiceClient

	timeout time.Duration // overall deadline for retried operations
	wait    time.Duration // pause between retry attempts
	token   string        // optional ACL token attached to each request
}
|
||||
|
||||
func NewClient(client pbresource.ResourceServiceClient, opts ...ClientOption) *Client {
|
||||
c := &Client{
|
||||
ResourceServiceClient: client,
|
||||
timeout: 7 * time.Second,
|
||||
wait: 50 * time.Millisecond,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(c)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// NewClientWithACLToken is shorthand for NewClient(client, WithACLToken(token)).
func NewClientWithACLToken(client pbresource.ResourceServiceClient, token string) *Client {
	return NewClient(client, WithACLToken(token))
}
|
||||
|
||||
// SetRetryerConfig overrides the retry timeout and per-attempt wait used by
// the client's polling helpers.
func (client *Client) SetRetryerConfig(timeout time.Duration, wait time.Duration) {
	client.timeout = timeout
	client.wait = wait
}
|
||||
|
||||
// retry runs fn repeatedly until it succeeds or the client's timeout elapses.
func (client *Client) retry(t testutil.TestingTB, fn func(r *retry.R)) {
	t.Helper()
	retryer := &retry.Timer{Timeout: client.timeout, Wait: client.wait}
	retry.RunWith(retryer, t, fn)
}
|
||||
|
||||
func (client *Client) Context(t testutil.TestingTB) context.Context {
|
||||
ctx := testutil.TestContext(t)
|
||||
|
||||
if client.token != "" {
|
||||
md := metadata.New(map[string]string{
|
||||
"x-consul-token": client.token,
|
||||
})
|
||||
ctx = metadata.NewOutgoingContext(ctx, md)
|
||||
}
|
||||
|
||||
return ctx
|
||||
}
|
||||
|
||||
// RequireResourceNotFound asserts that reading id fails with gRPC NotFound.
func (client *Client) RequireResourceNotFound(t testutil.TestingTB, id *pbresource.ID) {
	t.Helper()

	rsp, err := client.Read(client.Context(t), &pbresource.ReadRequest{Id: id})
	require.Error(t, err)
	require.Equal(t, codes.NotFound, status.Code(err))
	require.Nil(t, rsp)
}
|
||||
|
||||
// RequireResourceExists asserts that id can be read and returns the resource.
func (client *Client) RequireResourceExists(t testutil.TestingTB, id *pbresource.ID) *pbresource.Resource {
	t.Helper()

	rsp, err := client.Read(client.Context(t), &pbresource.ReadRequest{Id: id})
	require.NoError(t, err, "error reading %s with type %s", id.Name, ToGVK(id.Type))
	require.NotNil(t, rsp)
	return rsp.Resource
}
|
||||
|
||||
func ToGVK(resourceType *pbresource.Type) string {
|
||||
return fmt.Sprintf("%s.%s.%s", resourceType.Group, resourceType.GroupVersion, resourceType.Kind)
|
||||
}
|
||||
|
||||
func (client *Client) WaitForResourceExists(t testutil.TestingTB, id *pbresource.ID) *pbresource.Resource {
|
||||
t.Helper()
|
||||
|
||||
var res *pbresource.Resource
|
||||
client.retry(t, func(r *retry.R) {
|
||||
res = client.RequireResourceExists(r, id)
|
||||
})
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// WaitForDeletion polls until the resource identified by id can no longer be
// read (NotFound); the test fails if the retry budget is exhausted.
func (client *Client) WaitForDeletion(t testutil.TestingTB, id *pbresource.ID) {
	t.Helper()

	client.retry(t, func(r *retry.R) {
		client.RequireResourceNotFound(r, id)
	})
}
|
||||
|
||||
// MustDelete deletes a resource by its id, retrying if necessary, and fails
// the test if it cannot delete it within the timeout. The client's retry
// settings are taken into account with this operation.
func (client *Client) MustDelete(t testutil.TestingTB, id *pbresource.ID) {
	t.Helper()
	client.retryDelete(t, id)
}
|
||||
|
||||
// retryDelete issues the delete and retries on CAS conflicts until the
// resource is gone or the retry budget is exhausted.
func (client *Client) retryDelete(t testutil.TestingTB, id *pbresource.ID) {
	t.Helper()
	ctx := client.Context(t)

	client.retry(t, func(r *retry.R) {
		_, err := client.Delete(ctx, &pbresource.DeleteRequest{Id: id})
		// Already gone counts as success.
		if status.Code(err) == codes.NotFound {
			return
		}

		// codes.Aborted indicates a CAS failure and that the delete request should
		// be retried. Anything else should be considered an unrecoverable error.
		if err != nil && status.Code(err) != codes.Aborted {
			r.Stop(fmt.Errorf("failed to delete the resource: %w", err))
			return
		}

		require.NoError(r, err)
	})
}
|
|
@ -1,84 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package tenancy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1"
|
||||
"github.com/hashicorp/consul/test-integ/topoutil"
|
||||
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
||||
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
|
||||
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||
)
|
||||
|
||||
const (
	// DefaultNamespaceName is the namespace every cluster starts with.
	DefaultNamespaceName = "default"
	// DefaultPartitionName is the partition every cluster starts with.
	DefaultPartitionName = "default"
)
|
||||
|
||||
func newConfig(t *testing.T) *topology.Config {
|
||||
const clusterName = "cluster1"
|
||||
servers := topoutil.NewTopologyServerSet(clusterName+"-server", 3, []string{clusterName}, nil)
|
||||
|
||||
cluster := &topology.Cluster{
|
||||
Enterprise: utils.IsEnterprise(),
|
||||
Name: clusterName,
|
||||
Nodes: servers,
|
||||
EnableV2: true,
|
||||
EnableV2Tenancy: true,
|
||||
}
|
||||
|
||||
return &topology.Config{
|
||||
Images: utils.TargetImages(),
|
||||
Networks: []*topology.Network{{Name: clusterName}},
|
||||
Clusters: []*topology.Cluster{cluster},
|
||||
}
|
||||
}
|
||||
|
||||
func createNamespaces(t *testing.T, resourceServiceClient *Client, numNamespaces int, ap string) []*pbresource.Resource {
|
||||
namespaces := []*pbresource.Resource{}
|
||||
for i := 0; i < numNamespaces; i++ {
|
||||
namespace := &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Name: fmt.Sprintf("namespace-%d", i),
|
||||
Type: pbtenancy.NamespaceType,
|
||||
Tenancy: &pbresource.Tenancy{Partition: ap},
|
||||
},
|
||||
}
|
||||
rsp, err := resourceServiceClient.Write(context.Background(), &pbresource.WriteRequest{Resource: namespace})
|
||||
require.NoError(t, err)
|
||||
namespace = resourceServiceClient.WaitForResourceExists(t, rsp.Resource.Id)
|
||||
namespaces = append(namespaces, namespace)
|
||||
}
|
||||
return namespaces
|
||||
}
|
||||
|
||||
func createServices(t *testing.T, resourceServiceClient *Client, numServices int, ap string, ns string) []*pbresource.Resource {
|
||||
services := []*pbresource.Resource{}
|
||||
for i := 0; i < numServices; i++ {
|
||||
service := &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Name: fmt.Sprintf("service-%d", i),
|
||||
Type: pbcatalog.ServiceType,
|
||||
Tenancy: &pbresource.Tenancy{Partition: ap, Namespace: ns},
|
||||
},
|
||||
}
|
||||
service = sprawltest.MustSetResourceData(t, service, &pbcatalog.Service{
|
||||
Workloads: &pbcatalog.WorkloadSelector{},
|
||||
Ports: []*pbcatalog.ServicePort{},
|
||||
})
|
||||
rsp, err := resourceServiceClient.Write(context.Background(), &pbresource.WriteRequest{Resource: service})
|
||||
require.NoError(t, err)
|
||||
service = resourceServiceClient.WaitForResourceExists(t, rsp.Resource.Id)
|
||||
services = append(services, service)
|
||||
}
|
||||
return services
|
||||
}
|
|
@ -1,84 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
//go:build !consulent
|
||||
|
||||
package tenancy
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1"
|
||||
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
|
||||
)
|
||||
|
||||
// TestNamespaceLifecycle sets up the following:
//
// - 1 cluster
// - 3 servers in that cluster
// - v2 resources and v2 tenancy are activated
//
// When this test is executed it tests the full lifecycle for a
// small number of namespaces:
// - creation of namespaces in the default partition
// - populating resources under namespaces
// - finally deleting everything
func TestNamespaceLifecycle(t *testing.T) {
	t.Parallel()

	cfg := newConfig(t)
	sp := sprawltest.Launch(t, cfg)
	cluster := sp.Topology().Clusters["cluster1"]
	client := NewClient(sp.ResourceServiceClientForCluster(cluster.Name))

	// 3 namespaces
	// @ 3 services per namespace
	// ==============================
	// 9 resources total
	tenants := []*pbresource.Resource{}
	numNamespaces := 3
	numServices := 3

	// Default namespace is expected to exist
	// when we bootstrap a cluster
	client.RequireResourceExists(t, &pbresource.ID{
		Name:    DefaultNamespaceName,
		Type:    pbtenancy.NamespaceType,
		Tenancy: &pbresource.Tenancy{Partition: DefaultPartitionName},
	})

	// Namespaces are created in default partition
	namespaces := createNamespaces(t, client, numNamespaces, DefaultPartitionName)

	// Populate each namespace with services; they are the "tenants" that must
	// disappear when their namespace is deleted.
	for _, namespace := range namespaces {
		services := createServices(t, client, numServices, DefaultPartitionName, namespace.Id.Name)
		tenants = append(tenants, services...)
	}

	// Verify test setup
	require.Equal(t, len(tenants), numNamespaces*numServices)

	// List namespaces
	listRsp, err := client.List(client.Context(t), &pbresource.ListRequest{
		Type:       pbtenancy.NamespaceType,
		Tenancy:    &pbresource.Tenancy{},
		NamePrefix: "namespace-",
	})
	require.NoError(t, err)
	require.Equal(t, len(namespaces), len(listRsp.Resources))

	// Delete all namespaces
	for _, namespace := range namespaces {
		_, err := client.Delete(client.Context(t), &pbresource.DeleteRequest{Id: namespace.Id})
		require.NoError(t, err)
		client.WaitForDeletion(t, namespace.Id)
	}

	// Make sure no namespace tenants left behind
	for _, tenant := range tenants {
		client.RequireResourceNotFound(t, tenant.Id)
	}
}
|
|
@ -18,6 +18,9 @@ func NewFortioWorkloadWithDefaults(
|
|||
nodeVersion topology.NodeVersion,
|
||||
mut func(*topology.Workload),
|
||||
) *topology.Workload {
|
||||
if nodeVersion == topology.NodeVersionV2 {
|
||||
panic("v2 nodes are not supported")
|
||||
}
|
||||
const (
|
||||
httpPort = 8080
|
||||
grpcPort = 8079
|
||||
|
@ -30,6 +33,7 @@ func NewFortioWorkloadWithDefaults(
|
|||
ID: sid,
|
||||
Image: HashicorpDockerProxy + "/fortio/fortio",
|
||||
EnvoyAdminPort: adminPort,
|
||||
Port: httpPort,
|
||||
CheckTCP: "127.0.0.1:" + strconv.Itoa(httpPort),
|
||||
Env: []string{
|
||||
"FORTIO_NAME=" + cluster + "::" + sid.String(),
|
||||
|
@ -43,17 +47,6 @@ func NewFortioWorkloadWithDefaults(
|
|||
},
|
||||
}
|
||||
|
||||
if nodeVersion == topology.NodeVersionV2 {
|
||||
wrk.Ports = map[string]*topology.Port{
|
||||
"http": {Number: httpPort, Protocol: "http"},
|
||||
"http2": {Number: httpPort, Protocol: "http2"},
|
||||
"grpc": {Number: grpcPort, Protocol: "grpc"},
|
||||
"tcp": {Number: tcpPort, Protocol: "tcp"},
|
||||
}
|
||||
} else {
|
||||
wrk.Port = httpPort
|
||||
}
|
||||
|
||||
if mut != nil {
|
||||
mut(wrk)
|
||||
}
|
||||
|
@ -66,6 +59,9 @@ func NewBlankspaceWorkloadWithDefaults(
|
|||
nodeVersion topology.NodeVersion,
|
||||
mut func(*topology.Workload),
|
||||
) *topology.Workload {
|
||||
if nodeVersion == topology.NodeVersionV2 {
|
||||
panic("v2 nodes are not supported")
|
||||
}
|
||||
const (
|
||||
httpPort = 8080
|
||||
grpcPort = 8079
|
||||
|
@ -78,6 +74,7 @@ func NewBlankspaceWorkloadWithDefaults(
|
|||
ID: sid,
|
||||
Image: HashicorpDockerProxy + "/rboyer/blankspace",
|
||||
EnvoyAdminPort: adminPort,
|
||||
Port: httpPort,
|
||||
CheckTCP: "127.0.0.1:" + strconv.Itoa(httpPort),
|
||||
Command: []string{
|
||||
"-name", cluster + "::" + sid.String(),
|
||||
|
@ -87,17 +84,6 @@ func NewBlankspaceWorkloadWithDefaults(
|
|||
},
|
||||
}
|
||||
|
||||
if nodeVersion == topology.NodeVersionV2 {
|
||||
wrk.Ports = map[string]*topology.Port{
|
||||
"http": {Number: httpPort, Protocol: "http"},
|
||||
"http2": {Number: httpPort, Protocol: "http2"},
|
||||
"grpc": {Number: grpcPort, Protocol: "grpc"},
|
||||
"tcp": {Number: tcpPort, Protocol: "tcp"},
|
||||
}
|
||||
} else {
|
||||
wrk.Port = httpPort
|
||||
}
|
||||
|
||||
if mut != nil {
|
||||
mut(wrk)
|
||||
}
|
||||
|
|
|
@ -19,8 +19,7 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
flagWin = flag.Bool("win", false, "Execute tests on windows")
|
||||
flagResourceAPIs = flag.Bool("enable-resource-apis", false, "Execute tests with resource apis enabled.")
|
||||
flagWin = flag.Bool("win", false, "Execute tests on windows")
|
||||
)
|
||||
|
||||
func TestEnvoy(t *testing.T) {
|
||||
|
@ -31,14 +30,7 @@ func TestEnvoy(t *testing.T) {
|
|||
check_dir_files(dir)
|
||||
}
|
||||
|
||||
var testcases []string
|
||||
var err error
|
||||
if *flagResourceAPIs == true {
|
||||
os.Setenv("USE_RESOURCE_APIS", "true")
|
||||
testcases, err = discoverResourceAPICases()
|
||||
} else {
|
||||
testcases, err = discoverCases()
|
||||
}
|
||||
testcases, err := discoverCases()
|
||||
require.NoError(t, err)
|
||||
|
||||
runCmd(t, "suite_setup")
|
||||
|
@ -125,33 +117,6 @@ func discoverCases() ([]string, error) {
|
|||
return out, nil
|
||||
}
|
||||
|
||||
// discoverResourceAPICases will discover the Envoy tests case files but will contain
|
||||
// a filter in it to only return those case for which functionality has been added
|
||||
// to the V2 catalog resources.
|
||||
func discoverResourceAPICases() ([]string, error) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dirs, err := os.ReadDir(cwd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var out []string
|
||||
for _, fi := range dirs {
|
||||
// TODO(proxystate): enable this to only include tests cases that are supported.
|
||||
// Currently the work is in progress, so it is wired up in CI, but this excludes any tests from actually running.
|
||||
if fi.IsDir() && strings.HasPrefix(fi.Name(), "case-don-match-me-on-anything-yet-because-i-am-not-ready") {
|
||||
out = append(out, fi.Name())
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(out)
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// CRLF convert functions
|
||||
// Recursively iterates through the directory passed by parameter looking for the sh and bash files.
|
||||
// Upon finding them, it calls crlf_file_check.
|
||||
|
|
|
@ -179,14 +179,6 @@ function start_consul {
|
|||
license=$(cat $CONSUL_LICENSE_PATH)
|
||||
fi
|
||||
|
||||
USE_RESOURCE_APIS=${USE_RESOURCE_APIS:-false}
|
||||
|
||||
experiments="experiments=[]"
|
||||
# set up consul to run in V1 or V2 catalog mode
|
||||
if [[ "${USE_RESOURCE_APIS}" == true ]]; then
|
||||
experiments="experiments=[\"resource-apis\"]"
|
||||
fi
|
||||
|
||||
# We currently run these integration tests in two modes: one in which Envoy's
|
||||
# xDS sessions are served directly by a Consul server, and another in which it
|
||||
# goes through a client agent.
|
||||
|
@ -270,7 +262,6 @@ function start_consul {
|
|||
agent -dev -datacenter "${DC}" \
|
||||
-config-dir "/workdir/${DC}/consul" \
|
||||
-config-dir "/workdir/${DC}/consul-server" \
|
||||
-hcl=${experiments} \
|
||||
-client "0.0.0.0" >/dev/null
|
||||
fi
|
||||
}
|
||||
|
|
|
@ -40,22 +40,15 @@ require (
|
|||
fortio.org/sets v1.0.2 // indirect
|
||||
fortio.org/version v1.0.2 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/DataDog/datadog-go v4.8.2+incompatible // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/armon/go-metrics v0.4.1 // indirect
|
||||
github.com/armon/go-radix v1.0.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.44.289 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible // indirect
|
||||
github.com/circonus-labs/circonusllhist v0.1.3 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect
|
||||
github.com/containerd/containerd v1.7.3 // indirect
|
||||
github.com/cpuguy83/dockercfg v0.3.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/deckarep/golang-set/v2 v2.3.1 // indirect
|
||||
github.com/docker/distribution v2.8.2+incompatible // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane v0.12.0 // indirect
|
||||
|
@ -66,24 +59,16 @@ require (
|
|||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/uuid v1.4.0 // indirect
|
||||
github.com/hashicorp/consul-net-rpc v0.0.0-20221205195236-156cfab66a69 // indirect
|
||||
github.com/hashicorp/consul-server-connection-manager v0.1.4 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-bexpr v0.1.2 // indirect
|
||||
github.com/hashicorp/go-hclog v1.5.0 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect
|
||||
github.com/hashicorp/go-msgpack v1.1.5 // indirect
|
||||
github.com/hashicorp/go-netaddrs v0.1.0 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.6.7 // indirect
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.0 // indirect
|
||||
github.com/hashicorp/memberlist v0.5.0 // indirect
|
||||
github.com/hashicorp/raft v1.5.0 // indirect
|
||||
github.com/hashicorp/raft-autopilot v0.1.6 // indirect
|
||||
github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 // indirect
|
||||
github.com/itchyny/timefmt-go v0.1.5 // indirect
|
||||
github.com/klauspost/compress v1.16.7 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
|
@ -92,14 +77,11 @@ require (
|
|||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/miekg/dns v1.1.50 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/go-testing-interface v1.14.0 // indirect
|
||||
github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/patternmatcher v0.5.0 // indirect
|
||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/oklog/ulid/v2 v2.1.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc4 // indirect
|
||||
github.com/opencontainers/runc v1.1.8 // indirect
|
||||
|
@ -110,19 +92,11 @@ require (
|
|||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/stretchr/objx v0.5.0 // indirect
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||
golang.org/x/crypto v0.22.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect
|
||||
golang.org/x/net v0.24.0 // indirect
|
||||
golang.org/x/sync v0.3.0 // indirect
|
||||
golang.org/x/sys v0.19.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5 // indirect
|
||||
|
|
|
@ -16,10 +16,7 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1
|
|||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/DataDog/datadog-go v4.8.2+incompatible h1:qbcKSx29aBLD+5QLvlQZlGmRMF/FfGqFLFev/1TDzRo=
|
||||
github.com/DataDog/datadog-go v4.8.2+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek=
|
||||
|
@ -29,33 +26,24 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
|
|||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
|
||||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
|
||||
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
|
||||
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
|
||||
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
|
||||
github.com/aws/aws-sdk-go v1.44.289 h1:5CVEjiHFvdiVlKPBzv0rjG4zH/21W/onT18R5AH/qx0=
|
||||
github.com/aws/aws-sdk-go v1.44.289/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
|
||||
|
@ -70,8 +58,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
|
|||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A=
|
||||
github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
|
||||
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
|
||||
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v24.0.5+incompatible h1:WmgcE4fxyI6EEXxBRxsHnZXrO1pQ3smi0k/jho4HLeY=
|
||||
|
@ -93,7 +79,6 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL
|
|||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
|
||||
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||
github.com/fullstorydev/grpchan v1.1.1 h1:heQqIJlAv5Cnks9a70GRL2EJke6QQoUB25VGR6TZQas=
|
||||
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
|
||||
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
|
@ -120,35 +105,22 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
|
||||
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/hashicorp/consul-net-rpc v0.0.0-20221205195236-156cfab66a69 h1:wzWurXrxfSyG1PHskIZlfuXlTSCj1Tsyatp9DtaasuY=
|
||||
github.com/hashicorp/consul-net-rpc v0.0.0-20221205195236-156cfab66a69/go.mod h1:svUZZDvotY8zTODknUePc6mZ9pX8nN0ViGwWcUSOBEA=
|
||||
github.com/hashicorp/consul-server-connection-manager v0.1.4 h1:wrcSRV6WGXFBNpNbN6XsdoGgBOyso7ZbN5VaWPEX1jY=
|
||||
github.com/hashicorp/consul-server-connection-manager v0.1.4/go.mod h1:LMqHkALoLP0HUQKOG21xXYr0YPUayIQIHNTlmxG100E=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-bexpr v0.1.2 h1:ijMXI4qERbzxbCnkxmfUtwMyjrrk3y+Vt0MxojNCbBs=
|
||||
github.com/hashicorp/go-bexpr v0.1.2/go.mod h1:ANbpTX1oAql27TZkKVeW8p1w8NTdnyzPe/0qqPCKohU=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
|
||||
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
|
||||
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo=
|
||||
github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw=
|
||||
github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs=
|
||||
github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
|
@ -158,14 +130,11 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9
|
|||
github.com/hashicorp/go-netaddrs v0.1.0 h1:TnlYvODD4C/wO+j7cX1z69kV5gOzI87u3OcUinANaW8=
|
||||
github.com/hashicorp/go-netaddrs v0.1.0/go.mod h1:33+a/emi5R5dqRspOuZKO0E+Tuz5WV1F84eRWALkedA=
|
||||
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.7 h1:8/CAEZt/+F7kR7GevNHulKkUjLht3CPmn7egmhieNKo=
|
||||
github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
|
||||
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
|
||||
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
|
||||
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
|
@ -176,33 +145,18 @@ github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09
|
|||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.0 h1:Lf+9eD8m5pncvHAOCQj49GSN6aQI8XGfI5OpXNkoWaA=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.0/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/hil v0.0.0-20200423225030-a18a1cd20038 h1:n9J0rwVWXDpNd5iZnwY7w4WZyq53/rROeI7OVvLW8Ok=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
|
||||
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
|
||||
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
|
||||
github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
|
||||
github.com/hashicorp/raft v1.5.0 h1:uNs9EfJ4FwiArZRxxfd/dQ5d33nV31/CdCHArH89hT8=
|
||||
github.com/hashicorp/raft v1.5.0/go.mod h1:pKHB2mf/Y25u3AHNSXVRv+yT+WAnmeTX0BwVppVQV+M=
|
||||
github.com/hashicorp/raft-autopilot v0.1.6 h1:C1q3RNF2FfXNZfHWbvVAu0QixaQK8K5pX4O5lh+9z4I=
|
||||
github.com/hashicorp/raft-autopilot v0.1.6/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw=
|
||||
github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
|
||||
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
|
||||
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
|
||||
github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87 h1:xixZ2bWeofWV68J+x6AzmKuVM/JWCQwkWm6GW/MUR6I=
|
||||
github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
|
||||
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
|
||||
github.com/itchyny/gojq v0.12.12 h1:x+xGI9BXqKoJQZkr95ibpe3cdrTbY8D9lonrK433rcA=
|
||||
github.com/itchyny/gojq v0.12.12/go.mod h1:j+3sVkjxwd7A7Z5jrbKibgOLn0ZfLWkV+Awxr/pyzJE=
|
||||
github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE=
|
||||
github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8=
|
||||
github.com/jhump/protoreflect v1.11.0 h1:bvACHUD1Ua/3VxY4aAMpItKMhhwbimlKFJKsLsVgDjU=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
|
@ -228,7 +182,6 @@ github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec
|
|||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
|
||||
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
|
@ -248,15 +201,10 @@ github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa1
|
|||
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.14.0 h1:/x0XQ6h+3U3nAyk1yx+bHPURrKa9sVVvYbuqZ7pIAtI=
|
||||
github.com/mitchellh/go-testing-interface v1.14.0/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452 h1:hOY53G+kBFhbYFpRVxHl5eS7laP6B1+Cq+Z9Dry1iMU=
|
||||
github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw=
|
||||
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
|
||||
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
|
||||
github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
|
||||
|
@ -272,8 +220,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
|
|||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
|
||||
github.com/oklog/ulid/v2 v2.1.0/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc4 h1:oOxKUJWnFC4YGHCCMNql1x4YaDfYBTS5Y4x/Cgeo1E0=
|
||||
|
@ -286,7 +232,6 @@ github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
|
|||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
|
@ -297,7 +242,6 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH
|
|||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
|
@ -308,13 +252,11 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
|
|||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
|
||||
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
|
@ -330,41 +272,23 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
|
|||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569 h1:xzABM9let0HLLqFypcxvLmlvEciCHL7+Lv+4vwZqecI=
|
||||
github.com/teris-io/shortid v0.0.0-20220617161101-71ec9f2aa569/go.mod h1:2Ly+NIftZN4de9zRmENdYbvPQeaVIYKWpLFStLFEBgI=
|
||||
github.com/testcontainers/testcontainers-go v0.22.0 h1:hOK4NzNu82VZcKEB1aP9LO1xYssVFMvlfeuDW9JMmV0=
|
||||
github.com/testcontainers/testcontainers-go v0.22.0/go.mod h1:k0YiPa26xJCRUbUkYqy5rY6NGvSbVCeUBXCvucscBR4=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
|
||||
|
@ -380,8 +304,6 @@ golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMe
|
|||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
|
@ -392,7 +314,6 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
|||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
|
@ -406,7 +327,6 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
|
|||
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
|
||||
|
@ -422,7 +342,6 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -431,10 +350,8 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
|
|||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -455,7 +372,6 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
@ -464,7 +380,6 @@ golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
|
|||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
|
@ -473,13 +388,11 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
|||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
|
@ -487,7 +400,6 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
|
|||
golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
|
@ -517,14 +429,12 @@ google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGm
|
|||
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
|
|
@ -1,43 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package catalog
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
|
||||
libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
|
||||
|
||||
"github.com/hashicorp/consul/internal/catalog/catalogtest"
|
||||
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
var (
|
||||
cli = rtest.ConfigureTestCLIFlags()
|
||||
)
|
||||
|
||||
func TestCatalog(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cluster, _, _ := libtopology.NewCluster(t, &libtopology.ClusterConfig{
|
||||
NumServers: 3,
|
||||
BuildOpts: &libcluster.BuildOptions{Datacenter: "dc1"},
|
||||
Cmd: `-hcl=experiments=["resource-apis"]`,
|
||||
})
|
||||
|
||||
followers, err := cluster.Followers()
|
||||
require.NoError(t, err)
|
||||
client := pbresource.NewResourceServiceClient(followers[0].GetGRPCConn())
|
||||
|
||||
t.Run("one-shot", func(t *testing.T) {
|
||||
catalogtest.RunCatalogV2Beta1IntegrationTest(t, client, cli.ClientOptions(t)...)
|
||||
})
|
||||
|
||||
t.Run("lifecycle", func(t *testing.T) {
|
||||
catalogtest.RunCatalogV2Beta1LifecycleIntegrationTest(t, client, cli.ClientOptions(t)...)
|
||||
})
|
||||
}
|
|
@ -1,555 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package trafficpermissions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
|
||||
libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
|
||||
"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
|
||||
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
echoPort = 9999
|
||||
tcpPort = 8888
|
||||
staticServerVIP = "240.0.0.1"
|
||||
staticServerReturnValue = "static-server"
|
||||
staticServerIdentity = "static-server-identity"
|
||||
)
|
||||
|
||||
type trafficPermissionsCase struct {
|
||||
tp1 *pbauth.TrafficPermissions
|
||||
tp2 *pbauth.TrafficPermissions
|
||||
client1TCPSuccess bool
|
||||
client1EchoSuccess bool
|
||||
client2TCPSuccess bool
|
||||
client2EchoSuccess bool
|
||||
}
|
||||
|
||||
// We are using tproxy to test traffic permissions now because explicitly specifying destinations
|
||||
// doesn't work when multiple downstreams specify the same destination yet. In the future, we will need
|
||||
// to update this to use explicit destinations once we infer tproxy destinations from traffic permissions.
|
||||
//
|
||||
// This also explicitly uses virtual IPs and virtual ports because Consul DNS doesn't support v2 resources yet.
|
||||
// We should update this to use Consul DNS when it is working.
|
||||
func runTrafficPermissionsTests(t *testing.T, aclsEnabled bool, cases map[string]trafficPermissionsCase) {
|
||||
t.Parallel()
|
||||
cluster, resourceClient := createCluster(t, aclsEnabled)
|
||||
|
||||
serverDataplane := createServerResources(t, resourceClient, cluster, cluster.Agents[1])
|
||||
client1Dataplane := createClientResources(t, resourceClient, cluster, cluster.Agents[2], 1)
|
||||
client2Dataplane := createClientResources(t, resourceClient, cluster, cluster.Agents[3], 2)
|
||||
|
||||
assertDataplaneContainerState(t, client1Dataplane, "running")
|
||||
assertDataplaneContainerState(t, client2Dataplane, "running")
|
||||
assertDataplaneContainerState(t, serverDataplane, "running")
|
||||
|
||||
for n, tc := range cases {
|
||||
t.Run(n, func(t *testing.T) {
|
||||
storeStaticServerTrafficPermissions(t, resourceClient, tc.tp1, 1)
|
||||
storeStaticServerTrafficPermissions(t, resourceClient, tc.tp2, 2)
|
||||
|
||||
// We must establish a new TCP connection each time because TCP traffic permissions are
|
||||
// enforced at the connection level.
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
assertPassing(r, httpRequestToVirtualAddress, client1Dataplane, tc.client1TCPSuccess)
|
||||
assertPassing(r, echoToVirtualAddress, client1Dataplane, tc.client1EchoSuccess)
|
||||
assertPassing(r, httpRequestToVirtualAddress, client2Dataplane, tc.client2TCPSuccess)
|
||||
assertPassing(r, echoToVirtualAddress, client2Dataplane, tc.client2EchoSuccess)
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTrafficPermission_TCP_DefaultDeny(t *testing.T) {
|
||||
cases := map[string]trafficPermissionsCase{
|
||||
"default deny": {
|
||||
tp1: nil,
|
||||
client1TCPSuccess: false,
|
||||
client1EchoSuccess: false,
|
||||
client2TCPSuccess: false,
|
||||
client2EchoSuccess: false,
|
||||
},
|
||||
"allow everything": {
|
||||
tp1: &pbauth.TrafficPermissions{
|
||||
Destination: &pbauth.Destination{
|
||||
IdentityName: staticServerIdentity,
|
||||
},
|
||||
Action: pbauth.Action_ACTION_ALLOW,
|
||||
Permissions: []*pbauth.Permission{
|
||||
{
|
||||
Sources: []*pbauth.Source{
|
||||
{
|
||||
// IdentityName: "static-client-1-identity",
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Peer: "local",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
client1TCPSuccess: true,
|
||||
client1EchoSuccess: true,
|
||||
client2TCPSuccess: true,
|
||||
client2EchoSuccess: true,
|
||||
},
|
||||
"allow tcp": {
|
||||
tp1: &pbauth.TrafficPermissions{
|
||||
Destination: &pbauth.Destination{
|
||||
IdentityName: staticServerIdentity,
|
||||
},
|
||||
Action: pbauth.Action_ACTION_ALLOW,
|
||||
Permissions: []*pbauth.Permission{
|
||||
{
|
||||
Sources: []*pbauth.Source{
|
||||
{
|
||||
// IdentityName: "static-client-1-identity",
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Peer: "local",
|
||||
},
|
||||
},
|
||||
DestinationRules: []*pbauth.DestinationRule{
|
||||
{
|
||||
PortNames: []string{"tcp"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
client1TCPSuccess: true,
|
||||
client1EchoSuccess: false,
|
||||
client2TCPSuccess: true,
|
||||
client2EchoSuccess: false,
|
||||
},
|
||||
"client 1 only": {
|
||||
tp1: &pbauth.TrafficPermissions{
|
||||
Destination: &pbauth.Destination{
|
||||
IdentityName: staticServerIdentity,
|
||||
},
|
||||
Action: pbauth.Action_ACTION_ALLOW,
|
||||
Permissions: []*pbauth.Permission{
|
||||
{
|
||||
Sources: []*pbauth.Source{
|
||||
{
|
||||
IdentityName: "static-client-1-identity",
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Peer: "local",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
client1TCPSuccess: true,
|
||||
client1EchoSuccess: true,
|
||||
client2TCPSuccess: false,
|
||||
client2EchoSuccess: false,
|
||||
},
|
||||
"allow all exclude client 1": {
|
||||
tp1: &pbauth.TrafficPermissions{
|
||||
Destination: &pbauth.Destination{
|
||||
IdentityName: staticServerIdentity,
|
||||
},
|
||||
Action: pbauth.Action_ACTION_ALLOW,
|
||||
Permissions: []*pbauth.Permission{
|
||||
{
|
||||
Sources: []*pbauth.Source{
|
||||
{
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Peer: "local",
|
||||
Exclude: []*pbauth.ExcludeSource{
|
||||
{
|
||||
IdentityName: "static-client-1-identity",
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Peer: "local",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
client1TCPSuccess: false,
|
||||
client1EchoSuccess: false,
|
||||
client2TCPSuccess: true,
|
||||
client2EchoSuccess: true,
|
||||
},
|
||||
}
|
||||
|
||||
runTrafficPermissionsTests(t, true, cases)
|
||||
}
|
||||
|
||||
func TestTrafficPermission_TCP_DefaultAllow(t *testing.T) {
|
||||
cases := map[string]trafficPermissionsCase{
|
||||
"default allow": {
|
||||
tp1: nil,
|
||||
client1TCPSuccess: true,
|
||||
client1EchoSuccess: true,
|
||||
client2TCPSuccess: true,
|
||||
client2EchoSuccess: true,
|
||||
},
|
||||
"empty allow denies everything": {
|
||||
tp1: &pbauth.TrafficPermissions{
|
||||
Destination: &pbauth.Destination{
|
||||
IdentityName: staticServerIdentity,
|
||||
},
|
||||
Action: pbauth.Action_ACTION_ALLOW,
|
||||
},
|
||||
client1TCPSuccess: false,
|
||||
client1EchoSuccess: false,
|
||||
client2TCPSuccess: false,
|
||||
client2EchoSuccess: false,
|
||||
},
|
||||
"allow everything": {
|
||||
tp1: &pbauth.TrafficPermissions{
|
||||
Destination: &pbauth.Destination{
|
||||
IdentityName: staticServerIdentity,
|
||||
},
|
||||
Action: pbauth.Action_ACTION_ALLOW,
|
||||
Permissions: []*pbauth.Permission{
|
||||
{
|
||||
Sources: []*pbauth.Source{
|
||||
{
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Peer: "local",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
client1TCPSuccess: true,
|
||||
client1EchoSuccess: true,
|
||||
client2TCPSuccess: true,
|
||||
client2EchoSuccess: true,
|
||||
},
|
||||
"allow one protocol denies the other protocol": {
|
||||
tp1: &pbauth.TrafficPermissions{
|
||||
Destination: &pbauth.Destination{
|
||||
IdentityName: staticServerIdentity,
|
||||
},
|
||||
Action: pbauth.Action_ACTION_ALLOW,
|
||||
Permissions: []*pbauth.Permission{
|
||||
{
|
||||
Sources: []*pbauth.Source{
|
||||
{
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Peer: "local",
|
||||
},
|
||||
},
|
||||
DestinationRules: []*pbauth.DestinationRule{
|
||||
{
|
||||
PortNames: []string{"tcp"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
client1TCPSuccess: true,
|
||||
client1EchoSuccess: false,
|
||||
client2TCPSuccess: true,
|
||||
client2EchoSuccess: false,
|
||||
},
|
||||
"allow something unrelated": {
|
||||
tp1: &pbauth.TrafficPermissions{
|
||||
Destination: &pbauth.Destination{
|
||||
IdentityName: staticServerIdentity,
|
||||
},
|
||||
Action: pbauth.Action_ACTION_ALLOW,
|
||||
Permissions: []*pbauth.Permission{
|
||||
{
|
||||
Sources: []*pbauth.Source{
|
||||
{
|
||||
IdentityName: "something-else",
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
Peer: "local",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
client1TCPSuccess: false,
|
||||
client1EchoSuccess: false,
|
||||
client2TCPSuccess: false,
|
||||
client2EchoSuccess: false,
|
||||
},
|
||||
}
|
||||
|
||||
runTrafficPermissionsTests(t, false, cases)
|
||||
}
|
||||
|
||||
func createServiceAndDataplane(t *testing.T, node libcluster.Agent, cluster *libcluster.Cluster, proxyID, serviceName string, httpPort, grpcPort int, serviceBindPorts []int) (*libcluster.ConsulDataplaneContainer, error) {
|
||||
leader, err := cluster.Leader()
|
||||
require.NoError(t, err)
|
||||
leaderIP := leader.GetIP()
|
||||
|
||||
token := cluster.TokenBootstrap
|
||||
|
||||
// Do some trickery to ensure that partial completion is correctly torn
|
||||
// down, but successful execution is not.
|
||||
var deferClean utils.ResettableDefer
|
||||
defer deferClean.Execute()
|
||||
|
||||
// Create a service and proxy instance
|
||||
svc, err := libservice.NewExampleService(context.Background(), serviceName, httpPort, grpcPort, node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
deferClean.Add(func() {
|
||||
_ = svc.Terminate()
|
||||
})
|
||||
|
||||
// Create Consul Dataplane
|
||||
dp, err := libcluster.NewConsulDataplane(context.Background(), proxyID, leaderIP, 8502, serviceBindPorts, node, true, token)
|
||||
require.NoError(t, err)
|
||||
deferClean.Add(func() {
|
||||
_ = dp.Terminate()
|
||||
})
|
||||
|
||||
// disable cleanup functions now that we have an object with a Terminate() function
|
||||
deferClean.Reset()
|
||||
|
||||
return dp, nil
|
||||
}
|
||||
|
||||
func storeStaticServerTrafficPermissions(t *testing.T, resourceClient *rtest.Client, tp *pbauth.TrafficPermissions, i int) {
|
||||
id := &pbresource.ID{
|
||||
Name: fmt.Sprintf("static-server-tp-%d", i),
|
||||
Type: pbauth.TrafficPermissionsType,
|
||||
}
|
||||
if tp == nil {
|
||||
resourceClient.Delete(resourceClient.Context(t), &pbresource.DeleteRequest{
|
||||
Id: id,
|
||||
})
|
||||
} else {
|
||||
rtest.ResourceID(id).
|
||||
WithData(t, tp).
|
||||
Write(t, resourceClient)
|
||||
}
|
||||
}
|
||||
|
||||
func createServerResources(t *testing.T, resourceClient *rtest.Client, cluster *libcluster.Cluster, node libcluster.Agent) *libcluster.ConsulDataplaneContainer {
|
||||
rtest.ResourceID(&pbresource.ID{
|
||||
Name: "static-server-service",
|
||||
Type: pbcatalog.ServiceType,
|
||||
}).
|
||||
WithData(t, &pbcatalog.Service{
|
||||
Workloads: &pbcatalog.WorkloadSelector{Prefixes: []string{"static-server"}},
|
||||
Ports: []*pbcatalog.ServicePort{
|
||||
{
|
||||
TargetPort: "tcp",
|
||||
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
|
||||
VirtualPort: 8888,
|
||||
},
|
||||
{
|
||||
TargetPort: "echo",
|
||||
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
|
||||
VirtualPort: 9999,
|
||||
},
|
||||
{TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
VirtualIps: []string{"240.0.0.1"},
|
||||
}).Write(t, resourceClient)
|
||||
|
||||
workloadPortMap := map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp": {
|
||||
Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
|
||||
},
|
||||
"echo": {
|
||||
Port: 8078, Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
|
||||
},
|
||||
"mesh": {
|
||||
Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH,
|
||||
},
|
||||
}
|
||||
|
||||
rtest.ResourceID(&pbresource.ID{
|
||||
Name: "static-server-workload",
|
||||
Type: pbcatalog.WorkloadType,
|
||||
}).
|
||||
WithData(t, &pbcatalog.Workload{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{
|
||||
{Host: node.GetIP()},
|
||||
},
|
||||
Ports: workloadPortMap,
|
||||
Identity: staticServerIdentity,
|
||||
}).
|
||||
Write(t, resourceClient)
|
||||
|
||||
rtest.ResourceID(&pbresource.ID{
|
||||
Name: staticServerIdentity,
|
||||
Type: pbauth.WorkloadIdentityType,
|
||||
}).
|
||||
Write(t, resourceClient)
|
||||
|
||||
serverDataplane, err := createServiceAndDataplane(t, node, cluster, "static-server-workload", "static-server", 8080, 8079, []int{})
|
||||
require.NoError(t, err)
|
||||
|
||||
return serverDataplane
|
||||
}
|
||||
|
||||
func createClientResources(t *testing.T, resourceClient *rtest.Client, cluster *libcluster.Cluster, node libcluster.Agent, idx int) *libcluster.ConsulDataplaneContainer {
|
||||
prefix := fmt.Sprintf("static-client-%d", idx)
|
||||
rtest.ResourceID(&pbresource.ID{
|
||||
Name: prefix + "-service",
|
||||
Type: pbcatalog.ServiceType,
|
||||
}).
|
||||
WithData(t, &pbcatalog.Service{
|
||||
Workloads: &pbcatalog.WorkloadSelector{Prefixes: []string{prefix}},
|
||||
Ports: []*pbcatalog.ServicePort{
|
||||
{TargetPort: "tcp", Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
{TargetPort: "mesh", Protocol: pbcatalog.Protocol_PROTOCOL_MESH},
|
||||
},
|
||||
}).Write(t, resourceClient)
|
||||
|
||||
workloadPortMap := map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp": {
|
||||
Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
|
||||
},
|
||||
"mesh": {
|
||||
Port: 20000, Protocol: pbcatalog.Protocol_PROTOCOL_MESH,
|
||||
},
|
||||
}
|
||||
|
||||
rtest.ResourceID(&pbresource.ID{
|
||||
Name: prefix + "-workload",
|
||||
Type: pbcatalog.WorkloadType,
|
||||
}).
|
||||
WithData(t, &pbcatalog.Workload{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{
|
||||
{Host: node.GetIP()},
|
||||
},
|
||||
Ports: workloadPortMap,
|
||||
Identity: prefix + "-identity",
|
||||
}).
|
||||
Write(t, resourceClient)
|
||||
|
||||
rtest.ResourceID(&pbresource.ID{
|
||||
Name: prefix + "-identity",
|
||||
Type: pbauth.WorkloadIdentityType,
|
||||
}).
|
||||
Write(t, resourceClient)
|
||||
|
||||
rtest.ResourceID(&pbresource.ID{
|
||||
Name: prefix + "-proxy-configuration",
|
||||
Type: pbmesh.ProxyConfigurationType,
|
||||
}).
|
||||
WithData(t, &pbmesh.ProxyConfiguration{
|
||||
Workloads: &pbcatalog.WorkloadSelector{
|
||||
Prefixes: []string{"static-client"},
|
||||
},
|
||||
DynamicConfig: &pbmesh.DynamicConfig{
|
||||
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
|
||||
},
|
||||
}).
|
||||
Write(t, resourceClient)
|
||||
|
||||
dp, err := createServiceAndDataplane(t, node, cluster, fmt.Sprintf("static-client-%d-workload", idx), "static-client", 8080, 8079, []int{})
|
||||
require.NoError(t, err)
|
||||
|
||||
return dp
|
||||
}
|
||||
|
||||
func createCluster(t *testing.T, aclsEnabled bool) (*libcluster.Cluster, *rtest.Client) {
|
||||
cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
|
||||
NumServers: 1,
|
||||
NumClients: 3,
|
||||
BuildOpts: &libcluster.BuildOptions{
|
||||
Datacenter: "dc1",
|
||||
InjectAutoEncryption: true,
|
||||
InjectGossipEncryption: true,
|
||||
AllowHTTPAnyway: true,
|
||||
ACLEnabled: aclsEnabled,
|
||||
},
|
||||
Cmd: `-hcl=experiments=["resource-apis"] log_level="TRACE"`,
|
||||
})
|
||||
|
||||
leader, err := cluster.Leader()
|
||||
require.NoError(t, err)
|
||||
client := pbresource.NewResourceServiceClient(leader.GetGRPCConn())
|
||||
resourceClient := rtest.NewClientWithACLToken(client, cluster.TokenBootstrap)
|
||||
|
||||
return cluster, resourceClient
|
||||
}
|
||||
|
||||
// assertDataplaneContainerState validates service container status
|
||||
func assertDataplaneContainerState(t *testing.T, dataplane *libcluster.ConsulDataplaneContainer, state string) {
|
||||
containerStatus, err := dataplane.GetStatus()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, containerStatus, state, fmt.Sprintf("Expected: %s. Got %s", state, containerStatus))
|
||||
}
|
||||
|
||||
func httpRequestToVirtualAddress(dp *libcluster.ConsulDataplaneContainer) (string, error) {
|
||||
addr := fmt.Sprintf("%s:%d", staticServerVIP, tcpPort)
|
||||
|
||||
out, err := dp.Exec(
|
||||
context.Background(),
|
||||
[]string{"sudo", "sh", "-c", fmt.Sprintf(`
|
||||
set -e
|
||||
curl -s "%s/debug?env=dump"
|
||||
`, addr),
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("curl request to upstream virtual address %q\nerr = %v\nout = %s\nservice=%s", addr, err, out, dp.GetServiceName())
|
||||
}
|
||||
|
||||
expected := fmt.Sprintf("FORTIO_NAME=%s", staticServerReturnValue)
|
||||
if !strings.Contains(out, expected) {
|
||||
return out, fmt.Errorf("expected %q to contain %q", out, expected)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func echoToVirtualAddress(dp *libcluster.ConsulDataplaneContainer) (string, error) {
|
||||
out, err := dp.Exec(
|
||||
context.Background(),
|
||||
[]string{"sudo", "sh", "-c", fmt.Sprintf(`
|
||||
set -e
|
||||
echo foo | nc %s %d
|
||||
`, staticServerVIP, echoPort),
|
||||
},
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("nc request to upstream virtual address %s:%d\nerr = %v\nout = %s\nservice=%s", staticServerVIP, echoPort, err, out, dp.GetServiceName())
|
||||
}
|
||||
|
||||
if !strings.Contains(out, "foo") {
|
||||
return out, fmt.Errorf("expected %q to contain 'foo'", out)
|
||||
}
|
||||
|
||||
return out, err
|
||||
}
|
||||
|
||||
func assertPassing(t *retry.R, fn func(*libcluster.ConsulDataplaneContainer) (string, error), dp *libcluster.ConsulDataplaneContainer, success bool) {
|
||||
_, err := fn(dp)
|
||||
if success {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
}
|
||||
}
|
|
@ -1,88 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package catalog
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/go-version"
|
||||
|
||||
"github.com/hashicorp/consul/internal/catalog/catalogtest"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
|
||||
"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
|
||||
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
|
||||
)
|
||||
|
||||
var minCatalogResourceVersion = version.Must(version.NewVersion("v1.18.0"))
|
||||
|
||||
const (
|
||||
versionUndetermined = `
|
||||
Cannot determine the actual version the starting image represents.
|
||||
Scrutinze test failures to ensure that the starting version should
|
||||
actually be able to be used for creating the initial data set.
|
||||
`
|
||||
)
|
||||
|
||||
func maybeSkipUpgradeTest(t *testing.T, minVersion *version.Version) {
|
||||
t.Helper()
|
||||
|
||||
image := utils.DockerImage(utils.GetLatestImageName(), utils.LatestVersion)
|
||||
latestVersion, err := utils.DockerImageVersion(image)
|
||||
|
||||
if latestVersion != nil && latestVersion.LessThan(minVersion) {
|
||||
t.Skipf("Upgrade test isn't applicable with version %q as the starting version", latestVersion.String())
|
||||
}
|
||||
|
||||
if err != nil || latestVersion == nil {
|
||||
t.Log(versionUndetermined)
|
||||
}
|
||||
}
|
||||
|
||||
// Test upgrade a cluster of latest version to the target version and ensure that the catalog still
|
||||
// functions properly. Note
|
||||
func TestCatalogUpgrade(t *testing.T) {
|
||||
maybeSkipUpgradeTest(t, minCatalogResourceVersion)
|
||||
t.Parallel()
|
||||
|
||||
const numServers = 1
|
||||
buildOpts := &libcluster.BuildOptions{
|
||||
ConsulImageName: utils.GetLatestImageName(),
|
||||
ConsulVersion: utils.LatestVersion,
|
||||
Datacenter: "dc1",
|
||||
InjectAutoEncryption: true,
|
||||
}
|
||||
|
||||
cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
|
||||
NumServers: 1,
|
||||
BuildOpts: buildOpts,
|
||||
ApplyDefaultProxySettings: false,
|
||||
Cmd: `-hcl=experiments=["resource-apis"]`,
|
||||
})
|
||||
|
||||
client := cluster.APIClient(0)
|
||||
|
||||
libcluster.WaitForLeader(t, cluster, client)
|
||||
libcluster.WaitForMembers(t, client, numServers)
|
||||
|
||||
leader, err := cluster.Leader()
|
||||
require.NoError(t, err)
|
||||
rscClient := pbresource.NewResourceServiceClient(leader.GetGRPCConn())
|
||||
|
||||
// Initialize some data
|
||||
catalogtest.PublishCatalogV2Beta1IntegrationTestData(t, rscClient)
|
||||
|
||||
// upgrade the cluster to the Target version
|
||||
t.Logf("initiating standard upgrade to version=%q", utils.TargetVersion)
|
||||
err = cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), utils.TargetVersion)
|
||||
|
||||
require.NoError(t, err)
|
||||
libcluster.WaitForLeader(t, cluster, client)
|
||||
libcluster.WaitForMembers(t, client, numServers)
|
||||
|
||||
catalogtest.VerifyCatalogV2Beta1IntegrationTestResults(t, rscClient)
|
||||
}
|
Loading…
Reference in New Issue