testing/deployer: rename various terms to better align with v2 and avoid confusion (#19600)

Conceptually rename the following topology terms to avoid confusion with v2 concepts and to better align with them:

- ServiceID -> ID
- Service -> Workload
- Upstream -> Destination

R.B. Boyer 2023-11-10 13:22:06 -06:00 committed by GitHub
parent 68e7f27fd2
commit b2979f6edf
41 changed files with 1019 additions and 945 deletions
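
For orientation, a minimal sketch of how the renamed types compose after this change, with field values drawn from the test diffs below (illustrative only, not a complete definition):

// topology.ServiceID -> topology.ID
// topology.Service   -> topology.Workload
// topology.Upstream  -> topology.Destination
wrk := &topology.Workload{ // formerly *topology.Service
	ID: topology.ID{ // formerly topology.ServiceID
		Partition: "default",
		Namespace: "default",
		Name:      "static-client",
	},
	// formerly Upstreams []*topology.Upstream
	Destinations: []*topology.Destination{{
		ID:        topology.ID{Name: "static-server"},
		PortName:  "http",
		LocalPort: 5000,
	}},
}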

View File

@ -65,12 +65,11 @@ These are comprised of 4 main parts:
- **Nodes**: A "box with ip address(es)". This should feel a bit like a VM or
a Kubernetes Pod as an enclosing entity.
- **Services/Workloads**: The list of service instances (v1) or workloads
(v2) that will execute on the given node. v2
Services will be implied by similarly named
workloads here unless opted out. This helps
define a v1-compatible topology and repurpose it
for v2 without reworking it.
- **Workloads**: The list of service instances (v1) or workloads
(v2) that will execute on the given node. v2 Services will
be implied by similarly named workloads here unless opted
out. This helps define a v1-compatible topology and
repurpose it for v2 without reworking it.
- **Services** (v2): v2 Service definitions to define explicitly, in addition
to the inferred ones (see the sketch after this list).
@ -90,7 +89,7 @@ These are comprised of 4 main parts:
- **Peerings**: The peering relationships between Clusters to establish.
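
Putting those parts together, a dataplane node and its workload look roughly like the following sketch, drawn from the test diffs in this commit (a v2 Service named static-server would be implied by the workload below unless opted out):

node := &topology.Node{
	Kind:      topology.NodeKindDataplane,
	Version:   topology.NodeVersionV2,
	Partition: "default",
	Name:      "dc1-client1",
	Workloads: []*topology.Workload{{
		ID: topology.ID{Name: "static-server", Namespace: "default", Partition: "default"},
	}},
}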
In the [topoutil](./topoutil) package there are some helpers for defining
common sets of nodes or services like Consul Servers, Mesh Gateways, or [fortio
common sets of nodes or workloads like Consul Servers, Mesh Gateways, or [fortio
servers](https://github.com/fortio/fortio)
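
For example, the fortio helper renamed in this commit builds a workload with sensible defaults; a usage sketch based on its new signature:

wrk := topoutil.NewFortioWorkloadWithDefaults(
	"dc1", // cluster name
	topology.ID{Name: "static-server", Partition: "default", Namespace: "default"},
	topology.NodeVersionV2,
	nil, // optional mutator: func(*topology.Workload)
)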
#### Useful topology concepts

View File

@ -47,41 +47,41 @@ func TestSplitterFeaturesL7ExplicitDestinations(t *testing.T) {
for _, ship := range ships {
t.Run("relationship: "+ship.String(), func(t *testing.T) {
var (
svc = ship.Caller
u = ship.Upstream
wrk = ship.Caller
dest = ship.Destination
)
v1ID := u.ID
v1ID := dest.ID
v1ID.Name = "static-server-v1"
v1ClusterPrefix := clusterPrefix(u.PortName, v1ID, u.Cluster)
v1ClusterPrefix := clusterPrefix(dest.PortName, v1ID, dest.Cluster)
v2ID := u.ID
v2ID := dest.ID
v2ID.Name = "static-server-v2"
v2ClusterPrefix := clusterPrefix(u.PortName, v2ID, u.Cluster)
v2ClusterPrefix := clusterPrefix(dest.PortName, v2ID, dest.Cluster)
// we expect 2 clusters, one for each leg of the split
asserter.UpstreamEndpointStatus(t, svc, v1ClusterPrefix+".", "HEALTHY", 1)
asserter.UpstreamEndpointStatus(t, svc, v2ClusterPrefix+".", "HEALTHY", 1)
asserter.DestinationEndpointStatus(t, wrk, v1ClusterPrefix+".", "HEALTHY", 1)
asserter.DestinationEndpointStatus(t, wrk, v2ClusterPrefix+".", "HEALTHY", 1)
// Both should be possible.
v1Expect := fmt.Sprintf("%s::%s", cluster.Name, v1ID.String())
v2Expect := fmt.Sprintf("%s::%s", cluster.Name, v2ID.String())
switch u.PortName {
switch dest.PortName {
case "tcp":
asserter.CheckBlankspaceNameTrafficSplitViaTCP(t, svc, u,
asserter.CheckBlankspaceNameTrafficSplitViaTCP(t, wrk, dest,
map[string]int{v1Expect: 10, v2Expect: 90})
case "grpc":
asserter.CheckBlankspaceNameTrafficSplitViaGRPC(t, svc, u,
asserter.CheckBlankspaceNameTrafficSplitViaGRPC(t, wrk, dest,
map[string]int{v1Expect: 10, v2Expect: 90})
case "http":
asserter.CheckBlankspaceNameTrafficSplitViaHTTP(t, svc, u, false, "/",
asserter.CheckBlankspaceNameTrafficSplitViaHTTP(t, wrk, dest, false, "/",
map[string]int{v1Expect: 10, v2Expect: 90})
case "http2":
asserter.CheckBlankspaceNameTrafficSplitViaHTTP(t, svc, u, true, "/",
asserter.CheckBlankspaceNameTrafficSplitViaHTTP(t, wrk, dest, true, "/",
map[string]int{v1Expect: 10, v2Expect: 90})
default:
t.Fatalf("unexpected port name: %s", u.PortName)
t.Fatalf("unexpected port name: %s", dest.PortName)
}
})
}
@ -134,8 +134,8 @@ func (c testSplitterFeaturesL7ExplicitDestinationsCreator) topologyConfigAddNode
) {
clusterName := cluster.Name
newServiceID := func(name string) topology.ServiceID {
return topology.ServiceID{
newID := func(name string) topology.ID {
return topology.ID{
Partition: partition,
Namespace: namespace,
Name: name,
@ -153,16 +153,16 @@ func (c testSplitterFeaturesL7ExplicitDestinationsCreator) topologyConfigAddNode
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewBlankspaceServiceWithDefaults(
Workloads: []*topology.Workload{
topoutil.NewBlankspaceWorkloadWithDefaults(
clusterName,
newServiceID("static-server-v1"),
newID("static-server-v1"),
topology.NodeVersionV2,
func(svc *topology.Service) {
svc.Meta = map[string]string{
func(wrk *topology.Workload) {
wrk.Meta = map[string]string{
"version": "v1",
}
svc.WorkloadIdentity = "static-server-v1"
wrk.WorkloadIdentity = "static-server-v1"
},
),
},
@ -172,16 +172,16 @@ func (c testSplitterFeaturesL7ExplicitDestinationsCreator) topologyConfigAddNode
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewBlankspaceServiceWithDefaults(
Workloads: []*topology.Workload{
topoutil.NewBlankspaceWorkloadWithDefaults(
clusterName,
newServiceID("static-server-v2"),
newID("static-server-v2"),
topology.NodeVersionV2,
func(svc *topology.Service) {
svc.Meta = map[string]string{
func(wrk *topology.Workload) {
wrk.Meta = map[string]string{
"version": "v2",
}
svc.WorkloadIdentity = "static-server-v2"
wrk.WorkloadIdentity = "static-server-v2"
},
),
},
@ -191,33 +191,33 @@ func (c testSplitterFeaturesL7ExplicitDestinationsCreator) topologyConfigAddNode
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewBlankspaceServiceWithDefaults(
Workloads: []*topology.Workload{
topoutil.NewBlankspaceWorkloadWithDefaults(
clusterName,
newServiceID("static-client"),
newID("static-client"),
topology.NodeVersionV2,
func(svc *topology.Service) {
svc.Upstreams = []*topology.Upstream{
func(wrk *topology.Workload) {
wrk.Destinations = []*topology.Destination{
{
ID: newServiceID("static-server"),
ID: newID("static-server"),
PortName: "http",
LocalAddress: "0.0.0.0", // needed for an assertion
LocalPort: 5000,
},
{
ID: newServiceID("static-server"),
ID: newID("static-server"),
PortName: "http2",
LocalAddress: "0.0.0.0", // needed for an assertion
LocalPort: 5001,
},
{
ID: newServiceID("static-server"),
ID: newID("static-server"),
PortName: "grpc",
LocalAddress: "0.0.0.0", // needed for an assertion
LocalPort: 5002,
},
{
ID: newServiceID("static-server"),
ID: newID("static-server"),
PortName: "tcp",
LocalAddress: "0.0.0.0", // needed for an assertion
LocalPort: 5003,

View File

@ -68,15 +68,15 @@ func TestBasicL4ExplicitDestinations(t *testing.T) {
for _, ship := range ships {
t.Run("relationship: "+ship.String(), func(t *testing.T) {
var (
svc = ship.Caller
u = ship.Upstream
wrk = ship.Caller
dest = ship.Destination
)
clusterPrefix := clusterPrefixForUpstream(u)
clusterPrefix := clusterPrefixForDestination(dest)
asserter.UpstreamEndpointStatus(t, svc, clusterPrefix+".", "HEALTHY", 1)
asserter.HTTPServiceEchoes(t, svc, u.LocalPort, "")
asserter.FortioFetch2FortioName(t, svc, u, cluster.Name, u.ID)
asserter.DestinationEndpointStatus(t, wrk, clusterPrefix+".", "HEALTHY", 1)
asserter.HTTPServiceEchoes(t, wrk, dest.LocalPort, "")
asserter.FortioFetch2FortioName(t, wrk, dest, cluster.Name, dest.ID)
})
}
}
@ -128,8 +128,8 @@ func (c testBasicL4ExplicitDestinationsCreator) topologyConfigAddNodes(
) {
clusterName := cluster.Name
newServiceID := func(name string) topology.ServiceID {
return topology.ServiceID{
newID := func(name string) topology.ID {
return topology.ID{
Partition: partition,
Namespace: namespace,
Name: name,
@ -147,10 +147,10 @@ func (c testBasicL4ExplicitDestinationsCreator) topologyConfigAddNodes(
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewFortioServiceWithDefaults(
Workloads: []*topology.Workload{
topoutil.NewFortioWorkloadWithDefaults(
clusterName,
newServiceID("single-server"),
newID("single-server"),
topology.NodeVersionV2,
nil,
),
@ -161,16 +161,16 @@ func (c testBasicL4ExplicitDestinationsCreator) topologyConfigAddNodes(
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewFortioServiceWithDefaults(
Workloads: []*topology.Workload{
topoutil.NewFortioWorkloadWithDefaults(
clusterName,
newServiceID("single-client"),
newID("single-client"),
topology.NodeVersionV2,
func(svc *topology.Service) {
delete(svc.Ports, "grpc") // v2 mode turns this on, so turn it off
delete(svc.Ports, "http2") // v2 mode turns this on, so turn it off
svc.Upstreams = []*topology.Upstream{{
ID: newServiceID("single-server"),
func(wrk *topology.Workload) {
delete(wrk.Ports, "grpc") // v2 mode turns this on, so turn it off
delete(wrk.Ports, "http2") // v2 mode turns this on, so turn it off
wrk.Destinations = []*topology.Destination{{
ID: newID("single-server"),
PortName: "http",
LocalAddress: "0.0.0.0", // needed for an assertion
LocalPort: 5000,
@ -203,10 +203,10 @@ func (c testBasicL4ExplicitDestinationsCreator) topologyConfigAddNodes(
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewFortioServiceWithDefaults(
Workloads: []*topology.Workload{
topoutil.NewFortioWorkloadWithDefaults(
clusterName,
newServiceID("multi-server"),
newID("multi-server"),
topology.NodeVersionV2,
nil,
),
@ -217,21 +217,21 @@ func (c testBasicL4ExplicitDestinationsCreator) topologyConfigAddNodes(
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewFortioServiceWithDefaults(
Workloads: []*topology.Workload{
topoutil.NewFortioWorkloadWithDefaults(
clusterName,
newServiceID("multi-client"),
newID("multi-client"),
topology.NodeVersionV2,
func(svc *topology.Service) {
svc.Upstreams = []*topology.Upstream{
func(wrk *topology.Workload) {
wrk.Destinations = []*topology.Destination{
{
ID: newServiceID("multi-server"),
ID: newID("multi-server"),
PortName: "http",
LocalAddress: "0.0.0.0", // needed for an assertion
LocalPort: 5000,
},
{
ID: newServiceID("multi-server"),
ID: newID("multi-server"),
PortName: "http2",
LocalAddress: "0.0.0.0", // needed for an assertion
LocalPort: 5001,

View File

@ -9,15 +9,20 @@ import (
"github.com/hashicorp/consul/testing/deployer/topology"
)
func clusterPrefixForUpstream(u *topology.Upstream) string {
if u.Peer == "" {
return clusterPrefix(u.PortName, u.ID, u.Cluster)
// Deprecated: clusterPrefixForDestination
func clusterPrefixForUpstream(dest *topology.Destination) string {
return clusterPrefixForDestination(dest)
}
func clusterPrefixForDestination(dest *topology.Destination) string {
if dest.Peer == "" {
return clusterPrefix(dest.PortName, dest.ID, dest.Cluster)
} else {
return strings.Join([]string{u.ID.Name, u.ID.Namespace, u.Peer, "external"}, ".")
return strings.Join([]string{dest.ID.Name, dest.ID.Namespace, dest.Peer, "external"}, ".")
}
}
func clusterPrefix(port string, svcID topology.ServiceID, cluster string) string {
func clusterPrefix(port string, svcID topology.ID, cluster string) string {
if svcID.PartitionOrDefault() == "default" {
return strings.Join([]string{port, svcID.Name, svcID.Namespace, cluster, "internal"}, ".")
} else {
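
For reference, the two prefix shapes visible above come out as follows (illustrative destination static-server in namespace default, partition default, cluster dc1; the peer name is a placeholder):

// Default partition (clusterPrefix):
//   port.name.namespace.cluster.internal
//   => "http.static-server.default.dc1.internal"
// Peered destination (clusterPrefixForDestination, dest.Peer != ""):
//   => "static-server.default.<peer>.external"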

View File

@ -66,17 +66,17 @@ func TestBasicL4ImplicitDestinations(t *testing.T) {
for _, ship := range ships {
t.Run("relationship: "+ship.String(), func(t *testing.T) {
var (
svc = ship.Caller
u = ship.Upstream
wrk = ship.Caller
dest = ship.Destination
)
clusterPrefix := clusterPrefixForUpstream(u)
clusterPrefix := clusterPrefixForDestination(dest)
asserter.UpstreamEndpointStatus(t, svc, clusterPrefix+".", "HEALTHY", 1)
if u.LocalPort > 0 {
asserter.HTTPServiceEchoes(t, svc, u.LocalPort, "")
asserter.DestinationEndpointStatus(t, wrk, clusterPrefix+".", "HEALTHY", 1)
if dest.LocalPort > 0 {
asserter.HTTPServiceEchoes(t, wrk, dest.LocalPort, "")
}
asserter.FortioFetch2FortioName(t, svc, u, cluster.Name, u.ID)
asserter.FortioFetch2FortioName(t, wrk, dest, cluster.Name, dest.ID)
})
}
}
@ -128,8 +128,8 @@ func (c testBasicL4ImplicitDestinationsCreator) topologyConfigAddNodes(
) {
clusterName := cluster.Name
newServiceID := func(name string) topology.ServiceID {
return topology.ServiceID{
newID := func(name string) topology.ID {
return topology.ID{
Partition: partition,
Namespace: namespace,
Name: name,
@ -147,13 +147,13 @@ func (c testBasicL4ImplicitDestinationsCreator) topologyConfigAddNodes(
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewFortioServiceWithDefaults(
Workloads: []*topology.Workload{
topoutil.NewFortioWorkloadWithDefaults(
clusterName,
newServiceID("static-server"),
newID("static-server"),
topology.NodeVersionV2,
func(svc *topology.Service) {
svc.EnableTransparentProxy = true
func(wrk *topology.Workload) {
wrk.EnableTransparentProxy = true
},
),
},
@ -163,20 +163,20 @@ func (c testBasicL4ImplicitDestinationsCreator) topologyConfigAddNodes(
Version: topology.NodeVersionV2,
Partition: partition,
Name: nodeName(),
Services: []*topology.Service{
topoutil.NewFortioServiceWithDefaults(
Workloads: []*topology.Workload{
topoutil.NewFortioWorkloadWithDefaults(
clusterName,
newServiceID("static-client"),
newID("static-client"),
topology.NodeVersionV2,
func(svc *topology.Service) {
svc.EnableTransparentProxy = true
svc.ImpliedUpstreams = []*topology.Upstream{
func(wrk *topology.Workload) {
wrk.EnableTransparentProxy = true
wrk.ImpliedDestinations = []*topology.Destination{
{
ID: newServiceID("static-server"),
ID: newID("static-server"),
PortName: "http",
},
{
ID: newServiceID("static-server"),
ID: newID("static-server"),
PortName: "http2",
},
}
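
Compared with the explicit-destinations tests earlier in this commit, the difference is only in how the destination is declared. A sketch of both shapes, given a workload wrk under construction and using names from these tests:

// Explicit: Envoy exposes the destination on a fixed local port.
wrk.Destinations = []*topology.Destination{{
	ID:           topology.ID{Name: "static-server"},
	PortName:     "http",
	LocalAddress: "0.0.0.0",
	LocalPort:    5000,
}}

// Implied (transparent proxy): no local port; the workload dials the
// destination's virtual address instead.
wrk.EnableTransparentProxy = true
wrk.ImpliedDestinations = []*topology.Destination{{
	ID:       topology.ID{Name: "static-server"},
	PortName: "http",
}}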

View File

@ -6,12 +6,11 @@ package connect
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/test-integ/topoutil"
)
@ -28,13 +27,13 @@ import (
// 1. The test spins up a one-server cluster with static-server and static-client.
// 2. A snapshot is taken and the cluster is restored from the snapshot
// 3. A new static-server replaces the old one
// 4. At the end, we assert the static-client's upstream is updated with the
// 4. At the end, we assert the static-client's destination is updated with the
// new static-server
func Test_Snapshot_Restore_Agentless(t *testing.T) {
t.Parallel()
staticServerSID := topology.NewServiceID("static-server", "default", "default")
staticClientSID := topology.NewServiceID("static-client", "default", "default")
staticServerSID := topology.NewID("static-server", "default", "default")
staticClientSID := topology.NewID("static-client", "default", "default")
clu := &topology.Config{
Images: utils.TargetImages(),
@ -59,7 +58,7 @@ func Test_Snapshot_Restore_Agentless(t *testing.T) {
{
Kind: topology.NodeKindDataplane,
Name: "dc1-client1",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: staticServerSID,
Image: "docker.mirror.hashicorp.services/fortio/fortio",
@ -77,7 +76,7 @@ func Test_Snapshot_Restore_Agentless(t *testing.T) {
{
Kind: topology.NodeKindDataplane,
Name: "dc1-client2",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: staticClientSID,
Image: "docker.mirror.hashicorp.services/fortio/fortio",
@ -89,7 +88,7 @@ func Test_Snapshot_Restore_Agentless(t *testing.T) {
"-http-port", "8080",
"-redirect-port", "-disabled",
},
Upstreams: []*topology.Upstream{
Destinations: []*topology.Destination{
{
ID: staticServerSID,
LocalPort: 5000,
@ -103,7 +102,7 @@ func Test_Snapshot_Restore_Agentless(t *testing.T) {
Kind: topology.NodeKindDataplane,
Name: "dc1-client3",
Disabled: true,
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: staticServerSID,
Image: "docker.mirror.hashicorp.services/fortio/fortio",
@ -149,15 +148,15 @@ func Test_Snapshot_Restore_Agentless(t *testing.T) {
sp := sprawltest.Launch(t, clu)
asserter := topoutil.NewAsserter(sp)
staticClient := sp.Topology().Clusters["dc1"].ServiceByID(
staticClient := sp.Topology().Clusters["dc1"].WorkloadByID(
topology.NewNodeID("dc1-client2", "default"),
staticClientSID,
)
asserter.FortioFetch2HeaderEcho(t, staticClient, &topology.Upstream{
asserter.FortioFetch2HeaderEcho(t, staticClient, &topology.Destination{
ID: staticServerSID,
LocalPort: 5000,
})
staticServer := sp.Topology().Clusters["dc1"].ServiceByID(
staticServer := sp.Topology().Clusters["dc1"].WorkloadByID(
topology.NewNodeID("dc1-client1", "default"),
staticServerSID,
)

View File

@ -7,9 +7,8 @@ import (
"fmt"
"testing"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/topology"
)
type ac1BasicSuite struct {
@ -18,21 +17,21 @@ type ac1BasicSuite struct {
Peer string
// test points
sidServerHTTP topology.ServiceID
sidServerTCP topology.ServiceID
sidServerHTTP topology.ID
sidServerTCP topology.ID
nodeServerHTTP topology.NodeID
nodeServerTCP topology.NodeID
// 1.1
sidClientTCP topology.ServiceID
sidClientTCP topology.ID
nodeClientTCP topology.NodeID
// 1.2
sidClientHTTP topology.ServiceID
sidClientHTTP topology.ID
nodeClientHTTP topology.NodeID
upstreamHTTP *topology.Upstream
upstreamTCP *topology.Upstream
upstreamHTTP *topology.Destination
upstreamTCP *topology.Destination
}
var ac1BasicSuites []sharedTopoSuite = []sharedTopoSuite{
@ -58,24 +57,24 @@ func (s *ac1BasicSuite) setup(t *testing.T, ct *commonTopo) {
cluPeerName := LocalPeerName(clu, "default")
const prefix = "ac1-"
tcpServerSID := topology.ServiceID{
tcpServerSID := topology.ID{
Name: prefix + "server-tcp",
Partition: partition,
}
httpServerSID := topology.ServiceID{
httpServerSID := topology.ID{
Name: prefix + "server-http",
Partition: partition,
}
upstreamHTTP := &topology.Upstream{
ID: topology.ServiceID{
upstreamHTTP := &topology.Destination{
ID: topology.ID{
Name: httpServerSID.Name,
Partition: partition,
},
LocalPort: 5001,
Peer: peer,
}
upstreamTCP := &topology.Upstream{
ID: topology.ServiceID{
upstreamTCP := &topology.Destination{
ID: topology.ID{
Name: tcpServerSID.Name,
Partition: partition,
},
@ -85,16 +84,16 @@ func (s *ac1BasicSuite) setup(t *testing.T, ct *commonTopo) {
// Make clients which have server upstreams
setupClientServiceAndConfigs := func(protocol string) (serviceExt, *topology.Node) {
sid := topology.ServiceID{
sid := topology.ID{
Name: prefix + "client-" + protocol,
Partition: partition,
}
svc := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
clu.Datacenter,
sid,
func(s *topology.Service) {
s.Upstreams = []*topology.Upstream{
func(s *topology.Workload) {
s.Destinations = []*topology.Destination{
upstreamTCP,
upstreamHTTP,
}
@ -123,7 +122,7 @@ func (s *ac1BasicSuite) setup(t *testing.T, ct *commonTopo) {
httpClient, httpClientNode := setupClientServiceAndConfigs("http")
httpServer := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
peerClu.Datacenter,
httpServerSID,
nil,
@ -154,7 +153,7 @@ func (s *ac1BasicSuite) setup(t *testing.T, ct *commonTopo) {
},
}
tcpServer := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
peerClu.Datacenter,
tcpServerSID,
nil,
@ -209,20 +208,20 @@ func (s *ac1BasicSuite) test(t *testing.T, ct *commonTopo) {
ac := s
// refresh this from Topology
svcClientTCP := dc.ServiceByID(
svcClientTCP := dc.WorkloadByID(
ac.nodeClientTCP,
ac.sidClientTCP,
)
svcClientHTTP := dc.ServiceByID(
svcClientHTTP := dc.WorkloadByID(
ac.nodeClientHTTP,
ac.sidClientHTTP,
)
// our ac has the node/sid for server in the peer DC
svcServerHTTP := peer.ServiceByID(
svcServerHTTP := peer.WorkloadByID(
ac.nodeServerHTTP,
ac.sidServerHTTP,
)
svcServerTCP := peer.ServiceByID(
svcServerTCP := peer.WorkloadByID(
ac.nodeServerTCP,
ac.sidServerTCP,
)
@ -236,7 +235,7 @@ func (s *ac1BasicSuite) test(t *testing.T, ct *commonTopo) {
tcs := []struct {
acSub int
proto string
svc *topology.Service
svc *topology.Workload
}{
{1, "tcp", svcClientTCP},
{2, "http", svcClientHTTP},

View File

@ -7,17 +7,16 @@ import (
"fmt"
"testing"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/api"
)
type ac2DiscoChainSuite struct {
DC string
Peer string
clientSID topology.ServiceID
clientSID topology.ID
}
var ac2DiscoChainSuites []sharedTopoSuite = []sharedTopoSuite{
@ -42,7 +41,7 @@ func (s *ac2DiscoChainSuite) setup(t *testing.T, ct *commonTopo) {
// Make an HTTP server with discovery chain config entries
server := NewFortioServiceWithDefaults(
clu.Datacenter,
topology.ServiceID{
topology.ID{
Name: "ac2-disco-chain-svc",
Partition: partition,
},
@ -82,11 +81,11 @@ func (s *ac2DiscoChainSuite) setup(t *testing.T, ct *commonTopo) {
},
},
)
ct.AddServiceNode(clu, serviceExt{Service: server})
ct.AddServiceNode(clu, serviceExt{Workload: server})
// Define server as upstream for client
upstream := &topology.Upstream{
ID: topology.ServiceID{
upstream := &topology.Destination{
ID: topology.ID{
Name: server.ID.Name,
Partition: partition, // TODO: iterate over all possible partitions
},
@ -98,15 +97,15 @@ func (s *ac2DiscoChainSuite) setup(t *testing.T, ct *commonTopo) {
}
// Make client which will dial server
clientSID := topology.ServiceID{
clientSID := topology.ID{
Name: "ac2-client",
Partition: partition,
}
client := NewFortioServiceWithDefaults(
clu.Datacenter,
clientSID,
func(s *topology.Service) {
s.Upstreams = []*topology.Upstream{
func(s *topology.Workload) {
s.Destinations = []*topology.Destination{
upstream,
}
},
@ -121,7 +120,7 @@ func (s *ac2DiscoChainSuite) setup(t *testing.T, ct *commonTopo) {
},
},
)
ct.AddServiceNode(clu, serviceExt{Service: client})
ct.AddServiceNode(clu, serviceExt{Workload: client})
clu.InitialConfigEntries = append(clu.InitialConfigEntries,
&api.ServiceConfigEntry{
@ -161,12 +160,12 @@ func (s *ac2DiscoChainSuite) setup(t *testing.T, ct *commonTopo) {
func (s *ac2DiscoChainSuite) test(t *testing.T, ct *commonTopo) {
dc := ct.Sprawl.Topology().Clusters[s.DC]
svcs := dc.ServicesByID(s.clientSID)
svcs := dc.WorkloadsByID(s.clientSID)
require.Len(t, svcs, 1, "expected exactly one client in datacenter")
client := svcs[0]
require.Len(t, client.Upstreams, 1, "expected exactly one upstream for client")
u := client.Upstreams[0]
require.Len(t, client.Destinations, 1, "expected exactly one upstream for client")
u := client.Destinations[0]
t.Run("peered upstream exists in catalog", func(t *testing.T) {
t.Parallel()
@ -177,7 +176,7 @@ func (s *ac2DiscoChainSuite) test(t *testing.T, ct *commonTopo) {
t.Run("peered upstream endpoint status is healthy", func(t *testing.T) {
t.Parallel()
ct.Assert.UpstreamEndpointStatus(t, client, peerClusterPrefix(u), "HEALTHY", 1)
ct.Assert.DestinationEndpointStatus(t, client, peerClusterPrefix(u), "HEALTHY", 1)
})
t.Run("response contains header injected by splitter", func(t *testing.T) {
@ -197,7 +196,7 @@ func (s *ac2DiscoChainSuite) test(t *testing.T, ct *commonTopo) {
// func (s *ResourceGenerator) getTargetClusterName
//
// and connect/sni.go
func peerClusterPrefix(u *topology.Upstream) string {
func peerClusterPrefix(u *topology.Destination) string {
if u.Peer == "" {
panic("upstream is not from a peer")
}

View File

@ -35,12 +35,12 @@ type ac3SvcDefaultsSuite struct {
Peer string
// test points
sidServer topology.ServiceID
sidServer topology.ID
nodeServer topology.NodeID
sidClient topology.ServiceID
sidClient topology.ID
nodeClient topology.NodeID
upstream *topology.Upstream
upstream *topology.Destination
}
func (s *ac3SvcDefaultsSuite) testName() string {
@ -56,12 +56,12 @@ func (s *ac3SvcDefaultsSuite) setup(t *testing.T, ct *commonTopo) {
peer := LocalPeerName(peerClu, "default")
cluPeerName := LocalPeerName(clu, "default")
serverSID := topology.ServiceID{
serverSID := topology.ID{
Name: "ac3-server",
Partition: partition,
}
upstream := &topology.Upstream{
ID: topology.ServiceID{
upstream := &topology.Destination{
ID: topology.ID{
Name: serverSID.Name,
Partition: partition,
},
@ -69,16 +69,16 @@ func (s *ac3SvcDefaultsSuite) setup(t *testing.T, ct *commonTopo) {
Peer: peer,
}
sid := topology.ServiceID{
sid := topology.ID{
Name: "ac3-client",
Partition: partition,
}
client := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
clu.Datacenter,
sid,
func(s *topology.Service) {
s.Upstreams = []*topology.Upstream{
func(s *topology.Workload) {
s.Destinations = []*topology.Destination{
upstream,
}
},
@ -112,7 +112,7 @@ func (s *ac3SvcDefaultsSuite) setup(t *testing.T, ct *commonTopo) {
clientNode := ct.AddServiceNode(clu, client)
server := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
peerClu.Datacenter,
serverSID,
nil,
@ -158,12 +158,12 @@ func (s *ac3SvcDefaultsSuite) test(t *testing.T, ct *commonTopo) {
peer := ct.Sprawl.Topology().Clusters[s.Peer]
// refresh this from Topology
svcClient := dc.ServiceByID(
svcClient := dc.WorkloadByID(
s.nodeClient,
s.sidClient,
)
// our ac has the node/sid for server in the peer DC
svcServer := peer.ServiceByID(
svcServer := peer.WorkloadByID(
s.nodeServer,
s.sidServer,
)

View File

@ -22,9 +22,9 @@ type ac4ProxyDefaultsSuite struct {
nodeClient topology.NodeID
nodeServer topology.NodeID
serverSID topology.ServiceID
clientSID topology.ServiceID
upstream *topology.Upstream
serverSID topology.ID
clientSID topology.ID
upstream *topology.Destination
}
var ac4ProxyDefaultsSuites []sharedTopoSuite = []sharedTopoSuite{
@ -49,28 +49,28 @@ func (s *ac4ProxyDefaultsSuite) setup(t *testing.T, ct *commonTopo) {
peer := LocalPeerName(peerClu, "default")
cluPeerName := LocalPeerName(clu, "default")
serverSID := topology.ServiceID{
serverSID := topology.ID{
Name: "ac4-server-http",
Partition: partition,
}
// Define server as upstream for client
upstream := &topology.Upstream{
upstream := &topology.Destination{
ID: serverSID,
LocalPort: 5000,
Peer: peer,
}
// Make client which will dial server
clientSID := topology.ServiceID{
clientSID := topology.ID{
Name: "ac4-http-client",
Partition: partition,
}
client := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
clu.Datacenter,
clientSID,
func(s *topology.Service) {
s.Upstreams = []*topology.Upstream{
func(s *topology.Workload) {
s.Destinations = []*topology.Destination{
upstream,
}
},
@ -92,7 +92,7 @@ func (s *ac4ProxyDefaultsSuite) setup(t *testing.T, ct *commonTopo) {
clientNode := ct.AddServiceNode(clu, client)
server := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
peerClu.Datacenter,
serverSID,
nil,
@ -143,16 +143,16 @@ func (s *ac4ProxyDefaultsSuite) setup(t *testing.T, ct *commonTopo) {
}
func (s *ac4ProxyDefaultsSuite) test(t *testing.T, ct *commonTopo) {
var client *topology.Service
var client *topology.Workload
dc := ct.Sprawl.Topology().Clusters[s.DC]
peer := ct.Sprawl.Topology().Clusters[s.Peer]
clientSVC := dc.ServiceByID(
clientSVC := dc.WorkloadByID(
s.nodeClient,
s.clientSID,
)
serverSVC := peer.ServiceByID(
serverSVC := peer.WorkloadByID(
s.nodeServer,
s.serverSID,
)
@ -162,14 +162,14 @@ func (s *ac4ProxyDefaultsSuite) test(t *testing.T, ct *commonTopo) {
ct.Assert.FortioFetch2HeaderEcho(t, clientSVC, s.upstream)
t.Run("Validate services exist in catalog", func(t *testing.T) {
dcSvcs := dc.ServicesByID(s.clientSID)
dcSvcs := dc.WorkloadsByID(s.clientSID)
require.Len(t, dcSvcs, 1, "expected exactly one client")
client = dcSvcs[0]
require.Len(t, client.Upstreams, 1, "expected exactly one upstream for client")
require.Len(t, client.Destinations, 1, "expected exactly one upstream for client")
server := dc.ServicesByID(s.serverSID)
server := dc.WorkloadsByID(s.serverSID)
require.Len(t, server, 1, "expected exactly one server")
require.Len(t, server[0].Upstreams, 0, "expected no upstream for server")
require.Len(t, server[0].Destinations, 0, "expected no upstream for server")
})
t.Run("peered upstream exists in catalog", func(t *testing.T) {

View File

@ -5,7 +5,6 @@ package peering
import (
"fmt"
"testing"
"github.com/hashicorp/consul/api"
@ -19,8 +18,8 @@ type ac5_1NoSvcMeshSuite struct {
DC string
Peer string
serverSID topology.ServiceID
clientSID topology.ServiceID
serverSID topology.ID
clientSID topology.ID
}
var (
@ -47,23 +46,23 @@ func (s *ac5_1NoSvcMeshSuite) setup(t *testing.T, ct *commonTopo) {
partition := "default"
peer := LocalPeerName(peerClu, partition)
serverSID := topology.ServiceID{
serverSID := topology.ID{
Name: "ac5-server-http",
Partition: partition,
}
// Make client which will dial server
clientSID := topology.ServiceID{
clientSID := topology.ID{
Name: "ac5-http-client",
Partition: partition,
}
// disable service mesh for client in s.DC
client := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
clu.Datacenter,
clientSID,
func(s *topology.Service) {
func(s *topology.Workload) {
s.EnvoyAdminPort = 0
s.DisableServiceMesh = true
},
@ -79,7 +78,7 @@ func (s *ac5_1NoSvcMeshSuite) setup(t *testing.T, ct *commonTopo) {
ct.AddServiceNode(clu, client)
server := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
clu.Datacenter,
serverSID,
nil,

View File

@ -5,9 +5,8 @@ package peering
import (
"fmt"
"time"
"testing"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
@ -26,10 +25,11 @@ import (
// 9. Delete failing health check from step 3
// 10. Repeat step 2
type ac5_2PQFailoverSuite struct {
clientSID topology.ServiceID
serverSID topology.ServiceID
clientSID topology.ID
serverSID topology.ID
nodeServer topology.NodeID
}
type nodeKey struct {
dc string
partition string
@ -56,21 +56,21 @@ func (s *ac5_2PQFailoverSuite) setupDC(ct *commonTopo, clu, peerClu *topology.Cl
partition := "default"
peer := LocalPeerName(peerClu, partition)
serverSID := topology.ServiceID{
serverSID := topology.ID{
Name: "ac5-server-http",
Partition: partition,
}
clientSID := topology.ServiceID{
clientSID := topology.ID{
Name: "ac5-client-http",
Partition: partition,
}
client := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
clu.Datacenter,
clientSID,
func(s *topology.Service) {
func(s *topology.Workload) {
s.EnvoyAdminPort = 0
s.DisableServiceMesh = true
},
@ -87,10 +87,10 @@ func (s *ac5_2PQFailoverSuite) setupDC(ct *commonTopo, clu, peerClu *topology.Cl
ct.AddServiceNode(clu, client)
server := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
clu.Datacenter,
serverSID,
func(s *topology.Service) {
func(s *topology.Workload) {
s.EnvoyAdminPort = 0
s.DisableServiceMesh = true
},
@ -113,22 +113,22 @@ func (s *ac5_2PQFailoverSuite) setupDC3(ct *commonTopo, clu, peer1, peer2 *topol
)
peers = append(peers, LocalPeerName(peer1, partition), LocalPeerName(peer2, partition))
serverSID := topology.ServiceID{
serverSID := topology.ID{
Name: "ac5-server-http",
Partition: partition,
}
clientSID := topology.ServiceID{
clientSID := topology.ID{
Name: "ac5-client-http",
Partition: partition,
}
// disable service mesh for client in DC3
client := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
clu.Datacenter,
clientSID,
func(s *topology.Service) {
func(s *topology.Workload) {
s.EnvoyAdminPort = 0
s.DisableServiceMesh = true
},
@ -153,10 +153,10 @@ func (s *ac5_2PQFailoverSuite) setupDC3(ct *commonTopo, clu, peer1, peer2 *topol
ct.AddServiceNode(clu, client)
server := serviceExt{
Service: NewFortioServiceWithDefaults(
Workload: NewFortioServiceWithDefaults(
clu.Datacenter,
serverSID,
func(s *topology.Service) {
func(s *topology.Workload) {
s.EnvoyAdminPort = 0
s.DisableServiceMesh = true
},

View File

@ -37,12 +37,12 @@ type ac6FailoversSuite struct {
FarInNSAlt bool
// launch outputs, for querying during test
clientSID topology.ServiceID
clientSID topology.ID
// near = same DC as client; far = other DC
nearServerSID topology.ServiceID
nearServerSID topology.ID
// used to remove the node and trigger failover
nearServerNode topology.NodeID
farServerSID topology.ServiceID
farServerSID topology.ID
farServerNode topology.NodeID
}
@ -217,7 +217,7 @@ func (s *ac6FailoversSuite) setup(t *testing.T, ct *commonTopo) {
}
// - server in clientPartition/DC (main target)
nearServerSID := topology.ServiceID{
nearServerSID := topology.ID{
Name: "ac6-server",
Partition: defaultToEmptyForCE("default"),
Namespace: defaultToEmptyForCE("default"),
@ -233,7 +233,7 @@ func (s *ac6FailoversSuite) setup(t *testing.T, ct *commonTopo) {
nearServerSID,
nil,
)
nearServerNode := ct.AddServiceNode(nearClu, serviceExt{Service: nearServer})
nearServerNode := ct.AddServiceNode(nearClu, serviceExt{Workload: nearServer})
nearClu.InitialConfigEntries = append(nearClu.InitialConfigEntries,
&api.ServiceConfigEntry{
@ -245,7 +245,7 @@ func (s *ac6FailoversSuite) setup(t *testing.T, ct *commonTopo) {
},
)
// - server in otherPartition/otherDC
farServerSID := topology.ServiceID{
farServerSID := topology.ID{
Name: nearServerSID.Name,
Partition: defaultToEmptyForCE("default"),
Namespace: defaultToEmptyForCE("default"),
@ -261,7 +261,7 @@ func (s *ac6FailoversSuite) setup(t *testing.T, ct *commonTopo) {
farServerSID,
nil,
)
farServerNode := ct.AddServiceNode(farClu, serviceExt{Service: farServer})
farServerNode := ct.AddServiceNode(farClu, serviceExt{Workload: farServer})
if nearClu != farClu {
ct.ExportService(farClu, farServerSID.Partition,
api.ExportedService{
@ -337,7 +337,7 @@ func (s *ac6FailoversSuite) setup(t *testing.T, ct *commonTopo) {
},
)
clientSID := topology.ServiceID{
clientSID := topology.ID{
Name: "ac6-client",
Partition: defaultToEmptyForCE(nearServerSID.Partition),
Namespace: defaultToEmptyForCE(nearServerSID.Namespace),
@ -345,11 +345,11 @@ func (s *ac6FailoversSuite) setup(t *testing.T, ct *commonTopo) {
client := NewFortioServiceWithDefaults(
nearClu.Datacenter,
clientSID,
func(s *topology.Service) {
// Upstream per partition
s.Upstreams = []*topology.Upstream{
func(s *topology.Workload) {
// Destination per partition
s.Destinations = []*topology.Destination{
{
ID: topology.ServiceID{
ID: topology.ID{
Name: nearServerSID.Name,
Partition: defaultToEmptyForCE(nearServerSID.Partition),
Namespace: defaultToEmptyForCE(nearServerSID.Namespace),
@ -362,7 +362,7 @@ func (s *ac6FailoversSuite) setup(t *testing.T, ct *commonTopo) {
}
},
)
ct.AddServiceNode(nearClu, serviceExt{Service: client})
ct.AddServiceNode(nearClu, serviceExt{Workload: client})
nearClu.InitialConfigEntries = append(nearClu.InitialConfigEntries,
&api.ServiceConfigEntry{
Kind: api.ServiceDefaults,
@ -432,12 +432,12 @@ func (s *ac6FailoversSuite) test(t *testing.T, ct *commonTopo) {
farClu = ct.Sprawl.Topology().Clusters["dc1"]
}
svcs := nearClu.ServicesByID(s.clientSID)
svcs := nearClu.WorkloadsByID(s.clientSID)
require.Len(t, svcs, 1, "expected exactly one client in datacenter")
client := svcs[0]
require.Len(t, client.Upstreams, 1, "expected one upstream for client")
upstream := client.Upstreams[0]
require.Len(t, client.Destinations, 1, "expected one upstream for client")
upstream := client.Destinations[0]
fmt.Println("### preconditions")

View File

@ -21,13 +21,13 @@ type suiteRotateGW struct {
DC string
Peer string
sidServer topology.ServiceID
sidServer topology.ID
nodeServer topology.NodeID
sidClient topology.ServiceID
sidClient topology.ID
nodeClient topology.NodeID
upstream *topology.Upstream
upstream *topology.Destination
newMGWNodeName string
}
@ -62,7 +62,7 @@ func (s *suiteRotateGW) setup(t *testing.T, ct *commonTopo) {
server := NewFortioServiceWithDefaults(
peerClu.Datacenter,
topology.ServiceID{
topology.ID{
Name: prefix + "server-http",
Partition: partition,
},
@ -70,8 +70,8 @@ func (s *suiteRotateGW) setup(t *testing.T, ct *commonTopo) {
)
// Make clients which have server upstreams
upstream := &topology.Upstream{
ID: topology.ServiceID{
upstream := &topology.Destination{
ID: topology.ID{
Name: server.ID.Name,
Partition: partition,
},
@ -83,17 +83,17 @@ func (s *suiteRotateGW) setup(t *testing.T, ct *commonTopo) {
// create client in us
client := NewFortioServiceWithDefaults(
clu.Datacenter,
topology.ServiceID{
topology.ID{
Name: prefix + "client",
Partition: partition,
},
func(s *topology.Service) {
s.Upstreams = []*topology.Upstream{
func(s *topology.Workload) {
s.Destinations = []*topology.Destination{
upstream,
}
},
)
clientNode := ct.AddServiceNode(clu, serviceExt{Service: client,
clientNode := ct.AddServiceNode(clu, serviceExt{Workload: client,
Config: &api.ServiceConfigEntry{
Kind: api.ServiceDefaults,
Name: client.ID.Name,
@ -110,7 +110,7 @@ func (s *suiteRotateGW) setup(t *testing.T, ct *commonTopo) {
})
// actually to be used by the other pairing
serverNode := ct.AddServiceNode(peerClu, serviceExt{
Service: server,
Workload: server,
Config: &api.ServiceConfigEntry{
Kind: api.ServiceDefaults,
Name: server.ID.Name,
@ -161,11 +161,11 @@ func (s *suiteRotateGW) test(t *testing.T, ct *commonTopo) {
dc := ct.Sprawl.Topology().Clusters[s.DC]
peer := ct.Sprawl.Topology().Clusters[s.Peer]
svcHTTPServer := peer.ServiceByID(
svcHTTPServer := peer.WorkloadByID(
s.nodeServer,
s.sidServer,
)
svcHTTPClient := dc.ServiceByID(
svcHTTPClient := dc.WorkloadByID(
s.nodeClient,
s.sidClient,
)

View File

@ -8,15 +8,13 @@ import (
"testing"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/mitchellh/copystructure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
)
// TestAC7_2RotateLeader ensures that after a leader rotation, information continues to replicate to peers
@ -25,13 +23,13 @@ type ac7_2RotateLeaderSuite struct {
DC string
Peer string
sidServer topology.ServiceID
sidServer topology.ID
nodeServer topology.NodeID
sidClient topology.ServiceID
sidClient topology.ID
nodeClient topology.NodeID
upstream *topology.Upstream
upstream *topology.Destination
}
func TestAC7_2RotateLeader(t *testing.T) {
@ -65,7 +63,7 @@ func (s *ac7_2RotateLeaderSuite) setup(t *testing.T, ct *commonTopo) {
server := NewFortioServiceWithDefaults(
peerClu.Datacenter,
topology.ServiceID{
topology.ID{
Name: prefix + "server-http",
Partition: partition,
},
@ -73,8 +71,8 @@ func (s *ac7_2RotateLeaderSuite) setup(t *testing.T, ct *commonTopo) {
)
// Make clients which have server upstreams
upstream := &topology.Upstream{
ID: topology.ServiceID{
upstream := &topology.Destination{
ID: topology.ID{
Name: server.ID.Name,
Partition: partition,
},
@ -84,17 +82,17 @@ func (s *ac7_2RotateLeaderSuite) setup(t *testing.T, ct *commonTopo) {
// create client in us
client := NewFortioServiceWithDefaults(
clu.Datacenter,
topology.ServiceID{
topology.ID{
Name: prefix + "client",
Partition: partition,
},
func(s *topology.Service) {
s.Upstreams = []*topology.Upstream{
func(s *topology.Workload) {
s.Destinations = []*topology.Destination{
upstream,
}
},
)
clientNode := ct.AddServiceNode(clu, serviceExt{Service: client,
clientNode := ct.AddServiceNode(clu, serviceExt{Workload: client,
Config: &api.ServiceConfigEntry{
Kind: api.ServiceDefaults,
Name: client.ID.Name,
@ -111,7 +109,7 @@ func (s *ac7_2RotateLeaderSuite) setup(t *testing.T, ct *commonTopo) {
})
// actually to be used by the other pairing
serverNode := ct.AddServiceNode(peerClu, serviceExt{
Service: server,
Workload: server,
Config: &api.ServiceConfigEntry{
Kind: api.ServiceDefaults,
Name: server.ID.Name,
@ -146,8 +144,8 @@ func (s *ac7_2RotateLeaderSuite) test(t *testing.T, ct *commonTopo) {
clDC := ct.APIClientForCluster(t, dc)
clPeer := ct.APIClientForCluster(t, peer)
svcServer := peer.ServiceByID(s.nodeServer, s.sidServer)
svcClient := dc.ServiceByID(s.nodeClient, s.sidClient)
svcServer := peer.WorkloadByID(s.nodeServer, s.sidServer)
svcClient := dc.WorkloadByID(s.nodeClient, s.sidClient)
ct.Assert.HealthyWithPeer(t, dc.Name, svcServer.ID, LocalPeerName(peer, "default"))
ct.Assert.FortioFetch2HeaderEcho(t, svcClient, s.upstream)

View File

@ -48,7 +48,7 @@ type commonTopo struct {
Assert *topoutil.Asserter
// track per-DC services to prevent duplicates
services map[string]map[topology.ServiceID]struct{}
services map[string]map[topology.ID]struct{}
// if zero, no DCs are agentless
agentlessDC string
@ -107,9 +107,9 @@ func newCommonTopo(t *testing.T, agentlessDC string, includeDC3 bool, peerThroug
injectTenancies(dc2)
// dc3 doesn't get tenancies
ct.services = map[string]map[topology.ServiceID]struct{}{}
ct.services = map[string]map[topology.ID]struct{}{}
for _, dc := range clusters {
ct.services[dc.Datacenter] = map[topology.ServiceID]struct{}{}
ct.services[dc.Datacenter] = map[topology.ID]struct{}{}
}
peerings := addPeerings(dc1, dc2)
@ -230,7 +230,7 @@ func LocalPeerName(clu *topology.Cluster, partition string) string {
// TODO: move these to topology
// TODO: alternatively, delete it: we only use it in one place, to bundle up args
type serviceExt struct {
*topology.Service
*topology.Workload
Exports []api.ServiceConsumer
Config *api.ServiceConfigEntry
@ -245,7 +245,7 @@ func (ct *commonTopo) AddServiceNode(clu *topology.Cluster, svc serviceExt) *top
ct.services[clusterName][svc.ID] = struct{}{}
// TODO: inline
serviceHostnameString := func(dc string, id topology.ServiceID) string {
serviceHostnameString := func(dc string, id topology.ID) string {
n := id.Name
// prepend <namespace>- and <partition>- if they are not default/empty
// avoids hostname limit of 63 chars in most cases
@ -279,8 +279,8 @@ func (ct *commonTopo) AddServiceNode(clu *topology.Cluster, svc serviceExt) *top
Addresses: []*topology.Address{
{Network: clu.Datacenter},
},
Services: []*topology.Service{
svc.Service,
Workloads: []*topology.Workload{
svc.Workload,
},
Cluster: clusterName,
}
@ -506,9 +506,9 @@ func injectTenancies(clu *topology.Cluster) {
// Deprecated: topoutil.NewFortioServiceWithDefaults
func NewFortioServiceWithDefaults(
cluster string,
sid topology.ServiceID,
mut func(s *topology.Service),
) *topology.Service {
sid topology.ID,
mut func(s *topology.Workload),
) *topology.Workload {
return topoutil.NewFortioServiceWithDefaults(cluster, sid, topology.NodeVersionV1, mut)
}
@ -519,8 +519,8 @@ func newTopologyMeshGatewaySet(
num int,
networks []string,
mutateFn func(i int, node *topology.Node),
) (topology.ServiceID, []*topology.Node) {
) (topology.ID, []*topology.Node) {
nodes := topoutil.NewTopologyMeshGatewaySet(nodeKind, partition, namePrefix, num, networks, mutateFn)
sid := nodes[0].Services[0].ID
sid := nodes[0].Workloads[0].ID
return sid, nodes
}

View File

@ -29,7 +29,7 @@ import (
// ip/ports if there is only one port that makes sense for the assertion (such
// as use of the envoy admin port 19000).
//
// If it's up to the test (like picking an upstream) leave port as an argument
// If it's up to the test (like picking a destination) leave port as an argument
// but still take the service and use that to grab the local ip from the
// topology.Node.
type Asserter struct {
@ -78,22 +78,22 @@ func (a *Asserter) httpClientFor(cluster string) (*http.Client, error) {
return client, nil
}
// UpstreamEndpointStatus validates that proxy was configured with provided clusterName in the healthStatus
// DestinationEndpointStatus validates that proxy was configured with provided clusterName in the healthStatus
//
// Exposes libassert.UpstreamEndpointStatus for use against a Sprawl.
//
// NOTE: this doesn't take a port b/c you always want to use the envoy admin port.
func (a *Asserter) UpstreamEndpointStatus(
func (a *Asserter) DestinationEndpointStatus(
t *testing.T,
service *topology.Service,
workload *topology.Workload,
clusterName string,
healthStatus string,
count int,
) {
t.Helper()
node := service.Node
node := workload.Node
ip := node.LocalAddress()
port := service.EnvoyAdminPort
port := workload.EnvoyAdminPort
addr := fmt.Sprintf("%s:%d", ip, port)
client := a.mustGetHTTPClient(t, node.Cluster)
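
These assertions are typically combined per caller/destination pair, as in the L4 explicit-destinations test earlier in this commit:

asserter.DestinationEndpointStatus(t, wrk, clusterPrefix+".", "HEALTHY", 1)
asserter.HTTPServiceEchoes(t, wrk, dest.LocalPort, "")
asserter.FortioFetch2FortioName(t, wrk, dest, cluster.Name, dest.ID)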
@ -106,17 +106,17 @@ func (a *Asserter) UpstreamEndpointStatus(
//
// Exposes libassert.HTTPServiceEchoes for use against a Sprawl.
//
// NOTE: this takes a port b/c you may want to reach this via your choice of upstream.
// NOTE: this takes a port b/c you may want to reach this via your choice of destination.
func (a *Asserter) HTTPServiceEchoes(
t *testing.T,
service *topology.Service,
workload *topology.Workload,
port int,
path string,
) {
t.Helper()
require.True(t, port > 0)
node := service.Node
node := workload.Node
ip := node.LocalAddress()
addr := fmt.Sprintf("%s:%d", ip, port)
@ -130,10 +130,10 @@ func (a *Asserter) HTTPServiceEchoes(
//
// Exposes libassert.HTTPServiceEchoes for use against a Sprawl.
//
// NOTE: this takes a port b/c you may want to reach this via your choice of upstream.
// NOTE: this takes a port b/c you may want to reach this via your choice of destination.
func (a *Asserter) HTTPServiceEchoesResHeader(
t *testing.T,
service *topology.Service,
workload *topology.Workload,
port int,
path string,
expectedResHeader map[string]string,
@ -141,7 +141,7 @@ func (a *Asserter) HTTPServiceEchoesResHeader(
t.Helper()
require.True(t, port > 0)
node := service.Node
node := workload.Node
ip := node.LocalAddress()
addr := fmt.Sprintf("%s:%d", ip, port)
@ -151,14 +151,14 @@ func (a *Asserter) HTTPServiceEchoesResHeader(
func (a *Asserter) HTTPStatus(
t *testing.T,
service *topology.Service,
workload *topology.Workload,
port int,
status int,
) {
t.Helper()
require.True(t, port > 0)
node := service.Node
node := workload.Node
ip := node.LocalAddress()
addr := fmt.Sprintf("%s:%d", ip, port)
@ -179,7 +179,7 @@ func (a *Asserter) HTTPStatus(
}
// asserts that the service sid in cluster and exported by peer localPeerName is passing health checks,
func (a *Asserter) HealthyWithPeer(t *testing.T, cluster string, sid topology.ServiceID, peerName string) {
func (a *Asserter) HealthyWithPeer(t *testing.T, cluster string, sid topology.ID, peerName string) {
t.Helper()
cl := a.mustGetAPIClient(t, cluster)
retry.RunWith(&retry.Timer{Timeout: time.Minute * 1, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
@ -203,30 +203,30 @@ type testingT interface {
Helper()
}
// does a fortio /fetch2 to the given fortio service, targeting the given upstream. Returns
// does a fortio /fetch2 to the given fortio service, targeting the given destination. Returns
// the body, and response with response.Body already Closed.
//
// We treat 400, 503, and 504s as retryable errors
func (a *Asserter) fortioFetch2Upstream(
func (a *Asserter) fortioFetch2Destination(
t testingT,
client *http.Client,
addr string,
upstream *topology.Upstream,
dest *topology.Destination,
path string,
) (body []byte, res *http.Response) {
t.Helper()
var actualURL string
if upstream.Implied {
if dest.Implied {
actualURL = fmt.Sprintf("http://%s--%s--%s.virtual.consul:%d/%s",
upstream.ID.Name,
upstream.ID.Namespace,
upstream.ID.Partition,
upstream.VirtualPort,
dest.ID.Name,
dest.ID.Namespace,
dest.ID.Partition,
dest.VirtualPort,
path,
)
} else {
actualURL = fmt.Sprintf("http://localhost:%d/%s", upstream.LocalPort, path)
actualURL = fmt.Sprintf("http://localhost:%d/%s", dest.LocalPort, path)
}
url := fmt.Sprintf("http://%s/fortio/fetch2?url=%s", addr,
@ -243,7 +243,7 @@ func (a *Asserter) fortioFetch2Upstream(
// not sure when these happen, suspect it's when the mesh gateway in the peer is not yet ready
require.NotEqual(t, http.StatusServiceUnavailable, res.StatusCode)
require.NotEqual(t, http.StatusGatewayTimeout, res.StatusCode)
// not sure when this happens, suspect it's when envoy hasn't configured the local upstream yet
// not sure when this happens, suspect it's when envoy hasn't configured the local destination yet
require.NotEqual(t, http.StatusBadRequest, res.StatusCode)
body, err = io.ReadAll(res.Body)
require.NoError(t, err)
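
Concretely, the two actualURL forms built above come out roughly like this for an illustrative ID of static-server/default/default; the inner URL is then wrapped in the fortio fetch2 call:

// Implied (tproxy): http://static-server--default--default.virtual.consul:<VirtualPort>/<path>
// Explicit:         http://localhost:<LocalPort>/<path>
// Wrapped as:       http://<workload addr>/fortio/fetch2?url=<actualURL>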
@ -252,20 +252,20 @@ func (a *Asserter) fortioFetch2Upstream(
}
// uses the /fortio/fetch2 endpoint to do a header echo check against an
// upstream fortio
func (a *Asserter) FortioFetch2HeaderEcho(t *testing.T, fortioSvc *topology.Service, upstream *topology.Upstream) {
// destination fortio
func (a *Asserter) FortioFetch2HeaderEcho(t *testing.T, fortioWrk *topology.Workload, dest *topology.Destination) {
const kPassphrase = "x-passphrase"
const passphrase = "hello"
path := (fmt.Sprintf("/?header=%s:%s", kPassphrase, passphrase))
var (
node = fortioSvc.Node
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioSvc.PortOrDefault(upstream.PortName))
node = fortioWrk.Node
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioWrk.PortOrDefault(dest.PortName))
client = a.mustGetHTTPClient(t, node.Cluster)
)
retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
_, res := a.fortioFetch2Upstream(r, client, addr, upstream, path)
_, res := a.fortioFetch2Destination(r, client, addr, dest, path)
require.Equal(r, http.StatusOK, res.StatusCode)
v := res.Header.Get(kPassphrase)
require.Equal(r, passphrase, v)
@ -273,20 +273,20 @@ func (a *Asserter) FortioFetch2HeaderEcho(t *testing.T, fortioSvc *topology.Serv
}
// similar to libassert.AssertFortioName,
// uses the /fortio/fetch2 endpoint to hit the debug endpoint on the upstream,
// uses the /fortio/fetch2 endpoint to hit the debug endpoint on the destination,
// and assert that the FORTIO_NAME == name
func (a *Asserter) FortioFetch2FortioName(
t *testing.T,
fortioSvc *topology.Service,
upstream *topology.Upstream,
fortioWrk *topology.Workload,
dest *topology.Destination,
clusterName string,
sid topology.ServiceID,
sid topology.ID,
) {
t.Helper()
var (
node = fortioSvc.Node
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioSvc.PortOrDefault(upstream.PortName))
node = fortioWrk.Node
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioWrk.PortOrDefault(dest.PortName))
client = a.mustGetHTTPClient(t, node.Cluster)
)
@ -294,7 +294,7 @@ func (a *Asserter) FortioFetch2FortioName(
path := "/debug?env=dump"
retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
body, res := a.fortioFetch2Upstream(r, client, addr, upstream, path)
body, res := a.fortioFetch2Destination(r, client, addr, dest, path)
require.Equal(r, http.StatusOK, res.StatusCode)

View File

@ -18,16 +18,16 @@ import (
// on the correct instance using HTTP1 or HTTP2.
func (a *Asserter) CheckBlankspaceNameViaHTTP(
t *testing.T,
service *topology.Service,
upstream *topology.Upstream,
workload *topology.Workload,
dest *topology.Destination,
useHTTP2 bool,
path string,
clusterName string,
sid topology.ServiceID,
sid topology.ID,
) {
t.Helper()
a.checkBlankspaceNameViaHTTPWithCallback(t, service, upstream, useHTTP2, path, 1, func(_ *retry.R) {}, func(r *retry.R, remoteName string) {
a.checkBlankspaceNameViaHTTPWithCallback(t, workload, dest, useHTTP2, path, 1, func(_ *retry.R) {}, func(r *retry.R, remoteName string) {
require.Equal(r, fmt.Sprintf("%s::%s", clusterName, sid.String()), remoteName)
}, func(r *retry.R) {})
}
@ -36,8 +36,8 @@ func (a *Asserter) CheckBlankspaceNameViaHTTP(
// but it is verifying a relative traffic split.
func (a *Asserter) CheckBlankspaceNameTrafficSplitViaHTTP(
t *testing.T,
service *topology.Service,
upstream *topology.Upstream,
workload *topology.Workload,
dest *topology.Destination,
useHTTP2 bool,
path string,
expect map[string]int,
@ -45,7 +45,7 @@ func (a *Asserter) CheckBlankspaceNameTrafficSplitViaHTTP(
t.Helper()
got := make(map[string]int)
a.checkBlankspaceNameViaHTTPWithCallback(t, service, upstream, useHTTP2, path, 100, func(_ *retry.R) {
a.checkBlankspaceNameViaHTTPWithCallback(t, workload, dest, useHTTP2, path, 100, func(_ *retry.R) {
got = make(map[string]int)
}, func(_ *retry.R, name string) {
got[name]++
@ -56,8 +56,8 @@ func (a *Asserter) CheckBlankspaceNameTrafficSplitViaHTTP(
func (a *Asserter) checkBlankspaceNameViaHTTPWithCallback(
t *testing.T,
service *topology.Service,
upstream *topology.Upstream,
workload *topology.Workload,
dest *topology.Destination,
useHTTP2 bool,
path string,
count int,
@ -68,8 +68,8 @@ func (a *Asserter) checkBlankspaceNameViaHTTPWithCallback(
t.Helper()
var (
node = service.Node
internalPort = service.PortOrDefault(upstream.PortName)
node = workload.Node
internalPort = workload.PortOrDefault(dest.PortName)
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), internalPort)
client = a.mustGetHTTPClient(t, node.Cluster)
)
@ -86,16 +86,16 @@ func (a *Asserter) checkBlankspaceNameViaHTTPWithCallback(
}
var actualURL string
if upstream.Implied {
if dest.Implied {
actualURL = fmt.Sprintf("http://%s--%s--%s.virtual.consul:%d/%s",
upstream.ID.Name,
upstream.ID.Namespace,
upstream.ID.Partition,
upstream.VirtualPort,
dest.ID.Name,
dest.ID.Namespace,
dest.ID.Partition,
dest.VirtualPort,
path,
)
} else {
actualURL = fmt.Sprintf("http://localhost:%d/%s", upstream.LocalPort, path)
actualURL = fmt.Sprintf("http://localhost:%d/%s", dest.LocalPort, path)
}
multiassert(t, count, resetFn, func(r *retry.R) {
@ -111,14 +111,14 @@ func (a *Asserter) checkBlankspaceNameViaHTTPWithCallback(
// on the correct instance using plain tcp sockets.
func (a *Asserter) CheckBlankspaceNameViaTCP(
t *testing.T,
service *topology.Service,
upstream *topology.Upstream,
workload *topology.Workload,
dest *topology.Destination,
clusterName string,
sid topology.ServiceID,
sid topology.ID,
) {
t.Helper()
a.checkBlankspaceNameViaTCPWithCallback(t, service, upstream, 1, func(_ *retry.R) {}, func(r *retry.R, remoteName string) {
a.checkBlankspaceNameViaTCPWithCallback(t, workload, dest, 1, func(_ *retry.R) {}, func(r *retry.R, remoteName string) {
require.Equal(r, fmt.Sprintf("%s::%s", clusterName, sid.String()), remoteName)
}, func(r *retry.R) {})
}
@ -127,14 +127,14 @@ func (a *Asserter) CheckBlankspaceNameViaTCP(
// but it is verifying a relative traffic split.
func (a *Asserter) CheckBlankspaceNameTrafficSplitViaTCP(
t *testing.T,
service *topology.Service,
upstream *topology.Upstream,
workload *topology.Workload,
dest *topology.Destination,
expect map[string]int,
) {
t.Helper()
got := make(map[string]int)
a.checkBlankspaceNameViaTCPWithCallback(t, service, upstream, 100, func(_ *retry.R) {
a.checkBlankspaceNameViaTCPWithCallback(t, workload, dest, 100, func(_ *retry.R) {
got = make(map[string]int)
}, func(_ *retry.R, name string) {
got[name]++
@ -145,8 +145,8 @@ func (a *Asserter) CheckBlankspaceNameTrafficSplitViaTCP(
func (a *Asserter) checkBlankspaceNameViaTCPWithCallback(
t *testing.T,
service *topology.Service,
upstream *topology.Upstream,
workload *topology.Workload,
dest *topology.Destination,
count int,
resetFn func(r *retry.R),
attemptFn func(r *retry.R, remoteName string),
@ -154,11 +154,11 @@ func (a *Asserter) checkBlankspaceNameViaTCPWithCallback(
) {
t.Helper()
require.False(t, upstream.Implied, "helper does not support tproxy yet")
port := upstream.LocalPort
require.False(t, dest.Implied, "helper does not support tproxy yet")
port := dest.LocalPort
require.True(t, port > 0)
node := service.Node
node := workload.Node
// We can't use the forward proxy for TCP yet, so use the exposed port on localhost instead.
exposedPort := node.ExposedPort(port)
@ -179,14 +179,14 @@ func (a *Asserter) checkBlankspaceNameViaTCPWithCallback(
// on the correct instance using gRPC.
func (a *Asserter) CheckBlankspaceNameViaGRPC(
t *testing.T,
service *topology.Service,
upstream *topology.Upstream,
workload *topology.Workload,
dest *topology.Destination,
clusterName string,
sid topology.ServiceID,
sid topology.ID,
) {
t.Helper()
a.checkBlankspaceNameViaGRPCWithCallback(t, service, upstream, 1, func(_ *retry.R) {}, func(r *retry.R, remoteName string) {
a.checkBlankspaceNameViaGRPCWithCallback(t, workload, dest, 1, func(_ *retry.R) {}, func(r *retry.R, remoteName string) {
require.Equal(r, fmt.Sprintf("%s::%s", clusterName, sid.String()), remoteName)
}, func(_ *retry.R) {})
}
@ -195,14 +195,14 @@ func (a *Asserter) CheckBlankspaceNameViaGRPC(
// but it is verifying a relative traffic split.
func (a *Asserter) CheckBlankspaceNameTrafficSplitViaGRPC(
t *testing.T,
service *topology.Service,
upstream *topology.Upstream,
workload *topology.Workload,
dest *topology.Destination,
expect map[string]int,
) {
t.Helper()
got := make(map[string]int)
a.checkBlankspaceNameViaGRPCWithCallback(t, service, upstream, 100, func(_ *retry.R) {
a.checkBlankspaceNameViaGRPCWithCallback(t, workload, dest, 100, func(_ *retry.R) {
got = make(map[string]int)
}, func(_ *retry.R, name string) {
got[name]++
@ -213,8 +213,8 @@ func (a *Asserter) CheckBlankspaceNameTrafficSplitViaGRPC(
func (a *Asserter) checkBlankspaceNameViaGRPCWithCallback(
t *testing.T,
service *topology.Service,
upstream *topology.Upstream,
workload *topology.Workload,
dest *topology.Destination,
count int,
resetFn func(r *retry.R),
attemptFn func(r *retry.R, remoteName string),
@ -222,11 +222,11 @@ func (a *Asserter) checkBlankspaceNameViaGRPCWithCallback(
) {
t.Helper()
require.False(t, upstream.Implied, "helper does not support tproxy yet")
port := upstream.LocalPort
require.False(t, dest.Implied, "helper does not support tproxy yet")
port := dest.LocalPort
require.True(t, port > 0)
node := service.Node
node := workload.Node
// We can't use the forward proxy for gRPC yet, so use the exposed port on localhost instead.
exposedPort := node.ExposedPort(port)
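For orientation, a minimal sketch of a call to the renamed TCP assertion helper; the cluster name and ID values are illustrative assumptions, not taken from this diff:

// Assumes `asserter`, `wrk` (*topology.Workload), and `dest`
// (*topology.Destination) were constructed earlier in the test.
sid := topology.ID{Name: "static-server", Namespace: "default", Partition: "default"}
asserter.CheckBlankspaceNameViaTCP(t, wrk, dest, "dc1", sid)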
View File
@ -12,12 +12,12 @@ import (
const HashicorpDockerProxy = "docker.mirror.hashicorp.services"
func NewFortioServiceWithDefaults(
func NewFortioWorkloadWithDefaults(
cluster string,
sid topology.ServiceID,
sid topology.ID,
nodeVersion topology.NodeVersion,
mut func(s *topology.Service),
) *topology.Service {
mut func(*topology.Workload),
) *topology.Workload {
const (
httpPort = 8080
grpcPort = 8079
@ -26,7 +26,7 @@ func NewFortioServiceWithDefaults(
)
sid.Normalize()
svc := &topology.Service{
wrk := &topology.Workload{
ID: sid,
Image: HashicorpDockerProxy + "/fortio/fortio",
EnvoyAdminPort: adminPort,
@ -44,28 +44,28 @@ func NewFortioServiceWithDefaults(
}
if nodeVersion == topology.NodeVersionV2 {
svc.Ports = map[string]*topology.Port{
wrk.Ports = map[string]*topology.Port{
"http": {Number: httpPort, Protocol: "http"},
"http2": {Number: httpPort, Protocol: "http2"},
"grpc": {Number: grpcPort, Protocol: "grpc"},
"tcp": {Number: tcpPort, Protocol: "tcp"},
}
} else {
svc.Port = httpPort
wrk.Port = httpPort
}
if mut != nil {
mut(svc)
mut(wrk)
}
return svc
return wrk
}
func NewBlankspaceServiceWithDefaults(
func NewBlankspaceWorkloadWithDefaults(
cluster string,
sid topology.ServiceID,
sid topology.ID,
nodeVersion topology.NodeVersion,
mut func(s *topology.Service),
) *topology.Service {
mut func(*topology.Workload),
) *topology.Workload {
const (
httpPort = 8080
grpcPort = 8079
@ -74,7 +74,7 @@ func NewBlankspaceServiceWithDefaults(
)
sid.Normalize()
svc := &topology.Service{
wrk := &topology.Workload{
ID: sid,
Image: HashicorpDockerProxy + "/rboyer/blankspace",
EnvoyAdminPort: adminPort,
@ -88,20 +88,20 @@ func NewBlankspaceServiceWithDefaults(
}
if nodeVersion == topology.NodeVersionV2 {
svc.Ports = map[string]*topology.Port{
wrk.Ports = map[string]*topology.Port{
"http": {Number: httpPort, Protocol: "http"},
"http2": {Number: httpPort, Protocol: "http2"},
"grpc": {Number: grpcPort, Protocol: "grpc"},
"tcp": {Number: tcpPort, Protocol: "tcp"},
}
} else {
svc.Port = httpPort
wrk.Port = httpPort
}
if mut != nil {
mut(svc)
mut(wrk)
}
return svc
return wrk
}
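A short usage sketch of the renamed constructor; the ID and the mutation applied are illustrative assumptions:

// Build a blankspace workload for a v2 node; the optional callback can
// mutate the Workload before it is returned (nil is also accepted).
wrk := topoutil.NewBlankspaceWorkloadWithDefaults(
	"dc1",
	topology.ID{Name: "static-server"},
	topology.NodeVersionV2,
	func(w *topology.Workload) { w.Meta = map[string]string{"team": "demo"} },
)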
func NewTopologyServerSet(
@ -140,9 +140,9 @@ func NewTopologyMeshGatewaySet(
mutateFn func(i int, node *topology.Node),
) []*topology.Node {
var out []*topology.Node
sid := topology.ServiceID{
sid := topology.ID{
Name: "mesh-gateway",
Partition: ConfigEntryPartition(partition),
Partition: topology.DefaultToEmpty(partition),
}
for i := 1; i <= num; i++ {
name := namePrefix + strconv.Itoa(i)
@ -151,7 +151,7 @@ func NewTopologyMeshGatewaySet(
Kind: nodeKind,
Partition: sid.Partition,
Name: name,
Services: []*topology.Service{{
Workloads: []*topology.Workload{{
ID: sid,
Port: 8443,
EnvoyAdminPort: 19000,
View File
@ -0,0 +1,41 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package topoutil
import (
"testing"
"github.com/hashicorp/consul/testing/deployer/topology"
)
// Deprecated: Use DestinationEndpointStatus instead.
func (a *Asserter) UpstreamEndpointStatus(
t *testing.T,
workload *topology.Workload,
clusterName string,
healthStatus string,
count int,
) {
a.DestinationEndpointStatus(t, workload, clusterName, healthStatus, count)
}
// Deprecated: Use NewFortioWorkloadWithDefaults instead.
func NewFortioServiceWithDefaults(
cluster string,
sid topology.ID,
nodeVersion topology.NodeVersion,
mut func(*topology.Workload),
) *topology.Workload {
return NewFortioWorkloadWithDefaults(cluster, sid, nodeVersion, mut)
}
// Deprecated: Use NewBlankspaceWorkloadWithDefaults instead.
func NewBlankspaceServiceWithDefaults(
cluster string,
sid topology.ID,
nodeVersion topology.NodeVersion,
mut func(*topology.Workload),
) *topology.Workload {
return NewBlankspaceWorkloadWithDefaults(cluster, sid, nodeVersion, mut)
}
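These wrappers keep pre-rename call sites compiling; a minimal sketch of the equivalence, with illustrative arguments:

// Both calls return the same *topology.Workload; the deprecated name
// simply forwards to the new one and can be dropped once callers migrate.
a := topoutil.NewFortioServiceWithDefaults("dc1", sid, topology.NodeVersionV2, nil)
b := topoutil.NewFortioWorkloadWithDefaults("dc1", sid, topology.NodeVersionV2, nil)
_, _ = a, b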
View File
@ -202,58 +202,56 @@ func (s *Sprawl) createCrossNamespaceCatalogReadPolicies(cluster *topology.Clust
return nil
}
func (s *Sprawl) createAllServiceTokens() error {
func (s *Sprawl) createAllWorkloadTokens() error {
for _, cluster := range s.topology.Clusters {
if err := s.createServiceTokens(cluster); err != nil {
return fmt.Errorf("createServiceTokens[%s]: %w", cluster.Name, err)
if err := s.createWorkloadTokens(cluster); err != nil {
return fmt.Errorf("createWorkloadTokens[%s]: %w", cluster.Name, err)
}
}
return nil
}
func (s *Sprawl) createServiceTokens(cluster *topology.Cluster) error {
func (s *Sprawl) createWorkloadTokens(cluster *topology.Cluster) error {
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
sids := make(map[topology.ServiceID]struct{})
workloadIDs := make(map[topology.ID]struct{})
for _, node := range cluster.Nodes {
if !node.RunsWorkloads() || len(node.Services) == 0 || node.Disabled {
if !node.RunsWorkloads() || len(node.Workloads) == 0 || node.Disabled {
continue
}
for _, svc := range node.Services {
sid := svc.ID
if _, done := sids[sid]; done {
for _, wrk := range node.Workloads {
if _, done := workloadIDs[wrk.ID]; done {
continue
}
var overridePolicy *api.ACLPolicy
if svc.IsMeshGateway {
if wrk.IsMeshGateway {
var err error
overridePolicy, err = CreateOrUpdatePolicy(client, policyForMeshGateway(svc, cluster.Enterprise))
overridePolicy, err = CreateOrUpdatePolicy(client, policyForMeshGateway(wrk, cluster.Enterprise))
if err != nil {
return fmt.Errorf("could not create policy: %w", err)
}
}
token, err := CreateOrUpdateToken(client, tokenForService(svc, overridePolicy, cluster.Enterprise))
token, err := CreateOrUpdateToken(client, tokenForWorkload(wrk, overridePolicy, cluster.Enterprise))
if err != nil {
return fmt.Errorf("could not create token: %w", err)
}
logger.Debug("created service token",
"service", svc.ID.Name,
"namespace", svc.ID.Namespace,
"partition", svc.ID.Partition,
logger.Debug("created workload token",
"workload", wrk.ID.Name,
"namespace", wrk.ID.Namespace,
"partition", wrk.ID.Partition,
"token", token.SecretID,
)
s.secrets.SaveServiceToken(cluster.Name, sid, token.SecretID)
s.secrets.SaveWorkloadToken(cluster.Name, wrk.ID, token.SecretID)
sids[sid] = struct{}{}
workloadIDs[wrk.ID] = struct{}{}
}
}
View File
@ -86,29 +86,34 @@ func tokenForNode(node *topology.Node, enterprise bool) *api.ACLToken {
return token
}
func tokenForService(svc *topology.Service, overridePolicy *api.ACLPolicy, enterprise bool) *api.ACLToken {
// Deprecated: Use tokenForWorkload instead.
func tokenForService(wrk *topology.Workload, overridePolicy *api.ACLPolicy, enterprise bool) *api.ACLToken {
return tokenForWorkload(wrk, overridePolicy, enterprise)
}
func tokenForWorkload(wrk *topology.Workload, overridePolicy *api.ACLPolicy, enterprise bool) *api.ACLToken {
token := &api.ACLToken{
Description: "service--" + svc.ID.ACLString(),
Description: "service--" + wrk.ID.ACLString(),
Local: false,
}
if overridePolicy != nil {
token.Policies = []*api.ACLTokenPolicyLink{{ID: overridePolicy.ID}}
} else if svc.IsV2() {
} else if wrk.IsV2() {
token.TemplatedPolicies = []*api.ACLTemplatedPolicy{{
TemplateName: api.ACLTemplatedPolicyWorkloadIdentityName,
TemplateVariables: &api.ACLTemplatedPolicyVariables{
Name: svc.ID.Name,
Name: wrk.ID.Name,
},
}}
} else {
token.ServiceIdentities = []*api.ACLServiceIdentity{{
ServiceName: svc.ID.Name,
ServiceName: wrk.ID.Name,
}}
}
if enterprise {
token.Namespace = svc.ID.Namespace
token.Partition = svc.ID.Partition
token.Namespace = wrk.ID.Namespace
token.Partition = wrk.ID.Partition
}
return token
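To make the branching above concrete, a sketch of the v1 path (no override policy, non-enterprise); values are illustrative and error handling is elided:

wrk := &topology.Workload{ID: topology.ID{Name: "ping", Namespace: "default", Partition: "default"}}
token := tokenForWorkload(wrk, nil, false)
// token.Description       == "service--" + wrk.ID.ACLString()
// token.ServiceIdentities == [{ServiceName: "ping"}]  (v2 workloads get a
// templated workload-identity policy instead; an override policy wins over both)
_ = token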
@ -176,20 +181,20 @@ mesh = "write"
`
)
func policyForMeshGateway(svc *topology.Service, enterprise bool) *api.ACLPolicy {
policyName := "mesh-gateway--" + svc.ID.ACLString()
func policyForMeshGateway(wrk *topology.Workload, enterprise bool) *api.ACLPolicy {
policyName := "mesh-gateway--" + wrk.ID.ACLString()
policy := &api.ACLPolicy{
Name: policyName,
Description: policyName,
}
if enterprise {
policy.Partition = svc.ID.Partition
policy.Partition = wrk.ID.Partition
policy.Namespace = "default"
}
if enterprise {
if svc.ID.Partition == "default" {
if wrk.ID.Partition == "default" {
policy.Rules = meshGatewayEntDefaultRules
} else {
policy.Rules = meshGatewayEntNonDefaultRules
View File
@ -299,8 +299,8 @@ func (s *Sprawl) createFirstTime() error {
// Ideally we start services WITH a token initially, so we pre-create them
// before running terraform for them.
if err := s.createAllServiceTokens(); err != nil {
return fmt.Errorf("createAllServiceTokens: %w", err)
if err := s.createAllWorkloadTokens(); err != nil {
return fmt.Errorf("createAllWorkloadTokens: %w", err)
}
if err := s.syncAllServicesForDataplaneInstances(); err != nil {
@ -367,8 +367,8 @@ func (s *Sprawl) preRegenTasks() error {
// Ideally we start services WITH a token initially, so we pre-create them
// before running terraform for them.
if err := s.createAllServiceTokens(); err != nil {
return fmt.Errorf("createAllServiceTokens: %w", err)
if err := s.createAllWorkloadTokens(); err != nil {
return fmt.Errorf("createAllWorkloadTokens: %w", err)
}
if err := s.syncAllServicesForDataplaneInstances(); err != nil {
View File
@ -33,8 +33,8 @@ func (s *Sprawl) registerAllServicesToAgents() error {
func (s *Sprawl) syncAllServicesForDataplaneInstances() error {
for _, cluster := range s.topology.Clusters {
if err := s.syncServicesForDataplaneInstances(cluster); err != nil {
return fmt.Errorf("syncServicesForDataplaneInstances[%s]: %w", cluster.Name, err)
if err := s.syncWorkloadsForDataplaneInstances(cluster); err != nil {
return fmt.Errorf("syncWorkloadsForDataplaneInstances[%s]: %w", cluster.Name, err)
}
}
return nil
@ -42,7 +42,7 @@ func (s *Sprawl) syncAllServicesForDataplaneInstances() error {
func (s *Sprawl) registerServicesToAgents(cluster *topology.Cluster) error {
for _, node := range cluster.Nodes {
if !node.RunsWorkloads() || len(node.Services) == 0 || node.Disabled {
if !node.RunsWorkloads() || len(node.Workloads) == 0 || node.Disabled {
continue
}
@ -63,8 +63,8 @@ func (s *Sprawl) registerServicesToAgents(cluster *topology.Cluster) error {
return err
}
for _, svc := range node.Services {
if err := s.registerAgentService(agentClient, cluster, node, svc); err != nil {
for _, wrk := range node.Workloads {
if err := s.registerAgentService(agentClient, cluster, node, wrk); err != nil {
return err
}
}
@ -77,7 +77,7 @@ func (s *Sprawl) registerAgentService(
agentClient *api.Client,
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
wrk *topology.Workload,
) error {
if !node.IsAgent() {
panic("called wrong method type")
@ -86,7 +86,7 @@ func (s *Sprawl) registerAgentService(
panic("don't call this")
}
if svc.IsMeshGateway {
if wrk.IsMeshGateway {
return nil // handled at startup time for agent-ful, but won't be for agent-less
}
@ -95,31 +95,31 @@ func (s *Sprawl) registerAgentService(
)
reg := &api.AgentServiceRegistration{
ID: svc.ID.Name,
Name: svc.ID.Name,
Port: svc.Port,
Meta: svc.Meta,
ID: wrk.ID.Name,
Name: wrk.ID.Name,
Port: wrk.Port,
Meta: wrk.Meta,
}
if cluster.Enterprise {
reg.Namespace = svc.ID.Namespace
reg.Partition = svc.ID.Partition
reg.Namespace = wrk.ID.Namespace
reg.Partition = wrk.ID.Partition
}
if !svc.DisableServiceMesh {
if !wrk.DisableServiceMesh {
var upstreams []api.Upstream
for _, u := range svc.Upstreams {
for _, dest := range wrk.Destinations {
uAPI := api.Upstream{
DestinationPeer: u.Peer,
DestinationName: u.ID.Name,
LocalBindAddress: u.LocalAddress,
LocalBindPort: u.LocalPort,
DestinationPeer: dest.Peer,
DestinationName: dest.ID.Name,
LocalBindAddress: dest.LocalAddress,
LocalBindPort: dest.LocalPort,
// Config map[string]interface{} `json:",omitempty" bexpr:"-"`
// MeshGateway MeshGatewayConfig `json:",omitempty"`
}
if cluster.Enterprise {
uAPI.DestinationNamespace = u.ID.Namespace
if u.Peer == "" {
uAPI.DestinationPartition = u.ID.Partition
uAPI.DestinationNamespace = dest.ID.Namespace
if dest.Peer == "" {
uAPI.DestinationPartition = dest.ID.Partition
}
}
upstreams = append(upstreams, uAPI)
@ -134,18 +134,18 @@ func (s *Sprawl) registerAgentService(
}
switch {
case svc.CheckTCP != "":
case wrk.CheckTCP != "":
chk := &api.AgentServiceCheck{
Name: "up",
TCP: svc.CheckTCP,
TCP: wrk.CheckTCP,
Interval: "5s",
Timeout: "1s",
}
reg.Checks = append(reg.Checks, chk)
case svc.CheckHTTP != "":
case wrk.CheckHTTP != "":
chk := &api.AgentServiceCheck{
Name: "up",
HTTP: svc.CheckHTTP,
HTTP: wrk.CheckHTTP,
Method: "GET",
Interval: "5s",
Timeout: "1s",
@ -155,7 +155,7 @@ func (s *Sprawl) registerAgentService(
// Switch token for every request.
hdr := make(http.Header)
hdr.Set("X-Consul-Token", s.secrets.ReadServiceToken(cluster.Name, svc.ID))
hdr.Set("X-Consul-Token", s.secrets.ReadWorkloadToken(cluster.Name, wrk.ID))
agentClient.SetHeaders(hdr)
RETRY:
@ -164,29 +164,29 @@ RETRY:
time.Sleep(50 * time.Millisecond)
goto RETRY
}
return fmt.Errorf("failed to register service %q to node %q: %w", svc.ID, node.ID(), err)
return fmt.Errorf("failed to register workload %q to node %q: %w", wrk.ID, node.ID(), err)
}
logger.Debug("registered service to client agent",
"service", svc.ID.Name,
logger.Debug("registered workload to client agent",
"workload", wrk.ID.Name,
"node", node.Name,
"namespace", svc.ID.Namespace,
"partition", svc.ID.Partition,
"namespace", wrk.ID.Namespace,
"partition", wrk.ID.Partition,
)
return nil
}
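For reference, the v1 upstream each destination is translated into during agent registration; field values are illustrative:

// One topology.Destination becomes one api.Upstream on the registration;
// DestinationPartition is only set when no peering is involved.
u := api.Upstream{
	DestinationName:  "pong",
	DestinationPeer:  "peer-dc2-default",
	LocalBindAddress: "127.0.0.1",
	LocalBindPort:    9090,
}
_ = u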
// syncServicesForDataplaneInstances register/deregister services in the given cluster
func (s *Sprawl) syncServicesForDataplaneInstances(cluster *topology.Cluster) error {
identityInfo := make(map[topology.ServiceID]*Resource[*pbauth.WorkloadIdentity])
// syncWorkloadsForDataplaneInstances register/deregister services in the given cluster
func (s *Sprawl) syncWorkloadsForDataplaneInstances(cluster *topology.Cluster) error {
identityInfo := make(map[topology.ID]*Resource[*pbauth.WorkloadIdentity])
// registerServiceAtNode is called when node is not disabled
registerServiceAtNode := func(node *topology.Node, svc *topology.Service) error {
// registerWorkloadToNode is called when node is not disabled
registerWorkloadToNode := func(node *topology.Node, wrk *topology.Workload) error {
if node.IsV2() {
pending := serviceInstanceToResources(node, svc)
pending := workloadInstanceToResources(node, wrk)
workloadID := topology.NewServiceID(svc.WorkloadIdentity, svc.ID.Namespace, svc.ID.Partition)
workloadID := topology.NewID(wrk.WorkloadIdentity, wrk.ID.Namespace, wrk.ID.Partition)
if _, ok := identityInfo[workloadID]; !ok {
identityInfo[workloadID] = pending.WorkloadIdentity
}
@ -231,11 +231,11 @@ func (s *Sprawl) syncServicesForDataplaneInstances(cluster *topology.Cluster) er
}
}
} else {
if err := s.registerCatalogServiceV1(cluster, node, svc); err != nil {
if err := s.registerCatalogServiceV1(cluster, node, wrk); err != nil {
return fmt.Errorf("error registering service: %w", err)
}
if !svc.DisableServiceMesh {
if err := s.registerCatalogSidecarServiceV1(cluster, node, svc); err != nil {
if !wrk.DisableServiceMesh {
if err := s.registerCatalogSidecarServiceV1(cluster, node, wrk); err != nil {
return fmt.Errorf("error registering sidecar service: %w", err)
}
}
@ -243,17 +243,17 @@ func (s *Sprawl) syncServicesForDataplaneInstances(cluster *topology.Cluster) er
return nil
}
// deregisterServiceAtNode is called when node is disabled
deregisterServiceAtNode := func(node *topology.Node, svc *topology.Service) error {
// deregisterWorkloadFromNode is called when node is disabled
deregisterWorkloadFromNode := func(node *topology.Node, wrk *topology.Workload) error {
if node.IsV2() {
// TODO: implement deregister services for v2
panic("deregister services is not implemented for V2")
// TODO: implement deregister workload for v2
panic("deregister workload is not implemented for V2")
} else {
if err := s.deregisterCatalogServiceV1(cluster, node, svc); err != nil {
if err := s.deregisterCatalogServiceV1(cluster, node, wrk); err != nil {
return fmt.Errorf("error deregistering service: %w", err)
}
if !svc.DisableServiceMesh {
if err := s.deregisterCatalogSidecarServiceV1(cluster, node, svc); err != nil {
if !wrk.DisableServiceMesh {
if err := s.deregisterCatalogSidecarServiceV1(cluster, node, wrk); err != nil {
return fmt.Errorf("error deregistering sidecar service: %w", err)
}
}
@ -261,10 +261,10 @@ func (s *Sprawl) syncServicesForDataplaneInstances(cluster *topology.Cluster) er
return nil
}
var syncService func(node *topology.Node, svc *topology.Service) error
var syncWorkload func(node *topology.Node, wrk *topology.Workload) error
for _, node := range cluster.Nodes {
if !node.RunsWorkloads() || len(node.Services) == 0 {
if !node.RunsWorkloads() || len(node.Workloads) == 0 {
continue
}
@ -280,13 +280,13 @@ func (s *Sprawl) syncServicesForDataplaneInstances(cluster *topology.Cluster) er
}
// Register/deregister services on the node
for _, svc := range node.Services {
for _, wrk := range node.Workloads {
if !node.Disabled {
syncService = registerServiceAtNode
syncWorkload = registerWorkloadToNode
} else {
syncService = deregisterServiceAtNode
syncWorkload = deregisterWorkloadFromNode
}
syncService(node, svc)
syncWorkload(node, wrk)
}
// Deregister the virtual node if node is disabled
@ -508,7 +508,7 @@ RETRY:
func (s *Sprawl) deregisterCatalogServiceV1(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
wrk *topology.Workload,
) error {
if !node.IsDataplane() {
panic("called wrong method type")
@ -524,7 +524,7 @@ func (s *Sprawl) deregisterCatalogServiceV1(
dereg := &api.CatalogDeregistration{
Node: node.PodName(),
ServiceID: svc.ID.Name,
ServiceID: wrk.ID.Name,
}
RETRY:
if _, err := client.Catalog().Deregister(dereg, nil); err != nil {
@ -532,11 +532,11 @@ RETRY:
time.Sleep(50 * time.Millisecond)
goto RETRY
}
return fmt.Errorf("error deregistering service %s at node %s: %w", svc.ID, node.ID(), err)
return fmt.Errorf("error deregistering service %s at node %s: %w", wrk.ID, node.ID(), err)
}
logger.Info("dataplane service removed",
"service", svc.ID,
"service", wrk.ID,
"node", node.ID(),
)
@ -546,7 +546,7 @@ RETRY:
func (s *Sprawl) registerCatalogServiceV1(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
wrk *topology.Workload,
) error {
if !node.IsDataplane() {
panic("called wrong method type")
@ -560,7 +560,7 @@ func (s *Sprawl) registerCatalogServiceV1(
logger = s.logger.With("cluster", cluster.Name)
)
reg := serviceToCatalogRegistration(cluster, node, svc)
reg := workloadToCatalogRegistration(cluster, node, wrk)
RETRY:
if _, err := client.Catalog().Register(reg, nil); err != nil {
@ -568,11 +568,11 @@ RETRY:
time.Sleep(50 * time.Millisecond)
goto RETRY
}
return fmt.Errorf("error registering service %s to node %s: %w", svc.ID, node.ID(), err)
return fmt.Errorf("error registering service %s to node %s: %w", wrk.ID, node.ID(), err)
}
logger.Debug("dataplane service created",
"service", svc.ID,
"service", wrk.ID,
"node", node.ID(),
)
@ -582,12 +582,12 @@ RETRY:
func (s *Sprawl) deregisterCatalogSidecarServiceV1(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
wrk *topology.Workload,
) error {
if !node.IsDataplane() {
panic("called wrong method type")
}
if svc.DisableServiceMesh {
if wrk.DisableServiceMesh {
panic("not valid")
}
if node.IsV2() {
@ -599,7 +599,7 @@ func (s *Sprawl) deregisterCatalogSidecarServiceV1(
logger = s.logger.With("cluster", cluster.Name)
)
pid := svc.ID
pid := wrk.ID
pid.Name += "-sidecar-proxy"
dereg := &api.CatalogDeregistration{
Node: node.PodName(),
@ -612,7 +612,7 @@ RETRY:
time.Sleep(50 * time.Millisecond)
goto RETRY
}
return fmt.Errorf("error deregistering service %s to node %s: %w", svc.ID, node.ID(), err)
return fmt.Errorf("error deregistering service %s to node %s: %w", wrk.ID, node.ID(), err)
}
logger.Info("dataplane sidecar service removed",
@ -626,12 +626,12 @@ RETRY:
func (s *Sprawl) registerCatalogSidecarServiceV1(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
wrk *topology.Workload,
) error {
if !node.IsDataplane() {
panic("called wrong method type")
}
if svc.DisableServiceMesh {
if wrk.DisableServiceMesh {
panic("not valid")
}
if node.IsV2() {
@ -643,14 +643,14 @@ func (s *Sprawl) registerCatalogSidecarServiceV1(
logger = s.logger.With("cluster", cluster.Name)
)
pid, reg := serviceToSidecarCatalogRegistration(cluster, node, svc)
pid, reg := workloadToSidecarCatalogRegistration(cluster, node, wrk)
RETRY:
if _, err := client.Catalog().Register(reg, nil); err != nil {
if isACLNotFound(err) {
time.Sleep(50 * time.Millisecond)
goto RETRY
}
return fmt.Errorf("error registering service %s to node %s: %w", svc.ID, node.ID(), err)
return fmt.Errorf("error registering service %s to node %s: %w", wrk.ID, node.ID(), err)
}
logger.Debug("dataplane sidecar service created",
@ -683,23 +683,23 @@ type ServiceResources struct {
ProxyConfiguration *Resource[*pbmesh.ProxyConfiguration]
}
func serviceInstanceToResources(
func workloadInstanceToResources(
node *topology.Node,
svc *topology.Service,
wrk *topology.Workload,
) *ServiceResources {
if svc.IsMeshGateway {
if wrk.IsMeshGateway {
panic("v2 does not yet support mesh gateways")
}
tenancy := &pbresource.Tenancy{
Partition: svc.ID.Partition,
Namespace: svc.ID.Namespace,
Partition: wrk.ID.Partition,
Namespace: wrk.ID.Namespace,
}
var (
wlPorts = map[string]*pbcatalog.WorkloadPort{}
)
for name, port := range svc.Ports {
for name, port := range wrk.Ports {
wlPorts[name] = &pbcatalog.WorkloadPort{
Port: uint32(port.Number),
Protocol: port.ActualProtocol,
@ -708,22 +708,22 @@ func serviceInstanceToResources(
var (
selector = &pbcatalog.WorkloadSelector{
Names: []string{svc.Workload},
Names: []string{wrk.Workload},
}
workloadRes = &Resource[*pbcatalog.Workload]{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbcatalog.WorkloadType,
Name: svc.Workload,
Name: wrk.Workload,
Tenancy: tenancy,
},
Metadata: svc.Meta,
Metadata: wrk.Meta,
},
Data: &pbcatalog.Workload{
// TODO(rb): disabling this until node scoping makes sense again
// NodeName: node.PodName(),
Identity: svc.ID.Name,
Identity: wrk.ID.Name,
Ports: wlPorts,
Addresses: []*pbcatalog.WorkloadAddress{
{Host: node.LocalAddress()},
@ -734,7 +734,7 @@ func serviceInstanceToResources(
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbauth.WorkloadIdentityType,
Name: svc.WorkloadIdentity,
Name: wrk.WorkloadIdentity,
Tenancy: tenancy,
},
},
@ -746,13 +746,13 @@ func serviceInstanceToResources(
proxyConfigRes *Resource[*pbmesh.ProxyConfiguration]
)
if svc.HasCheck() {
if wrk.HasCheck() {
// TODO: needs ownerId
checkRes := &Resource[*pbcatalog.HealthStatus]{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbcatalog.HealthStatusType,
Name: svc.Workload + "-check-0",
Name: wrk.Workload + "-check-0",
Tenancy: tenancy,
},
},
@ -771,12 +771,12 @@ func serviceInstanceToResources(
)
}
if !svc.DisableServiceMesh {
if !wrk.DisableServiceMesh {
destinationsRes = &Resource[*pbmesh.Destinations]{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbmesh.DestinationsType,
Name: svc.Workload,
Name: wrk.Workload,
Tenancy: tenancy,
},
},
@ -785,33 +785,33 @@ func serviceInstanceToResources(
},
}
for _, u := range svc.Upstreams {
dest := &pbmesh.Destination{
for _, dest := range wrk.Destinations {
meshDest := &pbmesh.Destination{
DestinationRef: &pbresource.Reference{
Type: pbcatalog.ServiceType,
Name: u.ID.Name,
Name: dest.ID.Name,
Tenancy: &pbresource.Tenancy{
Partition: u.ID.Partition,
Namespace: u.ID.Namespace,
Partition: dest.ID.Partition,
Namespace: dest.ID.Namespace,
},
},
DestinationPort: u.PortName,
DestinationPort: dest.PortName,
ListenAddr: &pbmesh.Destination_IpPort{
IpPort: &pbmesh.IPPortAddress{
Ip: u.LocalAddress,
Port: uint32(u.LocalPort),
Ip: dest.LocalAddress,
Port: uint32(dest.LocalPort),
},
},
}
destinationsRes.Data.Destinations = append(destinationsRes.Data.Destinations, dest)
destinationsRes.Data.Destinations = append(destinationsRes.Data.Destinations, meshDest)
}
if svc.EnableTransparentProxy {
if wrk.EnableTransparentProxy {
proxyConfigRes = &Resource[*pbmesh.ProxyConfiguration]{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbmesh.ProxyConfigurationType,
Name: svc.Workload,
Name: wrk.Workload,
Tenancy: tenancy,
},
},
@ -834,10 +834,10 @@ func serviceInstanceToResources(
}
}
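Note the identity wiring above: the catalog Workload resource takes its name from wrk.Workload (assigned during compile as the ID name plus the pod name), while its Identity field uses wrk.ID.Name, and the WorkloadIdentity resource uses wrk.WorkloadIdentity, which defaults to the ID name when left empty (see the compile changes further below).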
func serviceToCatalogRegistration(
func workloadToCatalogRegistration(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
wrk *topology.Workload,
) *api.CatalogRegistration {
if node.IsV2() {
panic("don't call this")
@ -847,14 +847,14 @@ func serviceToCatalogRegistration(
SkipNodeUpdate: true,
Service: &api.AgentService{
Kind: api.ServiceKindTypical,
ID: svc.ID.Name,
Service: svc.ID.Name,
Meta: svc.Meta,
Port: svc.Port,
ID: wrk.ID.Name,
Service: wrk.ID.Name,
Meta: wrk.Meta,
Port: wrk.Port,
Address: node.LocalAddress(),
},
}
if svc.IsMeshGateway {
if wrk.IsMeshGateway {
reg.Service.Kind = api.ServiceKindMeshGateway
reg.Service.Proxy = &api.AgentServiceConnectProxyConfig{
Config: map[string]interface{}{
@ -878,46 +878,46 @@ func serviceToCatalogRegistration(
reg.Service.TaggedAddresses = map[string]api.ServiceAddress{
"lan": {
Address: node.LocalAddress(),
Port: svc.Port,
Port: wrk.Port,
},
"lan_ipv4": {
Address: node.LocalAddress(),
Port: svc.Port,
Port: wrk.Port,
},
"wan": {
Address: node.PublicAddress(),
Port: svc.Port,
Port: wrk.Port,
},
"wan_ipv4": {
Address: node.PublicAddress(),
Port: svc.Port,
Port: wrk.Port,
},
}
}
if cluster.Enterprise {
reg.Partition = svc.ID.Partition
reg.Service.Namespace = svc.ID.Namespace
reg.Service.Partition = svc.ID.Partition
reg.Partition = wrk.ID.Partition
reg.Service.Namespace = wrk.ID.Namespace
reg.Service.Partition = wrk.ID.Partition
}
if svc.HasCheck() {
if wrk.HasCheck() {
chk := &api.HealthCheck{
Name: "external sync",
// Type: "external-sync",
Status: "passing", // TODO
ServiceID: svc.ID.Name,
ServiceName: svc.ID.Name,
ServiceID: wrk.ID.Name,
ServiceName: wrk.ID.Name,
Output: "",
}
if cluster.Enterprise {
chk.Namespace = svc.ID.Namespace
chk.Partition = svc.ID.Partition
chk.Namespace = wrk.ID.Namespace
chk.Partition = wrk.ID.Partition
}
switch {
case svc.CheckTCP != "":
chk.Definition.TCP = svc.CheckTCP
case svc.CheckHTTP != "":
chk.Definition.HTTP = svc.CheckHTTP
case wrk.CheckTCP != "":
chk.Definition.TCP = wrk.CheckTCP
case wrk.CheckHTTP != "":
chk.Definition.HTTP = wrk.CheckHTTP
chk.Definition.Method = "GET"
}
reg.Checks = append(reg.Checks, chk)
@ -925,15 +925,15 @@ func serviceToCatalogRegistration(
return reg
}
func serviceToSidecarCatalogRegistration(
func workloadToSidecarCatalogRegistration(
cluster *topology.Cluster,
node *topology.Node,
svc *topology.Service,
) (topology.ServiceID, *api.CatalogRegistration) {
wrk *topology.Workload,
) (topology.ID, *api.CatalogRegistration) {
if node.IsV2() {
panic("don't call this")
}
pid := svc.ID
pid := wrk.ID
pid.Name += "-sidecar-proxy"
reg := &api.CatalogRegistration{
Node: node.PodName(),
@ -942,13 +942,13 @@ func serviceToSidecarCatalogRegistration(
Kind: api.ServiceKindConnectProxy,
ID: pid.Name,
Service: pid.Name,
Meta: svc.Meta,
Port: svc.EnvoyPublicListenerPort,
Meta: wrk.Meta,
Port: wrk.EnvoyPublicListenerPort,
Address: node.LocalAddress(),
Proxy: &api.AgentServiceConnectProxyConfig{
DestinationServiceName: svc.ID.Name,
DestinationServiceID: svc.ID.Name,
LocalServicePort: svc.Port,
DestinationServiceName: wrk.ID.Name,
DestinationServiceID: wrk.ID.Name,
LocalServicePort: wrk.Port,
},
},
Checks: []*api.HealthCheck{{
@ -958,7 +958,7 @@ func serviceToSidecarCatalogRegistration(
ServiceID: pid.Name,
ServiceName: pid.Name,
Definition: api.HealthCheckDefinition{
TCP: fmt.Sprintf("%s:%d", node.LocalAddress(), svc.EnvoyPublicListenerPort),
TCP: fmt.Sprintf("%s:%d", node.LocalAddress(), wrk.EnvoyPublicListenerPort),
},
Output: "",
}},
@ -979,17 +979,17 @@ func serviceToSidecarCatalogRegistration(
reg.Checks[0].Partition = pid.Partition
}
for _, u := range svc.Upstreams {
for _, dest := range wrk.Destinations {
pu := api.Upstream{
DestinationName: u.ID.Name,
DestinationPeer: u.Peer,
LocalBindAddress: u.LocalAddress,
LocalBindPort: u.LocalPort,
DestinationName: dest.ID.Name,
DestinationPeer: dest.Peer,
LocalBindAddress: dest.LocalAddress,
LocalBindPort: dest.LocalPort,
}
if cluster.Enterprise {
pu.DestinationNamespace = u.ID.Namespace
if u.Peer == "" {
pu.DestinationPartition = u.ID.Partition
pu.DestinationNamespace = dest.ID.Namespace
if dest.Peer == "" {
pu.DestinationPartition = dest.ID.Partition
}
}
reg.Service.Proxy.Upstreams = append(reg.Service.Proxy.Upstreams, pu)
View File
@ -59,29 +59,29 @@ func (s *Sprawl) PrintDetails() error {
})
}
for _, svc := range node.Services {
if svc.IsMeshGateway {
for _, wrk := range node.Workloads {
if wrk.IsMeshGateway {
cd.Apps = append(cd.Apps, appDetail{
Type: "mesh-gateway",
Container: node.DockerName(),
ExposedPort: node.ExposedPort(svc.Port),
ExposedEnvoyAdminPort: node.ExposedPort(svc.EnvoyAdminPort),
ExposedPort: node.ExposedPort(wrk.Port),
ExposedEnvoyAdminPort: node.ExposedPort(wrk.EnvoyAdminPort),
Addresses: addrs,
Service: svc.ID.String(),
Service: wrk.ID.String(),
})
} else {
ports := make(map[string]int)
for name, port := range svc.Ports {
for name, port := range wrk.Ports {
ports[name] = node.ExposedPort(port.Number)
}
cd.Apps = append(cd.Apps, appDetail{
Type: "app",
Container: node.DockerName(),
ExposedPort: node.ExposedPort(svc.Port),
ExposedPort: node.ExposedPort(wrk.Port),
ExposedPorts: ports,
ExposedEnvoyAdminPort: node.ExposedPort(svc.EnvoyAdminPort),
ExposedEnvoyAdminPort: node.ExposedPort(wrk.EnvoyAdminPort),
Addresses: addrs,
Service: svc.ID.String(),
Service: wrk.ID.String(),
})
}
}
View File
@ -37,12 +37,22 @@ func (s *Store) ReadAgentToken(cluster string, nid topology.NodeID) string {
return s.read(encode(cluster, "agent", nid.String()))
}
func (s *Store) SaveServiceToken(cluster string, sid topology.ServiceID, value string) {
s.save(encode(cluster, "service", sid.String()), value)
// Deprecated: Use SaveWorkloadToken instead.
func (s *Store) SaveServiceToken(cluster string, wid topology.ID, value string) {
s.SaveWorkloadToken(cluster, wid, value)
}
func (s *Store) ReadServiceToken(cluster string, sid topology.ServiceID) string {
return s.read(encode(cluster, "service", sid.String()))
func (s *Store) SaveWorkloadToken(cluster string, wid topology.ID, value string) {
s.save(encode(cluster, "workload", wid.String()), value)
}
// Deprecated: Use ReadWorkloadToken instead.
func (s *Store) ReadServiceToken(cluster string, wid topology.ID) string {
return s.ReadWorkloadToken(cluster, wid)
}
func (s *Store) ReadWorkloadToken(cluster string, wid topology.ID) string {
return s.read(encode(cluster, "workload", wid.String()))
}
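Worth noting: the storage key prefix changes here from "service" to "workload", and the deprecated methods forward to the new ones, so both spellings read and write the same entry. A sketch of the round-trip, with illustrative values:

// `s` is a *Store and `wid` a topology.ID built elsewhere.
s.SaveServiceToken("dc1", wid, "secret-id") // stores under encode("dc1", "workload", wid.String())
token := s.ReadWorkloadToken("dc1", wid)    // reads the same entry back
_ = token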
func (s *Store) save(key, value string) {
View File
@ -270,8 +270,8 @@ func (g *Generator) Generate(step Step) error {
addVolume(node.DockerName())
}
for _, svc := range node.Services {
addImage("", svc.Image)
for _, wrk := range node.Workloads {
addImage("", wrk.Image)
}
myContainers, err := g.generateNodeContainers(step, c, node)
View File
@ -73,51 +73,51 @@ func (g *Generator) generateNodeContainers(
}
}
svcContainers := []Resource{}
for _, svc := range node.SortedServices() {
token := g.sec.ReadServiceToken(node.Cluster, svc.ID)
wrkContainers := []Resource{}
for _, wrk := range node.SortedWorkloads() {
token := g.sec.ReadWorkloadToken(node.Cluster, wrk.ID)
switch {
case svc.IsMeshGateway && !node.IsDataplane():
svcContainers = append(svcContainers, Eval(tfMeshGatewayT, struct {
case wrk.IsMeshGateway && !node.IsDataplane():
wrkContainers = append(wrkContainers, Eval(tfMeshGatewayT, struct {
terraformPod
ImageResource string
Enterprise bool
Service *topology.Service
Workload *topology.Workload
Token string
}{
terraformPod: pod,
ImageResource: DockerImageResourceName(node.Images.EnvoyConsulImage()),
Enterprise: cluster.Enterprise,
Service: svc,
Workload: wrk,
Token: token,
}))
case svc.IsMeshGateway && node.IsDataplane():
svcContainers = append(svcContainers, Eval(tfMeshGatewayDataplaneT, &struct {
case wrk.IsMeshGateway && node.IsDataplane():
wrkContainers = append(wrkContainers, Eval(tfMeshGatewayDataplaneT, &struct {
terraformPod
ImageResource string
Enterprise bool
Service *topology.Service
Workload *topology.Workload
Token string
}{
terraformPod: pod,
ImageResource: DockerImageResourceName(node.Images.LocalDataplaneImage()),
Enterprise: cluster.Enterprise,
Service: svc,
Workload: wrk,
Token: token,
}))
case !svc.IsMeshGateway:
svcContainers = append(svcContainers, Eval(tfAppT, struct {
case !wrk.IsMeshGateway:
wrkContainers = append(wrkContainers, Eval(tfAppT, struct {
terraformPod
ImageResource string
Service *topology.Service
Workload *topology.Workload
}{
terraformPod: pod,
ImageResource: DockerImageResourceName(svc.Image),
Service: svc,
ImageResource: DockerImageResourceName(wrk.Image),
Workload: wrk,
}))
if svc.DisableServiceMesh {
if wrk.DisableServiceMesh {
break
}
@ -125,7 +125,7 @@ func (g *Generator) generateNodeContainers(
var img string
if node.IsDataplane() {
tmpl = tfAppDataplaneT
if svc.EnableTransparentProxy {
if wrk.EnableTransparentProxy {
img = DockerImageResourceName(node.Images.LocalDataplaneTProxyImage())
} else {
img = DockerImageResourceName(node.Images.LocalDataplaneImage())
@ -133,23 +133,23 @@ func (g *Generator) generateNodeContainers(
} else {
img = DockerImageResourceName(node.Images.EnvoyConsulImage())
}
svcContainers = append(svcContainers, Eval(tmpl, struct {
wrkContainers = append(wrkContainers, Eval(tmpl, struct {
terraformPod
ImageResource string
Service *topology.Service
Workload *topology.Workload
Token string
Enterprise bool
}{
terraformPod: pod,
ImageResource: img,
Service: svc,
Workload: wrk,
Token: token,
Enterprise: cluster.Enterprise,
}))
}
if step.StartServices() {
containers = append(containers, svcContainers...)
containers = append(containers, wrkContainers...)
}
}
View File
@ -1,5 +1,5 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar"
resource "docker_container" "{{.Node.DockerName}}-{{.Workload.ID.TFString}}-sidecar" {
name = "{{.Node.DockerName}}-{{.Workload.ID.TFString}}-sidecar"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.ImageResource}}.image_id
restart = "on-failure"
@ -17,7 +17,7 @@ resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidec
read_only = true
}
{{ if .Service.EnableTransparentProxy }}
{{ if .Workload.EnableTransparentProxy }}
capabilities {
add = ["NET_ADMIN"]
}
@ -27,17 +27,17 @@ resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidec
env = [
"DP_CONSUL_ADDRESSES=server.{{.Node.Cluster}}-consulcluster.lan",
{{ if .Node.IsV2 }}
"DP_PROXY_ID={{.Service.Workload}}",
"DP_PROXY_ID={{.Workload.Workload}}",
{{ if .Enterprise }}
"DP_PROXY_NAMESPACE={{.Service.ID.Namespace}}",
"DP_PROXY_PARTITION={{.Service.ID.Partition}}",
"DP_PROXY_NAMESPACE={{.Workload.ID.Namespace}}",
"DP_PROXY_PARTITION={{.Workload.ID.Partition}}",
{{ end }}
{{ else }}
"DP_SERVICE_NODE_NAME={{.Node.PodName}}",
"DP_PROXY_SERVICE_ID={{.Service.ID.Name}}-sidecar-proxy",
"DP_PROXY_SERVICE_ID={{.Workload.ID.Name}}-sidecar-proxy",
{{ if .Enterprise }}
"DP_SERVICE_NAMESPACE={{.Service.ID.Namespace}}",
"DP_SERVICE_PARTITION={{.Service.ID.Partition}}",
"DP_SERVICE_NAMESPACE={{.Workload.ID.Namespace}}",
"DP_SERVICE_PARTITION={{.Workload.ID.Partition}}",
{{ end }}
{{ end }}
@ -46,7 +46,7 @@ resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidec
"DP_CREDENTIAL_STATIC_TOKEN={{.Token}}",
{{ end }}
{{ if .Service.EnableTransparentProxy }}
{{ if .Workload.EnableTransparentProxy }}
"REDIRECT_TRAFFIC_ARGS=-exclude-inbound-port=19000",
{{ end }}
View File
@ -1,5 +1,5 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidecar"
resource "docker_container" "{{.Node.DockerName}}-{{.Workload.ID.TFString}}-sidecar" {
name = "{{.Node.DockerName}}-{{.Workload.ID.TFString}}-sidecar"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.ImageResource}}.image_id
restart = "on-failure"
@ -19,13 +19,13 @@ resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}-sidec
command = [
"consul", "connect", "envoy",
"-sidecar-for={{.Service.ID.Name}}",
"-sidecar-for={{.Workload.ID.Name}}",
"-grpc-addr=http://127.0.0.1:8502",
// for demo purposes (TODO: huh?)
"-admin-bind=0.0.0.0:{{.Service.EnvoyAdminPort}}",
"-admin-bind=0.0.0.0:{{.Workload.EnvoyAdminPort}}",
{{if .Enterprise}}
"-partition={{.Service.ID.Partition}}",
"-namespace={{.Service.ID.Namespace}}",
"-partition={{.Workload.ID.Partition}}",
"-namespace={{.Workload.ID.Namespace}}",
{{end}}
{{if .Token }}
"-token={{.Token}}",
View File
@ -1,5 +1,5 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}"
resource "docker_container" "{{.Node.DockerName}}-{{.Workload.ID.TFString}}" {
name = "{{.Node.DockerName}}-{{.Workload.ID.TFString}}"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.ImageResource}}.image_id
restart = "on-failure"
@ -12,13 +12,13 @@ resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" {
{{- end }}
env = [
{{- range .Service.Env }}
{{- range .Workload.Env }}
"{{.}}",
{{- end}}
]
command = [
{{- range .Service.Command }}
{{- range .Workload.Command }}
"{{.}}",
{{- end }}
]
View File
@ -1,5 +1,5 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}"
resource "docker_container" "{{.Node.DockerName}}-{{.Workload.ID.TFString}}" {
name = "{{.Node.DockerName}}-{{.Workload.ID.TFString}}"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.ImageResource}}.image_id
restart = "on-failure"
@ -20,10 +20,10 @@ resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" {
env = [
"DP_CONSUL_ADDRESSES=server.{{.Node.Cluster}}-consulcluster.lan",
"DP_SERVICE_NODE_NAME={{.Node.PodName}}",
"DP_PROXY_SERVICE_ID={{.Service.ID.Name}}",
"DP_PROXY_SERVICE_ID={{.Workload.ID.Name}}",
{{ if .Enterprise }}
"DP_SERVICE_NAMESPACE={{.Service.ID.Namespace}}",
"DP_SERVICE_PARTITION={{.Service.ID.Partition}}",
"DP_SERVICE_NAMESPACE={{.Workload.ID.Namespace}}",
"DP_SERVICE_PARTITION={{.Workload.ID.Partition}}",
{{ end }}
{{ if .Token }}
"DP_CREDENTIAL_TYPE=static",
View File
@ -1,5 +1,5 @@
resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" {
name = "{{.Node.DockerName}}-{{.Service.ID.TFString}}"
resource "docker_container" "{{.Node.DockerName}}-{{.Workload.ID.TFString}}" {
name = "{{.Node.DockerName}}-{{.Workload.ID.TFString}}"
network_mode = "container:${docker_container.{{.PodName}}.id}"
image = docker_image.{{.ImageResource}}.image_id
restart = "on-failure"
@ -21,13 +21,13 @@ resource "docker_container" "{{.Node.DockerName}}-{{.Service.ID.TFString}}" {
"consul", "connect", "envoy",
"-register",
"-mesh-gateway",
"-address={{`{{ GetInterfaceIP \"eth0\" }}`}}:{{.Service.Port}}",
"-wan-address={{`{{ GetInterfaceIP \"eth1\" }}`}}:{{.Service.Port}}",
"-address={{`{{ GetInterfaceIP \"eth0\" }}`}}:{{.Workload.Port}}",
"-wan-address={{`{{ GetInterfaceIP \"eth1\" }}`}}:{{.Workload.Port}}",
"-grpc-addr=http://127.0.0.1:8502",
// for demo purposes (TODO: huh?)
"-admin-bind=0.0.0.0:{{.Service.EnvoyAdminPort}}",
"-admin-bind=0.0.0.0:{{.Workload.EnvoyAdminPort}}",
{{ if .Enterprise }}
"-partition={{.Service.ID.Partition}}",
"-partition={{.Workload.ID.Partition}}",
{{end}}
{{ if .Token }}
"-token={{.Token}}",
View File
@ -190,12 +190,12 @@ func (s *Sprawl) awaitMeshGateways() {
startTime := time.Now()
s.logger.Info("awaiting mesh gateways")
// TODO: maybe a better way to do this
mgws := []*topology.Service{}
mgws := []*topology.Workload{}
for _, clu := range s.topology.Clusters {
for _, node := range clu.Nodes {
for _, svc := range node.Services {
if svc.IsMeshGateway {
mgws = append(mgws, svc)
for _, wrk := range node.Workloads {
if wrk.IsMeshGateway {
mgws = append(mgws, wrk)
}
}
}
View File
@ -390,11 +390,11 @@ func (s *Sprawl) SnapshotEnvoy(ctx context.Context) error {
if n.Disabled {
continue
}
for _, s := range n.Services {
if s.Disabled || s.EnvoyAdminPort <= 0 {
for _, wrk := range n.Workloads {
if wrk.Disabled || wrk.EnvoyAdminPort <= 0 {
continue
}
prefix := fmt.Sprintf("http://%s:%d", n.LocalAddress(), s.EnvoyAdminPort)
prefix := fmt.Sprintf("http://%s:%d", n.LocalAddress(), wrk.EnvoyAdminPort)
for fn, target := range targets {
u := prefix + "/" + target
@ -402,23 +402,23 @@ func (s *Sprawl) SnapshotEnvoy(ctx context.Context) error {
body, err := scrapeURL(client, u)
if err != nil {
merr = multierror.Append(merr, fmt.Errorf("could not scrape %q for %s on %s: %w",
target, s.ID.String(), n.ID().String(), err,
target, wrk.ID.String(), n.ID().String(), err,
))
continue
}
outFn := filepath.Join(snapDir, n.DockerName()+"--"+s.ID.TFString()+"."+fn)
outFn := filepath.Join(snapDir, n.DockerName()+"--"+wrk.ID.TFString()+"."+fn)
if err := os.WriteFile(outFn+".tmp", body, 0644); err != nil {
merr = multierror.Append(merr, fmt.Errorf("could not write output %q for %s on %s: %w",
target, s.ID.String(), n.ID().String(), err,
target, wrk.ID.String(), n.ID().String(), err,
))
continue
}
if err := os.Rename(outFn+".tmp", outFn); err != nil {
merr = multierror.Append(merr, fmt.Errorf("could not write output %q for %s on %s: %w",
target, s.ID.String(), n.ID().String(), err,
target, wrk.ID.String(), n.ID().String(), err,
))
continue
}
View File
@ -40,9 +40,9 @@ func TestSprawl_CatalogV2(t *testing.T) {
Kind: topology.NodeKindDataplane,
Version: topology.NodeVersionV2,
Name: "dc1-client1",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: topology.ServiceID{Name: "ping"},
ID: topology.ID{Name: "ping"},
Image: "rboyer/pingpong:latest",
Port: 8080,
EnvoyAdminPort: 19000,
@ -53,8 +53,8 @@ func TestSprawl_CatalogV2(t *testing.T) {
"-dialfreq", "250ms",
"-name", "ping",
},
Upstreams: []*topology.Upstream{{
ID: topology.ServiceID{Name: "pong"},
Destinations: []*topology.Destination{{
ID: topology.ID{Name: "pong"},
LocalPort: 9090,
}},
},
@ -64,9 +64,9 @@ func TestSprawl_CatalogV2(t *testing.T) {
Kind: topology.NodeKindDataplane,
Version: topology.NodeVersionV2,
Name: "dc1-client2",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: topology.ServiceID{Name: "pong"},
ID: topology.ID{Name: "pong"},
Image: "rboyer/pingpong:latest",
Port: 8080,
EnvoyAdminPort: 19000,
@ -77,8 +77,8 @@ func TestSprawl_CatalogV2(t *testing.T) {
"-dialfreq", "250ms",
"-name", "pong",
},
Upstreams: []*topology.Upstream{{
ID: topology.ServiceID{Name: "ping"},
Destinations: []*topology.Destination{{
ID: topology.ID{Name: "ping"},
LocalPort: 9090,
}},
},
@ -174,9 +174,9 @@ func TestSprawl(t *testing.T) {
{
Kind: topology.NodeKindClient,
Name: "dc1-client1",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: topology.ServiceID{Name: "mesh-gateway"},
ID: topology.ID{Name: "mesh-gateway"},
Port: 8443,
EnvoyAdminPort: 19000,
IsMeshGateway: true,
@ -186,9 +186,9 @@ func TestSprawl(t *testing.T) {
{
Kind: topology.NodeKindClient,
Name: "dc1-client2",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: topology.ServiceID{Name: "ping"},
ID: topology.ID{Name: "ping"},
Image: "rboyer/pingpong:latest",
Port: 8080,
EnvoyAdminPort: 19000,
@ -199,8 +199,8 @@ func TestSprawl(t *testing.T) {
"-dialfreq", "250ms",
"-name", "ping",
},
Upstreams: []*topology.Upstream{{
ID: topology.ServiceID{Name: "pong"},
Destinations: []*topology.Destination{{
ID: topology.ID{Name: "pong"},
LocalPort: 9090,
Peer: "peer-dc2-default",
}},
@ -226,9 +226,9 @@ func TestSprawl(t *testing.T) {
{
Kind: topology.NodeKindClient,
Name: "dc2-client1",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: topology.ServiceID{Name: "mesh-gateway"},
ID: topology.ID{Name: "mesh-gateway"},
Port: 8443,
EnvoyAdminPort: 19000,
IsMeshGateway: true,
@ -238,9 +238,9 @@ func TestSprawl(t *testing.T) {
{
Kind: topology.NodeKindDataplane,
Name: "dc2-client2",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: topology.ServiceID{Name: "pong"},
ID: topology.ID{Name: "pong"},
Image: "rboyer/pingpong:latest",
Port: 8080,
EnvoyAdminPort: 19000,
@ -251,8 +251,8 @@ func TestSprawl(t *testing.T) {
"-dialfreq", "250ms",
"-name", "pong",
},
Upstreams: []*topology.Upstream{{
ID: topology.ServiceID{Name: "ping"},
Destinations: []*topology.Destination{{
ID: topology.ID{Name: "ping"},
LocalPort: 9090,
Peer: "peer-dc1-default",
}},
@ -263,9 +263,9 @@ func TestSprawl(t *testing.T) {
Kind: topology.NodeKindDataplane,
Version: topology.NodeVersionV2,
Name: "dc2-client3",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: topology.ServiceID{Name: "pong"},
ID: topology.ID{Name: "pong"},
Image: "rboyer/pingpong:latest",
Port: 8080,
EnvoyAdminPort: 19000,
@ -276,8 +276,8 @@ func TestSprawl(t *testing.T) {
"-dialfreq", "250ms",
"-name", "pong",
},
Upstreams: []*topology.Upstream{{
ID: topology.ServiceID{Name: "ping"},
Destinations: []*topology.Destination{{
ID: topology.ID{Name: "ping"},
LocalPort: 9090,
Peer: "peer-dc1-default",
}},
View File
@ -130,7 +130,7 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
}
if len(c.Services) == 0 { // always initialize this regardless of v2-ness, because we might late-enable it below
c.Services = make(map[ServiceID]*pbcatalog.Service)
c.Services = make(map[ID]*pbcatalog.Service)
}
var implicitV2Services bool
@ -317,53 +317,59 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
return nil, fmt.Errorf("cluster %q node %q has more than one public address", c.Name, n.Name)
}
if n.IsDataplane() && len(n.Services) > 1 {
if len(n.Services) > 0 {
logger.Warn("please use Node.Workloads instead of Node.Services")
n.Workloads = append(n.Workloads, n.Services...)
n.Services = nil
}
if n.IsDataplane() && len(n.Workloads) > 1 {
// Our use of consul-dataplane here is supposed to mimic that
// of consul-k8s, which ultimately has one IP per Service, so
// we introduce the same limitation here.
return nil, fmt.Errorf("cluster %q node %q uses dataplane, but has more than one service", c.Name, n.Name)
}
seenServices := make(map[ServiceID]struct{})
for _, svc := range n.Services {
seenServices := make(map[ID]struct{})
for _, wrk := range n.Workloads {
if n.IsAgent() {
// Default to that of the enclosing node.
svc.ID.Partition = n.Partition
wrk.ID.Partition = n.Partition
}
svc.ID.Normalize()
wrk.ID.Normalize()
// Denormalize
svc.Node = n
svc.NodeVersion = n.Version
wrk.Node = n
wrk.NodeVersion = n.Version
if n.IsV2() {
svc.Workload = svc.ID.Name + "-" + n.PodName()
wrk.Workload = wrk.ID.Name + "-" + n.PodName()
}
if !IsValidLabel(svc.ID.Partition) {
return nil, fmt.Errorf("service partition is not valid: %s", svc.ID.Partition)
if !IsValidLabel(wrk.ID.Partition) {
return nil, fmt.Errorf("service partition is not valid: %s", wrk.ID.Partition)
}
if !IsValidLabel(svc.ID.Namespace) {
return nil, fmt.Errorf("service namespace is not valid: %s", svc.ID.Namespace)
if !IsValidLabel(wrk.ID.Namespace) {
return nil, fmt.Errorf("service namespace is not valid: %s", wrk.ID.Namespace)
}
if !IsValidLabel(svc.ID.Name) {
return nil, fmt.Errorf("service name is not valid: %s", svc.ID.Name)
if !IsValidLabel(wrk.ID.Name) {
return nil, fmt.Errorf("service name is not valid: %s", wrk.ID.Name)
}
if svc.ID.Partition != n.Partition {
if wrk.ID.Partition != n.Partition {
return nil, fmt.Errorf("service %s on node %s has mismatched partitions: %s != %s",
svc.ID.Name, n.Name, svc.ID.Partition, n.Partition)
wrk.ID.Name, n.Name, wrk.ID.Partition, n.Partition)
}
addTenancy(svc.ID.Partition, svc.ID.Namespace)
addTenancy(wrk.ID.Partition, wrk.ID.Namespace)
if _, exists := seenServices[svc.ID]; exists {
return nil, fmt.Errorf("cannot have two services on the same node %q in the same cluster %q with the same name %q", n.ID(), c.Name, svc.ID)
if _, exists := seenServices[wrk.ID]; exists {
return nil, fmt.Errorf("cannot have two services on the same node %q in the same cluster %q with the same name %q", n.ID(), c.Name, wrk.ID)
}
seenServices[svc.ID] = struct{}{}
seenServices[wrk.ID] = struct{}{}
if !svc.DisableServiceMesh && n.IsDataplane() {
if svc.EnvoyPublicListenerPort <= 0 {
if !wrk.DisableServiceMesh && n.IsDataplane() {
if wrk.EnvoyPublicListenerPort <= 0 {
if _, ok := n.usedPorts[20000]; !ok {
// For convenience the FIRST service on a node can get 20000 for free.
svc.EnvoyPublicListenerPort = 20000
wrk.EnvoyPublicListenerPort = 20000
} else {
return nil, fmt.Errorf("envoy public listener port is required")
}
@ -371,102 +377,102 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
}
// add all of the service ports
for _, port := range svc.ports() {
for _, port := range wrk.ports() {
if ok := exposePort(port); !ok {
return nil, fmt.Errorf("port used more than once on cluster %q node %q: %d", c.Name, n.ID(), port)
}
}
// TODO(rb): re-expose?
// switch svc.Protocol {
// switch wrk.Protocol {
// case "":
// svc.Protocol = "tcp"
// wrk.Protocol = "tcp"
// fallthrough
// case "tcp":
// if svc.CheckHTTP != "" {
// if wrk.CheckHTTP != "" {
// return nil, fmt.Errorf("cannot set CheckHTTP for tcp service")
// }
// case "http":
// if svc.CheckTCP != "" {
// if wrk.CheckTCP != "" {
// return nil, fmt.Errorf("cannot set CheckTCP for tcp service")
// }
// default:
// return nil, fmt.Errorf("service has invalid protocol: %s", svc.Protocol)
// return nil, fmt.Errorf("service has invalid protocol: %s", wrk.Protocol)
// }
defaultUpstream := func(u *Upstream) error {
defaultDestination := func(dest *Destination) error {
// Default to that of the enclosing service.
if u.Peer == "" {
if u.ID.Partition == "" {
u.ID.Partition = svc.ID.Partition
if dest.Peer == "" {
if dest.ID.Partition == "" {
dest.ID.Partition = wrk.ID.Partition
}
if u.ID.Namespace == "" {
u.ID.Namespace = svc.ID.Namespace
if dest.ID.Namespace == "" {
dest.ID.Namespace = wrk.ID.Namespace
}
} else {
if u.ID.Partition != "" {
u.ID.Partition = "" // irrelevant here; we'll set it to the value of the OTHER side for plumbing purposes in tests
if dest.ID.Partition != "" {
dest.ID.Partition = "" // irrelevant here; we'll set it to the value of the OTHER side for plumbing purposes in tests
}
u.ID.Namespace = NamespaceOrDefault(u.ID.Namespace)
foundPeerNames[c.Name][u.Peer] = struct{}{}
dest.ID.Namespace = NamespaceOrDefault(dest.ID.Namespace)
foundPeerNames[c.Name][dest.Peer] = struct{}{}
}
addTenancy(u.ID.Partition, u.ID.Namespace)
addTenancy(dest.ID.Partition, dest.ID.Namespace)
if u.Implied {
if u.PortName == "" {
return fmt.Errorf("implicit upstreams must use port names in v2")
if dest.Implied {
if dest.PortName == "" {
return fmt.Errorf("implicit destinations must use port names in v2")
}
} else {
if u.LocalAddress == "" {
if dest.LocalAddress == "" {
// v1 defaults to 127.0.0.1 but v2 does not. Safe to do this generally though.
u.LocalAddress = "127.0.0.1"
dest.LocalAddress = "127.0.0.1"
}
if u.PortName != "" && n.IsV1() {
return fmt.Errorf("explicit upstreams cannot use port names in v1")
if dest.PortName != "" && n.IsV1() {
return fmt.Errorf("explicit destinations cannot use port names in v1")
}
if u.PortName == "" && n.IsV2() {
if dest.PortName == "" && n.IsV2() {
// Assume this is a v1->v2 conversion and name it.
u.PortName = "legacy"
dest.PortName = "legacy"
}
}
return nil
}
for _, u := range svc.Upstreams {
if err := defaultUpstream(u); err != nil {
for _, dest := range wrk.Destinations {
if err := defaultDestination(dest); err != nil {
return nil, err
}
}
if n.IsV2() {
for _, u := range svc.ImpliedUpstreams {
u.Implied = true
if err := defaultUpstream(u); err != nil {
for _, dest := range wrk.ImpliedDestinations {
dest.Implied = true
if err := defaultDestination(dest); err != nil {
return nil, err
}
}
} else {
if len(svc.ImpliedUpstreams) > 0 {
return nil, fmt.Errorf("v1 does not support implied upstreams yet")
if len(wrk.ImpliedDestinations) > 0 {
return nil, fmt.Errorf("v1 does not support implied destinations yet")
}
}
if err := svc.Validate(); err != nil {
return nil, fmt.Errorf("cluster %q node %q service %q is not valid: %w", c.Name, n.Name, svc.ID.String(), err)
if err := wrk.Validate(); err != nil {
return nil, fmt.Errorf("cluster %q node %q service %q is not valid: %w", c.Name, n.Name, wrk.ID.String(), err)
}
if svc.EnableTransparentProxy && !n.IsDataplane() {
if wrk.EnableTransparentProxy && !n.IsDataplane() {
return nil, fmt.Errorf("cannot enable tproxy on a non-dataplane node")
}
if n.IsV2() {
if implicitV2Services {
svc.V2Services = []string{svc.ID.Name}
wrk.V2Services = []string{wrk.ID.Name}
var svcPorts []*pbcatalog.ServicePort
for name, cfg := range svc.Ports {
for name, cfg := range wrk.Ports {
svcPorts = append(svcPorts, &pbcatalog.ServicePort{
TargetPort: name,
Protocol: cfg.ActualProtocol,
@ -478,40 +484,40 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
Ports: svcPorts,
}
prev, ok := c.Services[svc.ID]
prev, ok := c.Services[wrk.ID]
if !ok {
c.Services[svc.ID] = v2svc
c.Services[wrk.ID] = v2svc
prev = v2svc
}
if prev.Workloads == nil {
prev.Workloads = &pbcatalog.WorkloadSelector{}
}
prev.Workloads.Names = append(prev.Workloads.Names, svc.Workload)
prev.Workloads.Names = append(prev.Workloads.Names, wrk.Workload)
} else {
for _, name := range svc.V2Services {
v2ID := NewServiceID(name, svc.ID.Namespace, svc.ID.Partition)
for _, name := range wrk.V2Services {
v2ID := NewServiceID(name, wrk.ID.Namespace, wrk.ID.Partition)
v2svc, ok := c.Services[v2ID]
if !ok {
return nil, fmt.Errorf("cluster %q node %q service %q has a v2 service reference that does not exist %q",
c.Name, n.Name, svc.ID.String(), name)
c.Name, n.Name, wrk.ID.String(), name)
}
if v2svc.Workloads == nil {
v2svc.Workloads = &pbcatalog.WorkloadSelector{}
}
v2svc.Workloads.Names = append(v2svc.Workloads.Names, svc.Workload)
v2svc.Workloads.Names = append(v2svc.Workloads.Names, wrk.Workload)
}
}
if svc.WorkloadIdentity == "" {
svc.WorkloadIdentity = svc.ID.Name
if wrk.WorkloadIdentity == "" {
wrk.WorkloadIdentity = wrk.ID.Name
}
} else {
if len(svc.V2Services) > 0 {
if len(wrk.V2Services) > 0 {
return nil, fmt.Errorf("cannot specify v2 services for v1")
}
if svc.WorkloadIdentity != "" {
if wrk.WorkloadIdentity != "" {
return nil, fmt.Errorf("cannot specify workload identities for v1")
}
}
@ -523,18 +529,18 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
}
if c.EnableV2 {
// Populate the VirtualPort field on all implied upstreams.
// Populate the VirtualPort field on all implied destinations.
for _, n := range c.Nodes {
for _, svc := range n.Services {
for _, u := range svc.ImpliedUpstreams {
res, ok := c.Services[u.ID]
for _, wrk := range n.Workloads {
for _, dest := range wrk.ImpliedDestinations {
res, ok := c.Services[dest.ID]
if ok {
for _, sp := range res.Ports {
if sp.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
continue
}
if sp.TargetPort == u.PortName {
u.VirtualPort = sp.VirtualPort
if sp.TargetPort == dest.PortName {
dest.VirtualPort = sp.VirtualPort
}
}
}
@ -648,40 +654,40 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
}
}
// after we decoded the peering stuff, we can fill in some computed data in the upstreams
// after we decoded the peering stuff, we can fill in some computed data in the destinations
for _, c := range clusters {
c.Peerings = clusteredPeerings[c.Name]
for _, n := range c.Nodes {
for _, svc := range n.Services {
for _, u := range svc.Upstreams {
if u.Peer == "" {
u.Cluster = c.Name
u.Peering = nil
for _, wrk := range n.Workloads {
for _, dest := range wrk.Destinations {
if dest.Peer == "" {
dest.Cluster = c.Name
dest.Peering = nil
continue
}
remotePeer, ok := c.Peerings[u.Peer]
remotePeer, ok := c.Peerings[dest.Peer]
if !ok {
return nil, fmt.Errorf("not possible")
}
u.Cluster = remotePeer.Link.Name
u.Peering = remotePeer.Link
dest.Cluster = remotePeer.Link.Name
dest.Peering = remotePeer.Link
// this helps when generating fortio assertions; otherwise the field is ignored
u.ID.Partition = remotePeer.Link.Partition
dest.ID.Partition = remotePeer.Link.Partition
}
for _, u := range svc.ImpliedUpstreams {
if u.Peer == "" {
u.Cluster = c.Name
u.Peering = nil
for _, dest := range wrk.ImpliedDestinations {
if dest.Peer == "" {
dest.Cluster = c.Name
dest.Peering = nil
continue
}
remotePeer, ok := c.Peerings[u.Peer]
remotePeer, ok := c.Peerings[dest.Peer]
if !ok {
return nil, fmt.Errorf("not possible")
}
u.Cluster = remotePeer.Link.Name
u.Peering = remotePeer.Link
dest.Cluster = remotePeer.Link.Name
dest.Peering = remotePeer.Link
// this helps when generating fortio assertions; otherwise the field is ignored
u.ID.Partition = remotePeer.Link.Partition
dest.ID.Partition = remotePeer.Link.Partition
}
}
}
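A sketch of what this fill-in does for a peered destination, assuming a peering registered under the hypothetical local name "peer-dc2-default":

dest := &topology.Destination{
	ID:        topology.NewID("static-server", "default", "default"),
	LocalPort: 5000,
	Peer:      "peer-dc2-default", // must match a key in c.Peerings
}
// After compile(): dest.Cluster and dest.Peering are copied from the remote
// side of that peering, and dest.ID.Partition is rewritten to the linked
// partition so generated fortio assertions line up.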
@ -843,26 +849,26 @@ func inheritAndValidateNodes(
currAddr.inheritFromExisting(prevAddr)
}
svcMap := mapifyServices(currNode.Node.Services)
wrkMap := mapifyWorkloads(currNode.Node.Workloads)
for _, svc := range node.Services {
currSvc, ok := svcMap[svc.ID]
for _, wrk := range node.Workloads {
currWrk, ok := wrkMap[wrk.ID]
if !ok {
continue // workload has vanished, this is ok
}
// don't care about index permutation
if currSvc.ID != svc.ID ||
currSvc.Port != svc.Port ||
!maps.Equal(currSvc.Ports, svc.Ports) ||
currSvc.EnvoyAdminPort != svc.EnvoyAdminPort ||
currSvc.EnvoyPublicListenerPort != svc.EnvoyPublicListenerPort ||
isSame(currSvc.Command, svc.Command) != nil ||
isSame(currSvc.Env, svc.Env) != nil {
return fmt.Errorf("cannot edit some address fields for %q", svc.ID)
if currWrk.ID != wrk.ID ||
currWrk.Port != wrk.Port ||
!maps.Equal(currWrk.Ports, wrk.Ports) ||
currWrk.EnvoyAdminPort != wrk.EnvoyAdminPort ||
currWrk.EnvoyPublicListenerPort != wrk.EnvoyPublicListenerPort ||
isSame(currWrk.Command, wrk.Command) != nil ||
isSame(currWrk.Env, wrk.Env) != nil {
return fmt.Errorf("cannot edit some address fields for %q", wrk.ID)
}
currSvc.inheritFromExisting(svc)
currWrk.inheritFromExisting(wrk)
}
}
return nil
@ -935,10 +941,10 @@ type nodeWithPosition struct {
Node *Node
}
func mapifyServices(services []*Service) map[ServiceID]*Service {
m := make(map[ServiceID]*Service)
for _, svc := range services {
m[svc.ID] = svc
func mapifyWorkloads(workloads []*Service) map[ID]*Service {
m := make(map[ID]*Service)
for _, wrk := range workloads {
m[wrk.ID] = wrk
}
return m
}

View File

@ -9,41 +9,6 @@ import (
"github.com/hashicorp/consul/api"
)
type NodeServiceID struct {
Node string
Service string `json:",omitempty"`
Namespace string `json:",omitempty"`
Partition string `json:",omitempty"`
}
func NewNodeServiceID(node, service, namespace, partition string) NodeServiceID {
id := NodeServiceID{
Node: node,
Service: service,
Namespace: namespace,
Partition: partition,
}
id.Normalize()
return id
}
func (id NodeServiceID) NodeID() NodeID {
return NewNodeID(id.Node, id.Partition)
}
func (id NodeServiceID) ServiceID() ServiceID {
return NewServiceID(id.Service, id.Namespace, id.Partition)
}
func (id *NodeServiceID) Normalize() {
id.Namespace = NamespaceOrDefault(id.Namespace)
id.Partition = PartitionOrDefault(id.Partition)
}
func (id NodeServiceID) String() string {
return fmt.Sprintf("%s/%s/%s/%s", id.Partition, id.Node, id.Namespace, id.Service)
}
type NodeID struct {
Name string `json:",omitempty"`
Partition string `json:",omitempty"`
@ -74,14 +39,14 @@ func (id NodeID) TFString() string {
return id.ACLString()
}
type ServiceID struct {
type ID struct {
Name string `json:",omitempty"`
Namespace string `json:",omitempty"`
Partition string `json:",omitempty"`
}
func NewServiceID(name, namespace, partition string) ServiceID {
id := ServiceID{
func NewID(name, namespace, partition string) ID {
id := ID{
Name: name,
Namespace: namespace,
Partition: partition,
@ -90,7 +55,7 @@ func NewServiceID(name, namespace, partition string) ServiceID {
return id
}
func (id ServiceID) Less(other ServiceID) bool {
func (id ID) Less(other ID) bool {
if id.Partition != other.Partition {
return id.Partition < other.Partition
}
@ -100,32 +65,32 @@ func (id ServiceID) Less(other ServiceID) bool {
return id.Name < other.Name
}
func (id *ServiceID) Normalize() {
func (id *ID) Normalize() {
id.Namespace = NamespaceOrDefault(id.Namespace)
id.Partition = PartitionOrDefault(id.Partition)
}
func (id ServiceID) String() string {
func (id ID) String() string {
return fmt.Sprintf("%s/%s/%s", id.Partition, id.Namespace, id.Name)
}
func (id ServiceID) ACLString() string {
func (id ID) ACLString() string {
return fmt.Sprintf("%s--%s--%s", id.Partition, id.Namespace, id.Name)
}
func (id ServiceID) TFString() string {
func (id ID) TFString() string {
return id.ACLString()
}
func (id ServiceID) PartitionOrDefault() string {
func (id ID) PartitionOrDefault() string {
return PartitionOrDefault(id.Partition)
}
func (id ServiceID) NamespaceOrDefault() string {
func (id ID) NamespaceOrDefault() string {
return NamespaceOrDefault(id.Namespace)
}
func (id ServiceID) QueryOptions() *api.QueryOptions {
func (id ID) QueryOptions() *api.QueryOptions {
return &api.QueryOptions{
Partition: DefaultToEmpty(id.Partition),
Namespace: DefaultToEmpty(id.Namespace),
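For reference, a quick sketch of the renamed ID type in use, assuming NamespaceOrDefault and PartitionOrDefault fall back to "default":

id := topology.NewID("static-server", "", "") // normalized on construction
fmt.Println(id.String())    // default/default/static-server
fmt.Println(id.ACLString()) // default--default--static-server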

View File

@ -0,0 +1,43 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package topology
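// This file keeps the pre-rename v1 identifiers alive as deprecated aliases
// and wrappers so existing callers compile unchanged while they migrate.
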
// Deprecated: use SortedWorkloads instead.
func (n *Node) SortedServices() []*Workload {
return n.SortedWorkloads()
}
// Deprecated: use mapifyWorkloads instead.
func mapifyServices(services []*Workload) map[ServiceID]*Workload {
return mapifyWorkloads(services)
}
// Deprecated: use WorkloadByID instead.
func (c *Cluster) ServiceByID(nid NodeID, sid ServiceID) *Workload {
return c.WorkloadByID(nid, sid)
}
// Deprecated: use WorkloadsByID instead.
func (c *Cluster) ServicesByID(sid ServiceID) []*Workload {
return c.WorkloadsByID(sid)
}
// Deprecated: use WorkloadByID instead.
func (n *Node) ServiceByID(sid ServiceID) *Workload {
return n.WorkloadByID(sid)
}
// Deprecated: use Workload instead.
type Service = Workload
// Deprecated: use ID instead.
type ServiceID = ID
// Deprecated: use NewID instead.
func NewServiceID(name, namespace, partition string) ID {
return NewID(name, namespace, partition)
}
// Deprecated: use Destination instead.
type Upstream = Destination
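A sketch of how call sites written against the old names keep compiling through these aliases (the node value and IDs are illustrative):

var _ *topology.Service = (*topology.Workload)(nil)   // same type via alias
sid := topology.NewServiceID("static-client", "", "") // same as NewID
wrk := node.ServiceByID(sid)                          // forwards to node.WorkloadByID(sid)
_ = wrk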

View File

@ -10,22 +10,24 @@ import (
)
// ComputeRelationships will analyze a full topology and generate all of the
// downstream/upstream information for all of them.
// caller/destination information for every workload in it.
func (t *Topology) ComputeRelationships() []Relationship {
var out []Relationship
for _, cluster := range t.Clusters {
for _, n := range cluster.Nodes {
for _, s := range n.Services {
for _, u := range s.Upstreams {
for _, w := range n.Workloads {
for _, dest := range w.Destinations {
out = append(out, Relationship{
Caller: s,
Upstream: u,
Caller: w,
Destination: dest,
Upstream: dest,
})
}
for _, u := range s.ImpliedUpstreams {
for _, dest := range w.ImpliedDestinations {
out = append(out, Relationship{
Caller: s,
Upstream: u,
Caller: w,
Destination: dest,
Upstream: dest,
})
}
}
@ -39,20 +41,20 @@ func (t *Topology) ComputeRelationships() []Relationship {
func RenderRelationships(ships []Relationship) string {
var buf bytes.Buffer
w := tabwriter.NewWriter(&buf, 0, 0, 3, ' ', tabwriter.Debug)
fmt.Fprintf(w, "DOWN\tnode\tservice\tport\tUP\tservice\t\n")
fmt.Fprintf(w, "CALLER\tnode\tservice\tport\tDEST\tservice\t\n")
for _, r := range ships {
suffix := ""
if r.Upstream.Implied {
if r.Destination.Implied {
suffix = " (implied)"
}
fmt.Fprintf(w,
"%s\t%s\t%s\t%d\t%s\t%s\t\n",
r.downCluster(),
r.callingCluster(),
r.Caller.Node.ID().String(),
r.Caller.ID.String(),
r.Upstream.LocalPort,
r.upCluster(),
r.Upstream.ID.String()+suffix,
r.Destination.LocalPort,
r.destinationCluster(),
r.Destination.ID.String()+suffix,
)
}
fmt.Fprintf(w, "\t\t\t\t\t\t\n")
@ -62,31 +64,34 @@ func RenderRelationships(ships []Relationship) string {
}
type Relationship struct {
Caller *Service
Upstream *Upstream
Caller *Workload
Destination *Destination
// Deprecated: use Destination instead.
Upstream *Destination
}
func (r Relationship) String() string {
suffix := ""
if r.Upstream.PortName != "" {
suffix = " port " + r.Upstream.PortName
if r.Destination.PortName != "" {
suffix = " port " + r.Destination.PortName
}
return fmt.Sprintf(
"%s on %s in %s via :%d => %s in %s%s",
r.Caller.ID.String(),
r.Caller.Node.ID().String(),
r.downCluster(),
r.Upstream.LocalPort,
r.Upstream.ID.String(),
r.upCluster(),
r.callingCluster(),
r.Destination.LocalPort,
r.Destination.ID.String(),
r.destinationCluster(),
suffix,
)
}
func (r Relationship) downCluster() string {
func (r Relationship) callingCluster() string {
return r.Caller.Node.Cluster
}
func (r Relationship) upCluster() string {
return r.Upstream.Cluster
func (r Relationship) destinationCluster() string {
return r.Destination.Cluster
}
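A sketch of consuming the relationships, assuming a compiled *Topology in topo:

ships := topo.ComputeRelationships()
fmt.Println(topology.RenderRelationships(ships)) // tabular CALLER/DEST listing
for _, r := range ships {
	fmt.Println(r.String()) // "<caller> on <node> in <cluster> via :<port> => <dest> in <cluster>"
}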

View File

@ -246,7 +246,7 @@ type Cluster struct {
// Use of this is optional. If you elect not to use it, then v2 Services
// definitions are inferred from the list of service instances defined on
// the nodes in this cluster.
Services map[ServiceID]*pbcatalog.Service `json:"omitempty"`
Services map[ID]*pbcatalog.Service `json:",omitempty"`
// Nodes is the definition of the nodes (agent-less and agent-ful).
Nodes []*Node
@ -410,26 +410,18 @@ func (c *Cluster) SortedNodes() []*Node {
return out
}
func (c *Cluster) FindService(id NodeServiceID) *Service {
func (c *Cluster) WorkloadByID(nid NodeID, sid ID) *Workload {
return c.NodeByID(nid).WorkloadByID(sid)
}
func (c *Cluster) WorkloadsByID(id ID) []*Workload {
id.Normalize()
nid := id.NodeID()
sid := id.ServiceID()
return c.ServiceByID(nid, sid)
}
func (c *Cluster) ServiceByID(nid NodeID, sid ServiceID) *Service {
return c.NodeByID(nid).ServiceByID(sid)
}
func (c *Cluster) ServicesByID(sid ServiceID) []*Service {
sid.Normalize()
var out []*Service
var out []*Workload
for _, n := range c.Nodes {
for _, svc := range n.Services {
if svc.ID == sid {
out = append(out, svc)
for _, wrk := range n.Workloads {
if wrk.ID == id {
out = append(out, wrk)
}
}
}
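For example, the renamed lookup helpers in use (node and workload IDs illustrative; WorkloadByID panics if nothing matches):

one := cluster.WorkloadByID(
	topology.NewNodeID("dc1-client1", "default"),
	topology.NewID("static-client", "", ""),
)
all := cluster.WorkloadsByID(topology.NewID("static-client", "", "")) // every matching instance
_, _ = one, all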
@ -504,7 +496,9 @@ type Node struct {
Disabled bool `json:",omitempty"`
Addresses []*Address
Services []*Service
Workloads []*Workload
// Deprecated: use Workloads instead.
Services []*Workload
// denormalized at topology compile
Cluster string
@ -663,9 +657,9 @@ func (n *Node) IsDataplane() bool {
return n.Kind == NodeKindDataplane
}
func (n *Node) SortedServices() []*Service {
var out []*Service
out = append(out, n.Services...)
func (n *Node) SortedWorkloads() []*Workload {
var out []*Workload
out = append(out, n.Workloads...)
sort.Slice(out, func(i, j int) bool {
mi := out[i].IsMeshGateway
mj := out[j].IsMeshGateway
@ -680,7 +674,7 @@ func (n *Node) SortedServices() []*Service {
}
func (n *Node) NeedsTransparentProxy() bool {
for _, svc := range n.Services {
for _, svc := range n.Workloads {
if svc.EnableTransparentProxy {
return true
}
@ -705,26 +699,21 @@ func (n *Node) DigestExposedPorts(ports map[int]int) bool {
))
}
}
for _, svc := range n.Services {
for _, svc := range n.Workloads {
svc.DigestExposedPorts(ports)
}
return true
}
func (n *Node) ServiceByID(sid ServiceID) *Service {
sid.Normalize()
for _, svc := range n.Services {
if svc.ID == sid {
return svc
func (n *Node) WorkloadByID(id ID) *Workload {
id.Normalize()
for _, wrk := range n.Workloads {
if wrk.ID == id {
return wrk
}
}
panic("service not found: " + sid.String())
}
type ServiceAndNode struct {
Service *Service
Node *Node
panic("workload not found: " + id.String())
}
// Protocol is a convenience function to use when authoring topology configs.
@ -753,9 +742,8 @@ type Port struct {
ActualProtocol pbcatalog.Protocol `json:",omitempty"`
}
// TODO(rb): really this should now be called "workload" or "instance"
type Service struct {
ID ServiceID
type Workload struct {
ID ID
Image string
// Port is the v1 single-port of this service.
@ -802,11 +790,16 @@ type Service struct {
Command []string `json:",omitempty"` // optional
Env []string `json:",omitempty"` // optional
EnableTransparentProxy bool `json:",omitempty"`
DisableServiceMesh bool `json:",omitempty"`
IsMeshGateway bool `json:",omitempty"`
Upstreams []*Upstream `json:",omitempty"`
ImpliedUpstreams []*Upstream `json:",omitempty"`
EnableTransparentProxy bool `json:",omitempty"`
DisableServiceMesh bool `json:",omitempty"`
IsMeshGateway bool `json:",omitempty"`
Destinations []*Destination `json:",omitempty"`
ImpliedDestinations []*Destination `json:",omitempty"`
// Deprecated: use Destinations instead.
Upstreams []*Destination `json:",omitempty"`
// Deprecated: use ImpliedDestinations instead.
ImpliedUpstreams []*Destination `json:",omitempty"`
// denormalized at topology compile
Node *Node `json:"-"`
@ -814,113 +807,123 @@ type Service struct {
Workload string `json:"-"`
}
func (s *Service) ExposedPort(name string) int {
if s.Node == nil {
func (w *Workload) ExposedPort(name string) int {
if w.Node == nil {
panic("ExposedPort cannot be called until after Compile")
}
var internalPort int
if name == "" {
internalPort = s.Port
internalPort = w.Port
} else {
port, ok := s.Ports[name]
port, ok := w.Ports[name]
if !ok {
panic("port with name " + name + " not present on service")
}
internalPort = port.Number
}
return s.Node.ExposedPort(internalPort)
return w.Node.ExposedPort(internalPort)
}
func (s *Service) PortOrDefault(name string) int {
if len(s.Ports) > 0 {
return s.Ports[name].Number
func (w *Workload) PortOrDefault(name string) int {
if len(w.Ports) > 0 {
return w.Ports[name].Number
}
return s.Port
return w.Port
}
func (s *Service) IsV2() bool {
return s.NodeVersion == NodeVersionV2
func (w *Workload) IsV2() bool {
return w.NodeVersion == NodeVersionV2
}
func (s *Service) IsV1() bool {
return !s.IsV2()
func (w *Workload) IsV1() bool {
return !w.IsV2()
}
func (s *Service) inheritFromExisting(existing *Service) {
s.ExposedEnvoyAdminPort = existing.ExposedEnvoyAdminPort
func (w *Workload) inheritFromExisting(existing *Workload) {
w.ExposedEnvoyAdminPort = existing.ExposedEnvoyAdminPort
}
func (s *Service) ports() []int {
func (w *Workload) ports() []int {
var out []int
if len(s.Ports) > 0 {
if len(w.Ports) > 0 {
seen := make(map[int]struct{})
for _, port := range s.Ports {
for _, port := range w.Ports {
if _, ok := seen[port.Number]; !ok {
// It's totally fine to expose the same port twice in a workload.
seen[port.Number] = struct{}{}
out = append(out, port.Number)
}
}
} else if s.Port > 0 {
out = append(out, s.Port)
} else if w.Port > 0 {
out = append(out, w.Port)
}
if s.EnvoyAdminPort > 0 {
out = append(out, s.EnvoyAdminPort)
if w.EnvoyAdminPort > 0 {
out = append(out, w.EnvoyAdminPort)
}
if s.EnvoyPublicListenerPort > 0 {
out = append(out, s.EnvoyPublicListenerPort)
if w.EnvoyPublicListenerPort > 0 {
out = append(out, w.EnvoyPublicListenerPort)
}
for _, u := range s.Upstreams {
if u.LocalPort > 0 {
out = append(out, u.LocalPort)
for _, dest := range w.Destinations {
if dest.LocalPort > 0 {
out = append(out, dest.LocalPort)
}
}
return out
}
func (s *Service) HasCheck() bool {
return s.CheckTCP != "" || s.CheckHTTP != ""
func (w *Workload) HasCheck() bool {
return w.CheckTCP != "" || w.CheckHTTP != ""
}
func (s *Service) DigestExposedPorts(ports map[int]int) {
if s.EnvoyAdminPort > 0 {
s.ExposedEnvoyAdminPort = ports[s.EnvoyAdminPort]
func (w *Workload) DigestExposedPorts(ports map[int]int) {
if w.EnvoyAdminPort > 0 {
w.ExposedEnvoyAdminPort = ports[w.EnvoyAdminPort]
} else {
s.ExposedEnvoyAdminPort = 0
w.ExposedEnvoyAdminPort = 0
}
}
func (s *Service) Validate() error {
if s.ID.Name == "" {
func (w *Workload) Validate() error {
if w.ID.Name == "" {
return fmt.Errorf("service name is required")
}
if s.Image == "" && !s.IsMeshGateway {
if w.Image == "" && !w.IsMeshGateway {
return fmt.Errorf("service image is required")
}
if s.IsV2() {
if len(s.Ports) > 0 && s.Port > 0 {
if len(w.Upstreams) > 0 {
w.Destinations = append(w.Destinations, w.Upstreams...)
w.Upstreams = nil
}
if len(w.ImpliedUpstreams) > 0 {
w.ImpliedDestinations = append(w.ImpliedDestinations, w.ImpliedUpstreams...)
w.ImpliedUpstreams = nil
}
if w.IsV2() {
if len(w.Ports) > 0 && w.Port > 0 {
return fmt.Errorf("cannot specify both singleport and multiport on service in v2")
}
if s.Port > 0 {
s.Ports = map[string]*Port{
if w.Port > 0 {
w.Ports = map[string]*Port{
"legacy": {
Number: s.Port,
Number: w.Port,
Protocol: "tcp",
},
}
s.Port = 0
w.Port = 0
}
if !s.DisableServiceMesh && s.EnvoyPublicListenerPort > 0 {
s.Ports["mesh"] = &Port{
Number: s.EnvoyPublicListenerPort,
if !w.DisableServiceMesh && w.EnvoyPublicListenerPort > 0 {
w.Ports["mesh"] = &Port{
Number: w.EnvoyPublicListenerPort,
Protocol: "mesh",
}
}
for name, port := range s.Ports {
for name, port := range w.Ports {
if port == nil {
return fmt.Errorf("cannot be nil")
}
@ -938,79 +941,79 @@ func (s *Service) Validate() error {
port.ActualProtocol = proto
}
} else {
if len(s.Ports) > 0 {
if len(w.Ports) > 0 {
return fmt.Errorf("cannot specify mulitport on service in v1")
}
if s.Port <= 0 {
if w.Port <= 0 {
return fmt.Errorf("service has invalid port")
}
if s.EnableTransparentProxy {
if w.EnableTransparentProxy {
return fmt.Errorf("tproxy does not work with v1 yet")
}
}
if s.DisableServiceMesh && s.IsMeshGateway {
if w.DisableServiceMesh && w.IsMeshGateway {
return fmt.Errorf("cannot disable service mesh and still run a mesh gateway")
}
if s.DisableServiceMesh && len(s.Upstreams) > 0 {
return fmt.Errorf("cannot disable service mesh and configure upstreams")
if w.DisableServiceMesh && len(w.Destinations) > 0 {
return fmt.Errorf("cannot disable service mesh and configure destinations")
}
if s.DisableServiceMesh && len(s.ImpliedUpstreams) > 0 {
return fmt.Errorf("cannot disable service mesh and configure implied upstreams")
if w.DisableServiceMesh && len(w.ImpliedDestinations) > 0 {
return fmt.Errorf("cannot disable service mesh and configure implied destinations")
}
if s.DisableServiceMesh && s.EnableTransparentProxy {
if w.DisableServiceMesh && w.EnableTransparentProxy {
return fmt.Errorf("cannot disable service mesh and activate tproxy")
}
if s.DisableServiceMesh {
if s.EnvoyAdminPort != 0 {
if w.DisableServiceMesh {
if w.EnvoyAdminPort != 0 {
return fmt.Errorf("cannot use envoy admin port without a service mesh")
}
} else {
if s.EnvoyAdminPort <= 0 {
if w.EnvoyAdminPort <= 0 {
return fmt.Errorf("envoy admin port is required")
}
}
for _, u := range s.Upstreams {
if u.ID.Name == "" {
return fmt.Errorf("upstream service name is required")
for _, dest := range w.Destinations {
if dest.ID.Name == "" {
return fmt.Errorf("destination service name is required")
}
if u.LocalPort <= 0 {
return fmt.Errorf("upstream local port is required")
if dest.LocalPort <= 0 {
return fmt.Errorf("destination local port is required")
}
if u.LocalAddress != "" {
ip := net.ParseIP(u.LocalAddress)
if dest.LocalAddress != "" {
ip := net.ParseIP(dest.LocalAddress)
if ip == nil {
return fmt.Errorf("upstream local address is invalid: %s", u.LocalAddress)
return fmt.Errorf("destination local address is invalid: %s", dest.LocalAddress)
}
}
if u.Implied {
if dest.Implied {
return fmt.Errorf("implied field cannot be set")
}
}
for _, u := range s.ImpliedUpstreams {
if u.ID.Name == "" {
return fmt.Errorf("implied upstream service name is required")
for _, dest := range w.ImpliedDestinations {
if dest.ID.Name == "" {
return fmt.Errorf("implied destination service name is required")
}
if u.LocalPort > 0 {
return fmt.Errorf("implied upstream local port cannot be set")
if dest.LocalPort > 0 {
return fmt.Errorf("implied destination local port cannot be set")
}
if u.LocalAddress != "" {
return fmt.Errorf("implied upstream local address cannot be set")
if dest.LocalAddress != "" {
return fmt.Errorf("implied destination local address cannot be set")
}
}
return nil
}
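Validate also doubles as the migration shim for the deprecated fields; a minimal sketch (image name hypothetical):

w := &topology.Workload{
	ID:             topology.NewID("static-client", "", ""),
	Image:          "fortio/fortio", // hypothetical
	Port:           8080,
	EnvoyAdminPort: 19000,
	Upstreams: []*topology.Destination{{ // deprecated spelling, still accepted
		ID:        topology.NewID("static-server", "", ""),
		LocalPort: 5000,
	}},
}
if err := w.Validate(); err == nil {
	fmt.Println(len(w.Destinations)) // 1: moved over from Upstreams
}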
type Upstream struct {
ID ServiceID
type Destination struct {
ID ID
LocalAddress string `json:",omitempty"` // defaults to 127.0.0.1
LocalPort int
Peer string `json:",omitempty"`
// PortName is the named port of this Upstream to route traffic to.
// PortName is the named port of this Destination to route traffic to.
//
// This only applies for multi-port (v2).
PortName string `json:",omitempty"`