mirror of https://github.com/status-im/consul.git
deployer: remove catalog/mesh v2 support (#21194)
- Low level plumbing for resources is still retained for now.
- Retain "Workload" terminology over "Service".
- Revert "Destination" terminology back to "Upstream".
- Remove TPROXY support as it only worked for v2.
parent 6d088db52b
commit 50b26aa56a
@@ -25,7 +25,7 @@ You can run the entire set of deployer integration tests using:

You can also run them one by one if you like:

    go test ./catalogv2 -run TestBasicL4ExplicitDestinations -v
    go test ./connect/ -run Test_Snapshot_Restore_Agentless -v

You can have the logs stream unbuffered directly to your terminal which can
help diagnose stuck tests that would otherwise need to fully timeout before the
@@ -65,26 +65,18 @@ These are comprised of 4 main parts:

- **Nodes**: A "box with ip address(es)". This should feel a bit like a VM or
  a Kubernetes Pod as an enclosing entity.

- **Workloads**: The list of service instances (v1) or workloads
  (v2) that will execute on the given node. v2 Services will
  be implied by similarly named workloads here unless opted
  out. This helps define a v1-compatible topology and
  repurpose it for v2 without reworking it.
- **Workloads**: The list of service instances that will execute on the given node.

- **Services** (v2): v2 Service definitions to define explicitly, in addition
  to the inferred ones.
- **InitialConfigEntries**: Config entries that should be created as
  part of the fixture and that make sense to
  include as part of the test definition, rather
  than something created during the test assertion
  phase.

- **InitialConfigEntries** (v1): Config entries that should be created as
  part of the fixture and that make sense to
  include as part of the test definition,
  rather than something created during the
  test assertion phase.

- **InitialResources** (v2): v2 Resources that should be created as part of
  the fixture and that make sense to include as
  part of the test definition, rather than
  something created during the test assertion
  phase.
- **InitialResources**: Resources that should be created as part of
  the fixture and that make sense to include as part of
  the test definition, rather than something created
  during the test assertion phase.

- **Peerings**: The peering relationships between Clusters to establish.
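To make the list above concrete, here is a small sketch of how those parts typically appear in a fixture. It is illustrative only: `Nodes`, `Workloads`, and `topology.ID` appear throughout this commit, while the exact shape of `topology.Cluster` and `InitialConfigEntries` is assumed from the bullet names.

```go
cluster := &topology.Cluster{
	Name: "dc1",
	Nodes: []*topology.Node{
		{
			Kind: topology.NodeKindDataplane,
			Name: "dc1-client1",
			Workloads: []*topology.Workload{
				{
					ID:   topology.ID{Name: "static-server"},
					Port: 8080,
				},
			},
		},
	},
	// Created as part of the fixture, before any test assertions run.
	InitialConfigEntries: []api.ConfigEntry{
		&api.ServiceConfigEntry{
			Kind:     api.ServiceDefaults,
			Name:     "static-server",
			Protocol: "http",
		},
	},
}
```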
@@ -102,15 +94,13 @@ a variety of axes:

- agentful (clients) vs agentless (dataplane)
- tenancies (partitions, namespaces)
- locally or across a peering
- catalog v1 or v2 object model

Since the topology is just a declarative struct, a test author could rewrite
any one of these attributes with a single field (such as `Node.Kind` or
`Node.Version`) and cause the identical test to run against the other
configuration. With the addition of a few `if enterprise {}` blocks and `for`
loops, a test author could easily write one test of a behavior and execute it
to cover agentless, agentful, non-default tenancy, and v1/v2 in a few extra
lines of code.
any one of these attributes with a single field (such as `Node.Kind`) and cause
the identical test to run against the other configuration. With the addition of
a few `if enterprise {}` blocks and `for` loops, a test author could easily
write one test of a behavior and execute it to cover agentless, agentful, and
non-default tenancy in a few extra lines of code.
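As a sketch of that point, re-running an agentful topology in agentless mode only requires rewriting the node kind before launch, using the `topology.NodeKindClient` and `topology.NodeKindDataplane` constants that appear elsewhere in this commit (the `cluster` value is the one from the earlier sketch):

```go
// Flip every agent node to a dataplane node and re-run the same test.
for _, node := range cluster.Nodes {
	if node.Kind == topology.NodeKindClient {
		node.Kind = topology.NodeKindDataplane
	}
}
```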

#### Non-optional security settings
@@ -197,12 +187,3 @@ and Envoy that you can create in your test:

    asserter := topoutil.NewAsserter(sp)

    asserter.UpstreamEndpointStatus(t, svc, clusterPrefix+".", "HEALTHY", 1)
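For context, a hedged sketch of how these asserter helpers are usually combined in a test body; `sp`, `staticClient`, `staticServerSID`, and `clusterPrefix` are assumed to come from the surrounding deployer test, as in the files changed below.

```go
asserter := topoutil.NewAsserter(sp)

// Fetch through the static-client's explicit upstream and check the header echo.
asserter.FortioFetch2HeaderEcho(t, staticClient, &topology.Upstream{
	ID:        staticServerSID,
	LocalPort: 5000,
})

// Then confirm Envoy reports one healthy endpoint for that upstream cluster.
asserter.UpstreamEndpointStatus(t, staticClient, clusterPrefix+".", "HEALTHY", 1)
```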
## Examples

- `catalogv2`
  - [Explicit L4 destinations](./catalogv2/explicit_destinations_test.go)
  - [Implicit L4 destinations](./catalogv2/implicit_destinations_test.go)
  - [Explicit L7 destinations with traffic splits](./catalogv2/explicit_destinations_l7_test.go)
- [`peering_commontopo`](./peering_commontopo)
  - A variety of extensive v1 Peering tests.
@@ -27,7 +27,7 @@ import (
// 1. The test spins up a one-server cluster with static-server and static-client.
// 2. A snapshot is taken and the cluster is restored from the snapshot
// 3. A new static-server replaces the old one
// 4. At the end, we assert the static-client's destination is updated with the
// 4. At the end, we assert the static-client's upstream is updated with the
// new static-server
func Test_Snapshot_Restore_Agentless(t *testing.T) {
t.Parallel()

@@ -89,7 +89,7 @@ func Test_Snapshot_Restore_Agentless(t *testing.T) {
"-http-port", "8080",
"-redirect-port", "-disabled",
},
Destinations: []*topology.Destination{
Upstreams: []*topology.Upstream{
{
ID: staticServerSID,
LocalPort: 5000,

@@ -153,7 +153,7 @@ func Test_Snapshot_Restore_Agentless(t *testing.T) {
topology.NewNodeID("dc1-client2", "default"),
staticClientSID,
)
asserter.FortioFetch2HeaderEcho(t, staticClient, &topology.Destination{
asserter.FortioFetch2HeaderEcho(t, staticClient, &topology.Upstream{
ID: staticServerSID,
LocalPort: 5000,
})

@@ -182,7 +182,7 @@ func Test_Snapshot_Restore_Agentless(t *testing.T) {
require.NoError(t, sp.Relaunch(cfg))

// Ensure the static-client connected to the new static-server
asserter.FortioFetch2HeaderEcho(t, staticClient, &topology.Destination{
asserter.FortioFetch2HeaderEcho(t, staticClient, &topology.Upstream{
ID: staticServerSID,
LocalPort: 5000,
})
@@ -30,8 +30,8 @@ type ac1BasicSuite struct {
sidClientHTTP topology.ID
nodeClientHTTP topology.NodeID

upstreamHTTP *topology.Destination
upstreamTCP *topology.Destination
upstreamHTTP *topology.Upstream
upstreamTCP *topology.Upstream
}

var ac1BasicSuites []sharedTopoSuite = []sharedTopoSuite{

@@ -65,7 +65,7 @@ func (s *ac1BasicSuite) setup(t *testing.T, ct *commonTopo) {
Name: prefix + "server-http",
Partition: partition,
}
upstreamHTTP := &topology.Destination{
upstreamHTTP := &topology.Upstream{
ID: topology.ID{
Name: httpServerSID.Name,
Partition: partition,

@@ -73,7 +73,7 @@ func (s *ac1BasicSuite) setup(t *testing.T, ct *commonTopo) {
LocalPort: 5001,
Peer: peer,
}
upstreamTCP := &topology.Destination{
upstreamTCP := &topology.Upstream{
ID: topology.ID{
Name: tcpServerSID.Name,
Partition: partition,

@@ -93,7 +93,7 @@ func (s *ac1BasicSuite) setup(t *testing.T, ct *commonTopo) {
clu.Datacenter,
sid,
func(s *topology.Workload) {
s.Destinations = []*topology.Destination{
s.Upstreams = []*topology.Upstream{
upstreamTCP,
upstreamHTTP,
}
@@ -7,9 +7,10 @@ import (
"fmt"
"testing"

"github.com/stretchr/testify/require"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/stretchr/testify/require"
)

type ac2DiscoChainSuite struct {

@@ -84,7 +85,7 @@ func (s *ac2DiscoChainSuite) setup(t *testing.T, ct *commonTopo) {
ct.AddServiceNode(clu, serviceExt{Workload: server})

// Define server as upstream for client
upstream := &topology.Destination{
upstream := &topology.Upstream{
ID: topology.ID{
Name: server.ID.Name,
Partition: partition, // TODO: iterate over all possible partitions

@@ -105,7 +106,7 @@ func (s *ac2DiscoChainSuite) setup(t *testing.T, ct *commonTopo) {
clu.Datacenter,
clientSID,
func(s *topology.Workload) {
s.Destinations = []*topology.Destination{
s.Upstreams = []*topology.Upstream{
upstream,
}
},

@@ -164,8 +165,8 @@ func (s *ac2DiscoChainSuite) test(t *testing.T, ct *commonTopo) {
require.Len(t, svcs, 1, "expected exactly one client in datacenter")

client := svcs[0]
require.Len(t, client.Destinations, 1, "expected exactly one upstream for client")
u := client.Destinations[0]
require.Len(t, client.Upstreams, 1, "expected exactly one upstream for client")
u := client.Upstreams[0]

t.Run("peered upstream exists in catalog", func(t *testing.T) {
t.Parallel()

@@ -176,7 +177,7 @@ func (s *ac2DiscoChainSuite) test(t *testing.T, ct *commonTopo) {

t.Run("peered upstream endpoint status is healthy", func(t *testing.T) {
t.Parallel()
ct.Assert.DestinationEndpointStatus(t, client, peerClusterPrefix(u), "HEALTHY", 1)
ct.Assert.UpstreamEndpointStatus(t, client, peerClusterPrefix(u), "HEALTHY", 1)
})

t.Run("response contains header injected by splitter", func(t *testing.T) {

@@ -196,7 +197,7 @@ func (s *ac2DiscoChainSuite) test(t *testing.T, ct *commonTopo) {
// func (s *ResourceGenerator) getTargetClusterName
//
// and connect/sni.go
func peerClusterPrefix(u *topology.Destination) string {
func peerClusterPrefix(u *topology.Upstream) string {
if u.Peer == "" {
panic("upstream is not from a peer")
}
@@ -11,13 +11,15 @@ import (
"testing"
"time"

"github.com/itchyny/gojq"
"github.com/stretchr/testify/require"

"github.com/hashicorp/go-cleanhttp"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/go-cleanhttp"
"github.com/itchyny/gojq"
"github.com/stretchr/testify/require"
)

var ac3SvcDefaultsSuites []sharedTopoSuite = []sharedTopoSuite{

@@ -40,7 +42,7 @@ type ac3SvcDefaultsSuite struct {
sidClient topology.ID
nodeClient topology.NodeID

upstream *topology.Destination
upstream *topology.Upstream
}

func (s *ac3SvcDefaultsSuite) testName() string {

@@ -60,7 +62,7 @@ func (s *ac3SvcDefaultsSuite) setup(t *testing.T, ct *commonTopo) {
Name: "ac3-server",
Partition: partition,
}
upstream := &topology.Destination{
upstream := &topology.Upstream{
ID: topology.ID{
Name: serverSID.Name,
Partition: partition,

@@ -78,7 +80,7 @@ func (s *ac3SvcDefaultsSuite) setup(t *testing.T, ct *commonTopo) {
clu.Datacenter,
sid,
func(s *topology.Workload) {
s.Destinations = []*topology.Destination{
s.Upstreams = []*topology.Upstream{
upstream,
}
},

@@ -183,7 +185,7 @@ func (s *ac3SvcDefaultsSuite) test(t *testing.T, ct *commonTopo) {
// TODO: what is default? namespace? partition?
clusterName := fmt.Sprintf("%s.default.%s.external", s.upstream.ID.Name, s.upstream.Peer)
nonceStatus := http.StatusInsufficientStorage
url507 := fmt.Sprintf("http://localhost:%d/fortio/fetch2?url=%s", svcClient.ExposedPort(""),
url507 := fmt.Sprintf("http://localhost:%d/fortio/fetch2?url=%s", svcClient.ExposedPort(),
url.QueryEscape(fmt.Sprintf("http://localhost:%d/?status=%d", s.upstream.LocalPort, nonceStatus)),
)

@@ -219,7 +221,7 @@ func (s *ac3SvcDefaultsSuite) test(t *testing.T, ct *commonTopo) {
require.True(r, resultAsBool)
})

url200 := fmt.Sprintf("http://localhost:%d/fortio/fetch2?url=%s", svcClient.ExposedPort(""),
url200 := fmt.Sprintf("http://localhost:%d/fortio/fetch2?url=%s", svcClient.ExposedPort(),
url.QueryEscape(fmt.Sprintf("http://localhost:%d/", s.upstream.LocalPort)),
)
retry.RunWith(&retry.Timer{Timeout: time.Minute * 1, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
@@ -9,10 +9,12 @@ import (
"net/url"
"testing"

"github.com/stretchr/testify/require"

"github.com/hashicorp/go-cleanhttp"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/hashicorp/go-cleanhttp"
"github.com/stretchr/testify/require"
)

type ac4ProxyDefaultsSuite struct {

@@ -24,7 +26,7 @@ type ac4ProxyDefaultsSuite struct {
serverSID topology.ID
clientSID topology.ID
upstream *topology.Destination
upstream *topology.Upstream
}

var ac4ProxyDefaultsSuites []sharedTopoSuite = []sharedTopoSuite{

@@ -54,7 +56,7 @@ func (s *ac4ProxyDefaultsSuite) setup(t *testing.T, ct *commonTopo) {
Partition: partition,
}
// Define server as upstream for client
upstream := &topology.Destination{
upstream := &topology.Upstream{
ID: serverSID,
LocalPort: 5000,
Peer: peer,

@@ -70,7 +72,7 @@ func (s *ac4ProxyDefaultsSuite) setup(t *testing.T, ct *commonTopo) {
clu.Datacenter,
clientSID,
func(s *topology.Workload) {
s.Destinations = []*topology.Destination{
s.Upstreams = []*topology.Upstream{
upstream,
}
},

@@ -165,11 +167,11 @@ func (s *ac4ProxyDefaultsSuite) test(t *testing.T, ct *commonTopo) {
dcSvcs := dc.WorkloadsByID(s.clientSID)
require.Len(t, dcSvcs, 1, "expected exactly one client")
client = dcSvcs[0]
require.Len(t, client.Destinations, 1, "expected exactly one upstream for client")
require.Len(t, client.Upstreams, 1, "expected exactly one upstream for client")

server := dc.WorkloadsByID(s.serverSID)
require.Len(t, server, 1, "expected exactly one server")
require.Len(t, server[0].Destinations, 0, "expected no upstream for server")
require.Len(t, server[0].Upstreams, 0, "expected no upstream for server")
})

t.Run("peered upstream exists in catalog", func(t *testing.T) {

@@ -179,11 +181,11 @@ func (s *ac4ProxyDefaultsSuite) test(t *testing.T, ct *commonTopo) {
})

t.Run("HTTP service fails due to connection timeout", func(t *testing.T) {
url504 := fmt.Sprintf("http://localhost:%d/fortio/fetch2?url=%s", client.ExposedPort(""),
url504 := fmt.Sprintf("http://localhost:%d/fortio/fetch2?url=%s", client.ExposedPort(),
url.QueryEscape(fmt.Sprintf("http://localhost:%d/?delay=1000ms", s.upstream.LocalPort)),
)

url200 := fmt.Sprintf("http://localhost:%d/fortio/fetch2?url=%s", client.ExposedPort(""),
url200 := fmt.Sprintf("http://localhost:%d/fortio/fetch2?url=%s", client.ExposedPort(),
url.QueryEscape(fmt.Sprintf("http://localhost:%d/", s.upstream.LocalPort)),
)
@@ -7,11 +7,12 @@ import (
"fmt"
"testing"

"github.com/stretchr/testify/require"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/sprawl"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/stretchr/testify/require"
)

type ac6FailoversSuite struct {

@@ -347,8 +348,8 @@ func (s *ac6FailoversSuite) setup(t *testing.T, ct *commonTopo) {
nearClu.Datacenter,
clientSID,
func(s *topology.Workload) {
// Destination per partition
s.Destinations = []*topology.Destination{
// Upstream per partition
s.Upstreams = []*topology.Upstream{
{
ID: topology.ID{
Name: nearServerSID.Name,

@@ -437,8 +438,8 @@ func (s *ac6FailoversSuite) test(t *testing.T, ct *commonTopo) {
require.Len(t, svcs, 1, "expected exactly one client in datacenter")

client := svcs[0]
require.Len(t, client.Destinations, 1, "expected one upstream for client")
upstream := client.Destinations[0]
require.Len(t, client.Upstreams, 1, "expected one upstream for client")
upstream := client.Upstreams[0]

fmt.Println("### preconditions")
@@ -8,9 +8,10 @@ import (
"strings"
"testing"

"github.com/stretchr/testify/require"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/stretchr/testify/require"
)

// TestRotateGW ensures that peered services continue to be able to talk to their

@@ -27,7 +28,7 @@ type suiteRotateGW struct {
sidClient topology.ID
nodeClient topology.NodeID

upstream *topology.Destination
upstream *topology.Upstream

newMGWNodeName string
}

@@ -70,7 +71,7 @@ func (s *suiteRotateGW) setup(t *testing.T, ct *commonTopo) {
)

// Make clients which have server upstreams
upstream := &topology.Destination{
upstream := &topology.Upstream{
ID: topology.ID{
Name: server.ID.Name,
Partition: partition,

@@ -88,7 +89,7 @@ func (s *suiteRotateGW) setup(t *testing.T, ct *commonTopo) {
Partition: partition,
},
func(s *topology.Workload) {
s.Destinations = []*topology.Destination{
s.Upstreams = []*topology.Upstream{
upstream,
}
},
@@ -8,13 +8,14 @@ import (
"testing"
"time"

"github.com/mitchellh/copystructure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/mitchellh/copystructure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

// TestAC7_2RotateLeader ensures that after a leader rotation, information continues to replicate to peers

@@ -29,7 +30,7 @@ type ac7_2RotateLeaderSuite struct {
sidClient topology.ID
nodeClient topology.NodeID

upstream *topology.Destination
upstream *topology.Upstream
}

func TestAC7_2RotateLeader(t *testing.T) {

@@ -71,7 +72,7 @@ func (s *ac7_2RotateLeaderSuite) setup(t *testing.T, ct *commonTopo) {
)

// Make clients which have server upstreams
upstream := &topology.Destination{
upstream := &topology.Upstream{
ID: topology.ID{
Name: server.ID.Name,
Partition: partition,

@@ -87,7 +88,7 @@ func (s *ac7_2RotateLeaderSuite) setup(t *testing.T, ct *commonTopo) {
Partition: partition,
},
func(s *topology.Workload) {
s.Destinations = []*topology.Destination{
s.Upstreams = []*topology.Upstream{
upstream,
}
},
@@ -9,16 +9,16 @@ import (
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/test-integ/topoutil"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/sprawl"
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/hashicorp/consul/test-integ/topoutil"
)

// commonTopo helps create a shareable topology configured to represent

@@ -509,7 +509,7 @@ func NewFortioServiceWithDefaults(
sid topology.ID,
mut func(s *topology.Workload),
) *topology.Workload {
return topoutil.NewFortioServiceWithDefaults(cluster, sid, topology.NodeVersionV1, mut)
return topoutil.NewFortioServiceWithDefaults(cluster, sid, mut)
}

func newTopologyMeshGatewaySet(
@@ -6,16 +6,6 @@ package topoutil
import (
"encoding/json"
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"io"
"net/http"
"net/url"

@@ -24,6 +14,18 @@ import (
"strings"
"testing"
"time"

"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/topology"
)

// Asserter is a utility to help in reducing boilerplate in invoking test

@@ -33,7 +35,7 @@ import (
// ip/ports if there is only one port that makes sense for the assertion (such
// as use of the envoy admin port 19000).
//
// If it's up to the test (like picking a destination) leave port as an argument
// If it's up to the test (like picking an upstream) leave port as an argument
// but still take the service and use that to grab the local ip from the
// topology.Node.
type Asserter struct {

@@ -82,12 +84,12 @@ func (a *Asserter) httpClientFor(cluster string) (*http.Client, error) {
return client, nil
}

// DestinationEndpointStatus validates that proxy was configured with provided clusterName in the healthStatus
// UpstreamEndpointStatus validates that proxy was configured with provided clusterName in the healthStatus
//
// Exposes libassert.UpstreamEndpointStatus for use against a Sprawl.
//
// NOTE: this doesn't take a port b/c you always want to use the envoy admin port.
func (a *Asserter) DestinationEndpointStatus(
func (a *Asserter) UpstreamEndpointStatus(
t *testing.T,
workload *topology.Workload,
clusterName string,

@@ -169,7 +171,7 @@ func (a *Asserter) AssertEnvoyHTTPrbacFiltersContainIntentions(t *testing.T, wor
//
// Exposes libassert.HTTPServiceEchoes for use against a Sprawl.
//
// NOTE: this takes a port b/c you may want to reach this via your choice of destination.
// NOTE: this takes a port b/c you may want to reach this via your choice of upstream.
func (a *Asserter) HTTPServiceEchoes(
t *testing.T,
workload *topology.Workload,

@@ -193,7 +195,7 @@ func (a *Asserter) HTTPServiceEchoes(
//
// Exposes libassert.HTTPServiceEchoes for use against a Sprawl.
//
// NOTE: this takes a port b/c you may want to reach this via your choice of destination.
// NOTE: this takes a port b/c you may want to reach this via your choice of upstream.
func (a *Asserter) HTTPServiceEchoesResHeader(
t *testing.T,
workload *topology.Workload,

@@ -266,27 +268,27 @@ type testingT interface {
Helper()
}

// does a fortio /fetch2 to the given fortio service, targetting the given destination. Returns
// does a fortio /fetch2 to the given fortio service, targetting the given upstream. Returns
// the body, and response with response.Body already Closed.
//
// We treat 400, 503, and 504s as retryable errors
func (a *Asserter) fortioFetch2Destination(
func (a *Asserter) fortioFetch2Upstream(
t testutil.TestingTB,
client *http.Client,
addr string,
dest *topology.Destination,
us *topology.Upstream,
path string,
) (body []byte, res *http.Response) {
t.Helper()

err, res := getFortioFetch2DestinationResponse(t, client, addr, dest, path, nil)
err, res := getFortioFetch2UpstreamResponse(t, client, addr, us, path, nil)
require.NoError(t, err)
defer res.Body.Close()

// not sure when these happen, suspect it's when the mesh gateway in the peer is not yet ready
require.NotEqual(t, http.StatusServiceUnavailable, res.StatusCode)
require.NotEqual(t, http.StatusGatewayTimeout, res.StatusCode)
// not sure when this happens, suspect it's when envoy hasn't configured the local destination yet
// not sure when this happens, suspect it's when envoy hasn't configured the local upstream yet
require.NotEqual(t, http.StatusBadRequest, res.StatusCode)
body, err = io.ReadAll(res.Body)
require.NoError(t, err)

@@ -294,19 +296,8 @@ func (a *Asserter) fortioFetch2Destination(
return body, res
}

func getFortioFetch2DestinationResponse(t testutil.TestingTB, client *http.Client, addr string, dest *topology.Destination, path string, headers map[string]string) (error, *http.Response) {
var actualURL string
if dest.Implied {
actualURL = fmt.Sprintf("http://%s--%s--%s.virtual.consul:%d/%s",
dest.ID.Name,
dest.ID.Namespace,
dest.ID.Partition,
dest.VirtualPort,
path,
)
} else {
actualURL = fmt.Sprintf("http://localhost:%d/%s", dest.LocalPort, path)
}
func getFortioFetch2UpstreamResponse(t testutil.TestingTB, client *http.Client, addr string, us *topology.Upstream, path string, headers map[string]string) (error, *http.Response) {
actualURL := fmt.Sprintf("http://localhost:%d/%s", us.LocalPort, path)

url := fmt.Sprintf("http://%s/fortio/fetch2?url=%s", addr,
url.QueryEscape(actualURL),

@@ -324,20 +315,20 @@ func getFortioFetch2DestinationResponse(t testutil.TestingTB, client *http.Clien
}

// uses the /fortio/fetch2 endpoint to do a header echo check against an
// destination fortio
func (a *Asserter) FortioFetch2HeaderEcho(t *testing.T, fortioWrk *topology.Workload, dest *topology.Destination) {
// upstream fortio
func (a *Asserter) FortioFetch2HeaderEcho(t *testing.T, fortioWrk *topology.Workload, us *topology.Upstream) {
const kPassphrase = "x-passphrase"
const passphrase = "hello"
path := (fmt.Sprintf("/?header=%s:%s", kPassphrase, passphrase))

var (
node = fortioWrk.Node
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioWrk.PortOrDefault(dest.PortName))
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioWrk.Port)
client = a.mustGetHTTPClient(t, node.Cluster)
)

retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
_, res := a.fortioFetch2Destination(r, client, addr, dest, path)
_, res := a.fortioFetch2Upstream(r, client, addr, us, path)
require.Equal(r, http.StatusOK, res.StatusCode)
v := res.Header.Get(kPassphrase)
require.Equal(r, passphrase, v)

@@ -345,12 +336,12 @@ func (a *Asserter) FortioFetch2HeaderEcho(t *testing.T, fortioWrk *topology.Work
}

// similar to libassert.AssertFortioName,
// uses the /fortio/fetch2 endpoint to hit the debug endpoint on the destination,
// uses the /fortio/fetch2 endpoint to hit the debug endpoint on the upstream,
// and assert that the FORTIO_NAME == name
func (a *Asserter) FortioFetch2FortioName(
t *testing.T,
fortioWrk *topology.Workload,
dest *topology.Destination,
us *topology.Upstream,
clusterName string,
sid topology.ID,
) {

@@ -358,7 +349,7 @@ func (a *Asserter) FortioFetch2FortioName(

var (
node = fortioWrk.Node
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioWrk.PortOrDefault(dest.PortName))
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioWrk.Port)
client = a.mustGetHTTPClient(t, node.Cluster)
)

@@ -366,7 +357,7 @@ func (a *Asserter) FortioFetch2FortioName(
path := "/debug?env=dump"

retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
body, res := a.fortioFetch2Destination(r, client, addr, dest, path)
body, res := a.fortioFetch2Upstream(r, client, addr, us, path)

require.Equal(r, http.StatusOK, res.StatusCode)

@@ -378,24 +369,24 @@ func (a *Asserter) FortioFetch2FortioName(
})
}

func (a *Asserter) FortioFetch2ServiceUnavailable(t *testing.T, fortioWrk *topology.Workload, dest *topology.Destination) {
func (a *Asserter) FortioFetch2ServiceUnavailable(t *testing.T, fortioWrk *topology.Workload, us *topology.Upstream) {
const kPassphrase = "x-passphrase"
const passphrase = "hello"
path := (fmt.Sprintf("/?header=%s:%s", kPassphrase, passphrase))
a.FortioFetch2ServiceStatusCodes(t, fortioWrk, dest, path, nil, []int{http.StatusServiceUnavailable})
a.FortioFetch2ServiceStatusCodes(t, fortioWrk, us, path, nil, []int{http.StatusServiceUnavailable})
}

// FortioFetch2ServiceStatusCodes uses the /fortio/fetch2 endpoint to do a header echo check against a destination
// FortioFetch2ServiceStatusCodes uses the /fortio/fetch2 endpoint to do a header echo check against a upstream
// fortio and asserts that the returned status code matches the desired one(s)
func (a *Asserter) FortioFetch2ServiceStatusCodes(t *testing.T, fortioWrk *topology.Workload, dest *topology.Destination, path string, headers map[string]string, statuses []int) {
func (a *Asserter) FortioFetch2ServiceStatusCodes(t *testing.T, fortioWrk *topology.Workload, us *topology.Upstream, path string, headers map[string]string, statuses []int) {
var (
node = fortioWrk.Node
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioWrk.PortOrDefault(dest.PortName))
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioWrk.Port)
client = a.mustGetHTTPClient(t, node.Cluster)
)

retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
_, res := getFortioFetch2DestinationResponse(r, client, addr, dest, path, headers)
_, res := getFortioFetch2UpstreamResponse(r, client, addr, us, path, headers)
defer res.Body.Close()
require.Contains(r, statuses, res.StatusCode)
})
@@ -9,9 +9,10 @@ import (
"testing"
"time"

"github.com/stretchr/testify/require"

"github.com/hashicorp/consul/sdk/testutil/retry"
"github.com/hashicorp/consul/testing/deployer/topology"
"github.com/stretchr/testify/require"
)

// CheckBlankspaceNameViaHTTP calls a copy of blankspace and asserts it arrived

@@ -19,7 +20,7 @@ import (
func (a *Asserter) CheckBlankspaceNameViaHTTP(
t *testing.T,
workload *topology.Workload,
dest *topology.Destination,
us *topology.Upstream,
useHTTP2 bool,
path string,
clusterName string,

@@ -27,7 +28,7 @@ func (a *Asserter) CheckBlankspaceNameViaHTTP(
) {
t.Helper()

a.checkBlankspaceNameViaHTTPWithCallback(t, workload, dest, useHTTP2, path, 1, func(_ *retry.R) {}, func(r *retry.R, remoteName string) {
a.checkBlankspaceNameViaHTTPWithCallback(t, workload, us, useHTTP2, path, 1, func(_ *retry.R) {}, func(r *retry.R, remoteName string) {
require.Equal(r, fmt.Sprintf("%s::%s", clusterName, sid.String()), remoteName)
}, func(r *retry.R) {})
}

@@ -37,7 +38,7 @@ func (a *Asserter) CheckBlankspaceNameViaHTTP(
func (a *Asserter) CheckBlankspaceNameTrafficSplitViaHTTP(
t *testing.T,
workload *topology.Workload,
dest *topology.Destination,
us *topology.Upstream,
useHTTP2 bool,
path string,
expect map[string]int,

@@ -45,7 +46,7 @@ func (a *Asserter) CheckBlankspaceNameTrafficSplitViaHTTP(
t.Helper()

got := make(map[string]int)
a.checkBlankspaceNameViaHTTPWithCallback(t, workload, dest, useHTTP2, path, 100, func(_ *retry.R) {
a.checkBlankspaceNameViaHTTPWithCallback(t, workload, us, useHTTP2, path, 100, func(_ *retry.R) {
got = make(map[string]int)
}, func(_ *retry.R, name string) {
got[name]++

@@ -57,7 +58,7 @@ func (a *Asserter) CheckBlankspaceNameTrafficSplitViaHTTP(
func (a *Asserter) checkBlankspaceNameViaHTTPWithCallback(
t *testing.T,
workload *topology.Workload,
dest *topology.Destination,
us *topology.Upstream,
useHTTP2 bool,
path string,
count int,

@@ -69,7 +70,7 @@ func (a *Asserter) checkBlankspaceNameViaHTTPWithCallback(

var (
node = workload.Node
internalPort = workload.PortOrDefault(dest.PortName)
internalPort = workload.Port
addr = fmt.Sprintf("%s:%d", node.LocalAddress(), internalPort)
client = a.mustGetHTTPClient(t, node.Cluster)
)

@@ -85,18 +86,7 @@ func (a *Asserter) checkBlankspaceNameViaHTTPWithCallback(
client = EnableHTTP2(client)
}

var actualURL string
if dest.Implied {
actualURL = fmt.Sprintf("http://%s--%s--%s.virtual.consul:%d/%s",
dest.ID.Name,
dest.ID.Namespace,
dest.ID.Partition,
dest.VirtualPort,
path,
)
} else {
actualURL = fmt.Sprintf("http://localhost:%d/%s", dest.LocalPort, path)
}
actualURL := fmt.Sprintf("http://localhost:%d/%s", us.LocalPort, path)

multiassert(t, count, resetFn, func(r *retry.R) {
name, err := GetBlankspaceNameViaHTTP(context.Background(), client, addr, actualURL)

@@ -112,13 +102,13 @@ func (a *Asserter) checkBlankspaceNameViaHTTPWithCallback(
func (a *Asserter) CheckBlankspaceNameViaTCP(
t *testing.T,
workload *topology.Workload,
dest *topology.Destination,
us *topology.Upstream,
clusterName string,
sid topology.ID,
) {
t.Helper()

a.checkBlankspaceNameViaTCPWithCallback(t, workload, dest, 1, func(_ *retry.R) {}, func(r *retry.R, remoteName string) {
a.checkBlankspaceNameViaTCPWithCallback(t, workload, us, 1, func(_ *retry.R) {}, func(r *retry.R, remoteName string) {
require.Equal(r, fmt.Sprintf("%s::%s", clusterName, sid.String()), remoteName)
}, func(r *retry.R) {})
}

@@ -128,13 +118,13 @@ func (a *Asserter) CheckBlankspaceNameViaTCP(
func (a *Asserter) CheckBlankspaceNameTrafficSplitViaTCP(
t *testing.T,
workload *topology.Workload,
dest *topology.Destination,
us *topology.Upstream,
expect map[string]int,
) {
t.Helper()

got := make(map[string]int)
a.checkBlankspaceNameViaTCPWithCallback(t, workload, dest, 100, func(_ *retry.R) {
a.checkBlankspaceNameViaTCPWithCallback(t, workload, us, 100, func(_ *retry.R) {
got = make(map[string]int)
}, func(_ *retry.R, name string) {
got[name]++

@@ -146,7 +136,7 @@ func (a *Asserter) CheckBlankspaceNameTrafficSplitViaTCP(
func (a *Asserter) checkBlankspaceNameViaTCPWithCallback(
t *testing.T,
workload *topology.Workload,
dest *topology.Destination,
us *topology.Upstream,
count int,
resetFn func(r *retry.R),
attemptFn func(r *retry.R, remoteName string),

@@ -154,8 +144,7 @@ func (a *Asserter) checkBlankspaceNameViaTCPWithCallback(
) {
t.Helper()

require.False(t, dest.Implied, "helper does not support tproxy yet")
port := dest.LocalPort
port := us.LocalPort
require.True(t, port > 0)

node := workload.Node

@@ -180,13 +169,13 @@ func (a *Asserter) checkBlankspaceNameViaTCPWithCallback(
func (a *Asserter) CheckBlankspaceNameViaGRPC(
t *testing.T,
workload *topology.Workload,
dest *topology.Destination,
us *topology.Upstream,
clusterName string,
sid topology.ID,
) {
t.Helper()

a.checkBlankspaceNameViaGRPCWithCallback(t, workload, dest, 1, func(_ *retry.R) {}, func(r *retry.R, remoteName string) {
a.checkBlankspaceNameViaGRPCWithCallback(t, workload, us, 1, func(_ *retry.R) {}, func(r *retry.R, remoteName string) {
require.Equal(r, fmt.Sprintf("%s::%s", clusterName, sid.String()), remoteName)
}, func(_ *retry.R) {})
}

@@ -196,13 +185,13 @@ func (a *Asserter) CheckBlankspaceNameViaGRPC(
func (a *Asserter) CheckBlankspaceNameTrafficSplitViaGRPC(
t *testing.T,
workload *topology.Workload,
dest *topology.Destination,
us *topology.Upstream,
expect map[string]int,
) {
t.Helper()

got := make(map[string]int)
a.checkBlankspaceNameViaGRPCWithCallback(t, workload, dest, 100, func(_ *retry.R) {
a.checkBlankspaceNameViaGRPCWithCallback(t, workload, us, 100, func(_ *retry.R) {
got = make(map[string]int)
}, func(_ *retry.R, name string) {
got[name]++

@@ -214,7 +203,7 @@ func (a *Asserter) CheckBlankspaceNameTrafficSplitViaGRPC(
func (a *Asserter) checkBlankspaceNameViaGRPCWithCallback(
t *testing.T,
workload *topology.Workload,
dest *topology.Destination,
us *topology.Upstream,
count int,
resetFn func(r *retry.R),
attemptFn func(r *retry.R, remoteName string),

@@ -222,8 +211,7 @@ func (a *Asserter) checkBlankspaceNameViaGRPCWithCallback(
) {
t.Helper()

require.False(t, dest.Implied, "helper does not support tproxy yet")
port := dest.LocalPort
port := us.LocalPort
require.True(t, port > 0)

node := workload.Node

@@ -244,7 +232,7 @@ func (a *Asserter) checkBlankspaceNameViaGRPCWithCallback(
}

// assertTrafficSplitFor100Requests compares the counts of 100 requests that
// did reach an observed set of destinations (nameCounts) against the expected
// did reach an observed set of upstreams (nameCounts) against the expected
// counts of those same services is the same within a fixed difference of 2.
func assertTrafficSplitFor100Requests(t require.TestingT, nameCounts map[string]int, expect map[string]int) {
const (

@@ -265,7 +253,7 @@ func sumMapValues(m map[string]int) int {
}

// assertTrafficSplit compares the counts of requests that did reach an
// observed set of destinations (nameCounts) against the expected counts of
// observed set of upstreams (nameCounts) against the expected counts of
// those same services is the same within the provided allowedDelta value.
//
// When doing random traffic splits it'll never be perfect so we need the
@@ -15,12 +15,8 @@ const HashicorpDockerProxy = "docker.mirror.hashicorp.services"
func NewFortioWorkloadWithDefaults(
cluster string,
sid topology.ID,
nodeVersion topology.NodeVersion,
mut func(*topology.Workload),
) *topology.Workload {
if nodeVersion == topology.NodeVersionV2 {
panic("v2 nodes are not supported")
}
const (
httpPort = 8080
grpcPort = 8079

@@ -56,12 +52,8 @@ func NewFortioWorkloadWithDefaults(
func NewBlankspaceWorkloadWithDefaults(
cluster string,
sid topology.ID,
nodeVersion topology.NodeVersion,
mut func(*topology.Workload),
) *topology.Workload {
if nodeVersion == topology.NodeVersionV2 {
panic("v2 nodes are not supported")
}
const (
httpPort = 8080
grpcPort = 8079
@@ -4,38 +4,23 @@
package topoutil

import (
"testing"

"github.com/hashicorp/consul/testing/deployer/topology"
)

// Deprecated: DestinationEndpointStatus
func (a *Asserter) UpstreamEndpointStatus(
t *testing.T,
workload *topology.Workload,
clusterName string,
healthStatus string,
count int,
) {
a.DestinationEndpointStatus(t, workload, clusterName, healthStatus, count)
}

// Deprecated: NewFortioWorkloadWithDefaults
func NewFortioServiceWithDefaults(
cluster string,
sid topology.ID,
nodeVersion topology.NodeVersion,
mut func(*topology.Workload),
) *topology.Workload {
return NewFortioWorkloadWithDefaults(cluster, sid, nodeVersion, mut)
return NewFortioWorkloadWithDefaults(cluster, sid, mut)
}

// Deprecated: NewBlankspaceWorkloadWithDefaults
func NewBlankspaceServiceWithDefaults(
cluster string,
sid topology.ID,
nodeVersion topology.NodeVersion,
mut func(*topology.Workload),
) *topology.Workload {
return NewBlankspaceWorkloadWithDefaults(cluster, sid, nodeVersion, mut)
return NewBlankspaceWorkloadWithDefaults(cluster, sid, mut)
}
@@ -130,7 +130,7 @@ func newCommonTopo(t *testing.T) *commonTopo {
"-http-port", "8080",
"-redirect-port", "-disabled",
},
Upstreams: []*topology.Destination{
Upstreams: []*topology.Upstream{
{
ID: staticServerSID,
LocalPort: 5000,

@@ -218,7 +218,7 @@ func (ct *commonTopo) PostUpgradeValidation(t *testing.T) {
cluster.Nodes[ct.StaticServerInstTwo].Disabled = false // client 3 -- new static-server
require.NoError(t, ct.Sprawl.RelaunchWithPhase(cfg, sprawl.LaunchPhaseRegular))
// Ensure the static-client connected to the new static-server
ct.Assert.FortioFetch2HeaderEcho(t, ct.StaticClientWorkload, &topology.Destination{
ct.Assert.FortioFetch2HeaderEcho(t, ct.StaticClientWorkload, &topology.Upstream{
ID: ct.StaticServerSID,
LocalPort: 5000,
})

@@ -244,7 +244,7 @@ func (ct *commonTopo) Launch(t *testing.T) {
topology.NewNodeID("dc1-client2", "default"),
ct.StaticClientSID,
)
ct.Assert.FortioFetch2HeaderEcho(t, staticClientWorkload, &topology.Destination{
ct.Assert.FortioFetch2HeaderEcho(t, staticClientWorkload, &topology.Upstream{
ID: ct.StaticServerSID,
LocalPort: 5000,
})
@@ -8,12 +8,11 @@ import (
"testing"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/test-integ/topoutil"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/sprawl"
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
"github.com/hashicorp/consul/testing/deployer/topology"

"github.com/hashicorp/consul/test-integ/topoutil"
)

type commonTopo struct {

@@ -51,11 +50,11 @@ func NewCommonTopo(t *testing.T) *commonTopo {
//
// dataplane
// - workload(fortio) static-server on node dc1-client1
// - workload(fortio) static-client on node dc1-client2 with destination to static-server
// - workload(fortio) static-client on node dc1-client2 with upstream to static-server
// - static-client, static-server are registered at 2 agentless nodes.
//
// Intentions
// - static-client has destination to static-server
// - static-client has upstream to static-server
func newCommonTopo(t *testing.T) *commonTopo {
t.Helper()

@@ -107,7 +106,7 @@ func newCommonTopo(t *testing.T) *commonTopo {
},
},
},
// static-client on dc1-client2 with destination to static-server
// static-client on dc1-client2 with upstream to static-server
{
Kind: topology.NodeKindDataplane,
Name: "dc1-client2",

@@ -123,7 +122,7 @@ func newCommonTopo(t *testing.T) *commonTopo {
"-http-port", "8080",
"-redirect-port", "-disabled",
},
Destinations: []*topology.Destination{
Upstreams: []*topology.Upstream{
{
ID: staticServerSID, // static-server
LocalPort: 5000,

@@ -208,7 +207,7 @@ func (ct *commonTopo) ValidateWorkloads(t *testing.T) {
// check the service exists in catalog
svcs := cluster.WorkloadsByID(ct.StaticClientSID)
client := svcs[0]
upstream := client.Destinations[0]
upstream := client.Upstreams[0]
ct.Assert.CatalogServiceExists(t, cluster.Name, upstream.ID.Name, utils.CompatQueryOpts(&api.QueryOptions{
Partition: upstream.ID.Partition,
Namespace: upstream.ID.Namespace,
@@ -38,15 +38,15 @@ func TestTrafficManagement_ResolverDefaultSubset_Agentless(t *testing.T) {
topology.NewNodeID("dc1-client2", defaultPartition),
ct.StaticClientSID,
)
ct.Assert.FortioFetch2HeaderEcho(t, staticClientWorkload, &topology.Destination{
ct.Assert.FortioFetch2HeaderEcho(t, staticClientWorkload, &topology.Upstream{
ID: ct.StaticServerSID,
LocalPort: 5000,
})
ct.Assert.FortioFetch2FortioName(t, staticClientWorkload, &topology.Destination{
ct.Assert.FortioFetch2FortioName(t, staticClientWorkload, &topology.Upstream{
ID: ct.StaticServerSID,
LocalPort: 5000,
}, dc1, staticServerSID)
ct.Assert.DestinationEndpointStatus(t, staticClientWorkload, "v2.static-server.default", "HEALTHY", 1)
ct.Assert.UpstreamEndpointStatus(t, staticClientWorkload, "v2.static-server.default", "HEALTHY", 1)
}
resolverV2AssertFn()

@@ -82,7 +82,7 @@ func TestTrafficManagement_ResolverDefaultSubset_Agentless(t *testing.T) {
topology.NewNodeID("dc1-client2", defaultPartition),
ct.StaticClientSID,
)
ct.Assert.FortioFetch2ServiceUnavailable(t, staticClientWorkload, &topology.Destination{
ct.Assert.FortioFetch2ServiceUnavailable(t, staticClientWorkload, &topology.Upstream{
ID: ct.StaticServerSID,
LocalPort: 5000,
})
@@ -12,7 +12,7 @@ provider to manage a fleet of local docker containers and networks.

The complete topology of Consul clusters is defined using a topology.Config
which allows you to define a set of networks and reference those networks when
assigning nodes and services to clusters. Both Consul clients and
assigning nodes and workloads to clusters. Both Consul clients and
`consul-dataplane` instances are supported.

Here is an example configuration with two peered clusters:

@@ -39,9 +39,9 @@ cfg := &topology.Config{
{
Kind: topology.NodeKindClient,
Name: "dc1-client1",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: topology.ServiceID{Name: "mesh-gateway"},
ID: topology.ID{Name: "mesh-gateway"},
Port: 8443,
EnvoyAdminPort: 19000,
IsMeshGateway: true,

@@ -51,9 +51,9 @@ cfg := &topology.Config{
{
Kind: topology.NodeKindClient,
Name: "dc1-client2",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: topology.ServiceID{Name: "ping"},
ID: topology.ID{Name: "ping"},
Image: "rboyer/pingpong:latest",
Port: 8080,
EnvoyAdminPort: 19000,

@@ -65,7 +65,7 @@ cfg := &topology.Config{
"-name", "ping",
},
Upstreams: []*topology.Upstream{{
ID: topology.ServiceID{Name: "pong"},
ID: topology.ID{Name: "pong"},
LocalPort: 9090,
Peer: "peer-dc2-default",
}},

@@ -99,9 +99,9 @@ cfg := &topology.Config{
{
Kind: topology.NodeKindClient,
Name: "dc2-client1",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: topology.ServiceID{Name: "mesh-gateway"},
ID: topology.ID{Name: "mesh-gateway"},
Port: 8443,
EnvoyAdminPort: 19000,
IsMeshGateway: true,

@@ -111,9 +111,9 @@ cfg := &topology.Config{
{
Kind: topology.NodeKindDataplane,
Name: "dc2-client2",
Services: []*topology.Service{
Workloads: []*topology.Workload{
{
ID: topology.ServiceID{Name: "pong"},
ID: topology.ID{Name: "pong"},
Image: "rboyer/pingpong:latest",
Port: 8080,
EnvoyAdminPort: 19000,

@@ -125,7 +125,7 @@ cfg := &topology.Config{
"-name", "pong",
},
Upstreams: []*topology.Upstream{{
ID: topology.ServiceID{Name: "ping"},
ID: topology.ID{Name: "ping"},
LocalPort: 9090,
Peer: "peer-dc1-default",
}},
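A minimal sketch of how a config like the one above is launched from a Go test. `sprawltest.Launch` and `topoutil.NewAsserter` both appear in imports touched by this commit; the exact cleanup behavior and return types are assumptions here, not part of the diff.

```go
package example_test

import (
	"testing"

	"github.com/hashicorp/consul/test-integ/topoutil"
	"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
	"github.com/hashicorp/consul/testing/deployer/topology"
)

func TestPingPong(t *testing.T) {
	cfg := &topology.Config{ /* ... networks, clusters, and peerings as above ... */ }

	// Launch builds the docker containers/networks for the topology and is
	// assumed to register its own cleanup with the test.
	sp := sprawltest.Launch(t, cfg)

	asserter := topoutil.NewAsserter(sp)
	_ = asserter // combine with the Asserter helpers shown earlier in this commit
}
```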
@@ -7,7 +7,6 @@ import (
"fmt"

"github.com/hashicorp/consul/api"

"github.com/hashicorp/consul/testing/deployer/topology"
)

@@ -98,13 +97,6 @@ func tokenForWorkload(wrk *topology.Workload, overridePolicy *api.ACLPolicy, ent
}
if overridePolicy != nil {
token.Policies = []*api.ACLTokenPolicyLink{{ID: overridePolicy.ID}}
} else if wrk.IsV2() {
token.TemplatedPolicies = []*api.ACLTemplatedPolicy{{
TemplateName: api.ACLTemplatedPolicyWorkloadIdentityName,
TemplateVariables: &api.ACLTemplatedPolicyVariables{
Name: wrk.WorkloadIdentity,
},
}}
} else {
token.ServiceIdentities = []*api.ACLServiceIdentity{{
ServiceName: wrk.ID.Name,
@@ -13,9 +13,10 @@ import (
"time"

retry "github.com/avast/retry-go"
"github.com/hashicorp/consul/api"

"github.com/hashicorp/go-multierror"

"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/build"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/secrets"
"github.com/hashicorp/consul/testing/deployer/sprawl/internal/tfgen"

@@ -258,8 +259,8 @@ func (s *Sprawl) initConsulServers() error {
return fmt.Errorf("error creating final client for cluster=%s: %v", cluster.Name, err)
}

// Connect to gRPC as well.
if cluster.EnableV2 {
// Connect to gRPC as well for the resource service.
{
s.grpcConns[cluster.Name], s.grpcConnCancel[cluster.Name], err = s.dialServerGRPC(cluster, node, mgmtToken)
if err != nil {
return fmt.Errorf("error creating gRPC client conn for cluster=%s: %w", cluster.Name, err)

@@ -281,11 +282,8 @@ func (s *Sprawl) initConsulServers() error {
return fmt.Errorf("populateInitialConfigEntries[%s]: %w", cluster.Name, err)
}

if cluster.EnableV2 {
// Resources are available only in V2
if err := s.populateInitialResources(cluster); err != nil {
return fmt.Errorf("populateInitialResources[%s]: %w", cluster.Name, err)
}
if err := s.populateInitialResources(cluster); err != nil {
return fmt.Errorf("populateInitialResources[%s]: %w", cluster.Name, err)
}

if err := s.createAnonymousToken(cluster); err != nil {

@@ -535,9 +533,6 @@ func (s *Sprawl) waitForLocalWrites(cluster *topology.Cluster, token string) {
}

func (s *Sprawl) waitForClientAntiEntropyOnce(cluster *topology.Cluster) error {
if cluster.EnableV2 {
return nil // v1 catalog is disabled when v2 catalog is enabled
}
var (
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
@ -9,14 +9,11 @@ import (
|
|||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||
"github.com/hashicorp/consul/testing/deployer/util"
|
||||
)
|
||||
|
@ -49,9 +46,6 @@ func (s *Sprawl) registerServicesToAgents(cluster *topology.Cluster) error {
|
|||
if !node.IsAgent() {
|
||||
continue
|
||||
}
|
||||
if node.IsV2() {
|
||||
panic("don't call this")
|
||||
}
|
||||
|
||||
agentClient, err := util.ProxyAPIClient(
|
||||
node.LocalProxyPort(),
|
||||
|
@ -82,9 +76,6 @@ func (s *Sprawl) registerAgentService(
|
|||
if !node.IsAgent() {
|
||||
panic("called wrong method type")
|
||||
}
|
||||
if node.IsV2() {
|
||||
panic("don't call this")
|
||||
}
|
||||
|
||||
if wrk.IsMeshGateway {
|
||||
return nil // handled at startup time for agent-ful, but won't be for agent-less
|
||||
|
@ -107,19 +98,19 @@ func (s *Sprawl) registerAgentService(
|
|||
|
||||
if !wrk.DisableServiceMesh {
|
||||
var upstreams []api.Upstream
|
||||
for _, dest := range wrk.Destinations {
|
||||
for _, us := range wrk.Upstreams {
|
||||
uAPI := api.Upstream{
|
||||
DestinationPeer: dest.Peer,
|
||||
DestinationName: dest.ID.Name,
|
||||
LocalBindAddress: dest.LocalAddress,
|
||||
LocalBindPort: dest.LocalPort,
|
||||
DestinationPeer: us.Peer,
|
||||
DestinationName: us.ID.Name,
|
||||
LocalBindAddress: us.LocalAddress,
|
||||
LocalBindPort: us.LocalPort,
|
||||
// Config map[string]interface{} `json:",omitempty" bexpr:"-"`
|
||||
// MeshGateway MeshGatewayConfig `json:",omitempty"`
|
||||
}
|
||||
if cluster.Enterprise {
|
||||
uAPI.DestinationNamespace = dest.ID.Namespace
|
||||
if dest.Peer == "" {
|
||||
uAPI.DestinationPartition = dest.ID.Partition
|
||||
uAPI.DestinationNamespace = us.ID.Namespace
|
||||
if us.Peer == "" {
|
||||
uAPI.DestinationPartition = us.ID.Partition
|
||||
}
|
||||
}
|
||||
upstreams = append(upstreams, uAPI)
|
||||
|
@@ -179,65 +170,14 @@ RETRY:

// syncWorkloadsForDataplaneInstances register/deregister services in the given cluster
func (s *Sprawl) syncWorkloadsForDataplaneInstances(cluster *topology.Cluster) error {
identityInfo := make(map[topology.ID]*Resource[*pbauth.WorkloadIdentity])

// registerWorkloadToNode is called when node is not disabled
registerWorkloadToNode := func(node *topology.Node, wrk *topology.Workload) error {
if node.IsV2() {
pending := workloadInstanceToResources(node, wrk)

workloadID := topology.NewID(wrk.WorkloadIdentity, wrk.ID.Namespace, wrk.ID.Partition)
if _, ok := identityInfo[workloadID]; !ok {
identityInfo[workloadID] = pending.WorkloadIdentity
}

// Write workload
res, err := pending.Workload.Build()
if err != nil {
return fmt.Errorf("error serializing resource %s: %w", util.IDToString(pending.Workload.Resource.Id), err)
}
workload, err := s.writeResource(cluster, res)
if err != nil {
return err
}
// Write check linked to workload
for _, check := range pending.HealthStatuses {
check.Resource.Owner = workload.Id
res, err := check.Build()
if err != nil {
return fmt.Errorf("error serializing resource %s: %w", util.IDToString(check.Resource.Id), err)
}
if _, err := s.writeResource(cluster, res); err != nil {
return err
}
}
// maybe write destinations
if pending.Destinations != nil {
res, err := pending.Destinations.Build()
if err != nil {
return fmt.Errorf("error serializing resource %s: %w", util.IDToString(pending.Destinations.Resource.Id), err)
}
if _, err := s.writeResource(cluster, res); err != nil {
return err
}
}
if pending.ProxyConfiguration != nil {
res, err := pending.ProxyConfiguration.Build()
if err != nil {
return fmt.Errorf("error serializing resource %s: %w", util.IDToString(pending.ProxyConfiguration.Resource.Id), err)
}
if _, err := s.writeResource(cluster, res); err != nil {
return err
}
}
} else {
if err := s.registerCatalogServiceV1(cluster, node, wrk); err != nil {
return fmt.Errorf("error registering service: %w", err)
}
if !wrk.DisableServiceMesh {
if err := s.registerCatalogSidecarServiceV1(cluster, node, wrk); err != nil {
return fmt.Errorf("error registering sidecar service: %w", err)
}
if err := s.registerCatalogServiceV1(cluster, node, wrk); err != nil {
return fmt.Errorf("error registering service: %w", err)
}
if !wrk.DisableServiceMesh {
if err := s.registerCatalogSidecarServiceV1(cluster, node, wrk); err != nil {
return fmt.Errorf("error registering sidecar service: %w", err)
}
}
return nil
@@ -245,17 +185,12 @@ func (s *Sprawl) syncWorkloadsForDataplaneInstances(cluster *topology.Cluster) e

// deregisterWorkloadFromNode is called when node is disabled
deregisterWorkloadFromNode := func(node *topology.Node, wrk *topology.Workload) error {
if node.IsV2() {
// TODO: implement deregister workload for v2
panic("deregister workload is not implemented for V2")
} else {
if err := s.deregisterCatalogServiceV1(cluster, node, wrk); err != nil {
return fmt.Errorf("error deregistering service: %w", err)
}
if !wrk.DisableServiceMesh {
if err := s.deregisterCatalogSidecarServiceV1(cluster, node, wrk); err != nil {
return fmt.Errorf("error deregistering sidecar service: %w", err)
}
if err := s.deregisterCatalogServiceV1(cluster, node, wrk); err != nil {
return fmt.Errorf("error deregistering service: %w", err)
}
if !wrk.DisableServiceMesh {
if err := s.deregisterCatalogSidecarServiceV1(cluster, node, wrk); err != nil {
return fmt.Errorf("error deregistering sidecar service: %w", err)
}
}
return nil
@@ -299,42 +234,6 @@ func (s *Sprawl) syncWorkloadsForDataplaneInstances(cluster *topology.Cluster) e
}
}

if cluster.EnableV2 {
for _, identity := range identityInfo {
res, err := identity.Build()
if err != nil {
return fmt.Errorf("error serializing resource %s: %w", util.IDToString(identity.Resource.Id), err)
}
if _, err := s.writeResource(cluster, res); err != nil {
return err
}
}

for id, svcData := range cluster.Services {
svcInfo := &Resource[*pbcatalog.Service]{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbcatalog.ServiceType,
Name: id.Name,
Tenancy: &pbresource.Tenancy{
Partition: id.Partition,
Namespace: id.Namespace,
},
},
},
Data: svcData,
}

res, err := svcInfo.Build()
if err != nil {
return fmt.Errorf("error serializing resource %s: %w", util.IDToString(svcInfo.Resource.Id), err)
}
if _, err := s.writeResource(cluster, res); err != nil {
return err
}
}
}

return nil
}

@@ -342,9 +241,6 @@ func (s *Sprawl) registerCatalogNode(
cluster *topology.Cluster,
node *topology.Node,
) error {
if node.IsV2() {
return s.registerCatalogNodeV2(cluster, node)
}
return s.registerCatalogNodeV1(cluster, node)
}

@@ -352,49 +248,9 @@ func (s *Sprawl) deregisterCatalogNode(
cluster *topology.Cluster,
node *topology.Node,
) error {
if node.IsV2() {
panic("deregister V2 node is not implemented")
}
return s.deregisterCatalogNodeV1(cluster, node)
}

func (s *Sprawl) registerCatalogNodeV2(
cluster *topology.Cluster,
node *topology.Node,
) error {
if !node.IsDataplane() {
panic("called wrong method type")
}

nodeRes := &Resource[*pbcatalog.Node]{
Resource: &pbresource.Resource{
Id: &pbresource.ID{
Type: pbcatalog.NodeType,
Name: node.PodName(),
Tenancy: &pbresource.Tenancy{
Partition: node.Partition,
},
},
Metadata: map[string]string{
"dataplane-faux": "1",
},
},
Data: &pbcatalog.Node{
Addresses: []*pbcatalog.NodeAddress{
{Host: node.LocalAddress()},
},
},
}

res, err := nodeRes.Build()
if err != nil {
return err
}

_, err = s.writeResource(cluster, res)
return err
}

func (s *Sprawl) writeResource(cluster *topology.Cluster, res *pbresource.Resource) (*pbresource.Resource, error) {
var (
client = s.getResourceClient(cluster.Name)
@@ -505,9 +361,6 @@ func (s *Sprawl) deregisterCatalogServiceV1(
if !node.IsDataplane() {
panic("called wrong method type")
}
if node.IsV2() {
panic("don't call this")
}

var (
client = s.clients[cluster.Name]

@@ -543,9 +396,6 @@ func (s *Sprawl) registerCatalogServiceV1(
if !node.IsDataplane() {
panic("called wrong method type")
}
if node.IsV2() {
panic("don't call this")
}

var (
client = s.clients[cluster.Name]

@@ -582,9 +432,6 @@ func (s *Sprawl) deregisterCatalogSidecarServiceV1(
if wrk.DisableServiceMesh {
panic("not valid")
}
if node.IsV2() {
panic("don't call this")
}

var (
client = s.clients[cluster.Name]

@@ -626,9 +473,6 @@ func (s *Sprawl) registerCatalogSidecarServiceV1(
if wrk.DisableServiceMesh {
panic("not valid")
}
if node.IsV2() {
panic("don't call this")
}

var (
client = s.clients[cluster.Name]
@ -667,172 +511,11 @@ func (r *Resource[V]) Build() (*pbresource.Resource, error) {
|
|||
return r.Resource, nil
|
||||
}
|
||||
|
||||
type ServiceResources struct {
|
||||
Workload *Resource[*pbcatalog.Workload]
|
||||
HealthStatuses []*Resource[*pbcatalog.HealthStatus]
|
||||
Destinations *Resource[*pbmesh.Destinations]
|
||||
WorkloadIdentity *Resource[*pbauth.WorkloadIdentity]
|
||||
ProxyConfiguration *Resource[*pbmesh.ProxyConfiguration]
|
||||
}
|
||||
|
||||
func workloadInstanceToResources(
|
||||
node *topology.Node,
|
||||
wrk *topology.Workload,
|
||||
) *ServiceResources {
|
||||
if wrk.IsMeshGateway {
|
||||
panic("v2 does not yet support mesh gateways")
|
||||
}
|
||||
|
||||
tenancy := &pbresource.Tenancy{
|
||||
Partition: wrk.ID.Partition,
|
||||
Namespace: wrk.ID.Namespace,
|
||||
}
|
||||
|
||||
var (
|
||||
wlPorts = map[string]*pbcatalog.WorkloadPort{}
|
||||
)
|
||||
for name, port := range wrk.Ports {
|
||||
wlPorts[name] = &pbcatalog.WorkloadPort{
|
||||
Port: uint32(port.Number),
|
||||
Protocol: port.ActualProtocol,
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
selector = &pbcatalog.WorkloadSelector{
|
||||
Names: []string{wrk.Workload},
|
||||
}
|
||||
|
||||
workloadRes = &Resource[*pbcatalog.Workload]{
|
||||
Resource: &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Type: pbcatalog.WorkloadType,
|
||||
Name: wrk.Workload,
|
||||
Tenancy: tenancy,
|
||||
},
|
||||
Metadata: wrk.Meta,
|
||||
},
|
||||
Data: &pbcatalog.Workload{
|
||||
NodeName: node.PodName(),
|
||||
Identity: wrk.WorkloadIdentity,
|
||||
Ports: wlPorts,
|
||||
Addresses: []*pbcatalog.WorkloadAddress{
|
||||
{Host: node.LocalAddress()},
|
||||
},
|
||||
},
|
||||
}
|
||||
workloadIdentityRes = &Resource[*pbauth.WorkloadIdentity]{
|
||||
Resource: &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Type: pbauth.WorkloadIdentityType,
|
||||
Name: wrk.WorkloadIdentity,
|
||||
Tenancy: tenancy,
|
||||
},
|
||||
},
|
||||
Data: &pbauth.WorkloadIdentity{},
|
||||
}
|
||||
|
||||
healthResList []*Resource[*pbcatalog.HealthStatus]
|
||||
destinationsRes *Resource[*pbmesh.Destinations]
|
||||
proxyConfigRes *Resource[*pbmesh.ProxyConfiguration]
|
||||
)
|
||||
|
||||
if wrk.HasCheck() {
|
||||
// TODO: needs ownerId
|
||||
checkRes := &Resource[*pbcatalog.HealthStatus]{
|
||||
Resource: &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Type: pbcatalog.HealthStatusType,
|
||||
Name: wrk.Workload + "-check-0",
|
||||
Tenancy: tenancy,
|
||||
},
|
||||
},
|
||||
Data: &pbcatalog.HealthStatus{
|
||||
Type: "external-sync",
|
||||
Status: pbcatalog.Health_HEALTH_PASSING,
|
||||
},
|
||||
}
|
||||
|
||||
healthResList = []*Resource[*pbcatalog.HealthStatus]{checkRes}
|
||||
}
|
||||
|
||||
if node.HasPublicAddress() {
|
||||
workloadRes.Data.Addresses = append(workloadRes.Data.Addresses,
|
||||
&pbcatalog.WorkloadAddress{Host: node.PublicAddress(), External: true},
|
||||
)
|
||||
}
|
||||
|
||||
if !wrk.DisableServiceMesh {
|
||||
destinationsRes = &Resource[*pbmesh.Destinations]{
|
||||
Resource: &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Type: pbmesh.DestinationsType,
|
||||
Name: wrk.Workload,
|
||||
Tenancy: tenancy,
|
||||
},
|
||||
},
|
||||
Data: &pbmesh.Destinations{
|
||||
Workloads: selector,
|
||||
},
|
||||
}
|
||||
|
||||
for _, dest := range wrk.Destinations {
|
||||
meshDest := &pbmesh.Destination{
|
||||
DestinationRef: &pbresource.Reference{
|
||||
Type: pbcatalog.ServiceType,
|
||||
Name: dest.ID.Name,
|
||||
Tenancy: &pbresource.Tenancy{
|
||||
Partition: dest.ID.Partition,
|
||||
Namespace: dest.ID.Namespace,
|
||||
},
|
||||
},
|
||||
DestinationPort: dest.PortName,
|
||||
ListenAddr: &pbmesh.Destination_IpPort{
|
||||
IpPort: &pbmesh.IPPortAddress{
|
||||
Ip: dest.LocalAddress,
|
||||
Port: uint32(dest.LocalPort),
|
||||
},
|
||||
},
|
||||
}
|
||||
destinationsRes.Data.Destinations = append(destinationsRes.Data.Destinations, meshDest)
|
||||
}
|
||||
|
||||
if wrk.EnableTransparentProxy {
|
||||
proxyConfigRes = &Resource[*pbmesh.ProxyConfiguration]{
|
||||
Resource: &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Type: pbmesh.ProxyConfigurationType,
|
||||
Name: wrk.Workload,
|
||||
Tenancy: tenancy,
|
||||
},
|
||||
},
|
||||
Data: &pbmesh.ProxyConfiguration{
|
||||
Workloads: selector,
|
||||
DynamicConfig: &pbmesh.DynamicConfig{
|
||||
Mode: pbmesh.ProxyMode_PROXY_MODE_TRANSPARENT,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &ServiceResources{
|
||||
Workload: workloadRes,
|
||||
HealthStatuses: healthResList,
|
||||
Destinations: destinationsRes,
|
||||
WorkloadIdentity: workloadIdentityRes,
|
||||
ProxyConfiguration: proxyConfigRes,
|
||||
}
|
||||
}
|
||||
|
||||
func workloadToCatalogRegistration(
|
||||
cluster *topology.Cluster,
|
||||
node *topology.Node,
|
||||
wrk *topology.Workload,
|
||||
) *api.CatalogRegistration {
|
||||
if node.IsV2() {
|
||||
panic("don't call this")
|
||||
}
|
||||
reg := &api.CatalogRegistration{
|
||||
Node: node.PodName(),
|
||||
SkipNodeUpdate: true,
|
||||
|
@@ -921,9 +604,6 @@ func workloadToSidecarCatalogRegistration(
node *topology.Node,
wrk *topology.Workload,
) (topology.ID, *api.CatalogRegistration) {
if node.IsV2() {
panic("don't call this")
}
pid := wrk.ID
pid.Name += "-sidecar-proxy"
reg := &api.CatalogRegistration{

@@ -970,17 +650,17 @@ func workloadToSidecarCatalogRegistration(
reg.Checks[0].Partition = pid.Partition
}

for _, dest := range wrk.Destinations {
for _, us := range wrk.Upstreams {
pu := api.Upstream{
DestinationName: dest.ID.Name,
DestinationPeer: dest.Peer,
LocalBindAddress: dest.LocalAddress,
LocalBindPort: dest.LocalPort,
DestinationName: us.ID.Name,
DestinationPeer: us.Peer,
LocalBindAddress: us.LocalAddress,
LocalBindPort: us.LocalPort,
}
if cluster.Enterprise {
pu.DestinationNamespace = dest.ID.Namespace
if dest.Peer == "" {
pu.DestinationPartition = dest.ID.Partition
pu.DestinationNamespace = us.ID.Namespace
if us.Peer == "" {
pu.DestinationPartition = us.ID.Partition
}
}
reg.Service.Proxy.Upstreams = append(reg.Service.Proxy.Upstreams, pu)

@@ -13,6 +13,7 @@ import (
"time"

retry "github.com/avast/retry-go"

"github.com/hashicorp/consul/api"
)

@@ -86,15 +87,10 @@ func (s *Sprawl) PrintDetails() error {
Service: wrk.ID.String(),
})
} else {
ports := make(map[string]int)
for name, port := range wrk.Ports {
ports[name] = node.ExposedPort(port.Number)
}
cd.Apps = append(cd.Apps, appDetail{
Type: "app",
Container: node.DockerName(),
ExposedPort: node.ExposedPort(wrk.Port),
ExposedPorts: ports,
ExposedEnvoyAdminPort: node.ExposedPort(wrk.EnvoyAdminPort),
Addresses: addrs,
Service: wrk.ID.String(),
@@ -142,17 +138,7 @@ func (s *Sprawl) PrintDetails() error {
if d.Type == "server" && d.Container == cluster.Leader {
d.Type = "leader"
}
var portStr string
if len(d.ExposedPorts) > 0 {
var out []string
for name, exposed := range d.ExposedPorts {
out = append(out, fmt.Sprintf("app:%s=%d", name, exposed))
}
sort.Strings(out)
portStr = strings.Join(out, " ")
} else {
portStr = "app=" + strconv.Itoa(d.ExposedPort)
}
portStr := "app=" + strconv.Itoa(d.ExposedPort)
if d.ExposedEnvoyAdminPort > 0 {
portStr += " envoy=" + strconv.Itoa(d.ExposedEnvoyAdminPort)
}
@@ -191,9 +177,8 @@ type appDetail struct {
Type string // server|mesh-gateway|app
Container string
Addresses []string
ExposedPort int `json:",omitempty"`
ExposedPorts map[string]int `json:",omitempty"`
ExposedEnvoyAdminPort int `json:",omitempty"`
ExposedPort int `json:",omitempty"`
ExposedEnvoyAdminPort int `json:",omitempty"`
// just services
Service string `json:",omitempty"`
}

@@ -102,7 +102,7 @@ func DockerImages(
built := make(map[string]struct{})
for _, c := range t.Clusters {
for _, n := range c.Nodes {
needsTproxy := n.NeedsTransparentProxy()
const needsTproxy = false // TODO: see if we can bring this back for v1 CDP

joint := n.Images.EnvoyConsulImage()
if _, ok := built[joint]; joint != "" && !ok {

@@ -13,7 +13,7 @@ import (
"github.com/hashicorp/consul/testing/deployer/topology"
)

func (g *Generator) generateAgentHCL(node *topology.Node, enableV2, enableV2Tenancy bool) string {
func (g *Generator) generateAgentHCL(node *topology.Node) string {
if !node.IsAgent() {
panic("generateAgentHCL only applies to agents")
}
@@ -41,17 +41,6 @@ func (g *Generator) generateAgentHCL(node *topology.Node, enableV2, enableV2Tena
b.add("enable_debug", true)
b.add("use_streaming_backend", true)

var experiments []string
if enableV2 {
experiments = append(experiments, "resource-apis")
}
if enableV2Tenancy {
experiments = append(experiments, "v2tenancy")
}
if len(experiments) > 0 {
b.addSlice("experiments", experiments)
}

// speed up leaves
b.addBlock("performance", func() {
b.add("leave_drain_time", "50ms")

@ -8,11 +8,8 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
|
||||
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||
"github.com/hashicorp/consul/testing/deployer/util"
|
||||
)
|
||||
|
@ -66,22 +63,6 @@ func (g *Generator) writeCoreDNSFiles(net *topology.Network, dnsIPAddress string
|
|||
}
|
||||
}
|
||||
|
||||
// Until Consul DNS understands v2, simulate it.
|
||||
//
|
||||
// NOTE: this DNS is not quite what consul normally does. It's simpler
|
||||
// to simulate this format here.
|
||||
virtualNames := make(map[string][]string)
|
||||
for id, svcData := range cluster.Services {
|
||||
if len(svcData.VirtualIps) == 0 {
|
||||
continue
|
||||
}
|
||||
vips := svcData.VirtualIps
|
||||
|
||||
// <service>--<namespace>--<partition>.virtual.<domain>
|
||||
name := fmt.Sprintf("%s--%s--%s", id.Name, id.Namespace, id.Partition)
|
||||
virtualNames[name] = vips
|
||||
}
|
||||
|
||||
var (
|
||||
clusterDNSName = cluster.Name + "-consulcluster.lan"
|
||||
virtualDNSName = "virtual.consul"
|
||||
|
@ -132,7 +113,6 @@ func (g *Generator) writeCoreDNSFiles(net *topology.Network, dnsIPAddress string
|
|||
generateCoreDNSVirtualZoneFile(
|
||||
dnsIPAddress,
|
||||
virtualDNSName,
|
||||
virtualNames,
|
||||
),
|
||||
virtualZonefilePath,
|
||||
0644,
|
||||
|
@ -230,7 +210,6 @@ server IN A %s ; Consul server
|
|||
func generateCoreDNSVirtualZoneFile(
|
||||
dnsIPAddress string,
|
||||
virtualDNSName string,
|
||||
nameToAddr map[string][]string,
|
||||
) []byte {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString(fmt.Sprintf(`
|
||||
|
@ -247,17 +226,5 @@ $ORIGIN %[1]s.
|
|||
ns IN A %[2]s ; self
|
||||
`, virtualDNSName, dnsIPAddress))
|
||||
|
||||
names := maps.Keys(nameToAddr)
|
||||
sort.Strings(names)
|
||||
|
||||
for _, name := range names {
|
||||
vips := nameToAddr[name]
|
||||
for _, vip := range vips {
|
||||
buf.WriteString(fmt.Sprintf(`
|
||||
%s IN A %s ; Consul server
|
||||
`, name, vip))
|
||||
}
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
|
|
@ -262,9 +262,6 @@ func (g *Generator) Generate(step Step) error {
|
|||
addImage("", node.Images.Consul)
|
||||
addImage("", node.Images.EnvoyConsulImage())
|
||||
addImage("", node.Images.LocalDataplaneImage())
|
||||
if node.NeedsTransparentProxy() {
|
||||
addImage("", node.Images.LocalDataplaneTProxyImage())
|
||||
}
|
||||
|
||||
if node.IsAgent() {
|
||||
addVolume(node.DockerName())
|
||||
|
|
|
@ -67,7 +67,7 @@ func (g *Generator) generateNodeContainers(
|
|||
}{
|
||||
terraformPod: pod,
|
||||
ImageResource: DockerImageResourceName(node.Images.Consul),
|
||||
HCL: g.generateAgentHCL(node, cluster.EnableV2 && node.IsServer(), cluster.EnableV2Tenancy && node.IsServer()),
|
||||
HCL: g.generateAgentHCL(node),
|
||||
EnterpriseLicense: g.license,
|
||||
}))
|
||||
}
|
||||
|
@ -125,11 +125,7 @@ func (g *Generator) generateNodeContainers(
|
|||
var img string
|
||||
if node.IsDataplane() {
|
||||
tmpl = tfAppDataplaneT
|
||||
if wrk.EnableTransparentProxy {
|
||||
img = DockerImageResourceName(node.Images.LocalDataplaneTProxyImage())
|
||||
} else {
|
||||
img = DockerImageResourceName(node.Images.LocalDataplaneImage())
|
||||
}
|
||||
img = DockerImageResourceName(node.Images.LocalDataplaneImage())
|
||||
} else {
|
||||
img = DockerImageResourceName(node.Images.EnvoyConsulImage())
|
||||
}
|
||||
|
|
|
@ -17,39 +17,20 @@ resource "docker_container" "{{.Node.DockerName}}-{{.Workload.ID.TFString}}-side
|
|||
read_only = true
|
||||
}
|
||||
|
||||
{{ if .Workload.EnableTransparentProxy }}
|
||||
capabilities {
|
||||
add = ["NET_ADMIN"]
|
||||
}
|
||||
entrypoint = [ "/bin/tproxy-startup.sh" ]
|
||||
{{ end }}
|
||||
|
||||
env = [
|
||||
"DP_CONSUL_ADDRESSES=server.{{.Node.Cluster}}-consulcluster.lan",
|
||||
{{ if .Node.IsV2 }}
|
||||
"DP_PROXY_ID={{.Workload.Workload}}",
|
||||
{{ if .Enterprise }}
|
||||
"DP_PROXY_NAMESPACE={{.Workload.ID.Namespace}}",
|
||||
"DP_PROXY_PARTITION={{.Workload.ID.Partition}}",
|
||||
{{ end }}
|
||||
{{ else }}
|
||||
"DP_SERVICE_NODE_NAME={{.Node.PodName}}",
|
||||
"DP_PROXY_SERVICE_ID={{.Workload.ID.Name}}-sidecar-proxy",
|
||||
{{ if .Enterprise }}
|
||||
"DP_SERVICE_NAMESPACE={{.Workload.ID.Namespace}}",
|
||||
"DP_SERVICE_PARTITION={{.Workload.ID.Partition}}",
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
|
||||
{{ if .Token }}
|
||||
"DP_CREDENTIAL_TYPE=static",
|
||||
"DP_CREDENTIAL_STATIC_TOKEN={{.Token}}",
|
||||
{{ end }}
|
||||
|
||||
{{ if .Workload.EnableTransparentProxy }}
|
||||
"REDIRECT_TRAFFIC_ARGS=-exclude-inbound-port=19000",
|
||||
{{ end }}
|
||||
|
||||
// for demo purposes
|
||||
"DP_ENVOY_ADMIN_BIND_ADDRESS=0.0.0.0",
|
||||
"DP_ENVOY_ADMIN_BIND_PORT=19000",
|
||||
|
|
|
@ -12,147 +12,10 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
|
||||
"github.com/hashicorp/consul/testing/deployer/topology"
|
||||
)
|
||||
|
||||
func TestSprawl_CatalogV2(t *testing.T) {
|
||||
serversDC1 := newTopologyServerSet("dc1-server", 3, []string{"dc1", "wan"}, nil)
|
||||
|
||||
cfg := &topology.Config{
|
||||
Images: topology.Images{
|
||||
ConsulCE: "hashicorppreview/consul:1.17-dev",
|
||||
ConsulEnterprise: "hashicorppreview/consul-enterprise:1.17-dev",
|
||||
Dataplane: "hashicorppreview/consul-dataplane:1.3-dev",
|
||||
},
|
||||
Networks: []*topology.Network{
|
||||
{Name: "dc1"},
|
||||
{Name: "wan", Type: "wan"},
|
||||
},
|
||||
Clusters: []*topology.Cluster{
|
||||
{
|
||||
Enterprise: true,
|
||||
Name: "dc1",
|
||||
Nodes: topology.MergeSlices(serversDC1, []*topology.Node{
|
||||
{
|
||||
Kind: topology.NodeKindDataplane,
|
||||
Version: topology.NodeVersionV2,
|
||||
Name: "dc1-client1",
|
||||
Workloads: []*topology.Workload{
|
||||
{
|
||||
ID: topology.ID{Name: "ping"},
|
||||
Image: "rboyer/pingpong:latest",
|
||||
Port: 8080,
|
||||
EnvoyAdminPort: 19000,
|
||||
Command: []string{
|
||||
"-bind", "0.0.0.0:8080",
|
||||
"-dial", "127.0.0.1:9090",
|
||||
"-pong-chaos",
|
||||
"-dialfreq", "250ms",
|
||||
"-name", "ping",
|
||||
},
|
||||
Destinations: []*topology.Destination{{
|
||||
ID: topology.ID{Name: "pong"},
|
||||
LocalPort: 9090,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Kind: topology.NodeKindDataplane,
|
||||
Version: topology.NodeVersionV2,
|
||||
Name: "dc1-client2",
|
||||
Workloads: []*topology.Workload{
|
||||
{
|
||||
ID: topology.ID{Name: "pong"},
|
||||
Image: "rboyer/pingpong:latest",
|
||||
Port: 8080,
|
||||
EnvoyAdminPort: 19000,
|
||||
Command: []string{
|
||||
"-bind", "0.0.0.0:8080",
|
||||
"-dial", "127.0.0.1:9090",
|
||||
"-pong-chaos",
|
||||
"-dialfreq", "250ms",
|
||||
"-name", "pong",
|
||||
},
|
||||
Destinations: []*topology.Destination{{
|
||||
ID: topology.ID{Name: "ping"},
|
||||
LocalPort: 9090,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
InitialResources: []*pbresource.Resource{
|
||||
sprawltest.MustSetResourceData(t, &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Type: pbmesh.HTTPRouteType,
|
||||
Name: "test-http-route",
|
||||
},
|
||||
}, &pbmesh.HTTPRoute{
|
||||
ParentRefs: []*pbmesh.ParentReference{{
|
||||
Ref: &pbresource.Reference{
|
||||
Type: pbcatalog.ServiceType,
|
||||
Name: "test",
|
||||
},
|
||||
}},
|
||||
}),
|
||||
sprawltest.MustSetResourceData(t, &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Type: pbauth.TrafficPermissionsType,
|
||||
Name: "ping-perms",
|
||||
},
|
||||
}, &pbauth.TrafficPermissions{
|
||||
Destination: &pbauth.Destination{
|
||||
IdentityName: "ping",
|
||||
},
|
||||
Action: pbauth.Action_ACTION_ALLOW,
|
||||
Permissions: []*pbauth.Permission{{
|
||||
Sources: []*pbauth.Source{{
|
||||
IdentityName: "pong",
|
||||
}},
|
||||
}},
|
||||
}),
|
||||
sprawltest.MustSetResourceData(t, &pbresource.Resource{
|
||||
Id: &pbresource.ID{
|
||||
Type: pbauth.TrafficPermissionsType,
|
||||
Name: "pong-perms",
|
||||
},
|
||||
}, &pbauth.TrafficPermissions{
|
||||
Destination: &pbauth.Destination{
|
||||
IdentityName: "pong",
|
||||
},
|
||||
Action: pbauth.Action_ACTION_ALLOW,
|
||||
Permissions: []*pbauth.Permission{{
|
||||
Sources: []*pbauth.Source{{
|
||||
IdentityName: "ping",
|
||||
}},
|
||||
}},
|
||||
}),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
sp := sprawltest.Launch(t, cfg)
|
||||
|
||||
for _, cluster := range sp.Topology().Clusters {
|
||||
leader, err := sp.Leader(cluster.Name)
|
||||
require.NoError(t, err)
|
||||
t.Logf("%s: leader = %s", cluster.Name, leader.ID())
|
||||
|
||||
followers, err := sp.Followers(cluster.Name)
|
||||
require.NoError(t, err)
|
||||
for _, f := range followers {
|
||||
t.Logf("%s: follower = %s", cluster.Name, f.ID())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSprawl(t *testing.T) {
|
||||
serversDC1 := newTopologyServerSet("dc1-server", 3, []string{"dc1", "wan"}, nil)
|
||||
serversDC2 := newTopologyServerSet("dc2-server", 3, []string{"dc2", "wan"}, nil)
|
||||
|
@@ -201,7 +64,7 @@ func TestSprawl(t *testing.T) {
"-dialfreq", "250ms",
"-name", "ping",
},
Destinations: []*topology.Destination{{
Upstreams: []*topology.Upstream{{
ID: topology.ID{Name: "pong"},
LocalPort: 9090,
Peer: "peer-dc2-default",
@ -253,32 +116,7 @@ func TestSprawl(t *testing.T) {
|
|||
"-dialfreq", "250ms",
|
||||
"-name", "pong",
|
||||
},
|
||||
Destinations: []*topology.Destination{{
|
||||
ID: topology.ID{Name: "ping"},
|
||||
LocalPort: 9090,
|
||||
Peer: "peer-dc1-default",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Kind: topology.NodeKindDataplane,
|
||||
Version: topology.NodeVersionV2,
|
||||
Name: "dc2-client3",
|
||||
Workloads: []*topology.Workload{
|
||||
{
|
||||
ID: topology.ID{Name: "pong"},
|
||||
Image: "rboyer/pingpong:latest",
|
||||
Port: 8080,
|
||||
EnvoyAdminPort: 19000,
|
||||
Command: []string{
|
||||
"-bind", "0.0.0.0:8080",
|
||||
"-dial", "127.0.0.1:9090",
|
||||
"-pong-chaos",
|
||||
"-dialfreq", "250ms",
|
||||
"-name", "pong",
|
||||
},
|
||||
Destinations: []*topology.Destination{{
|
||||
Upstreams: []*topology.Upstream{{
|
||||
ID: topology.ID{Name: "ping"},
|
||||
LocalPort: 9090,
|
||||
Peer: "peer-dc1-default",
|
||||
|
|
|
@ -17,9 +17,6 @@ import (
|
|||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
pbauth "github.com/hashicorp/consul/proto-public/pbauth/v2beta1"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/testing/deployer/util"
|
||||
)
|
||||
|
@ -133,22 +130,6 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology, testingID string)
|
|||
return nil, fmt.Errorf("cluster %q has no nodes", c.Name)
|
||||
}
|
||||
|
||||
if len(c.Services) == 0 { // always initialize this regardless of v2-ness, because we might late-enable it below
|
||||
c.Services = make(map[ID]*pbcatalog.Service)
|
||||
}
|
||||
|
||||
var implicitV2Services bool
|
||||
if len(c.Services) > 0 {
|
||||
c.EnableV2 = true
|
||||
for name, svc := range c.Services {
|
||||
if svc.Workloads != nil {
|
||||
return nil, fmt.Errorf("the workloads field for v2 service %q is not user settable", name)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
implicitV2Services = true
|
||||
}
|
||||
|
||||
if c.TLSVolumeName != "" {
|
||||
return nil, fmt.Errorf("user cannot specify the TLSVolumeName field")
|
||||
}
|
||||
|
@ -177,31 +158,17 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology, testingID string)
|
|||
addTenancy(ce.GetPartition(), ce.GetNamespace())
|
||||
}
|
||||
|
||||
if len(c.InitialResources) > 0 {
|
||||
c.EnableV2 = true
|
||||
}
|
||||
for _, res := range c.InitialResources {
|
||||
if res.Id.Tenancy == nil {
|
||||
res.Id.Tenancy = &pbresource.Tenancy{}
|
||||
}
|
||||
// TODO(peering/v2) prevent non-local peer resources
|
||||
res.Id.Tenancy.Partition = PartitionOrDefault(res.Id.Tenancy.Partition)
|
||||
if !util.IsTypePartitionScoped(res.Id.Type) {
|
||||
res.Id.Tenancy.Namespace = NamespaceOrDefault(res.Id.Tenancy.Namespace)
|
||||
}
|
||||
|
||||
switch {
|
||||
case util.EqualType(pbauth.ComputedTrafficPermissionsType, res.Id.GetType()),
|
||||
util.EqualType(pbauth.WorkloadIdentityType, res.Id.GetType()):
|
||||
fallthrough
|
||||
case util.EqualType(pbmesh.ComputedRoutesType, res.Id.GetType()),
|
||||
util.EqualType(pbmesh.ProxyStateTemplateType, res.Id.GetType()):
|
||||
fallthrough
|
||||
case util.EqualType(pbcatalog.HealthChecksType, res.Id.GetType()),
|
||||
util.EqualType(pbcatalog.HealthStatusType, res.Id.GetType()),
|
||||
util.EqualType(pbcatalog.NodeType, res.Id.GetType()),
|
||||
util.EqualType(pbcatalog.ServiceEndpointsType, res.Id.GetType()),
|
||||
util.EqualType(pbcatalog.WorkloadType, res.Id.GetType()):
|
||||
// TODO: if we reintroduce new resources for v1, allow them here
|
||||
if true {
|
||||
return nil, fmt.Errorf("you should not create a resource of type %q this way", util.TypeToString(res.Id.Type))
|
||||
}
|
||||
|
||||
|
@ -222,20 +189,6 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology, testingID string)
|
|||
return nil, fmt.Errorf("cluster %q node %q has invalid kind: %s", c.Name, n.Name, n.Kind)
|
||||
}
|
||||
|
||||
if n.Version == NodeVersionUnknown {
|
||||
n.Version = NodeVersionV1
|
||||
}
|
||||
switch n.Version {
|
||||
case NodeVersionV1:
|
||||
case NodeVersionV2:
|
||||
if n.Kind == NodeKindClient {
|
||||
return nil, fmt.Errorf("v2 does not support client agents at this time")
|
||||
}
|
||||
c.EnableV2 = true
|
||||
default:
|
||||
return nil, fmt.Errorf("cluster %q node %q has invalid version: %s", c.Name, n.Name, n.Version)
|
||||
}
|
||||
|
||||
n.Partition = PartitionOrDefault(n.Partition)
|
||||
if !IsValidLabel(n.Partition) {
|
||||
return nil, fmt.Errorf("node partition is not valid: %s", n.Partition)
|
||||
|
@ -318,12 +271,6 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology, testingID string)
|
|||
return nil, fmt.Errorf("cluster %q node %q has more than one public address", c.Name, n.Name)
|
||||
}
|
||||
|
||||
if len(n.Services) > 0 {
|
||||
logger.Warn("please use Node.Workloads instead of Node.Services")
|
||||
n.Workloads = append(n.Workloads, n.Services...)
|
||||
n.Services = nil
|
||||
}
|
||||
|
||||
if n.IsDataplane() && len(n.Workloads) > 1 {
|
||||
// Our use of consul-dataplane here is supposed to mimic that
|
||||
// of consul-k8s, which ultimately has one IP per Service, so
|
||||
|
@@ -344,10 +291,6 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology, testingID string)

// Denormalize
wrk.Node = n
wrk.NodeVersion = n.Version
if n.IsV2() {
wrk.Workload = wrk.ID.Name + "-" + n.Name
}

if !IsValidLabel(wrk.ID.Partition) {
return nil, fmt.Errorf("service partition is not valid: %s", wrk.ID.Partition)
@ -404,136 +347,42 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology, testingID string)
|
|||
// return nil, fmt.Errorf("service has invalid protocol: %s", wrk.Protocol)
|
||||
// }
|
||||
|
||||
defaultDestination := func(dest *Destination) error {
|
||||
defaultUpstream := func(us *Upstream) error {
|
||||
// Default to that of the enclosing service.
|
||||
if dest.Peer == "" {
|
||||
if dest.ID.Partition == "" {
|
||||
dest.ID.Partition = wrk.ID.Partition
|
||||
if us.Peer == "" {
|
||||
if us.ID.Partition == "" {
|
||||
us.ID.Partition = wrk.ID.Partition
|
||||
}
|
||||
if dest.ID.Namespace == "" {
|
||||
dest.ID.Namespace = wrk.ID.Namespace
|
||||
if us.ID.Namespace == "" {
|
||||
us.ID.Namespace = wrk.ID.Namespace
|
||||
}
|
||||
} else {
|
||||
if dest.ID.Partition != "" {
|
||||
dest.ID.Partition = "" // irrelevant here; we'll set it to the value of the OTHER side for plumbing purposes in tests
|
||||
if us.ID.Partition != "" {
|
||||
us.ID.Partition = "" // irrelevant here; we'll set it to the value of the OTHER side for plumbing purposes in tests
|
||||
}
|
||||
dest.ID.Namespace = NamespaceOrDefault(dest.ID.Namespace)
|
||||
foundPeerNames[dest.Peer] = struct{}{}
|
||||
us.ID.Namespace = NamespaceOrDefault(us.ID.Namespace)
|
||||
foundPeerNames[us.Peer] = struct{}{}
|
||||
}
|
||||
|
||||
addTenancy(dest.ID.Partition, dest.ID.Namespace)
|
||||
addTenancy(us.ID.Partition, us.ID.Namespace)
|
||||
|
||||
if dest.Implied {
|
||||
if dest.PortName == "" {
|
||||
return fmt.Errorf("implicit destinations must use port names in v2")
|
||||
}
|
||||
} else {
|
||||
if dest.LocalAddress == "" {
|
||||
// v1 defaults to 127.0.0.1 but v2 does not. Safe to do this generally though.
|
||||
dest.LocalAddress = "127.0.0.1"
|
||||
}
|
||||
if dest.PortName != "" && n.IsV1() {
|
||||
return fmt.Errorf("explicit destinations cannot use port names in v1")
|
||||
}
|
||||
if dest.PortName == "" && n.IsV2() {
|
||||
// Assume this is a v1->v2 conversion and name it.
|
||||
dest.PortName = V1DefaultPortName
|
||||
}
|
||||
if us.LocalAddress == "" {
|
||||
// v1 consul code defaults this to 127.0.0.1, but safer to not rely upon that.
|
||||
us.LocalAddress = "127.0.0.1"
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, dest := range wrk.Destinations {
|
||||
if err := defaultDestination(dest); err != nil {
|
||||
for _, us := range wrk.Upstreams {
|
||||
if err := defaultUpstream(us); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if n.IsV2() {
|
||||
for _, dest := range wrk.ImpliedDestinations {
|
||||
dest.Implied = true
|
||||
if err := defaultDestination(dest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if len(wrk.ImpliedDestinations) > 0 {
|
||||
return nil, fmt.Errorf("v1 does not support implied destinations yet")
|
||||
}
|
||||
}
|
||||
|
||||
if err := wrk.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("cluster %q node %q service %q is not valid: %w", c.Name, n.Name, wrk.ID.String(), err)
|
||||
}
|
||||
|
||||
if wrk.EnableTransparentProxy && !n.IsDataplane() {
|
||||
return nil, fmt.Errorf("cannot enable tproxy on a non-dataplane node")
|
||||
}
|
||||
|
||||
if n.IsV2() {
|
||||
if implicitV2Services {
|
||||
wrk.V2Services = []string{wrk.ID.Name}
|
||||
|
||||
var svcPorts []*pbcatalog.ServicePort
|
||||
for name, cfg := range wrk.Ports {
|
||||
svcPorts = append(svcPorts, &pbcatalog.ServicePort{
|
||||
TargetPort: name,
|
||||
Protocol: cfg.ActualProtocol,
|
||||
})
|
||||
}
|
||||
sort.Slice(svcPorts, func(i, j int) bool {
|
||||
a, b := svcPorts[i], svcPorts[j]
|
||||
if a.TargetPort < b.TargetPort {
|
||||
return true
|
||||
} else if a.TargetPort > b.TargetPort {
|
||||
return false
|
||||
}
|
||||
return a.Protocol < b.Protocol
|
||||
})
|
||||
|
||||
v2svc := &pbcatalog.Service{
|
||||
Workloads: &pbcatalog.WorkloadSelector{},
|
||||
Ports: svcPorts,
|
||||
}
|
||||
|
||||
prev, ok := c.Services[wrk.ID]
|
||||
if !ok {
|
||||
c.Services[wrk.ID] = v2svc
|
||||
prev = v2svc
|
||||
}
|
||||
if prev.Workloads == nil {
|
||||
prev.Workloads = &pbcatalog.WorkloadSelector{}
|
||||
}
|
||||
prev.Workloads.Names = append(prev.Workloads.Names, wrk.Workload)
|
||||
|
||||
} else {
|
||||
for _, name := range wrk.V2Services {
|
||||
v2ID := NewServiceID(name, wrk.ID.Namespace, wrk.ID.Partition)
|
||||
|
||||
v2svc, ok := c.Services[v2ID]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("cluster %q node %q service %q has a v2 service reference that does not exist %q",
|
||||
c.Name, n.Name, wrk.ID.String(), name)
|
||||
}
|
||||
if v2svc.Workloads == nil {
|
||||
v2svc.Workloads = &pbcatalog.WorkloadSelector{}
|
||||
}
|
||||
v2svc.Workloads.Names = append(v2svc.Workloads.Names, wrk.Workload)
|
||||
}
|
||||
}
|
||||
|
||||
if wrk.WorkloadIdentity == "" {
|
||||
wrk.WorkloadIdentity = wrk.ID.Name
|
||||
}
|
||||
} else {
|
||||
if len(wrk.V2Services) > 0 {
|
||||
return nil, fmt.Errorf("cannot specify v2 services for v1")
|
||||
}
|
||||
if wrk.WorkloadIdentity != "" {
|
||||
return nil, fmt.Errorf("cannot specify workload identities for v1")
|
||||
}
|
||||
}
|
||||
}
|
||||
return foundPeerNames, nil
|
||||
}
|
||||
|
@ -553,53 +402,6 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology, testingID string)
|
|||
maps.Copy(foundPeerNames, peerNames)
|
||||
}
|
||||
|
||||
// Default anything in the toplevel services map.
|
||||
for _, svc := range c.Services {
|
||||
for _, port := range svc.Ports {
|
||||
if port.Protocol == pbcatalog.Protocol_PROTOCOL_UNSPECIFIED {
|
||||
port.Protocol = pbcatalog.Protocol_PROTOCOL_TCP
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := assignVirtualIPs(c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if c.EnableV2 {
|
||||
// Populate the VirtualPort field on all destinations.
|
||||
for _, n := range c.Nodes {
|
||||
for _, wrk := range n.Workloads {
|
||||
for _, dest := range wrk.ImpliedDestinations {
|
||||
res, ok := c.Services[dest.ID]
|
||||
if ok {
|
||||
for _, sp := range res.Ports {
|
||||
if sp.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
|
||||
continue
|
||||
}
|
||||
if sp.MatchesPortId(dest.PortName) {
|
||||
dest.VirtualPort = sp.VirtualPort
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, dest := range wrk.Destinations {
|
||||
res, ok := c.Services[dest.ID]
|
||||
if ok {
|
||||
for _, sp := range res.Ports {
|
||||
if sp.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
|
||||
continue
|
||||
}
|
||||
if sp.MatchesPortId(dest.PortName) {
|
||||
dest.VirtualPort = sp.VirtualPort
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Explode this into the explicit list based on stray references made.
|
||||
c.Partitions = nil
|
||||
for ap, nsMap := range tenancies {
|
||||
|
@ -723,40 +525,25 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology, testingID string)
|
|||
}
|
||||
}
|
||||
|
||||
// after we decoded the peering stuff, we can fill in some computed data in the destinations
|
||||
// after we decoded the peering stuff, we can fill in some computed data in the upstreams
|
||||
for _, c := range clusters {
|
||||
c.Peerings = clusteredPeerings[c.Name]
|
||||
for _, n := range c.Nodes {
|
||||
for _, wrk := range n.Workloads {
|
||||
for _, dest := range wrk.Destinations {
|
||||
if dest.Peer == "" {
|
||||
dest.Cluster = c.Name
|
||||
dest.Peering = nil
|
||||
for _, us := range wrk.Upstreams {
|
||||
if us.Peer == "" {
|
||||
us.Cluster = c.Name
|
||||
us.Peering = nil
|
||||
continue
|
||||
}
|
||||
remotePeer, ok := c.Peerings[dest.Peer]
|
||||
remotePeer, ok := c.Peerings[us.Peer]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("not possible")
|
||||
}
|
||||
dest.Cluster = remotePeer.Link.Name
|
||||
dest.Peering = remotePeer.Link
|
||||
us.Cluster = remotePeer.Link.Name
|
||||
us.Peering = remotePeer.Link
|
||||
// this helps in generating fortio assertions; otherwise field is ignored
|
||||
dest.ID.Partition = remotePeer.Link.Partition
|
||||
}
|
||||
for _, dest := range wrk.ImpliedDestinations {
|
||||
if dest.Peer == "" {
|
||||
dest.Cluster = c.Name
|
||||
dest.Peering = nil
|
||||
continue
|
||||
}
|
||||
remotePeer, ok := c.Peerings[dest.Peer]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("not possible")
|
||||
}
|
||||
dest.Cluster = remotePeer.Link.Name
|
||||
dest.Peering = remotePeer.Link
|
||||
// this helps in generating fortio assertions; otherwise field is ignored
|
||||
dest.ID.Partition = remotePeer.Link.Partition
|
||||
us.ID.Partition = remotePeer.Link.Partition
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -825,51 +612,6 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology, testingID string)
|
|||
return t, nil
|
||||
}
|
||||
|
||||
func assignVirtualIPs(c *Cluster) error {
|
||||
lastVIPIndex := 1
|
||||
for _, svcData := range c.Services {
|
||||
lastVIPIndex++
|
||||
if lastVIPIndex > 250 {
|
||||
return fmt.Errorf("too many ips using this approach to VIPs")
|
||||
}
|
||||
svcData.VirtualIps = []string{
|
||||
fmt.Sprintf("10.244.0.%d", lastVIPIndex),
|
||||
}
|
||||
|
||||
// populate virtual ports where we forgot them
|
||||
var (
|
||||
usedPorts = make(map[uint32]struct{})
|
||||
next = uint32(8080)
|
||||
)
|
||||
for _, sp := range svcData.Ports {
|
||||
if sp.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
|
||||
continue
|
||||
}
|
||||
if sp.VirtualPort > 0 {
|
||||
usedPorts[sp.VirtualPort] = struct{}{}
|
||||
}
|
||||
}
|
||||
for _, sp := range svcData.Ports {
|
||||
if sp.Protocol == pbcatalog.Protocol_PROTOCOL_MESH {
|
||||
continue
|
||||
}
|
||||
if sp.VirtualPort > 0 {
|
||||
continue
|
||||
}
|
||||
RETRY:
|
||||
attempt := next
|
||||
next++
|
||||
_, used := usedPorts[attempt]
|
||||
if used {
|
||||
goto RETRY
|
||||
}
|
||||
usedPorts[attempt] = struct{}{}
|
||||
sp.VirtualPort = attempt
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const permutedWarning = "use the disabled node kind if you want to ignore a node"
|
||||
|
||||
func inheritAndValidateNodes(
|
||||
|
@@ -893,7 +635,6 @@ func inheritAndValidateNodes(
}

if currNode.Node.Kind != node.Kind ||
currNode.Node.Version != node.Version ||
currNode.Node.Partition != node.Partition ||
currNode.Node.Name != node.Name ||
currNode.Node.Index != node.Index ||

@@ -930,7 +671,6 @@ func inheritAndValidateNodes(

if currWrk.ID != wrk.ID ||
currWrk.Port != wrk.Port ||
!maps.Equal(currWrk.Ports, wrk.Ports) ||
currWrk.EnvoyAdminPort != wrk.EnvoyAdminPort ||
currWrk.EnvoyPublicListenerPort != wrk.EnvoyPublicListenerPort ||
isSame(currWrk.Command, wrk.Command) != nil ||

@@ -39,5 +39,5 @@ func NewServiceID(name, namespace, partition string) ID {
return NewID(name, namespace, partition)
}

// Deprecated: Destination
type Upstream = Destination
// Deprecated:
type Destination = Upstream
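The two declarations above swap which name is canonical: before this change `Upstream` was the deprecated alias for `Destination`, afterwards `Destination` is the deprecated alias for `Upstream`. Because this is a Go type alias rather than a distinct type, callers written against either name keep compiling. A minimal sketch of that property, using only identifiers that appear in this diff (`topology.Upstream`, `topology.Destination`, `topology.ID`, `LocalPort`); the package name and function are illustrative:

    package example

    import "github.com/hashicorp/consul/testing/deployer/topology"

    func upstreams() []*topology.Upstream {
    	// New-style declaration using the canonical name.
    	ping := &topology.Upstream{
    		ID:        topology.ID{Name: "ping"},
    		LocalPort: 9090,
    	}

    	// Old-style reference still type-checks: Destination is an alias, not a new type.
    	var legacy *topology.Destination = ping

    	return []*topology.Upstream{legacy}
    }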
@@ -10,24 +10,16 @@ import (
)

// ComputeRelationships will analyze a full topology and generate all of the
// caller/destination information for all of them.
// caller/upstream information for all of them.
func (t *Topology) ComputeRelationships() []Relationship {
var out []Relationship
for _, cluster := range t.Clusters {
for _, n := range cluster.Nodes {
for _, w := range n.Workloads {
for _, dest := range w.Destinations {
for _, us := range w.Upstreams {
out = append(out, Relationship{
Caller: w,
Destination: dest,
Upstream: dest,
})
}
for _, dest := range w.ImpliedDestinations {
out = append(out, Relationship{
Caller: w,
Destination: dest,
Upstream: dest,
Caller: w,
Upstream: us,
})
}
}
@@ -43,18 +35,14 @@ func RenderRelationships(ships []Relationship) string {
w := tabwriter.NewWriter(&buf, 0, 0, 3, ' ', tabwriter.Debug)
fmt.Fprintf(w, "CALLER\tnode\tservice\tport\tDEST\tservice\t\n")
for _, r := range ships {
suffix := ""
if r.Destination.Implied {
suffix = " (implied)"
}
fmt.Fprintf(w,
"%s\t%s\t%s\t%d\t%s\t%s\t\n",
r.callingCluster(),
r.Caller.Node.ID().String(),
r.Caller.ID.String(),
r.Destination.LocalPort,
r.destinationCluster(),
r.Destination.ID.String()+suffix,
r.Upstream.LocalPort,
r.upstreamCluster(),
r.Upstream.ID.String(),
)
}
fmt.Fprintf(w, "\t\t\t\t\t\t\n")
@@ -64,27 +52,19 @@ func RenderRelationships(ships []Relationship) string {
}

type Relationship struct {
Caller *Workload
Destination *Destination

// Deprecated: Destination
Upstream *Destination
Caller *Workload
Upstream *Upstream
}

func (r Relationship) String() string {
suffix := ""
if r.Destination.PortName != "" {
suffix = " port " + r.Destination.PortName
}
return fmt.Sprintf(
"%s on %s in %s via :%d => %s in %s%s",
"%s on %s in %s via :%d => %s in %s",
r.Caller.ID.String(),
r.Caller.Node.ID().String(),
r.callingCluster(),
r.Destination.LocalPort,
r.Destination.ID.String(),
r.destinationCluster(),
suffix,
r.Upstream.LocalPort,
r.Upstream.ID.String(),
r.upstreamCluster(),
)
}

@@ -92,6 +72,6 @@ func (r Relationship) callingCluster() string {
return r.Caller.Node.Cluster
}

func (r Relationship) destinationCluster() string {
return r.Destination.Cluster
func (r Relationship) upstreamCluster() string {
return r.Upstream.Cluster
}
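With this file in its post-change form, relationship reporting is keyed purely on `Caller`/`Upstream`; the `Destination` and `Implied` fields are gone from `Relationship`. A rough usage sketch against that API (the `dumpRelationships` helper and its `topo` argument are illustrative, not part of the patch):

    package example

    import (
    	"fmt"

    	"github.com/hashicorp/consul/testing/deployer/topology"
    )

    // dumpRelationships prints the tabular summary plus one line per caller/upstream pair.
    func dumpRelationships(topo *topology.Topology) {
    	ships := topo.ComputeRelationships()
    	fmt.Print(topology.RenderRelationships(ships))
    	for _, r := range ships {
    		fmt.Printf("%s dials %s via local port %d\n",
    			r.Caller.ID.String(), r.Upstream.ID.String(), r.Upstream.LocalPort)
    	}
    }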
@@ -10,17 +10,11 @@ import (
"net/netip"
"reflect"
"sort"
"strings"

"github.com/hashicorp/consul/api"
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
"github.com/hashicorp/consul/proto-public/pbresource"
)

const (
V1DefaultPortName = "legacy"
)

type Topology struct {
ID string

@ -252,14 +246,6 @@ type Cluster struct {
|
|||
// components.
|
||||
Enterprise bool `json:",omitempty"`
|
||||
|
||||
// Services is a forward declaration of V2 services. This goes in hand with
|
||||
// the V2Services field on the Service (instance) struct.
|
||||
//
|
||||
// Use of this is optional. If you elect not to use it, then v2 Services
|
||||
// definitions are inferred from the list of service instances defined on
|
||||
// the nodes in this cluster.
|
||||
Services map[ID]*pbcatalog.Service `json:"omitempty"`
|
||||
|
||||
// Nodes is the definition of the nodes (agent-less and agent-ful).
|
||||
Nodes []*Node
|
||||
|
||||
|
@ -295,14 +281,6 @@ type Cluster struct {
|
|||
// Denormalized during compile.
|
||||
Peerings map[string]*PeerCluster `json:",omitempty"`
|
||||
|
||||
// EnableV2 activates V2 on the servers. If any node in the cluster needs
|
||||
// V2 this will be turned on automatically.
|
||||
EnableV2 bool `json:",omitempty"`
|
||||
|
||||
// EnableV2Tenancy activates V2 tenancy on the servers. If not enabled,
|
||||
// V2 resources are bridged to V1 tenancy counterparts.
|
||||
EnableV2Tenancy bool `json:",omitempty"`
|
||||
|
||||
// Segments is a map of network segment name and the ports
|
||||
Segments map[string]int
|
||||
|
||||
|
@@ -505,14 +483,6 @@ const (
NodeKindDataplane NodeKind = "dataplane"
)

type NodeVersion string

const (
NodeVersionUnknown NodeVersion = ""
NodeVersionV1 NodeVersion = "v1"
NodeVersionV2 NodeVersion = "v2"
)

type NetworkSegment struct {
Name string
Port int

@@ -521,7 +491,6 @@ type NetworkSegment struct {
// TODO: rename pod
type Node struct {
Kind NodeKind
Version NodeVersion
Partition string // will be not empty
Name string // logical name

@ -534,8 +503,6 @@ type Node struct {
|
|||
|
||||
Addresses []*Address
|
||||
Workloads []*Workload
|
||||
// Deprecated: use Workloads
|
||||
Services []*Workload
|
||||
|
||||
// denormalized at topology compile
|
||||
Cluster string
|
||||
|
@@ -685,14 +652,6 @@ func (n *Node) PublicProxyPort() int {
panic("node has no public network")
}

func (n *Node) IsV2() bool {
return n.Version == NodeVersionV2
}

func (n *Node) IsV1() bool {
return !n.IsV2()
}

func (n *Node) IsServer() bool {
return n.Kind == NodeKindServer
}

@ -725,15 +684,6 @@ func (n *Node) SortedWorkloads() []*Workload {
|
|||
return out
|
||||
}
|
||||
|
||||
func (n *Node) NeedsTransparentProxy() bool {
|
||||
for _, svc := range n.Workloads {
|
||||
if svc.EnableTransparentProxy {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// DigestExposedPorts returns true if it was changed.
|
||||
func (n *Node) DigestExposedPorts(ports map[int]int) bool {
|
||||
if reflect.DeepEqual(n.usedPorts, ports) {
|
||||
|
@ -768,63 +718,12 @@ func (n *Node) WorkloadByID(id ID) *Workload {
|
|||
panic("workload not found: " + id.String())
|
||||
}
|
||||
|
||||
// Protocol is a convenience function to use when authoring topology configs.
|
||||
func Protocol(s string) (pbcatalog.Protocol, bool) {
|
||||
switch strings.ToLower(s) {
|
||||
case "tcp":
|
||||
return pbcatalog.Protocol_PROTOCOL_TCP, true
|
||||
case "http":
|
||||
return pbcatalog.Protocol_PROTOCOL_HTTP, true
|
||||
case "http2":
|
||||
return pbcatalog.Protocol_PROTOCOL_HTTP2, true
|
||||
case "grpc":
|
||||
return pbcatalog.Protocol_PROTOCOL_GRPC, true
|
||||
case "mesh":
|
||||
return pbcatalog.Protocol_PROTOCOL_MESH, true
|
||||
default:
|
||||
return pbcatalog.Protocol_PROTOCOL_UNSPECIFIED, false
|
||||
}
|
||||
}
|
||||
|
||||
type Port struct {
|
||||
Number int
|
||||
Protocol string `json:",omitempty"`
|
||||
|
||||
// denormalized at topology compile
|
||||
ActualProtocol pbcatalog.Protocol `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Workload struct {
|
||||
ID ID
|
||||
Image string
|
||||
|
||||
// Port is the v1 single-port of this service.
|
||||
Port int `json:",omitempty"`
|
||||
|
||||
// Ports is the v2 multi-port list for this service.
|
||||
//
|
||||
// This only applies for multi-port (v2).
|
||||
Ports map[string]*Port `json:",omitempty"`
|
||||
|
||||
// V2Services contains service names (which are merged with the tenancy
|
||||
// info from ID) to resolve services in the Services slice in the Cluster
|
||||
// definition.
|
||||
//
|
||||
// If omitted it is inferred that the ID.Name field is the singular service
|
||||
// for this workload.
|
||||
//
|
||||
// This only applies for multi-port (v2).
|
||||
V2Services []string `json:",omitempty"`
|
||||
|
||||
// WorkloadIdentity contains named WorkloadIdentity to assign to this
|
||||
// workload.
|
||||
//
|
||||
// If omitted it is inferred that the ID.Name field is the singular
|
||||
// identity for this workload.
|
||||
//
|
||||
// This only applies for multi-port (v2).
|
||||
WorkloadIdentity string `json:",omitempty"`
|
||||
|
||||
Disabled bool `json:",omitempty"` // TODO
|
||||
|
||||
// TODO: expose extra port here?
|
||||
|
@ -842,55 +741,19 @@ type Workload struct {
|
|||
Command []string `json:",omitempty"` // optional
|
||||
Env []string `json:",omitempty"` // optional
|
||||
|
||||
EnableTransparentProxy bool `json:",omitempty"`
|
||||
DisableServiceMesh bool `json:",omitempty"`
|
||||
IsMeshGateway bool `json:",omitempty"`
|
||||
Destinations []*Destination `json:",omitempty"`
|
||||
ImpliedDestinations []*Destination `json:",omitempty"`
|
||||
|
||||
// Deprecated: Destinations
|
||||
Upstreams []*Destination `json:",omitempty"`
|
||||
// Deprecated: ImpliedDestinations
|
||||
ImpliedUpstreams []*Destination `json:",omitempty"`
|
||||
DisableServiceMesh bool `json:",omitempty"`
|
||||
IsMeshGateway bool `json:",omitempty"`
|
||||
Upstreams []*Upstream `json:",omitempty"`
|
||||
|
||||
// denormalized at topology compile
|
||||
Node *Node `json:"-"`
|
||||
NodeVersion NodeVersion `json:"-"`
|
||||
Workload string `json:"-"`
|
||||
Node *Node `json:"-"`
|
||||
}
|
||||
|
||||
func (w *Workload) ExposedPort(name string) int {
|
||||
func (w *Workload) ExposedPort() int {
|
||||
if w.Node == nil {
|
||||
panic("ExposedPort cannot be called until after Compile")
|
||||
}
|
||||
|
||||
var internalPort int
|
||||
if name == "" {
|
||||
internalPort = w.Port
|
||||
} else {
|
||||
port, ok := w.Ports[name]
|
||||
if !ok {
|
||||
panic("port with name " + name + " not present on service")
|
||||
}
|
||||
internalPort = port.Number
|
||||
}
|
||||
|
||||
return w.Node.ExposedPort(internalPort)
|
||||
}
|
||||
|
||||
func (w *Workload) PortOrDefault(name string) int {
|
||||
if len(w.Ports) > 0 {
|
||||
return w.Ports[name].Number
|
||||
}
|
||||
return w.Port
|
||||
}
|
||||
|
||||
func (w *Workload) IsV2() bool {
|
||||
return w.NodeVersion == NodeVersionV2
|
||||
}
|
||||
|
||||
func (w *Workload) IsV1() bool {
|
||||
return !w.IsV2()
|
||||
return w.Node.ExposedPort(w.Port)
|
||||
}
|
||||
|
||||
func (w *Workload) inheritFromExisting(existing *Workload) {
|
||||
|
@ -899,19 +762,7 @@ func (w *Workload) inheritFromExisting(existing *Workload) {
|
|||
|
||||
func (w *Workload) ports() []int {
|
||||
var out []int
|
||||
if len(w.Ports) > 0 {
|
||||
seen := make(map[int]struct{})
|
||||
for _, port := range w.Ports {
|
||||
if port == nil {
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[port.Number]; !ok {
|
||||
// It's totally fine to expose the same port twice in a workload.
|
||||
seen[port.Number] = struct{}{}
|
||||
out = append(out, port.Number)
|
||||
}
|
||||
}
|
||||
} else if w.Port > 0 {
|
||||
if w.Port > 0 {
|
||||
out = append(out, w.Port)
|
||||
}
|
||||
if w.EnvoyAdminPort > 0 {
|
||||
|
@ -920,9 +771,9 @@ func (w *Workload) ports() []int {
|
|||
if w.EnvoyPublicListenerPort > 0 {
|
||||
out = append(out, w.EnvoyPublicListenerPort)
|
||||
}
|
||||
for _, dest := range w.Destinations {
|
||||
if dest.LocalPort > 0 {
|
||||
out = append(out, dest.LocalPort)
|
||||
for _, us := range w.Upstreams {
|
||||
if us.LocalPort > 0 {
|
||||
out = append(out, us.LocalPort)
|
||||
}
|
||||
}
|
||||
return out
|
||||
|
@ -950,78 +801,14 @@ func (w *Workload) Validate() error {
|
|||
return fmt.Errorf("service image is required")
|
||||
}
|
||||
|
||||
if len(w.Upstreams) > 0 {
|
||||
w.Destinations = append(w.Destinations, w.Upstreams...)
|
||||
w.Upstreams = nil
|
||||
}
|
||||
if len(w.ImpliedUpstreams) > 0 {
|
||||
w.ImpliedDestinations = append(w.ImpliedDestinations, w.ImpliedUpstreams...)
|
||||
w.ImpliedUpstreams = nil
|
||||
}
|
||||
|
||||
if w.IsV2() {
|
||||
if len(w.Ports) > 0 && w.Port > 0 {
|
||||
return fmt.Errorf("cannot specify both singleport and multiport on service in v2")
|
||||
}
|
||||
if w.Port > 0 {
|
||||
w.Ports = map[string]*Port{
|
||||
V1DefaultPortName: {
|
||||
Number: w.Port,
|
||||
Protocol: "tcp",
|
||||
},
|
||||
}
|
||||
w.Port = 0
|
||||
}
|
||||
if w.Ports == nil {
|
||||
w.Ports = make(map[string]*Port)
|
||||
}
|
||||
|
||||
if !w.DisableServiceMesh && w.EnvoyPublicListenerPort > 0 {
|
||||
w.Ports["mesh"] = &Port{
|
||||
Number: w.EnvoyPublicListenerPort,
|
||||
Protocol: "mesh",
|
||||
}
|
||||
}
|
||||
|
||||
for name, port := range w.Ports {
|
||||
if port == nil {
|
||||
return fmt.Errorf("cannot be nil")
|
||||
}
|
||||
if port.Number <= 0 {
|
||||
return fmt.Errorf("service has invalid port number %q", name)
|
||||
}
|
||||
if port.ActualProtocol != pbcatalog.Protocol_PROTOCOL_UNSPECIFIED {
|
||||
return fmt.Errorf("user cannot specify ActualProtocol field")
|
||||
}
|
||||
|
||||
proto, valid := Protocol(port.Protocol)
|
||||
if !valid {
|
||||
return fmt.Errorf("service has invalid port protocol %q", port.Protocol)
|
||||
}
|
||||
port.ActualProtocol = proto
|
||||
}
|
||||
} else {
|
||||
if len(w.Ports) > 0 {
|
||||
return fmt.Errorf("cannot specify multiport on service in v1")
|
||||
}
|
||||
if w.Port <= 0 {
|
||||
return fmt.Errorf("service has invalid port")
|
||||
}
|
||||
if w.EnableTransparentProxy {
|
||||
return fmt.Errorf("tproxy does not work with v1 yet")
|
||||
}
|
||||
if w.Port <= 0 {
|
||||
return fmt.Errorf("service has invalid port")
|
||||
}
|
||||
if w.DisableServiceMesh && w.IsMeshGateway {
|
||||
return fmt.Errorf("cannot disable service mesh and still run a mesh gateway")
|
||||
}
|
||||
if w.DisableServiceMesh && len(w.Destinations) > 0 {
|
||||
return fmt.Errorf("cannot disable service mesh and configure destinations")
|
||||
}
|
||||
if w.DisableServiceMesh && len(w.ImpliedDestinations) > 0 {
|
||||
return fmt.Errorf("cannot disable service mesh and configure implied destinations")
|
||||
}
|
||||
if w.DisableServiceMesh && w.EnableTransparentProxy {
|
||||
return fmt.Errorf("cannot disable service mesh and activate tproxy")
|
||||
if w.DisableServiceMesh && len(w.Upstreams) > 0 {
|
||||
return fmt.Errorf("cannot disable service mesh and configure upstreams")
|
||||
}
|
||||
|
||||
if w.DisableServiceMesh {
|
||||
|
@ -1034,59 +821,36 @@ func (w *Workload) Validate() error {
|
|||
}
|
||||
}
|
||||
|
||||
for _, dest := range w.Destinations {
|
||||
if dest.ID.Name == "" {
|
||||
return fmt.Errorf("destination service name is required")
|
||||
for _, us := range w.Upstreams {
|
||||
if us.ID.Name == "" {
|
||||
return fmt.Errorf("upstream service name is required")
|
||||
}
|
||||
if dest.LocalPort <= 0 {
|
||||
return fmt.Errorf("destination local port is required")
|
||||
if us.LocalPort <= 0 {
|
||||
return fmt.Errorf("upstream local port is required")
|
||||
}
|
||||
|
||||
if dest.LocalAddress != "" {
|
||||
ip := net.ParseIP(dest.LocalAddress)
|
||||
if us.LocalAddress != "" {
|
||||
ip := net.ParseIP(us.LocalAddress)
|
||||
if ip == nil {
|
||||
return fmt.Errorf("destination local address is invalid: %s", dest.LocalAddress)
|
||||
return fmt.Errorf("upstream local address is invalid: %s", us.LocalAddress)
|
||||
}
|
||||
}
|
||||
if dest.Implied {
|
||||
return fmt.Errorf("implied field cannot be set")
|
||||
}
|
||||
}
|
||||
for _, dest := range w.ImpliedDestinations {
|
||||
if dest.ID.Name == "" {
|
||||
return fmt.Errorf("implied destination service name is required")
|
||||
}
|
||||
if dest.LocalPort > 0 {
|
||||
return fmt.Errorf("implied destination local port cannot be set")
|
||||
}
|
||||
if dest.LocalAddress != "" {
|
||||
return fmt.Errorf("implied destination local address cannot be set")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}

type Destination struct {
type Upstream struct {
ID ID
LocalAddress string `json:",omitempty"` // defaults to 127.0.0.1
LocalPort int
Peer string `json:",omitempty"`

// PortName is the port of this Destination to route traffic to.
//
// For more details on potential values of this field, see documentation
// for Service.ServicePort.
//
// This only applies for multi-port (v2).
PortName string `json:",omitempty"`
// TODO: what about mesh gateway mode overrides?

// computed at topology compile
Cluster string `json:",omitempty"`
Peering *PeerCluster `json:",omitempty"` // this will have Link!=nil
Implied bool `json:",omitempty"`
VirtualPort uint32 `json:",omitempty"`
Cluster string `json:",omitempty"`
Peering *PeerCluster `json:",omitempty"` // this will have Link!=nil
}

type Peering struct {
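For reference, this is the shape an upstream declaration takes on a workload in a topology config after this change. It is a fragment only (it would sit inside a test or config builder), and the values are copied from the `TestSprawl` fixture earlier in this diff rather than anything new:

    // One workload ("ping") that dials "pong" in a peered cluster on local port 9090.
    wrk := &topology.Workload{
    	ID:             topology.ID{Name: "ping"},
    	Image:          "rboyer/pingpong:latest",
    	Port:           8080,
    	EnvoyAdminPort: 19000,
    	Upstreams: []*topology.Upstream{{
    		ID:        topology.ID{Name: "pong"},
    		LocalPort: 9090,
    		Peer:      "peer-dc2-default",
    	}},
    }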