// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package topoutil

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"regexp"
	"testing"
	"time"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/proto-public/pbresource"
	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
	"github.com/hashicorp/consul/testing/deployer/topology"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Asserter is a utility to help in reducing boilerplate in invoking test
// assertions against consul-topology Sprawl components.
//
// The methods should largely take in *topology.Workload instances in lieu of
// ip/ports if there is only one port that makes sense for the assertion (such
// as use of the envoy admin port 19000).
//
// If it's up to the test (like picking a destination), leave port as an
// argument but still take the workload and use that to grab the local ip from
// the topology.Node.
type Asserter struct {
	sp SprawlLite
}

// SprawlLite is the subset of *sprawl.Sprawl that Asserter needs.
// *sprawl.Sprawl satisfies this; we don't need anything else.
type SprawlLite interface {
	HTTPClientForCluster(clusterName string) (*http.Client, error)
	APIClientForNode(clusterName string, nid topology.NodeID, token string) (*api.Client, error)
	APIClientForCluster(clusterName string, token string) (*api.Client, error)
	ResourceServiceClientForCluster(clusterName string) pbresource.ResourceServiceClient
	Topology() *topology.Topology
}

// NewAsserter creates a new assertion helper for the provided sprawl.
func NewAsserter(sp SprawlLite) *Asserter {
	return &Asserter{
		sp: sp,
	}
}
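
// Typical usage (a minimal sketch; sprawltest.Launch, the topology config
// helper, and the cluster/service names below are illustrative assumptions,
// not defined in this package): build one Asserter from the launched sprawl
// and reuse it for every assertion in the test.
//
//	func TestMeshTraffic(t *testing.T) {
//		sp := sprawltest.Launch(t, clusterTopology()) // hypothetical wiring
//		asserter := topoutil.NewAsserter(sp)
//		asserter.CatalogServiceExists(t, "dc1", "static-server", nil)
//	}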

func (a *Asserter) mustGetHTTPClient(t testutil.TestingTB, cluster string) *http.Client {
	client, err := a.httpClientFor(cluster)
	require.NoError(t, err)
	return client
}

func (a *Asserter) mustGetAPIClient(t testutil.TestingTB, cluster string) *api.Client {
	clu := a.sp.Topology().Clusters[cluster]
	cl, err := a.sp.APIClientForCluster(clu.Name, "")
	require.NoError(t, err)
	return cl
}

// httpClientFor returns a pre-configured http.Client that proxies requests
// through the embedded squid instance in each LAN.
//
// Use this in methods below to magically pick the right proxied http client
// given the home of each node being checked.
func (a *Asserter) httpClientFor(cluster string) (*http.Client, error) {
	client, err := a.sp.HTTPClientForCluster(cluster)
	if err != nil {
		return nil, err
	}
	return client, nil
}

// DestinationEndpointStatus validates that the workload's Envoy proxy has the
// provided clusterName configured with count endpoints in the given
// healthStatus.
//
// Exposes libassert.AssertUpstreamEndpointStatusWithClient for use against a Sprawl.
//
// NOTE: this doesn't take a port b/c you always want to use the envoy admin port.
func (a *Asserter) DestinationEndpointStatus(
	t *testing.T,
	workload *topology.Workload,
	clusterName string,
	healthStatus string,
	count int,
) {
	t.Helper()
	node := workload.Node
	ip := node.LocalAddress()
	port := workload.EnvoyAdminPort
	addr := fmt.Sprintf("%s:%d", ip, port)

	client := a.mustGetHTTPClient(t, node.Cluster)
	libassert.AssertUpstreamEndpointStatusWithClient(t, client, addr, clusterName, healthStatus, count)
}

// HTTPServiceEchoes verifies that a POST to the given ip/port combination
// returns the data in the response body. Optional path can be provided to
// differentiate requests.
//
// Exposes libassert.HTTPServiceEchoes for use against a Sprawl.
//
// NOTE: this takes a port b/c you may want to reach this via your choice of destination.
func (a *Asserter) HTTPServiceEchoes(
	t *testing.T,
	workload *topology.Workload,
	port int,
	path string,
) {
	t.Helper()
	require.True(t, port > 0)

	node := workload.Node
	ip := node.LocalAddress()
	addr := fmt.Sprintf("%s:%d", ip, port)

	client := a.mustGetHTTPClient(t, node.Cluster)
	libassert.HTTPServiceEchoesWithClient(t, client, addr, path)
}

// HTTPServiceEchoesResHeader verifies that a POST to the given ip/port
// combination returns the data in the response body with the expected
// response headers. Optional path can be provided to differentiate requests.
//
// Exposes libassert.HTTPServiceEchoesResHeaderWithClient for use against a Sprawl.
//
// NOTE: this takes a port b/c you may want to reach this via your choice of destination.
func (a *Asserter) HTTPServiceEchoesResHeader(
	t *testing.T,
	workload *topology.Workload,
	port int,
	path string,
	expectedResHeader map[string]string,
) {
	t.Helper()
	require.True(t, port > 0)

	node := workload.Node
	ip := node.LocalAddress()
	addr := fmt.Sprintf("%s:%d", ip, port)

	client := a.mustGetHTTPClient(t, node.Cluster)
	libassert.HTTPServiceEchoesResHeaderWithClient(t, client, addr, path, expectedResHeader)
}

// HTTPStatus asserts that an HTTP GET to the given workload and port
// eventually returns the expected status code.
func (a *Asserter) HTTPStatus(
	t *testing.T,
	workload *topology.Workload,
	port int,
	status int,
) {
	t.Helper()
	require.True(t, port > 0)

	node := workload.Node
	ip := node.LocalAddress()
	addr := fmt.Sprintf("%s:%d", ip, port)

	client := a.mustGetHTTPClient(t, node.Cluster)

	url := "http://" + addr

	retry.RunWith(&retry.Timer{Timeout: 30 * time.Second, Wait: 500 * time.Millisecond}, t, func(r *retry.R) {
		resp, err := client.Get(url)
		if err != nil {
			r.Fatalf("could not make request to %q: %v", url, err)
		}
		defer resp.Body.Close()
		if resp.StatusCode != status {
			r.Fatalf("expected status %d, got %d", status, resp.StatusCode)
		}
	})
}
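
// For example (illustrative call; the workload variable and port are
// placeholders from a hypothetical topology), a test might assert that a
// workload's HTTP port eventually answers 200:
//
//	a.HTTPStatus(t, staticServerWorkload, 8080, http.StatusOK)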

// HealthyWithPeer asserts that the service sid, exported by the peer
// peerName, is passing its health checks as seen from the given cluster.
func (a *Asserter) HealthyWithPeer(t *testing.T, cluster string, sid topology.ID, peerName string) {
	t.Helper()
	cl := a.mustGetAPIClient(t, cluster)
	retry.RunWith(&retry.Timer{Timeout: time.Minute * 1, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
		svcs, _, err := cl.Health().Service(
			sid.Name,
			"",
			true,
			utils.CompatQueryOpts(&api.QueryOptions{
				Partition: sid.Partition,
				Namespace: sid.Namespace,
				Peer:      peerName,
			}),
		)
		require.NoError(r, err)
		assert.GreaterOrEqual(r, len(svcs), 1)
	})
}
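
// For example (illustrative values; the cluster, service, and peer names are
// placeholders for a hypothetical peered topology):
//
//	a.HealthyWithPeer(t, "dc1", topology.ID{Name: "static-server"}, "peer-dc2")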

// testingT is a minimal testing interface: require.TestingT plus Helper.
type testingT interface {
	require.TestingT
	Helper()
}

// fortioFetch2Destination does a fortio /fetch2 to the given fortio instance,
// targeting the given destination. It returns the body, and the response with
// response.Body already closed.
//
// We treat 400, 503, and 504 responses as retryable errors.
func (a *Asserter) fortioFetch2Destination(
	t testutil.TestingTB,
	client *http.Client,
	addr string,
	dest *topology.Destination,
	path string,
) (body []byte, res *http.Response) {
	t.Helper()

	var actualURL string
	if dest.Implied {
		actualURL = fmt.Sprintf("http://%s--%s--%s.virtual.consul:%d/%s",
			dest.ID.Name,
			dest.ID.Namespace,
			dest.ID.Partition,
			dest.VirtualPort,
			path,
		)
	} else {
		actualURL = fmt.Sprintf("http://localhost:%d/%s", dest.LocalPort, path)
	}

	url := fmt.Sprintf("http://%s/fortio/fetch2?url=%s", addr,
		url.QueryEscape(actualURL),
	)

	req, err := http.NewRequest(http.MethodPost, url, nil)
	require.NoError(t, err)

	res, err = client.Do(req)
	require.NoError(t, err)
	defer res.Body.Close()

	// Not sure when these happen; suspect it's when the mesh gateway in the peer is not yet ready.
	require.NotEqual(t, http.StatusServiceUnavailable, res.StatusCode)
	require.NotEqual(t, http.StatusGatewayTimeout, res.StatusCode)
	// Not sure when this happens; suspect it's when envoy hasn't configured the local destination yet.
	require.NotEqual(t, http.StatusBadRequest, res.StatusCode)
	body, err = io.ReadAll(res.Body)
	require.NoError(t, err)

	return body, res
}
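
// To illustrate the two URL shapes built above (the names and ports are made
// up for the example; real values come from the topology under test): an
// implied destination whose ID has Name "db", Namespace "default", and
// Partition "default", with VirtualPort 5000, is fetched as
//
//	http://db--default--default.virtual.consul:5000/<path>
//
// while an explicit destination with LocalPort 9191 is fetched as
//
//	http://localhost:9191/<path>
//
// Either URL is then query-escaped and wrapped as
// http://<addr>/fortio/fetch2?url=<escaped-url>.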

// FortioFetch2HeaderEcho uses the /fortio/fetch2 endpoint to do a header echo
// check against a destination fortio.
func (a *Asserter) FortioFetch2HeaderEcho(t *testing.T, fortioWrk *topology.Workload, dest *topology.Destination) {
	const kPassphrase = "x-passphrase"
	const passphrase = "hello"
	path := fmt.Sprintf("/?header=%s:%s", kPassphrase, passphrase)

	var (
		node   = fortioWrk.Node
		addr   = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioWrk.PortOrDefault(dest.PortName))
		client = a.mustGetHTTPClient(t, node.Cluster)
	)

	retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
		_, res := a.fortioFetch2Destination(r, client, addr, dest, path)
		require.Equal(r, http.StatusOK, res.StatusCode)
		v := res.Header.Get(kPassphrase)
		require.Equal(r, passphrase, v)
	})
}

// FortioFetch2FortioName is similar to libassert.AssertFortioName: it uses
// the /fortio/fetch2 endpoint to hit the debug endpoint on the destination,
// and asserts that FORTIO_NAME == "<clusterName>::<sid>".
func (a *Asserter) FortioFetch2FortioName(
	t *testing.T,
	fortioWrk *topology.Workload,
	dest *topology.Destination,
	clusterName string,
	sid topology.ID,
) {
	t.Helper()

	var (
		node   = fortioWrk.Node
		addr   = fmt.Sprintf("%s:%d", node.LocalAddress(), fortioWrk.PortOrDefault(dest.PortName))
		client = a.mustGetHTTPClient(t, node.Cluster)
	)

	fortioNameRE := regexp.MustCompile("\nFORTIO_NAME=(.+)\n")
	path := "/debug?env=dump"

	retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
		body, res := a.fortioFetch2Destination(r, client, addr, dest, path)

		require.Equal(r, http.StatusOK, res.StatusCode)

		// TODO: not sure we should retry these?
		m := fortioNameRE.FindStringSubmatch(string(body))
		require.GreaterOrEqual(r, len(m), 2)
		// TODO: dedupe from NewFortioService
		require.Equal(r, fmt.Sprintf("%s::%s", clusterName, sid.String()), m[1])
	})
}

// CatalogServiceExists is the same as libassert.CatalogServiceExists, except
// that it uses a proxied API client.
func (a *Asserter) CatalogServiceExists(t *testing.T, cluster string, svc string, opts *api.QueryOptions) {
	t.Helper()
	cl := a.mustGetAPIClient(t, cluster)
	libassert.CatalogServiceExists(t, cl, svc, opts)
}