mirror of https://github.com/status-im/consul.git
Post upgrade test validation: envoy endpoint and register service (#16067)
This commit is contained in:
parent 6e425f7428
commit dbaab52786
@@ -50,6 +50,29 @@ func GetEnvoyListenerTCPFilters(t *testing.T, adminPort int) {
    require.Contains(t, filteredResult, "envoy.filters.network.tcp_proxy")
}

// AssertUpstreamEndpointStatus validates that proxy was configured with provided clusterName in the healthStatus
func AssertUpstreamEndpointStatus(t *testing.T, adminPort int, clusterName, healthStatus string, count int) {
    var (
        clusters string
        err      error
    )
    failer := func() *retry.Timer {
        return &retry.Timer{Timeout: 30 * time.Second, Wait: 500 * time.Millisecond}
    }

    retry.RunWith(failer(), t, func(r *retry.R) {
        clusters, err = libservice.GetEnvoyClusters(adminPort)
        if err != nil {
            r.Fatal("could not fetch envoy configuration")
        }

        filter := fmt.Sprintf(`.cluster_statuses[] | select(.name|contains("%s")) | [.host_statuses[].health_status.eds_health_status] | [select(.[] == "%s")] | length`, clusterName, healthStatus)
        results, err := utils.JQFilter(clusters, filter)
        require.NoError(r, err, "could not parse envoy configuration")
        require.Equal(r, count, len(results))
    })
}

// GetEnvoyHTTPrbacFilters validates that proxy was configured with an http connection manager
// this assertion is currently unused current tests use http protocol
func GetEnvoyHTTPrbacFilters(t *testing.T, port int) {
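For reference, here is a minimal standalone sketch of what the jq expression above computes, assuming the /clusters?format=json payload exposes cluster_statuses[].host_statuses[].health_status.eds_health_status as the filter expects. The clustersDump type, the healthyPerCluster helper, and the sample data are illustrative, not part of this change:

    package main

    import (
        "encoding/json"
        "fmt"
        "strings"
    )

    // clustersDump models only the fields the jq filter touches.
    type clustersDump struct {
        ClusterStatuses []struct {
            Name         string `json:"name"`
            HostStatuses []struct {
                HealthStatus struct {
                    EdsHealthStatus string `json:"eds_health_status"`
                } `json:"health_status"`
            } `json:"host_statuses"`
        } `json:"cluster_statuses"`
    }

    // healthyPerCluster returns, for each cluster whose name contains clusterName,
    // how many of its endpoints report the given eds_health_status, i.e. the same
    // per-cluster numbers the jq expression emits.
    func healthyPerCluster(raw, clusterName, healthStatus string) ([]int, error) {
        var dump clustersDump
        if err := json.Unmarshal([]byte(raw), &dump); err != nil {
            return nil, err
        }
        var results []int
        for _, c := range dump.ClusterStatuses {
            if !strings.Contains(c.Name, clusterName) {
                continue
            }
            n := 0
            for _, h := range c.HostStatuses {
                if h.HealthStatus.EdsHealthStatus == healthStatus {
                    n++
                }
            }
            results = append(results, n)
        }
        return results, nil
    }

    func main() {
        sample := `{"cluster_statuses":[{"name":"static-server.default","host_statuses":[{"health_status":{"eds_health_status":"HEALTHY"}}]}]}`
        counts, _ := healthyPerCluster(sample, "static-server.default", "HEALTHY")
        fmt.Println(counts) // [1]
    }

AssertUpstreamEndpointStatus then compares count against the number of results JQFilter returns, which with this filter amounts to one result per matching cluster.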
@@ -142,3 +142,21 @@ func GetEnvoyConfigDump(port int, filter string) (string, error) {
    return string(body), nil
}

func GetEnvoyClusters(port int) (string, error) {
    client := cleanhttp.DefaultClient()
    url := fmt.Sprintf("http://localhost:%d/clusters?format=json", port)

    res, err := client.Get(url)
    if err != nil {
        return "", err
    }
    defer res.Body.Close()

    body, err := io.ReadAll(res.Body)
    if err != nil {
        return "", err
    }

    return string(body), nil
}
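A possible usage sketch, not part of this change, showing how GetEnvoyClusters and the new AssertUpstreamEndpointStatus compose in a container test; the test name, admin port, and cluster name below are placeholders (in the real tests the port comes from the sidecar's GetAdminAddr):

    package upgrade_test // illustrative only

    import (
        "testing"

        "github.com/stretchr/testify/require"

        libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
        libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
    )

    func TestStaticServerUpstreamHealthy(t *testing.T) {
        // Placeholder admin port; obtain it from a running sidecar in practice.
        adminPort := 19000

        // Raw admin payload, handy when debugging a failing assertion.
        clusters, err := libservice.GetEnvoyClusters(adminPort)
        require.NoError(t, err)
        t.Logf("envoy clusters: %s", clusters)

        // Retry-wrapped assertion added above: expect one upstream cluster whose
        // name contains "static-server.default" with HEALTHY EDS endpoints.
        libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "HEALTHY", 1)
    }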
@@ -62,7 +62,6 @@ func BasicPeeringTwoClustersSetup(
        libassert.CatalogServiceExists(t, acceptingClient, "static-server-sidecar-proxy")

        require.NoError(t, serverSidecarService.Export("default", AcceptingPeerName, acceptingClient))
    }

    // Register an static-client service in dialing cluster and set upstream to static-server service
@@ -78,6 +77,8 @@ func BasicPeeringTwoClustersSetup(
        libassert.CatalogServiceExists(t, dialingClient, "static-client-sidecar-proxy")
    }

    _, adminPort := clientSidecarService.GetAdminAddr()
    libassert.AssertUpstreamEndpointStatus(t, adminPort, fmt.Sprintf("static-server.default.%s.external", DialingPeerName), "HEALTHY", 1)
    _, port := clientSidecarService.GetAddr()
    libassert.HTTPServiceEchoes(t, "localhost", port, "")
@@ -28,8 +28,10 @@ func TestBasicConnectService(t *testing.T) {
    _, port := clientService.GetAddr()
    _, adminPort := clientService.GetAdminAddr()

    libassert.HTTPServiceEchoes(t, "localhost", port, "")
    libassert.AssertUpstreamEndpointStatus(t, adminPort, "static-server.default", "HEALTHY", 1)
    libassert.GetEnvoyListenerTCPFilters(t, adminPort)

    libassert.HTTPServiceEchoes(t, "localhost", port, "")
}

func createCluster(t *testing.T) *libcluster.Cluster {
@@ -9,6 +9,7 @@ import (

    "github.com/hashicorp/consul/api"
    libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
    libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
    libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
    "github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
)
@@ -50,9 +51,22 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {

        // Upgrade the accepting cluster and assert peering is still ACTIVE
        require.NoError(t, acceptingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion))

        libassert.PeeringStatus(t, acceptingClient, libtopology.AcceptingPeerName, api.PeeringStateActive)
        libassert.PeeringStatus(t, dialingClient, libtopology.DialingPeerName, api.PeeringStateActive)

        require.NoError(t, dialingCluster.StandardUpgrade(t, context.Background(), tc.targetVersion))
        libassert.PeeringStatus(t, acceptingClient, libtopology.AcceptingPeerName, api.PeeringStateActive)
        libassert.PeeringStatus(t, dialingClient, libtopology.DialingPeerName, api.PeeringStateActive)

        // POST upgrade validation
        // - Register a new static-client service in dialing cluster and
        // - set upstream to static-server service in peered cluster
        clientSidecarService, err := libservice.CreateAndRegisterStaticClientSidecar(dialingCluster.Servers()[0], libtopology.DialingPeerName, true)
        require.NoError(t, err)
        _, port := clientSidecarService.GetAddr()
        _, adminPort := clientSidecarService.GetAdminAddr()
        libassert.AssertUpstreamEndpointStatus(t, adminPort, fmt.Sprintf("static-server.default.%s.external", libtopology.DialingPeerName), "HEALTHY", 1)
        libassert.HTTPServiceEchoes(t, "localhost", port, "")
    }

    for _, tc := range tcs {