upgrade test (LTS): utility functions to support ent users (#20186)

* upgrade test (LTS): utility functions to support ent users

* go mod tidy

* add comment
cskh 2024-01-12 18:35:44 -05:00 committed by GitHub
parent 93e06b799e
commit 748458a07b
6 changed files with 103 additions and 20 deletions


@@ -3,6 +3,7 @@ module github.com/hashicorp/consul/test-integ
 go 1.20
 
 require (
+	github.com/google/go-cmp v0.5.9
 	github.com/hashicorp/consul/api v1.26.1
 	github.com/hashicorp/consul/proto-public v0.5.1
 	github.com/hashicorp/consul/sdk v0.15.0
@@ -48,7 +49,6 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/btree v1.0.1 // indirect
-	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/hashicorp/consul v1.16.1 // indirect


@@ -12,6 +12,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -366,17 +367,31 @@ func (a *Asserter) CatalogServiceExists(t *testing.T, cluster string, svc string
 	libassert.CatalogServiceExists(t, cl, svc, opts)
 }
 
-// AssertServiceHealth asserts whether the given service is healthy or not
-func (a *Asserter) AssertServiceHealth(t *testing.T, cl *api.Client, serverSVC string, onlypassing bool, count int) {
-	t.Helper()
-	retry.RunWith(&retry.Timer{Timeout: time.Second * 20, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
-		svcs, _, err := cl.Health().Service(
-			serverSVC,
-			"",
-			onlypassing,
-			nil,
-		)
-		require.NoError(r, err)
-		require.Equal(r, count, len(svcs))
-	})
-}
+// HealthServiceEntries asserts the service has the expected number of instances
+func (a *Asserter) HealthServiceEntries(t *testing.T, cluster string, svc string, passingOnly bool, opts *api.QueryOptions, expectedInstance int) []*api.ServiceEntry {
+	t.Helper()
+	cl := a.mustGetAPIClient(t, cluster)
+	health := cl.Health()
+
+	var serviceEntries []*api.ServiceEntry
+	var err error
+	retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
+		serviceEntries, _, err = health.Service(svc, "", passingOnly, opts)
+		require.NoError(r, err)
+		require.Equal(r, expectedInstance, len(serviceEntries))
+	})
+
+	return serviceEntries
+}
+
+// TokenExist asserts the token exists in the cluster and identical to the expected token
+func (a *Asserter) TokenExist(t *testing.T, cluster string, expectedToken *api.ACLToken) {
+	t.Helper()
+	cl := a.mustGetAPIClient(t, cluster)
+	acl := cl.ACL()
+	retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
+		retrievedToken, _, err := acl.TokenRead(expectedToken.AccessorID, &api.QueryOptions{})
+		require.NoError(r, err)
+		require.True(r, cmp.Equal(expectedToken, retrievedToken), "token %s", expectedToken.Description)
+	})
+}
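
A minimal sketch of how a test might call the two new asserter helpers. The asserter value, the cluster name "dc1", the service name "static-server", and the pre-created token are illustrative assumptions, not part of this change:

	// Assert "static-server" has exactly one passing instance in cluster "dc1"
	// and inspect the returned entries.
	entries := asserter.HealthServiceEntries(t, "dc1", "static-server", true, &api.QueryOptions{}, 1)
	for _, entry := range entries {
		t.Logf("instance %s on node %s", entry.Service.ID, entry.Node.Node)
	}

	// Verify a token created earlier in the test still exists and is unchanged.
	asserter.TokenExist(t, "dc1", expectedToken) // expectedToken is an *api.ACLToken

Unlike the old AssertServiceHealth, HealthServiceEntries returns the matching entries, so callers can run their own follow-up checks once the retry loop succeeds.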


@@ -0,0 +1,50 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package topoutil
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+// RequestRegisterService registers a service at the given node address
+// using consul http request.
+//
+// The service definition must be a JSON string.
+func RequestRegisterService(clusterHttpCli *http.Client, nodeAddress string, serviceDefinition string, token string) error {
+	var js json.RawMessage
+	if err := json.Unmarshal([]byte(serviceDefinition), &js); err != nil {
+		return fmt.Errorf("failed to unmarshal service definition: %s", err)
+	}
+
+	u, err := url.Parse(nodeAddress)
+	if err != nil {
+		return fmt.Errorf("failed to parse node address: %s", err)
+	}
+	u.Path = "/v1/agent/service/register"
+
+	req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewBuffer(js))
+	if err != nil {
+		return fmt.Errorf("failed to create request: %s", err)
+	}
+	if token != "" {
+		req.Header.Set("X-Consul-Token", token)
+	}
+
+	resp, err := clusterHttpCli.Do(req)
+	if err != nil {
+		return fmt.Errorf("failed to send request: %s", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("failed to register service: %s", resp.Status)
+	}
+
+	return nil
+}
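
A minimal usage sketch for the helper above. The HTTP client, the node address "http://127.0.0.1:8500", the service definition, and the token are illustrative assumptions; in these tests the client and address would typically come from the Sprawl helpers added later in this commit:

	// Register a service named "static-server" on the agent at the given address.
	def := `{"Name": "static-server", "Port": 8080}`
	if err := topoutil.RequestRegisterService(http.DefaultClient, "http://127.0.0.1:8500", def, "root"); err != nil {
		t.Fatalf("failed to register service: %v", err)
	}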


@@ -5,7 +5,6 @@ package l7_traffic_management
 import (
 	"fmt"
-	"github.com/stretchr/testify/require"
 	"testing"
 
 	"github.com/hashicorp/consul/api"
@@ -191,21 +190,18 @@ func (ct *commonTopo) ValidateWorkloads(t *testing.T) {
 	ct.Assert = topoutil.NewAsserter(ct.Sprawl)
 	cluster := ct.Sprawl.Topology().Clusters[dc1]
-	cl, err := ct.Sprawl.APIClientForCluster(cluster.Name, "")
-	require.NoError(t, err)
 
 	staticServerWorkload := cluster.WorkloadByID(
 		topology.NewNodeID("dc1-client1", defaultPartition),
 		ct.StaticServerSID,
 	)
 	ct.Assert.HTTPStatus(t, staticServerWorkload, staticServerWorkload.Port, 200)
-	ct.Assert.AssertServiceHealth(t, cl, ct.StaticServerSID.Name, true, 1)
+	ct.Assert.HealthServiceEntries(t, cluster.Name, ct.StaticServerSID.Name, true, &api.QueryOptions{}, 1)
 
 	staticClientWorkload := cluster.WorkloadByID(
 		topology.NewNodeID("dc1-client2", defaultPartition),
 		ct.StaticClientSID,
 	)
-	ct.Assert.AssertServiceHealth(t, cl, ct.StaticClientSID.Name, true, 1)
+	ct.Assert.HealthServiceEntries(t, cluster.Name, ct.StaticClientSID.Name, true, &api.QueryOptions{}, 1)
 
 	// check the service exists in catalog
 	svcs := cluster.WorkloadsByID(ct.StaticClientSID)


@@ -100,6 +100,19 @@ func (s *Sprawl) HTTPClientForCluster(clusterName string) (*http.Client, error)
 	return &http.Client{Transport: transport}, nil
 }
 
+// LocalAddressForNode returns the local address for the given node in the cluster
+func (s *Sprawl) LocalAddressForNode(clusterName string, nid topology.NodeID) (string, error) {
+	cluster, ok := s.topology.Clusters[clusterName]
+	if !ok {
+		return "", fmt.Errorf("no such cluster: %s", clusterName)
+	}
+	node := cluster.NodeByID(nid)
+	if !node.IsAgent() {
+		return "", fmt.Errorf("node is not an agent")
+	}
+	return node.LocalAddress(), nil
+}
+
 // APIClientForNode gets a pooled api.Client connected to the agent running on
 // the provided node.
 //
@@ -138,7 +151,7 @@ func (s *Sprawl) APIClientForNode(clusterName string, nid topology.NodeID, token
 func (s *Sprawl) APIClientForCluster(clusterName, token string) (*api.Client, error) {
 	clu := s.topology.Clusters[clusterName]
 	// TODO: this always goes to the first client, but we might want to balance this
-	firstAgent := clu.FirstClient()
+	firstAgent := clu.FirstClient("")
 	if firstAgent == nil {
 		firstAgent = clu.FirstServer()
 	}
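
LocalAddressForNode is meant to pair with HTTPClientForCluster and the RequestRegisterService helper above. A minimal sketch, assuming a *Sprawl value sp, a cluster "dc1" with a client node "dc1-client1" in the default partition, and the agent HTTP API on port 8500 (all illustrative assumptions about the topology):

	addr, err := sp.LocalAddressForNode("dc1", topology.NewNodeID("dc1-client1", "default"))
	require.NoError(t, err)

	httpCli, err := sp.HTTPClientForCluster("dc1")
	require.NoError(t, err)

	// Feed the client and address to the topoutil helper; the scheme and port
	// here are assumptions, not derived from this diff.
	nodeAddr := fmt.Sprintf("http://%s:8500", addr)
	def := `{"Name": "static-server", "Port": 8080}`
	require.NoError(t, topoutil.RequestRegisterService(httpCli, nodeAddr, def, ""))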


@@ -376,12 +376,21 @@ func (c *Cluster) FirstServer() *Node {
 	return nil
 }
 
-func (c *Cluster) FirstClient() *Node {
+// FirstClient returns the first client agent in the cluster.
+// If segment is non-empty, it will return the first client agent in that segment.
+func (c *Cluster) FirstClient(segment string) *Node {
 	for _, node := range c.Nodes {
 		if node.Kind != NodeKindClient || node.Disabled {
 			continue
 		}
-		return node
+		if segment == "" {
+			// return a client agent in default segment
+			return node
+		} else {
+			if node.Segment != nil && node.Segment.Name == segment {
+				return node
+			}
+		}
 	}
 	return nil
 }
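
A short sketch of how a caller might use the new segment parameter; the cluster value and the segment name "alpha" are illustrative:

	// Prefer a client agent in network segment "alpha"; fall back to any
	// client agent in the default segment if none is found.
	node := cluster.FirstClient("alpha")
	if node == nil {
		node = cluster.FirstClient("")
	}

Existing callers such as APIClientForCluster pass "" to keep the previous behavior of returning the first enabled client agent.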