upgrade test: run validation func at every node during upgrade (#20293)

* upgrade test: run validation func at every node during upgrade

* add 3 servers in each cluster
Author: cskh · 2024-01-22 18:35:06 -05:00 (committed by GitHub)
parent 995ba32cc0
commit 528147e5ad
3 changed files with 19 additions and 10 deletions
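
The point of the change is that during a rolling upgrade the validation func runs against every node's agent rather than a single cached client, so a node with a stale or inconsistent view is caught at the step that broke it. A minimal self-contained sketch of that shape (Node, upgrade, and validate are hypothetical stand-ins for illustration, not part of this commit):

package upgradetest

import "testing"

// Node stands in for topology.Node; upgrade and validate are
// hypothetical helpers used only to illustrate the loop structure.
type Node struct{ ID string }

func upgrade(n *Node) error { return nil } // placeholder

func validate(t *testing.T, cluster string, n *Node) {
    // e.g. Assert.HealthServiceEntries(t, cluster, n, ...) per node
}

func TestRollingUpgrade(t *testing.T) {
    // three servers per cluster, as this commit configures
    nodes := []*Node{{ID: "dc1-server1"}, {ID: "dc1-server2"}, {ID: "dc1-server3"}}
    for _, target := range nodes {
        if err := upgrade(target); err != nil {
            t.Fatalf("upgrade %s: %v", target.ID, err)
        }
        // after each node upgrade, run the validation func at every node
        for _, n := range nodes {
            validate(t, "dc1", n)
        }
    }
}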


@@ -367,27 +367,29 @@ func (a *Asserter) CatalogServiceExists(t *testing.T, cluster string, svc string
     libassert.CatalogServiceExists(t, cl, svc, opts)
 }
 
-// HealthServiceEntries asserts the service has the expected number of instances
-func (a *Asserter) HealthServiceEntries(t *testing.T, cluster string, svc string, passingOnly bool, opts *api.QueryOptions, expectedInstance int) []*api.ServiceEntry {
+// HealthServiceEntries asserts the service has the expected number of instances and checks
+func (a *Asserter) HealthServiceEntries(t *testing.T, cluster string, node *topology.Node, svc string, passingOnly bool, opts *api.QueryOptions, expectedInstance int, expectedChecks int) []*api.ServiceEntry {
     t.Helper()
-    cl := a.mustGetAPIClient(t, cluster)
+    cl, err := a.sp.APIClientForNode(cluster, node.ID(), "")
+    require.NoError(t, err)
     health := cl.Health()
 
     var serviceEntries []*api.ServiceEntry
-    var err error
     retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
         serviceEntries, _, err = health.Service(svc, "", passingOnly, opts)
         require.NoError(r, err)
-        require.Equal(r, expectedInstance, len(serviceEntries))
+        require.Equalf(r, expectedInstance, len(serviceEntries), "dc: %s, service: %s", cluster, serviceEntries[0].Service.Service)
+        require.Equalf(r, expectedChecks, len(serviceEntries[0].Checks), "dc: %s, service: %s", cluster, serviceEntries[0].Service.Service)
     })
 
     return serviceEntries
 }
 
 // TokenExist asserts the token exists in the cluster and identical to the expected token
-func (a *Asserter) TokenExist(t *testing.T, cluster string, expectedToken *api.ACLToken) {
+func (a *Asserter) TokenExist(t *testing.T, cluster string, node *topology.Node, expectedToken *api.ACLToken) {
     t.Helper()
-    cl := a.mustGetAPIClient(t, cluster)
+    cl, err := a.sp.APIClientForNode(cluster, node.ID(), "")
+    require.NoError(t, err)
     acl := cl.ACL()
     retry.RunWith(&retry.Timer{Timeout: 60 * time.Second, Wait: time.Millisecond * 500}, t, func(r *retry.R) {
         retrievedToken, _, err := acl.TokenRead(expectedToken.AccessorID, &api.QueryOptions{})
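
With the new node parameter, the caller chooses which agent answers the query, since APIClientForNode dials that specific node. A usage sketch of the new signature, assuming an Asserter a, a cluster, and a node set up as in the tests below (the service name is illustrative):

entries := a.HealthServiceEntries(
    t, cluster.Name, node,
    "static-server",     // svc (illustrative name)
    true,                // passingOnly
    &api.QueryOptions{}, // opts
    1,                   // expectedInstance
    0,                   // expectedChecks
)
_ = entries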


@@ -189,19 +189,21 @@ func (ct *commonTopo) ValidateWorkloads(t *testing.T) {
     t.Helper()
     ct.Assert = topoutil.NewAsserter(ct.Sprawl)
     cluster := ct.Sprawl.Topology().Clusters[dc1]
+    node := cluster.Nodes[0]
 
     staticServerWorkload := cluster.WorkloadByID(
         topology.NewNodeID("dc1-client1", defaultPartition),
         ct.StaticServerSID,
     )
     ct.Assert.HTTPStatus(t, staticServerWorkload, staticServerWorkload.Port, 200)
-    ct.Assert.HealthServiceEntries(t, cluster.Name, ct.StaticServerSID.Name, true, &api.QueryOptions{}, 1)
+    ct.Assert.HealthServiceEntries(t, cluster.Name, node, ct.StaticServerSID.Name, true, &api.QueryOptions{}, 1, 0)
 
     staticClientWorkload := cluster.WorkloadByID(
         topology.NewNodeID("dc1-client2", defaultPartition),
         ct.StaticClientSID,
     )
-    ct.Assert.HealthServiceEntries(t, cluster.Name, ct.StaticClientSID.Name, true, &api.QueryOptions{}, 1)
+    ct.Assert.HealthServiceEntries(t, cluster.Name, node, ct.StaticClientSID.Name, true, &api.QueryOptions{}, 1, 0)
 
     // check the service exists in catalog
     svcs := cluster.WorkloadsByID(ct.StaticClientSID)
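
Here ValidateWorkloads pins its queries to cluster.Nodes[0]; during the upgrade itself the same assertions can be pointed at each node in turn. A sketch of that loop, under the assumption that every node in the slice is reachable through the sprawl's API proxy:

for _, node := range cluster.Nodes {
    ct.Assert.HealthServiceEntries(t, cluster.Name, node,
        ct.StaticServerSID.Name, true, &api.QueryOptions{}, 1, 0)
    ct.Assert.HealthServiceEntries(t, cluster.Name, node,
        ct.StaticClientSID.Name, true, &api.QueryOptions{}, 1, 0)
}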


@@ -74,7 +74,12 @@ func (s *Sprawl) rejoinServers(cluster *topology.Cluster) error {
     servers := cluster.ServerNodes()
 
-    recoveryToken := s.secrets.ReadGeneric(cluster.Name, secrets.AgentRecovery)
+    var recoveryToken string
+    if servers[0].Images.GreaterThanVersion(topology.MinVersionAgentTokenPartition) {
+        recoveryToken = s.secrets.ReadGeneric(cluster.Name, secrets.AgentRecovery)
+    } else {
+        recoveryToken = s.secrets.ReadGeneric(cluster.Name, secrets.BootstrapToken)
+    }
 
     node0, rest := servers[0], servers[1:]
     client, err := util.ProxyNotPooledAPIClient(
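
The token gate exists because images older than MinVersionAgentTokenPartition predate the agent-recovery token, so the rejoin falls back to the bootstrap token. The same version-gate pattern generalizes; a standalone sketch using golang.org/x/mod/semver for the comparison (the helper and token names are illustrative, not the harness's API):

package main

import (
    "fmt"

    "golang.org/x/mod/semver"
)

// pickToken mirrors the gate above: prefer the newer token kind only
// when the image version is past the minimum that supports it. Exact
// comparison semantics (strict vs inclusive) follow the test harness.
func pickToken(imageVersion, minVersion, recovery, bootstrap string) string {
    if semver.Compare(imageVersion, minVersion) > 0 {
        return recovery
    }
    return bootstrap
}

func main() {
    fmt.Println(pickToken("v1.17.1", "v1.14.0", "agent-recovery", "bootstrap"))
}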