test: fix more flakes in the compatibility test (#13145)

R.B. Boyer 2022-05-19 14:05:41 -05:00 committed by GitHub
parent 1e31dc891a
commit 851c8c32b4
3 changed files with 67 additions and 17 deletions


@@ -2,25 +2,38 @@ package cluster
import (
"context"
"crypto/rand"
"encoding/base64"
"fmt"
"strings"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/integration/consul-container/libs/node"
)
// Cluster provides an interface for creating and controlling a Consul cluster
// in integration tests, with nodes running in containers.
type Cluster struct {
- Nodes []node.Node
+ Nodes      []node.Node
+ EncryptKey string
}
// New creates a Consul cluster. A node will be started for each of the given
// configs and joined to the cluster.
func New(configs []node.Config) (*Cluster, error) {
- cluster := Cluster{}
+ serfKey, err := newSerfEncryptionKey()
+ if err != nil {
+ return nil, err
+ }
+ cluster := Cluster{
+ EncryptKey: serfKey,
+ }
nodes := make([]node.Node, len(configs))
for idx, c := range configs {
+ c.HCL += fmt.Sprintf(" encrypt=%q", serfKey)
n, err := node.NewConsulContainer(context.Background(), c)
if err != nil {
return nil, err
@@ -71,13 +84,12 @@ func (c *Cluster) Leader() (node.Node, error) {
return nil, fmt.Errorf("no node available")
}
n0 := c.Nodes[0]
- leaderAdd, err := n0.GetClient().Status().Leader()
+ leaderAdd, err := GetLeader(n0.GetClient())
if err != nil {
return nil, err
}
if leaderAdd == "" {
return nil, fmt.Errorf("no leader available")
}
for _, n := range c.Nodes {
addr, _ := n.GetAddr()
if strings.Contains(leaderAdd, addr) {
@@ -86,3 +98,27 @@ func (c *Cluster) Leader() (node.Node, error) {
}
return nil, fmt.Errorf("leader not found")
}
+ func newSerfEncryptionKey() (string, error) {
+ key := make([]byte, 32)
+ n, err := rand.Reader.Read(key)
+ if err != nil {
+ return "", fmt.Errorf("Error reading random data: %w", err)
+ }
+ if n != 32 {
+ return "", fmt.Errorf("Couldn't read enough entropy. Generate more entropy!")
+ }
+ return base64.StdEncoding.EncodeToString(key), nil
+ }
+ func GetLeader(client *api.Client) (string, error) {
+ leaderAdd, err := client.Status().Leader()
+ if err != nil {
+ return "", err
+ }
+ if leaderAdd == "" {
+ return "", fmt.Errorf("no leader available")
+ }
+ return leaderAdd, nil
+ }
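For context, here is a rough usage sketch of the API added above. node.Config, cluster.New, Cluster.Leader, GetLeader, and EncryptKey come from this diff; the cluster import path, the package name, the server HCL, and the image version are assumptions made only for illustration, not part of this commit.

// Sketch only; not part of this commit.
package upgrade

import (
	"fmt"
	"testing"

	"github.com/hashicorp/consul/integration/consul-container/libs/cluster" // import path assumed
	"github.com/hashicorp/consul/integration/consul-container/libs/node"
)

func TestClusterSketch(t *testing.T) {
	// cluster.New now generates one serf key per cluster and appends
	// encrypt="<key>" to each node's HCL before the containers start,
	// so every agent joins the same encrypted gossip pool.
	configs := make([]node.Config, 2)
	for i := range configs {
		configs[i] = node.Config{
			HCL: fmt.Sprintf(`
				node_name = "server-%d"
				server = true
				bootstrap_expect = 2
				log_level = "TRACE"`, i+1),
			Cmd:     []string{"agent", "-client=0.0.0.0"},
			Version: "1.12.0", // placeholder image tag
		}
	}

	c, err := cluster.New(configs)
	if err != nil {
		t.Fatal(err)
	}

	// Leader() matches the raft leader address against each node's address;
	// GetLeader returns the raw leader address reported by the status endpoint.
	leaderNode, err := c.Leader()
	if err != nil {
		t.Fatal(err)
	}

	leaderAddr, err := cluster.GetLeader(leaderNode.GetClient())
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("leader at %s, gossip key %s", leaderAddr, c.EncryptKey)
}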


@@ -24,13 +24,13 @@ func TestTargetServersWithLatestGAClients(t *testing.T) {
cluster := serversCluster(t, numServers, *targetImage)
defer Terminate(t, cluster)
- clients := clientsCreate(t, numClients, *latestImage)
+ clients := clientsCreate(t, numClients, *latestImage, cluster.EncryptKey)
require.NoError(t, cluster.AddNodes(clients))
client := cluster.Nodes[0].GetClient()
- waitForLeader(t, cluster)
+ waitForLeader(t, cluster, client)
waitForMembers(t, client, 4)
serviceName := "api"
@@ -101,13 +101,13 @@ func TestMixedServersMajorityLatestGAClient(t *testing.T) {
numClients = 1
)
- clients := clientsCreate(t, numClients, *latestImage)
+ clients := clientsCreate(t, numClients, *latestImage, cluster.EncryptKey)
require.NoError(t, cluster.AddNodes(clients))
client := clients[0].GetClient()
- waitForLeader(t, cluster)
+ waitForLeader(t, cluster, client)
waitForMembers(t, client, 4)
serviceName := "api"
@@ -176,13 +176,13 @@ func TestMixedServersMajorityTargetGAClient(t *testing.T) {
numClients = 1
)
- clients := clientsCreate(t, numClients, *latestImage)
+ clients := clientsCreate(t, numClients, *latestImage, cluster.EncryptKey)
require.NoError(t, cluster.AddNodes(clients))
client := clients[0].GetClient()
- waitForLeader(t, cluster)
+ waitForLeader(t, cluster, client)
waitForMembers(t, client, 4)
serviceName := "api"
@@ -219,14 +219,16 @@ func TestMixedServersMajorityTargetGAClient(t *testing.T) {
}
}
- func clientsCreate(t *testing.T, numClients int, version string) []node.Node {
+ func clientsCreate(t *testing.T, numClients int, version string, serfKey string) []node.Node {
clients := make([]node.Node, numClients)
for i := 0; i < numClients; i++ {
var err error
clients[i], err = node.NewConsulContainer(context.Background(),
node.Config{
- HCL: `node_name="` + utils.RandName("consul-client") + `"
- log_level="TRACE"`,
+ HCL: fmt.Sprintf(`
+ node_name = %q
+ log_level = "TRACE"
+ encrypt = %q`, utils.RandName("consul-client"), serfKey),
Cmd: []string{"agent", "-client=0.0.0.0"},
Version: version,
})
@@ -263,7 +265,7 @@ func serversCluster(t *testing.T, numServers int, version string) *cluster.Clust
cluster, err := cluster.New(configs)
require.NoError(t, err)
- waitForLeader(t, cluster)
+ waitForLeader(t, cluster, nil)
waitForMembers(t, cluster.Nodes[0].GetClient(), numServers)
return cluster
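The tests above pass cluster.EncryptKey into every client's HCL but never check that the key actually landed in the agents' keyrings. Purely as a hedged illustration (not part of this commit; the helper name is invented), such an assertion could be added with the standard api client, reusing the imports this test file already relies on (testing, api, require):

// Sketch only; not in this commit. Asserts that the serf key generated by
// cluster.New is installed in a node's gossip keyring.
func assertGossipKeyInstalled(t *testing.T, client *api.Client, expectedKey string) {
	resp, err := client.Operator().KeyringList(nil)
	require.NoError(t, err)

	found := false
	for _, keyring := range resp {
		for key := range keyring.Keys {
			if key == expectedKey {
				found = true
			}
		}
	}
	require.True(t, found, "generated gossip key not found in keyring")
}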


@@ -18,12 +18,24 @@ func LongFailer() *retry.Timer {
return &retry.Timer{Timeout: retryTimeout, Wait: retryFrequency}
}
- func waitForLeader(t *testing.T, Cluster *cluster.Cluster) {
+ func waitForLeader(t *testing.T, Cluster *cluster.Cluster, client *api.Client) {
retry.RunWith(LongFailer(), t, func(r *retry.R) {
leader, err := Cluster.Leader()
require.NoError(r, err)
require.NotEmpty(r, leader)
})
+ if client != nil {
+ waitForLeaderFromClient(t, client)
+ }
}
+ func waitForLeaderFromClient(t *testing.T, client *api.Client) {
+ retry.RunWith(LongFailer(), t, func(r *retry.R) {
+ leader, err := cluster.GetLeader(client)
+ require.NoError(r, err)
+ require.NotEmpty(r, leader)
+ })
+ }
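The last helper, waitForMembers, is referenced throughout the tests above, but its body is cut off at the end of this diff. Purely as an illustration of the shape such a helper could take (an assumption, not the committed implementation; the Sketch suffix is invented), it could poll the LAN member list with the same retry settings:

// Sketch only; not the committed code.
func waitForMembersSketch(t *testing.T, client *api.Client, expectN int) {
	retry.RunWith(LongFailer(), t, func(r *retry.R) {
		members, err := client.Agent().Members(false) // false = LAN pool only
		require.NoError(r, err)
		require.Len(r, members, expectN)
	})
}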
func waitForMembers(t *testing.T, client *api.Client, expectN int) {