upgrade test: discovery chain across partition (#16543)

cskh 2023-03-06 13:28:02 -05:00 committed by GitHub
parent 9d8e00db24
commit 94ecb9c5d5
6 changed files with 57 additions and 17 deletions

View File

@@ -128,11 +128,7 @@ func ServiceLogContains(t *testing.T, service libservice.Service, target string)
func AssertFortioName(t *testing.T, urlbase string, name string, reqHost string) {
t.Helper()
var fortioNameRE = regexp.MustCompile(("\nFORTIO_NAME=(.+)\n"))
client := &http.Client{
Transport: &http.Transport{
DisableKeepAlives: true,
},
}
client := cleanhttp.DefaultClient()
retry.RunWith(&retry.Timer{Timeout: defaultHTTPTimeout, Wait: defaultHTTPWait}, t, func(r *retry.R) {
fullurl := fmt.Sprintf("%s/debug?env=dump", urlbase)
req, err := http.NewRequest("GET", fullurl, nil)

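The hunk above replaces a hand-rolled http.Client with cleanhttp.DefaultClient(). A minimal sketch of what that call provides, assuming the github.com/hashicorp/go-cleanhttp package (not shown in this diff): DefaultClient wraps a transport that already disables keep-alives, so the explicit Transport override becomes redundant.

package main

import (
    "fmt"
    "net/http"

    "github.com/hashicorp/go-cleanhttp"
)

func main() {
    // Roughly equivalent to the removed construction,
    // &http.Client{Transport: &http.Transport{DisableKeepAlives: true}},
    // plus go-cleanhttp's other hardened transport defaults.
    client := cleanhttp.DefaultClient()
    if tr, ok := client.Transport.(*http.Transport); ok {
        fmt.Println("keep-alives disabled:", tr.DisableKeepAlives)
    }
}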
View File

@@ -17,6 +17,7 @@ type Agent interface {
NewClient(string, bool) (*api.Client, error)
GetName() string
GetAgentName() string
GetPartition() string
GetPod() testcontainers.Container
ClaimAdminPort() (int, error)
GetConfig() Config

View File

@@ -245,6 +245,11 @@ func (b *Builder) Peering(enable bool) *Builder {
return b
}
func (b *Builder) Partition(name string) *Builder {
b.conf.Set("partition", name)
return b
}
func (b *Builder) RetryJoin(names ...string) *Builder {
b.conf.Set("retry_join", names)
return b

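A hedged usage sketch for the new Partition option. Only Partition, Peering, and RetryJoin appear in this diff; the constructor NewConfigBuilder is assumed from the surrounding test library, and the partition name "ap1" and join target are illustrative.

// Sketch only: configure an agent into a hypothetical non-default
// admin partition and join it to an existing agent.
b := NewConfigBuilder(ctx). // assumed constructor
    Partition("ap1").       // new option added in this commit
    Peering(false).
    RetryJoin("agent-0")
_ = b // building the final agent config is elided here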
View File

@@ -315,6 +315,16 @@ func (c *Cluster) StandardUpgrade(t *testing.T, ctx context.Context, targetVersi
}
t.Logf("The number of followers = %d", len(followers))
// NOTE: we only assert the number of agents in the default partition
// TODO: add partition to the cluster struct to assert partition size
clusterSize := 0
for _, agent := range c.Agents {
if agent.GetPartition() == "" || agent.GetPartition() == "default" {
clusterSize++
}
}
t.Logf("The number of agents in default partition = %d", clusterSize)
upgradeFn := func(agent Agent, clientFactory func() (*api.Client, error)) error {
config := agent.GetConfig()
config.Version = targetVersion
@@ -349,8 +359,10 @@ func (c *Cluster) StandardUpgrade(t *testing.T, ctx context.Context, targetVersi
return err
}
// wait until the agent rejoin and leader is elected
WaitForMembers(t, client, len(c.Agents))
// wait until the agent rejoins and a leader is elected; skip agents outside the default partition
if agent.GetPartition() == "" || agent.GetPartition() == "default" {
WaitForMembers(t, client, clusterSize)
}
WaitForLeader(t, c, client)
return nil
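Both the counting loop and the WaitForMembers guard in the hunks above treat an empty partition name as the default partition; a hypothetical helper, not part of this diff, captures the same check in one place.

// inDefaultPartition reports whether an agent belongs to the default
// admin partition; an unset partition name is treated as "default",
// matching the checks used in StandardUpgrade above.
func inDefaultPartition(a Agent) bool {
    p := a.GetPartition()
    return p == "" || p == "default"
}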
@@ -478,7 +490,23 @@ func (c *Cluster) Servers() []Agent {
return servers
}
// Clients returns the handle to client agents
// ClientsInPartition returns the handle to client agents in the provided partition
func (c *Cluster) ClientsInPartition(partition string) []Agent {
var clients []Agent
for _, n := range c.Agents {
if n.IsServer() {
continue
}
if n.GetPartition() == partition {
clients = append(clients, n)
}
}
return clients
}
// Clients returns the handle to client agents in all partitions
func (c *Cluster) Clients() []Agent {
var clients []Agent

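A brief usage sketch for the new accessor; the partition name "ap1" and the surrounding test variables (c, t) are illustrative rather than taken from this diff.

// Iterate only the client agents registered in a given admin partition.
for _, agent := range c.ClientsInPartition("ap1") {
    t.Logf("client %s runs in partition %q", agent.GetName(), agent.GetPartition())
}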
View File

@@ -38,6 +38,7 @@ type consulContainerNode struct {
container testcontainers.Container
serverMode bool
datacenter string
partition string
config Config
podReq testcontainers.ContainerRequest
consulReq testcontainers.ContainerRequest
@@ -228,6 +229,7 @@ func NewConsulContainer(ctx context.Context, config Config, cluster *Cluster, po
container: consulContainer,
serverMode: pc.Server,
datacenter: pc.Datacenter,
partition: pc.Partition,
ctx: ctx,
podReq: podReq,
consulReq: consulReq,
@@ -318,6 +320,10 @@ func (c *consulContainerNode) GetDatacenter() string {
return c.datacenter
}
func (c *consulContainerNode) GetPartition() string {
return c.partition
}
func (c *consulContainerNode) IsServer() bool {
return c.serverMode
}
@@ -641,6 +647,7 @@ type parsedConfig struct {
Datacenter string `json:"datacenter"`
Server bool `json:"server"`
Ports parsedPorts `json:"ports"`
Partition string `json:"partition"`
}
type parsedPorts struct {

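A minimal, self-contained sketch of the decoding this field enables, assuming the agent config snippet is JSON with the tags shown above; the literal values are illustrative.

package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed copy of parsedConfig for illustration; the real struct also
// carries the Ports field shown above.
type parsedConfig struct {
    Datacenter string `json:"datacenter"`
    Server     bool   `json:"server"`
    Partition  string `json:"partition"`
}

func main() {
    raw := `{"datacenter":"dc1","server":false,"partition":"ap1"}`
    var pc parsedConfig
    if err := json.Unmarshal([]byte(raw), &pc); err != nil {
        panic(err)
    }
    // The parsed partition is what NewConsulContainer copies onto the node.
    fmt.Println(pc.Partition) // ap1
}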
View File

@@ -122,7 +122,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
}
clientConnectProxy, err := createAndRegisterStaticClientSidecarWith2Upstreams(dialing,
[]string{"split-static-server", "peer-static-server"},
[]string{"split-static-server", "peer-static-server"}, true,
)
if err != nil {
return nil, nil, nil, fmt.Errorf("error creating client connect proxy in cluster %s", dialing.NetworkName)
@@ -236,7 +236,7 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
}
clientConnectProxy, err := createAndRegisterStaticClientSidecarWith2Upstreams(dialing,
[]string{libservice.StaticServerServiceName, "peer-static-server"},
[]string{libservice.StaticServerServiceName, "peer-static-server"}, true,
)
if err != nil {
return nil, nil, nil, fmt.Errorf("error creating client connect proxy in cluster %s", dialing.NetworkName)
@@ -385,7 +385,8 @@ func TestPeering_UpgradeToTarget_fromLatest(t *testing.T) {
// createAndRegisterStaticClientSidecarWith2Upstreams creates a static-client that
// has two upstreams connecting to destinationNames: local bind addresses are 5000
// and 5001.
func createAndRegisterStaticClientSidecarWith2Upstreams(c *cluster.Cluster, destinationNames []string) (*libservice.ConnectContainer, error) {
// - crossCluster: true if the upstreams are in another (peered) cluster
func createAndRegisterStaticClientSidecarWith2Upstreams(c *cluster.Cluster, destinationNames []string, crossCluster bool) (*libservice.ConnectContainer, error) {
// Do some trickery to ensure that partial completion is correctly torn
// down, but successful execution is not.
var deferClean utils.ResettableDefer
@@ -407,17 +408,11 @@ func createAndRegisterStaticClientSidecarWith2Upstreams(c *cluster.Cluster, dest
DestinationName: destinationNames[0],
LocalBindAddress: "0.0.0.0",
LocalBindPort: cluster.ServiceUpstreamLocalBindPort,
MeshGateway: api.MeshGatewayConfig{
Mode: mgwMode,
},
},
{
DestinationName: destinationNames[1],
LocalBindAddress: "0.0.0.0",
LocalBindPort: cluster.ServiceUpstreamLocalBindPort2,
MeshGateway: api.MeshGatewayConfig{
Mode: mgwMode,
},
},
},
},
@@ -425,6 +420,14 @@ func createAndRegisterStaticClientSidecarWith2Upstreams(c *cluster.Cluster, dest
},
}
if crossCluster {
// Index into the slice so the MeshGateway assignment sticks; Upstreams
// holds values, and a write through the range copy would be lost.
for i := range req.Connect.SidecarService.Proxy.Upstreams {
req.Connect.SidecarService.Proxy.Upstreams[i].MeshGateway = api.MeshGatewayConfig{
Mode: mgwMode,
}
}
}
if err := node.GetClient().Agent().ServiceRegister(req); err != nil {
return nil, err
}
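For reference, the call sites changed earlier in this diff pass crossCluster=true so that both upstreams route through the mesh gateway; a condensed sketch of the cross-cluster case, with error handling as in the diff.

// The upstreams live in a peered cluster, so crossCluster=true attaches
// the mesh-gateway mode to each of them before registration.
clientConnectProxy, err := createAndRegisterStaticClientSidecarWith2Upstreams(
    dialing, []string{"split-static-server", "peer-static-server"}, true,
)
if err != nil {
    return nil, nil, nil, fmt.Errorf("error creating client connect proxy in cluster %s", dialing.NetworkName)
}
// With crossCluster=false the upstreams keep direct routing and no
// MeshGateway config is added.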