upgrade test(LTS): add segments to version 1.10 (#19861)

This commit is contained in:
cskh 2023-12-08 12:22:16 -05:00 committed by GitHub
parent d4fda945bb
commit 0ca070b301
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 50 additions and 73 deletions

View File

@ -1,70 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package usage_profiles
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
"github.com/hashicorp/consul/testing/deployer/sprawl"
"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
"github.com/hashicorp/consul/testing/deployer/topology"
)
const (
	// ltsVersion is the long term support (LTS) version that the cluster
	// is upgraded to in this test suite.
	ltsVersion = "1.15.7"
)
// Test_Upgrade_ServiceDiscovery_Wan_Segment tests an upgrade from a source
// version (1.10.8) to the specified long term support version.
// Clusters: multi-segment and multi-cluster (TODO)
// Workload: service discovery (no mesh) (TODO)
func Test_Upgrade_ServiceDiscovery_Wan_Segment(t *testing.T) {
	utils.LatestVersion = "1.10.8"
	utils.TargetVersion = ltsVersion

	dc1, err := createTopology("dc1")
	require.NoError(t, err)
	t.Log("Created topology:", dc1.Name, "enterprise:", utils.IsEnterprise())

	// Fixed typo: was "toplogyConfig".
	topologyConfig := &topology.Config{
		Networks: []*topology.Network{
			{Name: "dc1"},
		},
	}
	topologyConfig.Clusters = append(topologyConfig.Clusters, dc1)

	// Launch the cluster at the source version, then upgrade it in place to
	// the LTS target images.
	sp := sprawltest.Launch(t, topologyConfig)
	cfg := sp.Config()
	require.NoError(t, sp.Upgrade(cfg, "dc1", sprawl.UpgradeTypeStandard, utils.TargetImages(), nil))
	t.Log("Finished standard upgrade ...")

	// Give agents time to settle (anti-entropy / re-registration) after the
	// upgrade. NOTE(review): a fixed sleep is flake-prone; prefer waiting on
	// a concrete readiness condition if the harness exposes one.
	time.Sleep(30 * time.Second)
}
// createTopology builds a single-cluster topology with one server node and
// one client node. Node and network names are derived from name, so the
// helper works for cluster names other than "dc1" (previously the node and
// network names were hard-coded to "dc1", silently ignoring the parameter).
// The error return is kept for interface stability; it is currently always nil.
func createTopology(name string) (*topology.Cluster, error) {
	clu := &topology.Cluster{
		Name:   name,
		Images: utils.LatestImages(),
		Nodes: []*topology.Node{
			{
				Kind: topology.NodeKindServer,
				Name: name + "-server1",
				Addresses: []*topology.Address{
					{Network: name},
				},
			},
			{
				Kind: topology.NodeKindClient,
				Name: name + "-client1",
			},
		},
		Enterprise: utils.IsEnterprise(),
	}
	return clu, nil
}

View File

@ -170,6 +170,8 @@ agent_prefix "" {
node_prefix "" {
policy = "write"
}
operator = "read"
`
policy, _, err := acl.PolicyCreate(
&api.ACLPolicy{

View File

@ -316,7 +316,7 @@ func (s *Sprawl) createFirstTime() error {
}
for _, cluster := range s.topology.Clusters {
if err := s.waitForClientAntiEntropyOnce(cluster); err != nil {
return fmt.Errorf("waitForClientAntiEntropyOnce[%s]: %w", cluster.Name, err)
return fmt.Errorf("create first time - waitForClientAntiEntropyOnce[%s]: %w", cluster.Name, err)
}
}
@ -447,7 +447,7 @@ func (s *Sprawl) postRegenTasks(firstTime bool) error {
for _, cluster := range s.topology.Clusters {
if err := s.waitForClientAntiEntropyOnce(cluster); err != nil {
return fmt.Errorf("waitForClientAntiEntropyOnce[%s]: %w", cluster.Name, err)
return fmt.Errorf("post regenerate waitForClientAntiEntropyOnce[%s]: %w", cluster.Name, err)
}
}

View File

@ -10,6 +10,10 @@ import (
"strconv"
"strings"
"text/tabwriter"
"time"
retry "github.com/avast/retry-go"
"github.com/hashicorp/consul/api"
)
// PrintDetails will dump relevant addressing and naming data to the logger for
@ -22,7 +26,19 @@ func (s *Sprawl) PrintDetails() error {
for _, cluster := range s.topology.Clusters {
client := s.clients[cluster.Name]
cfg, err := client.Operator().RaftGetConfiguration(nil)
var cfg *api.RaftConfiguration
var err error
err = retry.Do(
func() error {
cfg, err = client.Operator().RaftGetConfiguration(nil)
if err != nil {
return fmt.Errorf("error get raft config: %w", err)
}
return nil
},
retry.MaxDelay(5*time.Second),
retry.Attempts(15),
)
if err != nil {
return fmt.Errorf("could not get raft config for cluster %q: %w", cluster.Name, err)
}

View File

@ -60,6 +60,13 @@ func (g *Generator) generateAgentHCL(node *topology.Node, enableV2, enableV2Tena
b.add("retry_interval", "1s")
// }
if node.Segment != nil {
b.add("segment", node.Segment.Name)
b.addSlice("retry_join", []string{
fmt.Sprintf("server.%s-consulcluster.lan:%d", node.Cluster, node.Segment.Port),
})
}
if node.Images.GreaterThanVersion(topology.MinVersionPeering) {
if node.IsServer() {
b.addBlock("peering", func() {
@ -209,6 +216,17 @@ func (g *Generator) generateAgentHCL(node *topology.Node, enableV2, enableV2Tena
}
})
}
if cluster.Segments != nil {
b.format("segments = [")
for name, port := range cluster.Segments {
b.format("{")
b.add("name", name)
b.add("port", port)
b.format("},")
}
b.format("]")
}
} else {
if cluster.Enterprise && node.Images.GreaterThanVersion(topology.MinVersionAgentTokenPartition) {
b.add("partition", node.Partition)

View File

@ -290,6 +290,9 @@ type Cluster struct {
// EnableV2Tenancy activates V2 tenancy on the servers. If not enabled,
// V2 resources are bridged to V1 tenancy counterparts.
EnableV2Tenancy bool `json:",omitempty"`
// Segments is a map of network segment name and the ports
Segments map[string]int
}
func (c *Cluster) inheritFromExisting(existing *Cluster) {
@ -485,6 +488,11 @@ const (
NodeVersionV2 NodeVersion = "v2"
)
// NetworkSegment identifies a Consul network segment that an agent joins:
// the segment's name and the port it is served on.
type NetworkSegment struct {
	// Name of the network segment.
	Name string
	// Port the segment listens on.
	Port int
}
// TODO: rename pod
type Node struct {
Kind NodeKind
@ -530,6 +538,9 @@ type Node struct {
// AutopilotConfig of the server agent
AutopilotConfig map[string]string
// Network segment of the agent - applicable to client agent only
Segment *NetworkSegment
}
func (n *Node) DockerName() string {