consul-container: mitigate the drift from ent repo (#17323)

Author: cskh
Date:   2023-05-12 13:03:30 -04:00 (committed by GitHub)
Parent: b9102c295d
Commit: 2edfda998a
6 changed files with 114 additions and 108 deletions


@@ -57,7 +57,7 @@ func GetLatestImageName() string {
 func DockerImage(image, version string) string {
 	v := image + ":" + version
-	if image == DefaultImageNameENT && isSemVer(version) {
+	if strings.Contains(image, DefaultImageNameENT) && isSemVer(version) {
 		// Enterprise versions get a suffix.
 		v += ImageVersionSuffixENT
 	}
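Note on this hunk: with `strings.Contains`, any image reference that embeds the enterprise repo name still receives the enterprise tag suffix, whereas the old `==` check matched only the exact default name. A minimal standalone sketch of the effect, with assumed constant values (the real constants live in this utils package; the `isSemVer` check is elided):

```go
package main

import (
	"fmt"
	"strings"
)

// Assumed values; the real constants are defined in the utils package.
const (
	defaultImageNameENT   = "hashicorp/consul-enterprise"
	imageVersionSuffixENT = "-ent"
)

func dockerImage(image, version string) string {
	v := image + ":" + version
	if strings.Contains(image, defaultImageNameENT) { // isSemVer check elided
		v += imageVersionSuffixENT
	}
	return v
}

func main() {
	// Matched by both the old == check and the new Contains check:
	fmt.Println(dockerImage("hashicorp/consul-enterprise", "1.15.2"))
	// Matched only by Contains; the old == check missed the mirror prefix
	// (mirror.example.com is a hypothetical registry for illustration):
	fmt.Println(dockerImage("mirror.example.com/hashicorp/consul-enterprise", "1.15.2"))
}
```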


@@ -14,6 +14,7 @@ import (
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
 	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
 )
@@ -21,21 +22,19 @@ import (
 func TestBasic(t *testing.T) {
 	t.Parallel()
 
-	configCtx := libcluster.NewBuildContext(t, libcluster.BuildOptions{
-		ConsulImageName: utils.GetLatestImageName(),
-		ConsulVersion:   utils.LatestVersion,
-	})
-
 	const numServers = 1
-
-	serverConf := libcluster.NewConfigBuilder(configCtx).
-		Bootstrap(numServers).
-		ToAgentConfig(t)
-	t.Logf("Cluster config:\n%s", serverConf.JSON)
-	require.Equal(t, utils.LatestVersion, serverConf.Version) // TODO: remove
-
-	cluster, err := libcluster.NewN(t, *serverConf, numServers)
-	require.NoError(t, err)
+	buildOpts := &libcluster.BuildOptions{
+		ConsulImageName:      utils.GetLatestImageName(),
+		ConsulVersion:        utils.LatestVersion,
+		Datacenter:           "dc1",
+		InjectAutoEncryption: true,
+	}
+
+	cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
+		NumServers:                1,
+		BuildOpts:                 buildOpts,
+		ApplyDefaultProxySettings: true,
+	})
 
 	client := cluster.APIClient(0)
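`topology.NewCluster` now encapsulates the build-context/config-builder/bootstrap boilerplate deleted above. A plausible sketch of what it folds together, assembled only from the `libcluster` calls visible in the removed lines (the helper's two extra return values, discarded as `_, _`, are not shown in this diff, so the sketch returns just the cluster):

```go
// Sketch under the assumptions stated above; not the real helper body.
func newClusterSketch(t *testing.T, cfg *topology.ClusterConfig) *libcluster.Cluster {
	configCtx := libcluster.NewBuildContext(t, *cfg.BuildOpts)
	serverConf := libcluster.NewConfigBuilder(configCtx).
		Bootstrap(cfg.NumServers).
		ToAgentConfig(t)
	cluster, err := libcluster.NewN(t, *serverConf, cfg.NumServers)
	require.NoError(t, err)
	// The real helper would also honor cfg.ApplyDefaultProxySettings here.
	return cluster
}
```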
@@ -53,7 +52,7 @@ func TestBasic(t *testing.T) {
 	// upgrade the cluster to the Target version
 	t.Logf("initiating standard upgrade to version=%q", utils.TargetVersion)
-	err = cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), utils.TargetVersion)
+	err := cluster.StandardUpgrade(t, context.Background(), utils.GetTargetImageName(), utils.TargetVersion)
 	require.NoError(t, err)
 
 	libcluster.WaitForLeader(t, cluster, client)
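Note: the `err =` to `err :=` changes in this file and the next follow from the refactor. `err` was previously declared by the now-removed `cluster, err := libcluster.NewN(...)` line, and `topology.NewCluster`'s secondary return values are discarded, so `err` must be declared at its first remaining use.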


@@ -16,6 +16,7 @@ import (
 	"github.com/hashicorp/consul/sdk/testutil/retry"
 	libcluster "github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
 	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
 )
@@ -50,24 +51,19 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
 	)
 
 	run := func(t *testing.T, tc testcase) {
-		configCtx := libcluster.NewBuildContext(t, libcluster.BuildOptions{
-			ConsulImageName: utils.GetLatestImageName(),
-			ConsulVersion:   tc.oldVersion,
-		})
-
-		const (
-			numServers = 1
-		)
-
-		serverConf := libcluster.NewConfigBuilder(configCtx).
-			Bootstrap(numServers).
-			ToAgentConfig(t)
-		t.Logf("Cluster config:\n%s", serverConf.JSON)
-		require.Equal(t, tc.oldVersion, serverConf.Version) // TODO: remove
-
-		cluster, err := libcluster.NewN(t, *serverConf, numServers)
-		require.NoError(t, err)
+		const numServers = 1
+		buildOpts := &libcluster.BuildOptions{
+			ConsulImageName:      utils.GetLatestImageName(),
+			ConsulVersion:        utils.LatestVersion,
+			Datacenter:           "dc1",
+			InjectAutoEncryption: true,
+		}
+		cluster, _, _ := topology.NewCluster(t, &topology.ClusterConfig{
+			NumServers:                numServers,
+			BuildOpts:                 buildOpts,
+			ApplyDefaultProxySettings: true,
+		})
 
 		client := cluster.APIClient(0)
 		libcluster.WaitForLeader(t, cluster, client)
@@ -80,7 +76,7 @@ func TestStandardUpgradeToTarget_fromLatest(t *testing.T) {
 		require.NoError(t, client.Agent().ServiceRegister(
 			&api.AgentServiceRegistration{Name: serviceName, Port: 9998},
 		))
-		err = goretry.Do(
+		err := goretry.Do(
 			func() error {
 				ch, errCh := libservice.ServiceHealthBlockingQuery(client, serviceName, index)
 				select {
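The `goretry.Do` closure wraps a Consul blocking query on the service's health entries. For reference, a minimal sketch of the same pattern against the plain `api` client (imports `time` and `github.com/hashicorp/consul/api`; the internals of `libservice.ServiceHealthBlockingQuery` are treated as an assumption here):

```go
// waitForServiceChange blocks until the health entries for service change
// past lastIndex, then returns the new index for the next iteration.
func waitForServiceChange(client *api.Client, service string, lastIndex uint64) (uint64, error) {
	opts := &api.QueryOptions{WaitIndex: lastIndex, WaitTime: 10 * time.Second}
	_, meta, err := client.Health().Service(service, "", false, opts)
	if err != nil {
		return lastIndex, err // goretry.Do re-invokes on error
	}
	return meta.LastIndex, nil
}
```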


@@ -85,7 +85,7 @@ func TestMixedServersMajorityTargetGAClient(t *testing.T) {
 func testMixedServersGAClient(t *testing.T, majorityIsTarget bool) {
 	var (
 		latestOpts = libcluster.BuildOptions{
-			ConsulImageName: utils.LatestImageName,
+			ConsulImageName: utils.GetLatestImageName(),
 			ConsulVersion:   utils.LatestVersion,
 		}
 		targetOpts = libcluster.BuildOptions{
@@ -137,7 +137,7 @@ func testMixedServersGAClient(t *testing.T, majorityIsTarget bool) {
 	cluster, err := libcluster.New(t, configs)
 	require.NoError(t, err)
 
-	libservice.ClientsCreate(t, numClients, utils.LatestImageName, utils.LatestVersion, cluster)
+	libservice.ClientsCreate(t, numClients, utils.GetLatestImageName(), utils.LatestVersion, cluster)
 
 	client := cluster.APIClient(0)
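These two hunks replace reads of the `utils.LatestImageName` variable with calls to `utils.GetLatestImageName()`, whose signature appears in the first hunk's context. A hypothetical sketch of why the accessor helps against ent-repo drift; the branch and the OSS constant are assumptions, since the function body is not part of this diff:

```go
// Hypothetical: the real body of GetLatestImageName is not shown in this diff.
// A function resolves the image at call time, so the enterprise repo can pick
// its own default without test files like these diverging between repos.
func GetLatestImageName() string {
	if isEnterpriseBuild { // assumed flag; the real mechanism is not shown
		return DefaultImageNameENT
	}
	return defaultImageNameOSS // assumed OSS counterpart constant
}
```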


@@ -0,0 +1,81 @@
+package upgrade
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/consul/api"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
+	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
+	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
+)
+
+// CreateAndRegisterStaticClientSidecarWith2Upstreams creates a static-client that
+// has two upstreams connecting to destinationNames: local bind addresses are 5000
+// and 5001.
+// - crossCluster: true if upstream is in another cluster
+func CreateAndRegisterStaticClientSidecarWith2Upstreams(c *cluster.Cluster, destinationNames []string, crossCluster bool) (*libservice.ConnectContainer, error) {
+	// Do some trickery to ensure that partial completion is correctly torn
+	// down, but successful execution is not.
+	var deferClean utils.ResettableDefer
+	defer deferClean.Execute()
+
+	node := c.Servers()[0]
+	mgwMode := api.MeshGatewayModeLocal
+
+	// Register the static-client service and sidecar first to prevent race with sidecar
+	// trying to get xDS before it's ready
+	req := &api.AgentServiceRegistration{
+		Name: libservice.StaticClientServiceName,
+		Port: 8080,
+		Connect: &api.AgentServiceConnect{
+			SidecarService: &api.AgentServiceRegistration{
+				Proxy: &api.AgentServiceConnectProxyConfig{
+					Upstreams: []api.Upstream{
+						{
+							DestinationName:  destinationNames[0],
+							LocalBindAddress: "0.0.0.0",
+							LocalBindPort:    cluster.ServiceUpstreamLocalBindPort,
+						},
+						{
+							DestinationName:  destinationNames[1],
+							LocalBindAddress: "0.0.0.0",
+							LocalBindPort:    cluster.ServiceUpstreamLocalBindPort2,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	if crossCluster {
+		// Assign via index: ranging by value would mutate a copy of each upstream.
+		for i := range req.Connect.SidecarService.Proxy.Upstreams {
+			req.Connect.SidecarService.Proxy.Upstreams[i].MeshGateway = api.MeshGatewayConfig{
+				Mode: mgwMode,
+			}
+		}
+	}
+
+	if err := node.GetClient().Agent().ServiceRegister(req); err != nil {
+		return nil, err
+	}
+
+	// Create a service and proxy instance
+	sidecarCfg := libservice.SidecarConfig{
+		Name:      fmt.Sprintf("%s-sidecar", libservice.StaticClientServiceName),
+		ServiceID: libservice.StaticClientServiceName,
+	}
+	clientConnectProxy, err := libservice.NewConnectService(context.Background(), sidecarCfg, []int{cluster.ServiceUpstreamLocalBindPort, cluster.ServiceUpstreamLocalBindPort2}, node)
+	if err != nil {
+		return nil, err
+	}
+	deferClean.Add(func() {
+		_ = clientConnectProxy.Terminate()
+	})
+
+	// disable cleanup functions now that we have an object with a Terminate() function
+	deferClean.Reset()
+
+	return clientConnectProxy, nil
+}
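Exporting the helper lets the peering tests in the next file call it across packages. Example call, mirroring those call sites (`dialingCluster` and the service names come from those tests):

```go
clientConnectProxy, err := upgrade.CreateAndRegisterStaticClientSidecarWith2Upstreams(
	dialingCluster,
	[]string{libservice.StaticServerServiceName, "peer-static-server"},
	true, // crossCluster: the second upstream lives in the peered cluster
)
require.NoErrorf(t, err, "error creating client connect proxy in cluster %s", dialingCluster.NetworkName)
```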


@@ -12,10 +12,10 @@ import (
 	"github.com/hashicorp/consul/api"
 	libassert "github.com/hashicorp/consul/test/integration/consul-container/libs/assert"
-	"github.com/hashicorp/consul/test/integration/consul-container/libs/cluster"
 	libservice "github.com/hashicorp/consul/test/integration/consul-container/libs/service"
 	libtopology "github.com/hashicorp/consul/test/integration/consul-container/libs/topology"
 	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
+	"github.com/hashicorp/consul/test/integration/consul-container/test/upgrade"
 )
 
 func TestPeering_Basic(t *testing.T) {
@@ -101,7 +101,7 @@ func TestPeering_HTTPResolverAndFailover(t *testing.T) {
 		},
 	}))
 
-	clientConnectProxy, err := createAndRegisterStaticClientSidecarWith2Upstreams(dialingCluster,
+	clientConnectProxy, err := upgrade.CreateAndRegisterStaticClientSidecarWith2Upstreams(dialingCluster,
 		[]string{libservice.StaticServerServiceName, "peer-static-server"}, true,
 	)
 	require.NoErrorf(t, err, "error creating client connect proxy in cluster %s", dialingCluster.NetworkName)
@@ -194,7 +194,7 @@ func TestPeering_HTTPResolverAndSplitter(t *testing.T) {
 		},
 	}))
 
-	clientConnectProxy, err := createAndRegisterStaticClientSidecarWith2Upstreams(dialingCluster,
+	clientConnectProxy, err := upgrade.CreateAndRegisterStaticClientSidecarWith2Upstreams(dialingCluster,
 		[]string{"split-static-server", "peer-static-server"}, true,
 	)
 	require.NoErrorf(t, err, "creating client connect proxy in cluster %s", dialingCluster.NetworkName)
@@ -332,73 +332,3 @@ func peeringPostUpgradeValidation(t *testing.T, dialing *libtopology.BuiltCluster
 	libassert.HTTPServiceEchoes(t, "localhost", port, "")
 	libassert.AssertFortioName(t, fmt.Sprintf("http://localhost:%d", port), libservice.StaticServerServiceName, "")
 }
-
-// createAndRegisterStaticClientSidecarWith2Upstreams creates a static-client that
-// has two upstreams connecting to destinationNames: local bind addresses are 5000
-// and 5001.
-// - crossCluster: true if upstream is in another cluster
-func createAndRegisterStaticClientSidecarWith2Upstreams(c *cluster.Cluster, destinationNames []string, crossCluster bool) (*libservice.ConnectContainer, error) {
-	// Do some trickery to ensure that partial completion is correctly torn
-	// down, but successful execution is not.
-	var deferClean utils.ResettableDefer
-	defer deferClean.Execute()
-
-	node := c.Servers()[0]
-	mgwMode := api.MeshGatewayModeLocal
-
-	// Register the static-client service and sidecar first to prevent race with sidecar
-	// trying to get xDS before it's ready
-	req := &api.AgentServiceRegistration{
-		Name: libservice.StaticClientServiceName,
-		Port: 8080,
-		Connect: &api.AgentServiceConnect{
-			SidecarService: &api.AgentServiceRegistration{
-				Proxy: &api.AgentServiceConnectProxyConfig{
-					Upstreams: []api.Upstream{
-						{
-							DestinationName:  destinationNames[0],
-							LocalBindAddress: "0.0.0.0",
-							LocalBindPort:    cluster.ServiceUpstreamLocalBindPort,
-						},
-						{
-							DestinationName:  destinationNames[1],
-							LocalBindAddress: "0.0.0.0",
-							LocalBindPort:    cluster.ServiceUpstreamLocalBindPort2,
-						},
-					},
-				},
-			},
-		},
-	}
-
-	if crossCluster {
-		for _, upstream := range req.Connect.SidecarService.Proxy.Upstreams {
-			upstream.MeshGateway = api.MeshGatewayConfig{
-				Mode: mgwMode,
-			}
-		}
-	}
-
-	if err := node.GetClient().Agent().ServiceRegister(req); err != nil {
-		return nil, err
-	}
-
-	// Create a service and proxy instance
-	sidecarCfg := libservice.SidecarConfig{
-		Name:      fmt.Sprintf("%s-sidecar", libservice.StaticClientServiceName),
-		ServiceID: libservice.StaticClientServiceName,
-	}
-	clientConnectProxy, err := libservice.NewConnectService(context.Background(), sidecarCfg, []int{cluster.ServiceUpstreamLocalBindPort, cluster.ServiceUpstreamLocalBindPort2}, node)
-	if err != nil {
-		return nil, err
-	}
-	deferClean.Add(func() {
-		_ = clientConnectProxy.Terminate()
-	})
-
-	// disable cleanup functions now that we have an object with a Terminate() function
-	deferClean.Reset()
-
-	return clientConnectProxy, nil
-}