// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package connect

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/test-integ/topoutil"
	"github.com/hashicorp/consul/test/integration/consul-container/libs/utils"
	"github.com/hashicorp/consul/testing/deployer/sprawl/sprawltest"
	"github.com/hashicorp/consul/testing/deployer/topology"
)

// Test_Snapshot_Restore_Agentless verifies that the Consul agent can continue
// to push Envoy config after restoring from a snapshot.
//
//   - This test detects the server agent freezing after restoring from a
//     snapshot (https://github.com/hashicorp/consul/pull/18636).
//
//   - The bug only appeared in agentless mode.
//
// Steps:
//  1. Spin up a one-server cluster with static-server and static-client.
//  2. Take a snapshot and restore the cluster from it.
//  3. Replace the old static-server with a new one.
//  4. Assert that the static-client's upstream is updated to the new
//     static-server.
func Test_Snapshot_Restore_Agentless(t *testing.T) {
	t.Parallel()

	staticServerSID := topology.NewID("static-server", "default", "default")
	staticClientSID := topology.NewID("static-client", "default", "default")

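	// Topology: one server node (dc1-server1) plus three dataplane nodes:
	// dc1-client1 hosts the initial static-server, dc1-client2 hosts the
	// static-client with an upstream to static-server, and dc1-client3
	// starts disabled and later hosts the replacement static-server.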
	clu := &topology.Config{
		Images: utils.TargetImages(),
		Networks: []*topology.Network{
			{Name: "dc1"},
		},
		Clusters: []*topology.Cluster{
			{
				Name: "dc1",
				Nodes: []*topology.Node{
					{
						Kind: topology.NodeKindServer,
						// NOTE: uncomment the following lines to trigger the agent frozen bug
						// Images: topology.Images{
						// 	ConsulEnterprise: "hashicorp/consul-enterprise:1.16.1-ent",
						// },
						Name: "dc1-server1",
						Addresses: []*topology.Address{
							{Network: "dc1"},
						},
					},
					// Static-server
					{
						Kind: topology.NodeKindDataplane,
						Name: "dc1-client1",
						Workloads: []*topology.Workload{
							{
								ID:             staticServerSID,
								Image:          "docker.mirror.hashicorp.services/fortio/fortio",
								Port:           8080,
								EnvoyAdminPort: 19000,
								CheckTCP:       "127.0.0.1:8080",
								Command: []string{
									"server",
									"-http-port", "8080",
									"-redirect-port", "-disabled",
								},
							},
						},
					},
					{
						Kind: topology.NodeKindDataplane,
						Name: "dc1-client2",
						Workloads: []*topology.Workload{
							{
								ID:             staticClientSID,
								Image:          "docker.mirror.hashicorp.services/fortio/fortio",
								Port:           8080,
								EnvoyAdminPort: 19000,
								CheckTCP:       "127.0.0.1:8080",
								Command: []string{
									"server",
									"-http-port", "8080",
									"-redirect-port", "-disabled",
								},
								Upstreams: []*topology.Upstream{
									{
										ID:        staticServerSID,
										LocalPort: 5000,
									},
								},
							},
						},
					},
					// Client3 for second static-server
					{
						Kind:     topology.NodeKindDataplane,
						Name:     "dc1-client3",
						Disabled: true,
						Workloads: []*topology.Workload{
							{
								ID:             staticServerSID,
								Image:          "docker.mirror.hashicorp.services/fortio/fortio",
								Port:           8080,
								EnvoyAdminPort: 19000,
								CheckTCP:       "127.0.0.1:8080",
								Command: []string{
									"server",
									"-http-port", "8080",
									"-redirect-port", "-disabled",
								},
							},
						},
					},
				},
				Enterprise: utils.IsEnterprise(),
				InitialConfigEntries: []api.ConfigEntry{
					&api.ProxyConfigEntry{
						Kind: api.ProxyDefaults,
						Name: "global",
						Config: map[string]any{
							"protocol": "http",
						},
					},
					&api.ServiceConfigEntry{
						Kind: api.ServiceDefaults,
						Name: "static-server",
					},
					&api.ServiceIntentionsConfigEntry{
						Kind: api.ServiceIntentions,
						Name: "static-server",
						Sources: []*api.SourceIntention{
							{
								Name:   "static-client",
								Action: api.IntentionActionAllow,
							},
						},
					},
				},
			},
		},
	}

	sp := sprawltest.Launch(t, clu)
	asserter := topoutil.NewAsserter(sp)

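	// Sanity check before taking the snapshot: the static-client can reach the
	// static-server through its configured upstream, and the static-server
	// itself responds with 200.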
	staticClient := sp.Topology().Clusters["dc1"].WorkloadByID(
		topology.NewNodeID("dc1-client2", "default"),
		staticClientSID,
	)
	asserter.FortioFetch2HeaderEcho(t, staticClient, &topology.Upstream{
		ID:        staticServerSID,
		LocalPort: 5000,
	})
	staticServer := sp.Topology().Clusters["dc1"].WorkloadByID(
		topology.NewNodeID("dc1-client1", "default"),
		staticServerSID,
	)
	asserter.HTTPStatus(t, staticServer, staticServer.Port, 200)

	t.Log("Take a snapshot of the cluster and restore ...")
	err := sp.SnapshotSaveAndRestore("dc1")
	require.NoError(t, err)

	// Shutdown existing static-server
	cfg := sp.Config()
	cluster := cfg.Cluster("dc1")
	cluster.Nodes[1].Disabled = true // client 1 -- static-server
	require.NoError(t, sp.Relaunch(cfg))
	// verify static-server is down
	asserter.HTTPStatus(t, staticServer, staticServer.Port, 504)

	// Add a new static-server
	cfg = sp.Config()
	cluster = cfg.Cluster("dc1")
	cluster.Nodes[3].Disabled = false // client 3 -- new static-server
	require.NoError(t, sp.Relaunch(cfg))

	// Ensure the static-client connected to the new static-server
	asserter.FortioFetch2HeaderEcho(t, staticClient, &topology.Upstream{
		ID:        staticServerSID,
		LocalPort: 5000,
	})
}