// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package consul

import (
	"bytes"
	"os"
	"strings"
	"testing"
	"time"

	autopilot "github.com/hashicorp/raft-autopilot"
	"github.com/stretchr/testify/require"

	msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/sdk/testutil/retry"
	"github.com/hashicorp/consul/testrpc"
)

// verifySnapshot is a helper that does a snapshot and restore.
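// It writes a "before" value, takes a snapshot, verifies the value, overwrites
// the key with an "after" value, restores the snapshot, and then confirms the
// "before" value is back in place.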
func verifySnapshot(t *testing.T, s *Server, dc, token string) {
	codec := rpcClient(t, s)
	defer codec.Close()

	// Set a key to a before value.
	{
		args := structs.KVSRequest{
			Datacenter: dc,
			Op:         api.KVSet,
			DirEnt: structs.DirEntry{
				Key:   "test",
				Value: []byte("hello"),
			},
			WriteRequest: structs.WriteRequest{
				Token: token,
			},
		}
		var out bool
		if err := msgpackrpc.CallWithCodec(codec, "KVS.Apply", &args, &out); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Take a snapshot.
	args := structs.SnapshotRequest{
		Datacenter: dc,
		Token:      token,
		Op:         structs.SnapshotSave,
	}
	var reply structs.SnapshotResponse
	snap, err := SnapshotRPC(s.connPool, s.config.Datacenter, s.config.NodeName, s.config.RPCAddr,
		&args, bytes.NewReader([]byte("")), &reply)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer snap.Close()

	// Read back the before value.
	{
		getR := structs.KeyRequest{
			Datacenter: dc,
			Key:        "test",
			QueryOptions: structs.QueryOptions{
				Token: token,
			},
		}
		var dirent structs.IndexedDirEntries
		if err := msgpackrpc.CallWithCodec(codec, "KVS.Get", &getR, &dirent); err != nil {
			t.Fatalf("err: %v", err)
		}
		if len(dirent.Entries) != 1 {
			t.Fatalf("Bad: %v", dirent)
		}
		d := dirent.Entries[0]
		if string(d.Value) != "hello" {
			t.Fatalf("bad: %v", d)
		}
	}

	// Set a key to an after value.
	{
		args := structs.KVSRequest{
			Datacenter: dc,
			Op:         api.KVSet,
			DirEnt: structs.DirEntry{
				Key:   "test",
				Value: []byte("goodbye"),
			},
			WriteRequest: structs.WriteRequest{
				Token: token,
			},
		}
		var out bool
		if err := msgpackrpc.CallWithCodec(codec, "KVS.Apply", &args, &out); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Read back the after value. We do this with a retry and stale mode so
	// we can query the server we are working with, which might not be the
	// leader.
	retry.Run(t, func(r *retry.R) {
		getR := structs.KeyRequest{
			Datacenter: dc,
			Key:        "test",
			QueryOptions: structs.QueryOptions{
				Token:      token,
				AllowStale: true,
			},
		}
		var dirent structs.IndexedDirEntries
		if err := msgpackrpc.CallWithCodec(codec, "KVS.Get", &getR, &dirent); err != nil {
			r.Fatalf("err: %v", err)
		}
		if len(dirent.Entries) != 1 {
			r.Fatalf("Bad: %v", dirent)
		}
		d := dirent.Entries[0]
		if string(d.Value) != "goodbye" {
			r.Fatalf("bad: %v", d)
		}
	})

	// Restore the snapshot.
	args.Op = structs.SnapshotRestore
	restore, err := SnapshotRPC(s.connPool, s.config.Datacenter, s.config.NodeName, s.config.RPCAddr,
		&args, snap, &reply)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer restore.Close()

	// Read back the before value post-snapshot. Similar rationale here; use
	// stale to query the server we are working with.
	retry.Run(t, func(r *retry.R) {
		getR := structs.KeyRequest{
			Datacenter: dc,
			Key:        "test",
			QueryOptions: structs.QueryOptions{
				Token:      token,
				AllowStale: true,
			},
		}
		var dirent structs.IndexedDirEntries
		if err := msgpackrpc.CallWithCodec(codec, "KVS.Get", &getR, &dirent); err != nil {
			r.Fatalf("err: %v", err)
		}
		if len(dirent.Entries) != 1 {
			r.Fatalf("Bad: %v", dirent)
		}
		d := dirent.Entries[0]
		if string(d.Value) != "hello" {
			r.Fatalf("bad: %v", d)
		}
	})
}

func TestSnapshot(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")
	verifySnapshot(t, s1, "dc1", "")

	// ensure autopilot is still running
	// https://github.com/hashicorp/consul/issues/9626
	apstatus, _ := s1.autopilot.IsRunning()
	require.Equal(t, autopilot.Running, apstatus)
}

func TestSnapshot_LeaderState(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServer(t)
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")

	codec := rpcClient(t, s1)
	defer codec.Close()

	// Make a before session.
	var before string
	{
		args := structs.SessionRequest{
			Datacenter: s1.config.Datacenter,
			Op:         structs.SessionCreate,
			Session: structs.Session{
				Node: s1.config.NodeName,
				TTL:  "60s",
			},
		}
		if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &args, &before); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Take a snapshot.
	args := structs.SnapshotRequest{
		Datacenter: s1.config.Datacenter,
		Op:         structs.SnapshotSave,
	}
	var reply structs.SnapshotResponse
	snap, err := SnapshotRPC(s1.connPool, s1.config.Datacenter, s1.config.NodeName, s1.config.RPCAddr,
		&args, bytes.NewReader([]byte("")), &reply)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer snap.Close()

	// Make an after session.
	var after string
	{
		args := structs.SessionRequest{
			Datacenter: s1.config.Datacenter,
			Op:         structs.SessionCreate,
			Session: structs.Session{
				Node: s1.config.NodeName,
				TTL:  "60s",
			},
		}
		if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &args, &after); err != nil {
			t.Fatalf("err: %v", err)
		}
	}

	// Make sure the leader has timers set up.
	if s1.sessionTimers.Get(before) == nil {
		t.Fatalf("missing session timer")
	}
	if s1.sessionTimers.Get(after) == nil {
		t.Fatalf("missing session timer")
	}

	// Restore the snapshot.
	args.Op = structs.SnapshotRestore
	restore, err := SnapshotRPC(s1.connPool, s1.config.Datacenter, s1.config.NodeName, s1.config.RPCAddr,
		&args, snap, &reply)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer restore.Close()

	// Make sure the before timer is still there, and that the after timer
	// got reverted. This proves we fully cycled the leader state.
	if s1.sessionTimers.Get(before) == nil {
		t.Fatalf("missing session timer")
	}
	if s1.sessionTimers.Get(after) != nil {
		t.Fatalf("unexpected session timer")
	}
}

func TestSnapshot_ACLDeny(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.PrimaryDatacenter = "dc1"
		c.ACLsEnabled = true
		c.ACLInitialManagementToken = "root"
		c.ACLResolverSettings.ACLDefaultPolicy = "deny"
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	defer codec.Close()

	testrpc.WaitForLeader(t, s1.RPC, "dc1")

	// Take a snapshot; without a token this should be denied.
	func() {
		args := structs.SnapshotRequest{
			Datacenter: "dc1",
			Op:         structs.SnapshotSave,
		}
		var reply structs.SnapshotResponse
		_, err := SnapshotRPC(s1.connPool, s1.config.Datacenter, s1.config.NodeName, s1.config.RPCAddr,
			&args, bytes.NewReader([]byte("")), &reply)
		if !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	}()

	// Restore a snapshot; without a token this should be denied.
	func() {
		args := structs.SnapshotRequest{
			Datacenter: "dc1",
			Op:         structs.SnapshotRestore,
		}
		var reply structs.SnapshotResponse
		_, err := SnapshotRPC(s1.connPool, s1.config.Datacenter, s1.config.NodeName, s1.config.RPCAddr,
			&args, bytes.NewReader([]byte("")), &reply)
		if !acl.IsErrPermissionDenied(err) {
			t.Fatalf("err: %v", err)
		}
	}()

	// With the token in place everything should go through.
	verifySnapshot(t, s1, "dc1", "root")
}

func TestSnapshot_Forward_Leader(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = true
		c.SerfWANConfig = nil

		// Effectively disable autopilot.
		// Changes in server config lead to flakiness because snapshotting
		// fails if there are config changes outstanding.
		c.AutopilotInterval = 50 * time.Second

		// Since we are doing multiple restores to the same leader,
		// the default short time for a reconcile can cause the
		// reconcile to get aborted by our snapshot restore. By
		// setting it much longer than the test, we avoid this case.
		c.ReconcileInterval = 60 * time.Second
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()
	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
		c.SerfWANConfig = nil
		c.AutopilotInterval = 50 * time.Second
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Try to join.
	joinLAN(t, s2, s1)
	testrpc.WaitForLeader(t, s2.RPC, "dc1")

	// Run against the leader and the follower to ensure we forward. When
	// we changed to Raft protocol version 3, since we only have two servers,
	// the second one isn't a voter, so the snapshot API doesn't wait for
	// that to replicate before returning success. We added some logic to
	// verifySnapshot() to poll the server we are working with in stale mode
	// in order to verify that the snapshot contents are there. Previously,
	// with Raft protocol version 2, the snapshot API would wait until the
	// follower got the information as well since it was required to meet
	// the quorum (2/2 servers), so things were synchronized properly with
	// no special logic.
	verifySnapshot(t, s1, "dc1", "")
	verifySnapshot(t, s2, "dc1", "")
}

func TestSnapshot_Forward_Datacenter(t *testing.T) {
	if testing.Short() {
		t.Skip("too slow for testing.Short")
	}

	t.Parallel()
	dir1, s1 := testServerDC(t, "dc1")
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerDC(t, "dc2")
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	testrpc.WaitForTestAgent(t, s1.RPC, "dc1")
	testrpc.WaitForTestAgent(t, s2.RPC, "dc2")

	// Try to WAN join.
	joinWAN(t, s2, s1)
	retry.Run(t, func(r *retry.R) {
		if got, want := len(s1.WANMembers()), 2; got < want {
			r.Fatalf("got %d WAN members want at least %d", got, want)
		}
	})

	// Run a snapshot from each server locally and remotely to ensure we
	// forward.
	for _, s := range []*Server{s1, s2} {
		verifySnapshot(t, s, "dc1", "")
		verifySnapshot(t, s, "dc2", "")
	}
}

func TestSnapshot_AllowStale(t *testing.T) {
	t.Parallel()
	dir1, s1 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
	})
	defer os.RemoveAll(dir1)
	defer s1.Shutdown()

	dir2, s2 := testServerWithConfig(t, func(c *Config) {
		c.Bootstrap = false
	})
	defer os.RemoveAll(dir2)
	defer s2.Shutdown()

	// Run against the servers, which haven't been set up to establish
	// a leader, and make sure we get a no leader error.
	for _, s := range []*Server{s1, s2} {
		// Take a snapshot.
		args := structs.SnapshotRequest{
			Datacenter: s.config.Datacenter,
			Op:         structs.SnapshotSave,
		}
		var reply structs.SnapshotResponse
		_, err := SnapshotRPC(s.connPool, s.config.Datacenter, s.config.NodeName, s.config.RPCAddr,
			&args, bytes.NewReader([]byte("")), &reply)
		if err == nil || !strings.Contains(err.Error(), structs.ErrNoLeader.Error()) {
			t.Fatalf("err: %v", err)
		}
	}

	// Run in stale mode and make sure we get an error from Raft (snapshot
	// was attempted), and not a no leader error.
	for _, s := range []*Server{s1, s2} {
		// Take a snapshot.
		args := structs.SnapshotRequest{
			Datacenter: s.config.Datacenter,
			AllowStale: true,
			Op:         structs.SnapshotSave,
		}
		var reply structs.SnapshotResponse
		_, err := SnapshotRPC(s.connPool, s.config.Datacenter, s.config.NodeName, s.config.RPCAddr,
			&args, bytes.NewReader([]byte("")), &reply)
		if err == nil || !strings.Contains(err.Error(), "Raft error when taking snapshot") {
			t.Fatalf("err: %v", err)
		}
	}
}