chore: fix ce/ent drift in sdk and testing/deployer submodules (#19041)

R.B. Boyer 2023-10-03 10:06:50 -05:00 committed by GitHub
parent 6c92dd1359
commit df930a59ac
21 changed files with 117 additions and 63 deletions

View File

@@ -10,6 +10,7 @@ import (
)
var noCleanup = strings.ToLower(os.Getenv("TEST_NOCLEANUP")) == "true"
var saveSnapshot = strings.ToLower(os.Getenv("TEST_SAVE_SNAPSHOT")) == "true"
// TempDir creates a temporary directory within tmpdir with the name 'testname-name'.
// If the directory cannot be created t.Fatal is called.

View File

@@ -177,9 +177,16 @@ type ServerConfigCallback func(c *TestServerConfig)
// defaultServerConfig returns a new TestServerConfig struct
// with all of the listen ports incremented by one.
func defaultServerConfig(t TestingTB, consulVersion *version.Version) *TestServerConfig {
nodeID, err := uuid.GenerateUUID()
if err != nil {
panic(err)
var nodeID string
var err error
if id, ok := os.LookupEnv("TEST_NODE_ID"); ok {
nodeID = id
} else {
nodeID, err = uuid.GenerateUUID()
if err != nil {
panic(err)
}
}
ports := freeport.GetN(t, 7)
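
The new TEST_NODE_ID hook makes the agent's node identity reproducible across server invocations. Since defaultServerConfig consults the environment at call time, a test can opt in with t.Setenv. A minimal sketch, assuming a consul binary on PATH (the UUID value is illustrative; Consul expects a well-formed UUID here):

package testutil_test

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil"
)

func TestPinnedNodeID(t *testing.T) {
	// Illustrative: pin the node ID so a restarted server keeps the
	// same identity. The UUID below is an arbitrary example value.
	t.Setenv("TEST_NODE_ID", "d9207c1c-9c4c-4a11-a4f5-3a6a0b5cfa78")

	srv, err := testutil.NewTestServerConfigT(t, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = srv.Stop() }()
}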
@@ -287,14 +294,29 @@ func NewTestServerConfigT(t TestingTB, cb ServerConfigCallback) (*TestServer, er
"consul or skip this test")
}
prefix := "consul"
if t != nil {
// Use test name for tmpdir if available
prefix = strings.Replace(t.Name(), "/", "_", -1)
}
tmpdir, err := os.MkdirTemp("", prefix)
if err != nil {
return nil, errors.Wrap(err, "failed to create tempdir")
var tmpdir string
if dir, ok := os.LookupEnv("TEST_TMP_DIR"); ok {
// NOTE(CTIA): using TEST_TMP_DIR may cause conflicts when NewTestServerConfigT
// is called more than once, since two agents would use the same directory
tmpdir = dir
if _, err := os.Stat(tmpdir); os.IsNotExist(err) {
if err = os.Mkdir(tmpdir, 0750); err != nil {
return nil, errors.Wrap(err, "failed to create tempdir from env TEST_TMP_DIR")
}
} else {
t.Logf("WARNING: using tempdir that already exists %s", tmpdir)
}
} else {
prefix := "consul"
if t != nil {
// Use test name for tmpdir if available
prefix = strings.Replace(t.Name(), "/", "_", -1)
}
tmpdir, err = os.MkdirTemp("", prefix)
if err != nil {
return nil, errors.Wrap(err, "failed to create tempdir")
}
}
consulVersion, err := findConsulVersion()
@@ -302,8 +324,12 @@ func NewTestServerConfigT(t TestingTB, cb ServerConfigCallback) (*TestServer, er
return nil, err
}
datadir := filepath.Join(tmpdir, "data")
if _, err := os.Stat(datadir); !os.IsNotExist(err) {
t.Logf("WARNING: using a data that already exists %s", datadir)
}
cfg := defaultServerConfig(t, consulVersion)
cfg.DataDir = filepath.Join(tmpdir, "data")
cfg.DataDir = datadir
if cb != nil {
cb(cfg)
}
@@ -324,6 +350,7 @@ func NewTestServerConfigT(t TestingTB, cb ServerConfigCallback) (*TestServer, er
// Start the server
args := []string{"agent", "-config-file", configFile}
args = append(args, cfg.Args...)
t.Logf("test cmd args: consul args: %s", args)
cmd := exec.Command("consul", args...)
cmd.Stdout = cfg.Stdout
cmd.Stderr = cfg.Stderr
@@ -388,6 +415,21 @@ func (s *TestServer) Stop() error {
}
if s.cmd.Process != nil {
if saveSnapshot {
fmt.Println("Saving snapshot")
// create a snapshot prior to the upgrade test
args := []string{"snapshot", "save", "-http-addr",
fmt.Sprintf("http://%s", s.HTTPAddr), filepath.Join(s.tmpdir, "backup.snap")}
fmt.Printf("Saving snapshot: consul args: %s\n", args)
cmd := exec.Command("consul", args...)
cmd.Stdout = s.Config.Stdout
cmd.Stderr = s.Config.Stderr
if err := cmd.Run(); err != nil {
return errors.Wrap(err, "failed to save a snapshot")
}
}
if runtime.GOOS == "windows" {
if err := s.cmd.Process.Kill(); err != nil {
return errors.Wrap(err, "failed to kill consul server")

View File

@@ -17,14 +17,14 @@ import (
// be used by tests to set the log level of a hclog.Logger. Defaults to
// hclog.Warn if the environment variable is unset, or if the value of the
// environment variable can not be matched to a log level.
var TestLogLevel = testLogLevel()
var TestLogLevel = TestLogLevelWithDefault(hclog.Warn)
func testLogLevel() hclog.Level {
func TestLogLevelWithDefault(l hclog.Level) hclog.Level {
level := hclog.LevelFromString(os.Getenv("TEST_LOG_LEVEL"))
if level != hclog.NoLevel {
return level
}
return hclog.Warn
return l
}
func Logger(t TestingTB) hclog.InterceptLogger {
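
Exporting the helper lets callers pick their own fallback level while TEST_LOG_LEVEL retains precedence. A minimal sketch of the intended call pattern (mirroring the sprawltest change further down):

package example_test

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/go-hclog"
)

func TestInfoDefault(t *testing.T) {
	// Illustrative: default this test's logger to Info rather than the
	// package-wide Warn; TEST_LOG_LEVEL, when set and parseable, wins.
	logger := testutil.Logger(t)
	logger.SetLevel(testutil.TestLogLevelWithDefault(hclog.Info))
	logger.Info("visible under the Info default")
}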

View File

@@ -43,3 +43,5 @@ require (
golang.org/x/text v0.11.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace github.com/hashicorp/consul/sdk => ../../sdk

View File

@@ -48,8 +48,6 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/hashicorp/consul/api v1.24.0 h1:u2XyStA2j0jnCiVUU7Qyrt8idjRn4ORhK6DlvZ3bWhA=
github.com/hashicorp/consul/api v1.24.0/go.mod h1:NZJGRFYruc/80wYowkPFCp1LbGmJC9L8izrwfyVx/Wg=
github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs=
github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=

View File

@@ -56,7 +56,7 @@ func (s *Sprawl) bootstrapACLs(cluster string) error {
return fmt.Errorf("management token no longer works: %w", err)
}
logger.Info("current management token", "token", mgmtToken)
logger.Debug("current management token", "token", mgmtToken)
return nil
}
@@ -65,7 +65,7 @@ TRYAGAIN2:
tok, _, err := ac.Bootstrap()
if err != nil {
if isACLNotBootstrapped(err) {
logger.Warn("system is rebooting", "error", err)
logger.Debug("system is rebooting", "error", err)
time.Sleep(250 * time.Millisecond)
goto TRYAGAIN2
}
@@ -74,7 +74,7 @@ TRYAGAIN2:
mgmtToken = tok.SecretID
s.secrets.SaveGeneric(cluster, secrets.BootstrapToken, mgmtToken)
logger.Info("current management token", "token", mgmtToken)
logger.Debug("current management token", "token", mgmtToken)
return nil
@@ -120,7 +120,7 @@ func (s *Sprawl) createAnonymousToken(cluster *topology.Cluster) error {
return err
}
logger.Info("created anonymous token",
logger.Debug("created anonymous token",
"token", token.SecretID,
)
@@ -138,7 +138,7 @@ func (s *Sprawl) createAnonymousPolicy(cluster *topology.Cluster) error {
return err
}
logger.Info("created anonymous policy",
logger.Debug("created anonymous policy",
"policy-name", op.Name,
"policy-id", op.ID,
)
@@ -164,7 +164,7 @@ func (s *Sprawl) createAgentTokens(cluster *topology.Cluster) error {
return err
}
logger.Info("created agent token",
logger.Debug("created agent token",
"node", node.ID(),
"token", token.SecretID,
)
@@ -193,7 +193,7 @@ func (s *Sprawl) createCrossNamespaceCatalogReadPolicies(cluster *topology.Clust
return err
}
logger.Info("created cross-ns-catalog-read policy",
logger.Debug("created cross-ns-catalog-read policy",
"policy-name", op.Name,
"policy-id", op.ID,
"partition", partition,
@@ -244,7 +244,7 @@ func (s *Sprawl) createServiceTokens(cluster *topology.Cluster) error {
return fmt.Errorf("could not create token: %w", err)
}
logger.Info("created service token",
logger.Debug("created service token",
"service", svc.ID.Name,
"namespace", svc.ID.Namespace,
"partition", svc.ID.Partition,

View File

@@ -177,7 +177,6 @@ func policyForMeshGateway(svc *topology.Service, enterprise bool) *api.ACLPolicy
Description: policyName,
}
if enterprise {
fmt.Printf("Enterprise mgw ACLS - Partition: %s, Namespace: default", svc.ID.Partition)
policy.Partition = svc.ID.Partition
policy.Namespace = "default"
}

View File

@@ -427,12 +427,12 @@ func (s *Sprawl) waitForLocalWrites(cluster *topology.Cluster, token string) {
start := time.Now()
for attempts := 0; ; attempts++ {
if err := tryKV(); err != nil {
logger.Warn("local kv write failed; something is not ready yet", "error", err)
logger.Debug("local kv write failed; something is not ready yet", "error", err)
time.Sleep(500 * time.Millisecond)
continue
} else {
dur := time.Since(start)
logger.Info("local kv write success", "elapsed", dur, "retries", attempts)
logger.Debug("local kv write success", "elapsed", dur, "retries", attempts)
}
break
@@ -442,12 +442,12 @@ func (s *Sprawl) waitForLocalWrites(cluster *topology.Cluster, token string) {
start = time.Now()
for attempts := 0; ; attempts++ {
if err := tryAP(); err != nil {
logger.Warn("local partition write failed; something is not ready yet", "error", err)
logger.Debug("local partition write failed; something is not ready yet", "error", err)
time.Sleep(500 * time.Millisecond)
continue
} else {
dur := time.Since(start)
logger.Info("local partition write success", "elapsed", dur, "retries", attempts)
logger.Debug("local partition write success", "elapsed", dur, "retries", attempts)
}
break
@@ -501,10 +501,10 @@ func (s *Sprawl) waitForClientAntiEntropyOnce(cluster *topology.Cluster) error {
if len(stragglers) == 0 {
dur := time.Since(start)
logger.Info("all nodes have posted node updates, so first anti-entropy has happened", "elapsed", dur)
logger.Debug("all nodes have posted node updates, so first anti-entropy has happened", "elapsed", dur)
return nil
}
logger.Info("not all client nodes have posted node updates yet", "nodes", stragglers)
logger.Debug("not all client nodes have posted node updates yet", "nodes", stragglers)
time.Sleep(1 * time.Second)
}
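
The wait loops in this file all share the same poll-log-sleep shape, now keeping routine retries at Debug. A generic distillation of the pattern (the helper name and signature are illustrative, not part of the commit):

package example

import (
	"time"

	"github.com/hashicorp/go-hclog"
)

// retryUntil polls fn until it succeeds, keeping per-attempt noise at
// Debug and reporting elapsed time and retry count on success.
func retryUntil(logger hclog.Logger, what string, interval time.Duration, fn func() error) {
	start := time.Now()
	for attempts := 0; ; attempts++ {
		if err := fn(); err != nil {
			logger.Debug(what+" failed; something is not ready yet", "error", err)
			time.Sleep(interval)
			continue
		}
		logger.Debug(what+" success", "elapsed", time.Since(start), "retries", attempts)
		return
	}
}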

View File

@@ -153,7 +153,7 @@ RETRY:
return fmt.Errorf("failed to register service %q to node %q: %w", svc.ID, node.ID(), err)
}
logger.Info("registered service to client agent",
logger.Debug("registered service to client agent",
"service", svc.ID.Name,
"node", node.Name,
"namespace", svc.ID.Namespace,
@@ -226,7 +226,7 @@ RETRY:
return fmt.Errorf("error registering virtual node %s: %w", node.ID(), err)
}
logger.Info("virtual node created",
logger.Debug("virtual node created",
"node", node.ID(),
)
@@ -258,7 +258,7 @@ RETRY:
return fmt.Errorf("error registering service %s to node %s: %w", svc.ID, node.ID(), err)
}
logger.Info("dataplane service created",
logger.Debug("dataplane service created",
"service", svc.ID,
"node", node.ID(),
)
@@ -293,7 +293,7 @@ RETRY:
return fmt.Errorf("error registering service %s to node %s: %w", svc.ID, node.ID(), err)
}
logger.Info("dataplane sidecar service created",
logger.Debug("dataplane sidecar service created",
"service", pid,
"node", node.ID(),
)

View File

@@ -47,7 +47,7 @@ func (s *Sprawl) populateInitialConfigEntries(cluster *topology.Cluster) error {
err,
)
}
logger.Info("wrote initial config entry",
logger.Debug("wrote initial config entry",
"kind", ce.GetKind(),
"name", ce.GetName(),
"namespace", ce.GetNamespace(),

View File

@@ -31,13 +31,14 @@ func (s *Sprawl) waitForLeader(cluster *topology.Cluster) {
client = s.clients[cluster.Name]
logger = s.logger.With("cluster", cluster.Name)
)
logger.Info("waiting for cluster to elect leader")
for {
leader, err := client.Status().Leader()
if leader != "" && err == nil {
logger.Info("cluster has leader", "leader_addr", leader)
return
}
logger.Info("cluster has no leader yet", "error", err)
logger.Debug("cluster has no leader yet", "error", err)
time.Sleep(500 * time.Millisecond)
}
}

View File

@@ -145,7 +145,7 @@ func (s *Sprawl) PrintDetails() error {
w.Flush()
s.logger.Info("CURRENT SPRAWL DETAILS", "details", buf.String())
s.logger.Debug("CURRENT SPRAWL DETAILS", "details", buf.String())
return nil
}

View File

@@ -74,7 +74,7 @@ func (s *Sprawl) initTenancies(cluster *topology.Cluster) error {
if err != nil {
return fmt.Errorf("error creating partition %q: %w", ap.Name, err)
}
logger.Info("created partition", "partition", ap.Name)
logger.Debug("created partition", "partition", ap.Name)
}
partitionNameList = append(partitionNameList, ap.Name)
@@ -105,13 +105,13 @@ func (s *Sprawl) initTenancies(cluster *topology.Cluster) error {
if err != nil {
return err
}
logger.Info("updated namespace", "namespace", ns, "partition", ap.Name)
logger.Debug("updated namespace", "namespace", ns, "partition", ap.Name)
} else {
_, _, err := nsClient.Create(obj, nil)
if err != nil {
return err
}
logger.Info("created namespace", "namespace", ns, "partition", ap.Name)
logger.Debug("created namespace", "namespace", ns, "partition", ap.Name)
}
}
}

View File

@@ -40,7 +40,7 @@ func DockerImages(
run *runner.Runner,
t *topology.Topology,
) error {
logw := logger.Named("docker").StandardWriter(&hclog.StandardLoggerOptions{ForceLevel: hclog.Info})
logw := logger.Named("docker").StandardWriter(&hclog.StandardLoggerOptions{ForceLevel: hclog.Debug})
built := make(map[string]struct{})
for _, c := range t.Clusters {
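
StandardWriter adapts an hclog logger into an io.Writer, so a subprocess's output can be captured at one forced level; dropping that level to Debug keeps docker build output out of default logs. A sketch of the bridge (the command is an illustrative stand-in):

package example

import (
	"os/exec"

	"github.com/hashicorp/go-hclog"
)

// runQuietly pipes a child process's stdout and stderr through hclog
// at Debug, mirroring the ForceLevel change above.
func runQuietly(logger hclog.Logger) error {
	logw := logger.Named("docker").StandardWriter(&hclog.StandardLoggerOptions{
		ForceLevel: hclog.Debug,
	})
	cmd := exec.Command("docker", "version")
	cmd.Stdout = logw
	cmd.Stderr = logw
	return cmd.Run()
}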

View File

@@ -22,7 +22,7 @@ func (g *Generator) digestOutputs(out *Outputs) error {
}
if node.DigestExposedPorts(nodeOut.Ports) {
g.logger.Info("discovered exposed port mappings",
g.logger.Debug("discovered exposed port mappings",
"cluster", clusterName,
"node", nid.String(),
"ports", nodeOut.Ports,
@@ -37,7 +37,7 @@ func (g *Generator) digestOutputs(out *Outputs) error {
return err
}
if changed {
g.logger.Info("discovered exposed forward proxy port",
g.logger.Debug("discovered exposed forward proxy port",
"network", netName,
"port", proxyPort,
)

View File

@@ -69,9 +69,7 @@ func NewGenerator(
workdir: workdir,
license: license,
tfLogger: logger.Named("terraform").StandardWriter(&hclog.StandardLoggerOptions{
ForceLevel: hclog.Info,
}),
tfLogger: logger.Named("terraform").StandardWriter(&hclog.StandardLoggerOptions{ForceLevel: hclog.Debug}),
}
g.SetTopology(topo)
@@ -166,7 +164,7 @@ func (g *Generator) Generate(step Step) error {
imageNames[image] = name
g.logger.Info("registering image", "resource", name, "image", image)
g.logger.Debug("registering image", "resource", name, "image", image)
images = append(images, DockerImage(name, image))
}

View File

@@ -57,14 +57,14 @@ func UpdateFileIfDifferent(
if !os.IsNotExist(err) {
return result, err
}
logger.Info("writing new file", "path", path)
logger.Debug("writing new file", "path", path)
result = UpdateResultCreated
} else {
// loaded
if bytes.Equal(body, prev) {
return result, nil
}
logger.Info("file has changed", "path", path)
logger.Debug("file has changed", "path", path)
result = UpdateResultModified
}

View File

@@ -73,7 +73,7 @@ func (s *Sprawl) initPeerings() error {
}
peeringToken := resp.PeeringToken
logger.Info("generated peering token", "peering", peering.String())
logger.Debug("generated peering token", "peering", peering.String())
req2 := api.PeeringEstablishRequest{
PeerName: peering.Dialing.PeerName,
@@ -83,7 +83,7 @@ func (s *Sprawl) initPeerings() error {
req2.Partition = peering.Dialing.Partition
}
logger.Info("establishing peering with token", "peering", peering.String())
logger.Info("registering peering with token", "peering", peering.String())
ESTABLISH:
_, _, err = dialingClient.Peerings().Establish(context.Background(), req2, nil)
if err != nil {
@@ -101,7 +101,7 @@ func (s *Sprawl) initPeerings() error {
return fmt.Errorf("error establishing peering with token for %q: %#v", peering.String(), err)
}
logger.Info("peering established", "peering", peering.String())
logger.Info("peering registered", "peering", peering.String())
}
return nil
@@ -111,6 +111,8 @@ func (s *Sprawl) waitForPeeringEstablishment() error {
var (
logger = s.logger.Named("peering")
)
logger.Info("awaiting peering establishment")
startTimeTotal := time.Now()
for _, peering := range s.topology.Peerings {
dialingCluster, ok := s.topology.Clusters[peering.Dialing.Name]
@@ -139,6 +141,7 @@ func (s *Sprawl) waitForPeeringEstablishment() error {
s.checkPeeringDirection(dialingLogger, dialingClient, peering.Dialing, dialingCluster.Enterprise)
s.checkPeeringDirection(acceptingLogger, acceptingClient, peering.Accepting, acceptingCluster.Enterprise)
}
logger.Info("peering established", "dur", time.Since(startTimeTotal).Round(time.Second))
return nil
}
@@ -146,8 +149,11 @@ func (s *Sprawl) checkPeeringDirection(logger hclog.Logger, client *api.Client,
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
startTime := time.Now()
for {
opts := &api.QueryOptions{}
logger2 := logger.With("dur", time.Since(startTime).Round(time.Second))
if enterprise {
opts.Partition = pc.Partition
}
@@ -157,21 +163,21 @@ func (s *Sprawl) checkPeeringDirection(logger hclog.Logger, client *api.Client,
continue
}
if err != nil {
logger.Info("error looking up peering", "error", err)
logger2.Debug("error looking up peering", "error", err)
time.Sleep(100 * time.Millisecond)
continue
}
if res == nil {
logger.Info("peering not found")
logger2.Debug("peering not found")
time.Sleep(100 * time.Millisecond)
continue
}
if res.State == api.PeeringStateActive {
logger.Info("peering is active")
return
break
}
logger.Info("peering not active yet", "state", res.State)
logger2.Debug("peering not active yet", "state", res.State)
time.Sleep(500 * time.Millisecond)
}
logger.Debug("peering is active", "dur", time.Since(startTime).Round(time.Second))
}
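
Two small mechanics in this hunk: replacing return with break lets the loop fall through to a single completion line carrying total duration, and logger.With stamps each retry message with elapsed time. A sketch of the combined pattern (ready is an illustrative placeholder):

package example

import (
	"time"

	"github.com/hashicorp/go-hclog"
)

// waitActive polls ready, annotating each retry with elapsed time and
// emitting one summary line after the loop breaks.
func waitActive(logger hclog.Logger, ready func() bool) {
	start := time.Now()
	for {
		attempt := logger.With("dur", time.Since(start).Round(time.Second))
		if ready() {
			break
		}
		attempt.Debug("peering not active yet")
		time.Sleep(500 * time.Millisecond)
	}
	logger.Debug("peering is active", "dur", time.Since(start).Round(time.Second))
}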

View File

@@ -188,7 +188,7 @@ func Launch(
return nil, fmt.Errorf("topology.Compile: %w", err)
}
s.logger.Info("compiled topology", "ct", jd(s.topology)) // TODO
s.logger.Debug("compiled topology", "ct", jd(s.topology)) // TODO
start := time.Now()
if err := s.launch(); err != nil {
@@ -220,7 +220,7 @@ func (s *Sprawl) Relaunch(
s.topology = newTopology
s.logger.Info("compiled replacement topology", "ct", jd(s.topology)) // TODO
s.logger.Debug("compiled replacement topology", "ct", jd(s.topology)) // TODO
start := time.Now()
if err := s.relaunch(); err != nil {
@@ -401,7 +401,7 @@ func (s *Sprawl) CaptureLogs(ctx context.Context) error {
return err
}
s.logger.Info("Capturing logs")
s.logger.Debug("Capturing logs")
var merr error
for _, container := range containers {

View File

@@ -92,8 +92,11 @@ func init() {
// environment variable SKIP_TEARDOWN=1.
func Launch(t *testing.T, cfg *topology.Config) *sprawl.Sprawl {
SkipIfTerraformNotPresent(t)
logger := testutil.Logger(t)
// IMO default level for tests should be info, not warn
logger.SetLevel(testutil.TestLogLevelWithDefault(hclog.Info))
sp, err := sprawl.Launch(
testutil.Logger(t),
logger,
initWorkingDirectory(t),
cfg,
)
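
With this change a sprawl test defaults to Info rather than hclog's usual Warn, while an explicit TEST_LOG_LEVEL still wins. An illustrative check of that precedence:

package example_test

import (
	"testing"

	"github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/go-hclog"
)

func TestLogLevelPrecedence(t *testing.T) {
	// Unset (or unparseable): the caller-supplied default fills the gap.
	t.Setenv("TEST_LOG_LEVEL", "")
	if got := testutil.TestLogLevelWithDefault(hclog.Info); got != hclog.Info {
		t.Fatalf("expected Info, got %v", got)
	}
	// Set to a parseable level: the environment wins.
	t.Setenv("TEST_LOG_LEVEL", "trace")
	if got := testutil.TestLogLevelWithDefault(hclog.Info); got != hclog.Trace {
		t.Fatalf("expected Trace, got %v", got)
	}
}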

View File

@@ -267,6 +267,10 @@ func compile(logger hclog.Logger, raw *Config, prev *Topology) (*Topology, error
if !IsValidLabel(svc.ID.Name) {
return nil, fmt.Errorf("service name is not valid: %s", svc.ID.Name)
}
if svc.ID.Partition != n.Partition {
return nil, fmt.Errorf("service %s on node %s has mismatched partitions: %s != %s",
svc.ID.Name, n.Name, svc.ID.Partition, n.Partition)
}
addTenancy(svc.ID.Partition, svc.ID.Namespace)
if _, exists := seenServices[svc.ID]; exists {
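
The new guard turns a subtle misconfiguration (a service claiming a different admin partition than its host node) into an immediate compile error instead of a later runtime registration failure. A self-contained sketch of the check, with hypothetical local types standing in for the real topology structs:

package example

import "fmt"

// Hypothetical stand-ins for the topology identifiers used in the diff.
type serviceID struct{ Name, Namespace, Partition string }
type node struct{ Name, Partition string }

// validatePlacement mirrors the added guard: a service's partition must
// match the partition of the node that hosts it.
func validatePlacement(n node, id serviceID) error {
	if id.Partition != n.Partition {
		return fmt.Errorf("service %s on node %s has mismatched partitions: %s != %s",
			id.Name, n.Name, id.Partition, n.Partition)
	}
	return nil
}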