diff --git a/agent/acl.go b/agent/acl.go index 91f53430df..71aee6c209 100644 --- a/agent/acl.go +++ b/agent/acl.go @@ -25,11 +25,6 @@ func (a *Agent) resolveTokenAndDefaultMeta(id string, entMeta *structs.Enterpris return nil, nil } - // Disable ACLs if version 8 enforcement isn't enabled. - if !a.config.ACLEnforceVersion8 { - return nil, nil - } - if acl.RootAuthorizer(id) != nil { return nil, acl.ErrRootDenied } diff --git a/agent/acl_test.go b/agent/acl_test.go index b3bdc9439a..bb95d4a751 100644 --- a/agent/acl_test.go +++ b/agent/acl_test.go @@ -181,33 +181,19 @@ func (a *TestACLAgent) ReloadConfig(config *consul.Config) error { return fmt.Errorf("Unimplemented") } -func TestACL_Version8(t *testing.T) { +func TestACL_Version8EnabledByDefault(t *testing.T) { t.Parallel() - t.Run("version 8 disabled", func(t *testing.T) { - a := NewTestACLAgent(t, t.Name(), TestACLConfig()+` - acl_enforce_version_8 = false - `, nil, nil) + called := false + resolveFn := func(string) (structs.ACLIdentity, acl.Authorizer, error) { + called = true + return nil, nil, acl.ErrNotFound + } + a := NewTestACLAgent(t, t.Name(), TestACLConfig(), resolveFn, nil) - token, err := a.resolveToken("nope") - require.Nil(t, token) - require.Nil(t, err) - }) - - t.Run("version 8 enabled", func(t *testing.T) { - called := false - resolveFn := func(string) (structs.ACLIdentity, acl.Authorizer, error) { - called = true - return nil, nil, acl.ErrNotFound - } - a := NewTestACLAgent(t, t.Name(), TestACLConfig()+` - acl_enforce_version_8 = true - `, resolveFn, nil) - - _, err := a.resolveToken("nope") - require.Error(t, err) - require.True(t, called) - }) + _, err := a.resolveToken("nope") + require.Error(t, err) + require.True(t, called) } func TestACL_AgentMasterToken(t *testing.T) { diff --git a/agent/agent.go b/agent/agent.go index 339286c1d9..ce69d8da5c 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -1278,7 +1278,6 @@ func (a *Agent) consulConfig() (*consul.Config, error) { if 
a.config.ACLDownPolicy != "" { base.ACLDownPolicy = a.config.ACLDownPolicy } - base.ACLEnforceVersion8 = a.config.ACLEnforceVersion8 base.ACLTokenReplication = a.config.ACLTokenReplication base.ACLsEnabled = a.config.ACLsEnabled if a.config.ACLEnableKeyListPolicy { diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index fee4ee5d74..facfb0ea3f 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -1307,7 +1307,6 @@ func TestAgent_Reload(t *testing.T) { t.Parallel() dc1 := "dc1" a := NewTestAgent(t, ` - acl_enforce_version_8 = false services = [ { name = "redis" @@ -1341,7 +1340,6 @@ func TestAgent_Reload(t *testing.T) { node_id = "` + string(a.Config.NodeID) + `" node_name = "` + a.Config.NodeName + `" - acl_enforce_version_8 = false services = [ { name = "redis-reloaded" @@ -1387,7 +1385,6 @@ func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) { handlerShell := fmt.Sprintf("(cat ; echo CONSUL_INDEX $CONSUL_INDEX) | tee '%s.atomic' ; mv '%s.atomic' '%s'", tmpFile, tmpFile, tmpFile) a := NewTestAgent(t, ` - acl_enforce_version_8 = false services = [ { name = "redis" @@ -1477,7 +1474,6 @@ func TestAgent_ReloadDoesNotTriggerWatch(t *testing.T) { node_id = "` + string(a.Config.NodeID) + `" node_name = "` + a.Config.NodeName + `" - acl_enforce_version_8 = false services = [ { name = "redis" @@ -6031,7 +6027,6 @@ func TestAgentConnectAuthorize_defaultAllow(t *testing.T) { acl_master_token = "root" acl_agent_token = "root" acl_agent_master_token = "towel" - acl_enforce_version_8 = true `) defer a.Shutdown() testrpc.WaitForTestAgent(t, a.RPC, dc1) @@ -6063,7 +6058,6 @@ func TestAgent_Host(t *testing.T) { acl_master_token = "master" acl_agent_token = "agent" acl_agent_master_token = "towel" - acl_enforce_version_8 = true `) defer a.Shutdown() @@ -6091,7 +6085,6 @@ func TestAgent_HostBadACL(t *testing.T) { acl_master_token = "root" acl_agent_token = "agent" acl_agent_master_token = "towel" - acl_enforce_version_8 = true `) 
defer a.Shutdown() diff --git a/agent/config/builder.go b/agent/config/builder.go index 6ab53e1bcb..6b69737b63 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -279,13 +279,26 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { if s.Name == "" || s.Data == "" { continue } - c2, keys, err := Parse(s.Data, s.Format) + c2, md, err := Parse(s.Data, s.Format) if err != nil { return RuntimeConfig{}, fmt.Errorf("Error parsing %s: %s", s.Name, err) } + var unusedErr error + for _, k := range md.Unused { + switch k { + case "acl_enforce_version_8": + b.warn("config key %q is deprecated and should be removed", k) + default: + unusedErr = multierror.Append(unusedErr, fmt.Errorf("invalid config key %s", k)) + } + } + if unusedErr != nil { + return RuntimeConfig{}, fmt.Errorf("Error parsing %s: %s", s.Name, unusedErr) + } + // for now this is a soft failure that will cause warnings but not actual problems - b.validateEnterpriseConfigKeys(&c2, keys) + b.validateEnterpriseConfigKeys(&c2, md.Keys) // if we have a single 'check' or 'service' we need to add them to the // list of checks and services first since we cannot merge them @@ -790,7 +803,6 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { GossipWANRetransmitMult: b.intVal(c.GossipWAN.RetransmitMult), // ACL - ACLEnforceVersion8: b.boolValWithDefault(c.ACLEnforceVersion8, true), ACLsEnabled: aclsEnabled, ACLAgentMasterToken: b.stringValWithDefault(c.ACL.Tokens.AgentMaster, b.stringVal(c.ACLAgentMasterToken)), ACLAgentToken: b.stringValWithDefault(c.ACL.Tokens.Agent, b.stringVal(c.ACLAgentToken)), diff --git a/agent/config/config.go b/agent/config/config.go index df5bca3c72..6d1da49f24 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/lib/decode" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/mitchellh/mapstructure" ) @@ -35,7 +34,7 @@ func 
FormatFrom(name string) string { } // Parse parses a config fragment in either JSON or HCL format. -func Parse(data string, format string) (c Config, keys []string, err error) { +func Parse(data string, format string) (c Config, md mapstructure.Metadata, err error) { var raw map[string]interface{} switch format { case "json": @@ -46,7 +45,7 @@ func Parse(data string, format string) (c Config, keys []string, err error) { err = fmt.Errorf("invalid format: %s", format) } if err != nil { - return Config{}, nil, err + return Config{}, mapstructure.Metadata{}, err } // We want to be able to report fields which we cannot map as an @@ -109,28 +108,19 @@ func Parse(data string, format string) (c Config, keys []string, err error) { "config_entries.bootstrap", // completely ignore this tree (fixed elsewhere) }) - var md mapstructure.Metadata d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: decode.HookTranslateKeys, Metadata: &md, Result: &c, }) if err != nil { - return Config{}, nil, err + return Config{}, mapstructure.Metadata{}, err } if err := d.Decode(m); err != nil { - return Config{}, nil, err + return Config{}, mapstructure.Metadata{}, err } - for _, k := range md.Unused { - err = multierror.Append(err, fmt.Errorf("invalid config key %s", k)) - } - - // Don't check these here. The builder can emit warnings for fields it - // doesn't like - keys = md.Keys - - return + return c, md, nil } // Config defines the format of a configuration file in either JSON or @@ -155,8 +145,6 @@ type Config struct { ACLDownPolicy *string `json:"acl_down_policy,omitempty" hcl:"acl_down_policy" mapstructure:"acl_down_policy"` // DEPRECATED (ACL-Legacy-Compat) - moved into the "acl" stanza ACLEnableKeyListPolicy *bool `json:"acl_enable_key_list_policy,omitempty" hcl:"acl_enable_key_list_policy" mapstructure:"acl_enable_key_list_policy"` - // DEPRECATED (ACL-Legacy-Compat) - pre-version8 enforcement is deprecated. 
- ACLEnforceVersion8 *bool `json:"acl_enforce_version_8,omitempty" hcl:"acl_enforce_version_8" mapstructure:"acl_enforce_version_8"` // DEPRECATED (ACL-Legacy-Compat) - moved into the "acl" stanza ACLMasterToken *string `json:"acl_master_token,omitempty" hcl:"acl_master_token" mapstructure:"acl_master_token"` // DEPRECATED (ACL-Legacy-Compat) - moved into the "acl.tokens" stanza diff --git a/agent/config/default.go b/agent/config/default.go index ddb4407750..531f0469d2 100644 --- a/agent/config/default.go +++ b/agent/config/default.go @@ -10,18 +10,6 @@ import ( "github.com/hashicorp/raft" ) -func DefaultRPCProtocol() (int, error) { - src := DefaultSource() - c, _, err := Parse(src.Data, src.Format) - if err != nil { - return 0, fmt.Errorf("Error parsing default config: %s", err) - } - if c.RPCProtocol == nil { - return 0, fmt.Errorf("No default RPC protocol set") - } - return *c.RPCProtocol, nil -} - // DefaultSource is the default agent configuration. // This needs to be merged first in the head. // todo(fs): The values are sourced from multiple sources. 
@@ -43,7 +31,6 @@ func DefaultSource() Source { Data: ` acl_default_policy = "allow" acl_down_policy = "extend-cache" - acl_enforce_version_8 = true acl_ttl = "30s" acl = { policy_ttl = "30s" @@ -65,7 +52,7 @@ func DefaultSource() Source { log_level = "INFO" max_query_time = "600s" primary_gateways_interval = "30s" - protocol = 2 + protocol = ` + strconv.Itoa(consul.DefaultRPCProtocol) + ` retry_interval = "30s" retry_interval_wan = "30s" server = false diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 9cdfcd1109..b97ca8619c 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -104,13 +104,6 @@ type RuntimeConfig struct { // hcl: acl.down_policy = ("allow"|"deny"|"extend-cache"|"async-cache") ACLDownPolicy string - // DEPRECATED (ACL-Legacy-Compat) - // ACLEnforceVersion8 is used to gate a set of ACL policy features that - // are opt-in prior to Consul 0.8 and opt-out in Consul 0.8 and later. - // - // hcl: acl_enforce_version_8 = (true|false) - ACLEnforceVersion8 bool - // ACLEnableKeyListPolicy is used to opt-in to the "list" policy added to // KV ACLs in Consul 1.0. 
// diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 5081e19136..8045bf2571 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -1619,6 +1619,16 @@ func TestConfigFlagsAndEdgecases(t *testing.T) { rt.DataDir = dataDir }, }, + { + desc: "acl_enforce_version_8 is deprecated", + args: []string{`-data-dir=` + dataDir}, + json: []string{`{ "acl_enforce_version_8": true }`}, + hcl: []string{`acl_enforce_version_8 = true`}, + patch: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + }, + warns: []string{`config key "acl_enforce_version_8" is deprecated and should be removed`}, + }, { desc: "advertise address detect fails v4", args: []string{`-data-dir=` + dataDir}, @@ -3934,7 +3944,6 @@ func TestFullConfig(t *testing.T) { "acl_datacenter": "m3urck3z", "acl_default_policy": "ArK3WIfE", "acl_down_policy": "vZXMfMP0", - "acl_enforce_version_8": true, "acl_enable_key_list_policy": true, "acl_master_token": "C1Q1oIwh", "acl_replication_token": "LMmgy5dO", @@ -4570,7 +4579,6 @@ func TestFullConfig(t *testing.T) { acl_datacenter = "m3urck3z" acl_default_policy = "ArK3WIfE" acl_down_policy = "vZXMfMP0" - acl_enforce_version_8 = true acl_enable_key_list_policy = true acl_master_token = "C1Q1oIwh" acl_replication_token = "LMmgy5dO" @@ -5332,7 +5340,6 @@ func TestFullConfig(t *testing.T) { ACLDatacenter: "ejtmd43d", ACLDefaultPolicy: "72c2e7a0", ACLDownPolicy: "03eb2aee", - ACLEnforceVersion8: true, ACLEnableKeyListPolicy: true, ACLEnableTokenPersistence: true, ACLMasterToken: "8a19ac27", @@ -6251,7 +6258,6 @@ func TestSanitize(t *testing.T) { "ACLDownPolicy": "", "ACLEnableKeyListPolicy": false, "ACLEnableTokenPersistence": false, - "ACLEnforceVersion8": false, "ACLMasterToken": "hidden", "ACLPolicyTTL": "0s", "ACLReplicationToken": "hidden", diff --git a/agent/consul/acl.go b/agent/consul/acl.go index 6460f66fd5..a2e68de3bd 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -1169,29 +1169,23 @@ func (r 
*ACLResolver) GetMergedPolicyForToken(token string) (*acl.Policy, error) // aclFilter is used to filter results from our state store based on ACL rules // configured for the provided token. type aclFilter struct { - authorizer acl.Authorizer - logger hclog.Logger - enforceVersion8 bool + authorizer acl.Authorizer + logger hclog.Logger } // newACLFilter constructs a new aclFilter. -func newACLFilter(authorizer acl.Authorizer, logger hclog.Logger, enforceVersion8 bool) *aclFilter { +func newACLFilter(authorizer acl.Authorizer, logger hclog.Logger) *aclFilter { if logger == nil { logger = hclog.New(&hclog.LoggerOptions{}) } return &aclFilter{ - authorizer: authorizer, - logger: logger, - enforceVersion8: enforceVersion8, + authorizer: authorizer, + logger: logger, } } // allowNode is used to determine if a node is accessible for an ACL. func (f *aclFilter) allowNode(node string, ent *acl.AuthorizerContext) bool { - if !f.enforceVersion8 { - return true - } - return f.authorizer.NodeRead(node, ent) == acl.Allow } @@ -1218,18 +1212,12 @@ func (f *aclFilter) allowService(service string, ent *acl.AuthorizerContext) boo return true } - if !f.enforceVersion8 && service == structs.ConsulServiceID { - return true - } return f.authorizer.ServiceRead(service, ent) == acl.Allow } // allowSession is used to determine if a session for a node is accessible for // an ACL. 
func (f *aclFilter) allowSession(node string, ent *acl.AuthorizerContext) bool { - if !f.enforceVersion8 { - return true - } return f.authorizer.SessionRead(node, ent) == acl.Allow } @@ -1793,7 +1781,7 @@ func (r *ACLResolver) filterACLWithAuthorizer(authorizer acl.Authorizer, subj in return nil } // Create the filter - filt := newACLFilter(authorizer, r.logger, r.config.ACLEnforceVersion8) + filt := newACLFilter(authorizer, r.logger) switch v := subj.(type) { case *structs.CheckServiceNodes: diff --git a/agent/consul/acl_test.go b/agent/consul/acl_test.go index 73816c36a7..55020b36a4 100644 --- a/agent/consul/acl_test.go +++ b/agent/consul/acl_test.go @@ -2258,20 +2258,9 @@ func TestACL_filterHealthChecks(t *testing.T) { } } - // Try permissive filtering. { hc := fill() - filt := newACLFilter(acl.AllowAll(), nil, false) - filt.filterHealthChecks(&hc) - if len(hc) != 1 { - t.Fatalf("bad: %#v", hc) - } - } - - // Try restrictive filtering. - { - hc := fill() - filt := newACLFilter(acl.DenyAll(), nil, false) + filt := newACLFilter(acl.DenyAll(), nil) filt.filterHealthChecks(&hc) if len(hc) != 0 { t.Fatalf("bad: %#v", hc) @@ -2292,20 +2281,9 @@ service "foo" { t.Fatalf("err: %v", err) } - // This will work because version 8 ACLs aren't being enforced. { hc := fill() - filt := newACLFilter(perms, nil, false) - filt.filterHealthChecks(&hc) - if len(hc) != 1 { - t.Fatalf("bad: %#v", hc) - } - } - - // But with version 8 the node will block it. - { - hc := fill() - filt := newACLFilter(perms, nil, true) + filt := newACLFilter(perms, nil) filt.filterHealthChecks(&hc) if len(hc) != 0 { t.Fatalf("bad: %#v", hc) @@ -2329,7 +2307,7 @@ node "node1" { // Now it should go through. { hc := fill() - filt := newACLFilter(perms, nil, true) + filt := newACLFilter(perms, nil) filt.filterHealthChecks(&hc) if len(hc) != 1 { t.Fatalf("bad: %#v", hc) @@ -2357,7 +2335,7 @@ func TestACL_filterIntentions(t *testing.T) { // Try permissive filtering. 
{ ixns := fill() - filt := newACLFilter(acl.AllowAll(), nil, false) + filt := newACLFilter(acl.AllowAll(), nil) filt.filterIntentions(&ixns) assert.Len(ixns, 2) } @@ -2365,7 +2343,7 @@ func TestACL_filterIntentions(t *testing.T) { // Try restrictive filtering. { ixns := fill() - filt := newACLFilter(acl.DenyAll(), nil, false) + filt := newACLFilter(acl.DenyAll(), nil) filt.filterIntentions(&ixns) assert.Len(ixns, 0) } @@ -2383,7 +2361,7 @@ service "foo" { // Filter { ixns := fill() - filt := newACLFilter(perms, nil, false) + filt := newACLFilter(perms, nil) filt.filterIntentions(&ixns) assert.Len(ixns, 1) } @@ -2399,24 +2377,14 @@ func TestACL_filterServices(t *testing.T) { } // Try permissive filtering. - filt := newACLFilter(acl.AllowAll(), nil, false) + filt := newACLFilter(acl.AllowAll(), nil) filt.filterServices(services, nil) if len(services) != 3 { t.Fatalf("bad: %#v", services) } // Try restrictive filtering. - filt = newACLFilter(acl.DenyAll(), nil, false) - filt.filterServices(services, nil) - if len(services) != 1 { - t.Fatalf("bad: %#v", services) - } - if _, ok := services["consul"]; !ok { - t.Fatalf("bad: %#v", services) - } - - // Try restrictive filtering with version 8 enforcement. - filt = newACLFilter(acl.DenyAll(), nil, true) + filt = newACLFilter(acl.DenyAll(), nil) filt.filterServices(services, nil) if len(services) != 0 { t.Fatalf("bad: %#v", services) @@ -2438,7 +2406,7 @@ func TestACL_filterServiceNodes(t *testing.T) { // Try permissive filtering. { nodes := fill() - filt := newACLFilter(acl.AllowAll(), nil, false) + filt := newACLFilter(acl.AllowAll(), nil) filt.filterServiceNodes(&nodes) if len(nodes) != 1 { t.Fatalf("bad: %#v", nodes) @@ -2448,7 +2416,7 @@ func TestACL_filterServiceNodes(t *testing.T) { // Try restrictive filtering. 
{ nodes := fill() - filt := newACLFilter(acl.DenyAll(), nil, false) + filt := newACLFilter(acl.DenyAll(), nil) filt.filterServiceNodes(&nodes) if len(nodes) != 0 { t.Fatalf("bad: %#v", nodes) @@ -2469,20 +2437,10 @@ service "foo" { t.Fatalf("err: %v", err) } - // This will work because version 8 ACLs aren't being enforced. - { - nodes := fill() - filt := newACLFilter(perms, nil, false) - filt.filterServiceNodes(&nodes) - if len(nodes) != 1 { - t.Fatalf("bad: %#v", nodes) - } - } - // But with version 8 the node will block it. { nodes := fill() - filt := newACLFilter(perms, nil, true) + filt := newACLFilter(perms, nil) filt.filterServiceNodes(&nodes) if len(nodes) != 0 { t.Fatalf("bad: %#v", nodes) @@ -2506,7 +2464,7 @@ node "node1" { // Now it should go through. { nodes := fill() - filt := newACLFilter(perms, nil, true) + filt := newACLFilter(perms, nil) filt.filterServiceNodes(&nodes) if len(nodes) != 1 { t.Fatalf("bad: %#v", nodes) @@ -2534,7 +2492,7 @@ func TestACL_filterNodeServices(t *testing.T) { // Try nil, which is a possible input. { var services *structs.NodeServices - filt := newACLFilter(acl.AllowAll(), nil, false) + filt := newACLFilter(acl.AllowAll(), nil) filt.filterNodeServices(&services) if services != nil { t.Fatalf("bad: %#v", services) @@ -2544,7 +2502,7 @@ func TestACL_filterNodeServices(t *testing.T) { // Try permissive filtering. { services := fill() - filt := newACLFilter(acl.AllowAll(), nil, false) + filt := newACLFilter(acl.AllowAll(), nil) filt.filterNodeServices(&services) if len(services.Services) != 1 { t.Fatalf("bad: %#v", services.Services) @@ -2554,10 +2512,10 @@ func TestACL_filterNodeServices(t *testing.T) { // Try restrictive filtering. 
{ services := fill() - filt := newACLFilter(acl.DenyAll(), nil, false) + filt := newACLFilter(acl.DenyAll(), nil) filt.filterNodeServices(&services) - if len((*services).Services) != 0 { - t.Fatalf("bad: %#v", (*services).Services) + if services != nil { + t.Fatalf("bad: %#v", *services) } } @@ -2575,20 +2533,10 @@ service "foo" { t.Fatalf("err: %v", err) } - // This will work because version 8 ACLs aren't being enforced. + // Node will block it. { services := fill() - filt := newACLFilter(perms, nil, false) - filt.filterNodeServices(&services) - if len((*services).Services) != 1 { - t.Fatalf("bad: %#v", (*services).Services) - } - } - - // But with version 8 the node will block it. - { - services := fill() - filt := newACLFilter(perms, nil, true) + filt := newACLFilter(perms, nil) filt.filterNodeServices(&services) if services != nil { t.Fatalf("bad: %#v", services) @@ -2612,7 +2560,7 @@ node "node1" { // Now it should go through. { services := fill() - filt := newACLFilter(perms, nil, true) + filt := newACLFilter(perms, nil) filt.filterNodeServices(&services) if len((*services).Services) != 1 { t.Fatalf("bad: %#v", (*services).Services) @@ -2647,7 +2595,7 @@ func TestACL_filterCheckServiceNodes(t *testing.T) { // Try permissive filtering. { nodes := fill() - filt := newACLFilter(acl.AllowAll(), nil, false) + filt := newACLFilter(acl.AllowAll(), nil) filt.filterCheckServiceNodes(&nodes) if len(nodes) != 1 { t.Fatalf("bad: %#v", nodes) @@ -2660,7 +2608,7 @@ func TestACL_filterCheckServiceNodes(t *testing.T) { // Try restrictive filtering. { nodes := fill() - filt := newACLFilter(acl.DenyAll(), nil, false) + filt := newACLFilter(acl.DenyAll(), nil) filt.filterCheckServiceNodes(&nodes) if len(nodes) != 0 { t.Fatalf("bad: %#v", nodes) @@ -2681,23 +2629,9 @@ service "foo" { t.Fatalf("err: %v", err) } - // This will work because version 8 ACLs aren't being enforced. 
{ nodes := fill() - filt := newACLFilter(perms, nil, false) - filt.filterCheckServiceNodes(&nodes) - if len(nodes) != 1 { - t.Fatalf("bad: %#v", nodes) - } - if len(nodes[0].Checks) != 1 { - t.Fatalf("bad: %#v", nodes[0].Checks) - } - } - - // But with version 8 the node will block it. - { - nodes := fill() - filt := newACLFilter(perms, nil, true) + filt := newACLFilter(perms, nil) filt.filterCheckServiceNodes(&nodes) if len(nodes) != 0 { t.Fatalf("bad: %#v", nodes) @@ -2721,7 +2655,7 @@ node "node1" { // Now it should go through. { nodes := fill() - filt := newACLFilter(perms, nil, true) + filt := newACLFilter(perms, nil) filt.filterCheckServiceNodes(&nodes) if len(nodes) != 1 { t.Fatalf("bad: %#v", nodes) @@ -2747,21 +2681,14 @@ func TestACL_filterCoordinates(t *testing.T) { } // Try permissive filtering. - filt := newACLFilter(acl.AllowAll(), nil, false) + filt := newACLFilter(acl.AllowAll(), nil) filt.filterCoordinates(&coords) if len(coords) != 2 { t.Fatalf("bad: %#v", coords) } - // Try restrictive filtering without version 8 ACL enforcement. - filt = newACLFilter(acl.DenyAll(), nil, false) - filt.filterCoordinates(&coords) - if len(coords) != 2 { - t.Fatalf("bad: %#v", coords) - } - - // Try restrictive filtering with version 8 ACL enforcement. - filt = newACLFilter(acl.DenyAll(), nil, true) + // Try restrictive filtering + filt = newACLFilter(acl.DenyAll(), nil) filt.filterCoordinates(&coords) if len(coords) != 0 { t.Fatalf("bad: %#v", coords) @@ -2781,21 +2708,14 @@ func TestACL_filterSessions(t *testing.T) { } // Try permissive filtering. - filt := newACLFilter(acl.AllowAll(), nil, true) + filt := newACLFilter(acl.AllowAll(), nil) filt.filterSessions(&sessions) if len(sessions) != 2 { t.Fatalf("bad: %#v", sessions) } - // Try restrictive filtering but with version 8 enforcement turned off. 
- filt = newACLFilter(acl.DenyAll(), nil, false) - filt.filterSessions(&sessions) - if len(sessions) != 2 { - t.Fatalf("bad: %#v", sessions) - } - - // Try restrictive filtering with version 8 enforcement turned on. - filt = newACLFilter(acl.DenyAll(), nil, true) + // Try restrictive filtering + filt = newACLFilter(acl.DenyAll(), nil) filt.filterSessions(&sessions) if len(sessions) != 0 { t.Fatalf("bad: %#v", sessions) @@ -2829,7 +2749,7 @@ func TestACL_filterNodeDump(t *testing.T) { // Try permissive filtering. { dump := fill() - filt := newACLFilter(acl.AllowAll(), nil, false) + filt := newACLFilter(acl.AllowAll(), nil) filt.filterNodeDump(&dump) if len(dump) != 1 { t.Fatalf("bad: %#v", dump) @@ -2845,17 +2765,11 @@ func TestACL_filterNodeDump(t *testing.T) { // Try restrictive filtering. { dump := fill() - filt := newACLFilter(acl.DenyAll(), nil, false) + filt := newACLFilter(acl.DenyAll(), nil) filt.filterNodeDump(&dump) - if len(dump) != 1 { + if len(dump) != 0 { t.Fatalf("bad: %#v", dump) } - if len(dump[0].Services) != 0 { - t.Fatalf("bad: %#v", dump[0].Services) - } - if len(dump[0].Checks) != 0 { - t.Fatalf("bad: %#v", dump[0].Checks) - } } // Allowed to see the service but not the node. @@ -2872,26 +2786,10 @@ service "foo" { t.Fatalf("err: %v", err) } - // This will work because version 8 ACLs aren't being enforced. + // But the node will block it. { dump := fill() - filt := newACLFilter(perms, nil, false) - filt.filterNodeDump(&dump) - if len(dump) != 1 { - t.Fatalf("bad: %#v", dump) - } - if len(dump[0].Services) != 1 { - t.Fatalf("bad: %#v", dump[0].Services) - } - if len(dump[0].Checks) != 1 { - t.Fatalf("bad: %#v", dump[0].Checks) - } - } - - // But with version 8 the node will block it. - { - dump := fill() - filt := newACLFilter(perms, nil, true) + filt := newACLFilter(perms, nil) filt.filterNodeDump(&dump) if len(dump) != 0 { t.Fatalf("bad: %#v", dump) @@ -2915,7 +2813,7 @@ node "node1" { // Now it should go through. 
{ dump := fill() - filt := newACLFilter(perms, nil, true) + filt := newACLFilter(perms, nil) filt.filterNodeDump(&dump) if len(dump) != 1 { t.Fatalf("bad: %#v", dump) @@ -2942,21 +2840,14 @@ func TestACL_filterNodes(t *testing.T) { } // Try permissive filtering. - filt := newACLFilter(acl.AllowAll(), nil, true) + filt := newACLFilter(acl.AllowAll(), nil) filt.filterNodes(&nodes) if len(nodes) != 2 { t.Fatalf("bad: %#v", nodes) } - // Try restrictive filtering but with version 8 enforcement turned off. - filt = newACLFilter(acl.DenyAll(), nil, false) - filt.filterNodes(&nodes) - if len(nodes) != 2 { - t.Fatalf("bad: %#v", nodes) - } - - // Try restrictive filtering with version 8 enforcement turned on. - filt = newACLFilter(acl.DenyAll(), nil, true) + // Try restrictive filtering + filt = newACLFilter(acl.DenyAll(), nil) filt.filterNodes(&nodes) if len(nodes) != 0 { t.Fatalf("bad: %#v", nodes) @@ -2995,7 +2886,7 @@ func TestACL_filterDatacenterCheckServiceNodes(t *testing.T) { // Try permissive filtering. { dcNodes := fill(t) - filt := newACLFilter(acl.AllowAll(), nil, true) + filt := newACLFilter(acl.AllowAll(), nil) filt.filterDatacenterCheckServiceNodes(&dcNodes) require.Len(t, dcNodes, 2) require.Equal(t, fill(t), dcNodes) @@ -3004,7 +2895,7 @@ func TestACL_filterDatacenterCheckServiceNodes(t *testing.T) { // Try restrictive filtering. 
{ dcNodes := fill(t) - filt := newACLFilter(acl.DenyAll(), nil, true) + filt := newACLFilter(acl.DenyAll(), nil) filt.filterDatacenterCheckServiceNodes(&dcNodes) require.Len(t, dcNodes, 0) } @@ -3024,7 +2915,7 @@ func TestACL_filterDatacenterCheckServiceNodes(t *testing.T) { { dcNodes := fill(t) - filt := newACLFilter(perms, nil, true) + filt := newACLFilter(perms, nil) filt.filterDatacenterCheckServiceNodes(&dcNodes) require.Len(t, dcNodes, 0) } @@ -3039,7 +2930,7 @@ func TestACL_filterDatacenterCheckServiceNodes(t *testing.T) { { dcNodes := fill(t) - filt := newACLFilter(perms, nil, true) + filt := newACLFilter(perms, nil) filt.filterDatacenterCheckServiceNodes(&dcNodes) require.Len(t, dcNodes, 0) } @@ -3056,7 +2947,7 @@ func TestACL_filterDatacenterCheckServiceNodes(t *testing.T) { // Now it should go through. { dcNodes := fill(t) - filt := newACLFilter(acl.AllowAll(), nil, true) + filt := newACLFilter(acl.AllowAll(), nil) filt.filterDatacenterCheckServiceNodes(&dcNodes) require.Len(t, dcNodes, 2) require.Equal(t, fill(t), dcNodes) @@ -3077,7 +2968,7 @@ func TestACL_redactPreparedQueryTokens(t *testing.T) { // Try permissive filtering with a management token. This will allow the // embedded token to be seen. - filt := newACLFilter(acl.ManageAll(), nil, false) + filt := newACLFilter(acl.ManageAll(), nil) filt.redactPreparedQueryTokens(&query) if !reflect.DeepEqual(query, expected) { t.Fatalf("bad: %#v", &query) @@ -3089,7 +2980,7 @@ func TestACL_redactPreparedQueryTokens(t *testing.T) { // Now try permissive filtering with a client token, which should cause // the embedded token to get redacted. - filt = newACLFilter(acl.AllowAll(), nil, false) + filt = newACLFilter(acl.AllowAll(), nil) filt.redactPreparedQueryTokens(&query) expected.Token = redactedToken if !reflect.DeepEqual(query, expected) { @@ -3190,7 +3081,7 @@ func TestACL_filterPreparedQueries(t *testing.T) { // Try permissive filtering with a management token. 
This will allow the // embedded token to be seen. - filt := newACLFilter(acl.ManageAll(), nil, false) + filt := newACLFilter(acl.ManageAll(), nil) filt.filterPreparedQueries(&queries) if !reflect.DeepEqual(queries, expected) { t.Fatalf("bad: %#v", queries) @@ -3203,7 +3094,7 @@ func TestACL_filterPreparedQueries(t *testing.T) { // Now try permissive filtering with a client token, which should cause // the embedded token to get redacted, and the query with no name to get // filtered out. - filt = newACLFilter(acl.AllowAll(), nil, false) + filt = newACLFilter(acl.AllowAll(), nil) filt.filterPreparedQueries(&queries) expected[2].Token = redactedToken expected = append(structs.PreparedQueries{}, expected[1], expected[2]) @@ -3217,7 +3108,7 @@ func TestACL_filterPreparedQueries(t *testing.T) { } // Now try restrictive filtering. - filt = newACLFilter(acl.DenyAll(), nil, false) + filt = newACLFilter(acl.DenyAll(), nil) filt.filterPreparedQueries(&queries) if len(queries) != 0 { t.Fatalf("bad: %#v", queries) diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index 6c069d29a4..8335ff8dae 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -150,7 +150,7 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error } // Check the complete register request against the given ACL policy. - if authz != nil && c.srv.config.ACLEnforceVersion8 { + if authz != nil { state := c.srv.fsm.State() _, ns, err := state.NodeServices(nil, args.Node, entMeta) if err != nil { @@ -194,7 +194,7 @@ func (c *Catalog) Deregister(args *structs.DeregisterRequest, reply *struct{}) e } // Check the complete deregister request against the given ACL policy. 
- if authz != nil && c.srv.config.ACLEnforceVersion8 { + if authz != nil { state := c.srv.fsm.State() var ns *structs.NodeService diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index 4533e5ca67..3663b45753 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -163,11 +163,10 @@ func TestCatalog_Register_ACLDeny(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) codec := rpcClient(t, s1) defer codec.Close() @@ -182,6 +181,9 @@ func TestCatalog_Register_ACLDeny(t *testing.T) { service "foo" { policy = "write" } +node "foo" { + policy = "write" +} `, }, WriteRequest: structs.WriteRequest{Token: "root"}, @@ -218,18 +220,9 @@ service "foo" { t.Fatalf("err: %v", err) } - // Try the special case for the "consul" service that allows it no matter - // what with pre-version 8 ACL enforcement. + // Try the former special case for the "consul" service. argR.Service.Service = "consul" err = msgpackrpc.CallWithCodec(codec, "Catalog.Register", &argR, &outR) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Make sure the exception goes away when we turn on version 8 ACL - // enforcement. 
- s1.config.ACLEnforceVersion8 = true - err = msgpackrpc.CallWithCodec(codec, "Catalog.Register", &argR, &outR) if !acl.IsErrPermissionDenied(err) { t.Fatalf("err: %v", err) } @@ -415,6 +408,9 @@ func TestCatalog_Register_ConnectProxy_ACLDestinationServiceName(t *testing.T) { service "foo" { policy = "write" } +node "foo" { + policy = "write" +} `, }, WriteRequest: structs.WriteRequest{Token: "root"}, @@ -510,7 +506,6 @@ func TestCatalog_Deregister_ACLDeny(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -570,50 +565,9 @@ service "service" { t.Fatalf("err: %v", err) } - // First pass with version 8 ACL enforcement disabled, we should be able - // to deregister everything even without a token. + // We should be not be able to deregister everything without a token. var err error var out struct{} - err = msgpackrpc.CallWithCodec(codec, "Catalog.Deregister", - &structs.DeregisterRequest{ - Datacenter: "dc1", - Node: "node", - CheckID: "service-check"}, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - err = msgpackrpc.CallWithCodec(codec, "Catalog.Deregister", - &structs.DeregisterRequest{ - Datacenter: "dc1", - Node: "node", - CheckID: "node-check"}, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - err = msgpackrpc.CallWithCodec(codec, "Catalog.Deregister", - &structs.DeregisterRequest{ - Datacenter: "dc1", - Node: "node", - ServiceID: "service"}, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - err = msgpackrpc.CallWithCodec(codec, "Catalog.Deregister", - &structs.DeregisterRequest{ - Datacenter: "dc1", - Node: "node"}, &out) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Turn on version 8 ACL enforcement and put the catalog entry back. 
- s1.config.ACLEnforceVersion8 = true - if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &argR, &outR); err != nil { - t.Fatalf("err: %v", err) - } - - // Second pass with version 8 ACL enforcement enabled, these should all - // get rejected. err = msgpackrpc.CallWithCodec(codec, "Catalog.Deregister", &structs.DeregisterRequest{ Datacenter: "dc1", @@ -646,7 +600,7 @@ service "service" { t.Fatalf("err: %v", err) } - // Third pass these should all go through with the token set. + // Second pass these should all go through with the token set. err = msgpackrpc.CallWithCodec(codec, "Catalog.Deregister", &structs.DeregisterRequest{ Datacenter: "dc1", @@ -1237,7 +1191,6 @@ func TestCatalog_ListNodes_ACLFilter(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -1250,22 +1203,10 @@ func TestCatalog_ListNodes_ACLFilter(t *testing.T) { // existing slice if the incoming one is nil, so it's best to start // clean each time. - // Prior to version 8, the node policy should be ignored. + // The node policy should not be ignored. args := structs.DCSpecificRequest{ Datacenter: "dc1", } - { - reply := structs.IndexedNodes{} - if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if len(reply.Nodes) != 1 { - t.Fatalf("bad: %v", reply.Nodes) - } - } - - // Now turn on version 8 enforcement and try again. 
- s1.config.ACLEnforceVersion8 = true { reply := structs.IndexedNodes{} if err := msgpackrpc.CallWithCodec(codec, "Catalog.ListNodes", &args, &reply); err != nil { @@ -2285,7 +2226,6 @@ func TestCatalog_ListServiceNodes_ConnectDestinationNative(t *testing.T) { func TestCatalog_ListServiceNodes_ConnectProxy_ACL(t *testing.T) { t.Parallel() - assert := assert.New(t) dir1, s1 := testServerWithConfig(t, func(c *Config) { c.ACLDatacenter = "dc1" c.ACLsEnabled = true @@ -2310,12 +2250,13 @@ func TestCatalog_ListServiceNodes_ConnectProxy_ACL(t *testing.T) { service "foo" { policy = "write" } +node "" { policy = "read" } `, }, WriteRequest: structs.WriteRequest{Token: "root"}, } var token string - assert.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &token)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &token)) { // Register a proxy @@ -2324,21 +2265,21 @@ service "foo" { args.Service.Proxy.DestinationServiceName = "bar" args.WriteRequest.Token = "root" var out struct{} - assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) // Register a proxy args = structs.TestRegisterRequestProxy(t) args.Service.Service = "foo-proxy" args.Service.Proxy.DestinationServiceName = "foo" args.WriteRequest.Token = "root" - assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) // Register a proxy args = structs.TestRegisterRequestProxy(t) args.Service.Service = "another-proxy" args.Service.Proxy.DestinationServiceName = "foo" args.WriteRequest.Token = "root" - assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) } // List w/ token. 
This should disallow because we don't have permission @@ -2350,8 +2291,8 @@ service "foo" { QueryOptions: structs.QueryOptions{Token: token}, } var resp structs.IndexedServiceNodes - assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp)) - assert.Len(resp.ServiceNodes, 0) + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp)) + require.Len(t, resp.ServiceNodes, 0) // List w/ token. This should work since we're requesting "foo", but should // also only contain the proxies with names that adhere to our ACL. @@ -2361,10 +2302,11 @@ service "foo" { ServiceName: "foo", QueryOptions: structs.QueryOptions{Token: token}, } - assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp)) - assert.Len(resp.ServiceNodes, 1) + resp = structs.IndexedServiceNodes{} + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp)) + require.Len(t, resp.ServiceNodes, 1) v := resp.ServiceNodes[0] - assert.Equal("foo-proxy", v.ServiceName) + require.Equal(t, "foo-proxy", v.ServiceName) } func TestCatalog_ListServiceNodes_ConnectNative(t *testing.T) { @@ -2564,11 +2506,10 @@ func testACLFilterServer(t *testing.T) (dir, token string, srv *Server, codec rp c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) codec = rpcClient(t, srv) - testrpc.WaitForLeader(t, srv.RPC, "dc1") + testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken("root")) // Create a new token arg := structs.ACLRequest{ @@ -2581,6 +2522,9 @@ func testACLFilterServer(t *testing.T) (dir, token string, srv *Server, codec rp service "foo" { policy = "write" } +node "" { + policy = "read" +} `, }, WriteRequest: structs.WriteRequest{Token: "root"}, @@ -2638,7 +2582,7 @@ func TestCatalog_ListServices_FilterACL(t *testing.T) { defer os.RemoveAll(dir) defer srv.Shutdown() defer codec.Close() - testrpc.WaitForTestAgent(t, srv.RPC, "dc1") + 
testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken("root")) opt := structs.DCSpecificRequest{ Datacenter: "dc1", @@ -2713,16 +2657,15 @@ func TestCatalog_NodeServices_ACLDeny(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) - // Prior to version 8, the node policy should be ignored. + // The node policy should not be ignored. args := structs.NodeSpecificRequest{ Datacenter: "dc1", Node: s1.config.NodeName, @@ -2731,15 +2674,6 @@ func TestCatalog_NodeServices_ACLDeny(t *testing.T) { if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &reply); err != nil { t.Fatalf("err: %v", err) } - if reply.NodeServices == nil { - t.Fatalf("should not be nil") - } - - // Now turn on version 8 enforcement and try again. 
- s1.config.ACLEnforceVersion8 = true - if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &args, &reply); err != nil { - t.Fatalf("err: %v", err) - } if reply.NodeServices != nil { t.Fatalf("should not nil") } @@ -2789,28 +2723,21 @@ func TestCatalog_NodeServices_FilterACL(t *testing.T) { defer os.RemoveAll(dir) defer srv.Shutdown() defer codec.Close() - testrpc.WaitForTestAgent(t, srv.RPC, "dc1") + testrpc.WaitForTestAgent(t, srv.RPC, "dc1", testrpc.WithToken("root")) opt := structs.NodeSpecificRequest{ Datacenter: "dc1", Node: srv.config.NodeName, QueryOptions: structs.QueryOptions{Token: token}, } - reply := structs.IndexedNodeServices{} - if err := msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &opt, &reply); err != nil { - t.Fatalf("err: %s", err) - } - found := false - for _, svc := range reply.NodeServices.Services { - if svc.ID == "bar" { - t.Fatalf("bad: %#v", reply.NodeServices.Services) - } - if svc.ID == "foo" { - found = true - break - } - } - if !found { - t.Fatalf("bad: %#v", reply.NodeServices) - } + + var reply structs.IndexedNodeServices + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &opt, &reply)) + + require.NotNil(t, reply.NodeServices) + require.Len(t, reply.NodeServices.Services, 1) + + svc, ok := reply.NodeServices.Services["foo"] + require.True(t, ok) + require.Equal(t, "foo", svc.ID) } diff --git a/agent/consul/config.go b/agent/consul/config.go index aa4ea7352c..91f03436af 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -237,10 +237,6 @@ type Config struct { // ACLEnabled is used to enable ACLs ACLsEnabled bool - // ACLEnforceVersion8 is used to gate a set of ACL policy features that - // are opt-in prior to Consul 0.8 and opt-out in Consul 0.8 and later. - ACLEnforceVersion8 bool - // ACLMasterToken is used to bootstrap the ACL system. It should be specified // on the servers in the ACLDatacenter. 
When the leader comes online, it ensures // that the Master token is available. This provides the initial token. diff --git a/agent/consul/config_endpoint_test.go b/agent/consul/config_endpoint_test.go index 6f984f38d5..50e6acc78d 100644 --- a/agent/consul/config_endpoint_test.go +++ b/agent/consul/config_endpoint_test.go @@ -145,7 +145,7 @@ func TestConfigEntry_Apply_ACLDeny(t *testing.T) { }) defer os.RemoveAll(dir1) defer s1.Shutdown() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) codec := rpcClient(t, s1) defer codec.Close() @@ -266,7 +266,7 @@ func TestConfigEntry_Get_ACLDeny(t *testing.T) { }) defer os.RemoveAll(dir1) defer s1.Shutdown() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) codec := rpcClient(t, s1) defer codec.Close() @@ -421,7 +421,7 @@ func TestConfigEntry_List_ACLDeny(t *testing.T) { }) defer os.RemoveAll(dir1) defer s1.Shutdown() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) codec := rpcClient(t, s1) defer codec.Close() @@ -502,7 +502,7 @@ func TestConfigEntry_ListAll_ACLDeny(t *testing.T) { }) defer os.RemoveAll(dir1) defer s1.Shutdown() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) codec := rpcClient(t, s1) defer codec.Close() @@ -652,7 +652,7 @@ func TestConfigEntry_Delete_ACLDeny(t *testing.T) { }) defer os.RemoveAll(dir1) defer s1.Shutdown() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) codec := rpcClient(t, s1) defer codec.Close() @@ -1119,7 +1119,7 @@ func TestConfigEntry_ResolveServiceConfig_ACLDeny(t *testing.T) { }) defer os.RemoveAll(dir1) defer s1.Shutdown() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) codec := 
rpcClient(t, s1) defer codec.Close() diff --git a/agent/consul/coordinate_endpoint.go b/agent/consul/coordinate_endpoint.go index fb381810da..d31136ebc8 100644 --- a/agent/consul/coordinate_endpoint.go +++ b/agent/consul/coordinate_endpoint.go @@ -143,7 +143,7 @@ func (c *Coordinate) Update(args *structs.CoordinateUpdateRequest, reply *struct if err != nil { return err } - if authz != nil && c.srv.config.ACLEnforceVersion8 { + if authz != nil { var authzContext acl.AuthorizerContext structs.DefaultEnterpriseMeta().FillAuthzContext(&authzContext) if authz.NodeWrite(args.Node, &authzContext) != acl.Allow { @@ -217,7 +217,7 @@ func (c *Coordinate) Node(args *structs.NodeSpecificRequest, reply *structs.Inde if err != nil { return err } - if authz != nil && c.srv.config.ACLEnforceVersion8 { + if authz != nil { var authzContext acl.AuthorizerContext structs.WildcardEnterpriseMeta().FillAuthzContext(&authzContext) if authz.NodeRead(args.Node, &authzContext) != acl.Allow { diff --git a/agent/consul/coordinate_endpoint_test.go b/agent/consul/coordinate_endpoint_test.go index e1b90f5043..7ec7e41934 100644 --- a/agent/consul/coordinate_endpoint_test.go +++ b/agent/consul/coordinate_endpoint_test.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/consul/testrpc" - "github.com/hashicorp/net-rpc-msgpackrpc" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/serf/coordinate" "github.com/pascaldekloe/goe/verify" ) @@ -51,7 +51,7 @@ func TestCoordinate_Update(t *testing.T) { // Register some nodes. 
nodes := []string{"node1", "node2"} - if err := registerNodes(nodes, codec); err != nil { + if err := registerNodes(nodes, codec, ""); err != nil { t.Fatal(err) } @@ -184,22 +184,21 @@ func TestCoordinate_Update_ACLDeny(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Register some nodes. nodes := []string{"node1", "node2"} - if err := registerNodes(nodes, codec); err != nil { + if err := registerNodes(nodes, codec, "root"); err != nil { t.Fatal(err) } - // Send an update for the first node. This should go through since we + // Send an update for the first node. // don't have version 8 ACLs enforced yet. req := structs.CoordinateUpdateRequest{ Datacenter: "dc1", @@ -207,12 +206,6 @@ func TestCoordinate_Update_ACLDeny(t *testing.T) { Coord: generateRandomCoordinate(), } var out struct{} - if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &req, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Now turn on version 8 enforcement and try again. - s1.config.ACLEnforceVersion8 = true err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &req, &out) if !acl.IsErrPermissionDenied(err) { t.Fatalf("err: %v", err) @@ -295,7 +288,7 @@ func TestCoordinate_ListNodes(t *testing.T) { // Register some nodes. 
nodes := []string{"foo", "bar", "baz"} - if err := registerNodes(nodes, codec); err != nil { + if err := registerNodes(nodes, codec, ""); err != nil { t.Fatal(err) } @@ -355,14 +348,13 @@ func TestCoordinate_ListNodes_ACLFilter(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Register some nodes. nodes := []string{"foo", "bar", "baz"} @@ -418,12 +410,12 @@ func TestCoordinate_ListNodes_ACLFilter(t *testing.T) { if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &arg3, &out); err != nil { t.Fatalf("err: %v", err) } - // Wait for all the coordinate updates to apply. Since we aren't - // enforcing version 8 ACLs, this should also allow us to read - // everything back without a token. + + // Wait for all the coordinate updates to apply. retry.Run(t, func(r *retry.R) { arg := structs.DCSpecificRequest{ - Datacenter: "dc1", + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: "root"}, } resp := structs.IndexedCoordinates{} if err := msgpackrpc.CallWithCodec(codec, "Coordinate.ListNodes", &arg, &resp); err != nil { @@ -435,10 +427,7 @@ func TestCoordinate_ListNodes_ACLFilter(t *testing.T) { }) // Now that we've waited for the batch processing to ingest the - // coordinates we can do the rest of the requests without the loop. We - // will start by turning on version 8 ACL support which should block - // everything. - s1.config.ACLEnforceVersion8 = true + // coordinates we can do the rest of the requests without the loop. arg := structs.DCSpecificRequest{ Datacenter: "dc1", } @@ -494,7 +483,7 @@ func TestCoordinate_Node(t *testing.T) { // Register some nodes. 
nodes := []string{"foo", "bar"} - if err := registerNodes(nodes, codec); err != nil { + if err := registerNodes(nodes, codec, ""); err != nil { t.Fatal(err) } @@ -543,52 +532,38 @@ func TestCoordinate_Node_ACLDeny(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Register some nodes. nodes := []string{"node1", "node2"} - if err := registerNodes(nodes, codec); err != nil { + if err := registerNodes(nodes, codec, "root"); err != nil { t.Fatal(err) } coord := generateRandomCoordinate() req := structs.CoordinateUpdateRequest{ - Datacenter: "dc1", - Node: "node1", - Coord: coord, + Datacenter: "dc1", + Node: "node1", + Coord: coord, + WriteRequest: structs.WriteRequest{Token: "root"}, } var out struct{} if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Update", &req, &out); err != nil { t.Fatalf("err: %v", err) } - // Try a read for the first node. This should go through since we - // don't have version 8 ACLs enforced yet. + // Try a read for the first node. This should fail without a token. arg := structs.NodeSpecificRequest{ Node: "node1", Datacenter: "dc1", } resp := structs.IndexedCoordinates{} - retry.Run(t, func(r *retry.R) { - if err := msgpackrpc.CallWithCodec(codec, "Coordinate.Node", &arg, &resp); err != nil { - r.Fatalf("err: %v", err) - } - if len(resp.Coordinates) != 1 || - resp.Coordinates[0].Node != "node1" { - r.Fatalf("bad: %v", resp.Coordinates) - } - verify.Values(t, "", resp.Coordinates[0].Coord, coord) - }) - - // Now turn on version 8 enforcement and try again. 
- s1.config.ACLEnforceVersion8 = true err := msgpackrpc.CallWithCodec(codec, "Coordinate.Node", &arg, &resp) if !acl.IsErrPermissionDenied(err) { t.Fatalf("err: %v", err) @@ -628,12 +603,13 @@ node "node1" { } } -func registerNodes(nodes []string, codec rpc.ClientCodec) error { +func registerNodes(nodes []string, codec rpc.ClientCodec, token string) error { for _, node := range nodes { req := structs.RegisterRequest{ - Datacenter: "dc1", - Node: node, - Address: "127.0.0.1", + Datacenter: "dc1", + Node: node, + Address: "127.0.0.1", + WriteRequest: structs.WriteRequest{Token: token}, } var reply struct{} if err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &req, &reply); err != nil { diff --git a/agent/consul/discovery_chain_endpoint_test.go b/agent/consul/discovery_chain_endpoint_test.go index 17fc1c2f91..d228b1faf4 100644 --- a/agent/consul/discovery_chain_endpoint_test.go +++ b/agent/consul/discovery_chain_endpoint_test.go @@ -29,8 +29,8 @@ func TestDiscoveryChainEndpoint_Get(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") - testrpc.WaitForLeader(t, s1.RPC, "dc1") + waitForLeaderEstablishment(t, s1) + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) denyToken, err := upsertTestTokenWithPolicyRules(codec, "root", "dc1", "") require.NoError(t, err) diff --git a/agent/consul/federation_state_endpoint_test.go b/agent/consul/federation_state_endpoint_test.go index 5d8ee7934d..37b0bb1fc1 100644 --- a/agent/consul/federation_state_endpoint_test.go +++ b/agent/consul/federation_state_endpoint_test.go @@ -112,7 +112,7 @@ func TestFederationState_Apply_Upsert_ACLDeny(t *testing.T) { defer os.RemoveAll(dir1) defer s1.Shutdown() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) codec := rpcClient(t, s1) defer codec.Close() @@ -224,7 +224,7 @@ func TestFederationState_Get_ACLDeny(t *testing.T) { defer os.RemoveAll(dir1) 
defer s1.Shutdown() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) codec := rpcClient(t, s1) defer codec.Close() @@ -383,7 +383,6 @@ func TestFederationState_List_ACLDeny(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = true // apparently this is still not defaulted to true in server code }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -400,7 +399,6 @@ func TestFederationState_List_ACLDeny(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = true // ugh }) defer os.RemoveAll(dir2) defer s2.Shutdown() diff --git a/agent/consul/health_endpoint_test.go b/agent/consul/health_endpoint_test.go index 5fe77e48d7..f08e9ed528 100644 --- a/agent/consul/health_endpoint_test.go +++ b/agent/consul/health_endpoint_test.go @@ -936,14 +936,13 @@ func TestHealth_ServiceNodes_ConnectProxy_ACL(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Create the ACL. arg := structs.ACLRequest{ @@ -956,6 +955,9 @@ func TestHealth_ServiceNodes_ConnectProxy_ACL(t *testing.T) { service "foo" { policy = "write" } +node "foo" { + policy = "write" +} `, }, WriteRequest: structs.WriteRequest{Token: "root"}, @@ -1236,14 +1238,13 @@ func TestHealth_ServiceNodes_Ingress_ACL(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = true }) defer os.RemoveAll(dir1) defer s1.Shutdown() codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Create the ACL. 
token, err := upsertTestTokenWithPolicyRules(codec, "root", "dc1", ` diff --git a/agent/consul/internal_endpoint_test.go b/agent/consul/internal_endpoint_test.go index 818be16580..99b700e4ec 100644 --- a/agent/consul/internal_endpoint_test.go +++ b/agent/consul/internal_endpoint_test.go @@ -967,7 +967,6 @@ func TestInternal_GatewayServices_ACLFiltering(t *testing.T) { dir1, s1 := testServerWithConfig(t, func(c *Config) { c.ACLDatacenter = "dc1" c.ACLsEnabled = true - c.ACLEnforceVersion8 = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" }) @@ -1355,7 +1354,6 @@ func TestInternal_GatewayServiceDump_Terminating_ACL(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = true }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -1691,7 +1689,6 @@ func TestInternal_GatewayServiceDump_Ingress_ACL(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = true }) defer os.RemoveAll(dir1) defer s1.Shutdown() diff --git a/agent/consul/kvs_endpoint_test.go b/agent/consul/kvs_endpoint_test.go index 85f1b9f6d3..30bbdbc3a1 100644 --- a/agent/consul/kvs_endpoint_test.go +++ b/agent/consul/kvs_endpoint_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/testrpc" - "github.com/hashicorp/net-rpc-msgpackrpc" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/pascaldekloe/goe/verify" ) @@ -83,7 +83,7 @@ func TestKVS_Apply_ACLDeny(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Create the ACL arg := structs.ACLRequest{ @@ -195,7 +195,7 @@ func TestKVS_Get_ACLDeny(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", 
testrpc.WithToken("root")) arg := structs.KVSRequest{ Datacenter: "dc1", @@ -404,7 +404,7 @@ func TestKVSEndpoint_List_ACLDeny(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) keys := []string{ "abe", @@ -491,7 +491,7 @@ func TestKVSEndpoint_List_ACLEnableKeyListPolicy(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) keys := []string{ "abe", @@ -685,7 +685,7 @@ func TestKVSEndpoint_ListKeys_ACLDeny(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) keys := []string{ "abe", diff --git a/agent/consul/leader_test.go b/agent/consul/leader_test.go index 8c2dbb81ea..4dafc01f4d 100644 --- a/agent/consul/leader_test.go +++ b/agent/consul/leader_test.go @@ -26,7 +26,6 @@ func TestLeader_RegisterMember(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = true }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -98,7 +97,6 @@ func TestLeader_FailedMember(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = true }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -160,7 +158,6 @@ func TestLeader_LeftMember(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = true }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -207,7 +204,6 @@ func TestLeader_ReapMember(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = true }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -269,7 +265,6 @@ func TestLeader_ReapServer(t *testing.T) { c.ACLsEnabled = true 
c.ACLMasterToken = "root" c.ACLDefaultPolicy = "allow" - c.ACLEnforceVersion8 = true c.Bootstrap = true }) defer os.RemoveAll(dir1) @@ -280,7 +275,6 @@ func TestLeader_ReapServer(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "allow" - c.ACLEnforceVersion8 = true c.Bootstrap = false }) defer os.RemoveAll(dir2) @@ -291,7 +285,6 @@ func TestLeader_ReapServer(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "allow" - c.ACLEnforceVersion8 = true c.Bootstrap = false }) defer os.RemoveAll(dir3) @@ -347,7 +340,6 @@ func TestLeader_Reconcile_ReapMember(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = true }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -397,7 +389,6 @@ func TestLeader_Reconcile(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = true }) defer os.RemoveAll(dir1) defer s1.Shutdown() diff --git a/agent/consul/operator_raft_endpoint_test.go b/agent/consul/operator_raft_endpoint_test.go index 4f89f63a3d..8cf8f99f59 100644 --- a/agent/consul/operator_raft_endpoint_test.go +++ b/agent/consul/operator_raft_endpoint_test.go @@ -71,7 +71,7 @@ func TestOperator_RaftGetConfiguration_ACLDeny(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Make a request with no token to make sure it gets denied. arg := structs.DCSpecificRequest{ @@ -211,7 +211,7 @@ func TestOperator_RaftRemovePeerByAddress_ACLDeny(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Make a request with no token to make sure it gets denied. 
arg := structs.RaftRemovePeerRequest{ diff --git a/agent/consul/prepared_query_endpoint.go b/agent/consul/prepared_query_endpoint.go index 00c1359fc2..6f6c5274da 100644 --- a/agent/consul/prepared_query_endpoint.go +++ b/agent/consul/prepared_query_endpoint.go @@ -101,7 +101,7 @@ func (p *PreparedQuery) Apply(args *structs.PreparedQueryRequest, reply *string) // Parse the query and prep it for the state store. switch args.Op { case structs.PreparedQueryCreate, structs.PreparedQueryUpdate: - if err := parseQuery(args.Query, p.srv.config.ACLEnforceVersion8); err != nil { + if err := parseQuery(args.Query); err != nil { return fmt.Errorf("Invalid prepared query: %v", err) } @@ -130,7 +130,7 @@ func (p *PreparedQuery) Apply(args *structs.PreparedQueryRequest, reply *string) // update operation. Some of the fields are not checked or are partially // checked, as noted in the comments below. This also updates all the parsed // fields of the query. -func parseQuery(query *structs.PreparedQuery, enforceVersion8 bool) error { +func parseQuery(query *structs.PreparedQuery) error { // We skip a few fields: // - ID is checked outside this fn. // - Name is optional with no restrictions, except for uniqueness which @@ -142,10 +142,8 @@ func parseQuery(query *structs.PreparedQuery, enforceVersion8 bool) error { // compile it. // Anonymous queries require a session or need to be part of a template. 
- if enforceVersion8 { - if query.Name == "" && query.Template.Type == "" && query.Session == "" { - return fmt.Errorf("Must be bound to a session") - } + if query.Name == "" && query.Template.Type == "" && query.Session == "" { + return fmt.Errorf("Must be bound to a session") } // Token is checked when the query is executed, but we do make sure the diff --git a/agent/consul/prepared_query_endpoint_test.go b/agent/consul/prepared_query_endpoint_test.go index c3fce61832..eb1edddc7b 100644 --- a/agent/consul/prepared_query_endpoint_test.go +++ b/agent/consul/prepared_query_endpoint_test.go @@ -21,6 +21,7 @@ import ( "github.com/hashicorp/go-hclog" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/serf/coordinate" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -198,7 +199,7 @@ func TestPreparedQuery_Apply_ACLDeny(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Create an ACL with write permissions for redis queries. 
var token string @@ -540,88 +541,86 @@ func TestPreparedQuery_parseQuery(t *testing.T) { t.Parallel() query := &structs.PreparedQuery{} - err := parseQuery(query, true) + err := parseQuery(query) if err == nil || !strings.Contains(err.Error(), "Must be bound to a session") { t.Fatalf("bad: %v", err) } query.Session = "adf4238a-882b-9ddc-4a9d-5b6758e4159e" - err = parseQuery(query, true) + err = parseQuery(query) if err == nil || !strings.Contains(err.Error(), "Must provide a Service") { t.Fatalf("bad: %v", err) } query.Session = "" query.Template.Type = "some-kind-of-template" - err = parseQuery(query, true) + err = parseQuery(query) if err == nil || !strings.Contains(err.Error(), "Must provide a Service") { t.Fatalf("bad: %v", err) } query.Template.Type = "" - err = parseQuery(query, false) - if err == nil || !strings.Contains(err.Error(), "Must provide a Service") { + err = parseQuery(query) + if err == nil || !strings.Contains(err.Error(), "Must be bound to a session") { t.Fatalf("bad: %v", err) } // None of the rest of these care about version 8 ACL enforcement. 
- for _, version8 := range []bool{true, false} { - query = &structs.PreparedQuery{} - query.Session = "adf4238a-882b-9ddc-4a9d-5b6758e4159e" - query.Service.Service = "foo" - if err := parseQuery(query, version8); err != nil { - t.Fatalf("err: %v", err) - } + query = &structs.PreparedQuery{} + query.Session = "adf4238a-882b-9ddc-4a9d-5b6758e4159e" + query.Service.Service = "foo" + if err := parseQuery(query); err != nil { + t.Fatalf("err: %v", err) + } - query.Token = redactedToken - err = parseQuery(query, version8) - if err == nil || !strings.Contains(err.Error(), "Bad Token") { - t.Fatalf("bad: %v", err) - } + query.Token = redactedToken + err = parseQuery(query) + if err == nil || !strings.Contains(err.Error(), "Bad Token") { + t.Fatalf("bad: %v", err) + } - query.Token = "adf4238a-882b-9ddc-4a9d-5b6758e4159e" - if err := parseQuery(query, version8); err != nil { - t.Fatalf("err: %v", err) - } + query.Token = "adf4238a-882b-9ddc-4a9d-5b6758e4159e" + if err := parseQuery(query); err != nil { + t.Fatalf("err: %v", err) + } - query.Service.Failover.NearestN = -1 - err = parseQuery(query, version8) - if err == nil || !strings.Contains(err.Error(), "Bad NearestN") { - t.Fatalf("bad: %v", err) - } + query.Service.Failover.NearestN = -1 + err = parseQuery(query) + if err == nil || !strings.Contains(err.Error(), "Bad NearestN") { + t.Fatalf("bad: %v", err) + } - query.Service.Failover.NearestN = 3 - if err := parseQuery(query, version8); err != nil { - t.Fatalf("err: %v", err) - } + query.Service.Failover.NearestN = 3 + if err := parseQuery(query); err != nil { + t.Fatalf("err: %v", err) + } - query.DNS.TTL = "two fortnights" - err = parseQuery(query, version8) - if err == nil || !strings.Contains(err.Error(), "Bad DNS TTL") { - t.Fatalf("bad: %v", err) - } + query.DNS.TTL = "two fortnights" + err = parseQuery(query) + if err == nil || !strings.Contains(err.Error(), "Bad DNS TTL") { + t.Fatalf("bad: %v", err) + } - query.DNS.TTL = "-3s" - err = parseQuery(query, 
version8) - if err == nil || !strings.Contains(err.Error(), "must be >=0") { - t.Fatalf("bad: %v", err) - } + query.DNS.TTL = "-3s" + err = parseQuery(query) + if err == nil || !strings.Contains(err.Error(), "must be >=0") { + t.Fatalf("bad: %v", err) + } - query.DNS.TTL = "3s" - if err := parseQuery(query, version8); err != nil { - t.Fatalf("err: %v", err) - } + query.DNS.TTL = "3s" + if err := parseQuery(query); err != nil { + t.Fatalf("err: %v", err) + } - query.Service.NodeMeta = map[string]string{"": "somevalue"} - err = parseQuery(query, version8) - if err == nil || !strings.Contains(err.Error(), "cannot be blank") { - t.Fatalf("bad: %v", err) - } + query.Service.NodeMeta = map[string]string{"": "somevalue"} + err = parseQuery(query) + if err == nil || !strings.Contains(err.Error(), "cannot be blank") { + t.Fatalf("bad: %v", err) + } - query.Service.NodeMeta = map[string]string{"somekey": "somevalue"} - if err := parseQuery(query, version8); err != nil { - t.Fatalf("err: %v", err) - } + query.Service.NodeMeta = map[string]string{"somekey": "somevalue"} + if err := parseQuery(query); err != nil { + t.Fatalf("err: %v", err) } } @@ -638,7 +637,7 @@ func TestPreparedQuery_ACLDeny_Catchall_Template(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Create an ACL with write permissions for any prefix. var token string @@ -853,7 +852,7 @@ func TestPreparedQuery_Get(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Create an ACL with write permissions for redis queries. 
var token string @@ -974,6 +973,7 @@ func TestPreparedQuery_Get(t *testing.T) { Session: structs.Session{ Node: s1.config.NodeName, }, + WriteRequest: structs.WriteRequest{Token: "root"}, } if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &req, &session); err != nil { t.Fatalf("err: %v", err) @@ -1106,7 +1106,7 @@ func TestPreparedQuery_List(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Create an ACL with write permissions for redis queries. var token string @@ -1245,6 +1245,7 @@ func TestPreparedQuery_List(t *testing.T) { Session: structs.Session{ Node: s1.config.NodeName, }, + WriteRequest: structs.WriteRequest{Token: "root"}, } if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &req, &session); err != nil { t.Fatalf("err: %v", err) @@ -1314,7 +1315,7 @@ func TestPreparedQuery_Explain(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Create an ACL with write permissions for prod- queries. var token string @@ -1445,10 +1446,10 @@ func TestPreparedQuery_Execute(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() + waitForLeaderEstablishment(t, s1) codec1 := rpcClient(t, s1) defer codec1.Close() @@ -1456,15 +1457,15 @@ func TestPreparedQuery_Execute(t *testing.T) { c.Datacenter = "dc2" c.ACLDatacenter = "dc1" c.ACLsEnabled = true + c.ACLDefaultPolicy = "deny" }) defer os.RemoveAll(dir2) defer s2.Shutdown() + waitForLeaderEstablishment(t, s2) codec2 := rpcClient(t, s2) defer codec2.Close() s2.tokens.UpdateReplicationToken("root", tokenStore.TokenSourceConfig) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - testrpc.WaitForLeader(t, s2.RPC, "dc2") // Try to WAN join. 
joinWAN(t, s2, s1) @@ -1472,30 +1473,77 @@ func TestPreparedQuery_Execute(t *testing.T) { if got, want := len(s1.WANMembers()), 2; got != want { r.Fatalf("got %d WAN members want %d", got, want) } + if got, want := len(s2.WANMembers()), 2; got != want { + r.Fatalf("got %d WAN members want %d", got, want) + } }) - // Create an ACL with read permission to the service. - var execToken string - { - var rules = ` - service "foo" { - policy = "read" - } - ` + // check for RPC forwarding + testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) + testrpc.WaitForLeader(t, s1.RPC, "dc2", testrpc.WithToken("root")) + // Create ACL tokens with read permission to the service and to the service + // and all nodes. + var execNoNodesToken string + { req := structs.ACLRequest{ Datacenter: "dc1", Op: structs.ACLSet, ACL: structs.ACL{ Name: "User token", Type: structs.ACLTokenTypeClient, - Rules: rules, + Rules: `service "foo" { policy = "read" }`, }, WriteRequest: structs.WriteRequest{Token: "root"}, } - if err := msgpackrpc.CallWithCodec(codec1, "ACL.Apply", &req, &execToken); err != nil { + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "ACL.Apply", &req, &execNoNodesToken)) + } + var execToken string + { + req := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTokenTypeClient, + Rules: `service "foo" { policy = "read" } + node "" { policy = "read" }`, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "ACL.Apply", &req, &execToken)) + } + // Make a new exec token that can't read the service. 
+ var denyToken string + { + req := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTokenTypeClient, + Rules: `service "foo" { policy = "deny" }`, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "ACL.Apply", &req, &denyToken)) + } + + newSessionDC1 := func(t *testing.T) string { + t.Helper() + req := structs.SessionRequest{ + Datacenter: "dc1", + Op: structs.SessionCreate, + Session: structs.Session{ + Node: s1.config.NodeName, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + var session string + if err := msgpackrpc.CallWithCodec(codec1, "Session.Apply", &req, &session); err != nil { t.Fatalf("err: %v", err) } + return session } // Set up some nodes in each DC that host the service. @@ -1561,7 +1609,7 @@ func TestPreparedQuery_Execute(t *testing.T) { } // Run a query that doesn't exist. - { + t.Run("run query that doesn't exist", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: "nope", @@ -1569,17 +1617,30 @@ func TestPreparedQuery_Execute(t *testing.T) { var reply structs.PreparedQueryExecuteResponse err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply) - if err == nil || err.Error() != ErrQueryNotFound.Error() { - t.Fatalf("bad: %v", err) - } + assert.EqualError(t, err, ErrQueryNotFound.Error()) + assert.Len(t, reply.Nodes, 0) + }) - if len(reply.Nodes) != 0 { - t.Fatalf("bad: %v", reply) - } + expectNodes := func(t *testing.T, query *structs.PreparedQueryRequest, reply *structs.PreparedQueryExecuteResponse, n int) { + t.Helper() + assert.Len(t, reply.Nodes, n) + assert.Equal(t, "dc1", reply.Datacenter) + assert.Equal(t, 0, reply.Failovers) + assert.Equal(t, query.Query.Service.Service, reply.Service) + assert.Equal(t, query.Query.DNS, reply.DNS) + assert.True(t, reply.QueryMeta.KnownLeader) + } + expectFailoverNodes := func(t 
*testing.T, query *structs.PreparedQueryRequest, reply *structs.PreparedQueryExecuteResponse, n int) { + t.Helper() + assert.Len(t, reply.Nodes, n) + assert.Equal(t, "dc2", reply.Datacenter) + assert.Equal(t, 1, reply.Failovers) + assert.Equal(t, query.Query.Service.Service, reply.Service) + assert.Equal(t, query.Query.DNS, reply.DNS) + assert.True(t, reply.QueryMeta.KnownLeader) } - // Run the registered query. - { + t.Run("run the registered query", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -1587,21 +1648,11 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + expectNodes(t, &query, &reply, 10) + }) - if len(reply.Nodes) != 10 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } - } - - // Try with a limit. 
- { + t.Run("try with a limit", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -1610,48 +1661,40 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } - - if len(reply.Nodes) != 3 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + expectNodes(t, &query, &reply, 3) + }) // Run various service queries with node metadata filters. - { - cases := []struct { - filters map[string]string - numNodes int - }{ - { - filters: map[string]string{}, - numNodes: 10, - }, - { - filters: map[string]string{"instance_type": "t2.micro"}, - numNodes: 10, - }, - { - filters: map[string]string{"group": "1"}, - numNodes: 5, - }, - { - filters: map[string]string{"group": "0", "unique": "true"}, - numNodes: 1, - }, - } - - for _, tc := range cases { + for name, tc := range map[string]struct { + filters map[string]string + numNodes int + }{ + "no filter 10 nodes": { + filters: map[string]string{}, + numNodes: 10, + }, + "instance filter 10 nodes": { + filters: map[string]string{"instance_type": "t2.micro"}, + numNodes: 10, + }, + "group filter 5 nodes": { + filters: map[string]string{"group": "1"}, + numNodes: 5, + }, + "group filter unique 1 node": { + filters: map[string]string{"group": "0", "unique": "true"}, + numNodes: 1, + }, + } { + tc := tc + t.Run("node metadata - "+name, func(t *testing.T) { + session := newSessionDC1(t) nodeMetaQuery := structs.PreparedQueryRequest{ Datacenter: "dc1", Op: structs.PreparedQueryCreate, Query: &structs.PreparedQuery{ + Session: session, 
Service: structs.ServiceQuery{ Service: "foo", NodeMeta: tc.filters, @@ -1662,9 +1705,7 @@ func TestPreparedQuery_Execute(t *testing.T) { }, WriteRequest: structs.WriteRequest{Token: "root"}, } - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &nodeMetaQuery, &nodeMetaQuery.Query.ID); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &nodeMetaQuery, &nodeMetaQuery.Query.ID)) req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", @@ -1673,55 +1714,49 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } - - if len(reply.Nodes) != tc.numNodes { - t.Fatalf("bad: %v, %v", len(reply.Nodes), tc.numNodes) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + assert.Len(t, reply.Nodes, tc.numNodes) for _, node := range reply.Nodes { - if !structs.SatisfiesMetaFilters(node.Node.Meta, tc.filters) { - t.Fatalf("bad: %v", node.Node.Meta) - } + assert.True(t, structs.SatisfiesMetaFilters(node.Node.Meta, tc.filters), "meta: %v", node.Node.Meta) } - } + }) } // Run various service queries with service metadata filters - { - cases := []struct { - filters map[string]string - numNodes int - }{ - { - filters: map[string]string{}, - numNodes: 10, - }, - { - filters: map[string]string{"foo": "true"}, - numNodes: 10, - }, - { - filters: map[string]string{"svc-group": "0"}, - numNodes: 5, - }, - { - filters: map[string]string{"svc-group": "1"}, - numNodes: 5, - }, - { - filters: map[string]string{"svc-group": "0", "unique": "true"}, - numNodes: 1, - }, - } - - for _, tc := range cases { + for name, tc := range map[string]struct { + filters map[string]string + numNodes int + }{ + "no filter 10 nodes": { + filters: map[string]string{}, + numNodes: 10, + }, + "foo filter 10 
nodes": { + filters: map[string]string{"foo": "true"}, + numNodes: 10, + }, + "group filter 0 - 5 nodes": { + filters: map[string]string{"svc-group": "0"}, + numNodes: 5, + }, + "group filter 1 - 5 nodes": { + filters: map[string]string{"svc-group": "1"}, + numNodes: 5, + }, + "group filter 0 - unique 1 node": { + filters: map[string]string{"svc-group": "0", "unique": "true"}, + numNodes: 1, + }, + } { + tc := tc + require.True(t, t.Run("service metadata - "+name, func(t *testing.T) { + session := newSessionDC1(t) svcMetaQuery := structs.PreparedQueryRequest{ Datacenter: "dc1", Op: structs.PreparedQueryCreate, Query: &structs.PreparedQuery{ + Session: session, Service: structs.ServiceQuery{ Service: "foo", ServiceMeta: tc.filters, @@ -1743,25 +1778,24 @@ func TestPreparedQuery_Execute(t *testing.T) { var reply structs.PreparedQueryExecuteResponse require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - require.Len(t, reply.Nodes, tc.numNodes) + assert.Len(t, reply.Nodes, tc.numNodes) for _, node := range reply.Nodes { - require.True(t, structs.SatisfiesMetaFilters(node.Service.Meta, tc.filters)) + assert.True(t, structs.SatisfiesMetaFilters(node.Service.Meta, tc.filters), "meta: %v", node.Service.Meta) } - } + })) } // Push a coordinate for one of the nodes so we can try an RTT sort. We // have to sleep a little while for the coordinate batch to get flushed. 
{ req := structs.CoordinateUpdateRequest{ - Datacenter: "dc1", - Node: "node3", - Coord: coordinate.NewCoordinate(coordinate.DefaultConfig()), + Datacenter: "dc1", + Node: "node3", + Coord: coordinate.NewCoordinate(coordinate.DefaultConfig()), + WriteRequest: structs.WriteRequest{Token: "root"}, } var out struct{} - if err := msgpackrpc.CallWithCodec(codec1, "Coordinate.Update", &req, &out); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "Coordinate.Update", &req, &out)) time.Sleep(3 * s1.config.CoordinateUpdatePeriod) } @@ -1769,60 +1803,47 @@ func TestPreparedQuery_Execute(t *testing.T) { // showing that the node with a coordinate is always first proves we // call the RTT sorting function, which is tested elsewhere. for i := 0; i < 100; i++ { - req := structs.PreparedQueryExecuteRequest{ - Datacenter: "dc1", - QueryIDOrName: query.Query.ID, - Source: structs.QuerySource{ - Datacenter: "dc1", - Node: "node3", - }, - QueryOptions: structs.QueryOptions{Token: execToken}, - } + t.Run(fmt.Sprintf("rtt sort iter %d", i), func(t *testing.T) { + req := structs.PreparedQueryExecuteRequest{ + Datacenter: "dc1", + QueryIDOrName: query.Query.ID, + Source: structs.QuerySource{ + Datacenter: "dc1", + Node: "node3", + }, + QueryOptions: structs.QueryOptions{Token: execToken}, + } - var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + var reply structs.PreparedQueryExecuteResponse + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 10 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } - if reply.Nodes[0].Node.Node != "node3" { - t.Fatalf("bad: %v", reply) - } + 
expectNodes(t, &query, &reply, 10) + assert.Equal(t, "node3", reply.Nodes[0].Node.Node) + }) } // Make sure the shuffle looks like it's working. uniques := make(map[string]struct{}) for i := 0; i < 100; i++ { - req := structs.PreparedQueryExecuteRequest{ - Datacenter: "dc1", - QueryIDOrName: query.Query.ID, - QueryOptions: structs.QueryOptions{Token: execToken}, - } + t.Run(fmt.Sprintf("shuffle iter %d", i), func(t *testing.T) { + req := structs.PreparedQueryExecuteRequest{ + Datacenter: "dc1", + QueryIDOrName: query.Query.ID, + QueryOptions: structs.QueryOptions{Token: execToken}, + } - var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + var reply structs.PreparedQueryExecuteResponse + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 10 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } - var names []string - for _, node := range reply.Nodes { - names = append(names, node.Node.Node) - } - key := strings.Join(names, "|") - uniques[key] = struct{}{} + expectNodes(t, &query, &reply, 10) + + var names []string + for _, node := range reply.Nodes { + names = append(names, node.Node.Node) + } + key := strings.Join(names, "|") + uniques[key] = struct{}{} + }) } // We have to allow for the fact that there won't always be a unique @@ -1837,39 +1858,31 @@ func TestPreparedQuery_Execute(t *testing.T) { // so node3 should always show up first. 
query.Op = structs.PreparedQueryUpdate query.Query.Service.Near = "node3" - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) // Now run the query and make sure the sort looks right. - { - req := structs.PreparedQueryExecuteRequest{ - Agent: structs.QuerySource{ - Datacenter: "dc1", - Node: "node3", - }, - Datacenter: "dc1", - QueryIDOrName: query.Query.ID, - QueryOptions: structs.QueryOptions{Token: execToken}, - } + for i := 0; i < 10; i++ { + t.Run(fmt.Sprintf("run nearest query iter %d", i), func(t *testing.T) { + req := structs.PreparedQueryExecuteRequest{ + Agent: structs.QuerySource{ + Datacenter: "dc1", + Node: "node3", + }, + Datacenter: "dc1", + QueryIDOrName: query.Query.ID, + QueryOptions: structs.QueryOptions{Token: execToken}, + } - for i := 0; i < 10; i++ { var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if n := len(reply.Nodes); n != 10 { - t.Fatalf("expect 10 nodes, got: %d", n) - } - if node := reply.Nodes[0].Node.Node; node != "node3" { - t.Fatalf("expect node3 first, got: %q", node) - } - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + assert.Len(t, reply.Nodes, 10) + assert.Equal(t, "node3", reply.Nodes[0].Node.Node) + }) } // Query again, but this time set a client-supplied query source. This // proves that we allow overriding the baked-in value with ?near. - { + t.Run("nearest fallback to shuffle", func(t *testing.T) { // Set up the query with a non-existent node. This will cause the // nodes to be shuffled if the passed node is respected, proving // that we allow the override to happen. 
@@ -1890,26 +1903,21 @@ func TestPreparedQuery_Execute(t *testing.T) { shuffled := false for i := 0; i < 10; i++ { var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if n := len(reply.Nodes); n != 10 { - t.Fatalf("expect 10 nodes, got: %d", n) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + assert.Len(t, reply.Nodes, 10) + if node := reply.Nodes[0].Node.Node; node != "node3" { shuffled = true break } } - if !shuffled { - t.Fatalf("expect nodes to be shuffled") - } - } + require.True(t, shuffled, "expect nodes to be shuffled") + }) // If the exact node we are sorting near appears in the list, make sure it // gets popped to the front of the result. - { + t.Run("nearest bypasses shuffle", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Source: structs.QuerySource{ Datacenter: "dc1", @@ -1922,26 +1930,18 @@ func TestPreparedQuery_Execute(t *testing.T) { for i := 0; i < 10; i++ { var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if n := len(reply.Nodes); n != 10 { - t.Fatalf("expect 10 nodes, got: %d", n) - } - if node := reply.Nodes[0].Node.Node; node != "node1" { - t.Fatalf("expect node1 first, got: %q", node) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + assert.Len(t, reply.Nodes, 10) + assert.Equal(t, "node1", reply.Nodes[0].Node.Node) } - } + }) // Bake the magic "_agent" flag into the query. 
query.Query.Service.Near = "_agent" - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) // Check that we sort the local agent first when the magic flag is set. - { + t.Run("local agent is first using _agent on node3", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Agent: structs.QuerySource{ Datacenter: "dc1", @@ -1954,22 +1954,16 @@ func TestPreparedQuery_Execute(t *testing.T) { for i := 0; i < 10; i++ { var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if n := len(reply.Nodes); n != 10 { - t.Fatalf("expect 10 nodes, got: %d", n) - } - if node := reply.Nodes[0].Node.Node; node != "node3" { - t.Fatalf("expect node3 first, got: %q", node) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + assert.Len(t, reply.Nodes, 10) + assert.Equal(t, "node3", reply.Nodes[0].Node.Node) } - } + }) // Check that the query isn't just sorting "node3" first because we // provided it in the Agent query source. Proves that we use the // Agent source when the magic "_agent" flag is passed. 
- { + t.Run("local agent is first using _agent on foo", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Agent: structs.QuerySource{ Datacenter: "dc1", @@ -1985,26 +1979,20 @@ func TestPreparedQuery_Execute(t *testing.T) { shuffled := false for i := 0; i < 10; i++ { var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if n := len(reply.Nodes); n != 10 { - t.Fatalf("expect 10 nodes, got: %d", n) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + assert.Len(t, reply.Nodes, 10) if node := reply.Nodes[0].Node.Node; node != "node3" { shuffled = true break } } - if !shuffled { - t.Fatal("expect nodes to be shuffled") - } - } + require.True(t, shuffled, "expect nodes to be shuffled") + }) // Shuffles if the response comes from a non-local DC. Proves that the // agent query source does not interfere with the order. - { + t.Run("shuffles if coming from non-local dc", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Source: structs.QuerySource{ Datacenter: "dc2", @@ -2022,31 +2010,24 @@ func TestPreparedQuery_Execute(t *testing.T) { shuffled := false for i := 0; i < 10; i++ { var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if n := len(reply.Nodes); n != 10 { - t.Fatalf("expect 10 nodes, got: %d", n) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + assert.Len(t, reply.Nodes, 10) if reply.Nodes[0].Node.Node != "node3" { shuffled = true break } } - if !shuffled { - t.Fatal("expect node shuffle for remote results") - } - } + require.True(t, shuffled, "expect node shuffle for remote results") + }) // Un-bake the near parameter. 
query.Query.Service.Near = "" - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) // Update the health of a node to mark it critical. - setHealth := func(node string, health string) { + setHealth := func(t *testing.T, node string, health string) { + t.Helper() req := structs.RegisterRequest{ Datacenter: "dc1", Node: node, @@ -2064,14 +2045,12 @@ func TestPreparedQuery_Execute(t *testing.T) { WriteRequest: structs.WriteRequest{Token: "root"}, } var reply struct{} - if err := msgpackrpc.CallWithCodec(codec1, "Catalog.Register", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "Catalog.Register", &req, &reply)) } - setHealth("node1", api.HealthCritical) + setHealth(t, "node1", api.HealthCritical) // The failing node should be filtered. - { + t.Run("failing node filtered", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2079,27 +2058,17 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 9 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } + expectNodes(t, &query, &reply, 9) for _, node := range reply.Nodes { - if node.Node.Node == "node1" { - t.Fatalf("bad: %v", node) - } + assert.NotEqual(t, "node1", node.Node.Node) } - } + }) // Upgrade it to a warning and re-query, should be 10 nodes 
again. - setHealth("node1", api.HealthWarning) - { + setHealth(t, "node1", api.HealthWarning) + t.Run("warning nodes are included", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2107,27 +2076,17 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 10 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } - } + expectNodes(t, &query, &reply, 10) + }) // Make the query more picky so it excludes warning nodes. query.Query.Service.OnlyPassing = true - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) // The node in the warning state should be filtered. 
- { + t.Run("warning nodes are omitted with onlypassing=true", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2135,33 +2094,21 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 9 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } + expectNodes(t, &query, &reply, 9) for _, node := range reply.Nodes { - if node.Node.Node == "node1" { - t.Fatalf("bad: %v", node) - } + assert.NotEqual(t, "node1", node.Node.Node) } - } + }) // Make the query ignore all our health checks (which have "failing" ID // implicitly from their name). 
query.Query.Service.IgnoreCheckIDs = []types.CheckID{"failing"} - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) // We should end up with 10 nodes again - { + t.Run("all nodes including when ignoring failing checks", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2169,36 +2116,24 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 10 || - reply.Datacenter != "dc1" || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } - } + expectNodes(t, &query, &reply, 10) + }) // Undo that so all the following tests aren't broken! query.Query.Service.IgnoreCheckIDs = nil - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) // Make the query more picky by adding a tag filter. This just proves we // call into the tag filter, it is tested more thoroughly in a separate // test. 
query.Query.Service.Tags = []string{"!tag3"} - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) // The node in the warning state should be filtered as well as the node // with the filtered tag. - { + t.Run("filter node in warning state and filtered node", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2206,50 +2141,17 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 8 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } + expectNodes(t, &query, &reply, 8) for _, node := range reply.Nodes { - if node.Node.Node == "node1" || node.Node.Node == "node3" { - t.Fatalf("bad: %v", node) - } + assert.NotEqual(t, "node1", node.Node.Node) + assert.NotEqual(t, "node3", node.Node.Node) } - } - - // Make a new exec token that can't read the service. - var denyToken string - { - var rules = ` - service "foo" { - policy = "deny" - } - ` - - req := structs.ACLRequest{ - Datacenter: "dc1", - Op: structs.ACLSet, - ACL: structs.ACL{ - Name: "User token", - Type: structs.ACLTokenTypeClient, - Rules: rules, - }, - WriteRequest: structs.WriteRequest{Token: "root"}, - } - if err := msgpackrpc.CallWithCodec(codec1, "ACL.Apply", &req, &denyToken); err != nil { - t.Fatalf("err: %v", err) - } - } + }) // Make sure the query gets denied with this token. 
- { + t.Run("query denied with deny token", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2257,27 +2159,17 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 0 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } - } + expectNodes(t, &query, &reply, 0) + }) // Bake the exec token into the query. query.Query.Token = execToken - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) // Now even querying with the deny token should work. 
- { + t.Run("query with deny token still works", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2285,32 +2177,21 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 8 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } + expectNodes(t, &query, &reply, 8) for _, node := range reply.Nodes { - if node.Node.Node == "node1" || node.Node.Node == "node3" { - t.Fatalf("bad: %v", node) - } + assert.NotEqual(t, "node1", node.Node.Node) + assert.NotEqual(t, "node3", node.Node.Node) } - } + }) // Un-bake the token. query.Query.Token = "" - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) // Make sure the query gets denied again with the deny token. 
- { + t.Run("denied with deny token when no query token", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2318,23 +2199,25 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) + + expectNodes(t, &query, &reply, 0) + }) + + t.Run("filter nodes with exec token without node privileges", func(t *testing.T) { + req := structs.PreparedQueryExecuteRequest{ + Datacenter: "dc1", + QueryIDOrName: query.Query.ID, + QueryOptions: structs.QueryOptions{Token: execNoNodesToken}, } - if len(reply.Nodes) != 0 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } - } + var reply structs.PreparedQueryExecuteResponse + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - // Turn on version 8 ACLs, which will start to filter even with the exec - // token. 
- s1.config.ACLEnforceVersion8 = true - { + expectNodes(t, &query, &reply, 0) + }) + + t.Run("normal operation again with exec token", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2342,52 +2225,20 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 0 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } - } - - // Revert version 8 ACLs and make sure the query works again. - s1.config.ACLEnforceVersion8 = false - { - req := structs.PreparedQueryExecuteRequest{ - Datacenter: "dc1", - QueryIDOrName: query.Query.ID, - QueryOptions: structs.QueryOptions{Token: execToken}, - } - - var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } - - if len(reply.Nodes) != 8 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } + expectNodes(t, &query, &reply, 8) for _, node := range reply.Nodes { - if node.Node.Node == "node1" || node.Node.Node == "node3" { - t.Fatalf("bad: %v", node) - } + assert.NotEqual(t, "node1", node.Node.Node) + assert.NotEqual(t, "node3", node.Node.Node) } - } + }) // Now fail everything in dc1 and we should get an empty list back. 
for i := 0; i < 10; i++ { - setHealth(fmt.Sprintf("node%d", i+1), api.HealthCritical) + setHealth(t, fmt.Sprintf("node%d", i+1), api.HealthCritical) } - { + t.Run("everything is failing so should get empty list", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2395,27 +2246,17 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 0 || - reply.Datacenter != "dc1" || reply.Failovers != 0 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } - } + expectNodes(t, &query, &reply, 0) + }) // Modify the query to have it fail over to a bogus DC and then dc2. query.Query.Service.Failover.Datacenters = []string{"bogus", "dc2"} - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) // Now we should see 9 nodes from dc2 (we have the tag filter still). 
- { + t.Run("see 9 nodes from dc2 using tag filter", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2423,26 +2264,16 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 9 || - reply.Datacenter != "dc2" || reply.Failovers != 1 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } + expectFailoverNodes(t, &query, &reply, 9) for _, node := range reply.Nodes { - if node.Node.Node == "node3" { - t.Fatalf("bad: %v", node) - } + assert.NotEqual(t, "node3", node.Node.Node) } - } + }) // Make sure the limit and query options are forwarded. 
- { + t.Run("forward limit and query options", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2454,51 +2285,35 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 3 || - reply.Datacenter != "dc2" || reply.Failovers != 1 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } + expectFailoverNodes(t, &query, &reply, 3) for _, node := range reply.Nodes { - if node.Node.Node == "node3" { - t.Fatalf("bad: %v", node) - } + assert.NotEqual(t, "node3", node.Node.Node) } - } + }) // Make sure the remote shuffle looks like it's working. 
uniques = make(map[string]struct{}) for i := 0; i < 100; i++ { - req := structs.PreparedQueryExecuteRequest{ - Datacenter: "dc1", - QueryIDOrName: query.Query.ID, - QueryOptions: structs.QueryOptions{Token: execToken}, - } + t.Run(fmt.Sprintf("remote shuffle iter %d", i), func(t *testing.T) { + req := structs.PreparedQueryExecuteRequest{ + Datacenter: "dc1", + QueryIDOrName: query.Query.ID, + QueryOptions: structs.QueryOptions{Token: execToken}, + } - var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + var reply structs.PreparedQueryExecuteResponse + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 9 || - reply.Datacenter != "dc2" || reply.Failovers != 1 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } - var names []string - for _, node := range reply.Nodes { - names = append(names, node.Node.Node) - } - key := strings.Join(names, "|") - uniques[key] = struct{}{} + expectFailoverNodes(t, &query, &reply, 9) + var names []string + for _, node := range reply.Nodes { + names = append(names, node.Node.Node) + } + key := strings.Join(names, "|") + uniques[key] = struct{}{} + }) } // We have to allow for the fact that there won't always be a unique @@ -2509,7 +2324,7 @@ func TestPreparedQuery_Execute(t *testing.T) { } // Make sure the query response from dc2 gets denied with the deny token. 
- { + t.Run("query from dc2 denied with deny token", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2517,27 +2332,17 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 0 || - reply.Datacenter != "dc2" || reply.Failovers != 1 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } - } + expectFailoverNodes(t, &query, &reply, 0) + }) // Bake the exec token into the query. query.Query.Token = execToken - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Apply", &query, &query.Query.ID)) // Now even querying with the deny token should work. 
- { + t.Run("query from dc2 with exec token using deny token works", func(t *testing.T) { req := structs.PreparedQueryExecuteRequest{ Datacenter: "dc1", QueryIDOrName: query.Query.ID, @@ -2545,23 +2350,13 @@ func TestPreparedQuery_Execute(t *testing.T) { } var reply structs.PreparedQueryExecuteResponse - if err := msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply); err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, msgpackrpc.CallWithCodec(codec1, "PreparedQuery.Execute", &req, &reply)) - if len(reply.Nodes) != 9 || - reply.Datacenter != "dc2" || reply.Failovers != 1 || - reply.Service != query.Query.Service.Service || - !reflect.DeepEqual(reply.DNS, query.Query.DNS) || - !reply.QueryMeta.KnownLeader { - t.Fatalf("bad: %v", reply) - } + expectFailoverNodes(t, &query, &reply, 9) for _, node := range reply.Nodes { - if node.Node.Node == "node3" { - t.Fatalf("bad: %v", node) - } + assert.NotEqual(t, "node3", node.Node.Node) } - } + }) } func TestPreparedQuery_Execute_ForwardLeader(t *testing.T) { @@ -2960,8 +2755,8 @@ func TestPreparedQuery_Wrapper(t *testing.T) { defer s2.Shutdown() s2.tokens.UpdateReplicationToken("root", tokenStore.TokenSourceConfig) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - testrpc.WaitForLeader(t, s2.RPC, "dc2") + testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) + testrpc.WaitForLeader(t, s2.RPC, "dc2", testrpc.WithToken("root")) // Try to WAN join. 
joinWAN(t, s2, s1) diff --git a/agent/consul/rpc_test.go b/agent/consul/rpc_test.go index fdd37705c9..f3afda1c03 100644 --- a/agent/consul/rpc_test.go +++ b/agent/consul/rpc_test.go @@ -743,7 +743,6 @@ func TestRPC_LocalTokenStrippedOnForward(t *testing.T) { c.ACLsEnabled = true c.ACLDefaultPolicy = "deny" c.ACLMasterToken = "root" - c.ACLEnforceVersion8 = true }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -757,7 +756,6 @@ func TestRPC_LocalTokenStrippedOnForward(t *testing.T) { c.ACLsEnabled = true c.ACLDefaultPolicy = "deny" c.ACLTokenReplication = true - c.ACLEnforceVersion8 = true c.ACLReplicationRate = 100 c.ACLReplicationBurst = 100 c.ACLReplicationApplyLimit = 1000000 diff --git a/agent/consul/server.go b/agent/consul/server.go index fec1efd6c1..fc5bf34ab1 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -46,6 +46,8 @@ import ( // Consul-level protocol versions, that are used to configure the Serf // protocol versions. const ( + DefaultRPCProtocol = 2 + ProtocolVersionMin uint8 = 2 // Version 3 added support for network coordinates but we kept the diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index a49d18e926..ce88a0ab71 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -47,7 +47,6 @@ func testServerACLConfig(cb func(*Config)) func(*Config) { c.ACLsEnabled = true c.ACLMasterToken = TestDefaultMasterToken c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = true if cb != nil { cb(c) diff --git a/agent/consul/session_endpoint.go b/agent/consul/session_endpoint.go index 353635768d..2dd0527c29 100644 --- a/agent/consul/session_endpoint.go +++ b/agent/consul/session_endpoint.go @@ -59,7 +59,7 @@ func (s *Session) Apply(args *structs.SessionRequest, reply *string) error { return err } - if authz != nil && s.srv.config.ACLEnforceVersion8 { + if authz != nil { switch args.Op { case structs.SessionDestroy: state := s.srv.fsm.State() @@ -302,7 +302,7 @@ func (s *Session) Renew(args 
*structs.SessionSpecificRequest, return nil } - if authz != nil && s.srv.config.ACLEnforceVersion8 && authz.SessionWrite(session.Node, &authzContext) != acl.Allow { + if authz != nil && authz.SessionWrite(session.Node, &authzContext) != acl.Allow { return acl.ErrPermissionDenied } diff --git a/agent/consul/session_endpoint_test.go b/agent/consul/session_endpoint_test.go index 5d0a013b2f..e8984f44b5 100644 --- a/agent/consul/session_endpoint_test.go +++ b/agent/consul/session_endpoint_test.go @@ -145,7 +145,6 @@ func TestSession_Apply_ACLDeny(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -153,7 +152,7 @@ func TestSession_Apply_ACLDeny(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Create the ACL. req := structs.ACLRequest{ @@ -179,8 +178,7 @@ session "foo" { // Just add a node. s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}) - // Try to create without a token, which will go through since version 8 - // enforcement isn't enabled. + // Try to create without a token, which will be denied. arg := structs.SessionRequest{ Datacenter: "dc1", Op: structs.SessionCreate, @@ -189,40 +187,23 @@ session "foo" { Name: "my-session", }, } - var id1 string - if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &id1); err != nil { - t.Fatalf("err: %v", err) - } - - // Now turn on version 8 enforcement and try again, it should be denied. - var id2 string - s1.config.ACLEnforceVersion8 = true - err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &id2) + var id string + err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &id) if !acl.IsErrPermissionDenied(err) { t.Fatalf("err: %v", err) } // Now set a token and try again. This should go through. 
arg.Token = token - if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &id2); err != nil { + if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &id); err != nil { t.Fatalf("err: %v", err) } - // Do a delete on the first session with version 8 enforcement off and - // no token. This should go through. + // Make sure the delete of the session fails without a token. var out string - s1.config.ACLEnforceVersion8 = false arg.Op = structs.SessionDestroy + arg.Session.ID = id arg.Token = "" - arg.Session.ID = id1 - if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &out); err != nil { - t.Fatalf("err: %v", err) - } - - // Turn on version 8 enforcement and make sure the delete of the second - // session fails. - s1.config.ACLEnforceVersion8 = true - arg.Session.ID = id2 err = msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &out) if !acl.IsErrPermissionDenied(err) { t.Fatalf("err: %v", err) @@ -386,7 +367,6 @@ func TestSession_Get_List_NodeSessions_ACLFilter(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -394,7 +374,7 @@ func TestSession_Get_List_NodeSessions_ACLFilter(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Create the ACL. req := structs.ACLRequest{ @@ -432,8 +412,7 @@ session "foo" { t.Fatalf("err: %v", err) } - // Perform all the read operations, which should go through since version - // 8 ACL enforcement isn't enabled. + // Perform all the read operations, and make sure everything is empty. 
getR := structs.SessionSpecificRequest{ Datacenter: "dc1", SessionID: out, @@ -443,7 +422,7 @@ session "foo" { if err := msgpackrpc.CallWithCodec(codec, "Session.Get", &getR, &sessions); err != nil { t.Fatalf("err: %v", err) } - if len(sessions.Sessions) != 1 { + if len(sessions.Sessions) != 0 { t.Fatalf("bad: %v", sessions.Sessions) } } @@ -452,10 +431,11 @@ session "foo" { } { var sessions structs.IndexedSessions + if err := msgpackrpc.CallWithCodec(codec, "Session.List", &listR, &sessions); err != nil { t.Fatalf("err: %v", err) } - if len(sessions.Sessions) != 1 { + if len(sessions.Sessions) != 0 { t.Fatalf("bad: %v", sessions.Sessions) } } @@ -463,37 +443,6 @@ session "foo" { Datacenter: "dc1", Node: "foo", } - { - var sessions structs.IndexedSessions - if err := msgpackrpc.CallWithCodec(codec, "Session.NodeSessions", &nodeR, &sessions); err != nil { - t.Fatalf("err: %v", err) - } - if len(sessions.Sessions) != 1 { - t.Fatalf("bad: %v", sessions.Sessions) - } - } - - // Now turn on version 8 enforcement and make sure everything is empty. 
- s1.config.ACLEnforceVersion8 = true - { - var sessions structs.IndexedSessions - if err := msgpackrpc.CallWithCodec(codec, "Session.Get", &getR, &sessions); err != nil { - t.Fatalf("err: %v", err) - } - if len(sessions.Sessions) != 0 { - t.Fatalf("bad: %v", sessions.Sessions) - } - } - { - var sessions structs.IndexedSessions - - if err := msgpackrpc.CallWithCodec(codec, "Session.List", &listR, &sessions); err != nil { - t.Fatalf("err: %v", err) - } - if len(sessions.Sessions) != 0 { - t.Fatalf("bad: %v", sessions.Sessions) - } - } { var sessions structs.IndexedSessions if err := msgpackrpc.CallWithCodec(codec, "Session.NodeSessions", &nodeR, &sessions); err != nil { @@ -765,7 +714,6 @@ func TestSession_Renew_ACLDeny(t *testing.T) { c.ACLsEnabled = true c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -773,7 +721,7 @@ func TestSession_Renew_ACLDeny(t *testing.T) { codec := rpcClient(t, s1) defer codec.Close() - testrpc.WaitForTestAgent(t, s1.RPC, "dc1") + testrpc.WaitForTestAgent(t, s1.RPC, "dc1", testrpc.WithToken("root")) // Create the ACL. req := structs.ACLRequest{ @@ -799,8 +747,7 @@ session "foo" { // Just add a node. s1.fsm.State().EnsureNode(1, &structs.Node{Node: "foo", Address: "127.0.0.1"}) - // Create a session. The token won't matter here since we don't have - // version 8 ACL enforcement on yet. + // Create a session. arg := structs.SessionRequest{ Datacenter: "dc1", Op: structs.SessionCreate, @@ -808,25 +755,19 @@ session "foo" { Node: "foo", Name: "my-session", }, + WriteRequest: structs.WriteRequest{Token: token}, } var id string if err := msgpackrpc.CallWithCodec(codec, "Session.Apply", &arg, &id); err != nil { t.Fatalf("err: %v", err) } - // Renew without a token should go through without version 8 ACL - // enforcement. + // Renew without a token should be rejected. 
renewR := structs.SessionSpecificRequest{ Datacenter: "dc1", SessionID: id, } var session structs.IndexedSessions - if err := msgpackrpc.CallWithCodec(codec, "Session.Renew", &renewR, &session); err != nil { - t.Fatalf("err: %v", err) - } - - // Now turn on version 8 enforcement and the renew should be rejected. - s1.config.ACLEnforceVersion8 = true err := msgpackrpc.CallWithCodec(codec, "Session.Renew", &renewR, &session) if !acl.IsErrPermissionDenied(err) { t.Fatalf("err: %v", err) diff --git a/agent/http_test.go b/agent/http_test.go index 759e7fd237..56025966e5 100644 --- a/agent/http_test.go +++ b/agent/http_test.go @@ -810,7 +810,6 @@ func TestPProfHandlers_ACLs(t *testing.T) { acl_master_token = "master" acl_agent_token = "agent" acl_agent_master_token = "towel" - acl_enforce_version_8 = true enable_debug = false `) diff --git a/agent/local/state_test.go b/agent/local/state_test.go index bdd2b16365..f73757b071 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -754,8 +754,7 @@ func TestAgentAntiEntropy_Services_ACLDeny(t *testing.T) { a := agent.NewTestAgent(t, ` acl_datacenter = "dc1" acl_master_token = "root" - acl_default_policy = "deny" - acl_enforce_version_8 = true`) + acl_default_policy = "deny" `) defer a.Shutdown() testrpc.WaitForLeader(t, a.RPC, "dc1") @@ -1170,8 +1169,7 @@ func TestAgentAntiEntropy_Checks_ACLDeny(t *testing.T) { a := &agent.TestAgent{HCL: ` acl_datacenter = "` + dc + `" acl_master_token = "root" - acl_default_policy = "deny" - acl_enforce_version_8 = true`} + acl_default_policy = "deny" `} if err := a.Start(t); err != nil { t.Fatal(err) } diff --git a/agent/testagent.go b/agent/testagent.go index 28cac61ca5..776c381573 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -470,7 +470,6 @@ func TestACLConfig() string { acl_master_token = "root" acl_agent_token = "root" acl_agent_master_token = "towel" - acl_enforce_version_8 = true ` } diff --git a/api/api_test.go b/api/api_test.go index 
913e9530d9..f8de4486f6 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -38,9 +38,10 @@ func makeACLClient(t *testing.T) (*Client, *testutil.TestServer) { clientConfig.Token = "root" }, func(serverConfig *testutil.TestServerConfig) { serverConfig.PrimaryDatacenter = "dc1" - serverConfig.ACLMasterToken = "root" + serverConfig.ACL.Tokens.Master = "root" + serverConfig.ACL.Tokens.Agent = "root" serverConfig.ACL.Enabled = true - serverConfig.ACLDefaultPolicy = "deny" + serverConfig.ACL.DefaultPolicy = "deny" }) } diff --git a/command/version/version.go b/command/version/version.go index 9e67bda865..7736cb36aa 100644 --- a/command/version/version.go +++ b/command/version/version.go @@ -3,7 +3,6 @@ package version import ( "fmt" - "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/consul" "github.com/mitchellh/cli" ) @@ -20,11 +19,8 @@ type cmd struct { func (c *cmd) Run(_ []string) int { c.UI.Output(fmt.Sprintf("Consul %s", c.version)) - rpcProtocol, err := config.DefaultRPCProtocol() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } + const rpcProtocol = consul.DefaultRPCProtocol + var supplement string if rpcProtocol < consul.ProtocolVersionMax { supplement = fmt.Sprintf(" (agent will automatically use protocol >%d when speaking to compatible agents)", diff --git a/sdk/testutil/server.go b/sdk/testutil/server.go index 66c7f43f94..a80f4cea19 100644 --- a/sdk/testutil/server.go +++ b/sdk/testutil/server.go @@ -88,7 +88,6 @@ type TestServerConfig struct { ACLDatacenter string `json:"acl_datacenter,omitempty"` PrimaryDatacenter string `json:"primary_datacenter,omitempty"` ACLDefaultPolicy string `json:"acl_default_policy,omitempty"` - ACLEnforceVersion8 bool `json:"acl_enforce_version_8"` ACL TestACLs `json:"acl,omitempty"` Encrypt string `json:"encrypt,omitempty"` CAFile string `json:"ca_file,omitempty"` @@ -381,7 +380,8 @@ func (s *TestServer) waitForAPI() error { for !time.Now().After(deadline) { time.Sleep(timer.Wait) - resp, 
err := s.HTTPClient.Get(s.url("/v1/agent/self")) + url := s.url("/v1/agent/self") + resp, err := s.masterGet(url) if err != nil { failed = true continue @@ -407,7 +407,7 @@ func (s *TestServer) WaitForLeader(t *testing.T) { retry.Run(t, func(r *retry.R) { // Query the API and check the status code. url := s.url("/v1/catalog/nodes") - resp, err := s.HTTPClient.Get(url) + resp, err := s.masterGet(url) if err != nil { r.Fatalf("failed http get '%s': %v", url, err) } @@ -443,7 +443,7 @@ func (s *TestServer) WaitForActiveCARoot(t *testing.T) { retry.Run(t, func(r *retry.R) { // Query the API and check the status code. url := s.url("/v1/agent/connect/ca/roots") - resp, err := s.HTTPClient.Get(url) + resp, err := s.masterGet(url) if err != nil { r.Fatalf("failed http get '%s': %v", url, err) } @@ -475,7 +475,7 @@ func (s *TestServer) WaitForSerfCheck(t *testing.T) { retry.Run(t, func(r *retry.R) { // Query the API and check the status code. url := s.url("/v1/catalog/nodes?index=0") - resp, err := s.HTTPClient.Get(url) + resp, err := s.masterGet(url) if err != nil { r.Fatal("failed http get", err) } @@ -496,7 +496,7 @@ func (s *TestServer) WaitForSerfCheck(t *testing.T) { // Ensure the serfHealth check is registered url = s.url(fmt.Sprintf("/v1/health/node/%s", payload[0]["Node"])) - resp, err = s.HTTPClient.Get(url) + resp, err = s.masterGet(url) if err != nil { r.Fatal("failed http get", err) } @@ -521,3 +521,14 @@ func (s *TestServer) WaitForSerfCheck(t *testing.T) { } }) } + +func (s *TestServer) masterGet(url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + if s.Config.ACL.Tokens.Master != "" { + req.Header.Set("x-consul-token", s.Config.ACL.Tokens.Master) + } + return s.HTTPClient.Do(req) +} diff --git a/testrpc/wait.go b/testrpc/wait.go index f4b859a163..737bbe1376 100644 --- a/testrpc/wait.go +++ b/testrpc/wait.go @@ -11,12 +11,20 @@ import ( type rpcFn func(string, interface{}, interface{}) 
error // WaitForLeader ensures we have a leader and a node registration. -func WaitForLeader(t *testing.T, rpc rpcFn, dc string) { +func WaitForLeader(t *testing.T, rpc rpcFn, dc string, options ...waitOption) { t.Helper() + flat := flattenOptions(options) + if flat.WaitForAntiEntropySync { + t.Fatalf("WaitForLeader doesn't accept the WaitForAntiEntropySync option") + } + var out structs.IndexedNodes retry.Run(t, func(r *retry.R) { - args := &structs.DCSpecificRequest{Datacenter: dc} + args := &structs.DCSpecificRequest{ + Datacenter: dc, + QueryOptions: structs.QueryOptions{Token: flat.Token}, + } if err := rpc("Catalog.ListNodes", args, &out); err != nil { r.Fatalf("Catalog.ListNodes failed: %v", err) } @@ -30,12 +38,20 @@ func WaitForLeader(t *testing.T, rpc rpcFn, dc string) { } // WaitUntilNoLeader ensures no leader is present, useful for testing lost leadership. -func WaitUntilNoLeader(t *testing.T, rpc rpcFn, dc string) { +func WaitUntilNoLeader(t *testing.T, rpc rpcFn, dc string, options ...waitOption) { t.Helper() + flat := flattenOptions(options) + if flat.WaitForAntiEntropySync { + t.Fatalf("WaitUntilNoLeader doesn't accept the WaitForAntiEntropySync option") + } + var out structs.IndexedNodes retry.Run(t, func(r *retry.R) { - args := &structs.DCSpecificRequest{Datacenter: dc} + args := &structs.DCSpecificRequest{ + Datacenter: dc, + QueryOptions: structs.QueryOptions{Token: flat.Token}, + } if err := rpc("Catalog.ListNodes", args, &out); err == nil { r.Fatalf("It still has a leader: %#v", out) } @@ -58,30 +74,32 @@ func WaitForAntiEntropySync() waitOption { return waitOption{WaitForAntiEntropySync: true} } +func flattenOptions(options []waitOption) waitOption { + var flat waitOption + for _, opt := range options { + if opt.Token != "" { + flat.Token = opt.Token + } + if opt.WaitForAntiEntropySync { + flat.WaitForAntiEntropySync = true + } + } + return flat +} + // WaitForTestAgent ensures we have a node with serfHealth check registered func 
WaitForTestAgent(t *testing.T, rpc rpcFn, dc string, options ...waitOption) { t.Helper() + flat := flattenOptions(options) + var nodes structs.IndexedNodes var checks structs.IndexedHealthChecks - var ( - token string - waitForAntiEntropySync bool - ) - for _, opt := range options { - if opt.Token != "" { - token = opt.Token - } - if opt.WaitForAntiEntropySync { - waitForAntiEntropySync = true - } - } - retry.Run(t, func(r *retry.R) { dcReq := &structs.DCSpecificRequest{ Datacenter: dc, - QueryOptions: structs.QueryOptions{Token: token}, + QueryOptions: structs.QueryOptions{Token: flat.Token}, } if err := rpc("Catalog.ListNodes", dcReq, &nodes); err != nil { r.Fatalf("Catalog.ListNodes failed: %v", err) @@ -90,7 +108,7 @@ func WaitForTestAgent(t *testing.T, rpc rpcFn, dc string, options ...waitOption) r.Fatalf("No registered nodes") } - if waitForAntiEntropySync { + if flat.WaitForAntiEntropySync { if len(nodes.Nodes[0].TaggedAddresses) == 0 { r.Fatalf("Not synced via anti entropy yet") } @@ -100,7 +118,7 @@ func WaitForTestAgent(t *testing.T, rpc rpcFn, dc string, options ...waitOption) nodeReq := &structs.NodeSpecificRequest{ Datacenter: dc, Node: nodes.Nodes[0].Node, - QueryOptions: structs.QueryOptions{Token: token}, + QueryOptions: structs.QueryOptions{Token: flat.Token}, } if err := rpc("Health.NodeChecks", nodeReq, &checks); err != nil { r.Fatalf("Health.NodeChecks failed: %v", err) diff --git a/website/pages/docs/agent/options.mdx b/website/pages/docs/agent/options.mdx index bac8220c5a..eaed2adc9b 100644 --- a/website/pages/docs/agent/options.mdx +++ b/website/pages/docs/agent/options.mdx @@ -735,7 +735,7 @@ Valid time units are 'ns', 'us' (or 'µs'), 'ms', 's', 'm', 'h'." of the node-level information in the catalog such as metadata, or the node's tagged addresses. 
- `acl_enforce_version_8` - **Deprecated in - Consul 1.4.0 and removed in 1.8.0.** Used for clients and servers to determine if enforcement should occur for new ACL policies being previewed before Consul 0.8. Added in Consul 0.7.2, this defaults to false in versions of Consul prior to 0.8, and defaults to true in Consul 0.8 and later. This helps ease the transition to the new ACL features diff --git a/website/pages/docs/upgrading/upgrade-specific.mdx b/website/pages/docs/upgrading/upgrade-specific.mdx index 71d36e2ca8..0dca1792ad 100644 --- a/website/pages/docs/upgrading/upgrade-specific.mdx +++ b/website/pages/docs/upgrading/upgrade-specific.mdx @@ -15,6 +15,12 @@ provided for their upgrades as a result of new features or changed behavior. This page is used to document those details separately from the standard upgrade flow. +## Consul 1.8.0 + +The [`acl_enforce_version_8`](/docs/agent/options#acl_enforce_version_8) +configuration has been removed (with version 8 ACL support being on by +default). + ## Consul 1.7.0 Consul 1.7.0 contains three major changes that impact upgrades: