diff --git a/agent/consul/acl_test.go b/agent/consul/acl_test.go index fd08b54ab8..45048f80a1 100644 --- a/agent/consul/acl_test.go +++ b/agent/consul/acl_test.go @@ -589,116 +589,120 @@ func TestACL_DownPolicy_ExtendCache(t *testing.T) { func TestACL_Replication(t *testing.T) { t.Parallel() - dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.ACLDatacenter = "dc1" - c.ACLMasterToken = "root" - }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - client := rpcClient(t, s1) - defer client.Close() + aclExtendPolicies := []string{"extend-cache", "async-cache"} //"async-cache" - dir2, s2 := testServerWithConfig(t, func(c *Config) { - c.Datacenter = "dc2" - c.ACLDatacenter = "dc1" - c.ACLDefaultPolicy = "deny" - c.ACLDownPolicy = "extend-cache" - c.EnableACLReplication = true - c.ACLReplicationInterval = 10 * time.Millisecond - c.ACLReplicationApplyLimit = 1000000 - }) - s2.tokens.UpdateACLReplicationToken("root") - defer os.RemoveAll(dir2) - defer s2.Shutdown() + for _, aclDownPolicy := range aclExtendPolicies { + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + client := rpcClient(t, s1) + defer client.Close() - dir3, s3 := testServerWithConfig(t, func(c *Config) { - c.Datacenter = "dc3" - c.ACLDatacenter = "dc1" - c.ACLDownPolicy = "deny" - c.EnableACLReplication = true - c.ACLReplicationInterval = 10 * time.Millisecond - c.ACLReplicationApplyLimit = 1000000 - }) - s3.tokens.UpdateACLReplicationToken("root") - defer os.RemoveAll(dir3) - defer s3.Shutdown() + dir2, s2 := testServerWithConfig(t, func(c *Config) { + c.Datacenter = "dc2" + c.ACLDatacenter = "dc1" + c.ACLDefaultPolicy = "deny" + c.ACLDownPolicy = aclDownPolicy + c.EnableACLReplication = true + c.ACLReplicationInterval = 10 * time.Millisecond + c.ACLReplicationApplyLimit = 1000000 + }) + s2.tokens.UpdateACLReplicationToken("root") + defer os.RemoveAll(dir2) + defer s2.Shutdown() - // 
Try to join. - joinWAN(t, s2, s1) - joinWAN(t, s3, s1) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - testrpc.WaitForLeader(t, s1.RPC, "dc2") - testrpc.WaitForLeader(t, s1.RPC, "dc3") + dir3, s3 := testServerWithConfig(t, func(c *Config) { + c.Datacenter = "dc3" + c.ACLDatacenter = "dc1" + c.ACLDownPolicy = "deny" + c.EnableACLReplication = true + c.ACLReplicationInterval = 10 * time.Millisecond + c.ACLReplicationApplyLimit = 1000000 + }) + s3.tokens.UpdateACLReplicationToken("root") + defer os.RemoveAll(dir3) + defer s3.Shutdown() - // Create a new token. - arg := structs.ACLRequest{ - Datacenter: "dc1", - Op: structs.ACLSet, - ACL: structs.ACL{ - Name: "User token", - Type: structs.ACLTypeClient, - Rules: testACLPolicy, - }, - WriteRequest: structs.WriteRequest{Token: "root"}, - } - var id string - if err := s1.RPC("ACL.Apply", &arg, &id); err != nil { - t.Fatalf("err: %v", err) - } - // Wait for replication to occur. - retry.Run(t, func(r *retry.R) { - _, acl, err := s2.fsm.State().ACLGet(nil, id) + // Try to join. + joinWAN(t, s2, s1) + joinWAN(t, s3, s1) + testrpc.WaitForLeader(t, s1.RPC, "dc1") + testrpc.WaitForLeader(t, s1.RPC, "dc2") + testrpc.WaitForLeader(t, s1.RPC, "dc3") + + // Create a new token. + arg := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: testACLPolicy, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + var id string + if err := s1.RPC("ACL.Apply", &arg, &id); err != nil { + t.Fatalf("err: %v", err) + } + // Wait for replication to occur. + retry.Run(t, func(r *retry.R) { + _, acl, err := s2.fsm.State().ACLGet(nil, id) + if err != nil { + r.Fatal(err) + } + if acl == nil { + r.Fatal(nil) + } + _, acl, err = s3.fsm.State().ACLGet(nil, id) + if err != nil { + r.Fatal(err) + } + if acl == nil { + r.Fatal(nil) + } + }) + + // Kill the ACL datacenter. + s1.Shutdown() + + // Token should resolve on s2, which has replication + extend-cache. 
+ acl, err := s2.resolveToken(id) if err != nil { - r.Fatal(err) + t.Fatalf("err: %v", err) } if acl == nil { - r.Fatal(nil) + t.Fatalf("missing acl") } - _, acl, err = s3.fsm.State().ACLGet(nil, id) + + // Check the policy + if acl.KeyRead("bar") { + t.Fatalf("unexpected read") + } + if !acl.KeyRead("foo/test") { + t.Fatalf("unexpected failed read") + } + + // Although s3 has replication, and we verified that the ACL is there, + // it can not be used because of the down policy. + acl, err = s3.resolveToken(id) if err != nil { - r.Fatal(err) + t.Fatalf("err: %v", err) } if acl == nil { - r.Fatal(nil) + t.Fatalf("missing acl") } - }) - // Kill the ACL datacenter. - s1.Shutdown() - - // Token should resolve on s2, which has replication + extend-cache. - acl, err := s2.resolveToken(id) - if err != nil { - t.Fatalf("err: %v", err) - } - if acl == nil { - t.Fatalf("missing acl") - } - - // Check the policy - if acl.KeyRead("bar") { - t.Fatalf("unexpected read") - } - if !acl.KeyRead("foo/test") { - t.Fatalf("unexpected failed read") - } - - // Although s3 has replication, and we verified that the ACL is there, - // it can not be used because of the down policy. - acl, err = s3.resolveToken(id) - if err != nil { - t.Fatalf("err: %v", err) - } - if acl == nil { - t.Fatalf("missing acl") - } - - // Check the policy. - if acl.KeyRead("bar") { - t.Fatalf("unexpected read") - } - if acl.KeyRead("foo/test") { - t.Fatalf("unexpected read") + // Check the policy. + if acl.KeyRead("bar") { + t.Fatalf("unexpected read") + } + if acl.KeyRead("foo/test") { + t.Fatalf("unexpected read") + } } } diff --git a/website/source/docs/agent/options.html.md b/website/source/docs/agent/options.html.md index 4e2ecb3033..65402c2caf 100644 --- a/website/source/docs/agent/options.html.md +++ b/website/source/docs/agent/options.html.md @@ -496,11 +496,13 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass to enable ACL support. 
* `acl_down_policy` - Either - "allow", "deny" or "extend-cache"; "extend-cache" is the default. In the case that the + "allow", "deny", "extend-cache" or "async-cache"; "extend-cache" is the default. In the case that the policy for a token cannot be read from the [`acl_datacenter`](#acl_datacenter) or leader node, the down policy is applied. In "allow" mode, all actions are permitted, "deny" restricts all operations, and "extend-cache" allows any cached ACLs to be used, ignoring their TTL - values. If a non-cached ACL is used, "extend-cache" acts like "deny". + values. If a non-cached ACL is used, "extend-cache" acts like "deny". "async-cache" acts the same + way as "extend-cache" but performs updates asynchronously when an ACL is present but its TTL has + expired. * `acl_agent_master_token` - Used to access agent endpoints that require agent read diff --git a/website/source/docs/guides/acl.html.md b/website/source/docs/guides/acl.html.md index f6aad52bce..c25666ab94 100644 --- a/website/source/docs/guides/acl.html.md +++ b/website/source/docs/guides/acl.html.md @@ -1062,9 +1062,10 @@ is set to "extend-cache", tokens will be resolved during the outage using the replicated set of ACLs. An [ACL replication status](/api/acl.html#acl_replication_status) endpoint is available to monitor the health of the replication process. Also note that in recent versions of Consul (greater than 1.2.0), using -`acl_down_policy = "extend-cache"` refreshes token asynchronously when an ACL is -already cached and is expired. It allows to avoid having issues when connectivity with -the authoritative is not completely broken, but very slow. +`acl_down_policy = "async-cache"` refreshes tokens asynchronously when an ACL is +already cached but its TTL has expired, while keeping semantics similar to "extend-cache". +This helps avoid issues when connectivity with the authoritative datacenter is not completely +broken, but merely very slow.
Locally-resolved ACLs will be cached using the [`acl_ttl`](/docs/agent/options.html#acl_ttl) setting of the non-authoritative datacenter, so these entries may persist in the