mirror of https://github.com/status-im/consul.git
Updated documentation and added more test cases for async-cache
This commit is contained in:
parent
abde81a3e7
commit
1e7665c0d5
@@ -589,116 +589,120 @@ func TestACL_DownPolicy_ExtendCache(t *testing.T) {

TestACL_Replication now runs its body once per down policy, covering both "extend-cache" and the new "async-cache"; s2's ACLDownPolicy comes from the loop variable instead of being hard-coded to "extend-cache". The updated test:

func TestACL_Replication(t *testing.T) {
	t.Parallel()
	aclExtendPolicies := []string{"extend-cache", "async-cache"} //"async-cache"

	for _, aclDownPolicy := range aclExtendPolicies {

		dir1, s1 := testServerWithConfig(t, func(c *Config) {
			c.ACLDatacenter = "dc1"
			c.ACLMasterToken = "root"
		})
		defer os.RemoveAll(dir1)
		defer s1.Shutdown()
		client := rpcClient(t, s1)
		defer client.Close()

		dir2, s2 := testServerWithConfig(t, func(c *Config) {
			c.Datacenter = "dc2"
			c.ACLDatacenter = "dc1"
			c.ACLDefaultPolicy = "deny"
			c.ACLDownPolicy = aclDownPolicy
			c.EnableACLReplication = true
			c.ACLReplicationInterval = 10 * time.Millisecond
			c.ACLReplicationApplyLimit = 1000000
		})
		s2.tokens.UpdateACLReplicationToken("root")
		defer os.RemoveAll(dir2)
		defer s2.Shutdown()

		dir3, s3 := testServerWithConfig(t, func(c *Config) {
			c.Datacenter = "dc3"
			c.ACLDatacenter = "dc1"
			c.ACLDownPolicy = "deny"
			c.EnableACLReplication = true
			c.ACLReplicationInterval = 10 * time.Millisecond
			c.ACLReplicationApplyLimit = 1000000
		})
		s3.tokens.UpdateACLReplicationToken("root")
		defer os.RemoveAll(dir3)
		defer s3.Shutdown()

		// Try to join.
		joinWAN(t, s2, s1)
		joinWAN(t, s3, s1)
		testrpc.WaitForLeader(t, s1.RPC, "dc1")
		testrpc.WaitForLeader(t, s1.RPC, "dc2")
		testrpc.WaitForLeader(t, s1.RPC, "dc3")

		// Create a new token.
		arg := structs.ACLRequest{
			Datacenter: "dc1",
			Op:         structs.ACLSet,
			ACL: structs.ACL{
				Name:  "User token",
				Type:  structs.ACLTypeClient,
				Rules: testACLPolicy,
			},
			WriteRequest: structs.WriteRequest{Token: "root"},
		}
		var id string
		if err := s1.RPC("ACL.Apply", &arg, &id); err != nil {
			t.Fatalf("err: %v", err)
		}
		// Wait for replication to occur.
		retry.Run(t, func(r *retry.R) {
			_, acl, err := s2.fsm.State().ACLGet(nil, id)
			if err != nil {
				r.Fatal(err)
			}
			if acl == nil {
				r.Fatal(nil)
			}
			_, acl, err = s3.fsm.State().ACLGet(nil, id)
			if err != nil {
				r.Fatal(err)
			}
			if acl == nil {
				r.Fatal(nil)
			}
		})

		// Kill the ACL datacenter.
		s1.Shutdown()

		// Token should resolve on s2, which has replication + extend-cache.
		acl, err := s2.resolveToken(id)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if acl == nil {
			t.Fatalf("missing acl")
		}

		// Check the policy
		if acl.KeyRead("bar") {
			t.Fatalf("unexpected read")
		}
		if !acl.KeyRead("foo/test") {
			t.Fatalf("unexpected failed read")
		}

		// Although s3 has replication, and we verified that the ACL is there,
		// it can not be used because of the down policy.
		acl, err = s3.resolveToken(id)
		if err != nil {
			t.Fatalf("err: %v", err)
		}
		if acl == nil {
			t.Fatalf("missing acl")
		}

		// Check the policy.
		if acl.KeyRead("bar") {
			t.Fatalf("unexpected read")
		}
		if acl.KeyRead("foo/test") {
			t.Fatalf("unexpected read")
		}
	}
}
@@ -496,11 +496,13 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass

  to enable ACL support.

* <a name="acl_down_policy"></a><a href="#acl_down_policy">`acl_down_policy`</a> - Either
  "allow", "deny", "extend-cache" or "async-cache"; "extend-cache" is the default. In the case that the
  policy for a token cannot be read from the [`acl_datacenter`](#acl_datacenter) or leader
  node, the down policy is applied. In "allow" mode, all actions are permitted, "deny" restricts
  all operations, and "extend-cache" allows any cached ACLs to be used, ignoring their TTL
  values. If a non-cached ACL is used, "extend-cache" acts like "deny". "async-cache" acts the same
  way as "extend-cache" but performs updates asynchronously when an ACL is already cached and its
  TTL has expired.

* <a name="acl_agent_master_token"></a><a href="#acl_agent_master_token">`acl_agent_master_token`</a> -
  Used to access <a href="/api/agent.html">agent endpoints</a> that require agent read
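For illustration, here is a minimal, self-contained Go sketch of how the four `acl_down_policy` values could be interpreted when the ACL datacenter is unreachable. The names used here (`cachedACL`, `resolveWithDownPolicy`, the `refresh` callback) are hypothetical and are not Consul's internal API; this sketches the documented semantics, not the implementation.

```go
package main

import (
	"fmt"
	"time"
)

// cachedACL is a hypothetical cache entry; Consul's real cache is different.
type cachedACL struct {
	policy  string    // stand-in for a parsed ACL policy
	fetched time.Time // when the entry was last fetched from the ACL datacenter
}

// resolveWithDownPolicy sketches the behavior of the four acl_down_policy
// values when the ACL datacenter cannot be reached. It returns the policy to
// apply and whether the request may proceed at all.
func resolveWithDownPolicy(downPolicy string, entry *cachedACL, ttl time.Duration, refresh func()) (string, bool) {
	switch downPolicy {
	case "allow":
		// All actions are permitted while the ACL datacenter is down.
		return "allow-all", true
	case "extend-cache":
		// Any cached ACL may be used, ignoring its TTL; otherwise act like "deny".
		if entry != nil {
			return entry.policy, true
		}
		return "", false
	case "async-cache":
		// Same as "extend-cache", but if the cached entry's TTL has expired,
		// serve it anyway and refresh it in the background.
		if entry != nil {
			if time.Since(entry.fetched) > ttl {
				go refresh()
			}
			return entry.policy, true
		}
		return "", false
	default: // "deny"
		return "", false
	}
}

func main() {
	stale := &cachedACL{policy: `key "foo/" { policy = "read" }`, fetched: time.Now().Add(-time.Hour)}
	policy, ok := resolveWithDownPolicy("async-cache", stale, 30*time.Second, func() {
		fmt.Println("refreshing token in the background")
	})
	fmt.Println(ok, policy)
	time.Sleep(10 * time.Millisecond) // give the background refresh a chance to print
}
```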
@@ -1062,9 +1062,10 @@ is set to "extend-cache", tokens will be resolved during the outage using the

replicated set of ACLs. An [ACL replication status](/api/acl.html#acl_replication_status)
endpoint is available to monitor the health of the replication process.
Also note that in recent versions of Consul (greater than 1.2.0), using
`acl_down_policy = "async-cache"` refreshes tokens asynchronously when an ACL is
already cached and its TTL has expired, while keeping the same semantics as "extend-cache".
This helps avoid problems when connectivity with the authoritative datacenter is not
completely broken, but very slow.

Locally-resolved ACLs will be cached using the [`acl_ttl`](/docs/agent/options.html#acl_ttl)
setting of the non-authoritative datacenter, so these entries may persist in the
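To illustrate the "serve stale, refresh in the background" behavior described above, here is a small self-contained Go sketch. The `tokenCache`, `resolve`, and `fetch` names are hypothetical, not Consul's actual code: an expired cached token is returned immediately while at most one asynchronous refresh per token runs, so a slow authoritative datacenter does not add latency to the request path.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// tokenCache is an illustrative structure, not Consul's implementation: it
// returns expired entries immediately and refreshes them in the background.
type tokenCache struct {
	mu         sync.Mutex
	policies   map[string]string    // token ID -> cached policy
	fetchedAt  map[string]time.Time // token ID -> last successful fetch
	refreshing map[string]bool      // token ID -> background refresh in flight
	ttl        time.Duration
	fetch      func(id string) (string, error) // slow RPC to the authoritative DC (hypothetical)
}

func (c *tokenCache) resolve(id string) (string, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	policy, ok := c.policies[id]
	if !ok {
		return "", false // nothing cached: fall back to the configured down policy
	}
	if time.Since(c.fetchedAt[id]) > c.ttl && !c.refreshing[id] {
		// Entry is stale: serve it anyway and refresh asynchronously,
		// at most one refresh per token at a time.
		c.refreshing[id] = true
		go func() {
			fresh, err := c.fetch(id)
			c.mu.Lock()
			defer c.mu.Unlock()
			c.refreshing[id] = false
			if err == nil {
				c.policies[id] = fresh
				c.fetchedAt[id] = time.Now()
			}
		}()
	}
	return policy, true
}

func main() {
	c := &tokenCache{
		policies:   map[string]string{"tok1": `key "foo/" { policy = "read" }`},
		fetchedAt:  map[string]time.Time{"tok1": time.Now().Add(-time.Minute)},
		refreshing: map[string]bool{},
		ttl:        30 * time.Second,
		fetch: func(id string) (string, error) {
			time.Sleep(200 * time.Millisecond) // simulate a slow authoritative datacenter
			return `key "foo/" { policy = "read" }`, nil
		},
	}
	start := time.Now()
	policy, ok := c.resolve("tok1")
	fmt.Printf("resolved=%v in %v (policy: %s)\n", ok, time.Since(start), policy)
	time.Sleep(300 * time.Millisecond) // let the background refresh finish
}
```

The single `refreshing` flag is just one way to keep a thundering herd of refreshes from forming; Consul's real cache may handle this differently.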