mirror of https://github.com/status-im/consul.git
remove v2 tenancy, catalog, and mesh (#21592)
* remove v2 tenancy, catalog, and mesh
  - Inline the v2tenancy experiment to false
  - Inline the resource-apis experiment to false
  - Inline the hcp-v2-resource-apis experiment to false
  - Remove ACL policy templates and rule-language changes related to workload identities (a v2-only concept), e.g. `identity` and `identity_prefix`
  - Update the gRPC endpoint used by consul-dataplane to no longer respond specially for v2
  - Remove stray v2 references scattered throughout the newer DNS v1.5 implementation
* changelog
* go mod tidy on consul containers
* lint fixes from ENT

---------

Co-authored-by: John Murret <john.murret@hashicorp.com>
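For context, the deprecated `identity` and `identity_prefix` rules removed here looked roughly like the sketch below. This is assembled from the HCL test fixtures deleted in this diff; the name "foo" and the empty prefix are purely illustrative. After this change the parser still decodes these blocks for backward compatibility but zeroes them out during validation, so they no longer grant any permissions.

```hcl
# Exact-match rule for a single workload identity named "foo" (removed syntax).
identity "foo" {
  policy     = "read"
  intentions = "read"
}

# Prefix rule; the empty prefix "" matched every workload identity (removed syntax).
identity_prefix "" {
  policy = "write"
}
```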
This commit is contained in:
parent
188af1ccb0
commit
3e6f1c1fe1
@@ -0,0 +1,3 @@
```release-note:feature
server: remove v2 tenancy, catalog, and mesh experiments
```
1
Makefile
@@ -294,7 +294,6 @@ lint-container-test-deps: ## Check that the test-container module only imports a
@cd test/integration/consul-container && \
$(CURDIR)/build-support/scripts/check-allowed-imports.sh \
github.com/hashicorp/consul \
"internal/catalog/catalogtest" \
"internal/resource/resourcetest"

##@ Testing
@@ -59,31 +59,6 @@ func (m *MockAuthorizer) EventWrite(segment string, ctx *AuthorizerContext) Enfo
return ret.Get(0).(EnforcementDecision)
}

// IdentityRead checks for permission to read a given workload identity.
func (m *MockAuthorizer) IdentityRead(segment string, ctx *AuthorizerContext) EnforcementDecision {
ret := m.Called(segment, ctx)
return ret.Get(0).(EnforcementDecision)
}

// IdentityReadAll checks for permission to read all workload identities.
func (m *MockAuthorizer) IdentityReadAll(ctx *AuthorizerContext) EnforcementDecision {
ret := m.Called(ctx)
return ret.Get(0).(EnforcementDecision)
}

// IdentityWrite checks for permission to create or update a given
// workload identity.
func (m *MockAuthorizer) IdentityWrite(segment string, ctx *AuthorizerContext) EnforcementDecision {
ret := m.Called(segment, ctx)
return ret.Get(0).(EnforcementDecision)
}

// IdentityWriteAny checks for write permission on any workload identity.
func (m *MockAuthorizer) IdentityWriteAny(ctx *AuthorizerContext) EnforcementDecision {
ret := m.Called(ctx)
return ret.Get(0).(EnforcementDecision)
}

// IntentionDefaultAllow determines the default authorized behavior
// when no intentions match a Connect request.
func (m *MockAuthorizer) IntentionDefaultAllow(ctx *AuthorizerContext) EnforcementDecision {
188
acl/acl_test.go
@ -40,22 +40,6 @@ func checkAllowEventWrite(t *testing.T, authz Authorizer, prefix string, entCtx
|
|||
require.Equal(t, Allow, authz.EventWrite(prefix, entCtx))
|
||||
}
|
||||
|
||||
func checkAllowIdentityRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Allow, authz.IdentityRead(prefix, entCtx))
|
||||
}
|
||||
|
||||
func checkAllowIdentityReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Allow, authz.IdentityReadAll(entCtx))
|
||||
}
|
||||
|
||||
func checkAllowIdentityWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Allow, authz.IdentityWrite(prefix, entCtx))
|
||||
}
|
||||
|
||||
func checkAllowIdentityWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Allow, authz.IdentityWriteAny(entCtx))
|
||||
}
|
||||
|
||||
func checkAllowIntentionDefaultAllow(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Allow, authz.IntentionDefaultAllow(entCtx))
|
||||
}
|
||||
|
@ -196,22 +180,6 @@ func checkDenyEventWrite(t *testing.T, authz Authorizer, prefix string, entCtx *
|
|||
require.Equal(t, Deny, authz.EventWrite(prefix, entCtx))
|
||||
}
|
||||
|
||||
func checkDenyIdentityRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Deny, authz.IdentityRead(prefix, entCtx))
|
||||
}
|
||||
|
||||
func checkDenyIdentityReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Deny, authz.IdentityReadAll(entCtx))
|
||||
}
|
||||
|
||||
func checkDenyIdentityWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Deny, authz.IdentityWrite(prefix, entCtx))
|
||||
}
|
||||
|
||||
func checkDenyIdentityWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Deny, authz.IdentityWriteAny(entCtx))
|
||||
}
|
||||
|
||||
func checkDenyIntentionDefaultAllow(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Deny, authz.IntentionDefaultAllow(entCtx))
|
||||
}
|
||||
|
@ -360,22 +328,6 @@ func checkDefaultEventWrite(t *testing.T, authz Authorizer, prefix string, entCt
|
|||
require.Equal(t, Default, authz.EventWrite(prefix, entCtx))
|
||||
}
|
||||
|
||||
func checkDefaultIdentityRead(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Default, authz.IdentityRead(prefix, entCtx))
|
||||
}
|
||||
|
||||
func checkDefaultIdentityReadAll(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Default, authz.IdentityReadAll(entCtx))
|
||||
}
|
||||
|
||||
func checkDefaultIdentityWrite(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Default, authz.IdentityWrite(prefix, entCtx))
|
||||
}
|
||||
|
||||
func checkDefaultIdentityWriteAny(t *testing.T, authz Authorizer, _ string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Default, authz.IdentityWriteAny(entCtx))
|
||||
}
|
||||
|
||||
func checkDefaultIntentionDefaultAllow(t *testing.T, authz Authorizer, prefix string, entCtx *AuthorizerContext) {
|
||||
require.Equal(t, Default, authz.IntentionDefaultAllow(entCtx))
|
||||
}
|
||||
|
@ -516,10 +468,6 @@ func TestACL(t *testing.T) {
|
|||
{name: "DenyIntentionDefaultAllow", check: checkDenyIntentionDefaultAllow},
|
||||
{name: "DenyIntentionRead", check: checkDenyIntentionRead},
|
||||
{name: "DenyIntentionWrite", check: checkDenyIntentionWrite},
|
||||
{name: "DenyIdentityRead", check: checkDenyIdentityRead},
|
||||
{name: "DenyIdentityReadAll", check: checkDenyIdentityReadAll},
|
||||
{name: "DenyIdentityWrite", check: checkDenyIdentityWrite},
|
||||
{name: "DenyIdentityWriteAny", check: checkDenyIdentityWriteAny},
|
||||
{name: "DenyKeyRead", check: checkDenyKeyRead},
|
||||
{name: "DenyKeyringRead", check: checkDenyKeyringRead},
|
||||
{name: "DenyKeyringWrite", check: checkDenyKeyringWrite},
|
||||
|
@ -554,10 +502,6 @@ func TestACL(t *testing.T) {
|
|||
{name: "AllowAgentWrite", check: checkAllowAgentWrite},
|
||||
{name: "AllowEventRead", check: checkAllowEventRead},
|
||||
{name: "AllowEventWrite", check: checkAllowEventWrite},
|
||||
{name: "AllowIdentityRead", check: checkAllowIdentityRead},
|
||||
{name: "AllowIdentityReadAll", check: checkAllowIdentityReadAll},
|
||||
{name: "AllowIdentityWrite", check: checkAllowIdentityWrite},
|
||||
{name: "AllowIdentityWriteAny", check: checkAllowIdentityWriteAny},
|
||||
{name: "AllowIntentionDefaultAllow", check: checkAllowIntentionDefaultAllow},
|
||||
{name: "AllowIntentionRead", check: checkAllowIntentionRead},
|
||||
{name: "AllowIntentionWrite", check: checkAllowIntentionWrite},
|
||||
|
@ -597,10 +541,6 @@ func TestACL(t *testing.T) {
|
|||
{name: "AllowAgentWrite", check: checkAllowAgentWrite},
|
||||
{name: "AllowEventRead", check: checkAllowEventRead},
|
||||
{name: "AllowEventWrite", check: checkAllowEventWrite},
|
||||
{name: "AllowIdentityRead", check: checkAllowIdentityRead},
|
||||
{name: "AllowIdentityReadAll", check: checkAllowIdentityReadAll},
|
||||
{name: "AllowIdentityWrite", check: checkAllowIdentityWrite},
|
||||
{name: "AllowIdentityWriteAny", check: checkAllowIdentityWriteAny},
|
||||
{name: "AllowIntentionDefaultAllow", check: checkAllowIntentionDefaultAllow},
|
||||
{name: "AllowIntentionRead", check: checkAllowIntentionRead},
|
||||
{name: "AllowIntentionWrite", check: checkAllowIntentionWrite},
|
||||
|
@ -1000,134 +940,6 @@ func TestACL(t *testing.T) {
|
|||
{name: "ChildOverrideWriteAllowed", prefix: "override", check: checkAllowAgentWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "IdentityDefaultAllowPolicyDeny",
|
||||
defaultPolicy: AllowAll(),
|
||||
policyStack: []*Policy{
|
||||
{
|
||||
PolicyRules: PolicyRules{
|
||||
Identities: []*IdentityRule{
|
||||
{
|
||||
Name: "foo",
|
||||
Policy: PolicyDeny,
|
||||
},
|
||||
},
|
||||
IdentityPrefixes: []*IdentityRule{
|
||||
{
|
||||
Name: "prefix",
|
||||
Policy: PolicyDeny,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
checks: []aclCheck{
|
||||
{name: "IdentityFooReadDenied", prefix: "foo", check: checkDenyIdentityRead},
|
||||
{name: "IdentityFooWriteDenied", prefix: "foo", check: checkDenyIdentityWrite},
|
||||
{name: "IdentityPrefixReadDenied", prefix: "prefix", check: checkDenyIdentityRead},
|
||||
{name: "IdentityPrefixWriteDenied", prefix: "prefix", check: checkDenyIdentityWrite},
|
||||
{name: "IdentityBarReadAllowed", prefix: "fail", check: checkAllowIdentityRead},
|
||||
{name: "IdentityBarWriteAllowed", prefix: "fail", check: checkAllowIdentityWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "IdentityDefaultDenyPolicyAllow",
|
||||
defaultPolicy: DenyAll(),
|
||||
policyStack: []*Policy{
|
||||
{
|
||||
PolicyRules: PolicyRules{
|
||||
Identities: []*IdentityRule{
|
||||
{
|
||||
Name: "foo",
|
||||
Policy: PolicyWrite,
|
||||
},
|
||||
},
|
||||
IdentityPrefixes: []*IdentityRule{
|
||||
{
|
||||
Name: "prefix",
|
||||
Policy: PolicyRead,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
checks: []aclCheck{
|
||||
{name: "IdentityFooReadAllowed", prefix: "foo", check: checkAllowIdentityRead},
|
||||
{name: "IdentityFooWriteAllowed", prefix: "foo", check: checkAllowIdentityWrite},
|
||||
{name: "IdentityPrefixReadAllowed", prefix: "prefix", check: checkAllowIdentityRead},
|
||||
{name: "IdentityPrefixWriteDenied", prefix: "prefix", check: checkDenyIdentityWrite},
|
||||
{name: "IdentityBarReadDenied", prefix: "fail", check: checkDenyIdentityRead},
|
||||
{name: "IdentityBarWriteDenied", prefix: "fail", check: checkDenyIdentityWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "IdentityDefaultDenyPolicyComplex",
|
||||
defaultPolicy: DenyAll(),
|
||||
policyStack: []*Policy{
|
||||
{
|
||||
PolicyRules: PolicyRules{
|
||||
Identities: []*IdentityRule{
|
||||
{
|
||||
Name: "football",
|
||||
Policy: PolicyRead,
|
||||
},
|
||||
{
|
||||
Name: "prefix-forbidden",
|
||||
Policy: PolicyDeny,
|
||||
Intentions: PolicyDeny,
|
||||
},
|
||||
},
|
||||
IdentityPrefixes: []*IdentityRule{
|
||||
{
|
||||
Name: "foo",
|
||||
Policy: PolicyWrite,
|
||||
Intentions: PolicyWrite,
|
||||
},
|
||||
{
|
||||
Name: "prefix",
|
||||
Policy: PolicyRead,
|
||||
Intentions: PolicyWrite,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
PolicyRules: PolicyRules{
|
||||
Identities: []*IdentityRule{
|
||||
{
|
||||
Name: "foozball",
|
||||
Policy: PolicyWrite,
|
||||
Intentions: PolicyRead,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
checks: []aclCheck{
|
||||
{name: "IdentityReadAllowed", prefix: "foo", check: checkAllowIdentityRead},
|
||||
{name: "IdentityWriteAllowed", prefix: "foo", check: checkAllowIdentityWrite},
|
||||
{name: "TrafficPermissionsReadAllowed", prefix: "foo", check: checkAllowTrafficPermissionsRead},
|
||||
{name: "TrafficPermissionsWriteAllowed", prefix: "foo", check: checkAllowTrafficPermissionsWrite},
|
||||
{name: "IdentityReadAllowed", prefix: "football", check: checkAllowIdentityRead},
|
||||
{name: "IdentityWriteDenied", prefix: "football", check: checkDenyIdentityWrite},
|
||||
{name: "TrafficPermissionsReadAllowed", prefix: "football", check: checkAllowTrafficPermissionsRead},
|
||||
// This might be surprising but omitting intention rule gives at most intention:read
|
||||
// if we have identity:write perms. This matches services as well.
|
||||
{name: "TrafficPermissionsWriteDenied", prefix: "football", check: checkDenyTrafficPermissionsWrite},
|
||||
{name: "IdentityReadAllowed", prefix: "prefix", check: checkAllowIdentityRead},
|
||||
{name: "IdentityWriteDenied", prefix: "prefix", check: checkDenyIdentityWrite},
|
||||
{name: "TrafficPermissionsReadAllowed", prefix: "prefix", check: checkAllowTrafficPermissionsRead},
|
||||
{name: "TrafficPermissionsWriteDenied", prefix: "prefix", check: checkAllowTrafficPermissionsWrite},
|
||||
{name: "IdentityReadDenied", prefix: "prefix-forbidden", check: checkDenyIdentityRead},
|
||||
{name: "IdentityWriteDenied", prefix: "prefix-forbidden", check: checkDenyIdentityWrite},
|
||||
{name: "TrafficPermissionsReadDenied", prefix: "prefix-forbidden", check: checkDenyTrafficPermissionsRead},
|
||||
{name: "TrafficPermissionsWriteDenied", prefix: "prefix-forbidden", check: checkDenyTrafficPermissionsWrite},
|
||||
{name: "IdentityReadAllowed", prefix: "foozball", check: checkAllowIdentityRead},
|
||||
{name: "IdentityWriteAllowed", prefix: "foozball", check: checkAllowIdentityWrite},
|
||||
{name: "TrafficPermissionsReadAllowed", prefix: "foozball", check: checkAllowTrafficPermissionsRead},
|
||||
{name: "TrafficPermissionsWriteDenied", prefix: "foozball", check: checkDenyTrafficPermissionsWrite},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "KeyringDefaultAllowPolicyDeny",
|
||||
defaultPolicy: AllowAll(),
|
||||
|
|
|
@ -43,7 +43,6 @@ const (
|
|||
ResourceACL Resource = "acl"
|
||||
ResourceAgent Resource = "agent"
|
||||
ResourceEvent Resource = "event"
|
||||
ResourceIdentity Resource = "identity"
|
||||
ResourceIntention Resource = "intention"
|
||||
ResourceKey Resource = "key"
|
||||
ResourceKeyring Resource = "keyring"
|
||||
|
@ -78,19 +77,6 @@ type Authorizer interface {
|
|||
// EventWrite determines if a specific event may be fired.
|
||||
EventWrite(string, *AuthorizerContext) EnforcementDecision
|
||||
|
||||
// IdentityRead checks for permission to read a given workload identity.
|
||||
IdentityRead(string, *AuthorizerContext) EnforcementDecision
|
||||
|
||||
// IdentityReadAll checks for permission to read all workload identities.
|
||||
IdentityReadAll(*AuthorizerContext) EnforcementDecision
|
||||
|
||||
// IdentityWrite checks for permission to create or update a given
|
||||
// workload identity.
|
||||
IdentityWrite(string, *AuthorizerContext) EnforcementDecision
|
||||
|
||||
// IdentityWriteAny checks for write permission on any workload identity.
|
||||
IdentityWriteAny(*AuthorizerContext) EnforcementDecision
|
||||
|
||||
// IntentionDefaultAllow determines the default authorized behavior
|
||||
// when no intentions match a Connect request.
|
||||
//
|
||||
|
@ -267,40 +253,6 @@ func (a AllowAuthorizer) EventWriteAllowed(name string, ctx *AuthorizerContext)
|
|||
return nil
|
||||
}
|
||||
|
||||
// IdentityReadAllowed checks for permission to read a given workload identity,
|
||||
func (a AllowAuthorizer) IdentityReadAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.IdentityRead(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceIdentity, AccessRead, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IdentityReadAllAllowed checks for permission to read all workload identities.
|
||||
func (a AllowAuthorizer) IdentityReadAllAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.IdentityReadAll(ctx) != Allow {
|
||||
// This is only used to gate certain UI functions right now (e.g metrics)
|
||||
return PermissionDeniedByACL(a, ctx, ResourceIdentity, AccessRead, "all identities") // read
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IdentityWriteAllowed checks for permission to create or update a given
|
||||
// workload identity.
|
||||
func (a AllowAuthorizer) IdentityWriteAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.IdentityWrite(name, ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceIdentity, AccessWrite, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IdentityWriteAnyAllowed checks for write permission on any workload identity
|
||||
func (a AllowAuthorizer) IdentityWriteAnyAllowed(ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.IdentityWriteAny(ctx) != Allow {
|
||||
return PermissionDeniedByACL(a, ctx, ResourceIdentity, AccessWrite, "any identity")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IntentionReadAllowed determines if a specific intention can be read.
|
||||
func (a AllowAuthorizer) IntentionReadAllowed(name string, ctx *AuthorizerContext) error {
|
||||
if a.Authorizer.IntentionRead(name, ctx) != Allow {
|
||||
|
@ -579,13 +531,6 @@ func Enforce(authz Authorizer, rsc Resource, segment string, access string, ctx
|
|||
case "write":
|
||||
return authz.EventWrite(segment, ctx), nil
|
||||
}
|
||||
case ResourceIdentity:
|
||||
switch lowerAccess {
|
||||
case "read":
|
||||
return authz.IdentityRead(segment, ctx), nil
|
||||
case "write":
|
||||
return authz.IdentityWrite(segment, ctx), nil
|
||||
}
|
||||
case ResourceIntention:
|
||||
switch lowerAccess {
|
||||
case "read":
|
||||
|
|
|
@ -188,34 +188,6 @@ func TestACL_Enforce(t *testing.T) {
|
|||
ret: Deny,
|
||||
err: "Invalid access level",
|
||||
},
|
||||
{
|
||||
method: "IdentityRead",
|
||||
resource: ResourceIdentity,
|
||||
segment: "foo",
|
||||
access: "read",
|
||||
ret: Deny,
|
||||
},
|
||||
{
|
||||
method: "IdentityRead",
|
||||
resource: ResourceIdentity,
|
||||
segment: "foo",
|
||||
access: "read",
|
||||
ret: Allow,
|
||||
},
|
||||
{
|
||||
method: "IdentityWrite",
|
||||
resource: ResourceIdentity,
|
||||
segment: "foo",
|
||||
access: "write",
|
||||
ret: Deny,
|
||||
},
|
||||
{
|
||||
method: "IdentityWrite",
|
||||
resource: ResourceIdentity,
|
||||
segment: "foo",
|
||||
access: "write",
|
||||
ret: Allow,
|
||||
},
|
||||
{
|
||||
method: "IntentionRead",
|
||||
resource: ResourceIntention,
|
||||
|
|
|
@ -80,35 +80,6 @@ func (c *ChainedAuthorizer) EventWrite(name string, entCtx *AuthorizerContext) E
|
|||
})
|
||||
}
|
||||
|
||||
// IdentityRead checks for permission to read a given workload identity.
|
||||
func (c *ChainedAuthorizer) IdentityRead(name string, entCtx *AuthorizerContext) EnforcementDecision {
|
||||
return c.executeChain(func(authz Authorizer) EnforcementDecision {
|
||||
return authz.IdentityRead(name, entCtx)
|
||||
})
|
||||
}
|
||||
|
||||
// IdentityReadAll checks for permission to read all workload identities.
|
||||
func (c *ChainedAuthorizer) IdentityReadAll(entCtx *AuthorizerContext) EnforcementDecision {
|
||||
return c.executeChain(func(authz Authorizer) EnforcementDecision {
|
||||
return authz.IdentityReadAll(entCtx)
|
||||
})
|
||||
}
|
||||
|
||||
// IdentityWrite checks for permission to create or update a given
|
||||
// workload identity.
|
||||
func (c *ChainedAuthorizer) IdentityWrite(name string, entCtx *AuthorizerContext) EnforcementDecision {
|
||||
return c.executeChain(func(authz Authorizer) EnforcementDecision {
|
||||
return authz.IdentityWrite(name, entCtx)
|
||||
})
|
||||
}
|
||||
|
||||
// IdentityWriteAny checks for write permission on any workload identity.
|
||||
func (c *ChainedAuthorizer) IdentityWriteAny(entCtx *AuthorizerContext) EnforcementDecision {
|
||||
return c.executeChain(func(authz Authorizer) EnforcementDecision {
|
||||
return authz.IdentityWriteAny(entCtx)
|
||||
})
|
||||
}
|
||||
|
||||
// IntentionDefaultAllow determines the default authorized behavior
|
||||
// when no intentions match a Connect request.
|
||||
func (c *ChainedAuthorizer) IntentionDefaultAllow(entCtx *AuthorizerContext) EnforcementDecision {
|
||||
|
|
|
@ -59,8 +59,6 @@ type PolicyRules struct {
|
|||
ACL string `hcl:"acl,expand"`
|
||||
Agents []*AgentRule `hcl:"agent,expand"`
|
||||
AgentPrefixes []*AgentRule `hcl:"agent_prefix,expand"`
|
||||
Identities []*IdentityRule `hcl:"identity,expand"`
|
||||
IdentityPrefixes []*IdentityRule `hcl:"identity_prefix,expand"`
|
||||
Keys []*KeyRule `hcl:"key,expand"`
|
||||
KeyPrefixes []*KeyRule `hcl:"key_prefix,expand"`
|
||||
Nodes []*NodeRule `hcl:"node,expand"`
|
||||
|
@ -77,6 +75,11 @@ type PolicyRules struct {
|
|||
Operator string `hcl:"operator"`
|
||||
Mesh string `hcl:"mesh"`
|
||||
Peering string `hcl:"peering"`
|
||||
|
||||
// Deprecated: exists just to track the former field for decoding
|
||||
Identities []*IdentityRule `hcl:"identity,expand"`
|
||||
// Deprecated: exists just to track the former field for decoding
|
||||
IdentityPrefixes []*IdentityRule `hcl:"identity_prefix,expand"`
|
||||
}
|
||||
|
||||
// Policy is used to represent the policy specified by an ACL configuration.
|
||||
|
@ -93,6 +96,8 @@ type AgentRule struct {
|
|||
}
|
||||
|
||||
// IdentityRule represents a policy for a workload identity
|
||||
//
|
||||
// Deprecated: exists just to track the former field for decoding
|
||||
type IdentityRule struct {
|
||||
Name string `hcl:",key"`
|
||||
Policy string
|
||||
|
@ -183,29 +188,9 @@ func (pr *PolicyRules) Validate(conf *Config) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Validate the identity policies
|
||||
for _, id := range pr.Identities {
|
||||
if !isPolicyValid(id.Policy, false) {
|
||||
return fmt.Errorf("Invalid identity policy: %#v", id)
|
||||
}
|
||||
if id.Intentions != "" && !isPolicyValid(id.Intentions, false) {
|
||||
return fmt.Errorf("Invalid identity intentions policy: %#v", id)
|
||||
}
|
||||
if err := id.EnterpriseRule.Validate(id.Policy, conf); err != nil {
|
||||
return fmt.Errorf("Invalid identity enterprise policy: %#v, got error: %v", id, err)
|
||||
}
|
||||
}
|
||||
for _, id := range pr.IdentityPrefixes {
|
||||
if !isPolicyValid(id.Policy, false) {
|
||||
return fmt.Errorf("Invalid identity_prefix policy: %#v", id)
|
||||
}
|
||||
if id.Intentions != "" && !isPolicyValid(id.Intentions, false) {
|
||||
return fmt.Errorf("Invalid identity_prefix intentions policy: %#v", id)
|
||||
}
|
||||
if err := id.EnterpriseRule.Validate(id.Policy, conf); err != nil {
|
||||
return fmt.Errorf("Invalid identity_prefix enterprise policy: %#v, got error: %v", id, err)
|
||||
}
|
||||
}
|
||||
// Identity rules are deprecated, zero them out.
|
||||
pr.Identities = nil
|
||||
pr.IdentityPrefixes = nil
|
||||
|
||||
// Validate the key policy
|
||||
for _, kp := range pr.Keys {
|
||||
|
|
|
@ -14,9 +14,6 @@ type policyAuthorizer struct {
|
|||
// agentRules contain the exact-match agent policies
|
||||
agentRules *radix.Tree
|
||||
|
||||
// identityRules contains the identity exact-match policies
|
||||
identityRules *radix.Tree
|
||||
|
||||
// intentionRules contains the service intention exact-match policies
|
||||
intentionRules *radix.Tree
|
||||
|
||||
|
@ -186,48 +183,6 @@ func (p *policyAuthorizer) loadRules(policy *PolicyRules) error {
|
|||
}
|
||||
}
|
||||
|
||||
// Load the identity policy (exact matches)
|
||||
for _, id := range policy.Identities {
|
||||
if err := insertPolicyIntoRadix(id.Name, id.Policy, &id.EnterpriseRule, p.identityRules, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
intention := id.Intentions
|
||||
if intention == "" {
|
||||
switch id.Policy {
|
||||
case PolicyRead, PolicyWrite:
|
||||
intention = PolicyRead
|
||||
default:
|
||||
intention = PolicyDeny
|
||||
}
|
||||
}
|
||||
|
||||
if err := insertPolicyIntoRadix(id.Name, intention, &id.EnterpriseRule, p.trafficPermissionsRules, false); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Load the identity policy (prefix matches)
|
||||
for _, id := range policy.IdentityPrefixes {
|
||||
if err := insertPolicyIntoRadix(id.Name, id.Policy, &id.EnterpriseRule, p.identityRules, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
intention := id.Intentions
|
||||
if intention == "" {
|
||||
switch id.Policy {
|
||||
case PolicyRead, PolicyWrite:
|
||||
intention = PolicyRead
|
||||
default:
|
||||
intention = PolicyDeny
|
||||
}
|
||||
}
|
||||
|
||||
if err := insertPolicyIntoRadix(id.Name, intention, &id.EnterpriseRule, p.trafficPermissionsRules, true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Load the key policy (exact matches)
|
||||
for _, kp := range policy.Keys {
|
||||
if err := insertPolicyIntoRadix(kp.Prefix, kp.Policy, &kp.EnterpriseRule, p.keyRules, false); err != nil {
|
||||
|
@ -397,7 +352,6 @@ func newPolicyAuthorizer(policies []*Policy, ent *Config) (*policyAuthorizer, er
|
|||
func newPolicyAuthorizerFromRules(rules *PolicyRules, ent *Config) (*policyAuthorizer, error) {
|
||||
p := &policyAuthorizer{
|
||||
agentRules: radix.New(),
|
||||
identityRules: radix.New(),
|
||||
intentionRules: radix.New(),
|
||||
trafficPermissionsRules: radix.New(),
|
||||
keyRules: radix.New(),
|
||||
|
@ -578,33 +532,6 @@ func (p *policyAuthorizer) EventWrite(name string, _ *AuthorizerContext) Enforce
|
|||
return Default
|
||||
}
|
||||
|
||||
// IdentityRead checks for permission to read a given workload identity.
|
||||
func (p *policyAuthorizer) IdentityRead(name string, _ *AuthorizerContext) EnforcementDecision {
|
||||
if rule, ok := getPolicy(name, p.identityRules); ok {
|
||||
return enforce(rule.access, AccessRead)
|
||||
}
|
||||
return Default
|
||||
}
|
||||
|
||||
// IdentityReadAll checks for permission to read all workload identities.
|
||||
func (p *policyAuthorizer) IdentityReadAll(_ *AuthorizerContext) EnforcementDecision {
|
||||
return p.allAllowed(p.identityRules, AccessRead)
|
||||
}
|
||||
|
||||
// IdentityWrite checks for permission to create or update a given
|
||||
// workload identity.
|
||||
func (p *policyAuthorizer) IdentityWrite(name string, _ *AuthorizerContext) EnforcementDecision {
|
||||
if rule, ok := getPolicy(name, p.identityRules); ok {
|
||||
return enforce(rule.access, AccessWrite)
|
||||
}
|
||||
return Default
|
||||
}
|
||||
|
||||
// IdentityWriteAny checks for write permission on any workload identity.
|
||||
func (p *policyAuthorizer) IdentityWriteAny(_ *AuthorizerContext) EnforcementDecision {
|
||||
return p.anyAllowed(p.identityRules, AccessWrite)
|
||||
}
|
||||
|
||||
// IntentionDefaultAllow returns whether the default behavior when there are
|
||||
// no matching intentions is to allow or deny.
|
||||
func (p *policyAuthorizer) IntentionDefaultAllow(_ *AuthorizerContext) EnforcementDecision {
|
||||
|
|
|
@ -41,9 +41,6 @@ func TestPolicyAuthorizer(t *testing.T) {
|
|||
{name: "DefaultAgentWrite", prefix: "foo", check: checkDefaultAgentWrite},
|
||||
{name: "DefaultEventRead", prefix: "foo", check: checkDefaultEventRead},
|
||||
{name: "DefaultEventWrite", prefix: "foo", check: checkDefaultEventWrite},
|
||||
{name: "DefaultIdentityRead", prefix: "foo", check: checkDefaultIdentityRead},
|
||||
{name: "DefaultIdentityWrite", prefix: "foo", check: checkDefaultIdentityWrite},
|
||||
{name: "DefaultIdentityWriteAny", prefix: "", check: checkDefaultIdentityWriteAny},
|
||||
{name: "DefaultIntentionDefaultAllow", prefix: "foo", check: checkDefaultIntentionDefaultAllow},
|
||||
{name: "DefaultIntentionRead", prefix: "foo", check: checkDefaultIntentionRead},
|
||||
{name: "DefaultIntentionWrite", prefix: "foo", check: checkDefaultIntentionWrite},
|
||||
|
@ -190,29 +187,6 @@ func TestPolicyAuthorizer(t *testing.T) {
|
|||
Policy: PolicyRead,
|
||||
},
|
||||
},
|
||||
Identities: []*IdentityRule{
|
||||
{
|
||||
Name: "foo",
|
||||
Policy: PolicyWrite,
|
||||
Intentions: PolicyWrite,
|
||||
},
|
||||
{
|
||||
Name: "football",
|
||||
Policy: PolicyDeny,
|
||||
},
|
||||
},
|
||||
IdentityPrefixes: []*IdentityRule{
|
||||
{
|
||||
Name: "foot",
|
||||
Policy: PolicyRead,
|
||||
Intentions: PolicyRead,
|
||||
},
|
||||
{
|
||||
Name: "fo",
|
||||
Policy: PolicyRead,
|
||||
Intentions: PolicyRead,
|
||||
},
|
||||
},
|
||||
Keys: []*KeyRule{
|
||||
{
|
||||
Prefix: "foo",
|
||||
|
@ -400,22 +374,6 @@ func TestPolicyAuthorizer(t *testing.T) {
|
|||
{name: "ServiceWriteAnyAllowed", prefix: "", check: checkAllowServiceWriteAny},
|
||||
{name: "ServiceReadWithinPrefixDenied", prefix: "foot", check: checkDenyServiceReadPrefix},
|
||||
|
||||
{name: "IdentityReadPrefixAllowed", prefix: "fo", check: checkAllowIdentityRead},
|
||||
{name: "IdentityWritePrefixDenied", prefix: "fo", check: checkDenyIdentityWrite},
|
||||
{name: "IdentityReadPrefixAllowed", prefix: "for", check: checkAllowIdentityRead},
|
||||
{name: "IdentityWritePrefixDenied", prefix: "for", check: checkDenyIdentityWrite},
|
||||
{name: "IdentityReadAllowed", prefix: "foo", check: checkAllowIdentityRead},
|
||||
{name: "IdentityWriteAllowed", prefix: "foo", check: checkAllowIdentityWrite},
|
||||
{name: "IdentityReadPrefixAllowed", prefix: "foot", check: checkAllowIdentityRead},
|
||||
{name: "IdentityWritePrefixDenied", prefix: "foot", check: checkDenyIdentityWrite},
|
||||
{name: "IdentityReadPrefixAllowed", prefix: "foot2", check: checkAllowIdentityRead},
|
||||
{name: "IdentityWritePrefixDenied", prefix: "foot2", check: checkDenyIdentityWrite},
|
||||
{name: "IdentityReadPrefixAllowed", prefix: "food", check: checkAllowIdentityRead},
|
||||
{name: "IdentityWritePrefixDenied", prefix: "food", check: checkDenyIdentityWrite},
|
||||
{name: "IdentityReadDenied", prefix: "football", check: checkDenyIdentityRead},
|
||||
{name: "IdentityWriteDenied", prefix: "football", check: checkDenyIdentityWrite},
|
||||
{name: "IdentityWriteAnyAllowed", prefix: "", check: checkAllowIdentityWriteAny},
|
||||
|
||||
{name: "IntentionReadPrefixAllowed", prefix: "fo", check: checkAllowIntentionRead},
|
||||
{name: "IntentionWritePrefixDenied", prefix: "fo", check: checkDenyIntentionWrite},
|
||||
{name: "IntentionReadPrefixAllowed", prefix: "for", check: checkAllowIntentionRead},
|
||||
|
|
|
@ -42,12 +42,6 @@ func TestPolicySourceParse(t *testing.T) {
|
|||
event "bar" {
|
||||
policy = "deny"
|
||||
}
|
||||
identity_prefix "" {
|
||||
policy = "write"
|
||||
}
|
||||
identity "foo" {
|
||||
policy = "read"
|
||||
}
|
||||
key_prefix "" {
|
||||
policy = "read"
|
||||
}
|
||||
|
@ -123,16 +117,6 @@ func TestPolicySourceParse(t *testing.T) {
|
|||
"policy": "deny"
|
||||
}
|
||||
},
|
||||
"identity_prefix": {
|
||||
"": {
|
||||
"policy": "write"
|
||||
}
|
||||
},
|
||||
"identity": {
|
||||
"foo": {
|
||||
"policy": "read"
|
||||
}
|
||||
},
|
||||
"key_prefix": {
|
||||
"": {
|
||||
"policy": "read"
|
||||
|
@ -233,18 +217,6 @@ func TestPolicySourceParse(t *testing.T) {
|
|||
Policy: PolicyDeny,
|
||||
},
|
||||
},
|
||||
IdentityPrefixes: []*IdentityRule{
|
||||
{
|
||||
Name: "",
|
||||
Policy: PolicyWrite,
|
||||
},
|
||||
},
|
||||
Identities: []*IdentityRule{
|
||||
{
|
||||
Name: "foo",
|
||||
Policy: PolicyRead,
|
||||
},
|
||||
},
|
||||
Keyring: PolicyDeny,
|
||||
KeyPrefixes: []*KeyRule{
|
||||
{
|
||||
|
@ -331,39 +303,6 @@ func TestPolicySourceParse(t *testing.T) {
|
|||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
Name: "Identity No Intentions",
|
||||
Rules: `identity "foo" { policy = "write" }`,
|
||||
RulesJSON: `{ "identity": { "foo": { "policy": "write" }}}`,
|
||||
Expected: &Policy{PolicyRules: PolicyRules{
|
||||
Identities: []*IdentityRule{
|
||||
{
|
||||
Name: "foo",
|
||||
Policy: "write",
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
Name: "Identity Intentions",
|
||||
Rules: `identity "foo" { policy = "write" intentions = "read" }`,
|
||||
RulesJSON: `{ "identity": { "foo": { "policy": "write", "intentions": "read" }}}`,
|
||||
Expected: &Policy{PolicyRules: PolicyRules{
|
||||
Identities: []*IdentityRule{
|
||||
{
|
||||
Name: "foo",
|
||||
Policy: "write",
|
||||
Intentions: "read",
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
Name: "Identity Intention: invalid value",
|
||||
Rules: `identity "foo" { policy = "write" intentions = "foo" }`,
|
||||
RulesJSON: `{ "identity": { "foo": { "policy": "write", "intentions": "foo" }}}`,
|
||||
Err: "Invalid identity intentions policy",
|
||||
},
|
||||
{
|
||||
Name: "Service No Intentions",
|
||||
Rules: `service "foo" { policy = "write" }`,
|
||||
|
@ -415,18 +354,6 @@ func TestPolicySourceParse(t *testing.T) {
|
|||
RulesJSON: `{ "agent_prefix": { "foo": { "policy": "nope" }}}`,
|
||||
Err: "Invalid agent_prefix policy",
|
||||
},
|
||||
{
|
||||
Name: "Bad Policy - Identity",
|
||||
Rules: `identity "foo" { policy = "nope" }`,
|
||||
RulesJSON: `{ "identity": { "foo": { "policy": "nope" }}}`,
|
||||
Err: "Invalid identity policy",
|
||||
},
|
||||
{
|
||||
Name: "Bad Policy - Identity Prefix",
|
||||
Rules: `identity_prefix "foo" { policy = "nope" }`,
|
||||
RulesJSON: `{ "identity_prefix": { "foo": { "policy": "nope" }}}`,
|
||||
Err: "Invalid identity_prefix policy",
|
||||
},
|
||||
{
|
||||
Name: "Bad Policy - Key",
|
||||
Rules: `key "foo" { policy = "nope" }`,
|
||||
|
@ -758,109 +685,6 @@ func TestMergePolicies(t *testing.T) {
|
|||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "Identities",
|
||||
input: []*Policy{
|
||||
{PolicyRules: PolicyRules{
|
||||
Identities: []*IdentityRule{
|
||||
{
|
||||
Name: "foo",
|
||||
Policy: PolicyWrite,
|
||||
Intentions: PolicyWrite,
|
||||
},
|
||||
{
|
||||
Name: "bar",
|
||||
Policy: PolicyRead,
|
||||
Intentions: PolicyRead,
|
||||
},
|
||||
{
|
||||
Name: "baz",
|
||||
Policy: PolicyWrite,
|
||||
Intentions: PolicyWrite,
|
||||
},
|
||||
},
|
||||
IdentityPrefixes: []*IdentityRule{
|
||||
{
|
||||
Name: "000",
|
||||
Policy: PolicyWrite,
|
||||
Intentions: PolicyWrite,
|
||||
},
|
||||
{
|
||||
Name: "111",
|
||||
Policy: PolicyRead,
|
||||
Intentions: PolicyRead,
|
||||
},
|
||||
{
|
||||
Name: "222",
|
||||
Policy: PolicyWrite,
|
||||
Intentions: PolicyWrite,
|
||||
},
|
||||
},
|
||||
}},
|
||||
{PolicyRules: PolicyRules{
|
||||
Identities: []*IdentityRule{
|
||||
{
|
||||
Name: "foo",
|
||||
Policy: PolicyRead,
|
||||
Intentions: PolicyRead,
|
||||
},
|
||||
{
|
||||
Name: "baz",
|
||||
Policy: PolicyDeny,
|
||||
Intentions: PolicyDeny,
|
||||
},
|
||||
},
|
||||
IdentityPrefixes: []*IdentityRule{
|
||||
{
|
||||
Name: "000",
|
||||
Policy: PolicyRead,
|
||||
Intentions: PolicyRead,
|
||||
},
|
||||
{
|
||||
Name: "222",
|
||||
Policy: PolicyDeny,
|
||||
Intentions: PolicyDeny,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
expected: &Policy{PolicyRules: PolicyRules{
|
||||
Identities: []*IdentityRule{
|
||||
{
|
||||
Name: "foo",
|
||||
Policy: PolicyWrite,
|
||||
Intentions: PolicyWrite,
|
||||
},
|
||||
{
|
||||
Name: "bar",
|
||||
Policy: PolicyRead,
|
||||
Intentions: PolicyRead,
|
||||
},
|
||||
{
|
||||
Name: "baz",
|
||||
Policy: PolicyDeny,
|
||||
Intentions: PolicyDeny,
|
||||
},
|
||||
},
|
||||
IdentityPrefixes: []*IdentityRule{
|
||||
{
|
||||
Name: "000",
|
||||
Policy: PolicyWrite,
|
||||
Intentions: PolicyWrite,
|
||||
},
|
||||
{
|
||||
Name: "111",
|
||||
Policy: PolicyRead,
|
||||
Intentions: PolicyRead,
|
||||
},
|
||||
{
|
||||
Name: "222",
|
||||
Policy: PolicyDeny,
|
||||
Intentions: PolicyDeny,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
{
|
||||
name: "Node",
|
||||
input: []*Policy{
|
||||
|
|
|
@ -15,9 +15,10 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/go-jose/go-jose/v3/jwt"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/go-uuid"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/consul/authmethod/testauth"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
|
@ -1407,7 +1408,7 @@ func TestACL_HTTP(t *testing.T) {
|
|||
|
||||
var list map[string]api.ACLTemplatedPolicyResponse
|
||||
require.NoError(t, json.NewDecoder(resp.Body).Decode(&list))
|
||||
require.Len(t, list, 7)
|
||||
require.Len(t, list, 6)
|
||||
|
||||
require.Equal(t, api.ACLTemplatedPolicyResponse{
|
||||
TemplateName: api.ACLTemplatedPolicyServiceName,
|
||||
|
@ -2225,7 +2226,7 @@ func TestACL_Authorize(t *testing.T) {
|
|||
policyReq := structs.ACLPolicySetRequest{
|
||||
Policy: structs.ACLPolicy{
|
||||
Name: "test",
|
||||
Rules: `acl = "read" operator = "write" identity_prefix "" { policy = "read"} service_prefix "" { policy = "read"} node_prefix "" { policy= "write" } key_prefix "/foo" { policy = "write" } `,
|
||||
Rules: `acl = "read" operator = "write" service_prefix "" { policy = "read"} node_prefix "" { policy= "write" } key_prefix "/foo" { policy = "write" } `,
|
||||
},
|
||||
Datacenter: "dc1",
|
||||
WriteRequest: structs.WriteRequest{Token: TestDefaultInitialManagementToken},
|
||||
|
@ -2311,16 +2312,6 @@ func TestACL_Authorize(t *testing.T) {
|
|||
Segment: "foo",
|
||||
Access: "write",
|
||||
},
|
||||
{
|
||||
Resource: "identity",
|
||||
Segment: "foo",
|
||||
Access: "read",
|
||||
},
|
||||
{
|
||||
Resource: "identity",
|
||||
Segment: "foo",
|
||||
Access: "write",
|
||||
},
|
||||
{
|
||||
Resource: "intention",
|
||||
Segment: "foo",
|
||||
|
@ -2471,16 +2462,6 @@ func TestACL_Authorize(t *testing.T) {
|
|||
Segment: "foo",
|
||||
Access: "write",
|
||||
},
|
||||
{
|
||||
Resource: "identity",
|
||||
Segment: "foo",
|
||||
Access: "read",
|
||||
},
|
||||
{
|
||||
Resource: "identity",
|
||||
Segment: "foo",
|
||||
Access: "write",
|
||||
},
|
||||
{
|
||||
Resource: "intention",
|
||||
Segment: "foo",
|
||||
|
@ -2587,8 +2568,6 @@ func TestACL_Authorize(t *testing.T) {
|
|||
false, // agent:write
|
||||
false, // event:read
|
||||
false, // event:write
|
||||
true, // identity:read
|
||||
false, // identity:write
|
||||
true, // intentions:read
|
||||
false, // intention:write
|
||||
false, // key:read
|
||||
|
|
|
@ -152,14 +152,6 @@ const (
|
|||
retryFullSyncState fsmState = "retryFullSync"
|
||||
)
|
||||
|
||||
// HardDisableSync is like PauseSync but is one-way. It causes other
|
||||
// Pause/Resume/Start operations to be completely ignored.
|
||||
func (s *StateSyncer) HardDisableSync() {
|
||||
s.pauseLock.Lock()
|
||||
s.hardDisabled = true
|
||||
s.pauseLock.Unlock()
|
||||
}
|
||||
|
||||
// Run is the long running method to perform state synchronization
|
||||
// between local and remote servers.
|
||||
func (s *StateSyncer) Run() {
|
||||
|
|
|
@ -69,7 +69,6 @@ import (
|
|||
"github.com/hashicorp/consul/api/watch"
|
||||
libdns "github.com/hashicorp/consul/internal/dnsutil"
|
||||
"github.com/hashicorp/consul/internal/gossip/librtt"
|
||||
proxytracker "github.com/hashicorp/consul/internal/mesh/proxy-tracker"
|
||||
"github.com/hashicorp/consul/ipaddr"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/lib/file"
|
||||
|
@ -639,9 +638,6 @@ func (a *Agent) Start(ctx context.Context) error {
|
|||
// create the state synchronization manager which performs
|
||||
// regular and on-demand state synchronizations (anti-entropy).
|
||||
a.sync = ae.NewStateSyncer(a.State, c.AEInterval, a.shutdownCh, a.logger)
|
||||
if a.baseDeps.UseV2Resources() {
|
||||
a.sync.HardDisableSync()
|
||||
}
|
||||
|
||||
err = validateFIPSConfig(a.config)
|
||||
if err != nil {
|
||||
|
@ -673,10 +669,6 @@ func (a *Agent) Start(ctx context.Context) error {
|
|||
return fmt.Errorf("failed to start Consul enterprise component: %v", err)
|
||||
}
|
||||
|
||||
// proxyTracker will be used in the creation of the XDS server and also
|
||||
// in the registration of the v2 xds controller
|
||||
var proxyTracker *proxytracker.ProxyTracker
|
||||
|
||||
// Setup either the client or the server.
|
||||
var consulServer *consul.Server
|
||||
if c.ServerMode {
|
||||
|
@ -716,13 +708,7 @@ func (a *Agent) Start(ctx context.Context) error {
|
|||
nil,
|
||||
)
|
||||
|
||||
if a.baseDeps.UseV2Resources() {
|
||||
proxyTracker = proxytracker.NewProxyTracker(proxytracker.ProxyTrackerConfig{
|
||||
Logger: a.logger.Named("proxy-tracker"),
|
||||
SessionLimiter: a.baseDeps.XDSStreamLimiter,
|
||||
})
|
||||
}
|
||||
consulServer, err = consul.NewServer(consulCfg, a.baseDeps.Deps, a.externalGRPCServer, incomingRPCLimiter, serverLogger, proxyTracker)
|
||||
consulServer, err = consul.NewServer(consulCfg, a.baseDeps.Deps, a.externalGRPCServer, incomingRPCLimiter, serverLogger)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to start Consul server: %v", err)
|
||||
}
|
||||
|
@ -746,10 +732,6 @@ func (a *Agent) Start(ctx context.Context) error {
|
|||
}
|
||||
}
|
||||
} else {
|
||||
if a.baseDeps.UseV2Resources() {
|
||||
return fmt.Errorf("can't start agent: client agents are not supported with v2 resources")
|
||||
}
|
||||
|
||||
// the conn is used to connect to the consul server agent
|
||||
conn, err := a.baseDeps.GRPCConnPool.ClientConn(a.baseDeps.RuntimeConfig.Datacenter)
|
||||
if err != nil {
|
||||
|
@ -895,7 +877,7 @@ func (a *Agent) Start(ctx context.Context) error {
|
|||
}
|
||||
|
||||
// Start grpc and grpc_tls servers.
|
||||
if err := a.listenAndServeGRPC(proxyTracker, consulServer); err != nil {
|
||||
if err := a.listenAndServeGRPC(consulServer); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -956,28 +938,20 @@ func (a *Agent) configureXDSServer(proxyWatcher xds.ProxyWatcher, server *consul
|
|||
// TODO(agentless): rather than asserting the concrete type of delegate, we
|
||||
// should add a method to the Delegate interface to build a ConfigSource.
|
||||
if server != nil {
|
||||
switch proxyWatcher.(type) {
|
||||
case *proxytracker.ProxyTracker:
|
||||
go func() {
|
||||
<-a.shutdownCh
|
||||
proxyWatcher.(*proxytracker.ProxyTracker).Shutdown()
|
||||
}()
|
||||
default:
|
||||
catalogCfg := catalogproxycfg.NewConfigSource(catalogproxycfg.Config{
|
||||
NodeName: a.config.NodeName,
|
||||
LocalState: a.State,
|
||||
LocalConfigSource: proxyWatcher,
|
||||
Manager: a.proxyConfig,
|
||||
GetStore: func() catalogproxycfg.Store { return server.FSM().State() },
|
||||
Logger: a.proxyConfig.Logger.Named("server-catalog"),
|
||||
SessionLimiter: a.baseDeps.XDSStreamLimiter,
|
||||
})
|
||||
go func() {
|
||||
<-a.shutdownCh
|
||||
catalogCfg.Shutdown()
|
||||
}()
|
||||
proxyWatcher = catalogCfg
|
||||
}
|
||||
catalogCfg := catalogproxycfg.NewConfigSource(catalogproxycfg.Config{
|
||||
NodeName: a.config.NodeName,
|
||||
LocalState: a.State,
|
||||
LocalConfigSource: proxyWatcher,
|
||||
Manager: a.proxyConfig,
|
||||
GetStore: func() catalogproxycfg.Store { return server.FSM().State() },
|
||||
Logger: a.proxyConfig.Logger.Named("server-catalog"),
|
||||
SessionLimiter: a.baseDeps.XDSStreamLimiter,
|
||||
})
|
||||
go func() {
|
||||
<-a.shutdownCh
|
||||
catalogCfg.Shutdown()
|
||||
}()
|
||||
proxyWatcher = catalogCfg
|
||||
}
|
||||
a.xdsServer = xds.NewServer(
|
||||
a.config.NodeName,
|
||||
|
@ -991,16 +965,11 @@ func (a *Agent) configureXDSServer(proxyWatcher xds.ProxyWatcher, server *consul
|
|||
a.xdsServer.Register(a.externalGRPCServer)
|
||||
}
|
||||
|
||||
func (a *Agent) listenAndServeGRPC(proxyTracker *proxytracker.ProxyTracker, server *consul.Server) error {
|
||||
func (a *Agent) listenAndServeGRPC(server *consul.Server) error {
|
||||
if len(a.config.GRPCAddrs) < 1 && len(a.config.GRPCTLSAddrs) < 1 {
|
||||
return nil
|
||||
}
|
||||
var proxyWatcher xds.ProxyWatcher
|
||||
if a.baseDeps.UseV2Resources() {
|
||||
proxyWatcher = proxyTracker
|
||||
} else {
|
||||
proxyWatcher = localproxycfg.NewConfigSource(a.proxyConfig)
|
||||
}
|
||||
var proxyWatcher xds.ProxyWatcher = localproxycfg.NewConfigSource(a.proxyConfig)
|
||||
|
||||
a.configureXDSServer(proxyWatcher, server)
|
||||
|
||||
|
|
|
@ -21,14 +21,15 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
"github.com/mitchellh/hashstructure"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/acl/resolver"
|
||||
"github.com/hashicorp/consul/agent/config"
|
||||
|
@ -79,46 +80,6 @@ func createACLTokenWithAgentReadPolicy(t *testing.T, srv *HTTPHandlers) string {
|
|||
return svcToken.SecretID
|
||||
}
|
||||
|
||||
func TestAgentEndpointsFailInV2(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
a := NewTestAgent(t, `experiments = ["resource-apis"]`)
|
||||
|
||||
checkRequest := func(method, url string) {
|
||||
t.Run(method+" "+url, func(t *testing.T) {
|
||||
assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, `{}`)
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("agent-self-with-params", func(t *testing.T) {
|
||||
req, err := http.NewRequest("GET", "/v1/agent/self?dc=dc1", nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
resp := httptest.NewRecorder()
|
||||
a.srv.h.ServeHTTP(resp, req)
|
||||
require.Equal(t, http.StatusOK, resp.Code)
|
||||
|
||||
_, err = io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
checkRequest("PUT", "/v1/agent/maintenance")
|
||||
checkRequest("GET", "/v1/agent/services")
|
||||
checkRequest("GET", "/v1/agent/service/web")
|
||||
checkRequest("GET", "/v1/agent/checks")
|
||||
checkRequest("GET", "/v1/agent/health/service/id/web")
|
||||
checkRequest("GET", "/v1/agent/health/service/name/web")
|
||||
checkRequest("PUT", "/v1/agent/check/register")
|
||||
checkRequest("PUT", "/v1/agent/check/deregister/web")
|
||||
checkRequest("PUT", "/v1/agent/check/pass/web")
|
||||
checkRequest("PUT", "/v1/agent/check/warn/web")
|
||||
checkRequest("PUT", "/v1/agent/check/fail/web")
|
||||
checkRequest("PUT", "/v1/agent/check/update/web")
|
||||
checkRequest("PUT", "/v1/agent/service/register")
|
||||
checkRequest("PUT", "/v1/agent/service/deregister/web")
|
||||
checkRequest("PUT", "/v1/agent/service/maintenance/web")
|
||||
}
|
||||
|
||||
func TestAgent_Services(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
@ -1660,6 +1621,7 @@ func newDefaultBaseDeps(t *testing.T) BaseDeps {
|
|||
}
|
||||
|
||||
func TestHTTPHandlers_AgentMetricsStream_ACLDeny(t *testing.T) {
|
||||
t.Skip("this test panics without a license manager in enterprise")
|
||||
bd := newDefaultBaseDeps(t)
|
||||
bd.Tokens = new(tokenStore.Store)
|
||||
sink := metrics.NewInmemSink(30*time.Millisecond, time.Second)
|
||||
|
@ -1691,6 +1653,7 @@ func TestHTTPHandlers_AgentMetricsStream_ACLDeny(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestHTTPHandlers_AgentMetricsStream(t *testing.T) {
|
||||
t.Skip("this test panics without a license manager in enterprise")
|
||||
bd := newDefaultBaseDeps(t)
|
||||
bd.Tokens = new(tokenStore.Store)
|
||||
sink := metrics.NewInmemSink(20*time.Millisecond, time.Second)
|
||||
|
|
|
@ -6,18 +6,17 @@ package agent
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/serf/coordinate"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/serf/coordinate"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/api"
|
||||
|
@ -31,49 +30,6 @@ func addQueryParam(req *http.Request, param, value string) {
|
|||
req.URL.RawQuery = q.Encode()
|
||||
}
|
||||
|
||||
func TestCatalogEndpointsFailInV2(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
a := NewTestAgent(t, `experiments = ["resource-apis"]`)
|
||||
|
||||
checkRequest := func(method, url string) {
|
||||
t.Run(method+" "+url, func(t *testing.T) {
|
||||
assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, "{}")
|
||||
})
|
||||
}
|
||||
|
||||
checkRequest("PUT", "/v1/catalog/register")
|
||||
checkRequest("GET", "/v1/catalog/connect/")
|
||||
checkRequest("PUT", "/v1/catalog/deregister")
|
||||
checkRequest("GET", "/v1/catalog/datacenters")
|
||||
checkRequest("GET", "/v1/catalog/nodes")
|
||||
checkRequest("GET", "/v1/catalog/services")
|
||||
checkRequest("GET", "/v1/catalog/service/")
|
||||
checkRequest("GET", "/v1/catalog/node/")
|
||||
checkRequest("GET", "/v1/catalog/node-services/")
|
||||
checkRequest("GET", "/v1/catalog/gateway-services/")
|
||||
}
|
||||
|
||||
func assertV1CatalogEndpointDoesNotWorkWithV2(t *testing.T, a *TestAgent, method, url string, requestBody string) {
|
||||
var body io.Reader
|
||||
switch method {
|
||||
case http.MethodPost, http.MethodPut:
|
||||
body = strings.NewReader(requestBody + "\n")
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(method, url, body)
|
||||
require.NoError(t, err)
|
||||
|
||||
resp := httptest.NewRecorder()
|
||||
a.srv.h.ServeHTTP(resp, req)
|
||||
require.Equal(t, http.StatusBadRequest, resp.Code)
|
||||
|
||||
got, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Contains(t, string(got), structs.ErrUsingV2CatalogExperiment.Error())
|
||||
}
|
||||
|
||||
func TestCatalogRegister_PeeringRegistration(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
|
|
@ -1144,23 +1144,6 @@ func (b *builder) build() (rt RuntimeConfig, err error) {
|
|||
return RuntimeConfig{}, fmt.Errorf("cache.entry_fetch_rate must be strictly positive, was: %v", rt.Cache.EntryFetchRate)
|
||||
}
|
||||
|
||||
// TODO(CC-6389): Remove once resource-apis is no longer considered experimental and is supported by HCP
|
||||
if stringslice.Contains(rt.Experiments, consul.CatalogResourceExperimentName) && rt.IsCloudEnabled() {
|
||||
// Allow override of this check for development/testing purposes. Should not be used in production
|
||||
if !stringslice.Contains(rt.Experiments, consul.HCPAllowV2ResourceAPIs) {
|
||||
return RuntimeConfig{}, fmt.Errorf("`experiments` cannot include 'resource-apis' when HCP `cloud` configuration is set")
|
||||
}
|
||||
}
|
||||
|
||||
// For now, disallow usage of several v2 experiments in secondary datacenters.
|
||||
if rt.ServerMode && rt.PrimaryDatacenter != rt.Datacenter {
|
||||
for _, name := range rt.Experiments {
|
||||
if !consul.IsExperimentAllowedOnSecondaries(name) {
|
||||
return RuntimeConfig{}, fmt.Errorf("`experiments` cannot include `%s` for servers in secondary datacenters", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if rt.UIConfig.MetricsProvider == "prometheus" {
|
||||
// Handle defaulting for the built-in version of prometheus.
|
||||
if len(rt.UIConfig.MetricsProxy.PathAllowlist) == 0 {
|
||||
|
|
|
@ -615,23 +615,9 @@ func TestBuilder_CheckExperimentsInSecondaryDatacenters(t *testing.T) {
|
|||
"primary server no experiments": {
|
||||
hcl: primary + `experiments = []`,
|
||||
},
|
||||
"primary server v2catalog": {
|
||||
hcl: primary + `experiments = ["resource-apis"]`,
|
||||
},
|
||||
"primary server v2tenancy": {
|
||||
hcl: primary + `experiments = ["v2tenancy"]`,
|
||||
},
|
||||
"secondary server no experiments": {
|
||||
hcl: secondary + `experiments = []`,
|
||||
},
|
||||
"secondary server v2catalog": {
|
||||
hcl: secondary + `experiments = ["resource-apis"]`,
|
||||
expectErr: true,
|
||||
},
|
||||
"secondary server v2tenancy": {
|
||||
hcl: secondary + `experiments = ["v2tenancy"]`,
|
||||
expectErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range cases {
|
||||
|
@ -641,67 +627,6 @@ func TestBuilder_CheckExperimentsInSecondaryDatacenters(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestBuilder_WarnCloudConfigWithResourceApis(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
hcl string
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "base_case",
|
||||
hcl: ``,
|
||||
},
|
||||
{
|
||||
name: "resource-apis_no_cloud",
|
||||
hcl: `experiments = ["resource-apis"]`,
|
||||
},
|
||||
{
|
||||
name: "cloud-config_no_experiments",
|
||||
hcl: `cloud{ resource_id = "abc" client_id = "abc" client_secret = "abc"}`,
|
||||
},
|
||||
{
|
||||
name: "cloud-config_resource-apis_experiment",
|
||||
hcl: `
|
||||
experiments = ["resource-apis"]
|
||||
cloud{ resource_id = "abc" client_id = "abc" client_secret = "abc"}`,
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "cloud-config_other_experiment",
|
||||
hcl: `
|
||||
experiments = ["test"]
|
||||
cloud{ resource_id = "abc" client_id = "abc" client_secret = "abc"}`,
|
||||
},
|
||||
{
|
||||
name: "cloud-config_resource-apis_experiment_override",
|
||||
hcl: `
|
||||
experiments = ["resource-apis", "hcp-v2-resource-apis"]
|
||||
cloud{ resource_id = "abc" client_id = "abc" client_secret = "abc"}`,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
// using dev mode skips the need for a data dir
|
||||
devMode := true
|
||||
builderOpts := LoadOpts{
|
||||
DevMode: &devMode,
|
||||
Overrides: []Source{
|
||||
FileSource{
|
||||
Name: "overrides",
|
||||
Format: "hcl",
|
||||
Data: tc.hcl,
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := Load(builderOpts)
|
||||
if tc.expectErr {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "cannot include 'resource-apis' when HCP")
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuilder_CloudConfigWithEnvironmentVars(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
hcl string
|
||||
|
|
|
@ -6015,24 +6015,6 @@ func TestLoad_IntegrationWithFlags(t *testing.T) {
|
|||
rt.RaftLogStoreConfig.WAL.SegmentSize = 64 * 1024 * 1024
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
desc: "logstore defaults",
|
||||
args: []string{
|
||||
`-data-dir=` + dataDir,
|
||||
},
|
||||
json: []string{`
|
||||
{
|
||||
"experiments": ["resource-apis"]
|
||||
}
|
||||
`},
|
||||
hcl: []string{`experiments=["resource-apis"]`},
|
||||
expected: func(rt *RuntimeConfig) {
|
||||
rt.DataDir = dataDir
|
||||
rt.RaftLogStoreConfig.Backend = consul.LogStoreBackendDefault
|
||||
rt.RaftLogStoreConfig.WAL.SegmentSize = 64 * 1024 * 1024
|
||||
rt.Experiments = []string{"resource-apis"}
|
||||
},
|
||||
})
|
||||
run(t, testCase{
|
||||
// this was a bug in the initial config commit. Specifying part of this
|
||||
// stanza should still result in sensible defaults for the other parts.
|
||||
|
|
|
@ -20,23 +20,6 @@ import (
|
|||
"github.com/hashicorp/consul/testrpc"
|
||||
)
|
||||
|
||||
func TestConfigEndpointsFailInV2(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
a := NewTestAgent(t, `experiments = ["resource-apis"]`)
|
||||
|
||||
checkRequest := func(method, url string) {
|
||||
t.Run(method+" "+url, func(t *testing.T) {
|
||||
assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, `{"kind":"service-defaults", "name":"web"}`)
|
||||
})
|
||||
}
|
||||
|
||||
checkRequest("GET", "/v1/config/service-defaults")
|
||||
checkRequest("GET", "/v1/config/service-defaults/web")
|
||||
checkRequest("DELETE", "/v1/config/service-defaults/web")
|
||||
checkRequest("PUT", "/v1/config")
|
||||
}
|
||||
|
||||
func TestConfig_Get(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
|
|
@ -23,8 +23,6 @@ type CertURI interface {
|
|||
}
|
||||
|
||||
var (
|
||||
spiffeIDWorkloadIdentityRegexp = regexp.MustCompile(
|
||||
`^(?:/ap/([^/]+))/ns/([^/]+)/identity/([^/]+)$`)
|
||||
spiffeIDServiceRegexp = regexp.MustCompile(
|
||||
`^(?:/ap/([^/]+))?/ns/([^/]+)/dc/([^/]+)/svc/([^/]+)$`)
|
||||
spiffeIDAgentRegexp = regexp.MustCompile(
|
||||
|
@@ -96,32 +94,6 @@ func ParseCertURI(input *url.URL) (CertURI, error) {
			Datacenter: dc,
			Service:    service,
		}, nil
	} else if v := spiffeIDWorkloadIdentityRegexp.FindStringSubmatch(path); v != nil {
		// Determine the values. We assume they're reasonable to save cycles,
		// but if the raw path is not empty that means that something is
		// URL encoded so we go to the slow path.
		ap := v[1]
		ns := v[2]
		workloadIdentity := v[3]
		if input.RawPath != "" {
			var err error
			if ap, err = url.PathUnescape(v[1]); err != nil {
				return nil, fmt.Errorf("Invalid admin partition: %s", err)
			}
			if ns, err = url.PathUnescape(v[2]); err != nil {
				return nil, fmt.Errorf("Invalid namespace: %s", err)
			}
			if workloadIdentity, err = url.PathUnescape(v[3]); err != nil {
				return nil, fmt.Errorf("Invalid workload identity: %s", err)
			}
		}

		return &SpiffeIDWorkloadIdentity{
			TrustDomain:      input.Host,
			Partition:        ap,
			Namespace:        ns,
			WorkloadIdentity: workloadIdentity,
		}, nil
	} else if v := spiffeIDAgentRegexp.FindStringSubmatch(path); v != nil {
		// Determine the values. We assume they're reasonable to save cycles,
		// but if the raw path is not empty that means that something is
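
The removed ParseCertURI branch above takes its "slow path" through url.PathUnescape whenever the request URL carries a non-empty RawPath, i.e. when a path segment was percent-encoded. A minimal standard-library sketch of that behaviour follows; it is standalone and none of its identifiers belong to Consul's connect package.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("spiffe://1234.consul/ap/team%2Fa/ns/default/identity/web")
	if err != nil {
		panic(err)
	}
	// Path holds the decoded segments; RawPath keeps the original encoding and
	// is only populated when the two differ, which is the cue to take the
	// "slow path" and unescape each captured segment individually.
	fmt.Println(u.Path)    // /ap/team/a/ns/default/identity/web
	fmt.Println(u.RawPath) // /ap/team%2Fa/ns/default/identity/web

	seg, err := url.PathUnescape("team%2Fa")
	fmt.Println(seg, err) // team/a <nil>
}
```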
@@ -8,7 +8,6 @@ import (
	"net/url"

	"github.com/hashicorp/consul/acl"
	"github.com/hashicorp/consul/proto-public/pbresource"
)

// SpiffeIDService is the structure to represent the SPIFFE ID for a service.
@@ -53,14 +52,3 @@ func (id SpiffeIDService) uriPath() string {
	}
	return path
}

// SpiffeIDFromIdentityRef creates the SPIFFE ID from a workload identity.
// TODO (ishustava): make sure ref type is workload identity.
func SpiffeIDFromIdentityRef(trustDomain string, ref *pbresource.Reference) string {
	return SpiffeIDWorkloadIdentity{
		TrustDomain:      trustDomain,
		Partition:        ref.Tenancy.Partition,
		Namespace:        ref.Tenancy.Namespace,
		WorkloadIdentity: ref.Name,
	}.URI().String()
}
@@ -51,12 +51,6 @@ func (id SpiffeIDSigning) CanSign(cu CertURI) bool {
		// worry about Unicode domains if we start allowing customisation beyond the
		// built-in cluster ids.
		return strings.ToLower(other.Host) == id.Host()
	case *SpiffeIDWorkloadIdentity:
		// The trust domain component of the workload identity SPIFFE ID must be an exact match for now under
		// ascii case folding (since hostnames are case-insensitive). Later we might
		// worry about Unicode domains if we start allowing customisation beyond the
		// built-in cluster ids.
		return strings.ToLower(other.TrustDomain) == id.Host()
	case *SpiffeIDMeshGateway:
		// The host component of the mesh gateway SPIFFE ID must be an exact match for now under
		// ascii case folding (since hostnames are case-insensitive). Later we might
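
The CanSign comments above describe an exact-match comparison of the trust domain against the signing host under ASCII case folding. A small illustrative sketch of that comparison using only the standard library; strings.EqualFold is shown as an equivalent alternative for hostnames, not as what Consul itself uses.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	trustDomain := "11111111-2222-3333-4444-555555555555.CONsuL"
	signingHost := "11111111-2222-3333-4444-555555555555.consul"

	// ASCII case folding via ToLower, as in the removed comparison.
	fmt.Println(strings.ToLower(trustDomain) == signingHost) // true

	// strings.EqualFold applies Unicode simple case folding, a superset of the
	// ASCII behaviour; for plain hostnames the two agree.
	fmt.Println(strings.EqualFold(trustDomain, signingHost)) // true
}
```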
@ -98,30 +98,6 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) {
|
|||
input: &SpiffeIDService{Host: TestClusterID + ".fake", Namespace: "default", Datacenter: "dc1", Service: "web"},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "workload - good",
|
||||
id: testSigning,
|
||||
input: &SpiffeIDWorkloadIdentity{TrustDomain: TestClusterID + ".consul", Namespace: "default", WorkloadIdentity: "web"},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "workload - good mixed case",
|
||||
id: testSigning,
|
||||
input: &SpiffeIDWorkloadIdentity{TrustDomain: strings.ToUpper(TestClusterID) + ".CONsuL", Namespace: "defAUlt", WorkloadIdentity: "WEB"},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "workload - different cluster",
|
||||
id: testSigning,
|
||||
input: &SpiffeIDWorkloadIdentity{TrustDomain: "55555555-4444-3333-2222-111111111111.consul", Namespace: "default", WorkloadIdentity: "web"},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "workload - different TLD",
|
||||
id: testSigning,
|
||||
input: &SpiffeIDWorkloadIdentity{TrustDomain: TestClusterID + ".fake", Namespace: "default", WorkloadIdentity: "web"},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "mesh gateway - good",
|
||||
id: testSigning,
|
||||
|
|
|
@ -51,61 +51,6 @@ func TestParseCertURIFromString(t *testing.T) {
|
|||
},
|
||||
ParseError: "",
|
||||
},
|
||||
{
|
||||
Name: "basic workload ID",
|
||||
URI: "spiffe://1234.consul/ap/default/ns/default/identity/web",
|
||||
Struct: &SpiffeIDWorkloadIdentity{
|
||||
TrustDomain: "1234.consul",
|
||||
Partition: defaultEntMeta.PartitionOrDefault(),
|
||||
Namespace: "default",
|
||||
WorkloadIdentity: "web",
|
||||
},
|
||||
ParseError: "",
|
||||
},
|
||||
{
|
||||
Name: "basic workload ID with nondefault partition",
|
||||
URI: "spiffe://1234.consul/ap/bizdev/ns/default/identity/web",
|
||||
Struct: &SpiffeIDWorkloadIdentity{
|
||||
TrustDomain: "1234.consul",
|
||||
Partition: "bizdev",
|
||||
Namespace: "default",
|
||||
WorkloadIdentity: "web",
|
||||
},
|
||||
ParseError: "",
|
||||
},
|
||||
{
|
||||
Name: "workload ID error - missing identity",
|
||||
URI: "spiffe://1234.consul/ns/default",
|
||||
Struct: &SpiffeIDWorkloadIdentity{
|
||||
TrustDomain: "1234.consul",
|
||||
Partition: defaultEntMeta.PartitionOrDefault(),
|
||||
Namespace: "default",
|
||||
WorkloadIdentity: "web",
|
||||
},
|
||||
ParseError: "SPIFFE ID is not in the expected format",
|
||||
},
|
||||
{
|
||||
Name: "workload ID error - missing partition",
|
||||
URI: "spiffe://1234.consul/ns/default/identity/web",
|
||||
Struct: &SpiffeIDWorkloadIdentity{
|
||||
TrustDomain: "1234.consul",
|
||||
Partition: defaultEntMeta.PartitionOrDefault(),
|
||||
Namespace: "default",
|
||||
WorkloadIdentity: "web",
|
||||
},
|
||||
ParseError: "SPIFFE ID is not in the expected format",
|
||||
},
|
||||
{
|
||||
Name: "workload ID error - missing namespace",
|
||||
URI: "spiffe://1234.consul/ap/default/identity/web",
|
||||
Struct: &SpiffeIDWorkloadIdentity{
|
||||
TrustDomain: "1234.consul",
|
||||
Partition: defaultEntMeta.PartitionOrDefault(),
|
||||
Namespace: "default",
|
||||
WorkloadIdentity: "web",
|
||||
},
|
||||
ParseError: "SPIFFE ID is not in the expected format",
|
||||
},
|
||||
{
|
||||
Name: "basic agent ID",
|
||||
URI: "spiffe://1234.consul/agent/client/dc/dc1/id/uuid",
|
||||
|
|
|
@@ -1,40 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package connect

import (
	"fmt"
	"net/url"
)

// SpiffeIDWorkloadIdentity is the structure to represent the SPIFFE ID for a workload.
type SpiffeIDWorkloadIdentity struct {
	TrustDomain      string
	Partition        string
	Namespace        string
	WorkloadIdentity string
}

// URI returns the *url.URL for this SPIFFE ID.
func (id SpiffeIDWorkloadIdentity) URI() *url.URL {
	var result url.URL
	result.Scheme = "spiffe"
	result.Host = id.TrustDomain
	result.Path = id.uriPath()
	return &result
}

func (id SpiffeIDWorkloadIdentity) uriPath() string {
	// Although CE has no support for partitions, it still needs to be able to
	// handle exportedPartition from peered Consul Enterprise clusters in order
	// to generate the correct SpiffeID.
	// We intentionally avoid using pbpartition.DefaultName here to be CE friendly.
	path := fmt.Sprintf("/ap/%s/ns/%s/identity/%s",
		id.Partition,
		id.Namespace,
		id.WorkloadIdentity,
	)

	return path
}
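
For reference, the deleted type built identities of the form spiffe://&lt;trust-domain&gt;/ap/&lt;partition&gt;/ns/&lt;namespace&gt;/identity/&lt;name&gt;. Below is a self-contained sketch of the same build-and-parse round trip, assuming that path layout; the helper names are illustrative and not part of Consul.

```go
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

// workloadPath mirrors the uriPath format of the removed type:
// /ap/<partition>/ns/<namespace>/identity/<workload identity>.
func workloadPath(partition, namespace, identity string) string {
	return fmt.Sprintf("/ap/%s/ns/%s/identity/%s", partition, namespace, identity)
}

var workloadRe = regexp.MustCompile(`^/ap/([^/]+)/ns/([^/]+)/identity/([^/]+)$`)

func main() {
	u := url.URL{
		Scheme: "spiffe",
		Host:   "1234.consul",
		Path:   workloadPath("default", "default", "web"),
	}
	fmt.Println(u.String()) // spiffe://1234.consul/ap/default/ns/default/identity/web

	if m := workloadRe.FindStringSubmatch(u.Path); m != nil {
		fmt.Println(m[1], m[2], m[3]) // default default web
	}
}
```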
@@ -1,18 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

//go:build !consulent

package connect

import (
	"github.com/hashicorp/consul/acl"
)

// TODO: this will need to somehow be updated to set namespace here when we include namespaces in CE

// GetEnterpriseMeta will synthesize an EnterpriseMeta struct from the SpiffeIDWorkloadIdentity.
// in CE this just returns an empty (but never nil) struct pointer
func (id SpiffeIDWorkloadIdentity) GetEnterpriseMeta() *acl.EnterpriseMeta {
	return &acl.EnterpriseMeta{}
}
@@ -1,31 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package connect

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestSpiffeIDWorkloadURI(t *testing.T) {
	t.Run("spiffe id workload uri default tenancy", func(t *testing.T) {
		wl := &SpiffeIDWorkloadIdentity{
			TrustDomain:      "1234.consul",
			WorkloadIdentity: "web",
			Partition:        "default",
			Namespace:        "default",
		}
		require.Equal(t, "spiffe://1234.consul/ap/default/ns/default/identity/web", wl.URI().String())
	})
	t.Run("spiffe id workload uri non-default tenancy", func(t *testing.T) {
		wl := &SpiffeIDWorkloadIdentity{
			TrustDomain:      "1234.consul",
			WorkloadIdentity: "web",
			Partition:        "part1",
			Namespace:        "dev",
		}
		require.Equal(t, "spiffe://1234.consul/ap/part1/ns/dev/identity/web", wl.URI().String())
	})
}
@@ -5,7 +5,6 @@ package consul

import (
	"context"
	"errors"
	"fmt"
	"net"
	"strconv"
@@ -16,10 +15,7 @@ import (

	"github.com/armon/go-metrics"
	"github.com/armon/go-metrics/prometheus"
	"github.com/google/go-cmp/cmp"
	"github.com/oklog/ulid/v2"
	"golang.org/x/time/rate"
	"google.golang.org/protobuf/types/known/anypb"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/go-uuid"
@@ -32,13 +28,8 @@ import (
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/structs/aclfilter"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/internal/resource"
	"github.com/hashicorp/consul/internal/storage"
	"github.com/hashicorp/consul/lib"
	"github.com/hashicorp/consul/logging"
	pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbresource"
	pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1"
)

var LeaderSummaries = []prometheus.SummaryDefinition{
@@ -349,18 +340,6 @@ func (s *Server) establishLeadership(ctx context.Context) error {
		s.startLogVerification(ctx)
	}

	if s.useV2Tenancy {
		if err := s.initTenancy(ctx, s.storageBackend); err != nil {
			return err
		}
	}

	if s.useV2Resources {
		if err := s.initConsulService(ctx, pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan)); err != nil {
			return err
		}
	}

	if s.config.Reporting.License.Enabled && s.reportingManager != nil {
		s.reportingManager.StartReportingAgent()
	}
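
The hunk above drops two experiment-gated steps from leadership establishment. A rough, hypothetical sketch of the pattern being removed follows; the field and helper names are illustrative, and the real wiring that derives these flags from the agent's experiments config differs.

```go
// Illustrative only: flags like useV2Tenancy were toggled from the agent's
// "experiments" configuration and consulted once leadership is won. The type
// and helpers below are hypothetical, not Consul's actual types.
package main

import (
	"context"
	"fmt"
	"slices"
)

type leader struct {
	useV2Tenancy bool
}

func newLeader(experiments []string) *leader {
	return &leader{useV2Tenancy: slices.Contains(experiments, "v2tenancy")}
}

func (l *leader) establishLeadership(ctx context.Context) error {
	if l.useV2Tenancy {
		// One-time setup that only runs when the experiment is enabled,
		// e.g. seeding the default partition and namespace.
		fmt.Println("initialising v2 tenancy defaults")
	}
	return nil
}

func main() {
	_ = newLeader([]string{"v2tenancy"}).establishLeadership(context.Background())
}
```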
@ -771,12 +750,6 @@ func (s *Server) runACLReplicator(
|
|||
|
||||
index, exit, err := replicateFunc(ctx, logger, lastRemoteIndex)
|
||||
if exit {
|
||||
metrics.SetGauge([]string{"leader", "replication", metricName, "status"},
|
||||
0,
|
||||
)
|
||||
metrics.SetGauge([]string{"leader", "replication", metricName, "index"},
|
||||
0,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1316,121 +1289,3 @@ func (s *serversIntentionsAsConfigEntriesInfo) update(srv *metadata.Server) bool
|
|||
// prevent continuing server evaluation
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *Server) initConsulService(ctx context.Context, client pbresource.ResourceServiceClient) error {
|
||||
service := &pbcatalog.Service{
|
||||
Workloads: &pbcatalog.WorkloadSelector{
|
||||
Prefixes: []string{consulWorkloadPrefix},
|
||||
},
|
||||
Ports: []*pbcatalog.ServicePort{
|
||||
{
|
||||
TargetPort: consulPortNameServer,
|
||||
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
|
||||
// No virtual port defined for now, as we assume this is generally for Service Discovery
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
serviceData, err := anypb.New(service)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not convert Service to `any` message: %w", err)
|
||||
}
|
||||
|
||||
// create a default namespace in default partition
|
||||
serviceID := &pbresource.ID{
|
||||
Type: pbcatalog.ServiceType,
|
||||
Name: structs.ConsulServiceName,
|
||||
Tenancy: resource.DefaultNamespacedTenancy(),
|
||||
}
|
||||
|
||||
serviceResource := &pbresource.Resource{
|
||||
Id: serviceID,
|
||||
Data: serviceData,
|
||||
}
|
||||
|
||||
res, err := client.Read(ctx, &pbresource.ReadRequest{Id: serviceID})
|
||||
if err != nil && !grpcNotFoundErr(err) {
|
||||
return fmt.Errorf("failed to read the %s Service: %w", structs.ConsulServiceName, err)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
existingService := res.GetResource()
|
||||
s.logger.Debug("existingService consul Service found")
|
||||
|
||||
// If the Service is identical, we're done.
|
||||
if cmp.Equal(serviceResource, existingService, resourceCmpOptions...) {
|
||||
s.logger.Debug("no updates to perform on consul Service")
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the existing Service is different, add the Version to the patch for CAS write.
|
||||
serviceResource.Id = existingService.Id
|
||||
serviceResource.Version = existingService.Version
|
||||
}
|
||||
|
||||
_, err = client.Write(ctx, &pbresource.WriteRequest{Resource: serviceResource})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create the %s service: %w", structs.ConsulServiceName, err)
|
||||
}
|
||||
|
||||
s.logger.Info("Created consul Service in catalog")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) initTenancy(ctx context.Context, b storage.Backend) error {
|
||||
// we write these defaults directly to the storage backend
|
||||
// without going through the resource service since tenancy
|
||||
// validation hooks block writes to the default namespace
|
||||
// and partition.
|
||||
if err := s.createDefaultPartition(ctx, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.createDefaultNamespace(ctx, b); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) createDefaultNamespace(ctx context.Context, b storage.Backend) error {
|
||||
readID := &pbresource.ID{
|
||||
Type: pbtenancy.NamespaceType,
|
||||
Name: resource.DefaultNamespaceName,
|
||||
Tenancy: resource.DefaultPartitionedTenancy(),
|
||||
}
|
||||
|
||||
read, err := b.Read(ctx, storage.StrongConsistency, readID)
|
||||
|
||||
if err != nil && !errors.Is(err, storage.ErrNotFound) {
|
||||
return fmt.Errorf("failed to read the %q namespace: %v", resource.DefaultNamespaceName, err)
|
||||
}
|
||||
if read == nil && errors.Is(err, storage.ErrNotFound) {
|
||||
nsData, err := anypb.New(&pbtenancy.Namespace{Description: "default namespace in default partition"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// create a default namespace in default partition
|
||||
nsID := &pbresource.ID{
|
||||
Type: pbtenancy.NamespaceType,
|
||||
Name: resource.DefaultNamespaceName,
|
||||
Tenancy: resource.DefaultPartitionedTenancy(),
|
||||
Uid: ulid.Make().String(),
|
||||
}
|
||||
|
||||
_, err = b.WriteCAS(ctx, &pbresource.Resource{
|
||||
Id: nsID,
|
||||
Generation: ulid.Make().String(),
|
||||
Data: nsData,
|
||||
Metadata: map[string]string{
|
||||
"generated_at": time.Now().Format(time.RFC3339),
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create the %q namespace: %v", resource.DefaultNamespaceName, err)
|
||||
}
|
||||
}
|
||||
s.logger.Info("Created", "namespace", resource.DefaultNamespaceName)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@@ -1,17 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

//go:build !consulent

package consul

import (
	"context"

	"github.com/hashicorp/consul/internal/storage"
)

func (s *Server) createDefaultPartition(ctx context.Context, b storage.Backend) error {
	// no-op
	return nil
}
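
The deleted file above is the community-edition half of a build-tag pair: without the consulent tag, createDefaultPartition compiles to a no-op, and an enterprise-only source file with the opposite constraint supplies the real behaviour. A hypothetical sketch of what such a counterpart looks like; the file name and body are illustrative, since the actual enterprise source is not public.

```go
// tenancy_ent.go (hypothetical illustration of the enterprise-side file)
//go:build consulent

package consul

import (
	"context"

	"github.com/hashicorp/consul/internal/storage"
)

// createDefaultPartition would seed the default admin partition in the
// storage backend, analogous to createDefaultNamespace in leader.go.
func (s *Server) createDefaultPartition(ctx context.Context, b storage.Backend) error {
	// enterprise-only implementation lives here
	return nil
}
```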
@ -6,17 +6,7 @@
|
|||
package consul
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/internal/gossip/libserf"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/storage"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
)
|
||||
|
||||
func updateSerfTags(s *Server, key, value string) {
|
||||
|
@ -26,41 +16,3 @@ func updateSerfTags(s *Server, key, value string) {
|
|||
libserf.UpdateTag(s.serfWAN, key, value)
|
||||
}
|
||||
}
|
||||
|
||||
func TestServer_InitTenancy(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
_, conf := testServerConfig(t)
|
||||
deps := newDefaultDeps(t, conf)
|
||||
deps.Experiments = []string{"v2tenancy"}
|
||||
deps.Registry = NewTypeRegistry()
|
||||
|
||||
s, err := newServerWithDeps(t, conf, deps)
|
||||
require.NoError(t, err)
|
||||
|
||||
// first initTenancy call happens here
|
||||
waitForLeaderEstablishment(t, s)
|
||||
testrpc.WaitForLeader(t, s.RPC, "dc1")
|
||||
|
||||
nsID := &pbresource.ID{
|
||||
Type: pbtenancy.NamespaceType,
|
||||
Tenancy: resource.DefaultPartitionedTenancy(),
|
||||
Name: resource.DefaultNamespaceName,
|
||||
}
|
||||
|
||||
ns, err := s.storageBackend.Read(context.Background(), storage.StrongConsistency, nsID)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, resource.DefaultNamespaceName, ns.Id.Name)
|
||||
|
||||
// explicitly call initiTenancy to verify we do not re-create namespace
|
||||
err = s.initTenancy(context.Background(), s.storageBackend)
|
||||
require.NoError(t, err)
|
||||
|
||||
// read again
|
||||
actual, err := s.storageBackend.Read(context.Background(), storage.StrongConsistency, nsID)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, ns.Id.Uid, actual.Id.Uid)
|
||||
require.Equal(t, ns.Generation, actual.Generation)
|
||||
require.Equal(t, ns.Version, actual.Version)
|
||||
}
|
||||
|
|
|
@ -14,9 +14,10 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
|
@ -1455,11 +1456,6 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au
|
|||
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different datacenter: %s, "+
|
||||
"we are %s", v.Datacenter, dc)
|
||||
}
|
||||
case *connect.SpiffeIDWorkloadIdentity:
|
||||
v.GetEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if err := allow.IdentityWriteAllowed(v.WorkloadIdentity, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *connect.SpiffeIDAgent:
|
||||
v.GetEnterpriseMeta().FillAuthzContext(&authzContext)
|
||||
if err := allow.NodeWriteAllowed(v.Agent, &authzContext); err != nil {
|
||||
|
@ -1520,7 +1516,6 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
|
|||
agentID, isAgent := spiffeID.(*connect.SpiffeIDAgent)
|
||||
serverID, isServer := spiffeID.(*connect.SpiffeIDServer)
|
||||
mgwID, isMeshGateway := spiffeID.(*connect.SpiffeIDMeshGateway)
|
||||
wID, isWorkloadIdentity := spiffeID.(*connect.SpiffeIDWorkloadIdentity)
|
||||
|
||||
var entMeta acl.EnterpriseMeta
|
||||
switch {
|
||||
|
@ -1530,12 +1525,6 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
|
|||
"we are %s", serviceID.Host, signingID.Host())
|
||||
}
|
||||
entMeta.Merge(serviceID.GetEnterpriseMeta())
|
||||
case isWorkloadIdentity:
|
||||
if !signingID.CanSign(spiffeID) {
|
||||
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+
|
||||
"we are %s", wID.TrustDomain, signingID.Host())
|
||||
}
|
||||
entMeta.Merge(wID.GetEnterpriseMeta())
|
||||
case isMeshGateway:
|
||||
if !signingID.CanSign(spiffeID) {
|
||||
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+
|
||||
|
@ -1658,9 +1647,6 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
|
|||
case isService:
|
||||
reply.Service = serviceID.Service
|
||||
reply.ServiceURI = cert.URIs[0].String()
|
||||
case isWorkloadIdentity:
|
||||
reply.WorkloadIdentity = wID.WorkloadIdentity
|
||||
reply.WorkloadIdentityURI = cert.URIs[0].String()
|
||||
case isMeshGateway:
|
||||
reply.Kind = structs.ServiceKindMeshGateway
|
||||
reply.KindURI = cert.URIs[0].String()
|
||||
|
|
|
@ -19,13 +19,14 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
|
||||
"github.com/hashicorp/consul-net-rpc/net/rpc"
|
||||
vaultapi "github.com/hashicorp/vault/api"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
|
||||
"github.com/hashicorp/consul-net-rpc/net/rpc"
|
||||
vaultapi "github.com/hashicorp/vault/api"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/connect/ca"
|
||||
|
@ -566,7 +567,7 @@ func TestCAManager_Initialize_Logging(t *testing.T) {
|
|||
deps := newDefaultDeps(t, conf1)
|
||||
deps.Logger = logger
|
||||
|
||||
s1, err := NewServer(conf1, deps, grpc.NewServer(), nil, logger, nil)
|
||||
s1, err := NewServer(conf1, deps, grpc.NewServer(), nil, logger)
|
||||
require.NoError(t, err)
|
||||
defer s1.Shutdown()
|
||||
testrpc.WaitForLeader(t, s1.RPC, "dc1")
|
||||
|
@ -1317,12 +1318,6 @@ func TestCAManager_AuthorizeAndSignCertificate(t *testing.T) {
|
|||
Host: "test-host",
|
||||
Partition: "test-partition",
|
||||
}.URI()
|
||||
identityURL := connect.SpiffeIDWorkloadIdentity{
|
||||
TrustDomain: "test-trust-domain",
|
||||
Partition: "test-partition",
|
||||
Namespace: "test-namespace",
|
||||
WorkloadIdentity: "test-workload-identity",
|
||||
}.URI()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
|
@ -1418,15 +1413,6 @@ func TestCAManager_AuthorizeAndSignCertificate(t *testing.T) {
|
|||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "err_identity_write_not_allowed",
|
||||
expectErr: "Permission denied",
|
||||
getCSR: func() *x509.CertificateRequest {
|
||||
return &x509.CertificateRequest{
|
||||
URIs: []*url.URL{identityURL},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
|
|
|
@ -1,411 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package consul
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/testing/protocmp"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/metadata"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/types"
|
||||
)
|
||||
|
||||
const (
|
||||
consulWorkloadPrefix = "consul-server-"
|
||||
consulPortNameServer = "server"
|
||||
)
|
||||
|
||||
var _ ConsulRegistrator = (*V2ConsulRegistrator)(nil)
|
||||
|
||||
var resourceCmpOptions = []cmp.Option{
|
||||
protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"),
|
||||
protocmp.IgnoreFields(&pbresource.ID{}, "uid"),
|
||||
protocmp.Transform(),
|
||||
// Stringify any type passed to the sorter so that we can reliably compare most values.
|
||||
cmpopts.SortSlices(func(a, b any) bool { return fmt.Sprintf("%v", a) < fmt.Sprintf("%v", b) }),
|
||||
}
|
||||
|
||||
type V2ConsulRegistrator struct {
|
||||
Logger hclog.Logger
|
||||
NodeName string
|
||||
EntMeta *acl.EnterpriseMeta
|
||||
|
||||
Client pbresource.ResourceServiceClient
|
||||
}
|
||||
|
||||
// HandleAliveMember is used to ensure the server is registered as a Workload
|
||||
// with a passing health check.
|
||||
func (r V2ConsulRegistrator) HandleAliveMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, joinServer func(m serf.Member, parts *metadata.Server) error) error {
|
||||
valid, parts := metadata.IsConsulServer(member)
|
||||
if !valid {
|
||||
return nil
|
||||
}
|
||||
|
||||
if nodeEntMeta == nil {
|
||||
nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition()
|
||||
}
|
||||
|
||||
// Attempt to join the consul server, regardless of the existing catalog state
|
||||
if err := joinServer(member, parts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Logger.Info("member joined, creating catalog entries",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
|
||||
workloadResource, err := r.createWorkloadFromMember(member, parts, nodeEntMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check if the Workload already exists and if it's the same
|
||||
res, err := r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: workloadResource.Id})
|
||||
if err != nil && !grpcNotFoundErr(err) {
|
||||
return fmt.Errorf("error checking for existing Workload %s: %w", workloadResource.Id.Name, err)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
existingWorkload := res.GetResource()
|
||||
|
||||
r.Logger.Debug("existing Workload matching the member found",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
|
||||
// If the Workload is identical, move to updating the health status
|
||||
if cmp.Equal(workloadResource, existingWorkload, resourceCmpOptions...) {
|
||||
r.Logger.Debug("no updates to perform on member Workload",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
goto HEALTHSTATUS
|
||||
}
|
||||
|
||||
// If the existing Workload different, add the existing Version into the patch for CAS write
|
||||
workloadResource.Id = existingWorkload.Id
|
||||
workloadResource.Version = existingWorkload.Version
|
||||
}
|
||||
|
||||
if _, err := r.Client.Write(context.TODO(), &pbresource.WriteRequest{Resource: workloadResource}); err != nil {
|
||||
return fmt.Errorf("failed to write Workload %s: %w", workloadResource.Id.Name, err)
|
||||
}
|
||||
|
||||
r.Logger.Info("updated consul Workload in catalog",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
|
||||
HEALTHSTATUS:
|
||||
hsResource, err := r.createHealthStatusFromMember(member, workloadResource.Id, true, nodeEntMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check if the HealthStatus already exists and if it's the same
|
||||
res, err = r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: hsResource.Id})
|
||||
if err != nil && !grpcNotFoundErr(err) {
|
||||
return fmt.Errorf("error checking for existing HealthStatus %s: %w", hsResource.Id.Name, err)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
existingHS := res.GetResource()
|
||||
|
||||
r.Logger.Debug("existing HealthStatus matching the member found",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
|
||||
// If the HealthStatus is identical, we're done.
|
||||
if cmp.Equal(hsResource, existingHS, resourceCmpOptions...) {
|
||||
r.Logger.Debug("no updates to perform on member HealthStatus",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the existing HealthStatus is different, add the Version to the patch for CAS write.
|
||||
hsResource.Id = existingHS.Id
|
||||
hsResource.Version = existingHS.Version
|
||||
}
|
||||
|
||||
if _, err := r.Client.Write(context.TODO(), &pbresource.WriteRequest{Resource: hsResource}); err != nil {
|
||||
return fmt.Errorf("failed to write HealthStatus %s: %w", hsResource.Id.Name, err)
|
||||
}
|
||||
r.Logger.Info("updated consul HealthStatus in catalog",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r V2ConsulRegistrator) createWorkloadFromMember(member serf.Member, parts *metadata.Server, nodeEntMeta *acl.EnterpriseMeta) (*pbresource.Resource, error) {
|
||||
workloadMeta := map[string]string{
|
||||
"read_replica": strconv.FormatBool(member.Tags["read_replica"] == "1"),
|
||||
"raft_version": strconv.Itoa(parts.RaftVersion),
|
||||
"serf_protocol_current": strconv.FormatUint(uint64(member.ProtocolCur), 10),
|
||||
"serf_protocol_min": strconv.FormatUint(uint64(member.ProtocolMin), 10),
|
||||
"serf_protocol_max": strconv.FormatUint(uint64(member.ProtocolMax), 10),
|
||||
"version": parts.Build.String(),
|
||||
}
|
||||
|
||||
if parts.ExternalGRPCPort > 0 {
|
||||
workloadMeta["grpc_port"] = strconv.Itoa(parts.ExternalGRPCPort)
|
||||
}
|
||||
if parts.ExternalGRPCTLSPort > 0 {
|
||||
workloadMeta["grpc_tls_port"] = strconv.Itoa(parts.ExternalGRPCTLSPort)
|
||||
}
|
||||
|
||||
if parts.Port < 0 || parts.Port > 65535 {
|
||||
return nil, fmt.Errorf("invalid port: %d", parts.Port)
|
||||
}
|
||||
|
||||
workload := &pbcatalog.Workload{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{
|
||||
{Host: member.Addr.String(), Ports: []string{consulPortNameServer}},
|
||||
},
|
||||
// Don't include identity since Consul is not routable through the mesh.
|
||||
// Don't include locality because these values are not passed along through serf, and they are probably
|
||||
// different from the leader's values.
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{
|
||||
consulPortNameServer: {
|
||||
Port: uint32(parts.Port),
|
||||
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
|
||||
},
|
||||
// TODO: add other agent ports
|
||||
},
|
||||
}
|
||||
|
||||
workloadData, err := anypb.New(workload)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not convert Workload to 'any' type: %w", err)
|
||||
}
|
||||
|
||||
workloadId := &pbresource.ID{
|
||||
Name: fmt.Sprintf("%s%s", consulWorkloadPrefix, types.NodeID(member.Tags["id"])),
|
||||
Type: pbcatalog.WorkloadType,
|
||||
Tenancy: resource.DefaultNamespacedTenancy(),
|
||||
}
|
||||
workloadId.Tenancy.Partition = nodeEntMeta.PartitionOrDefault()
|
||||
|
||||
return &pbresource.Resource{
|
||||
Id: workloadId,
|
||||
Data: workloadData,
|
||||
Metadata: workloadMeta,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r V2ConsulRegistrator) createHealthStatusFromMember(member serf.Member, workloadId *pbresource.ID, passing bool, nodeEntMeta *acl.EnterpriseMeta) (*pbresource.Resource, error) {
|
||||
hs := &pbcatalog.HealthStatus{
|
||||
Type: string(structs.SerfCheckID),
|
||||
Description: structs.SerfCheckName,
|
||||
}
|
||||
|
||||
if passing {
|
||||
hs.Status = pbcatalog.Health_HEALTH_PASSING
|
||||
hs.Output = structs.SerfCheckAliveOutput
|
||||
} else {
|
||||
hs.Status = pbcatalog.Health_HEALTH_CRITICAL
|
||||
hs.Output = structs.SerfCheckFailedOutput
|
||||
}
|
||||
|
||||
hsData, err := anypb.New(hs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not convert HealthStatus to 'any' type: %w", err)
|
||||
}
|
||||
|
||||
hsId := &pbresource.ID{
|
||||
Name: fmt.Sprintf("%s%s", consulWorkloadPrefix, types.NodeID(member.Tags["id"])),
|
||||
Type: pbcatalog.HealthStatusType,
|
||||
Tenancy: resource.DefaultNamespacedTenancy(),
|
||||
}
|
||||
hsId.Tenancy.Partition = nodeEntMeta.PartitionOrDefault()
|
||||
|
||||
return &pbresource.Resource{
|
||||
Id: hsId,
|
||||
Data: hsData,
|
||||
Owner: workloadId,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// HandleFailedMember is used to mark the workload's associated HealthStatus.
|
||||
func (r V2ConsulRegistrator) HandleFailedMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta) error {
|
||||
if valid, _ := metadata.IsConsulServer(member); !valid {
|
||||
return nil
|
||||
}
|
||||
|
||||
if nodeEntMeta == nil {
|
||||
nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition()
|
||||
}
|
||||
|
||||
r.Logger.Info("member failed",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
|
||||
// Validate that the associated workload exists
|
||||
workloadId := &pbresource.ID{
|
||||
Name: fmt.Sprintf("%s%s", consulWorkloadPrefix, types.NodeID(member.Tags["id"])),
|
||||
Type: pbcatalog.WorkloadType,
|
||||
Tenancy: resource.DefaultNamespacedTenancy(),
|
||||
}
|
||||
workloadId.Tenancy.Partition = nodeEntMeta.PartitionOrDefault()
|
||||
|
||||
res, err := r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: workloadId})
|
||||
if err != nil && !grpcNotFoundErr(err) {
|
||||
return fmt.Errorf("error checking for existing Workload %s: %w", workloadId.Name, err)
|
||||
}
|
||||
if grpcNotFoundErr(err) {
|
||||
r.Logger.Info("ignoring failed event for member because it does not exist in the catalog",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
// Overwrite the workload ID with the one that has UID populated.
|
||||
existingWorkload := res.GetResource()
|
||||
|
||||
hsResource, err := r.createHealthStatusFromMember(member, existingWorkload.Id, false, nodeEntMeta)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res, err = r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: hsResource.Id})
|
||||
if err != nil && !grpcNotFoundErr(err) {
|
||||
return fmt.Errorf("error checking for existing HealthStatus %s: %w", hsResource.Id.Name, err)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
existingHS := res.GetResource()
|
||||
r.Logger.Debug("existing HealthStatus matching the member found",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
|
||||
// If the HealthStatus is identical, we're done.
|
||||
if cmp.Equal(hsResource, existingHS, resourceCmpOptions...) {
|
||||
r.Logger.Debug("no updates to perform on member HealthStatus",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the existing HealthStatus is different, add the Version to the patch for CAS write.
|
||||
hsResource.Id = existingHS.Id
|
||||
hsResource.Version = existingHS.Version
|
||||
}
|
||||
|
||||
if _, err := r.Client.Write(context.TODO(), &pbresource.WriteRequest{Resource: hsResource}); err != nil {
|
||||
return fmt.Errorf("failed to write HealthStatus %s: %w", hsResource.Id.Name, err)
|
||||
}
|
||||
r.Logger.Info("updated consul HealthStatus in catalog",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleLeftMember is used to handle members that gracefully
|
||||
// left. They are removed if necessary.
|
||||
func (r V2ConsulRegistrator) HandleLeftMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error {
|
||||
return r.handleDeregisterMember("left", member, nodeEntMeta, removeServerFunc)
|
||||
}
|
||||
|
||||
// HandleReapMember is used to handle members that have been
|
||||
// reaped after a prolonged failure. They are removed from the catalog.
|
||||
func (r V2ConsulRegistrator) HandleReapMember(member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error {
|
||||
return r.handleDeregisterMember("reaped", member, nodeEntMeta, removeServerFunc)
|
||||
}
|
||||
|
||||
// handleDeregisterMember is used to remove a member of a given reason
|
||||
func (r V2ConsulRegistrator) handleDeregisterMember(reason string, member serf.Member, nodeEntMeta *acl.EnterpriseMeta, removeServerFunc func(m serf.Member) error) error {
|
||||
if valid, _ := metadata.IsConsulServer(member); !valid {
|
||||
return nil
|
||||
}
|
||||
|
||||
if nodeEntMeta == nil {
|
||||
nodeEntMeta = structs.NodeEnterpriseMetaInDefaultPartition()
|
||||
}
|
||||
|
||||
r.Logger.Info("removing member",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
"reason", reason,
|
||||
)
|
||||
|
||||
if err := removeServerFunc(member); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Do not remove our self. This can only happen if the current leader
|
||||
// is leaving. Instead, we should allow a follower to take-over and
|
||||
// remove us later.
|
||||
if strings.EqualFold(member.Name, r.NodeName) &&
|
||||
strings.EqualFold(nodeEntMeta.PartitionOrDefault(), r.EntMeta.PartitionOrDefault()) {
|
||||
r.Logger.Warn("removing self should be done by follower",
|
||||
"name", r.NodeName,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
"reason", reason,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if the workload exists
|
||||
workloadID := &pbresource.ID{
|
||||
Name: fmt.Sprintf("%s%s", consulWorkloadPrefix, types.NodeID(member.Tags["id"])),
|
||||
Type: pbcatalog.WorkloadType,
|
||||
Tenancy: resource.DefaultNamespacedTenancy(),
|
||||
}
|
||||
workloadID.Tenancy.Partition = nodeEntMeta.PartitionOrDefault()
|
||||
|
||||
res, err := r.Client.Read(context.TODO(), &pbresource.ReadRequest{Id: workloadID})
|
||||
if err != nil && !grpcNotFoundErr(err) {
|
||||
return fmt.Errorf("error checking for existing Workload %s: %w", workloadID.Name, err)
|
||||
}
|
||||
if grpcNotFoundErr(err) {
|
||||
r.Logger.Info("ignoring reap event for member because it does not exist in the catalog",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
existingWorkload := res.GetResource()
|
||||
|
||||
// The HealthStatus should be reaped automatically
|
||||
if _, err := r.Client.Delete(context.TODO(), &pbresource.DeleteRequest{Id: existingWorkload.Id}); err != nil {
|
||||
return fmt.Errorf("failed to delete Workload %s: %w", existingWorkload.Id.Name, err)
|
||||
}
|
||||
r.Logger.Info("deleted consul Workload",
|
||||
"member", member.Name,
|
||||
"partition", getSerfMemberEnterpriseMeta(member).PartitionOrDefault(),
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
func grpcNotFoundErr(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
s, ok := status.FromError(err)
|
||||
return ok && s.Code() == codes.NotFound
|
||||
}
|
|
@ -1,583 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package consul
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/metadata"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
mockpbresource "github.com/hashicorp/consul/grpcmocks/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
var (
|
||||
fakeWrappedErr = fmt.Errorf("fake test error")
|
||||
)
|
||||
|
||||
type testCase struct {
|
||||
name string
|
||||
member serf.Member
|
||||
nodeNameOverride string // This is used in the HandleLeftMember test to avoid deregistering ourself
|
||||
|
||||
existingWorkload *pbresource.Resource
|
||||
workloadReadErr bool
|
||||
workloadWriteErr bool
|
||||
workloadDeleteErr bool
|
||||
|
||||
existingHealthStatus *pbresource.Resource
|
||||
healthstatusReadErr bool
|
||||
healthstatusWriteErr bool
|
||||
|
||||
mutatedWorkload *pbresource.Resource // leaving one of these out means the mock expects not to have a write/delete called
|
||||
mutatedHealthStatus *pbresource.Resource
|
||||
expErr string
|
||||
}
|
||||
|
||||
func Test_HandleAliveMember(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
run := func(t *testing.T, tt testCase) {
|
||||
client := mockpbresource.NewResourceServiceClient(t)
|
||||
mockClient := client.EXPECT()
|
||||
|
||||
// Build mock expectations based on the order of HandleAliveMember resource calls
|
||||
setupReadExpectation(t, mockClient, getTestWorkloadId(), tt.existingWorkload, tt.workloadReadErr)
|
||||
setupWriteExpectation(t, mockClient, tt.mutatedWorkload, tt.workloadWriteErr)
|
||||
if !tt.workloadReadErr && !tt.workloadWriteErr {
|
||||
// We expect to bail before this read if there is an error earlier in the function
|
||||
setupReadExpectation(t, mockClient, getTestHealthstatusId(), tt.existingHealthStatus, tt.healthstatusReadErr)
|
||||
}
|
||||
setupWriteExpectation(t, mockClient, tt.mutatedHealthStatus, tt.healthstatusWriteErr)
|
||||
|
||||
registrator := V2ConsulRegistrator{
|
||||
Logger: hclog.New(&hclog.LoggerOptions{}),
|
||||
NodeName: "test-server-1",
|
||||
Client: client,
|
||||
}
|
||||
|
||||
// Mock join function
|
||||
var joinMockCalled bool
|
||||
joinMock := func(_ serf.Member, _ *metadata.Server) error {
|
||||
joinMockCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
err := registrator.HandleAliveMember(tt.member, acl.DefaultEnterpriseMeta(), joinMock)
|
||||
if tt.expErr != "" {
|
||||
require.Contains(t, err.Error(), tt.expErr)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.True(t, joinMockCalled, "the mock join function was not called")
|
||||
}
|
||||
|
||||
tests := []testCase{
|
||||
{
|
||||
name: "New alive member",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
mutatedWorkload: getTestWorkload(t),
|
||||
mutatedHealthStatus: getTestHealthStatus(t, true),
|
||||
},
|
||||
{
|
||||
name: "No updates needed",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
existingWorkload: getTestWorkload(t),
|
||||
existingHealthStatus: getTestHealthStatus(t, true),
|
||||
},
|
||||
{
|
||||
name: "Existing Workload and HS need to be updated",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
existingWorkload: getTestWorkloadWithPort(t, 8301),
|
||||
existingHealthStatus: getTestHealthStatus(t, false),
|
||||
mutatedWorkload: getTestWorkload(t),
|
||||
mutatedHealthStatus: getTestHealthStatus(t, true),
|
||||
},
|
||||
{
|
||||
name: "Only the HS needs to be updated",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
existingWorkload: getTestWorkload(t),
|
||||
existingHealthStatus: getTestHealthStatus(t, false),
|
||||
mutatedHealthStatus: getTestHealthStatus(t, true),
|
||||
},
|
||||
{
|
||||
name: "Error reading Workload",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
workloadReadErr: true,
|
||||
expErr: "error checking for existing Workload",
|
||||
},
|
||||
{
|
||||
name: "Error writing Workload",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
workloadWriteErr: true,
|
||||
mutatedWorkload: getTestWorkload(t),
|
||||
expErr: "failed to write Workload",
|
||||
},
|
||||
{
|
||||
name: "Error reading HealthStatus",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
healthstatusReadErr: true,
|
||||
mutatedWorkload: getTestWorkload(t),
|
||||
expErr: "error checking for existing HealthStatus",
|
||||
},
|
||||
{
|
||||
name: "Error writing HealthStatus",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
healthstatusWriteErr: true,
|
||||
mutatedWorkload: getTestWorkload(t),
|
||||
mutatedHealthStatus: getTestHealthStatus(t, true),
|
||||
expErr: "failed to write HealthStatus",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
run(t, tt)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_HandleFailedMember(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
run := func(t *testing.T, tt testCase) {
|
||||
client := mockpbresource.NewResourceServiceClient(t)
|
||||
mockClient := client.EXPECT()
|
||||
|
||||
// Build mock expectations based on the order of HandleFailed resource calls
|
||||
setupReadExpectation(t, mockClient, getTestWorkloadId(), tt.existingWorkload, tt.workloadReadErr)
|
||||
if !tt.workloadReadErr && tt.existingWorkload != nil {
|
||||
// We expect to bail before this read if there is an error earlier in the function or there is no workload
|
||||
setupReadExpectation(t, mockClient, getTestHealthstatusId(), tt.existingHealthStatus, tt.healthstatusReadErr)
|
||||
}
|
||||
setupWriteExpectation(t, mockClient, tt.mutatedHealthStatus, tt.healthstatusWriteErr)
|
||||
|
||||
registrator := V2ConsulRegistrator{
|
||||
Logger: hclog.New(&hclog.LoggerOptions{}),
|
||||
NodeName: "test-server-1",
|
||||
Client: client,
|
||||
}
|
||||
|
||||
err := registrator.HandleFailedMember(tt.member, acl.DefaultEnterpriseMeta())
|
||||
if tt.expErr != "" {
|
||||
require.Contains(t, err.Error(), tt.expErr)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
tests := []testCase{
|
||||
{
|
||||
name: "Update non-existent HealthStatus",
|
||||
member: getTestSerfMember(serf.StatusFailed),
|
||||
existingWorkload: getTestWorkload(t),
|
||||
mutatedHealthStatus: getTestHealthStatus(t, false),
|
||||
},
|
||||
{
|
||||
name: "Underlying Workload does not exist",
|
||||
member: getTestSerfMember(serf.StatusFailed),
|
||||
},
|
||||
{
|
||||
name: "Update an existing HealthStatus",
|
||||
member: getTestSerfMember(serf.StatusFailed),
|
||||
existingWorkload: getTestWorkload(t),
|
||||
existingHealthStatus: getTestHealthStatus(t, true),
|
||||
mutatedHealthStatus: getTestHealthStatus(t, false),
|
||||
},
|
||||
{
|
||||
name: "HealthStatus is already critical - no updates needed",
|
||||
member: getTestSerfMember(serf.StatusFailed),
|
||||
existingWorkload: getTestWorkload(t),
|
||||
existingHealthStatus: getTestHealthStatus(t, false),
|
||||
},
|
||||
{
|
||||
name: "Error reading Workload",
|
||||
member: getTestSerfMember(serf.StatusFailed),
|
||||
workloadReadErr: true,
|
||||
expErr: "error checking for existing Workload",
|
||||
},
|
||||
{
|
||||
name: "Error reading HealthStatus",
|
||||
member: getTestSerfMember(serf.StatusFailed),
|
||||
existingWorkload: getTestWorkload(t),
|
||||
healthstatusReadErr: true,
|
||||
expErr: "error checking for existing HealthStatus",
|
||||
},
|
||||
{
|
||||
name: "Error writing HealthStatus",
|
||||
member: getTestSerfMember(serf.StatusFailed),
|
||||
existingWorkload: getTestWorkload(t),
|
||||
healthstatusWriteErr: true,
|
||||
mutatedHealthStatus: getTestHealthStatus(t, false),
|
||||
expErr: "failed to write HealthStatus",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
run(t, tt)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Test_HandleLeftMember also tests HandleReapMembers, which are the same core logic with some different logs.
|
||||
func Test_HandleLeftMember(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
run := func(t *testing.T, tt testCase) {
|
||||
client := mockpbresource.NewResourceServiceClient(t)
|
||||
mockClient := client.EXPECT()
|
||||
|
||||
// Build mock expectations based on the order of HandleLeftMember resource calls
|
||||
// We check for the override, which we use to skip self de-registration
|
||||
if tt.nodeNameOverride == "" {
|
||||
setupReadExpectation(t, mockClient, getTestWorkloadId(), tt.existingWorkload, tt.workloadReadErr)
|
||||
if tt.existingWorkload != nil && !tt.workloadReadErr {
|
||||
setupDeleteExpectation(t, mockClient, tt.mutatedWorkload, tt.workloadDeleteErr)
|
||||
}
|
||||
}
|
||||
|
||||
nodeName := "test-server-2" // This is not the same as the serf node so we don't dergister ourself.
|
||||
if tt.nodeNameOverride != "" {
|
||||
nodeName = tt.nodeNameOverride
|
||||
}
|
||||
|
||||
registrator := V2ConsulRegistrator{
|
||||
Logger: hclog.New(&hclog.LoggerOptions{}),
|
||||
NodeName: nodeName, // We change this so that we don't deregister ourself
|
||||
Client: client,
|
||||
}
|
||||
|
||||
// Mock join function
|
||||
var removeMockCalled bool
|
||||
removeMock := func(_ serf.Member) error {
|
||||
removeMockCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
err := registrator.HandleLeftMember(tt.member, acl.DefaultEnterpriseMeta(), removeMock)
|
||||
if tt.expErr != "" {
|
||||
require.Contains(t, err.Error(), tt.expErr)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.True(t, removeMockCalled, "the mock remove function was not called")
|
||||
}
|
||||
|
||||
tests := []testCase{
|
||||
{
|
||||
name: "Remove member",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
existingWorkload: getTestWorkload(t),
|
||||
mutatedWorkload: getTestWorkload(t),
|
||||
},
|
||||
{
|
||||
name: "Don't deregister ourself",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
nodeNameOverride: "test-server-1",
|
||||
},
|
||||
{
|
||||
name: "Don't do anything if the Workload is already gone",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
},
|
||||
{
|
||||
name: "Remove member regardless of Workload payload",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
existingWorkload: getTestWorkloadWithPort(t, 8301),
|
||||
mutatedWorkload: getTestWorkload(t),
|
||||
},
|
||||
{
|
||||
name: "Error reading Workload",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
workloadReadErr: true,
|
||||
expErr: "error checking for existing Workload",
|
||||
},
|
||||
{
|
||||
name: "Error deleting Workload",
|
||||
member: getTestSerfMember(serf.StatusAlive),
|
||||
workloadDeleteErr: true,
|
||||
existingWorkload: getTestWorkloadWithPort(t, 8301),
|
||||
mutatedWorkload: getTestWorkload(t),
|
||||
expErr: "failed to delete Workload",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
run(t, tt)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func setupReadExpectation(
|
||||
t *testing.T,
|
||||
mockClient *mockpbresource.ResourceServiceClient_Expecter,
|
||||
expectedId *pbresource.ID,
|
||||
existingResource *pbresource.Resource,
|
||||
sendErr bool) {
|
||||
|
||||
if sendErr {
|
||||
mockClient.Read(mock.Anything, mock.Anything).
|
||||
Return(nil, fakeWrappedErr).
|
||||
Once().
|
||||
Run(func(args mock.Arguments) {
|
||||
req := args.Get(1).(*pbresource.ReadRequest)
|
||||
require.True(t, proto.Equal(expectedId, req.Id))
|
||||
})
|
||||
} else if existingResource != nil {
|
||||
mockClient.Read(mock.Anything, mock.Anything).
|
||||
Return(&pbresource.ReadResponse{
|
||||
Resource: existingResource,
|
||||
}, nil).
|
||||
Once().
|
||||
Run(func(args mock.Arguments) {
|
||||
req := args.Get(1).(*pbresource.ReadRequest)
|
||||
require.True(t, proto.Equal(expectedId, req.Id))
|
||||
})
|
||||
} else {
|
||||
mockClient.Read(mock.Anything, mock.Anything).
|
||||
Return(nil, status.Error(codes.NotFound, "not found")).
|
||||
Once().
|
||||
Run(func(args mock.Arguments) {
|
||||
req := args.Get(1).(*pbresource.ReadRequest)
|
||||
require.True(t, proto.Equal(expectedId, req.Id))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func setupWriteExpectation(
|
||||
t *testing.T,
|
||||
mockClient *mockpbresource.ResourceServiceClient_Expecter,
|
||||
expectedResource *pbresource.Resource,
|
||||
sendErr bool) {
|
||||
|
||||
// If there is no expected resource, we take that to mean we don't expect any client writes.
|
||||
if expectedResource == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if sendErr {
|
||||
mockClient.Write(mock.Anything, mock.Anything).
|
||||
Return(nil, fakeWrappedErr).
|
||||
Once().
|
||||
Run(func(args mock.Arguments) {
|
||||
req := args.Get(1).(*pbresource.WriteRequest)
|
||||
require.True(t, proto.Equal(expectedResource, req.Resource))
|
||||
})
|
||||
} else {
|
||||
mockClient.Write(mock.Anything, mock.Anything).
|
||||
Return(nil, nil).
|
||||
Once().
|
||||
Run(func(args mock.Arguments) {
|
||||
req := args.Get(1).(*pbresource.WriteRequest)
|
||||
require.True(t, proto.Equal(expectedResource, req.Resource))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func setupDeleteExpectation(
|
||||
t *testing.T,
|
||||
mockClient *mockpbresource.ResourceServiceClient_Expecter,
|
||||
expectedResource *pbresource.Resource,
|
||||
sendErr bool) {
|
||||
|
||||
expectedId := expectedResource.GetId()
|
||||
|
||||
if sendErr {
|
||||
mockClient.Delete(mock.Anything, mock.Anything).
|
||||
Return(nil, fakeWrappedErr).
|
||||
Once().
|
||||
Run(func(args mock.Arguments) {
|
||||
req := args.Get(1).(*pbresource.DeleteRequest)
|
||||
require.True(t, proto.Equal(expectedId, req.Id))
|
||||
})
|
||||
} else {
|
||||
mockClient.Delete(mock.Anything, mock.Anything).
|
||||
Return(nil, nil).
|
||||
Once().
|
||||
Run(func(args mock.Arguments) {
|
||||
req := args.Get(1).(*pbresource.DeleteRequest)
|
||||
require.True(t, proto.Equal(expectedId, req.Id))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func getTestWorkload(t *testing.T) *pbresource.Resource {
|
||||
return getTestWorkloadWithPort(t, 8300)
|
||||
}
|
||||
|
||||
func getTestWorkloadWithPort(t *testing.T, port int) *pbresource.Resource {
|
||||
workload := &pbcatalog.Workload{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{
|
||||
{Host: "127.0.0.1", Ports: []string{consulPortNameServer}},
|
||||
},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{
|
||||
consulPortNameServer: {
|
||||
Port: uint32(port),
|
||||
Protocol: pbcatalog.Protocol_PROTOCOL_TCP,
|
||||
},
|
||||
},
|
||||
}
|
||||
data, err := anypb.New(workload)
|
||||
require.NoError(t, err)
|
||||
|
||||
return &pbresource.Resource{
|
||||
Id: getTestWorkloadId(),
|
||||
Data: data,
|
||||
Metadata: map[string]string{
|
||||
"read_replica": "false",
|
||||
"raft_version": "3",
|
||||
"serf_protocol_current": "2",
|
||||
"serf_protocol_min": "1",
|
||||
"serf_protocol_max": "5",
|
||||
"version": "1.18.0",
|
||||
"grpc_port": "8502",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getTestWorkloadId() *pbresource.ID {
|
||||
return &pbresource.ID{
|
||||
Tenancy: resource.DefaultNamespacedTenancy(),
|
||||
Type: pbcatalog.WorkloadType,
|
||||
Name: "consul-server-72af047d-1857-2493-969e-53614a70b25a",
|
||||
}
|
||||
}
|
||||
|
||||
func getTestHealthStatus(t *testing.T, passing bool) *pbresource.Resource {
|
||||
healthStatus := &pbcatalog.HealthStatus{
|
||||
Type: string(structs.SerfCheckID),
|
||||
Description: structs.SerfCheckName,
|
||||
}
|
||||
|
||||
if passing {
|
||||
healthStatus.Status = pbcatalog.Health_HEALTH_PASSING
|
||||
healthStatus.Output = structs.SerfCheckAliveOutput
|
||||
} else {
|
||||
healthStatus.Status = pbcatalog.Health_HEALTH_CRITICAL
|
||||
healthStatus.Output = structs.SerfCheckFailedOutput
|
||||
}
|
||||
|
||||
data, err := anypb.New(healthStatus)
|
||||
require.NoError(t, err)
|
||||
|
||||
return &pbresource.Resource{
|
||||
Id: getTestHealthstatusId(),
|
||||
Data: data,
|
||||
Owner: getTestWorkloadId(),
|
||||
}
|
||||
}
|
||||
|
||||
func getTestHealthstatusId() *pbresource.ID {
|
||||
return &pbresource.ID{
|
||||
Tenancy: resource.DefaultNamespacedTenancy(),
|
||||
Type: pbcatalog.HealthStatusType,
|
||||
Name: "consul-server-72af047d-1857-2493-969e-53614a70b25a",
|
||||
}
|
||||
}
|
||||
|
||||
func getTestSerfMember(status serf.MemberStatus) serf.Member {
|
||||
return serf.Member{
|
||||
Name: "test-server-1",
|
||||
Addr: net.ParseIP("127.0.0.1"),
|
||||
Port: 8300,
|
||||
// representative tags from a local dev deployment of ENT
|
||||
Tags: map[string]string{
|
||||
"vsn_min": "2",
|
||||
"vsn": "2",
|
||||
"acls": "1",
|
||||
"ft_si": "1",
|
||||
"raft_vsn": "3",
|
||||
"grpc_port": "8502",
|
||||
"wan_join_port": "8500",
|
||||
"dc": "dc1",
|
||||
"segment": "",
|
||||
"id": "72af047d-1857-2493-969e-53614a70b25a",
|
||||
"ft_admpart": "1",
|
||||
"role": "consul",
|
||||
"build": "1.18.0",
|
||||
"ft_ns": "1",
|
||||
"vsn_max": "3",
|
||||
"bootstrap": "1",
|
||||
"expect": "1",
|
||||
"port": "8300",
|
||||
},
|
||||
Status: status,
|
||||
ProtocolMin: 1,
|
||||
ProtocolMax: 5,
|
||||
ProtocolCur: 2,
|
||||
DelegateMin: 2,
|
||||
DelegateMax: 5,
|
||||
DelegateCur: 4,
|
||||
}
|
||||
}
|
||||
|
||||
// Test_ResourceCmpOptions_GeneratedFieldInsensitive makes sure are protocmp options are working as expected.
|
||||
func Test_ResourceCmpOptions_GeneratedFieldInsensitive(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
res1 := getTestWorkload(t)
|
||||
res2 := getTestWorkload(t)
|
||||
|
||||
// Modify the generated fields
|
||||
res2.Id.Uid = "123456"
|
||||
res2.Version = "789"
|
||||
res2.Generation = "millenial"
|
||||
res2.Status = map[string]*pbresource.Status{
|
||||
"foo": {ObservedGeneration: "124"},
|
||||
}
|
||||
|
||||
require.True(t, cmp.Equal(res1, res2, resourceCmpOptions...))
|
||||
|
||||
res1.Metadata["foo"] = "bar"
|
||||
|
||||
require.False(t, cmp.Equal(res1, res2, resourceCmpOptions...))
|
||||
}
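The comparison above relies on a package-level `resourceCmpOptions` that is defined outside this hunk. As a hedged sketch of the shape such options usually take — the exact ignored fields are an assumption, not copied from the source — server-generated bookkeeping fields are excluded and protobuf-aware comparison is enabled:

```go
package consul

import (
	"github.com/google/go-cmp/cmp"
	"google.golang.org/protobuf/testing/protocmp"

	"github.com/hashicorp/consul/proto-public/pbresource"
)

// Sketch only: ignore fields the server generates on write (uid, version,
// generation, status) and compare protobuf messages structurally.
var resourceCmpOptions = []cmp.Option{
	protocmp.IgnoreFields(&pbresource.Resource{}, "status", "generation", "version"),
	protocmp.IgnoreFields(&pbresource.ID{}, "uid"),
	protocmp.Transform(),
}
```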
|
||||
|
||||
// Test gRPC Error Codes Conditions
|
||||
func Test_grpcNotFoundErr(t *testing.T) {
|
||||
t.Parallel()
|
||||
tests := []struct {
|
||||
name string
|
||||
err error
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "Nil Error",
|
||||
},
|
||||
{
|
||||
name: "Nonsense Error",
|
||||
err: fmt.Errorf("boooooo!"),
|
||||
},
|
||||
{
|
||||
name: "gRPC Permission Denied Error",
|
||||
err: status.Error(codes.PermissionDenied, "permission denied is not NotFound"),
|
||||
},
|
||||
{
|
||||
name: "gRPC NotFound Error",
|
||||
err: status.Error(codes.NotFound, "bingo: not found"),
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
require.Equal(t, tt.expected, grpcNotFoundErr(tt.err))
|
||||
})
|
||||
}
|
||||
}
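The helper exercised by this table is defined elsewhere in the file; a minimal sketch that satisfies all four cases above (nil and plain errors are not NotFound, and only a gRPC status carrying `codes.NotFound` qualifies) could look like the following — an assumption, not necessarily the exact source:

```go
package consul

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// grpcNotFoundErr reports whether err is a gRPC status error carrying
// codes.NotFound. Sketch for illustration; the shipped helper may differ.
func grpcNotFoundErr(err error) bool {
	if err == nil {
		return false
	}
	s, ok := status.FromError(err)
	return ok && s.Code() == codes.NotFound
}
```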
|
|
@ -14,84 +14,23 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
"github.com/stretchr/testify/require"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
msgpackrpc "github.com/hashicorp/consul-net-rpc/net-rpc-msgpackrpc"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/serf/serf"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/leafcert"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
tokenStore "github.com/hashicorp/consul/agent/token"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
"github.com/hashicorp/consul/sdk/testutil/retry"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
)
|
||||
|
||||
func enableV2(t *testing.T) func(deps *Deps) {
|
||||
return func(deps *Deps) {
|
||||
deps.Experiments = []string{"resource-apis"}
|
||||
m, _ := leafcert.NewTestManager(t, nil)
|
||||
deps.LeafCertManager = m
|
||||
}
|
||||
}
|
||||
|
||||
// Test that Consul service is created in V2.
|
||||
// In V1, the service is implicitly created - this is covered in leader_registrator_v1_test.go
|
||||
func Test_InitConsulService(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
|
||||
dir, s := testServerWithDepsAndConfig(t, enableV2(t),
|
||||
func(c *Config) {
|
||||
c.PrimaryDatacenter = "dc1"
|
||||
c.ACLsEnabled = true
|
||||
c.ACLInitialManagementToken = "root"
|
||||
c.ACLResolverSettings.ACLDefaultPolicy = "deny"
|
||||
})
|
||||
defer os.RemoveAll(dir)
|
||||
defer s.Shutdown()
|
||||
|
||||
testrpc.WaitForRaftLeader(t, s.RPC, "dc1", testrpc.WithToken("root"))
|
||||
|
||||
client := pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan)
|
||||
|
||||
consulServiceID := &pbresource.ID{
|
||||
Name: structs.ConsulServiceName,
|
||||
Type: pbcatalog.ServiceType,
|
||||
Tenancy: resource.DefaultNamespacedTenancy(),
|
||||
}
|
||||
|
||||
retry.Run(t, func(r *retry.R) {
|
||||
res, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: consulServiceID})
|
||||
if err != nil {
|
||||
r.Fatalf("err: %v", err)
|
||||
}
|
||||
data := res.GetResource().GetData()
|
||||
require.NotNil(r, data)
|
||||
|
||||
var service pbcatalog.Service
|
||||
err = data.UnmarshalTo(&service)
|
||||
require.NoError(r, err)
|
||||
|
||||
// Spot check the Service
|
||||
require.Equal(r, service.GetWorkloads().GetPrefixes(), []string{consulWorkloadPrefix})
|
||||
require.GreaterOrEqual(r, len(service.GetPorts()), 1)
|
||||
|
||||
// Since we're not running a full agent w/ serf, we can't check for valid endpoints
|
||||
})
|
||||
}
|
||||
|
||||
func TestLeader_TombstoneGC_Reset(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
@ -834,7 +773,7 @@ func TestLeader_ConfigEntryBootstrap_Fail(t *testing.T) {
|
|||
deps := newDefaultDeps(t, config)
|
||||
deps.Logger = logger
|
||||
|
||||
srv, err := NewServer(config, deps, grpc.NewServer(), nil, logger, nil)
|
||||
srv, err := NewServer(config, deps, grpc.NewServer(), nil, logger)
|
||||
require.NoError(t, err)
|
||||
defer srv.Shutdown()
|
||||
|
||||
|
|
|
@ -6,8 +6,6 @@ package consul
|
|||
import (
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
|
||||
"github.com/hashicorp/consul-net-rpc/net/rpc"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
|
@ -50,33 +48,6 @@ type Deps struct {
|
|||
EnterpriseDeps
|
||||
}
|
||||
|
||||
// UseV2Resources returns true if "resource-apis" is present in the Experiments
|
||||
// array of the agent config.
|
||||
func (d Deps) UseV2Resources() bool {
|
||||
if stringslice.Contains(d.Experiments, CatalogResourceExperimentName) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// UseV2Tenancy returns true if "v2tenancy" is present in the Experiments
|
||||
// array of the agent config.
|
||||
func (d Deps) UseV2Tenancy() bool {
|
||||
if stringslice.Contains(d.Experiments, V2TenancyExperimentName) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// HCPAllowV2Resources returns true if "hcp-v2-resource-apis" is present in the Experiments
|
||||
// array of the agent config.
|
||||
func (d Deps) HCPAllowV2Resources() bool {
|
||||
if stringslice.Contains(d.Experiments, HCPAllowV2ResourceAPIs) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type GRPCClientConner interface {
|
||||
ClientConn(datacenter string) (*grpc.ClientConn, error)
|
||||
ClientConnLeader() (*grpc.ClientConn, error)
|
||||
|
|
|
@ -63,25 +63,18 @@ import (
|
|||
"github.com/hashicorp/consul/agent/rpc/peering"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/agent/token"
|
||||
"github.com/hashicorp/consul/internal/auth"
|
||||
"github.com/hashicorp/consul/internal/catalog"
|
||||
"github.com/hashicorp/consul/internal/controller"
|
||||
"github.com/hashicorp/consul/internal/gossip/librtt"
|
||||
hcpctl "github.com/hashicorp/consul/internal/hcp"
|
||||
"github.com/hashicorp/consul/internal/mesh"
|
||||
proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
|
||||
"github.com/hashicorp/consul/internal/multicluster"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/resource/demo"
|
||||
"github.com/hashicorp/consul/internal/resource/reaper"
|
||||
"github.com/hashicorp/consul/internal/storage"
|
||||
raftstorage "github.com/hashicorp/consul/internal/storage/raft"
|
||||
"github.com/hashicorp/consul/internal/tenancy"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/lib/routine"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
"github.com/hashicorp/consul/logging"
|
||||
"github.com/hashicorp/consul/proto-public/pbmesh/v2beta1/pbproxystate"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/tlsutil"
|
||||
"github.com/hashicorp/consul/types"
|
||||
|
@ -131,25 +124,9 @@ const (
|
|||
// and wait for a periodic reconcile.
|
||||
reconcileChSize = 256
|
||||
|
||||
LeaderTransferMinVersion = "1.6.0"
|
||||
CatalogResourceExperimentName = "resource-apis"
|
||||
V2TenancyExperimentName = "v2tenancy"
|
||||
HCPAllowV2ResourceAPIs = "hcp-v2-resource-apis"
|
||||
LeaderTransferMinVersion = "1.6.0"
|
||||
)
|
||||
|
||||
// IsExperimentAllowedOnSecondaries returns false if an experiment is currently
|
||||
// disallowed for wan federated secondary datacenters.
|
||||
//
|
||||
// Likely these will all be short lived exclusions.
|
||||
func IsExperimentAllowedOnSecondaries(name string) bool {
|
||||
switch name {
|
||||
case CatalogResourceExperimentName, V2TenancyExperimentName:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
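As a hedged usage sketch (the helper name and error text below are illustrative, not part of this change), a wan federated secondary would typically run its configured experiments through this predicate at startup and refuse any that are primary-only:

```go
package consul

import "fmt"

// validateSecondaryExperiments is illustrative only; it rejects experiments
// that must not be enabled outside the primary datacenter.
func validateSecondaryExperiments(experiments []string) error {
	for _, name := range experiments {
		if !IsExperimentAllowedOnSecondaries(name) {
			return fmt.Errorf("experiment %q cannot be enabled in a wan federated secondary datacenter", name)
		}
	}
	return nil
}
```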
|
||||
|
||||
const (
|
||||
aclPolicyReplicationRoutineName = "ACL policy replication"
|
||||
aclRoleReplicationRoutineName = "ACL role replication"
|
||||
|
@ -474,15 +451,6 @@ type Server struct {
|
|||
reportingManager *reporting.ReportingManager
|
||||
|
||||
registry resource.Registry
|
||||
|
||||
useV2Resources bool
|
||||
|
||||
// useV2Tenancy is tied to the "v2tenancy" feature flag.
|
||||
useV2Tenancy bool
|
||||
|
||||
// whether v2 resources are enabled for use with HCP
|
||||
// TODO(CC-6389): Remove once resource-apis is no longer considered experimental and is supported by HCP
|
||||
hcpAllowV2Resources bool
|
||||
}
|
||||
|
||||
func (s *Server) DecrementBlockingQueries() uint64 {
|
||||
|
@ -504,22 +472,10 @@ type connHandler interface {
|
|||
Shutdown() error
|
||||
}
|
||||
|
||||
// ProxyUpdater is an interface for ProxyTracker.
|
||||
type ProxyUpdater interface {
|
||||
// PushChange allows pushing a computed ProxyState to xds for xds resource generation to send to a proxy.
|
||||
PushChange(id *pbresource.ID, snapshot proxysnapshot.ProxySnapshot) error
|
||||
|
||||
// ProxyConnectedToServer returns whether this id is connected to this server. If it is connected, it also returns
|
||||
// the token as the first argument.
|
||||
ProxyConnectedToServer(id *pbresource.ID) (string, bool)
|
||||
|
||||
EventChannel() chan controller.Event
|
||||
}
|
||||
|
||||
// NewServer is used to construct a new Consul server from the configuration
|
||||
// and extra options, potentially returning an error.
|
||||
func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
|
||||
incomingRPCLimiter rpcRate.RequestLimitsHandler, serverLogger hclog.InterceptLogger, proxyUpdater ProxyUpdater) (*Server, error) {
|
||||
incomingRPCLimiter rpcRate.RequestLimitsHandler, serverLogger hclog.InterceptLogger) (*Server, error) {
|
||||
logger := flat.Logger
|
||||
if err := config.CheckProtocolVersion(); err != nil {
|
||||
return nil, err
|
||||
|
@ -572,9 +528,6 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
|
|||
incomingRPCLimiter: incomingRPCLimiter,
|
||||
routineManager: routine.NewManager(logger.Named(logging.ConsulServer)),
|
||||
registry: flat.Registry,
|
||||
useV2Resources: flat.UseV2Resources(),
|
||||
useV2Tenancy: flat.UseV2Tenancy(),
|
||||
hcpAllowV2Resources: flat.HCPAllowV2Resources(),
|
||||
}
|
||||
incomingRPCLimiter.Register(s)
|
||||
|
||||
|
@ -636,15 +589,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
|
|||
|
||||
rpcServerOpts := []func(*rpc.Server){
|
||||
rpc.WithPreBodyInterceptor(
|
||||
middleware.ChainedRPCPreBodyInterceptor(
|
||||
func(reqServiceMethod string, sourceAddr net.Addr) error {
|
||||
if s.useV2Resources && isV1CatalogRequest(reqServiceMethod) {
|
||||
return structs.ErrUsingV2CatalogExperiment
|
||||
}
|
||||
return nil
|
||||
},
|
||||
middleware.GetNetRPCRateLimitingInterceptor(s.incomingRPCLimiter, middleware.NewPanicHandler(s.logger)),
|
||||
),
|
||||
middleware.GetNetRPCRateLimitingInterceptor(s.incomingRPCLimiter, middleware.NewPanicHandler(s.logger)),
|
||||
),
|
||||
}
|
||||
|
||||
|
@ -747,7 +692,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
|
|||
}
|
||||
|
||||
// Initialize the Raft server.
|
||||
if err := s.setupRaft(stringslice.Contains(flat.Experiments, CatalogResourceExperimentName)); err != nil {
|
||||
if err := s.setupRaft(); err != nil {
|
||||
s.Shutdown()
|
||||
return nil, fmt.Errorf("Failed to start Raft: %v", err)
|
||||
}
|
||||
|
@ -925,7 +870,7 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
|
|||
pbresource.NewResourceServiceClient(s.insecureUnsafeGRPCChan),
|
||||
s.loggers.Named(logging.ControllerRuntime),
|
||||
)
|
||||
if err := s.registerControllers(flat, proxyUpdater); err != nil {
|
||||
if err := s.registerControllers(flat); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
go s.controllerManager.Run(&lib.StopChannelContext{StopCh: shutdownCh})
|
||||
|
@ -943,22 +888,12 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
|
|||
// as establishing leadership could attempt to use autopilot and cause a panic.
|
||||
s.initAutopilot(config)
|
||||
|
||||
// Construct the registrator that makes sense for the catalog version
|
||||
if s.useV2Resources {
|
||||
s.registrator = V2ConsulRegistrator{
|
||||
Logger: serverLogger,
|
||||
NodeName: s.config.NodeName,
|
||||
EntMeta: s.config.AgentEnterpriseMeta(),
|
||||
Client: pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan),
|
||||
}
|
||||
} else {
|
||||
s.registrator = V1ConsulRegistrator{
|
||||
Datacenter: s.config.Datacenter,
|
||||
FSM: s.fsm,
|
||||
Logger: serverLogger,
|
||||
NodeName: s.config.NodeName,
|
||||
RaftApplyFunc: s.raftApplyMsgpack,
|
||||
}
|
||||
s.registrator = V1ConsulRegistrator{
|
||||
Datacenter: s.config.Datacenter,
|
||||
FSM: s.fsm,
|
||||
Logger: serverLogger,
|
||||
NodeName: s.config.NodeName,
|
||||
RaftApplyFunc: s.raftApplyMsgpack,
|
||||
}
|
||||
|
||||
// Start monitoring leadership. This must happen after Serf is set up
|
||||
|
@ -993,86 +928,17 @@ func NewServer(config *Config, flat Deps, externalGRPCServer *grpc.Server,
|
|||
return s, nil
|
||||
}
|
||||
|
||||
func isV1CatalogRequest(rpcName string) bool {
|
||||
switch {
|
||||
case strings.HasPrefix(rpcName, "Catalog."),
|
||||
strings.HasPrefix(rpcName, "Health."),
|
||||
strings.HasPrefix(rpcName, "ConfigEntry."):
|
||||
return true
|
||||
}
|
||||
|
||||
switch rpcName {
|
||||
case "Internal.EventFire", "Internal.KeyringOperation", "Internal.OIDCAuthMethods":
|
||||
return false
|
||||
default:
|
||||
if strings.HasPrefix(rpcName, "Internal.") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) registerControllers(deps Deps, proxyUpdater ProxyUpdater) error {
|
||||
func (s *Server) registerControllers(deps Deps) error {
|
||||
if s.config.Cloud.IsConfigured() {
|
||||
hcpctl.RegisterControllers(
|
||||
s.controllerManager, hcpctl.ControllerDependencies{
|
||||
ResourceApisEnabled: s.useV2Resources,
|
||||
HCPAllowV2ResourceApis: s.hcpAllowV2Resources,
|
||||
CloudConfig: deps.HCP.Config,
|
||||
CloudConfig: deps.HCP.Config,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// When not enabled, the v1 tenancy bridge is used by default.
|
||||
if s.useV2Tenancy {
|
||||
tenancy.RegisterControllers(
|
||||
s.controllerManager,
|
||||
tenancy.Dependencies{Registry: deps.Registry},
|
||||
)
|
||||
}
|
||||
|
||||
if s.useV2Resources {
|
||||
catalog.RegisterControllers(s.controllerManager)
|
||||
defaultAllow, err := s.config.ACLResolverSettings.IsDefaultAllow()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mesh.RegisterControllers(s.controllerManager, mesh.ControllerDependencies{
|
||||
TrustBundleFetcher: func() (*pbproxystate.TrustBundle, error) {
|
||||
var bundle pbproxystate.TrustBundle
|
||||
roots, err := s.getCARoots(nil, s.GetState())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bundle.TrustDomain = roots.TrustDomain
|
||||
for _, root := range roots.Roots {
|
||||
bundle.Roots = append(bundle.Roots, root.RootCert)
|
||||
}
|
||||
return &bundle, nil
|
||||
},
|
||||
// This function is adapted from server_connect.go:getCARoots.
|
||||
TrustDomainFetcher: func() (string, error) {
|
||||
_, caConfig, err := s.fsm.State().CAConfig(nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return s.getTrustDomain(caConfig)
|
||||
},
|
||||
|
||||
LeafCertManager: deps.LeafCertManager,
|
||||
LocalDatacenter: s.config.Datacenter,
|
||||
DefaultAllow: defaultAllow,
|
||||
ProxyUpdater: proxyUpdater,
|
||||
})
|
||||
|
||||
auth.RegisterControllers(s.controllerManager, auth.DefaultControllerDependencies())
|
||||
multicluster.RegisterControllers(s.controllerManager)
|
||||
} else {
|
||||
shim := NewExportedServicesShim(s)
|
||||
multicluster.RegisterCompatControllers(s.controllerManager, multicluster.DefaultCompatControllerDependencies(shim))
|
||||
}
|
||||
shim := NewExportedServicesShim(s)
|
||||
multicluster.RegisterCompatControllers(s.controllerManager, multicluster.DefaultCompatControllerDependencies(shim))
|
||||
|
||||
reaper.RegisterControllers(s.controllerManager)
|
||||
|
||||
|
@ -1109,7 +975,7 @@ func (s *Server) connectCARootsMonitor(ctx context.Context) {
|
|||
}
|
||||
|
||||
// setupRaft is used to setup and initialize Raft
|
||||
func (s *Server) setupRaft(isCatalogResourceExperiment bool) error {
|
||||
func (s *Server) setupRaft() error {
|
||||
// If we have an unclean exit then attempt to close the Raft store.
|
||||
defer func() {
|
||||
if s.raft == nil && s.raftStore != nil {
|
||||
|
@ -1190,7 +1056,7 @@ func (s *Server) setupRaft(isCatalogResourceExperiment bool) error {
|
|||
return nil
|
||||
}
|
||||
// Only use WAL if there is no existing raft.db, even if it's enabled.
|
||||
if s.config.LogStoreConfig.Backend == LogStoreBackendDefault && !boltFileExists && isCatalogResourceExperiment {
|
||||
if s.config.LogStoreConfig.Backend == LogStoreBackendDefault && !boltFileExists {
|
||||
s.config.LogStoreConfig.Backend = LogStoreBackendWAL
|
||||
if !s.config.LogStoreConfig.Verification.Enabled {
|
||||
s.config.LogStoreConfig.Verification.Enabled = true
|
||||
|
|
|
@ -205,6 +205,5 @@ func (s *Server) newResourceServiceConfig(typeRegistry resource.Registry, resolv
|
|||
ACLResolver: resolver,
|
||||
Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.Resource),
|
||||
TenancyBridge: tenancyBridge,
|
||||
UseV2Tenancy: s.useV2Tenancy,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -29,8 +29,6 @@ import (
|
|||
"github.com/hashicorp/consul/agent/rpc/peering"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/tenancy"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
"github.com/hashicorp/consul/logging"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/proto/private/pbsubscribe"
|
||||
|
@ -316,7 +314,6 @@ func (s *Server) setupGRPCServices(config *Config, deps Deps) error {
|
|||
// for anything internal in Consul to use the service. If that changes
|
||||
// we could register it on the in-process interfaces as well.
|
||||
err = s.registerDataplaneServer(
|
||||
deps,
|
||||
s.externalGRPCServer,
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -344,20 +341,7 @@ func (s *Server) registerResourceServiceServer(typeRegistry resource.Registry, r
|
|||
return fmt.Errorf("storage backend cannot be nil")
|
||||
}
|
||||
|
||||
var tenancyBridge resourcegrpc.TenancyBridge
|
||||
if s.useV2Tenancy {
|
||||
tenancyBridge = tenancy.NewV2TenancyBridge().WithClient(
|
||||
// This assumes that the resource service will be registered with
|
||||
// the insecureUnsafeGRPCChan. We are using the insecure and unsafe
|
||||
// channel here because the V2 Tenancy bridge only reads data
|
||||
// from the client and does not modify it. Therefore sharing memory
|
||||
// with the resource services canonical immutable data is advantageous
|
||||
// to prevent wasting CPU time for every resource op to clone things.
|
||||
pbresource.NewResourceServiceClient(s.insecureUnsafeGRPCChan),
|
||||
)
|
||||
} else {
|
||||
tenancyBridge = NewV1TenancyBridge(s)
|
||||
}
|
||||
tenancyBridge := NewV1TenancyBridge(s)
|
||||
|
||||
// Create the Resource Service Server
|
||||
srv := resourcegrpc.NewServer(s.newResourceServiceConfig(typeRegistry, resolver, tenancyBridge))
|
||||
|
@ -510,14 +494,12 @@ func (s *Server) registerConnectCAServer(registrars ...grpc.ServiceRegistrar) er
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) registerDataplaneServer(deps Deps, registrars ...grpc.ServiceRegistrar) error {
|
||||
func (s *Server) registerDataplaneServer(registrars ...grpc.ServiceRegistrar) error {
|
||||
srv := dataplane.NewServer(dataplane.Config{
|
||||
GetStore: func() dataplane.StateStore { return s.FSM().State() },
|
||||
Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.Dataplane),
|
||||
ACLResolver: s.ACLResolver,
|
||||
Datacenter: s.config.Datacenter,
|
||||
EnableV2: stringslice.Contains(deps.Experiments, CatalogResourceExperimentName),
|
||||
ResourceAPIClient: pbresource.NewResourceServiceClient(s.insecureSafeGRPCChan),
|
||||
GetStore: func() dataplane.StateStore { return s.FSM().State() },
|
||||
Logger: s.loggers.Named(logging.GRPCAPI).Named(logging.Dataplane),
|
||||
ACLResolver: s.ACLResolver,
|
||||
Datacenter: s.config.Datacenter,
|
||||
})
|
||||
|
||||
for _, reg := range registrars {
|
||||
|
|
|
@ -19,10 +19,6 @@ import (
|
|||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/google/tcpproxy"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/memberlist"
|
||||
"github.com/hashicorp/raft"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/time/rate"
|
||||
|
@ -30,6 +26,10 @@ import (
|
|||
"google.golang.org/grpc/keepalive"
|
||||
|
||||
"github.com/hashicorp/consul-net-rpc/net/rpc"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/memberlist"
|
||||
"github.com/hashicorp/raft"
|
||||
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/consul/multilimiter"
|
||||
|
@ -43,7 +43,6 @@ import (
|
|||
"github.com/hashicorp/consul/agent/rpc/middleware"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/agent/token"
|
||||
proxytracker "github.com/hashicorp/consul/internal/mesh/proxy-tracker"
|
||||
"github.com/hashicorp/consul/ipaddr"
|
||||
"github.com/hashicorp/consul/sdk/freeport"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
|
@ -352,8 +351,7 @@ func newServerWithDeps(t testutil.TestingTB, c *Config, deps Deps) (*Server, err
|
|||
}
|
||||
}
|
||||
grpcServer := external.NewServer(deps.Logger.Named("grpc.external"), nil, deps.TLSConfigurator, rpcRate.NullRequestLimitsHandler(), keepalive.ServerParameters{}, nil)
|
||||
proxyUpdater := proxytracker.NewProxyTracker(proxytracker.ProxyTrackerConfig{})
|
||||
srv, err := NewServer(c, deps, grpcServer, nil, deps.Logger, proxyUpdater)
|
||||
srv, err := NewServer(c, deps, grpcServer, nil, deps.Logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1260,7 +1258,7 @@ func TestServer_RPC_MetricsIntercept_Off(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger, nil)
|
||||
s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
@ -1298,7 +1296,7 @@ func TestServer_RPC_MetricsIntercept_Off(t *testing.T) {
|
|||
return nil
|
||||
}
|
||||
|
||||
s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger, nil)
|
||||
s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
@ -1332,7 +1330,7 @@ func TestServer_RPC_RequestRecorder(t *testing.T) {
|
|||
deps := newDefaultDeps(t, conf)
|
||||
deps.NewRequestRecorderFunc = nil
|
||||
|
||||
s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger, nil)
|
||||
s1, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger)
|
||||
|
||||
require.Error(t, err, "need err when provider func is nil")
|
||||
require.Equal(t, err.Error(), "cannot initialize server without an RPC request recorder provider")
|
||||
|
@ -1351,7 +1349,7 @@ func TestServer_RPC_RequestRecorder(t *testing.T) {
|
|||
return nil
|
||||
}
|
||||
|
||||
s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger, nil)
|
||||
s2, err := NewServer(conf, deps, grpc.NewServer(), nil, deps.Logger)
|
||||
|
||||
require.Error(t, err, "need err when RequestRecorder is nil")
|
||||
require.Equal(t, err.Error(), "cannot initialize server with a nil RPC request recorder")
|
||||
|
@ -2315,7 +2313,7 @@ func TestServer_ControllerDependencies(t *testing.T) {
|
|||
|
||||
_, conf := testServerConfig(t)
|
||||
deps := newDefaultDeps(t, conf)
|
||||
deps.Experiments = []string{"resource-apis", "v2tenancy"}
|
||||
deps.Experiments = []string{"resource-apis"}
|
||||
deps.LeafCertManager = &leafcert.Manager{}
|
||||
|
||||
s1, err := newServerWithDeps(t, conf, deps)
|
||||
|
@ -2325,6 +2323,10 @@ func TestServer_ControllerDependencies(t *testing.T) {
|
|||
// gotest.tools/v3 defines CLI flags which are incompatible with the golden package
|
||||
// Once we eliminate gotest.tools/v3 from usage within Consul we could uncomment this
|
||||
// actual := fmt.Sprintf("```mermaid\n%s\n```", s1.controllerManager.CalculateDependencies(s1.registry.Types()).ToMermaid())
|
||||
// expected := golden.Get(t, actual, "v2-resource-dependencies")
|
||||
// markdownFileName := "v2-resource-dependencies"
|
||||
// if versiontest.IsEnterprise() {
|
||||
// markdownFileName += "-enterprise"
|
||||
// }
|
||||
// expected := golden.Get(t, actual, markdownFileName)
|
||||
// require.Equal(t, expected, actual)
|
||||
}
|
||||
|
|
|
@ -1,24 +1,5 @@
|
|||
```mermaid
|
||||
flowchart TD
|
||||
auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/namespacetrafficpermissions
|
||||
auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/partitiontrafficpermissions
|
||||
auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/trafficpermissions
|
||||
auth/v2beta1/computedtrafficpermissions --> auth/v2beta1/workloadidentity
|
||||
auth/v2beta1/namespacetrafficpermissions
|
||||
auth/v2beta1/partitiontrafficpermissions
|
||||
auth/v2beta1/trafficpermissions
|
||||
auth/v2beta1/workloadidentity
|
||||
catalog/v2beta1/computedfailoverpolicy --> catalog/v2beta1/failoverpolicy
|
||||
catalog/v2beta1/computedfailoverpolicy --> catalog/v2beta1/service
|
||||
catalog/v2beta1/failoverpolicy
|
||||
catalog/v2beta1/healthstatus
|
||||
catalog/v2beta1/node --> catalog/v2beta1/nodehealthstatus
|
||||
catalog/v2beta1/nodehealthstatus
|
||||
catalog/v2beta1/service
|
||||
catalog/v2beta1/serviceendpoints --> catalog/v2beta1/service
|
||||
catalog/v2beta1/serviceendpoints --> catalog/v2beta1/workload
|
||||
catalog/v2beta1/workload --> catalog/v2beta1/healthstatus
|
||||
catalog/v2beta1/workload --> catalog/v2beta1/node
|
||||
demo/v1/album
|
||||
demo/v1/artist
|
||||
demo/v1/concept
|
||||
|
@ -27,42 +8,12 @@ flowchart TD
|
|||
demo/v2/album
|
||||
demo/v2/artist
|
||||
hcp/v2/link
|
||||
hcp/v2/telemetrystate --> hcp/v2/link
|
||||
hcp/v2/telemetrystate
|
||||
internal/v1/tombstone
|
||||
mesh/v2beta1/computedexplicitdestinations --> catalog/v2beta1/service
|
||||
mesh/v2beta1/computedexplicitdestinations --> catalog/v2beta1/workload
|
||||
mesh/v2beta1/computedexplicitdestinations --> mesh/v2beta1/computedroutes
|
||||
mesh/v2beta1/computedexplicitdestinations --> mesh/v2beta1/destinations
|
||||
mesh/v2beta1/computedproxyconfiguration --> catalog/v2beta1/workload
|
||||
mesh/v2beta1/computedproxyconfiguration --> mesh/v2beta1/proxyconfiguration
|
||||
mesh/v2beta1/computedroutes --> catalog/v2beta1/computedfailoverpolicy
|
||||
mesh/v2beta1/computedroutes --> catalog/v2beta1/service
|
||||
mesh/v2beta1/computedroutes --> mesh/v2beta1/destinationpolicy
|
||||
mesh/v2beta1/computedroutes --> mesh/v2beta1/grpcroute
|
||||
mesh/v2beta1/computedroutes --> mesh/v2beta1/httproute
|
||||
mesh/v2beta1/computedroutes --> mesh/v2beta1/tcproute
|
||||
mesh/v2beta1/destinationpolicy
|
||||
mesh/v2beta1/destinations
|
||||
mesh/v2beta1/grpcroute
|
||||
mesh/v2beta1/httproute
|
||||
mesh/v2beta1/meshconfiguration
|
||||
mesh/v2beta1/meshgateway
|
||||
mesh/v2beta1/proxyconfiguration
|
||||
mesh/v2beta1/proxystatetemplate --> auth/v2beta1/computedtrafficpermissions
|
||||
mesh/v2beta1/proxystatetemplate --> catalog/v2beta1/service
|
||||
mesh/v2beta1/proxystatetemplate --> catalog/v2beta1/serviceendpoints
|
||||
mesh/v2beta1/proxystatetemplate --> catalog/v2beta1/workload
|
||||
mesh/v2beta1/proxystatetemplate --> mesh/v2beta1/computedexplicitdestinations
|
||||
mesh/v2beta1/proxystatetemplate --> mesh/v2beta1/computedproxyconfiguration
|
||||
mesh/v2beta1/proxystatetemplate --> mesh/v2beta1/computedroutes
|
||||
mesh/v2beta1/proxystatetemplate --> multicluster/v2/computedexportedservices
|
||||
mesh/v2beta1/tcproute
|
||||
multicluster/v2/computedexportedservices --> catalog/v2beta1/service
|
||||
multicluster/v2/computedexportedservices --> multicluster/v2/exportedservices
|
||||
multicluster/v2/computedexportedservices --> multicluster/v2/namespaceexportedservices
|
||||
multicluster/v2/computedexportedservices --> multicluster/v2/partitionexportedservices
|
||||
multicluster/v2/exportedservices
|
||||
multicluster/v2/namespaceexportedservices
|
||||
multicluster/v2/partitionexportedservices
|
||||
tenancy/v2beta1/namespace
|
||||
```
|
|
@ -4,14 +4,10 @@
|
|||
package consul
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/internal/auth"
|
||||
"github.com/hashicorp/consul/internal/catalog"
|
||||
"github.com/hashicorp/consul/internal/hcp"
|
||||
"github.com/hashicorp/consul/internal/mesh"
|
||||
"github.com/hashicorp/consul/internal/multicluster"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/resource/demo"
|
||||
"github.com/hashicorp/consul/internal/tenancy"
|
||||
)
|
||||
|
||||
// NewTypeRegistry returns a registry populated with all supported resource
|
||||
|
@ -25,10 +21,6 @@ func NewTypeRegistry() resource.Registry {
|
|||
registry := resource.NewRegistry()
|
||||
|
||||
demo.RegisterTypes(registry)
|
||||
mesh.RegisterTypes(registry)
|
||||
catalog.RegisterTypes(registry)
|
||||
auth.RegisterTypes(registry)
|
||||
tenancy.RegisterTypes(registry)
|
||||
multicluster.RegisterTypes(registry)
|
||||
hcp.RegisterTypes(registry)
|
||||
|
||||
|
|
|
@ -8,16 +8,12 @@ import (
|
|||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
"google.golang.org/protobuf/types/known/structpb"
|
||||
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/configentry"
|
||||
|
@ -50,72 +46,6 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G
|
|||
return nil, status.Error(codes.Unauthenticated, err.Error())
|
||||
}
|
||||
|
||||
if s.EnableV2 {
|
||||
// Get the workload.
|
||||
workloadId := &pbresource.ID{
|
||||
Name: proxyID,
|
||||
Tenancy: &pbresource.Tenancy{
|
||||
Namespace: req.Namespace,
|
||||
Partition: req.Partition,
|
||||
},
|
||||
Type: pbcatalog.WorkloadType,
|
||||
}
|
||||
workloadRsp, err := s.ResourceAPIClient.Read(ctx, &pbresource.ReadRequest{
|
||||
Id: workloadId,
|
||||
})
|
||||
if err != nil {
|
||||
// This error should already include the gRPC status code and so we don't need to wrap it
|
||||
// in status.Error.
|
||||
logger.Error("Error looking up workload", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
var workload pbcatalog.Workload
|
||||
err = workloadRsp.Resource.Data.UnmarshalTo(&workload)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, "failed to parse workload data")
|
||||
}
|
||||
|
||||
// Only workloads that have an associated identity can ask for proxy bootstrap parameters.
|
||||
if workload.Identity == "" {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "workload %q doesn't have identity associated with it", req.ProxyId)
|
||||
}
|
||||
|
||||
// verify identity:write is allowed. if not, give permission denied error.
|
||||
if err := authz.ToAllowAuthorizer().IdentityWriteAllowed(workload.Identity, &authzContext); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
computedProxyConfig, err := resource.GetDecodedResource[*pbmesh.ComputedProxyConfiguration](
|
||||
ctx,
|
||||
s.ResourceAPIClient,
|
||||
resource.ReplaceType(pbmesh.ComputedProxyConfigurationType, workloadId))
|
||||
|
||||
if err != nil {
|
||||
logger.Error("Error looking up ComputedProxyConfiguration for this workload", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rsp := &pbdataplane.GetEnvoyBootstrapParamsResponse{
|
||||
Identity: workload.Identity,
|
||||
Partition: workloadRsp.Resource.Id.Tenancy.Partition,
|
||||
Namespace: workloadRsp.Resource.Id.Tenancy.Namespace,
|
||||
Datacenter: s.Datacenter,
|
||||
NodeName: workload.NodeName,
|
||||
}
|
||||
|
||||
if computedProxyConfig != nil {
|
||||
if computedProxyConfig.GetData().GetDynamicConfig() != nil {
|
||||
rsp.AccessLogs = makeAccessLogs(computedProxyConfig.GetData().GetDynamicConfig().GetAccessLogs(), logger)
|
||||
}
|
||||
|
||||
rsp.BootstrapConfig = computedProxyConfig.GetData().GetBootstrapConfig()
|
||||
}
|
||||
|
||||
return rsp, nil
|
||||
}
|
||||
|
||||
// The remainder of this file focuses on v1 implementation of this endpoint.
|
||||
|
||||
store := s.GetStore()
|
||||
|
||||
_, svc, err := store.ServiceNode(req.GetNodeId(), req.GetNodeName(), proxyID, &entMeta, structs.DefaultPeerKeyword)
|
||||
|
@ -181,9 +111,9 @@ func (s *Server) GetEnvoyBootstrapParams(ctx context.Context, req *pbdataplane.G
|
|||
}, nil
|
||||
}
|
||||
|
||||
func makeAccessLogs(logs structs.AccessLogs, logger hclog.Logger) []string {
|
||||
func makeAccessLogs(logs *structs.AccessLogsConfig, logger hclog.Logger) []string {
|
||||
var accessLogs []string
|
||||
if logs.GetEnabled() {
|
||||
if logs.Enabled {
|
||||
envoyLoggers, err := accesslogs.MakeAccessLogs(logs, false)
|
||||
if err != nil {
|
||||
logger.Warn("Error creating the envoy access log config", "error", err)
|
||||
|
|
|
@ -18,18 +18,9 @@ import (
|
|||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/acl/resolver"
|
||||
external "github.com/hashicorp/consul/agent/grpc-external"
|
||||
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
|
||||
"github.com/hashicorp/consul/agent/grpc-external/testutils"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/internal/catalog"
|
||||
"github.com/hashicorp/consul/internal/mesh"
|
||||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
pbcatalog "github.com/hashicorp/consul/proto-public/pbcatalog/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbdataplane"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/proto/private/prototest"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -252,156 +243,6 @@ func TestGetEnvoyBootstrapParams_Success(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetEnvoyBootstrapParams_Success_EnableV2(t *testing.T) {
|
||||
type testCase struct {
|
||||
name string
|
||||
workloadData *pbcatalog.Workload
|
||||
proxyCfg *pbmesh.ComputedProxyConfiguration
|
||||
expBootstrapCfg *pbmesh.BootstrapConfig
|
||||
expAccessLogs string
|
||||
}
|
||||
|
||||
run := func(t *testing.T, tc testCase) {
|
||||
resourceClient := svctest.NewResourceServiceBuilder().
|
||||
WithRegisterFns(catalog.RegisterTypes, mesh.RegisterTypes).
|
||||
Run(t)
|
||||
|
||||
options := structs.QueryOptions{Token: testToken}
|
||||
ctx, err := external.ContextWithQueryOptions(context.Background(), options)
|
||||
require.NoError(t, err)
|
||||
|
||||
aclResolver := &MockACLResolver{}
|
||||
|
||||
server := NewServer(Config{
|
||||
Logger: hclog.NewNullLogger(),
|
||||
ACLResolver: aclResolver,
|
||||
Datacenter: serverDC,
|
||||
EnableV2: true,
|
||||
ResourceAPIClient: resourceClient,
|
||||
})
|
||||
client := testClient(t, server)
|
||||
|
||||
// Add required fields to workload data.
|
||||
tc.workloadData.Addresses = []*pbcatalog.WorkloadAddress{
|
||||
{
|
||||
Host: "127.0.0.1",
|
||||
},
|
||||
}
|
||||
tc.workloadData.Ports = map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp": {Port: 8080, Protocol: pbcatalog.Protocol_PROTOCOL_TCP},
|
||||
}
|
||||
workloadResource := resourcetest.Resource(pbcatalog.WorkloadType, "test-workload").
|
||||
WithData(t, tc.workloadData).
|
||||
WithTenancy(resource.DefaultNamespacedTenancy()).
|
||||
Write(t, resourceClient)
|
||||
|
||||
// Create computed proxy cfg resource.
|
||||
resourcetest.Resource(pbmesh.ComputedProxyConfigurationType, workloadResource.Id.Name).
|
||||
WithData(t, tc.proxyCfg).
|
||||
WithTenancy(resource.DefaultNamespacedTenancy()).
|
||||
Write(t, resourceClient)
|
||||
|
||||
req := &pbdataplane.GetEnvoyBootstrapParamsRequest{
|
||||
ProxyId: workloadResource.Id.Name,
|
||||
Namespace: workloadResource.Id.Tenancy.Namespace,
|
||||
Partition: workloadResource.Id.Tenancy.Partition,
|
||||
}
|
||||
|
||||
aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything).
|
||||
Return(testutils.ACLUseProvidedPolicy(t,
|
||||
&acl.Policy{
|
||||
PolicyRules: acl.PolicyRules{
|
||||
Services: []*acl.ServiceRule{
|
||||
{
|
||||
Name: workloadResource.Id.Name,
|
||||
Policy: acl.PolicyRead,
|
||||
},
|
||||
},
|
||||
Identities: []*acl.IdentityRule{
|
||||
{
|
||||
Name: testIdentity,
|
||||
Policy: acl.PolicyWrite,
|
||||
},
|
||||
},
|
||||
},
|
||||
}), nil)
|
||||
|
||||
resp, err := client.GetEnvoyBootstrapParams(ctx, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, tc.workloadData.Identity, resp.Identity)
|
||||
require.Equal(t, serverDC, resp.Datacenter)
|
||||
require.Equal(t, workloadResource.Id.Tenancy.Partition, resp.Partition)
|
||||
require.Equal(t, workloadResource.Id.Tenancy.Namespace, resp.Namespace)
|
||||
require.Equal(t, resp.NodeName, tc.workloadData.NodeName)
|
||||
prototest.AssertDeepEqual(t, tc.expBootstrapCfg, resp.BootstrapConfig)
|
||||
if tc.expAccessLogs != "" {
|
||||
require.JSONEq(t, tc.expAccessLogs, resp.AccessLogs[0])
|
||||
}
|
||||
}
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
name: "workload without node",
|
||||
workloadData: &pbcatalog.Workload{
|
||||
Identity: testIdentity,
|
||||
},
|
||||
expBootstrapCfg: nil,
|
||||
},
|
||||
{
|
||||
name: "workload with node",
|
||||
workloadData: &pbcatalog.Workload{
|
||||
Identity: testIdentity,
|
||||
NodeName: "test-node",
|
||||
},
|
||||
expBootstrapCfg: nil,
|
||||
},
|
||||
{
|
||||
name: "single proxy configuration",
|
||||
workloadData: &pbcatalog.Workload{
|
||||
Identity: testIdentity,
|
||||
},
|
||||
proxyCfg: &pbmesh.ComputedProxyConfiguration{
|
||||
BootstrapConfig: &pbmesh.BootstrapConfig{
|
||||
DogstatsdUrl: "dogstats-url",
|
||||
},
|
||||
},
|
||||
expBootstrapCfg: &pbmesh.BootstrapConfig{
|
||||
DogstatsdUrl: "dogstats-url",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple proxy configurations",
|
||||
workloadData: &pbcatalog.Workload{
|
||||
Identity: testIdentity,
|
||||
},
|
||||
proxyCfg: &pbmesh.ComputedProxyConfiguration{
|
||||
BootstrapConfig: &pbmesh.BootstrapConfig{
|
||||
DogstatsdUrl: "dogstats-url",
|
||||
StatsdUrl: "stats-url",
|
||||
},
|
||||
DynamicConfig: &pbmesh.DynamicConfig{
|
||||
AccessLogs: &pbmesh.AccessLogsConfig{
|
||||
Enabled: true,
|
||||
JsonFormat: "{ \"custom_field\": \"%START_TIME%\" }",
|
||||
},
|
||||
},
|
||||
},
|
||||
expBootstrapCfg: &pbmesh.BootstrapConfig{
|
||||
DogstatsdUrl: "dogstats-url",
|
||||
StatsdUrl: "stats-url",
|
||||
},
|
||||
expAccessLogs: testAccessLogs,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetEnvoyBootstrapParams_Error(t *testing.T) {
|
||||
type testCase struct {
|
||||
name string
|
||||
|
@ -483,100 +324,6 @@ func TestGetEnvoyBootstrapParams_Error(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
func TestGetEnvoyBootstrapParams_Error_EnableV2(t *testing.T) {
|
||||
type testCase struct {
|
||||
name string
|
||||
expectedErrCode codes.Code
|
||||
expecteErrMsg string
|
||||
workload *pbresource.Resource
|
||||
}
|
||||
|
||||
run := func(t *testing.T, tc testCase) {
|
||||
resourceClient := svctest.NewResourceServiceBuilder().
|
||||
WithRegisterFns(catalog.RegisterTypes, mesh.RegisterTypes).
|
||||
Run(t)
|
||||
|
||||
options := structs.QueryOptions{Token: testToken}
|
||||
ctx, err := external.ContextWithQueryOptions(context.Background(), options)
|
||||
require.NoError(t, err)
|
||||
|
||||
aclResolver := &MockACLResolver{}
|
||||
aclResolver.On("ResolveTokenAndDefaultMeta", testToken, mock.Anything, mock.Anything).
|
||||
Return(testutils.ACLServiceRead(t, "doesn't matter"), nil)
|
||||
|
||||
server := NewServer(Config{
|
||||
Logger: hclog.NewNullLogger(),
|
||||
ACLResolver: aclResolver,
|
||||
Datacenter: serverDC,
|
||||
EnableV2: true,
|
||||
ResourceAPIClient: resourceClient,
|
||||
})
|
||||
client := testClient(t, server)
|
||||
|
||||
var req pbdataplane.GetEnvoyBootstrapParamsRequest
|
||||
// Write the workload resource.
|
||||
if tc.workload != nil {
|
||||
_, err = resourceClient.Write(context.Background(), &pbresource.WriteRequest{
|
||||
Resource: tc.workload,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
req = pbdataplane.GetEnvoyBootstrapParamsRequest{
|
||||
ProxyId: tc.workload.Id.Name,
|
||||
Namespace: tc.workload.Id.Tenancy.Namespace,
|
||||
Partition: tc.workload.Id.Tenancy.Partition,
|
||||
}
|
||||
} else {
|
||||
req = pbdataplane.GetEnvoyBootstrapParamsRequest{
|
||||
ProxyId: "not-found",
|
||||
Namespace: "default",
|
||||
Partition: "default",
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := client.GetEnvoyBootstrapParams(ctx, &req)
|
||||
require.Nil(t, resp)
|
||||
require.Error(t, err)
|
||||
errStatus, ok := status.FromError(err)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, tc.expectedErrCode.String(), errStatus.Code().String())
|
||||
require.Equal(t, tc.expecteErrMsg, errStatus.Message())
|
||||
}
|
||||
|
||||
workload := resourcetest.Resource(pbcatalog.WorkloadType, "test-workload").
|
||||
WithData(t, &pbcatalog.Workload{
|
||||
Addresses: []*pbcatalog.WorkloadAddress{
|
||||
{Host: "127.0.0.1"},
|
||||
},
|
||||
Ports: map[string]*pbcatalog.WorkloadPort{
|
||||
"tcp": {Port: 8080},
|
||||
},
|
||||
}).
|
||||
WithTenancy(resource.DefaultNamespacedTenancy()).
|
||||
Build()
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
name: "workload doesn't exist",
|
||||
expectedErrCode: codes.NotFound,
|
||||
expecteErrMsg: "resource not found",
|
||||
},
|
||||
{
|
||||
name: "workload without identity",
|
||||
expectedErrCode: codes.InvalidArgument,
|
||||
expecteErrMsg: "workload \"test-workload\" doesn't have identity associated with it",
|
||||
workload: workload,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
run(t, tc)
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGetEnvoyBootstrapParams_Unauthenticated(t *testing.T) {
|
||||
// Mock the ACL resolver to return ErrNotFound.
|
||||
aclResolver := &MockACLResolver{}
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
package dataplane
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
@ -27,10 +26,6 @@ type Config struct {
|
|||
ACLResolver ACLResolver
|
||||
// Datacenter of the Consul server this gRPC server is hosted on
|
||||
Datacenter string
|
||||
|
||||
// EnableV2 indicates whether a feature flag for v2 APIs is provided.
|
||||
EnableV2 bool
|
||||
ResourceAPIClient pbresource.ResourceServiceClient
|
||||
}
|
||||
|
||||
type StateStore interface {
|
||||
|
|
|
@ -19,7 +19,6 @@ import (
|
|||
"github.com/hashicorp/consul/internal/resource"
|
||||
"github.com/hashicorp/consul/internal/storage"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1"
|
||||
)
|
||||
|
||||
// Delete deletes a resource.
|
||||
|
@ -200,17 +199,10 @@ func (s *Server) ensureDeleteRequestValid(req *pbresource.DeleteRequest) (*resou
|
|||
return nil, err
|
||||
}
|
||||
|
||||
if err = checkV2Tenancy(s.UseV2Tenancy, req.Id.Type); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := validateScopedTenancy(reg.Scope, reg.Type, req.Id.Tenancy, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := blockBuiltinsDeletion(reg.Type, req.Id); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reg, nil
|
||||
}
|
||||
|
||||
|
@ -220,12 +212,3 @@ func TombstoneNameFor(deleteId *pbresource.ID) string {
|
|||
// deleteId.Name is just included for easier identification
|
||||
return fmt.Sprintf("tombstone-%v-%v", deleteId.Name, strings.ToLower(deleteId.Uid))
|
||||
}
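For reference, the helper above yields names of the form `tombstone-<name>-<lowercased uid>`; the values in this sketch are fabricated:

```go
// Fabricated input purely for illustration.
id := &pbresource.ID{Name: "artist-one", Uid: "A1B2C3"}
name := TombstoneNameFor(id) // "tombstone-artist-one-a1b2c3"
_ = name
```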
|
||||
|
||||
func blockDefaultNamespaceDeletion(rtype *pbresource.Type, id *pbresource.ID) error {
|
||||
if id.Name == resource.DefaultNamespaceName &&
|
||||
id.Tenancy.Partition == resource.DefaultPartitionName &&
|
||||
resource.EqualType(rtype, pbtenancy.NamespaceType) {
|
||||
return status.Errorf(codes.InvalidArgument, "cannot delete default namespace")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
//go:build !consulent
|
||||
|
||||
package resource
|
||||
|
||||
import "github.com/hashicorp/consul/proto-public/pbresource"
|
||||
|
||||
func blockBuiltinsDeletion(rtype *pbresource.Type, id *pbresource.ID) error {
|
||||
if err := blockDefaultNamespaceDeletion(rtype, id); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -5,7 +5,6 @@ package resource_test
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
|
@ -22,7 +21,6 @@ import (
|
|||
"github.com/hashicorp/consul/internal/resource/demo"
|
||||
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1"
|
||||
pbdemo "github.com/hashicorp/consul/proto/private/pbdemo/v1"
|
||||
)
|
||||
|
||||
|
@ -137,37 +135,28 @@ func TestDelete_InputValidation(t *testing.T) {
|
|||
},
|
||||
}
|
||||
|
||||
for _, useV2Tenancy := range []bool{false, true} {
|
||||
t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) {
|
||||
client := svctest.NewResourceServiceBuilder().
|
||||
WithV2Tenancy(useV2Tenancy).
|
||||
WithRegisterFns(demo.RegisterTypes).
|
||||
Run(t)
|
||||
client := svctest.NewResourceServiceBuilder().
|
||||
WithRegisterFns(demo.RegisterTypes).
|
||||
Run(t)
|
||||
|
||||
for desc, tc := range testCases {
|
||||
t.Run(desc, func(t *testing.T) {
|
||||
run(t, client, tc)
|
||||
})
|
||||
}
|
||||
for desc, tc := range testCases {
|
||||
t.Run(desc, func(t *testing.T) {
|
||||
run(t, client, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDelete_TypeNotRegistered(t *testing.T) {
|
||||
for _, useV2Tenancy := range []bool{false, true} {
|
||||
t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) {
|
||||
client := svctest.NewResourceServiceBuilder().WithV2Tenancy(useV2Tenancy).Run(t)
|
||||
client := svctest.NewResourceServiceBuilder().Run(t)
|
||||
|
||||
artist, err := demo.GenerateV2Artist()
|
||||
require.NoError(t, err)
|
||||
artist, err := demo.GenerateV2Artist()
|
||||
require.NoError(t, err)
|
||||
|
||||
// delete artist with unregistered type
|
||||
_, err = client.Delete(context.Background(), &pbresource.DeleteRequest{Id: artist.Id, Version: ""})
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
|
||||
require.ErrorContains(t, err, "not registered")
|
||||
})
|
||||
}
|
||||
// delete artist with unregistered type
|
||||
_, err = client.Delete(context.Background(), &pbresource.DeleteRequest{Id: artist.Id, Version: ""})
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
|
||||
require.ErrorContains(t, err, "not registered")
|
||||
}
|
||||
|
||||
func TestDelete_ACLs(t *testing.T) {
|
||||
|
@ -274,15 +263,10 @@ func TestDelete_Success(t *testing.T) {
|
|||
t.Run(desc, func(t *testing.T) {
|
||||
for tenancyDesc, modFn := range tenancyCases() {
|
||||
t.Run(tenancyDesc, func(t *testing.T) {
|
||||
for _, useV2Tenancy := range []bool{false, true} {
|
||||
t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) {
|
||||
client := svctest.NewResourceServiceBuilder().
|
||||
WithV2Tenancy(useV2Tenancy).
|
||||
WithRegisterFns(demo.RegisterTypes).
|
||||
Run(t)
|
||||
run(t, client, tc, modFn)
|
||||
})
|
||||
}
|
||||
client := svctest.NewResourceServiceBuilder().
|
||||
WithRegisterFns(demo.RegisterTypes).
|
||||
Run(t)
|
||||
run(t, client, tc, modFn)
|
||||
})
|
||||
}
|
||||
})
|
||||
|
@ -338,46 +322,41 @@ func TestDelete_NonCAS_Retry(t *testing.T) {
|
|||
func TestDelete_TombstoneDeletionDoesNotCreateNewTombstone(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, useV2Tenancy := range []bool{false, true} {
|
||||
t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := svctest.NewResourceServiceBuilder().
|
||||
WithV2Tenancy(useV2Tenancy).
|
||||
WithRegisterFns(demo.RegisterTypes).
|
||||
Run(t)
|
||||
ctx := context.Background()
|
||||
client := svctest.NewResourceServiceBuilder().
|
||||
WithRegisterFns(demo.RegisterTypes).
|
||||
Run(t)
|
||||
|
||||
artist, err := demo.GenerateV2Artist()
|
||||
require.NoError(t, err)
|
||||
artist, err := demo.GenerateV2Artist()
|
||||
require.NoError(t, err)
|
||||
|
||||
rsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist})
|
||||
require.NoError(t, err)
|
||||
artist = rsp.Resource
|
||||
rsp, err := client.Write(ctx, &pbresource.WriteRequest{Resource: artist})
|
||||
require.NoError(t, err)
|
||||
artist = rsp.Resource
|
||||
|
||||
// delete artist
|
||||
_, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: artist.Id, Version: ""})
|
||||
require.NoError(t, err)
|
||||
// delete artist
|
||||
_, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: artist.Id, Version: ""})
|
||||
require.NoError(t, err)
|
||||
|
||||
// verify artist's tombstone created
|
||||
rsp2, err := client.Read(ctx, &pbresource.ReadRequest{
|
||||
Id: &pbresource.ID{
|
||||
Name: svc.TombstoneNameFor(artist.Id),
|
||||
Type: resource.TypeV1Tombstone,
|
||||
Tenancy: artist.Id.Tenancy,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
tombstone := rsp2.Resource
|
||||
// verify artist's tombstone created
|
||||
rsp2, err := client.Read(ctx, &pbresource.ReadRequest{
|
||||
Id: &pbresource.ID{
|
||||
Name: svc.TombstoneNameFor(artist.Id),
|
||||
Type: resource.TypeV1Tombstone,
|
||||
Tenancy: artist.Id.Tenancy,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
tombstone := rsp2.Resource
|
||||
|
||||
// delete artist's tombstone
|
||||
_, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: tombstone.Id, Version: tombstone.Version})
|
||||
require.NoError(t, err)
|
||||
// delete artist's tombstone
|
||||
_, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: tombstone.Id, Version: tombstone.Version})
|
||||
require.NoError(t, err)
|
||||
|
||||
// verify no new tombstones created and artist's existing tombstone deleted
|
||||
rsp3, err := client.List(ctx, &pbresource.ListRequest{Type: resource.TypeV1Tombstone, Tenancy: artist.Id.Tenancy})
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, rsp3.Resources)
|
||||
})
|
||||
}
|
||||
// verify no new tombstones created and artist's existing tombstone deleted
|
||||
rsp3, err := client.List(ctx, &pbresource.ListRequest{Type: resource.TypeV1Tombstone, Tenancy: artist.Id.Tenancy})
|
||||
require.NoError(t, err)
|
||||
require.Empty(t, rsp3.Resources)
|
||||
}
|
||||
|
||||
func TestDelete_NotFound(t *testing.T) {
|
||||
|
@ -392,18 +371,13 @@ func TestDelete_NotFound(t *testing.T) {
|
|||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
for _, useV2Tenancy := range []bool{false, true} {
|
||||
t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) {
|
||||
client := svctest.NewResourceServiceBuilder().
|
||||
WithV2Tenancy(useV2Tenancy).
|
||||
WithRegisterFns(demo.RegisterTypes).
|
||||
Run(t)
|
||||
client := svctest.NewResourceServiceBuilder().
|
||||
WithRegisterFns(demo.RegisterTypes).
|
||||
Run(t)
|
||||
|
||||
for desc, tc := range deleteTestCases() {
|
||||
t.Run(desc, func(t *testing.T) {
|
||||
run(t, client, tc)
|
||||
})
|
||||
}
|
||||
for desc, tc := range deleteTestCases() {
|
||||
t.Run(desc, func(t *testing.T) {
|
||||
run(t, client, tc)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -411,115 +385,86 @@ func TestDelete_NotFound(t *testing.T) {
|
|||
func TestDelete_VersionMismatch(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
for _, useV2Tenancy := range []bool{false, true} {
|
||||
t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) {
|
||||
client := svctest.NewResourceServiceBuilder().
|
||||
WithV2Tenancy(useV2Tenancy).
|
||||
WithRegisterFns(demo.RegisterTypes).
|
||||
Run(t)
|
||||
client := svctest.NewResourceServiceBuilder().
|
||||
WithRegisterFns(demo.RegisterTypes).
|
||||
Run(t)
|
||||
|
||||
artist, err := demo.GenerateV2Artist()
|
||||
require.NoError(t, err)
|
||||
rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: artist})
|
||||
require.NoError(t, err)
|
||||
artist, err := demo.GenerateV2Artist()
|
||||
require.NoError(t, err)
|
||||
rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: artist})
|
||||
require.NoError(t, err)
|
||||
|
||||
// delete with a version that is different from the stored version
|
||||
_, err = client.Delete(context.Background(), &pbresource.DeleteRequest{Id: rsp.Resource.Id, Version: "non-existent-version"})
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Aborted.String(), status.Code(err).String())
|
||||
require.ErrorContains(t, err, "CAS operation failed")
|
||||
})
|
||||
}
|
||||
// delete with a version that is different from the stored version
|
||||
_, err = client.Delete(context.Background(), &pbresource.DeleteRequest{Id: rsp.Resource.Id, Version: "non-existent-version"})
|
||||
require.Error(t, err)
|
||||
require.Equal(t, codes.Aborted.String(), status.Code(err).String())
|
||||
require.ErrorContains(t, err, "CAS operation failed")
|
||||
}
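The rejection above is the compare-and-swap path: a delete carrying a stale version fails with `codes.Aborted`. A caller usually recovers by re-reading the resource and retrying with the observed version; the helper below is an illustrative sketch, not part of the diff:

```go
package resource_test

import (
	"context"

	"github.com/hashicorp/consul/proto-public/pbresource"
)

// deleteWithCAS refreshes the version so the server-side CAS check passes.
// Illustrative only.
func deleteWithCAS(ctx context.Context, client pbresource.ResourceServiceClient, id *pbresource.ID) error {
	rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: id})
	if err != nil {
		return err
	}
	_, err = client.Delete(ctx, &pbresource.DeleteRequest{
		Id:      rsp.Resource.Id,
		Version: rsp.Resource.Version, // version observed by the read
	})
	return err
}
```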
|
||||
|
||||
func TestDelete_MarkedForDeletionWhenFinalizersPresent(t *testing.T) {
for _, useV2Tenancy := range []bool{false, true} {
t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) {
ctx := context.Background()
client := svctest.NewResourceServiceBuilder().
WithV2Tenancy(useV2Tenancy).
WithRegisterFns(demo.RegisterTypes).
Run(t)
ctx := context.Background()
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
Run(t)

// Create a resource with a finalizer
res := rtest.Resource(demo.TypeV1Artist, "manwithnoname").
WithTenancy(resource.DefaultClusteredTenancy()).
WithData(t, &pbdemo.Artist{Name: "Man With No Name"}).
WithMeta(resource.FinalizerKey, "finalizer1").
Write(t, client)

// Delete it
_, err := client.Delete(ctx, &pbresource.DeleteRequest{Id: res.Id})
require.NoError(t, err)

// Verify resource has been marked for deletion
rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: res.Id})
require.NoError(t, err)
require.True(t, resource.IsMarkedForDeletion(rsp.Resource))

// Delete again - should be no-op
_, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: res.Id})
require.NoError(t, err)

// Verify no-op by checking version still the same
rsp2, err := client.Read(ctx, &pbresource.ReadRequest{Id: res.Id})
require.NoError(t, err)
rtest.RequireVersionUnchanged(t, rsp2.Resource, rsp.Resource.Version)
})
}
}
func TestDelete_ImmediatelyDeletedAfterFinalizersRemoved(t *testing.T) {
for _, useV2Tenancy := range []bool{false, true} {
t.Run(fmt.Sprintf("v2tenancy %v", useV2Tenancy), func(t *testing.T) {
ctx := context.Background()
client := svctest.NewResourceServiceBuilder().
WithV2Tenancy(useV2Tenancy).
WithRegisterFns(demo.RegisterTypes).
Run(t)
ctx := context.Background()
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
Run(t)

// Create a resource with a finalizer
res := rtest.Resource(demo.TypeV1Artist, "manwithnoname").
WithTenancy(resource.DefaultClusteredTenancy()).
WithData(t, &pbdemo.Artist{Name: "Man With No Name"}).
WithMeta(resource.FinalizerKey, "finalizer1").
Write(t, client)

// Delete should mark it for deletion
_, err := client.Delete(ctx, &pbresource.DeleteRequest{Id: res.Id})
require.NoError(t, err)

// Remove the finalizer
rsp, err := client.Read(ctx, &pbresource.ReadRequest{Id: res.Id})
require.NoError(t, err)
resource.RemoveFinalizer(rsp.Resource, "finalizer1")
_, err = client.Write(ctx, &pbresource.WriteRequest{Resource: rsp.Resource})
require.NoError(t, err)

// Delete should be immediate
_, err = client.Delete(ctx, &pbresource.DeleteRequest{Id: rsp.Resource.Id})
require.NoError(t, err)

// Verify deleted
_, err = client.Read(ctx, &pbresource.ReadRequest{Id: rsp.Resource.Id})
require.Error(t, err)
require.Equal(t, codes.NotFound.String(), status.Code(err).String())
})
}
}
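The two tests above exercise the same two-phase lifecycle: a delete while finalizers remain only marks the resource for deletion, and the real delete happens once the finalizer set is empty. A minimal, self-contained sketch of that state machine follows; the metadata keys and helpers are illustrative stand-ins, not Consul's actual API.

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

const (
	finalizerKey         = "finalizers"
	deletionTimestampKey = "deletionTimestamp"
)

type resource struct {
	meta map[string]string
}

func (r *resource) hasFinalizers() bool {
	return strings.TrimSpace(r.meta[finalizerKey]) != ""
}

func (r *resource) markedForDeletion() bool {
	_, ok := r.meta[deletionTimestampKey]
	return ok
}

// deleteResource mimics the two-phase behaviour: mark when finalizers remain,
// otherwise delete immediately. It returns true once the resource is gone.
func deleteResource(store map[string]*resource, id string) bool {
	r, ok := store[id]
	if !ok {
		return true // already gone; deletes are idempotent
	}
	if r.hasFinalizers() {
		if !r.markedForDeletion() {
			r.meta[deletionTimestampKey] = time.Now().Format(time.RFC3339)
		}
		return false // frozen until finalizers are removed
	}
	delete(store, id)
	return true
}

func main() {
	store := map[string]*resource{
		"artist/manwithnoname": {meta: map[string]string{finalizerKey: "finalizer1"}},
	}

	fmt.Println(deleteResource(store, "artist/manwithnoname")) // false: only marked
	store["artist/manwithnoname"].meta[finalizerKey] = ""      // finalizer removed by its controller
	fmt.Println(deleteResource(store, "artist/manwithnoname")) // true: actually deleted
}
```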
func TestDelete_BlockDeleteDefaultNamespace(t *testing.T) {
client := svctest.NewResourceServiceBuilder().WithV2Tenancy(true).Run(t)

id := &pbresource.ID{
Name: resource.DefaultNamespaceName,
Type: pbtenancy.NamespaceType,
Tenancy: &pbresource.Tenancy{Partition: resource.DefaultPartitionName},
}
_, err := client.Delete(context.Background(), &pbresource.DeleteRequest{Id: id})
require.Error(t, err)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.ErrorContains(t, err, "cannot delete default namespace")
}

type deleteTestCase struct {
@ -104,10 +104,6 @@ func (s *Server) ensureListRequestValid(req *pbresource.ListRequest) (*resource.
// not enabled in the license.
_ = s.FeatureCheck(reg)

if err = checkV2Tenancy(s.UseV2Tenancy, req.Type); err != nil {
return nil, err
}

if err := validateWildcardTenancy(req.Tenancy, req.NamePrefix); err != nil {
return nil, err
}

@ -100,10 +100,6 @@ func (s *Server) ensureListByOwnerRequestValid(req *pbresource.ListByOwnerReques
return nil, err
}

if err = checkV2Tenancy(s.UseV2Tenancy, req.Owner.Type); err != nil {
return nil, err
}

if err = validateScopedTenancy(reg.Scope, reg.Type, req.Owner.Tenancy, true); err != nil {
return nil, err
}
@ -27,8 +27,6 @@ import (
"github.com/hashicorp/consul/proto/private/prototest"
)

// TODO: Update all tests to use true/false table test for v2tenancy

func TestListByOwner_InputValidation(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).

@ -27,8 +27,6 @@ import (
"github.com/hashicorp/consul/proto/private/prototest"
)

// TODO: Update all tests to use true/false table test for v2tenancy

func TestList_InputValidation(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
@ -127,10 +127,6 @@ func (s *Server) ensureResourceValid(res *pbresource.Resource, enforceLicenseChe
return nil, err
}

if err = checkV2Tenancy(s.UseV2Tenancy, res.Id.Type); err != nil {
return nil, err
}

// Check scope
if reg.Scope == resource.ScopePartition && res.Id.Tenancy.Namespace != "" {
return nil, status.Errorf(
@ -4,7 +4,6 @@
package resource_test

import (
"fmt"
"testing"

"github.com/stretchr/testify/require"

@ -34,18 +33,13 @@ func TestMutateAndValidate_InputValidation(t *testing.T) {
require.ErrorContains(t, err, tc.errContains)
}

for _, v2tenancy := range []bool{false, true} {
t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
WithV2Tenancy(v2tenancy).
Run(t)
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
Run(t)

for desc, tc := range resourceValidTestCases(t) {
t.Run(desc, func(t *testing.T) {
run(t, client, tc)
})
}
}
@ -66,39 +60,27 @@ func TestMutateAndValidate_OwnerValidation(t *testing.T) {
require.ErrorContains(t, err, tc.errorContains)
}

for _, v2tenancy := range []bool{false, true} {
t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
WithV2Tenancy(v2tenancy).
Run(t)
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
Run(t)

for desc, tc := range ownerValidationTestCases(t) {
t.Run(desc, func(t *testing.T) {
run(t, client, tc)
})
}
}
func TestMutateAndValidate_TypeNotFound(t *testing.T) {
run := func(t *testing.T, client pbresource.ResourceServiceClient) {
res, err := demo.GenerateV2Artist()
require.NoError(t, err)

_, err = client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: res})
require.Error(t, err)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered")
}

for _, v2tenancy := range []bool{false, true} {
t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) {
client := svctest.NewResourceServiceBuilder().WithV2Tenancy(v2tenancy).Run(t)
run(t, client)
})
}
client := svctest.NewResourceServiceBuilder().Run(t)

res, err := demo.GenerateV2Artist()
require.NoError(t, err)

_, err = client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: res})
require.Error(t, err)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered")
}
func TestMutateAndValidate_Success(t *testing.T) {

@ -114,72 +96,40 @@ func TestMutateAndValidate_Success(t *testing.T) {
prototest.AssertDeepEqual(t, tc.expectedTenancy, rsp.Resource.Id.Tenancy)
}

for _, v2tenancy := range []bool{false, true} {
t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
WithV2Tenancy(v2tenancy).
Run(t)
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
Run(t)

for desc, tc := range mavOrWriteSuccessTestCases(t) {
t.Run(desc, func(t *testing.T) {
run(t, client, tc)
})
}
}
func TestMutateAndValidate_Mutate(t *testing.T) {
for _, v2tenancy := range []bool{false, true} {
t.Run(fmt.Sprintf("v2tenancy %v", v2tenancy), func(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
WithV2Tenancy(v2tenancy).
Run(t)
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
Run(t)

artist, err := demo.GenerateV2Artist()
require.NoError(t, err)

artistData := &pbdemov2.Artist{}
artist.Data.UnmarshalTo(artistData)
require.NoError(t, err)

// mutate hook sets genre to disco when unspecified
artistData.Genre = pbdemov2.Genre_GENRE_UNSPECIFIED
artist.Data.MarshalFrom(artistData)
require.NoError(t, err)

rsp, err := client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: artist})
require.NoError(t, err)

// verify mutate hook set genre to disco
require.NoError(t, rsp.Resource.Data.UnmarshalTo(artistData))
require.Equal(t, pbdemov2.Genre_GENRE_DISCO, artistData.Genre)
})
}
}

func TestMutateAndValidate_Tenancy_NotFound(t *testing.T) {
for desc, tc := range mavOrWriteTenancyNotFoundTestCases(t) {
t.Run(desc, func(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithV2Tenancy(true).
WithRegisterFns(demo.RegisterTypes).
Run(t)

recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes")
require.NoError(t, err)

artist, err := demo.GenerateV2Artist()
require.NoError(t, err)

_, err = client.MutateAndValidate(testContext(t), &pbresource.MutateAndValidateRequest{Resource: tc.modFn(artist, recordLabel)})
require.Error(t, err)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.Contains(t, err.Error(), tc.errContains)
})
}
}
func TestMutateAndValidate_TenancyMarkedForDeletion_Fails(t *testing.T) {

@ -106,10 +106,6 @@ func (s *Server) ensureReadRequestValid(req *pbresource.ReadRequest) (*resource.
// not enabled in the license.
_ = s.FeatureCheck(reg)

if err = checkV2Tenancy(s.UseV2Tenancy, req.Id.Type); err != nil {
return nil, err
}

// Check scope
if err = validateScopedTenancy(reg.Scope, req.Id.Type, req.Id.Tenancy, false); err != nil {
return nil, err

@ -30,8 +30,6 @@ import (
"github.com/hashicorp/consul/sdk/testutil"
)

// TODO: Update all tests to use true/false table test for v2tenancy

func TestRead_InputValidation(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
@ -162,74 +160,6 @@ func TestRead_TypeNotFound(t *testing.T) {
require.Contains(t, err.Error(), "resource type demo.v2.Artist not registered")
}

func TestRead_ResourceNotFound(t *testing.T) {
for desc, tc := range readTestCases() {
t.Run(desc, func(t *testing.T) {
type tenancyCase struct {
modFn func(artistId, recordlabelId *pbresource.ID) *pbresource.ID
errContains string
}
tenancyCases := map[string]tenancyCase{
"resource not found by name": {
modFn: func(artistId, _ *pbresource.ID) *pbresource.ID {
artistId.Name = "bogusname"
return artistId
},
errContains: "resource not found",
},
"partition not found when namespace scoped": {
modFn: func(artistId, _ *pbresource.ID) *pbresource.ID {
id := clone(artistId)
id.Tenancy.Partition = "boguspartition"
return id
},
errContains: "partition not found",
},
"namespace not found when namespace scoped": {
modFn: func(artistId, _ *pbresource.ID) *pbresource.ID {
id := clone(artistId)
id.Tenancy.Namespace = "bogusnamespace"
return id
},
errContains: "namespace not found",
},
"partition not found when partition scoped": {
modFn: func(_, recordLabelId *pbresource.ID) *pbresource.ID {
id := clone(recordLabelId)
id.Tenancy.Partition = "boguspartition"
return id
},
errContains: "partition not found",
},
}
for tenancyDesc, tenancyCase := range tenancyCases {
t.Run(tenancyDesc, func(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithV2Tenancy(true).
WithRegisterFns(demo.RegisterTypes).
Run(t)

recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes")
require.NoError(t, err)
_, err = client.Write(context.Background(), &pbresource.WriteRequest{Resource: recordLabel})
require.NoError(t, err)

artist, err := demo.GenerateV2Artist()
require.NoError(t, err)
_, err = client.Write(context.Background(), &pbresource.WriteRequest{Resource: artist})
require.NoError(t, err)

// Each tenancy test case picks which resource to use based on the resource type's scope.
_, err = client.Read(tc.ctx, &pbresource.ReadRequest{Id: tenancyCase.modFn(artist.Id, recordLabel.Id)})
require.Error(t, err)
require.Equal(t, codes.NotFound.String(), status.Code(err).String())
require.ErrorContains(t, err, tenancyCase.errContains)
})
}
})
}
}

func TestRead_GroupVersionMismatch(t *testing.T) {
for desc, tc := range readTestCases() {
t.Run(desc, func(t *testing.T) {
@ -6,15 +6,11 @@
package resource

import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"

"github.com/hashicorp/go-hclog"

"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/proto-public/pbresource"
pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1"
)

func v2TenancyToV1EntMeta(tenancy *pbresource.Tenancy) *acl.EnterpriseMeta {

@ -31,15 +27,6 @@ func v1EntMetaToV2Tenancy(reg *resource.Registration, entMeta *acl.EnterpriseMet
}
}

// checkV2Tenancy returns FailedPrecondition error for namespace resource type
// when the "v2tenancy" feature flag is not enabled.
func checkV2Tenancy(useV2Tenancy bool, rtype *pbresource.Type) error {
if resource.EqualType(rtype, pbtenancy.NamespaceType) && !useV2Tenancy {
return status.Errorf(codes.FailedPrecondition, "use of the v2 namespace resource requires the \"v2tenancy\" feature flag")
}
return nil
}

type Config struct {
Logger hclog.Logger
Registry Registry
@ -50,11 +37,6 @@ type Config struct {
// TenancyBridge temporarily allows us to use V1 implementations of
// partitions and namespaces until V2 implementations are available.
TenancyBridge TenancyBridge

// UseV2Tenancy is true if the "v2tenancy" experiment is active, false otherwise.
// Attempts to create v2 tenancy resources (partition or namespace) will fail when the
// flag is false.
UseV2Tenancy bool
}

// FeatureCheck does not apply to the community edition.
@ -15,7 +15,6 @@ import (
"github.com/hashicorp/consul/agent/grpc-external/testutils"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/storage/inmem"
"github.com/hashicorp/consul/internal/tenancy"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/sdk/testutil"
)

@ -26,25 +25,14 @@ import (
// making requests.
func NewResourceServiceBuilder() *Builder {
b := &Builder{
useV2Tenancy: false,
registry: resource.NewRegistry(),
// Regardless of whether using mock of v2tenancy, always make sure
// the builtin tenancy exists.
registry: resource.NewRegistry(),
// Always make sure the builtin tenancy exists.
tenancies: []*pbresource.Tenancy{resource.DefaultNamespacedTenancy()},
cloning: true,
}
return b
}

// WithV2Tenancy configures which tenancy bridge is used.
//
// true => real v2 default partition and namespace via v2 tenancy bridge
// false => mock default partition and namespace since v1 tenancy bridge can't be used (not spinning up an entire server here)
func (b *Builder) WithV2Tenancy(useV2Tenancy bool) *Builder {
b.useV2Tenancy = useV2Tenancy
return b
}

// Registry provides access to the constructed registry post-Run() when
// needed by other test dependencies.
func (b *Builder) Registry() resource.Registry {
@ -106,33 +94,22 @@ func (b *Builder) Run(t testutil.TestingTB) pbresource.ResourceServiceClient {
t.Cleanup(cancel)
go backend.Run(ctx)

// Automatically add tenancy types if v2 tenancy enabled
if b.useV2Tenancy {
b.registerFns = append(b.registerFns, tenancy.RegisterTypes)
}

for _, registerFn := range b.registerFns {
registerFn(b.registry)
}

var tenancyBridge resource.TenancyBridge
if !b.useV2Tenancy {
// use mock tenancy bridge. default/default has already been added out of the box
mockTenancyBridge := &svc.MockTenancyBridge{}

for _, tenancy := range b.tenancies {
mockTenancyBridge.On("PartitionExists", tenancy.Partition).Return(true, nil)
mockTenancyBridge.On("NamespaceExists", tenancy.Partition, tenancy.Namespace).Return(true, nil)
mockTenancyBridge.On("IsPartitionMarkedForDeletion", tenancy.Partition).Return(false, nil)
mockTenancyBridge.On("IsNamespaceMarkedForDeletion", tenancy.Partition, tenancy.Namespace).Return(false, nil)
}

tenancyBridge = mockTenancyBridge
} else {
// use v2 tenancy bridge. population comes later after client injected.
tenancyBridge = tenancy.NewV2TenancyBridge()
tenancyBridge := mockTenancyBridge

if b.aclResolver == nil {
// When not provided (regardless of V1 tenancy or V2 tenancy), configure an ACL resolver
// that has ACLs disabled and fills in "default" for the partition and namespace when
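The mock bridge retained above is plain testify/mock wiring: every tenancy lookup is stubbed so the default partition and namespace exist and are not marked for deletion. For readers unfamiliar with that pattern, here is a self-contained sketch in the same `On(...).Return(...)` style; the trimmed `TenancyBridge` interface and method set are simplified stand-ins, not Consul's exact types.

```go
package tenancybridge_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// TenancyBridge is a trimmed-down, illustrative interface.
type TenancyBridge interface {
	PartitionExists(partition string) (bool, error)
	NamespaceExists(partition, namespace string) (bool, error)
}

// MockTenancyBridge records expectations with testify/mock.
type MockTenancyBridge struct {
	mock.Mock
}

func (m *MockTenancyBridge) PartitionExists(partition string) (bool, error) {
	args := m.Called(partition)
	return args.Bool(0), args.Error(1)
}

func (m *MockTenancyBridge) NamespaceExists(partition, namespace string) (bool, error) {
	args := m.Called(partition, namespace)
	return args.Bool(0), args.Error(1)
}

func TestMockBridge(t *testing.T) {
	b := &MockTenancyBridge{}
	// Stub the default tenancy, mirroring the builder code above.
	b.On("PartitionExists", "default").Return(true, nil)
	b.On("NamespaceExists", "default", "default").Return(true, nil)

	ok, err := b.PartitionExists("default")
	require.NoError(t, err)
	require.True(t, ok)

	ok, err = b.NamespaceExists("default", "default")
	require.NoError(t, err)
	require.True(t, ok)
}
```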
@ -172,22 +149,5 @@ func (b *Builder) Run(t testutil.TestingTB) pbresource.ResourceServiceClient {
client = pbresource.NewCloningResourceServiceClient(client)
}

// HACK ALERT: The client needs to be injected into the V2TenancyBridge
// after it has been created due the circular dependency. This will
// go away when the tenancy bridge is removed and V1 is no more, however
// long that takes.
switch config.TenancyBridge.(type) {
case *tenancy.V2TenancyBridge:
config.TenancyBridge.(*tenancy.V2TenancyBridge).WithClient(client)
// Default partition and namespace can finally be created
require.NoError(t, initTenancy(ctx, backend))

for _, tenancy := range b.tenancies {
if tenancy.Partition == resource.DefaultPartitionName && tenancy.Namespace == resource.DefaultNamespaceName {
continue
}
t.Fatalf("TODO: implement creation of passed in v2 tenancy: %v", tenancy)
}
}
return client
}
@ -14,13 +14,12 @@ import (
)

type Builder struct {
registry resource.Registry
registerFns []func(resource.Registry)
useV2Tenancy bool
tenancies []*pbresource.Tenancy
aclResolver svc.ACLResolver
serviceImpl *svc.Server
cloning bool
registry resource.Registry
registerFns []func(resource.Registry)
tenancies []*pbresource.Tenancy
aclResolver svc.ACLResolver
serviceImpl *svc.Server
cloning bool
}

func (b *Builder) ensureLicenseManager() {

@ -33,6 +32,5 @@ func (b *Builder) newConfig(logger hclog.Logger, backend svc.Backend, tenancyBri
Backend: backend,
ACLResolver: b.aclResolver,
TenancyBridge: tenancyBridge,
UseV2Tenancy: b.useV2Tenancy,
}
}
@ -6,19 +6,7 @@
package testing

import (
"context"
"errors"
"time"

"github.com/oklog/ulid/v2"
"google.golang.org/protobuf/types/known/anypb"

"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/internal/resource"
"github.com/hashicorp/consul/internal/storage"
"github.com/hashicorp/consul/internal/storage/inmem"
"github.com/hashicorp/consul/proto-public/pbresource"
pbtenancy "github.com/hashicorp/consul/proto-public/pbtenancy/v2beta1"
)

func FillEntMeta(entMeta *acl.EnterpriseMeta) {
@ -28,36 +16,3 @@ func FillEntMeta(entMeta *acl.EnterpriseMeta) {
func FillAuthorizerContext(authzContext *acl.AuthorizerContext) {
// nothing to to in CE.
}

// initTenancy creates the builtin v2 namespace resource only. The builtin
// v2 partition is not created because we're in CE.
func initTenancy(ctx context.Context, b *inmem.Backend) error {
nsData, err := anypb.New(&pbtenancy.Namespace{Description: "default namespace in default partition"})
if err != nil {
return err
}
nsID := &pbresource.ID{
Type: pbtenancy.NamespaceType,
Name: resource.DefaultNamespaceName,
Tenancy: resource.DefaultPartitionedTenancy(),
Uid: ulid.Make().String(),
}
read, err := b.Read(ctx, storage.StrongConsistency, nsID)
if err != nil && !errors.Is(err, storage.ErrNotFound) {
return err
}
if read == nil && errors.Is(err, storage.ErrNotFound) {
_, err = b.WriteCAS(ctx, &pbresource.Resource{
Id: nsID,
Generation: ulid.Make().String(),
Data: nsData,
Metadata: map[string]string{
"generated_at": time.Now().Format(time.RFC3339),
},
})
if err != nil {
return err
}
}
return nil
}
@ -130,10 +130,6 @@ func (s *Server) ensureWatchListRequestValid(req *pbresource.WatchListRequest) (
req.Tenancy = wildcardTenancyFor(reg.Scope)
}

if err = checkV2Tenancy(s.UseV2Tenancy, req.Type); err != nil {
return nil, err
}

if err := validateWildcardTenancy(req.Tenancy, req.NamePrefix); err != nil {
return nil, err
}
@ -27,8 +27,6 @@ import (
"github.com/hashicorp/consul/proto/private/prototest"
)

// TODO: Update all tests to use true/false table test for v2tenancy

func TestWatchList_InputValidation(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).

@ -23,8 +23,6 @@ import (
"github.com/hashicorp/consul/proto-public/pbresource"
)

// TODO: Update all tests to use true/false table test for v2tenancy

func TestWriteStatus_ACL(t *testing.T) {
type testCase struct {
authz resolver.Result
@ -371,66 +369,6 @@ func TestWriteStatus_Tenancy_Defaults(t *testing.T) {
}
}

func TestWriteStatus_Tenancy_NotFound(t *testing.T) {
for desc, tc := range map[string]struct {
scope resource.Scope
modFn func(req *pbresource.WriteStatusRequest)
errCode codes.Code
errContains string
}{
"namespaced resource provides nonexistant partition": {
scope: resource.ScopeNamespace,
modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Partition = "bad" },
errCode: codes.InvalidArgument,
errContains: "partition",
},
"namespaced resource provides nonexistant namespace": {
scope: resource.ScopeNamespace,
modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Namespace = "bad" },
errCode: codes.InvalidArgument,
errContains: "namespace",
},
"partitioned resource provides nonexistant partition": {
scope: resource.ScopePartition,
modFn: func(req *pbresource.WriteStatusRequest) { req.Id.Tenancy.Partition = "bad" },
errCode: codes.InvalidArgument,
errContains: "partition",
},
} {
t.Run(desc, func(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithV2Tenancy(true).
WithRegisterFns(demo.RegisterTypes).
Run(t)

// Pick resource based on scope of type in testcase.
var res *pbresource.Resource
var err error
switch tc.scope {
case resource.ScopeNamespace:
res, err = demo.GenerateV2Artist()
case resource.ScopePartition:
res, err = demo.GenerateV1RecordLabel("looney-tunes")
}
require.NoError(t, err)

// Fill in required fields so validation continues until tenancy is checked
req := validWriteStatusRequest(t, res)
req.Id.Uid = ulid.Make().String()
req.Status.ObservedGeneration = ulid.Make().String()

// Write status with tenancy modded by testcase.
tc.modFn(req)
_, err = client.WriteStatus(testContext(t), req)

// Verify non-existant tenancy field is the cause of the error.
require.Error(t, err)
require.Equal(t, tc.errCode.String(), status.Code(err).String())
require.Contains(t, err.Error(), tc.errContains)
})
}
}

func TestWriteStatus_CASFailure(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
@ -4,16 +4,13 @@
package resource_test

import (
"context"
"testing"
"time"

"github.com/oklog/ulid/v2"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/anypb"

"github.com/hashicorp/consul/acl/resolver"
svc "github.com/hashicorp/consul/agent/grpc-external/services/resource"

@ -22,14 +19,11 @@ import (
"github.com/hashicorp/consul/internal/resource/demo"
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
"github.com/hashicorp/consul/proto-public/pbresource"
pbdemo "github.com/hashicorp/consul/proto/private/pbdemo/v1"
pbdemov1 "github.com/hashicorp/consul/proto/private/pbdemo/v1"
pbdemov2 "github.com/hashicorp/consul/proto/private/pbdemo/v2"
"github.com/hashicorp/consul/proto/private/prototest"
)

// TODO: Update all tests to use true/false table test for v2tenancy

func TestWrite_InputValidation(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithRegisterFns(demo.RegisterTypes).
@ -186,46 +180,6 @@ func TestWrite_Create_Success(t *testing.T) {
}
}

func TestWrite_Create_Tenancy_NotFound(t *testing.T) {
for desc, tc := range mavOrWriteTenancyNotFoundTestCases(t) {
t.Run(desc, func(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithV2Tenancy(true).
WithRegisterFns(demo.RegisterTypes).
Run(t)

recordLabel, err := demo.GenerateV1RecordLabel("looney-tunes")
require.NoError(t, err)

artist, err := demo.GenerateV2Artist()
require.NoError(t, err)

_, err = client.Write(testContext(t), &pbresource.WriteRequest{Resource: tc.modFn(artist, recordLabel)})
require.Error(t, err)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.Contains(t, err.Error(), tc.errContains)
})
}
}

func TestWrite_Create_With_DeletionTimestamp_Fails(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithV2Tenancy(true).
WithRegisterFns(demo.RegisterTypes).
Run(t)

res := rtest.Resource(demo.TypeV1Artist, "blur").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbdemov1.Artist{Name: "Blur"}).
WithMeta(resource.DeletionTimestampKey, time.Now().Format(time.RFC3339)).
Build()

_, err := client.Write(testContext(t), &pbresource.WriteRequest{Resource: res})
require.Error(t, err)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.Contains(t, err.Error(), resource.DeletionTimestampKey)
}

func TestWrite_Create_With_TenancyMarkedForDeletion_Fails(t *testing.T) {
for desc, tc := range mavOrWriteTenancyMarkedForDeletionTestCases(t) {
t.Run(desc, func(t *testing.T) {
@ -690,239 +644,3 @@ func TestEnsureFinalizerRemoved(t *testing.T) {
})
}
}

func TestWrite_ResourceFrozenAfterMarkedForDeletion(t *testing.T) {
type testCase struct {
modFn func(res *pbresource.Resource)
errContains string
}
testCases := map[string]testCase{
"no-op write rejected": {
modFn: func(res *pbresource.Resource) {},
errContains: "cannot no-op write resource marked for deletion",
},
"remove one finalizer": {
modFn: func(res *pbresource.Resource) {
resource.RemoveFinalizer(res, "finalizer1")
},
},
"remove all finalizers": {
modFn: func(res *pbresource.Resource) {
resource.RemoveFinalizer(res, "finalizer1")
resource.RemoveFinalizer(res, "finalizer2")
},
},
"adding finalizer fails": {
modFn: func(res *pbresource.Resource) {
resource.AddFinalizer(res, "finalizer3")
},
errContains: "expected at least one finalizer to be removed",
},
"remove deletionTimestamp fails": {
modFn: func(res *pbresource.Resource) {
delete(res.Metadata, resource.DeletionTimestampKey)
},
errContains: "cannot remove deletionTimestamp",
},
"modify deletionTimestamp fails": {
modFn: func(res *pbresource.Resource) {
res.Metadata[resource.DeletionTimestampKey] = "bad"
},
errContains: "cannot modify deletionTimestamp",
},
"modify data fails": {
modFn: func(res *pbresource.Resource) {
var err error
res.Data, err = anypb.New(&pbdemo.Artist{Name: "New Order"})
require.NoError(t, err)
},
errContains: "cannot modify data",
},
}

for desc, tc := range testCases {
t.Run(desc, func(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithV2Tenancy(true).
WithRegisterFns(demo.RegisterTypes).
Run(t)

// Create a resource with finalizers
res := rtest.Resource(demo.TypeV1Artist, "joydivision").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbdemo.Artist{Name: "Joy Division"}).
WithMeta(resource.FinalizerKey, "finalizer1 finalizer2").
Write(t, client)

// Mark for deletion - resource should now be frozen
_, err := client.Delete(context.Background(), &pbresource.DeleteRequest{Id: res.Id})
require.NoError(t, err)

// Verify marked for deletion
rsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: res.Id})
require.NoError(t, err)
require.True(t, resource.IsMarkedForDeletion(rsp.Resource))

// Apply test case mods
tc.modFn(rsp.Resource)

// Verify write results
_, err = client.Write(context.Background(), &pbresource.WriteRequest{Resource: rsp.Resource})
if tc.errContains == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Equal(t, codes.InvalidArgument.String(), status.Code(err).String())
require.ErrorContains(t, err, tc.errContains)
}
})
}
}

func TestWrite_NonCASWritePreservesFinalizers(t *testing.T) {
type testCase struct {
existingMeta map[string]string
inputMeta map[string]string
expectedMeta map[string]string
}
testCases := map[string]testCase{
"input nil metadata preserves existing finalizers": {
inputMeta: nil,
existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"},
expectedMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"},
},
"input metadata and no finalizer key preserves existing finalizers": {
inputMeta: map[string]string{},
existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"},
expectedMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"},
},
"input metadata and with empty finalizer key overwrites existing finalizers": {
inputMeta: map[string]string{resource.FinalizerKey: ""},
existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"},
expectedMeta: map[string]string{resource.FinalizerKey: ""},
},
"input metadata with one finalizer key overwrites multiple existing finalizers": {
inputMeta: map[string]string{resource.FinalizerKey: "finalizer2"},
existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"},
expectedMeta: map[string]string{resource.FinalizerKey: "finalizer2"},
},
}

for desc, tc := range testCases {
t.Run(desc, func(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithV2Tenancy(true).
WithRegisterFns(demo.RegisterTypes).
Run(t)

// Create the resource based on tc.existingMetadata
builder := rtest.Resource(demo.TypeV1Artist, "joydivision").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbdemo.Artist{Name: "Joy"})

if tc.existingMeta != nil {
for k, v := range tc.existingMeta {
builder.WithMeta(k, v)
}
}
res := builder.Write(t, client)

// Build resource for user write based on tc.inputMetadata
builder = rtest.Resource(demo.TypeV1Artist, res.Id.Name).
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbdemo.Artist{Name: "Joy Division"})

if tc.inputMeta != nil {
for k, v := range tc.inputMeta {
builder.WithMeta(k, v)
}
}
userRes := builder.Build()

// Perform the user write
rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: userRes})
require.NoError(t, err)

// Verify write result preserved metadata based on testcase.expecteMetadata
for k := range tc.expectedMeta {
require.Equal(t, tc.expectedMeta[k], rsp.Resource.Metadata[k])
}
require.Equal(t, len(tc.expectedMeta), len(rsp.Resource.Metadata))
})
}
}

func TestWrite_NonCASWritePreservesDeletionTimestamp(t *testing.T) {
type testCase struct {
existingMeta map[string]string
inputMeta map[string]string
expectedMeta map[string]string
}

// deletionTimestamp has to be generated via Delete() call and can't be embedded in testdata
// even though testcase desc refers to it.
testCases := map[string]testCase{
"input metadata no deletion timestamp preserves existing deletion timestamp and removes single finalizer": {
inputMeta: map[string]string{resource.FinalizerKey: "finalizer1"},
existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"},
expectedMeta: map[string]string{resource.FinalizerKey: "finalizer1"},
},
"input metadata no deletion timestamp preserves existing deletion timestamp and removes all finalizers": {
inputMeta: map[string]string{resource.FinalizerKey: ""},
existingMeta: map[string]string{resource.FinalizerKey: "finalizer1 finalizer2"},
expectedMeta: map[string]string{resource.FinalizerKey: ""},
},
}

for desc, tc := range testCases {
t.Run(desc, func(t *testing.T) {
client := svctest.NewResourceServiceBuilder().
WithV2Tenancy(true).
WithRegisterFns(demo.RegisterTypes).
Run(t)

// Create the resource based on tc.existingMetadata
builder := rtest.Resource(demo.TypeV1Artist, "joydivision").
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbdemo.Artist{Name: "Joy Division"})

if tc.existingMeta != nil {
for k, v := range tc.existingMeta {
builder.WithMeta(k, v)
}
}
res := builder.Write(t, client)

// Mark for deletion
_, err := client.Delete(context.Background(), &pbresource.DeleteRequest{Id: res.Id})
require.NoError(t, err)

// Re-read the deleted res for future comparison of deletionTimestamp
delRsp, err := client.Read(context.Background(), &pbresource.ReadRequest{Id: res.Id})
require.NoError(t, err)

// Build resource for user write based on tc.inputMetadata
builder = rtest.Resource(demo.TypeV1Artist, res.Id.Name).
WithTenancy(resource.DefaultNamespacedTenancy()).
WithData(t, &pbdemo.Artist{Name: "Joy Division"})

if tc.inputMeta != nil {
for k, v := range tc.inputMeta {
builder.WithMeta(k, v)
}
}
userRes := builder.Build()

// Perform the non-CAS user write
rsp, err := client.Write(context.Background(), &pbresource.WriteRequest{Resource: userRes})
require.NoError(t, err)

// Verify write result preserved metadata based on testcase.expectedMetadata
for k := range tc.expectedMeta {
require.Equal(t, tc.expectedMeta[k], rsp.Resource.Metadata[k])
}
// Verify deletion timestamp preserved even though it wasn't passed in to the write
require.Equal(t, delRsp.Resource.Metadata[resource.DeletionTimestampKey], rsp.Resource.Metadata[resource.DeletionTimestampKey])
})
}
}
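The two non-CAS write tests removed above pin down a subtle merge rule: finalizers come from the incoming resource only when its metadata explicitly carries the finalizer key, while the stored deletion timestamp is always carried forward. A compact sketch of that merge rule under those assumptions (the map keys and helper are illustrative, not Consul's implementation):

```go
package main

import "fmt"

const (
	finalizerKey         = "finalizers"
	deletionTimestampKey = "deletionTimestamp"
)

// mergeMetadata applies the rules asserted by the removed tests:
//   - nil or finalizer-less input metadata keeps the stored finalizers
//   - an explicit finalizer key in the input (even "") wins
//   - the stored deletion timestamp is always preserved
func mergeMetadata(existing, input map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range input {
		out[k] = v
	}
	if _, ok := input[finalizerKey]; !ok {
		if v, ok := existing[finalizerKey]; ok {
			out[finalizerKey] = v
		}
	}
	if v, ok := existing[deletionTimestampKey]; ok {
		out[deletionTimestampKey] = v
	}
	return out
}

func main() {
	existing := map[string]string{
		finalizerKey:         "finalizer1 finalizer2",
		deletionTimestampKey: "2024-01-01T00:00:00Z",
	}

	// No finalizer key supplied: stored finalizers and timestamp survive.
	fmt.Println(mergeMetadata(existing, map[string]string{}))

	// Explicit single finalizer overwrites the stored set; timestamp still survives.
	fmt.Println(mergeMetadata(existing, map[string]string{finalizerKey: "finalizer2"}))
}
```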
@ -15,10 +15,11 @@ import (
"time"

"github.com/armon/go-metrics"
"github.com/hashicorp/serf/coordinate"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/hashicorp/serf/coordinate"

"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/sdk/testutil"

@ -27,25 +28,6 @@ import (
"github.com/hashicorp/consul/types"
)

func TestHealthEndpointsFailInV2(t *testing.T) {
t.Parallel()

a := NewTestAgent(t, `experiments = ["resource-apis"]`)

checkRequest := func(method, url string) {
t.Run(method+" "+url, func(t *testing.T) {
assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, "{}")
})
}

checkRequest("GET", "/v1/health/node/web")
checkRequest("GET", "/v1/health/checks/web")
checkRequest("GET", "/v1/health/state/web")
checkRequest("GET", "/v1/health/service/web")
checkRequest("GET", "/v1/health/connect/web")
checkRequest("GET", "/v1/health/ingress/web")
}

func TestHealthChecksInState(t *testing.T) {
if testing.Short() {
t.Skip("too slow for testing.Short")
@ -22,12 +22,13 @@ import (
"github.com/NYTimes/gziphandler"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/go-cleanhttp"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"

"github.com/hashicorp/go-cleanhttp"

"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/config"
@ -397,11 +398,6 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc
}
logURL = aclEndpointRE.ReplaceAllString(logURL, "$1<hidden>$4")

rejectCatalogV1Endpoint := false
if s.agent.baseDeps.UseV2Resources() {
rejectCatalogV1Endpoint = isV1CatalogRequest(req.URL.Path)
}

if s.denylist.Block(req.URL.Path) {
errMsg := "Endpoint is blocked by agent configuration"
httpLogger.Error("Request error",

@ -463,14 +459,6 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc
return strings.Contains(err.Error(), rate.ErrRetryLater.Error())
}

isUsingV2CatalogExperiment := func(err error) bool {
if err == nil {
return false
}

return structs.IsErrUsingV2CatalogExperiment(err)
}

isMethodNotAllowed := func(err error) bool {
_, ok := err.(MethodNotAllowedError)
return ok

@ -506,10 +494,6 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc
msg = s.Message()
}

if isUsingV2CatalogExperiment(err) && !isHTTPError(err) {
err = newRejectV1RequestWhenV2EnabledError()
}

switch {
case isForbidden(err):
resp.WriteHeader(http.StatusForbidden)

@ -586,12 +570,7 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc

if err == nil {
// Invoke the handler
if rejectCatalogV1Endpoint {
obj = nil
err = s.rejectV1RequestWhenV2Enabled()
} else {
obj, err = handler(resp, req)
}
obj, err = handler(resp, req)
}
}
contentType := "application/json"
@ -633,46 +612,6 @@ func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc
}
}

func isV1CatalogRequest(logURL string) bool {
switch {
case strings.HasPrefix(logURL, "/v1/catalog/"),
strings.HasPrefix(logURL, "/v1/health/"),
strings.HasPrefix(logURL, "/v1/config/"):
return true

case strings.HasPrefix(logURL, "/v1/agent/token/"),
logURL == "/v1/agent/self",
logURL == "/v1/agent/host",
logURL == "/v1/agent/version",
logURL == "/v1/agent/reload",
logURL == "/v1/agent/monitor",
logURL == "/v1/agent/metrics",
logURL == "/v1/agent/metrics/stream",
logURL == "/v1/agent/members",
strings.HasPrefix(logURL, "/v1/agent/join/"),
logURL == "/v1/agent/leave",
strings.HasPrefix(logURL, "/v1/agent/force-leave/"),
logURL == "/v1/agent/connect/authorize",
logURL == "/v1/agent/connect/ca/roots",
strings.HasPrefix(logURL, "/v1/agent/connect/ca/leaf/"):
return false

case strings.HasPrefix(logURL, "/v1/agent/"):
return true

case logURL == "/v1/internal/acl/authorize",
logURL == "/v1/internal/service-virtual-ip",
logURL == "/v1/internal/ui/oidc-auth-methods",
strings.HasPrefix(logURL, "/v1/internal/ui/metrics-proxy/"):
return false

case strings.HasPrefix(logURL, "/v1/internal/"):
return true
default:
return false
}
}

// marshalJSON marshals the object into JSON, respecting the user's pretty-ness
// configuration.
func (s *HTTPHandlers) marshalJSON(req *http.Request, obj interface{}) ([]byte, error) {
@ -1149,20 +1088,6 @@ func (s *HTTPHandlers) parseToken(req *http.Request, token *string) {
s.parseTokenWithDefault(req, token)
}

func (s *HTTPHandlers) rejectV1RequestWhenV2Enabled() error {
if s.agent.baseDeps.UseV2Resources() {
return newRejectV1RequestWhenV2EnabledError()
}
return nil
}

func newRejectV1RequestWhenV2EnabledError() error {
return HTTPError{
StatusCode: http.StatusBadRequest,
Reason: structs.ErrUsingV2CatalogExperiment.Error(),
}
}

func sourceAddrFromRequest(req *http.Request) string {
xff := req.Header.Get("X-Forwarded-For")
forwardHosts := strings.Split(xff, ",")
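The helpers removed above fed the agent's wrap() plumbing, which converts typed errors into HTTP status codes. As a general illustration of that pattern only (not Consul's actual handler; the type and handler names below are invented), a wrapper can inspect a typed error and pick the response code:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// httpError carries an explicit status code alongside a message, similar in
// spirit to the HTTPError value built by the removed helper.
type httpError struct {
	StatusCode int
	Reason     string
}

func (e httpError) Error() string { return e.Reason }

type handlerFunc func(*http.Request) (any, error)

// wrap converts a typed error returned by the handler into the matching
// status code, defaulting to 500 for anything unrecognised.
func wrap(h handlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		obj, err := h(req)
		if err != nil {
			var he httpError
			if errors.As(err, &he) {
				http.Error(w, he.Reason, he.StatusCode)
				return
			}
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		fmt.Fprintf(w, "%v", obj)
	}
}

func main() {
	reject := func(*http.Request) (any, error) {
		return nil, httpError{StatusCode: http.StatusBadRequest, Reason: "endpoint not supported"}
	}
	http.HandleFunc("/v1/catalog/services", wrap(reject))
	// http.ListenAndServe(":8500", nil) // left commented; this is only a sketch
}
```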
@ -230,15 +230,6 @@ func (m *Manager) generateNewLeaf(
var ipAddresses []net.IP

switch {
case req.WorkloadIdentity != "":
id = &connect.SpiffeIDWorkloadIdentity{
TrustDomain: roots.TrustDomain,
Partition: req.TargetPartition(),
Namespace: req.TargetNamespace(),
WorkloadIdentity: req.WorkloadIdentity,
}
dnsNames = append(dnsNames, req.DNSSAN...)

case req.Service != "":
id = &connect.SpiffeIDService{
Host: roots.TrustDomain,

@ -281,7 +272,7 @@ func (m *Manager) generateNewLeaf(
dnsNames = append(dnsNames, connect.PeeringServerSAN(req.Datacenter, roots.TrustDomain))

default:
return nil, newState, errors.New("URI must be either workload identity, service, agent, server, or kind")
return nil, newState, errors.New("URI must be either service, agent, server, or kind")
}

// Create a new private key
@ -180,16 +180,10 @@ func (s *TestSigner) SignCert(ctx context.Context, req *structs.CASignRequest) (
return nil, fmt.Errorf("error parsing CSR URI: %w", err)
}

var isService bool
var serviceID *connect.SpiffeIDService
var workloadID *connect.SpiffeIDWorkloadIdentity

switch spiffeID.(type) {
case *connect.SpiffeIDService:
isService = true
serviceID = spiffeID.(*connect.SpiffeIDService)
case *connect.SpiffeIDWorkloadIdentity:
workloadID = spiffeID.(*connect.SpiffeIDWorkloadIdentity)
default:
return nil, fmt.Errorf("unexpected spiffeID type %T", spiffeID)
}

@ -270,35 +264,19 @@ func (s *TestSigner) SignCert(ctx context.Context, req *structs.CASignRequest) (
}

index := s.nextIndex()
if isService {
// Service Spiffe ID case
return &structs.IssuedCert{
SerialNumber: connect.EncodeSerialNumber(leafCert.SerialNumber),
CertPEM: leafPEM,
Service: serviceID.Service,
ServiceURI: leafCert.URIs[0].String(),
ValidAfter: leafCert.NotBefore,
ValidBefore: leafCert.NotAfter,
RaftIndex: structs.RaftIndex{
CreateIndex: index,
ModifyIndex: index,
},
}, nil
} else {
// Workload identity Spiffe ID case
return &structs.IssuedCert{
SerialNumber: connect.EncodeSerialNumber(leafCert.SerialNumber),
CertPEM: leafPEM,
WorkloadIdentity: workloadID.WorkloadIdentity,
WorkloadIdentityURI: leafCert.URIs[0].String(),
ValidAfter: leafCert.NotBefore,
ValidBefore: leafCert.NotAfter,
RaftIndex: structs.RaftIndex{
CreateIndex: index,
ModifyIndex: index,
},
}, nil
}
// Service Spiffe ID case
return &structs.IssuedCert{
SerialNumber: connect.EncodeSerialNumber(leafCert.SerialNumber),
CertPEM: leafPEM,
Service: serviceID.Service,
ServiceURI: leafCert.URIs[0].String(),
ValidAfter: leafCert.NotBefore,
ValidBefore: leafCert.NotAfter,
RaftIndex: structs.RaftIndex{
CreateIndex: index,
ModifyIndex: index,
},
}, nil
}

type testRootsReader struct {
@ -31,27 +31,16 @@ type ConnectCALeafRequest struct {

// The following flags indicate the entity we are requesting a cert for.
// Only one of these must be specified.
WorkloadIdentity string // Given a WorkloadIdentity name, the request is for a SpiffeIDWorkload.
Service string // Given a Service name, not ID, the request is for a SpiffeIDService.
Agent string // Given an Agent name, not ID, the request is for a SpiffeIDAgent.
Kind structs.ServiceKind // Given "mesh-gateway", the request is for a SpiffeIDMeshGateway. No other kinds supported.
Server bool // If true, the request is for a SpiffeIDServer.
Service string // Given a Service name, not ID, the request is for a SpiffeIDService.
Agent string // Given an Agent name, not ID, the request is for a SpiffeIDAgent.
Kind structs.ServiceKind // Given "mesh-gateway", the request is for a SpiffeIDMeshGateway. No other kinds supported.
Server bool // If true, the request is for a SpiffeIDServer.
}

func (r *ConnectCALeafRequest) Key() string {
r.EnterpriseMeta.Normalize()

switch {
case r.WorkloadIdentity != "":
v, err := hashstructure.Hash([]any{
r.WorkloadIdentity,
r.EnterpriseMeta,
r.DNSSAN,
r.IPSAN,
}, nil)
if err == nil {
return fmt.Sprintf("workloadidentity:%d", v)
}
case r.Agent != "":
v, err := hashstructure.Hash([]any{
r.Agent,
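The Key() method above derives a per-entity cache key by hashing the request fields that affect the issued certificate; the Consul code uses hashstructure, as the hunk shows. A standalone approximation of the same idea using only the standard library follows; the field set and key prefixes here are hypothetical, chosen just to illustrate the technique.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// leafRequest is an illustrative stand-in for the cache request type.
type leafRequest struct {
	Agent   string
	Service string
	DNSSAN  []string
}

// key derives a stable string key from the fields that should invalidate the
// cached leaf certificate when they change.
func (r leafRequest) key() string {
	h := fnv.New64a()
	fmt.Fprintf(h, "agent=%s;service=%s;", r.Agent, r.Service)
	for _, san := range r.DNSSAN {
		fmt.Fprintf(h, "dns=%s;", san)
	}
	switch {
	case r.Agent != "":
		return fmt.Sprintf("agent:%d", h.Sum64())
	case r.Service != "":
		return fmt.Sprintf("service:%d", h.Sum64())
	default:
		return fmt.Sprintf("other:%d", h.Sum64())
	}
}

func main() {
	a := leafRequest{Service: "web", DNSSAN: []string{"web.example.com"}}
	b := leafRequest{Service: "web", DNSSAN: []string{"web.example.com", "api.example.com"}}
	fmt.Println(a.key()) // stable for identical inputs
	fmt.Println(b.key()) // differs once a SAN is added
}
```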
@ -17,8 +17,6 @@ import (
"github.com/hashicorp/consul/agent/local"
"github.com/hashicorp/consul/agent/proxycfg"
"github.com/hashicorp/consul/agent/structs"
proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
"github.com/hashicorp/consul/proto-public/pbresource"
)

const source proxycfg.ProxySource = "catalog"

@ -53,13 +51,11 @@ func NewConfigSource(cfg Config) *ConfigSource {

// Watch wraps the underlying proxycfg.Manager and dynamically registers
// services from the catalog with it when requested by the xDS server.
func (m *ConfigSource) Watch(id *pbresource.ID, nodeName string, token string) (<-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, proxysnapshot.CancelFunc, error) {
// Create service ID
serviceID := structs.NewServiceID(id.Name, GetEnterpriseMetaFromResourceID(id))
func (m *ConfigSource) Watch(serviceID structs.ServiceID, nodeName string, token string) (<-chan *proxycfg.ConfigSnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, context.CancelFunc, error) {
// If the service is registered to the local agent, use the LocalConfigSource
// rather than trying to configure it from the catalog.
if nodeName == m.NodeName && m.LocalState.ServiceExists(serviceID) {
return m.LocalConfigSource.Watch(id, nodeName, token)
return m.LocalConfigSource.Watch(serviceID, nodeName, token)
}

// Begin a session with the xDS session concurrency limiter.
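The signature change above reverts Watch to keying off a structs.ServiceID and returning concrete *proxycfg.ConfigSnapshot values plus a plain context.CancelFunc. The general shape of such a watch API, a receive-only snapshot channel paired with a cancel function, looks roughly like this standalone sketch; the types and ports are invented for illustration and are not Consul's.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// snapshot is a stand-in for a proxy configuration snapshot.
type snapshot struct {
	ServiceID string
	Port      int
}

// watch returns a channel of snapshots for the given service and a cancel
// function that stops the underlying goroutine, mirroring the
// "<-chan snapshot, context.CancelFunc" style of the interface above.
func watch(parent context.Context, serviceID string) (<-chan snapshot, context.CancelFunc) {
	ctx, cancel := context.WithCancel(parent)
	ch := make(chan snapshot)
	go func() {
		defer close(ch)
		port := 9999
		for {
			select {
			case <-ctx.Done():
				return
			case ch <- snapshot{ServiceID: serviceID, Port: port}:
				port++ // pretend the registration changed
				time.Sleep(10 * time.Millisecond)
			}
		}
	}()
	return ch, cancel
}

func main() {
	snapCh, cancel := watch(context.Background(), "web-sidecar-proxy-1")
	defer cancel()

	for i := 0; i < 2; i++ {
		snap := <-snapCh
		fmt.Printf("%s listening on %d\n", snap.ServiceID, snap.Port)
	}
}
```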
@ -290,7 +286,7 @@ type Config struct {

//go:generate mockery --name ConfigManager --inpackage
type ConfigManager interface {
Watch(req proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc)
Watch(req proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc)
Register(proxyID proxycfg.ProxyID, service *structs.NodeService, source proxycfg.ProxySource, token string, overwrite bool) error
Deregister(proxyID proxycfg.ProxyID, source proxycfg.ProxySource)
}

@ -303,7 +299,7 @@ type Store interface {

//go:generate mockery --name Watcher --inpackage
type Watcher interface {
Watch(proxyID *pbresource.ID, nodeName string, token string) (<-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, proxysnapshot.CancelFunc, error)
Watch(proxyID structs.ServiceID, nodeName string, token string) (<-chan *proxycfg.ConfigSnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, context.CancelFunc, error)
}

//go:generate mockery --name SessionLimiter --inpackage
@ -1,15 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

//go:build !consulent

package catalog

import (
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/proto-public/pbresource"
)

func GetEnterpriseMetaFromResourceID(id *pbresource.ID) *acl.EnterpriseMeta {
return acl.DefaultEnterpriseMeta()
}
@ -10,10 +10,11 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
"github.com/hashicorp/consul/agent/consul/state"
|
||||
"github.com/hashicorp/consul/agent/consul/stream"
|
||||
"github.com/hashicorp/consul/agent/grpc-external/limiter"
|
||||
|
@ -21,9 +22,6 @@ import (
|
|||
"github.com/hashicorp/consul/agent/proxycfg"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/agent/token"
|
||||
proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
|
||||
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
|
||||
)
|
||||
|
||||
func TestConfigSource_Success(t *testing.T) {
|
||||
|
@ -80,15 +78,15 @@ func TestConfigSource_Success(t *testing.T) {
|
|||
})
|
||||
t.Cleanup(mgr.Shutdown)
|
||||
|
||||
snapCh, termCh, _, cancelWatch1, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token)
|
||||
snapCh, termCh, _, cancelWatch1, err := mgr.Watch(serviceID, nodeName, token)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, session1TermCh, termCh)
|
||||
|
||||
// Expect Register to have been called with the proxy's initial port.
|
||||
select {
|
||||
case snap := <-snapCh:
|
||||
require.Equal(t, 9999, snap.(*proxycfg.ConfigSnapshot).Port)
|
||||
require.Equal(t, token, snap.(*proxycfg.ConfigSnapshot).ProxyID.Token)
|
||||
require.Equal(t, 9999, snap.Port)
|
||||
require.Equal(t, token, snap.ProxyID.Token)
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatal("timeout waiting for snapshot")
|
||||
}
|
||||
|
@ -112,7 +110,7 @@ func TestConfigSource_Success(t *testing.T) {
|
|||
// Expect Register to have been called again with the proxy's new port.
|
||||
select {
|
||||
case snap := <-snapCh:
|
||||
require.Equal(t, 8888, snap.(*proxycfg.ConfigSnapshot).Port)
|
||||
require.Equal(t, 8888, snap.Port)
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatal("timeout waiting for snapshot")
|
||||
}
|
||||
|
@ -131,13 +129,13 @@ func TestConfigSource_Success(t *testing.T) {
|
|||
require.Equal(t, map[string]any{
|
||||
"local_connect_timeout_ms": 123,
|
||||
"max_inbound_connections": 321,
|
||||
}, snap.(*proxycfg.ConfigSnapshot).Proxy.Config)
|
||||
}, snap.Proxy.Config)
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatal("timeout waiting for snapshot")
|
||||
}
|
||||
|
||||
// Start another watch.
|
||||
_, termCh2, _, cancelWatch2, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token)
|
||||
_, termCh2, _, cancelWatch2, err := mgr.Watch(serviceID, nodeName, token)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, session2TermCh, termCh2)
|
||||
|
||||
|
@ -171,7 +169,7 @@ func TestConfigSource_Success(t *testing.T) {
|
|||
|
||||
func TestConfigSource_LocallyManagedService(t *testing.T) {
|
||||
serviceID := structs.NewServiceID("web-sidecar-proxy-1", nil)
|
||||
proxyID := rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID()
|
||||
proxyID := serviceID
|
||||
nodeName := "node-1"
|
||||
token := "token"
|
||||
|
||||
|
@ -180,7 +178,7 @@ func TestConfigSource_LocallyManagedService(t *testing.T) {
|
|||
|
||||
localWatcher := NewMockWatcher(t)
|
||||
localWatcher.On("Watch", proxyID, nodeName, token).
|
||||
Return(make(<-chan proxysnapshot.ProxySnapshot), nil, nil, proxysnapshot.CancelFunc(func() {}), nil)
|
||||
Return(make(<-chan *proxycfg.ConfigSnapshot), nil, nil, context.CancelFunc(func() {}), nil)
|
||||
|
||||
mgr := NewConfigSource(Config{
|
||||
NodeName: nodeName,
|
||||
|
@ -214,12 +212,12 @@ func TestConfigSource_ErrorRegisteringService(t *testing.T) {
|
|||
}))
|
||||
|
||||
var canceledWatch bool
|
||||
cancel := proxysnapshot.CancelFunc(func() { canceledWatch = true })
|
||||
cancel := context.CancelFunc(func() { canceledWatch = true })
|
||||
|
||||
cfgMgr := NewMockConfigManager(t)
|
||||
|
||||
cfgMgr.On("Watch", mock.Anything).
|
||||
Return(make(<-chan proxysnapshot.ProxySnapshot), cancel)
|
||||
Return(make(<-chan *proxycfg.ConfigSnapshot), cancel)
|
||||
|
||||
cfgMgr.On("Register", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).
|
||||
Return(errors.New("KABOOM"))
|
||||
|
@ -239,7 +237,7 @@ func TestConfigSource_ErrorRegisteringService(t *testing.T) {
|
|||
})
|
||||
t.Cleanup(mgr.Shutdown)
|
||||
|
||||
_, _, _, _, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token)
|
||||
_, _, _, _, err := mgr.Watch(serviceID, nodeName, token)
|
||||
require.Error(t, err)
|
||||
require.True(t, canceledWatch, "watch should've been canceled")
|
||||
|
||||
|
@ -276,9 +274,9 @@ func TestConfigSource_ErrorInSyncLoop(t *testing.T) {
|
|||
NodeName: nodeName,
|
||||
Token: token,
|
||||
}
|
||||
snapCh := make(chan proxysnapshot.ProxySnapshot, 1)
|
||||
snapCh := make(chan *proxycfg.ConfigSnapshot, 1)
|
||||
cfgMgr.On("Watch", proxyID).
|
||||
Return((<-chan proxysnapshot.ProxySnapshot)(snapCh), proxysnapshot.CancelFunc(func() {}), nil)
|
||||
Return((<-chan *proxycfg.ConfigSnapshot)(snapCh), context.CancelFunc(func() {}), nil)
|
||||
|
||||
// Answer the register call successfully for session 1 starting (Repeatability = 1).
|
||||
// Session 2 should not have caused a re-register to happen.
|
||||
|
@ -330,21 +328,21 @@ func TestConfigSource_ErrorInSyncLoop(t *testing.T) {
|
|||
})
|
||||
t.Cleanup(mgr.Shutdown)
|
||||
|
||||
snapCh, termCh, cfgSrcTerminated1, cancelWatch1, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token)
|
||||
snapCh, termCh, cfgSrcTerminated1, cancelWatch1, err := mgr.Watch(serviceID, nodeName, token)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, session1TermCh, termCh)
|
||||
|
||||
// Expect Register to have been called with the proxy's initial port.
|
||||
select {
|
||||
case snap := <-snapCh:
|
||||
require.Equal(t, 9999, snap.(*proxycfg.ConfigSnapshot).Port)
|
||||
require.Equal(t, token, snap.(*proxycfg.ConfigSnapshot).ProxyID.Token)
|
||||
require.Equal(t, 9999, snap.Port)
|
||||
require.Equal(t, token, snap.ProxyID.Token)
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
t.Fatal("timeout waiting for snapshot")
|
||||
}
|
||||
|
||||
// Start another watch.
|
||||
_, termCh2, cfgSrcTerminated2, cancelWatch2, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token)
|
||||
_, termCh2, cfgSrcTerminated2, cancelWatch2, err := mgr.Watch(serviceID, nodeName, token)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, session2TermCh, termCh2)
|
||||
|
||||
|
@ -424,12 +422,12 @@ func TestConfigSource_NotProxyService(t *testing.T) {
|
|||
}))
|
||||
|
||||
var canceledWatch bool
|
||||
cancel := proxysnapshot.CancelFunc(func() { canceledWatch = true })
|
||||
cancel := context.CancelFunc(func() { canceledWatch = true })
|
||||
|
||||
cfgMgr := NewMockConfigManager(t)
|
||||
|
||||
cfgMgr.On("Watch", mock.Anything).
|
||||
Return(make(<-chan proxysnapshot.ProxySnapshot), cancel)
|
||||
Return(make(<-chan *proxycfg.ConfigSnapshot), cancel)
|
||||
|
||||
mgr := NewConfigSource(Config{
|
||||
Manager: cfgMgr,
|
||||
|
@ -440,7 +438,7 @@ func TestConfigSource_NotProxyService(t *testing.T) {
|
|||
})
|
||||
t.Cleanup(mgr.Shutdown)
|
||||
|
||||
_, _, _, _, err := mgr.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, serviceID.ID).ID(), nodeName, token)
|
||||
_, _, _, _, err := mgr.Watch(serviceID, nodeName, token)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "must be a sidecar proxy or gateway")
|
||||
require.True(t, canceledWatch, "watch should've been canceled")
|
||||
|
@ -457,7 +455,7 @@ func TestConfigSource_SessionLimiterError(t *testing.T) {
|
|||
t.Cleanup(src.Shutdown)
|
||||
|
||||
_, _, _, _, err := src.Watch(
|
||||
rtest.Resource(pbmesh.ProxyConfigurationType, "web-sidecar-proxy-1").ID(),
|
||||
structs.NewServiceID("web-sidecar-proxy-1", nil),
|
||||
"node-name",
|
||||
"token",
|
||||
)
|
||||
|
@ -475,9 +473,9 @@ func testConfigManager(t *testing.T, serviceID structs.ServiceID, nodeName strin
|
|||
Token: token,
|
||||
}
|
||||
|
||||
snapCh := make(chan proxysnapshot.ProxySnapshot, 1)
|
||||
snapCh := make(chan *proxycfg.ConfigSnapshot, 1)
|
||||
cfgMgr.On("Watch", proxyID).
|
||||
Return((<-chan proxysnapshot.ProxySnapshot)(snapCh), proxysnapshot.CancelFunc(func() {}), nil)
|
||||
Return((<-chan *proxycfg.ConfigSnapshot)(snapCh), context.CancelFunc(func() {}), nil)
|
||||
|
||||
cfgMgr.On("Register", mock.Anything, mock.Anything, source, token, false).
|
||||
Run(func(args mock.Arguments) {
|
||||
|
|
|
@ -5,8 +5,8 @@ package catalog
|
|||
import (
|
||||
proxycfg "github.com/hashicorp/consul/agent/proxycfg"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
"context"
|
||||
|
||||
proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
|
||||
|
||||
structs "github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
@ -36,27 +36,27 @@ func (_m *MockConfigManager) Register(proxyID proxycfg.ProxyID, service *structs
|
|||
}
|
||||
|
||||
// Watch provides a mock function with given fields: req
|
||||
func (_m *MockConfigManager) Watch(req proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) {
|
||||
func (_m *MockConfigManager) Watch(req proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc) {
|
||||
ret := _m.Called(req)
|
||||
|
||||
var r0 <-chan proxysnapshot.ProxySnapshot
|
||||
var r1 proxysnapshot.CancelFunc
|
||||
if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc)); ok {
|
||||
var r0 <-chan *proxycfg.ConfigSnapshot
|
||||
var r1 context.CancelFunc
|
||||
if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc)); ok {
|
||||
return rf(req)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) <-chan proxysnapshot.ProxySnapshot); ok {
|
||||
if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) <-chan *proxycfg.ConfigSnapshot); ok {
|
||||
r0 = rf(req)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(<-chan proxysnapshot.ProxySnapshot)
|
||||
r0 = ret.Get(0).(<-chan *proxycfg.ConfigSnapshot)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(proxycfg.ProxyID) proxysnapshot.CancelFunc); ok {
|
||||
if rf, ok := ret.Get(1).(func(proxycfg.ProxyID) context.CancelFunc); ok {
|
||||
r1 = rf(req)
|
||||
} else {
|
||||
if ret.Get(1) != nil {
|
||||
r1 = ret.Get(1).(proxysnapshot.CancelFunc)
|
||||
r1 = ret.Get(1).(context.CancelFunc)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -5,12 +5,9 @@ package catalog
|
|||
import (
|
||||
limiter "github.com/hashicorp/consul/agent/grpc-external/limiter"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
pbresource "github.com/hashicorp/consul/proto-public/pbresource"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
proxycfg "github.com/hashicorp/consul/agent/proxycfg"
|
||||
|
||||
proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
|
||||
"context"
|
||||
)
|
||||
|
||||
// MockWatcher is an autogenerated mock type for the Watcher type
|
||||
|
@ -19,26 +16,26 @@ type MockWatcher struct {
|
|||
}
|
||||
|
||||
// Watch provides a mock function with given fields: proxyID, nodeName, token
|
||||
func (_m *MockWatcher) Watch(proxyID *pbresource.ID, nodeName string, token string) (<-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, proxysnapshot.CancelFunc, error) {
|
||||
func (_m *MockWatcher) Watch(proxyID structs.ServiceID, nodeName string, token string) (<-chan *proxycfg.ConfigSnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, context.CancelFunc, error) {
|
||||
ret := _m.Called(proxyID, nodeName, token)
|
||||
|
||||
var r0 <-chan proxysnapshot.ProxySnapshot
|
||||
var r0 <-chan *proxycfg.ConfigSnapshot
|
||||
var r1 limiter.SessionTerminatedChan
|
||||
var r2 proxycfg.SrcTerminatedChan
|
||||
var r3 proxysnapshot.CancelFunc
|
||||
var r3 context.CancelFunc
|
||||
var r4 error
|
||||
if rf, ok := ret.Get(0).(func(*pbresource.ID, string, string) (<-chan proxysnapshot.ProxySnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, proxysnapshot.CancelFunc, error)); ok {
|
||||
if rf, ok := ret.Get(0).(func(structs.ServiceID, string, string) (<-chan *proxycfg.ConfigSnapshot, limiter.SessionTerminatedChan, proxycfg.SrcTerminatedChan, context.CancelFunc, error)); ok {
|
||||
return rf(proxyID, nodeName, token)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(*pbresource.ID, string, string) <-chan proxysnapshot.ProxySnapshot); ok {
|
||||
if rf, ok := ret.Get(0).(func(structs.ServiceID, string, string) <-chan *proxycfg.ConfigSnapshot); ok {
|
||||
r0 = rf(proxyID, nodeName, token)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(<-chan proxysnapshot.ProxySnapshot)
|
||||
r0 = ret.Get(0).(<-chan *proxycfg.ConfigSnapshot)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(*pbresource.ID, string, string) limiter.SessionTerminatedChan); ok {
|
||||
if rf, ok := ret.Get(1).(func(structs.ServiceID, string, string) limiter.SessionTerminatedChan); ok {
|
||||
r1 = rf(proxyID, nodeName, token)
|
||||
} else {
|
||||
if ret.Get(1) != nil {
|
||||
|
@ -46,7 +43,7 @@ func (_m *MockWatcher) Watch(proxyID *pbresource.ID, nodeName string, token stri
|
|||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(2).(func(*pbresource.ID, string, string) proxycfg.SrcTerminatedChan); ok {
|
||||
if rf, ok := ret.Get(2).(func(structs.ServiceID, string, string) proxycfg.SrcTerminatedChan); ok {
|
||||
r2 = rf(proxyID, nodeName, token)
|
||||
} else {
|
||||
if ret.Get(2) != nil {
|
||||
|
@ -54,15 +51,15 @@ func (_m *MockWatcher) Watch(proxyID *pbresource.ID, nodeName string, token stri
|
|||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(3).(func(*pbresource.ID, string, string) proxysnapshot.CancelFunc); ok {
|
||||
if rf, ok := ret.Get(3).(func(structs.ServiceID, string, string) context.CancelFunc); ok {
|
||||
r3 = rf(proxyID, nodeName, token)
|
||||
} else {
|
||||
if ret.Get(3) != nil {
|
||||
r3 = ret.Get(3).(proxysnapshot.CancelFunc)
|
||||
r3 = ret.Get(3).(context.CancelFunc)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(4).(func(*pbresource.ID, string, string) error); ok {
|
||||
if rf, ok := ret.Get(4).(func(structs.ServiceID, string, string) error); ok {
|
||||
r4 = rf(proxyID, nodeName, token)
|
||||
} else {
|
||||
r4 = ret.Error(4)
|
||||
|
|
|
@ -4,12 +4,11 @@
|
|||
package local
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/consul/agent/grpc-external/limiter"
|
||||
"github.com/hashicorp/consul/agent/proxycfg"
|
||||
"github.com/hashicorp/consul/agent/proxycfg-sources/catalog"
|
||||
structs "github.com/hashicorp/consul/agent/structs"
|
||||
proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
)
|
||||
|
||||
// ConfigSource wraps a proxycfg.Manager to create watches on services
|
||||
|
@ -23,14 +22,13 @@ func NewConfigSource(cfgMgr ConfigManager) *ConfigSource {
|
|||
return &ConfigSource{cfgMgr}
|
||||
}
|
||||
|
||||
func (m *ConfigSource) Watch(proxyID *pbresource.ID, nodeName string, _ string) (
|
||||
<-chan proxysnapshot.ProxySnapshot,
|
||||
func (m *ConfigSource) Watch(serviceID structs.ServiceID, nodeName string, _ string) (
|
||||
<-chan *proxycfg.ConfigSnapshot,
|
||||
limiter.SessionTerminatedChan,
|
||||
proxycfg.SrcTerminatedChan,
|
||||
proxysnapshot.CancelFunc,
|
||||
context.CancelFunc,
|
||||
error,
|
||||
) {
|
||||
serviceID := structs.NewServiceID(proxyID.Name, catalog.GetEnterpriseMetaFromResourceID(proxyID))
|
||||
watchCh, cancelWatch := m.manager.Watch(proxycfg.ProxyID{
|
||||
ServiceID: serviceID,
|
||||
NodeName: nodeName,
|
||||
|
|
|
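A sketch of how the local source is wired and consumed after the change (constructor and type names as they appear in this diff; the call site itself is hypothetical):

```go
package example

import (
	"context"

	"github.com/hashicorp/consul/agent/proxycfg"
	"github.com/hashicorp/consul/agent/proxycfg-sources/local"
	"github.com/hashicorp/consul/agent/structs"
)

// watchLocal builds a local ConfigSource around a proxycfg manager and
// watches a service by plain ServiceID; the ACL token argument is ignored
// for locally registered services (hence the "_" parameter above).
func watchLocal(mgr *proxycfg.Manager, nodeName string) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc, error) {
	src := local.NewConfigSource(mgr)
	snapCh, _, _, cancel, err := src.Watch(structs.NewServiceID("web-sidecar-proxy", nil), nodeName, "")
	return snapCh, cancel, err
}
```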
@ -5,8 +5,8 @@ package local
|
|||
import (
|
||||
proxycfg "github.com/hashicorp/consul/agent/proxycfg"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
"context"
|
||||
|
||||
proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
|
||||
|
||||
structs "github.com/hashicorp/consul/agent/structs"
|
||||
)
|
||||
|
@ -52,27 +52,27 @@ func (_m *MockConfigManager) RegisteredProxies(source proxycfg.ProxySource) []pr
|
|||
}
|
||||
|
||||
// Watch provides a mock function with given fields: id
|
||||
func (_m *MockConfigManager) Watch(id proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) {
|
||||
func (_m *MockConfigManager) Watch(id proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc) {
|
||||
ret := _m.Called(id)
|
||||
|
||||
var r0 <-chan proxysnapshot.ProxySnapshot
|
||||
var r1 proxysnapshot.CancelFunc
|
||||
if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc)); ok {
|
||||
var r0 <-chan *proxycfg.ConfigSnapshot
|
||||
var r1 context.CancelFunc
|
||||
if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc)); ok {
|
||||
return rf(id)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) <-chan proxysnapshot.ProxySnapshot); ok {
|
||||
if rf, ok := ret.Get(0).(func(proxycfg.ProxyID) <-chan *proxycfg.ConfigSnapshot); ok {
|
||||
r0 = rf(id)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(<-chan proxysnapshot.ProxySnapshot)
|
||||
r0 = ret.Get(0).(<-chan *proxycfg.ConfigSnapshot)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(proxycfg.ProxyID) proxysnapshot.CancelFunc); ok {
|
||||
if rf, ok := ret.Get(1).(func(proxycfg.ProxyID) context.CancelFunc); ok {
|
||||
r1 = rf(id)
|
||||
} else {
|
||||
if ret.Get(1) != nil {
|
||||
r1 = ret.Get(1).(proxysnapshot.CancelFunc)
|
||||
r1 = ret.Get(1).(context.CancelFunc)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -7,8 +7,6 @@ import (
|
|||
"context"
|
||||
"time"
|
||||
|
||||
proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
"github.com/hashicorp/consul/agent/local"
|
||||
|
@ -148,7 +146,7 @@ func sync(cfg SyncConfig) {
|
|||
|
||||
//go:generate mockery --name ConfigManager --inpackage
|
||||
type ConfigManager interface {
|
||||
Watch(id proxycfg.ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc)
|
||||
Watch(id proxycfg.ProxyID) (<-chan *proxycfg.ConfigSnapshot, context.CancelFunc)
|
||||
Register(proxyID proxycfg.ProxyID, service *structs.NodeService, source proxycfg.ProxySource, token string, overwrite bool) error
|
||||
Deregister(proxyID proxycfg.ProxyID, source proxycfg.ProxySource)
|
||||
RegisteredProxies(source proxycfg.ProxySource) []proxycfg.ProxyID
|
||||
|
|
|
@ -4,17 +4,17 @@
|
|||
package proxycfg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/consul/lib/channels"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
|
||||
"github.com/hashicorp/consul/lib/channels"
|
||||
"github.com/hashicorp/consul/tlsutil"
|
||||
)
|
||||
|
||||
|
@ -58,7 +58,7 @@ type Manager struct {
|
|||
|
||||
mu sync.Mutex
|
||||
proxies map[ProxyID]*state
|
||||
watchers map[ProxyID]map[uint64]chan proxysnapshot.ProxySnapshot
|
||||
watchers map[ProxyID]map[uint64]chan *ConfigSnapshot
|
||||
maxWatchID uint64
|
||||
}
|
||||
|
||||
|
@ -109,7 +109,7 @@ func NewManager(cfg ManagerConfig) (*Manager, error) {
|
|||
m := &Manager{
|
||||
ManagerConfig: cfg,
|
||||
proxies: make(map[ProxyID]*state),
|
||||
watchers: make(map[ProxyID]map[uint64]chan proxysnapshot.ProxySnapshot),
|
||||
watchers: make(map[ProxyID]map[uint64]chan *ConfigSnapshot),
|
||||
rateLimiter: rate.NewLimiter(cfg.UpdateRateLimit, 1),
|
||||
}
|
||||
return m, nil
|
||||
|
@ -265,12 +265,12 @@ func (m *Manager) notify(snap *ConfigSnapshot) {
|
|||
// it will drain the chan and then re-attempt delivery so that a slow consumer
|
||||
// gets the latest config earlier. This MUST be called from a method where m.mu
|
||||
// is held to be safe since it assumes we are the only goroutine sending on ch.
|
||||
func (m *Manager) deliverLatest(snap proxysnapshot.ProxySnapshot, ch chan proxysnapshot.ProxySnapshot) {
|
||||
m.Logger.Trace("delivering latest proxy snapshot to proxy", "proxyID", snap.(*ConfigSnapshot).ProxyID)
|
||||
func (m *Manager) deliverLatest(snap *ConfigSnapshot, ch chan *ConfigSnapshot) {
|
||||
m.Logger.Trace("delivering latest proxy snapshot to proxy", "proxyID", snap.ProxyID)
|
||||
err := channels.DeliverLatest(snap, ch)
|
||||
if err != nil {
|
||||
m.Logger.Error("failed to deliver proxyState to proxy",
|
||||
"proxy", snap.(*ConfigSnapshot).ProxyID,
|
||||
"proxy", snap.ProxyID,
|
||||
)
|
||||
}
|
||||
|
||||
|
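The drain-and-resend behavior described above is the usual "latest value wins" channel idiom; the following is a generic sketch of the pattern, not the lib/channels implementation itself, just an illustration of what it guarantees:

```go
package example

// deliverLatest tries a non-blocking send; if the 1-buffered channel still
// holds a stale snapshot, it drains that value and sends the newer one.
// Only safe when a single goroutine ever sends on ch, as the comment above notes.
func deliverLatest[T any](v T, ch chan T) {
	select {
	case ch <- v: // fast path: buffer had room
		return
	default:
	}
	select {
	case <-ch: // drop the stale value
	default: // consumer got there first; buffer is free now
	}
	select {
	case ch <- v:
	default:
		// Still full: another value raced in; the real code logs this as an error.
	}
}
```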
@ -280,16 +280,16 @@ func (m *Manager) deliverLatest(snap proxysnapshot.ProxySnapshot, ch chan proxys
|
|||
// will not fail, but no updates will be delivered until the proxy is
|
||||
// registered. If there is already a valid snapshot in memory, it will be
|
||||
// delivered immediately.
|
||||
func (m *Manager) Watch(id ProxyID) (<-chan proxysnapshot.ProxySnapshot, proxysnapshot.CancelFunc) {
|
||||
func (m *Manager) Watch(id ProxyID) (<-chan *ConfigSnapshot, context.CancelFunc) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// This buffering is crucial otherwise we'd block immediately trying to
|
||||
// deliver the current snapshot below if we already have one.
|
||||
ch := make(chan proxysnapshot.ProxySnapshot, 1)
|
||||
ch := make(chan *ConfigSnapshot, 1)
|
||||
watchers, ok := m.watchers[id]
|
||||
if !ok {
|
||||
watchers = make(map[uint64]chan proxysnapshot.ProxySnapshot)
|
||||
watchers = make(map[uint64]chan *ConfigSnapshot)
|
||||
}
|
||||
watchID := m.maxWatchID
|
||||
m.maxWatchID++
|
||||
|
|
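The "buffering is crucial" note boils down to a non-blocking hand-off; in miniature (purely illustrative):

```go
package main

import "fmt"

func main() {
	ch := make(chan string, 1) // capacity 1, as in Watch above

	// With capacity 0 this send would block until a receiver showed up;
	// with capacity 1 the existing snapshot simply parks in the buffer.
	ch <- "current snapshot"

	fmt.Println(<-ch) // the watcher picks it up whenever it starts receiving
}
```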
|
@ -17,7 +17,6 @@ import (
|
|||
"github.com/hashicorp/consul/agent/proxycfg/internal/watch"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/api"
|
||||
proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
|
||||
"github.com/hashicorp/consul/proto/private/pbpeering"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
|
@ -471,7 +470,7 @@ func testManager_BasicLifecycle(
|
|||
require.Len(t, m.watchers, 0)
|
||||
}
|
||||
|
||||
func assertWatchChanBlocks(t *testing.T, ch <-chan proxysnapshot.ProxySnapshot) {
|
||||
func assertWatchChanBlocks(t *testing.T, ch <-chan *ConfigSnapshot) {
|
||||
t.Helper()
|
||||
|
||||
select {
|
||||
|
@ -481,7 +480,7 @@ func assertWatchChanBlocks(t *testing.T, ch <-chan proxysnapshot.ProxySnapshot)
|
|||
}
|
||||
}
|
||||
|
||||
func assertWatchChanRecvs(t *testing.T, ch <-chan proxysnapshot.ProxySnapshot, expect proxysnapshot.ProxySnapshot) {
|
||||
func assertWatchChanRecvs(t *testing.T, ch <-chan *ConfigSnapshot, expect *ConfigSnapshot) {
|
||||
t.Helper()
|
||||
|
||||
select {
|
||||
|
@ -519,7 +518,7 @@ func TestManager_deliverLatest(t *testing.T) {
|
|||
}
|
||||
|
||||
// test 1 buffered chan
|
||||
ch1 := make(chan proxysnapshot.ProxySnapshot, 1)
|
||||
ch1 := make(chan *ConfigSnapshot, 1)
|
||||
|
||||
// Sending to an unblocked chan should work
|
||||
m.deliverLatest(snap1, ch1)
|
||||
|
@ -535,7 +534,7 @@ func TestManager_deliverLatest(t *testing.T) {
|
|||
require.Equal(t, snap2, <-ch1)
|
||||
|
||||
// Same again for 5-buffered chan
|
||||
ch5 := make(chan proxysnapshot.ProxySnapshot, 5)
|
||||
ch5 := make(chan *ConfigSnapshot, 5)
|
||||
|
||||
// Sending to an unblocked chan should work
|
||||
m.deliverLatest(snap1, ch5)
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
|
@ -13,11 +14,9 @@ import (
|
|||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/agent/grpc-external/limiter"
|
||||
"github.com/hashicorp/consul/agent/proxycfg"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/api"
|
||||
proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
|
||||
rtest "github.com/hashicorp/consul/internal/resource/resourcetest"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
|
||||
"github.com/hashicorp/consul/testrpc"
|
||||
)
|
||||
|
||||
|
@ -64,9 +63,9 @@ func TestAgent_local_proxycfg(t *testing.T) {
|
|||
|
||||
var (
|
||||
firstTime = true
|
||||
ch <-chan proxysnapshot.ProxySnapshot
|
||||
ch <-chan *proxycfg.ConfigSnapshot
|
||||
stc limiter.SessionTerminatedChan
|
||||
cancel proxysnapshot.CancelFunc
|
||||
cancel context.CancelFunc
|
||||
)
|
||||
defer func() {
|
||||
if cancel != nil {
|
||||
|
@ -87,7 +86,7 @@ func TestAgent_local_proxycfg(t *testing.T) {
|
|||
// Prior to fixes in https://github.com/hashicorp/consul/pull/16497
|
||||
// this call to Watch() would deadlock.
|
||||
var err error
|
||||
ch, stc, _, cancel, err = cfg.Watch(rtest.Resource(pbmesh.ProxyConfigurationType, sid.ID).ID(), a.config.NodeName, token)
|
||||
ch, stc, _, cancel, err = cfg.Watch(sid, a.config.NodeName, token)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
select {
|
||||
|
|
|
@ -16,8 +16,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/google/tcpproxy"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
gogrpc "google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
|
@ -26,6 +24,9 @@ import (
|
|||
grpcstatus "google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/connect"
|
||||
"github.com/hashicorp/consul/agent/consul"
|
||||
|
@ -1835,7 +1836,7 @@ func newTestServer(t *testing.T, cb func(conf *consul.Config)) testingServer {
|
|||
deps := newDefaultDeps(t, conf)
|
||||
externalGRPCServer := external.NewServer(deps.Logger, nil, deps.TLSConfigurator, rate.NullRequestLimitsHandler(), keepalive.ServerParameters{}, nil)
|
||||
|
||||
server, err := consul.NewServer(conf, deps, externalGRPCServer, nil, deps.Logger, nil)
|
||||
server, err := consul.NewServer(conf, deps, externalGRPCServer, nil, deps.Logger)
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, server.Shutdown())
|
||||
|
|
|
@ -13,13 +13,12 @@ import (
|
|||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
|
||||
"golang.org/x/crypto/blake2b"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
)
|
||||
|
||||
type ACLMode string
|
||||
|
@ -63,10 +62,6 @@ agent_prefix "" {
|
|||
event_prefix "" {
|
||||
policy = "%[1]s"
|
||||
}
|
||||
identity_prefix "" {
|
||||
policy = "%[1]s"
|
||||
intentions = "%[1]s"
|
||||
}
|
||||
key_prefix "" {
|
||||
policy = "%[1]s"
|
||||
}
|
||||
|
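The `%[1]s` placeholders above use Go's indexed format verbs, so a single policy level fans out to every rule block from one argument; a minimal sketch:

```go
package main

import "fmt"

func main() {
	// %[1]s reuses the first argument for every placeholder, which is how the
	// catch-all test policy above stamps one ACL level into each rule.
	tmpl := `event_prefix "" {
  policy = "%[1]s"
}
key_prefix "" {
  policy = "%[1]s"
}`
	fmt.Println(fmt.Sprintf(tmpl, "write"))
}
```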
|
|
@ -11,10 +11,11 @@ import (
|
|||
"hash/fnv"
|
||||
"text/template"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/xeipuuv/gojsonschema"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
|
@ -26,30 +27,26 @@ var ACLTemplatedPolicyNodeSchema string
|
|||
//go:embed acltemplatedpolicy/schemas/service.json
|
||||
var ACLTemplatedPolicyServiceSchema string
|
||||
|
||||
//go:embed acltemplatedpolicy/schemas/workload-identity.json
|
||||
var ACLTemplatedPolicyWorkloadIdentitySchema string
|
||||
|
||||
//go:embed acltemplatedpolicy/schemas/api-gateway.json
|
||||
var ACLTemplatedPolicyAPIGatewaySchema string
|
||||
|
||||
type ACLTemplatedPolicies []*ACLTemplatedPolicy
|
||||
|
||||
const (
|
||||
ACLTemplatedPolicyServiceID = "00000000-0000-0000-0000-000000000003"
|
||||
ACLTemplatedPolicyNodeID = "00000000-0000-0000-0000-000000000004"
|
||||
ACLTemplatedPolicyDNSID = "00000000-0000-0000-0000-000000000005"
|
||||
ACLTemplatedPolicyNomadServerID = "00000000-0000-0000-0000-000000000006"
|
||||
ACLTemplatedPolicyWorkloadIdentityID = "00000000-0000-0000-0000-000000000007"
|
||||
ACLTemplatedPolicyAPIGatewayID = "00000000-0000-0000-0000-000000000008"
|
||||
ACLTemplatedPolicyNomadClientID = "00000000-0000-0000-0000-000000000009"
|
||||
ACLTemplatedPolicyServiceID = "00000000-0000-0000-0000-000000000003"
|
||||
ACLTemplatedPolicyNodeID = "00000000-0000-0000-0000-000000000004"
|
||||
ACLTemplatedPolicyDNSID = "00000000-0000-0000-0000-000000000005"
|
||||
ACLTemplatedPolicyNomadServerID = "00000000-0000-0000-0000-000000000006"
|
||||
_ = "00000000-0000-0000-0000-000000000007" // formerly workload identity
|
||||
ACLTemplatedPolicyAPIGatewayID = "00000000-0000-0000-0000-000000000008"
|
||||
ACLTemplatedPolicyNomadClientID = "00000000-0000-0000-0000-000000000009"
|
||||
|
||||
ACLTemplatedPolicyServiceDescription = "Gives the token or role permissions to register a service and discover services in the Consul catalog. It also gives the specified service's sidecar proxy the permission to discover and route traffic to other services."
|
||||
ACLTemplatedPolicyNodeDescription             = "Gives the token or role permissions to register an agent/node into the catalog. A node is typically a consul agent but can also be a physical server, cloud instance or a container."
|
||||
ACLTemplatedPolicyDNSDescription = "Gives the token or role permissions for the Consul DNS to query services in the network."
|
||||
ACLTemplatedPolicyNomadServerDescription = "Gives the token or role permissions required for integration with a nomad server."
|
||||
ACLTemplatedPolicyWorkloadIdentityDescription = "Gives the token or role permissions for a specific workload identity."
|
||||
ACLTemplatedPolicyAPIGatewayDescription = "Gives the token or role permissions for a Consul api gateway"
|
||||
ACLTemplatedPolicyNomadClientDescription = "Gives the token or role permissions required for integration with a nomad client."
|
||||
ACLTemplatedPolicyServiceDescription = "Gives the token or role permissions to register a service and discover services in the Consul catalog. It also gives the specified service's sidecar proxy the permission to discover and route traffic to other services."
|
||||
ACLTemplatedPolicyNodeDescription            = "Gives the token or role permissions to register an agent/node into the catalog. A node is typically a consul agent but can also be a physical server, cloud instance or a container."
|
||||
ACLTemplatedPolicyDNSDescription = "Gives the token or role permissions for the Consul DNS to query services in the network."
|
||||
ACLTemplatedPolicyNomadServerDescription = "Gives the token or role permissions required for integration with a nomad server."
|
||||
ACLTemplatedPolicyAPIGatewayDescription = "Gives the token or role permissions for a Consul api gateway"
|
||||
ACLTemplatedPolicyNomadClientDescription = "Gives the token or role permissions required for integration with a nomad client."
|
||||
|
||||
ACLTemplatedPolicyNoRequiredVariablesSchema = "" // catch-all schema for all templated policy that don't require a schema
|
||||
)
|
||||
|
@ -96,13 +93,6 @@ var (
|
|||
Template: ACLTemplatedPolicyNomadServer,
|
||||
Description: ACLTemplatedPolicyNomadServerDescription,
|
||||
},
|
||||
api.ACLTemplatedPolicyWorkloadIdentityName: {
|
||||
TemplateID: ACLTemplatedPolicyWorkloadIdentityID,
|
||||
TemplateName: api.ACLTemplatedPolicyWorkloadIdentityName,
|
||||
Schema: ACLTemplatedPolicyWorkloadIdentitySchema,
|
||||
Template: ACLTemplatedPolicyWorkloadIdentity,
|
||||
Description: ACLTemplatedPolicyWorkloadIdentityDescription,
|
||||
},
|
||||
api.ACLTemplatedPolicyAPIGatewayName: {
|
||||
TemplateID: ACLTemplatedPolicyAPIGatewayID,
|
||||
TemplateName: api.ACLTemplatedPolicyAPIGatewayName,
|
||||
|
|
|
@ -19,9 +19,6 @@ var ACLTemplatedPolicyDNS string
|
|||
//go:embed acltemplatedpolicy/policies/ce/nomad-server.hcl
|
||||
var ACLTemplatedPolicyNomadServer string
|
||||
|
||||
//go:embed acltemplatedpolicy/policies/ce/workload-identity.hcl
|
||||
var ACLTemplatedPolicyWorkloadIdentity string
|
||||
|
||||
//go:embed acltemplatedpolicy/policies/ce/api-gateway.hcl
|
||||
var ACLTemplatedPolicyAPIGateway string
|
||||
|
||||
|
|
|
@ -80,21 +80,6 @@ service_prefix "" {
|
|||
}
|
||||
query_prefix "" {
|
||||
policy = "read"
|
||||
}`,
|
||||
},
|
||||
},
|
||||
"workload-identity-template": {
|
||||
templatedPolicy: &ACLTemplatedPolicy{
|
||||
TemplateID: ACLTemplatedPolicyWorkloadIdentityID,
|
||||
TemplateName: api.ACLTemplatedPolicyWorkloadIdentityName,
|
||||
TemplateVariables: &ACLTemplatedPolicyVariables{
|
||||
Name: "api",
|
||||
},
|
||||
},
|
||||
expectedPolicy: &ACLPolicy{
|
||||
Description: "synthetic policy generated from templated policy: builtin/workload-identity",
|
||||
Rules: `identity "api" {
|
||||
policy = "write"
|
||||
}`,
|
||||
},
|
||||
},
|
||||
|
|
|
@ -1,3 +0,0 @@
|
|||
identity "{{.Name}}" {
|
||||
policy = "write"
|
||||
}
|
|
@ -1,13 +0,0 @@
|
|||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": { "type": "string", "$ref": "#/definitions/min-length-one" }
|
||||
},
|
||||
"required": ["name"],
|
||||
"definitions": {
|
||||
"min-length-one": {
|
||||
"type": "string",
|
||||
"minLength": 1
|
||||
}
|
||||
}
|
||||
}
|
|
@ -8,12 +8,11 @@ import (
|
|||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
"github.com/hashicorp/consul/lib/stringslice"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -217,11 +216,6 @@ type IssuedCert struct {
|
|||
// PrivateKeyPEM is the PEM encoded private key associated with CertPEM.
|
||||
PrivateKeyPEM string `json:",omitempty"`
|
||||
|
||||
// WorkloadIdentity is the name of the workload identity for which the cert was issued.
|
||||
WorkloadIdentity string `json:",omitempty"`
|
||||
// WorkloadIdentityURI is the cert URI value.
|
||||
WorkloadIdentityURI string `json:",omitempty"`
|
||||
|
||||
// Service is the name of the service for which the cert was issued.
|
||||
Service string `json:",omitempty"`
|
||||
// ServiceURI is the cert URI value.
|
||||
|
|
|
@ -11,7 +11,6 @@ import (
|
|||
|
||||
"github.com/hashicorp/consul/api"
|
||||
"github.com/hashicorp/consul/lib"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -181,39 +180,6 @@ type AccessLogsConfig struct {
|
|||
TextFormat string `json:",omitempty" alias:"text_format"`
|
||||
}
|
||||
|
||||
func (c *AccessLogsConfig) GetEnabled() bool {
|
||||
return c.Enabled
|
||||
}
|
||||
|
||||
func (c *AccessLogsConfig) GetDisableListenerLogs() bool {
|
||||
return c.DisableListenerLogs
|
||||
}
|
||||
|
||||
func (c *AccessLogsConfig) GetType() pbmesh.LogSinkType {
|
||||
switch c.Type {
|
||||
case FileLogSinkType:
|
||||
return pbmesh.LogSinkType_LOG_SINK_TYPE_FILE
|
||||
case StdErrLogSinkType:
|
||||
return pbmesh.LogSinkType_LOG_SINK_TYPE_STDERR
|
||||
case StdOutLogSinkType:
|
||||
return pbmesh.LogSinkType_LOG_SINK_TYPE_STDOUT
|
||||
}
|
||||
|
||||
return pbmesh.LogSinkType_LOG_SINK_TYPE_DEFAULT
|
||||
}
|
||||
|
||||
func (c *AccessLogsConfig) GetPath() string {
|
||||
return c.Path
|
||||
}
|
||||
|
||||
func (c *AccessLogsConfig) GetJsonFormat() string {
|
||||
return c.JSONFormat
|
||||
}
|
||||
|
||||
func (c *AccessLogsConfig) GetTextFormat() string {
|
||||
return c.TextFormat
|
||||
}
|
||||
|
||||
func (c *AccessLogsConfig) IsZero() bool {
|
||||
if c == nil {
|
||||
return true
|
||||
|
@ -839,12 +805,3 @@ func (e *ExposeConfig) Finalize() {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
type AccessLogs interface {
|
||||
GetEnabled() bool
|
||||
GetDisableListenerLogs() bool
|
||||
GetType() pbmesh.LogSinkType
|
||||
GetPath() string
|
||||
GetJsonFormat() string
|
||||
GetTextFormat() string
|
||||
}
|
||||
|
|
|
@ -23,7 +23,6 @@ const (
|
|||
errRateLimited = "Rate limit reached, try again later" // Note: we depend on this error message in the gRPC ConnectCA.Sign endpoint (see: isRateLimitError).
|
||||
errNotPrimaryDatacenter = "not the primary datacenter"
|
||||
errStateReadOnly = "CA Provider State is read-only"
|
||||
errUsingV2CatalogExperiment = "V1 catalog is disabled when V2 is enabled"
|
||||
errSamenessGroupNotFound = "Sameness Group not found"
|
||||
errSamenessGroupMustBeDefaultForFailover = "Sameness Group must have DefaultForFailover set to true in order to use this endpoint"
|
||||
)
|
||||
|
@ -42,7 +41,6 @@ var (
|
|||
ErrRateLimited = errors.New(errRateLimited) // Note: we depend on this error message in the gRPC ConnectCA.Sign endpoint (see: isRateLimitError).
|
||||
ErrNotPrimaryDatacenter = errors.New(errNotPrimaryDatacenter)
|
||||
ErrStateReadOnly = errors.New(errStateReadOnly)
|
||||
ErrUsingV2CatalogExperiment = errors.New(errUsingV2CatalogExperiment)
|
||||
ErrSamenessGroupNotFound = errors.New(errSamenessGroupNotFound)
|
||||
ErrSamenessGroupMustBeDefaultForFailover = errors.New(errSamenessGroupMustBeDefaultForFailover)
|
||||
)
|
||||
|
@ -63,10 +61,6 @@ func IsErrRPCRateExceeded(err error) bool {
|
|||
return err != nil && strings.Contains(err.Error(), errRPCRateExceeded)
|
||||
}
|
||||
|
||||
func IsErrUsingV2CatalogExperiment(err error) bool {
|
||||
return err != nil && strings.Contains(err.Error(), errUsingV2CatalogExperiment)
|
||||
}
|
||||
|
||||
func IsErrSamenessGroupNotFound(err error) bool {
|
||||
return err != nil && strings.Contains(err.Error(), errSamenessGroupNotFound)
|
||||
}
|
||||
|
|
|
@ -19,9 +19,10 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/consul/acl"
|
||||
"github.com/hashicorp/consul/agent/config"
|
||||
|
@ -105,7 +106,7 @@ type TestAgentOpts struct {
|
|||
|
||||
// NewTestAgent returns a started agent with the given configuration. It fails
|
||||
// the test if the Agent could not be started.
|
||||
func NewTestAgent(t *testing.T, hcl string, opts ...TestAgentOpts) *TestAgent {
|
||||
func NewTestAgent(t testing.TB, hcl string, opts ...TestAgentOpts) *TestAgent {
|
||||
// This varargs approach is used so that we don't have to modify all of the `NewTestAgent()` calls
|
||||
// in order to introduce more optional arguments.
|
||||
require.LessOrEqual(t, len(opts), 1, "NewTestAgent cannot accept more than one opts argument")
|
||||
|
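The varargs idea called out in the comment is a lightweight stand-in for functional options: existing call sites stay untouched while new ones can pass a single options struct. A minimal sketch with hypothetical names:

```go
package main

import "fmt"

// Opts plays the role of TestAgentOpts: one optional struct so existing
// constructor calls never have to change when new knobs are added.
type Opts struct {
	LogOutput string
}

func newThing(name string, opts ...Opts) string {
	if len(opts) > 1 {
		panic("newThing accepts at most one opts argument")
	}
	o := Opts{}
	if len(opts) == 1 {
		o = opts[0]
	}
	return fmt.Sprintf("%s (log=%q)", name, o.LogOutput)
}

func main() {
	fmt.Println(newThing("agent-1"))                         // old-style call, no opts
	fmt.Println(newThing("agent-2", Opts{LogOutput: "buf"})) // new optional argument
}
```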
@ -133,7 +134,7 @@ func NewTestAgentWithConfigFile(t *testing.T, hcl string, configFiles []string)
|
|||
//
|
||||
// The caller is responsible for calling Shutdown() to stop the agent and remove
|
||||
// temporary directories.
|
||||
func StartTestAgent(t *testing.T, a TestAgent) *TestAgent {
|
||||
func StartTestAgent(t testing.TB, a TestAgent) *TestAgent {
|
||||
t.Helper()
|
||||
retry.RunWith(retry.ThreeTimes(), t, func(r *retry.R) {
|
||||
r.Helper()
|
||||
|
@ -315,22 +316,6 @@ func (a *TestAgent) waitForUp() error {
|
|||
}
|
||||
}
|
||||
|
||||
if a.baseDeps.UseV2Resources() {
|
||||
args := structs.DCSpecificRequest{
|
||||
Datacenter: "dc1",
|
||||
}
|
||||
var leader string
|
||||
if err := a.RPC(context.Background(), "Status.Leader", args, &leader); err != nil {
|
||||
retErr = fmt.Errorf("Status.Leader failed: %v", err)
|
||||
continue // fail, try again
|
||||
}
|
||||
if leader == "" {
|
||||
retErr = fmt.Errorf("No leader")
|
||||
continue // fail, try again
|
||||
}
|
||||
return nil // success
|
||||
}
|
||||
|
||||
// Ensure we have a leader and a node registration.
|
||||
args := &structs.DCSpecificRequest{
|
||||
Datacenter: a.Config.Datacenter,
|
||||
|
|
|
@ -18,10 +18,11 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
|
||||
"github.com/hashicorp/consul/agent/config"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/api"
|
||||
|
@ -32,28 +33,6 @@ import (
|
|||
"github.com/hashicorp/consul/types"
|
||||
)
|
||||
|
||||
func TestUIEndpointsFailInV2(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
a := NewTestAgent(t, `experiments = ["resource-apis"]`)
|
||||
|
||||
checkRequest := func(method, url string) {
|
||||
t.Run(method+" "+url, func(t *testing.T) {
|
||||
assertV1CatalogEndpointDoesNotWorkWithV2(t, a, method, url, "{}")
|
||||
})
|
||||
}
|
||||
|
||||
checkRequest("GET", "/v1/internal/ui/nodes")
|
||||
checkRequest("GET", "/v1/internal/ui/node/web")
|
||||
checkRequest("GET", "/v1/internal/ui/services")
|
||||
checkRequest("GET", "/v1/internal/ui/exported-services")
|
||||
checkRequest("GET", "/v1/internal/ui/catalog-overview")
|
||||
checkRequest("GET", "/v1/internal/ui/gateway-services-nodes/web")
|
||||
checkRequest("GET", "/v1/internal/ui/gateway-intentions/web")
|
||||
checkRequest("GET", "/v1/internal/ui/service-topology/web")
|
||||
checkRequest("PUT", "/v1/internal/service-virtual-ip")
|
||||
}
|
||||
|
||||
func TestUIIndex(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("too slow for testing.Short")
|
||||
|
|
|
@ -31,14 +31,6 @@ func uiTemplateDataFromConfig(cfg *config.RuntimeConfig) (map[string]interface{}
|
|||
uiCfg["metrics_provider_options"] = json.RawMessage(cfg.UIConfig.MetricsProviderOptionsJSON)
|
||||
}
|
||||
|
||||
v2CatalogEnabled := false
|
||||
for _, experiment := range cfg.Experiments {
|
||||
if experiment == "resource-apis" {
|
||||
v2CatalogEnabled = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
d := map[string]interface{}{
|
||||
"ContentPath": cfg.UIConfig.ContentPath,
|
||||
"ACLsEnabled": cfg.ACLsEnabled,
|
||||
|
@ -47,7 +39,6 @@ func uiTemplateDataFromConfig(cfg *config.RuntimeConfig) (map[string]interface{}
|
|||
"LocalDatacenter": cfg.Datacenter,
|
||||
"PrimaryDatacenter": cfg.PrimaryDatacenter,
|
||||
"PeeringEnabled": cfg.PeeringEnabled,
|
||||
"V2CatalogEnabled": v2CatalogEnabled,
|
||||
}
|
||||
|
||||
// Also inject additional provider scripts if needed, otherwise strip the
|
||||
|
|
|
@ -13,10 +13,11 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/net/html"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
||||
"github.com/hashicorp/consul/agent/config"
|
||||
"github.com/hashicorp/consul/sdk/testutil"
|
||||
)
|
||||
|
@ -51,8 +52,7 @@ func TestUIServerIndex(t *testing.T) {
|
|||
"metrics_provider": "",
|
||||
"metrics_proxy_enabled": false,
|
||||
"dashboard_url_templates": null
|
||||
},
|
||||
"V2CatalogEnabled": false
|
||||
}
|
||||
}`,
|
||||
},
|
||||
{
|
||||
|
@ -91,8 +91,7 @@ func TestUIServerIndex(t *testing.T) {
|
|||
},
|
||||
"metrics_proxy_enabled": false,
|
||||
"dashboard_url_templates": null
|
||||
},
|
||||
"V2CatalogEnabled": false
|
||||
}
|
||||
}`,
|
||||
},
|
||||
{
|
||||
|
@ -113,8 +112,7 @@ func TestUIServerIndex(t *testing.T) {
|
|||
"metrics_provider": "",
|
||||
"metrics_proxy_enabled": false,
|
||||
"dashboard_url_templates": null
|
||||
},
|
||||
"V2CatalogEnabled": false
|
||||
}
|
||||
}`,
|
||||
},
|
||||
{
|
||||
|
@ -135,30 +133,7 @@ func TestUIServerIndex(t *testing.T) {
|
|||
"metrics_provider": "",
|
||||
"metrics_proxy_enabled": false,
|
||||
"dashboard_url_templates": null
|
||||
},
|
||||
"V2CatalogEnabled": false
|
||||
}`,
|
||||
},
|
||||
{
|
||||
name: "v2 catalog enabled",
|
||||
cfg: basicUIEnabledConfig(withV2CatalogEnabled()),
|
||||
path: "/",
|
||||
wantStatus: http.StatusOK,
|
||||
wantContains: []string{"<!-- CONSUL_VERSION:"},
|
||||
wantUICfgJSON: `{
|
||||
"ACLsEnabled": false,
|
||||
"HCPEnabled": false,
|
||||
"LocalDatacenter": "dc1",
|
||||
"PrimaryDatacenter": "dc1",
|
||||
"ContentPath": "/ui/",
|
||||
"PeeringEnabled": true,
|
||||
"UIConfig": {
|
||||
"hcp_enabled": false,
|
||||
"metrics_provider": "",
|
||||
"metrics_proxy_enabled": false,
|
||||
"dashboard_url_templates": null
|
||||
},
|
||||
"V2CatalogEnabled": true
|
||||
}
|
||||
}`,
|
||||
},
|
||||
{
|
||||
|
@ -181,8 +156,7 @@ func TestUIServerIndex(t *testing.T) {
|
|||
"metrics_provider": "",
|
||||
"metrics_proxy_enabled": false,
|
||||
"dashboard_url_templates": null
|
||||
},
|
||||
"V2CatalogEnabled": false
|
||||
}
|
||||
}`,
|
||||
},
|
||||
{
|
||||
|
@ -214,8 +188,7 @@ func TestUIServerIndex(t *testing.T) {
|
|||
"metrics_provider": "bar",
|
||||
"metrics_proxy_enabled": false,
|
||||
"dashboard_url_templates": null
|
||||
},
|
||||
"V2CatalogEnabled": false
|
||||
}
|
||||
}`,
|
||||
},
|
||||
{
|
||||
|
@ -348,12 +321,6 @@ func withHCPEnabled() cfgFunc {
|
|||
}
|
||||
}
|
||||
|
||||
func withV2CatalogEnabled() cfgFunc {
|
||||
return func(cfg *config.RuntimeConfig) {
|
||||
cfg.Experiments = append(cfg.Experiments, "resource-apis")
|
||||
}
|
||||
}
|
||||
|
||||
func withPeeringDisabled() cfgFunc {
|
||||
return func(cfg *config.RuntimeConfig) {
|
||||
cfg.PeeringEnabled = false
|
||||
|
@ -500,7 +467,6 @@ func TestHandler_ServeHTTP_TransformIsEvaluatedOnEachRequest(t *testing.T) {
|
|||
"metrics_proxy_enabled": false,
|
||||
"dashboard_url_templates": null
|
||||
},
|
||||
"V2CatalogEnabled": false,
|
||||
"apple": "seeds"
|
||||
}`
|
||||
require.JSONEq(t, expected, extractUIConfig(t, rec.Body.String()))
|
||||
|
@ -527,7 +493,6 @@ func TestHandler_ServeHTTP_TransformIsEvaluatedOnEachRequest(t *testing.T) {
|
|||
"metrics_proxy_enabled": false,
|
||||
"dashboard_url_templates": null
|
||||
},
|
||||
"V2CatalogEnabled": false,
|
||||
"apple": "plant"
|
||||
}`
|
||||
require.JSONEq(t, expected, extractUIConfig(t, rec.Body.String()))
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (c) HashiCorp, Inc.
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
package accesslogs
|
||||
|
@ -10,7 +10,6 @@ import (
|
|||
envoy_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
|
||||
envoy_fileaccesslog_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/file/v3"
|
||||
envoy_streamaccesslog_v3 "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/stream/v3"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
"google.golang.org/protobuf/types/known/structpb"
|
||||
|
||||
|
@ -53,12 +52,12 @@ const (
|
|||
// on the proxy-defaults settings. Currently only one access logger is supported.
|
||||
// Listeners (as opposed to listener filters) can trigger an access log filter with the boolean.
|
||||
// Tests are located in agent/xds/listeners_test.go.
|
||||
func MakeAccessLogs(logs structs.AccessLogs, isListener bool) ([]*envoy_accesslog_v3.AccessLog, error) {
|
||||
if logs == nil || !logs.GetEnabled() {
|
||||
func MakeAccessLogs(logs *structs.AccessLogsConfig, isListener bool) ([]*envoy_accesslog_v3.AccessLog, error) {
|
||||
if logs == nil || !logs.Enabled {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if isListener && logs.GetDisableListenerLogs() {
|
||||
if isListener && logs.DisableListenerLogs {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
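A hedged sketch of calling the narrowed helper with the concrete config type (field values are illustrative; only names that appear in this diff are assumed):

```go
package example

import (
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/xds/accesslogs"
)

// buildListenerAccessLogs shows the post-change call: MakeAccessLogs now takes
// *structs.AccessLogsConfig directly instead of an AccessLogs interface.
func buildListenerAccessLogs() error {
	cfg := &structs.AccessLogsConfig{
		Enabled:             true,
		DisableListenerLogs: false,
		Type:                structs.FileLogSinkType,
		Path:                "/var/log/envoy-access.log",
	}

	logs, err := accesslogs.MakeAccessLogs(cfg, true /* isListener */)
	if err != nil {
		return err
	}
	_ = logs // []*envoy_accesslog_v3.AccessLog, attached to listeners by the caller
	return nil
}
```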
@ -86,37 +85,36 @@ func MakeAccessLogs(logs structs.AccessLogs, isListener bool) ([]*envoy_accesslo
|
|||
}
|
||||
|
||||
// getLogger returns an individual instance of an Envoy logger based on proxy-defaults
|
||||
func getLogger(logs structs.AccessLogs) (*anypb.Any, error) {
|
||||
func getLogger(logs *structs.AccessLogsConfig) (*anypb.Any, error) {
|
||||
logFormat, err := getLogFormat(logs)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get envoy log format: %w", err)
|
||||
}
|
||||
|
||||
switch logs.GetType() {
|
||||
case pbmesh.LogSinkType_LOG_SINK_TYPE_DEFAULT, pbmesh.LogSinkType_LOG_SINK_TYPE_STDOUT:
|
||||
switch logs.Type {
|
||||
case structs.DefaultLogSinkType, structs.StdOutLogSinkType:
|
||||
return getStdoutLogger(logFormat)
|
||||
case pbmesh.LogSinkType_LOG_SINK_TYPE_STDERR:
|
||||
case structs.StdErrLogSinkType:
|
||||
return getStderrLogger(logFormat)
|
||||
case pbmesh.LogSinkType_LOG_SINK_TYPE_FILE:
|
||||
return getFileLogger(logFormat, logs.GetPath())
|
||||
case structs.FileLogSinkType:
|
||||
return getFileLogger(logFormat, logs.Path)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported log format: %s", logs.GetType())
|
||||
return nil, fmt.Errorf("unsupported log format: %s", logs.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// getLogFormat returns an Envoy log format object that is compatible with all log sinks.
|
||||
// If a format is not provided in the proxy-defaults, the default JSON format is used.
|
||||
func getLogFormat(logs structs.AccessLogs) (*envoy_core_v3.SubstitutionFormatString, error) {
|
||||
|
||||
func getLogFormat(logs *structs.AccessLogsConfig) (*envoy_core_v3.SubstitutionFormatString, error) {
|
||||
var format, formatType string
|
||||
if logs.GetTextFormat() == "" && logs.GetJsonFormat() == "" {
|
||||
if logs.TextFormat == "" && logs.JSONFormat == "" {
|
||||
format = defaultJSONFormat
|
||||
formatType = "json"
|
||||
} else if logs.GetJsonFormat() != "" {
|
||||
format = logs.GetJsonFormat()
|
||||
} else if logs.JSONFormat != "" {
|
||||
format = logs.JSONFormat
|
||||
formatType = "json"
|
||||
} else {
|
||||
format = logs.GetTextFormat()
|
||||
format = logs.TextFormat
|
||||
formatType = "text"
|
||||
}
|
||||
|
||||
|
|
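Read as a decision table, the format selection above is: nothing set, use the built-in JSON default; JSONFormat set, use it; otherwise use the text format. A tiny sketch that only restates that precedence:

```go
package example

import "github.com/hashicorp/consul/agent/structs"

// formatKind restates getLogFormat's precedence for illustration only.
func formatKind(cfg *structs.AccessLogsConfig) string {
	switch {
	case cfg.TextFormat == "" && cfg.JSONFormat == "":
		return "json (built-in default format)"
	case cfg.JSONFormat != "":
		return "json (user supplied)"
	default:
		return "text"
	}
}
```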
|
@ -18,27 +18,24 @@ import (
|
|||
envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
|
||||
envoy_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
|
||||
envoy_discovery_v3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
goversion "github.com/hashicorp/go-version"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
goversion "github.com/hashicorp/go-version"
|
||||
|
||||
"github.com/hashicorp/consul/agent/envoyextensions"
|
||||
external "github.com/hashicorp/consul/agent/grpc-external"
|
||||
"github.com/hashicorp/consul/agent/grpc-external/limiter"
|
||||
"github.com/hashicorp/consul/agent/proxycfg"
|
||||
"github.com/hashicorp/consul/agent/structs"
|
||||
"github.com/hashicorp/consul/agent/xds/configfetcher"
|
||||
"github.com/hashicorp/consul/agent/xds/extensionruntime"
|
||||
"github.com/hashicorp/consul/agent/xdsv2"
|
||||
"github.com/hashicorp/consul/envoyextensions/extensioncommon"
|
||||
"github.com/hashicorp/consul/envoyextensions/xdscommon"
|
||||
proxysnapshot "github.com/hashicorp/consul/internal/mesh/proxy-snapshot"
|
||||
proxytracker "github.com/hashicorp/consul/internal/mesh/proxy-tracker"
|
||||
"github.com/hashicorp/consul/logging"
|
||||
pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
|
||||
"github.com/hashicorp/consul/proto-public/pbresource"
|
||||
"github.com/hashicorp/consul/version"
|
||||
)
|
||||
|
||||
|
@ -99,36 +96,14 @@ func (s *Server) DeltaAggregatedResources(stream ADSDeltaStream) error {
|
|||
}
|
||||
|
||||
// getEnvoyConfiguration is a utility function that instantiates the proper
|
||||
// Envoy resource generator based on whether it was passed a ConfigSource or
|
||||
// ProxyState implementation of the ProxySnapshot interface and returns the
|
||||
// generated Envoy configuration.
|
||||
func getEnvoyConfiguration(proxySnapshot proxysnapshot.ProxySnapshot, logger hclog.Logger, cfgFetcher configfetcher.ConfigFetcher) (map[string][]proto.Message, error) {
|
||||
switch proxySnapshot.(type) {
|
||||
case *proxycfg.ConfigSnapshot:
|
||||
logger.Trace("ProxySnapshot update channel received a ProxySnapshot of type ConfigSnapshot")
|
||||
generator := NewResourceGenerator(
|
||||
logger,
|
||||
cfgFetcher,
|
||||
true,
|
||||
)
|
||||
|
||||
c := proxySnapshot.(*proxycfg.ConfigSnapshot)
|
||||
return generator.AllResourcesFromSnapshot(c)
|
||||
case *proxytracker.ProxyState:
|
||||
logger.Trace("ProxySnapshot update channel received a ProxySnapshot of type ProxyState")
|
||||
generator := xdsv2.NewResourceGenerator(
|
||||
logger,
|
||||
)
|
||||
c := proxySnapshot.(*proxytracker.ProxyState)
|
||||
resources, err := generator.AllResourcesFromIR(c)
|
||||
if err != nil {
|
||||
logger.Error("error generating resources from proxy state template", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
return resources, nil
|
||||
default:
|
||||
return nil, errors.New("proxysnapshot must be of type ProxyState or ConfigSnapshot")
|
||||
}
|
||||
// Envoy resource generator and returns the generated Envoy configuration.
|
||||
func getEnvoyConfiguration(snapshot *proxycfg.ConfigSnapshot, logger hclog.Logger, cfgFetcher configfetcher.ConfigFetcher) (map[string][]proto.Message, error) {
|
||||
generator := NewResourceGenerator(
|
||||
logger,
|
||||
cfgFetcher,
|
||||
true,
|
||||
)
|
||||
return generator.AllResourcesFromSnapshot(snapshot)
|
||||
}
|
||||
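With the ProxyState branch gone, resource generation has a single shape; a sketch of the remaining flow (the generator constructor arguments are assumed from the call above, and the wiring is hypothetical):

```go
package example

import (
	"github.com/hashicorp/go-hclog"
	"google.golang.org/protobuf/proto"

	"github.com/hashicorp/consul/agent/proxycfg"
	"github.com/hashicorp/consul/agent/xds"
	"github.com/hashicorp/consul/agent/xds/configfetcher"
)

// generate sketches the now-unconditional path: every snapshot is a
// *proxycfg.ConfigSnapshot, so one ResourceGenerator covers all proxies.
func generate(snap *proxycfg.ConfigSnapshot, logger hclog.Logger, fetcher configfetcher.ConfigFetcher) (map[string][]proto.Message, error) {
	gen := xds.NewResourceGenerator(logger, fetcher, true)
	return gen.AllResourcesFromSnapshot(snap)
}
```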
|
||||
const (
|
||||
|
@@ -145,9 +120,9 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove

	// Loop state
	var (
		proxySnapshot    proxysnapshot.ProxySnapshot
		snapshot         *proxycfg.ConfigSnapshot
		node             *envoy_config_core_v3.Node
		stateCh          <-chan proxysnapshot.ProxySnapshot
		stateCh          <-chan *proxycfg.ConfigSnapshot
		drainCh          limiter.SessionTerminatedChan
		cfgSrcTerminated proxycfg.SrcTerminatedChan
		watchCancel      func()

@@ -178,13 +153,13 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
	// Configure handlers for each type of request we currently care about.
	handlers := map[string]*xDSDeltaType{
		xdscommon.ListenerType: newDeltaType(logger, stream, xdscommon.ListenerType, func() bool {
			return proxySnapshot.AllowEmptyListeners()
			return snapshot.AllowEmptyListeners()
		}),
		xdscommon.RouteType: newDeltaType(logger, stream, xdscommon.RouteType, func() bool {
			return proxySnapshot.AllowEmptyRoutes()
			return snapshot.AllowEmptyRoutes()
		}),
		xdscommon.ClusterType: newDeltaType(logger, stream, xdscommon.ClusterType, func() bool {
			return proxySnapshot.AllowEmptyClusters()
			return snapshot.AllowEmptyClusters()
		}),
		xdscommon.EndpointType: newDeltaType(logger, stream, xdscommon.EndpointType, nil),
		xdscommon.SecretType:   newDeltaType(logger, stream, xdscommon.SecretType, nil), // TODO allowEmptyFn

@@ -213,8 +188,8 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
		authTimer = time.After(s.AuthCheckFrequency)
	}

	checkStreamACLs := func(proxySnap proxysnapshot.ProxySnapshot) error {
		return s.authorize(stream.Context(), proxySnap)
	checkStreamACLs := func(snapshot *proxycfg.ConfigSnapshot) error {
		return s.authorize(stream.Context(), snapshot)
	}

	for {

@@ -225,7 +200,7 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
			return errOverwhelmed
		case <-authTimer:
			// It's been too long since a Discovery{Request,Response} so recheck ACLs.
			if err := checkStreamACLs(proxySnapshot); err != nil {
			if err := checkStreamACLs(snapshot); err != nil {
				return err
			}
			extendAuthTimer()

@@ -280,9 +255,9 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
				// would've already exited this loop.
				return status.Error(codes.Aborted, "xDS stream terminated due to an irrecoverable error, please try again")
			}
			proxySnapshot = cs
			snapshot = cs

			newRes, err := getEnvoyConfiguration(proxySnapshot, logger, s.CfgFetcher)
			newRes, err := getEnvoyConfiguration(snapshot, logger, s.CfgFetcher)
			if err != nil {
				return status.Errorf(codes.Unavailable, "failed to generate all xDS resources from the snapshot: %v", err)
			}

@@ -294,7 +269,7 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
				s.ResourceMapMutateFn(newResourceMap)
			}

			if newResourceMap, err = s.applyEnvoyExtensions(newResourceMap, proxySnapshot, node); err != nil {
			if newResourceMap, err = s.applyEnvoyExtensions(newResourceMap, snapshot, node); err != nil {
				// err is already the result of calling status.Errorf
				return err
			}

@@ -334,7 +309,7 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
			}

			// Start authentication process, we need the proxyID
			proxyID := newResourceIDFromEnvoyNode(node)
			proxyID := structs.NewServiceID(node.Id, parseEnterpriseMeta(node))

			// Start watching config for that proxy
			var err error

@@ -357,14 +332,14 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
			// state machine.
			defer watchCancel()

			logger = logger.With("service_id", proxyID.Name) // enhance future logs
			logger = logger.With("service_id", proxyID.String()) // enhance future logs

			logger.Trace("watching proxy, pending initial proxycfg snapshot for xDS")

			// Now wait for the config so we can check ACL
			state = stateDeltaPendingInitialConfig
		case stateDeltaPendingInitialConfig:
			if proxySnapshot == nil {
			if snapshot == nil {
				// Nothing we can do until we get the initial config
				continue
			}

@@ -373,7 +348,7 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
			state = stateDeltaRunning

			// Upgrade the logger
			loggerName := proxySnapshot.LoggerName()
			loggerName := snapshot.LoggerName()
			if loggerName != "" {
				logger = logger.Named(loggerName)
			}

@@ -384,7 +359,7 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
			fallthrough
		case stateDeltaRunning:
			// Check ACLs on every Discovery{Request,Response}.
			if err := checkStreamACLs(proxySnapshot); err != nil {
			if err := checkStreamACLs(snapshot); err != nil {
				return err
			}
			// For the first time through the state machine, this is when the

@@ -433,37 +408,7 @@ func (s *Server) processDelta(stream ADSDeltaStream, reqCh <-chan *envoy_discove
		}
	}

// newResourceIDFromEnvoyNode is a utility function that allows creating a
// Resource ID from an Envoy proxy node so that existing delta calls can easily
// use ProxyWatcher interface arguments for Watch().
func newResourceIDFromEnvoyNode(node *envoy_config_core_v3.Node) *pbresource.ID {
	entMeta := parseEnterpriseMeta(node)

	return &pbresource.ID{
		Name: node.Id,
		Tenancy: &pbresource.Tenancy{
			Namespace: entMeta.NamespaceOrDefault(),
			Partition: entMeta.PartitionOrDefault(),
		},
		Type: pbmesh.ProxyStateTemplateType,
	}
}

func (s *Server) applyEnvoyExtensions(resources *xdscommon.IndexedResources, proxySnapshot proxysnapshot.ProxySnapshot, node *envoy_config_core_v3.Node) (*xdscommon.IndexedResources, error) {
	// TODO(proxystate)
	// This is a workaround for now as envoy extensions are not yet supported with ProxyState.
	// For now, we cast to proxycfg.ConfigSnapshot and no-op if it's the pbmesh.ProxyState type.
	var snapshot *proxycfg.ConfigSnapshot
	switch proxySnapshot.(type) {
	//TODO(proxystate): implement envoy extensions for ProxyState
	case *proxytracker.ProxyState:
		return resources, nil
	case *proxycfg.ConfigSnapshot:
		snapshot = proxySnapshot.(*proxycfg.ConfigSnapshot)
	default:
		return nil, status.Errorf(codes.InvalidArgument,
			"unsupported config snapshot type to apply envoy extensions to %T", proxySnapshot)
	}
func (s *Server) applyEnvoyExtensions(resources *xdscommon.IndexedResources, snapshot *proxycfg.ConfigSnapshot, node *envoy_config_core_v3.Node) (*xdscommon.IndexedResources, error) {
	var err error
	envoyVersion := xdscommon.DetermineEnvoyVersionFromNode(node)
	consulVersion, err := goversion.NewVersion(version.Version)
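The hunks above reduce processDelta to a single snapshot type: the xDS server now only ever holds a *proxycfg.ConfigSnapshot, rechecks ACLs against it, and regenerates Envoy resources from it on every update. The following is a minimal, self-contained sketch of that loop shape; it is not Consul code, and every type and helper in it is a local stand-in invented purely for illustration.

```go
package main

import (
	"errors"
	"fmt"
)

// configSnapshot is a stand-in for *proxycfg.ConfigSnapshot.
type configSnapshot struct{ serviceID string }

// generateResources is a stand-in for getEnvoyConfiguration above.
func generateResources(snap *configSnapshot) (map[string]int, error) {
	if snap == nil {
		return nil, errors.New("nil snapshot")
	}
	return map[string]int{"listeners": 1, "clusters": 2}, nil
}

// checkStreamACLs is a stand-in for s.authorize(stream.Context(), snapshot).
func checkStreamACLs(snap *configSnapshot) error { return nil }

func main() {
	updates := make(chan *configSnapshot, 1)
	updates <- &configSnapshot{serviceID: "web-sidecar-proxy"}
	close(updates)

	var snapshot *configSnapshot
	for cs := range updates {
		snapshot = cs // keep only the latest snapshot, mirroring `snapshot = cs`
		if err := checkStreamACLs(snapshot); err != nil {
			return
		}
		res, err := generateResources(snapshot)
		if err != nil {
			return
		}
		fmt.Println(res)
	}
}
```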
File diff suppressed because it is too large
@@ -1,135 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package proxystateconverter

import (
	"fmt"

	"github.com/hashicorp/go-hclog"

	"github.com/hashicorp/consul/agent/proxycfg"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/xds/configfetcher"
	proxytracker "github.com/hashicorp/consul/internal/mesh/proxy-tracker"
	"github.com/hashicorp/consul/internal/resource"
	pbmesh "github.com/hashicorp/consul/proto-public/pbmesh/v2beta1"
	"github.com/hashicorp/consul/proto-public/pbmesh/v2beta1/pbproxystate"
)

// Converter converts a single snapshot into a ProxyState.
type Converter struct {
	Logger     hclog.Logger
	CfgFetcher configfetcher.ConfigFetcher
	proxyState *proxytracker.ProxyState
}

func NewConverter(
	logger hclog.Logger,
	cfgFetcher configfetcher.ConfigFetcher,
) *Converter {
	return &Converter{
		Logger:     logger,
		CfgFetcher: cfgFetcher,
		proxyState: &proxytracker.ProxyState{
			ProxyState: &pbmesh.ProxyState{
				Listeners: make([]*pbproxystate.Listener, 0),
				Clusters:  make(map[string]*pbproxystate.Cluster),
				Routes:    make(map[string]*pbproxystate.Route),
				Endpoints: make(map[string]*pbproxystate.Endpoints),
			},
		},
	}
}

func (g *Converter) ProxyStateFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot) (*proxytracker.ProxyState, error) {
	err := g.resourcesFromSnapshot(cfgSnap)
	if err != nil {
		return nil, fmt.Errorf("failed to generate FullProxyState: %v", err)
	}

	return g.proxyState, nil
}

func (g *Converter) resourcesFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot) error {
	err := g.tlsConfigFromSnapshot(cfgSnap)
	if err != nil {
		return err
	}

	err = g.listenersFromSnapshot(cfgSnap)
	if err != nil {
		return err
	}

	err = g.endpointsFromSnapshot(cfgSnap)
	if err != nil {
		return err
	}
	err = g.clustersFromSnapshot(cfgSnap)
	if err != nil {
		return err
	}
	err = g.routesFromSnapshot(cfgSnap)
	if err != nil {
		return err
	}

	//g.secretsFromSnapshot(cfgSnap)
	return nil
}

func (g *Converter) tlsConfigFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot) error {
	proxyStateTLS := &pbproxystate.TLS{}
	g.proxyState.TrustBundles = make(map[string]*pbproxystate.TrustBundle)
	g.proxyState.LeafCertificates = make(map[string]*pbproxystate.LeafCertificate)

	// Set the TLS in the top level proxyState
	g.proxyState.Tls = proxyStateTLS

	// Add local trust bundle
	g.proxyState.TrustBundles[resource.DefaultPeerName] = &pbproxystate.TrustBundle{
		TrustDomain: cfgSnap.Roots.TrustDomain,
		Roots:       []string{cfgSnap.RootPEMs()},
	}

	// Add peered trust bundles for remote peers that will dial this proxy.
	for _, peeringTrustBundle := range cfgSnap.PeeringTrustBundles() {
		g.proxyState.TrustBundles[peeringTrustBundle.PeerName] = &pbproxystate.TrustBundle{
			TrustDomain: peeringTrustBundle.GetTrustDomain(),
			Roots:       peeringTrustBundle.RootPEMs,
		}
	}

	// Add upstream peer trust bundles for dialing upstreams in remote peers.
	upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
	if err != nil {
		if !(cfgSnap.Kind == structs.ServiceKindMeshGateway || cfgSnap.Kind == structs.ServiceKindTerminatingGateway) {
			return err
		}
	}
	if upstreamsSnapshot != nil {
		upstreamsSnapshot.UpstreamPeerTrustBundles.ForEachKeyE(func(k proxycfg.PeerName) error {
			tbs, ok := upstreamsSnapshot.UpstreamPeerTrustBundles.Get(k)
			if ok {
				g.proxyState.TrustBundles[k] = &pbproxystate.TrustBundle{
					TrustDomain: tbs.TrustDomain,
					Roots:       tbs.RootPEMs,
				}
			}
			return nil
		})
	}

	if cfgSnap.MeshConfigTLSOutgoing() != nil {
		proxyStateTLS.OutboundTlsParameters = makeTLSParametersFromTLSConfig(cfgSnap.MeshConfigTLSOutgoing().TLSMinVersion,
			cfgSnap.MeshConfigTLSOutgoing().TLSMaxVersion, cfgSnap.MeshConfigTLSOutgoing().CipherSuites)
	}

	if cfgSnap.MeshConfigTLSIncoming() != nil {
		proxyStateTLS.InboundTlsParameters = makeTLSParametersFromTLSConfig(cfgSnap.MeshConfigTLSIncoming().TLSMinVersion,
			cfgSnap.MeshConfigTLSIncoming().TLSMaxVersion, cfgSnap.MeshConfigTLSIncoming().CipherSuites)
	}

	return nil
}
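For reference, here is a hedged sketch of how the deleted Converter was driven, based only on the constructor and method shown above. It assumes the pre-removal package path agent/xds/proxystateconverter and a caller that already holds a populated *proxycfg.ConfigSnapshot; the nil ConfigFetcher is an assumption made purely for illustration, and none of this compiles against the tree after this commit.

```go
package example

import (
	"github.com/hashicorp/go-hclog"

	"github.com/hashicorp/consul/agent/proxycfg"
	"github.com/hashicorp/consul/agent/xds/proxystateconverter"
	proxytracker "github.com/hashicorp/consul/internal/mesh/proxy-tracker"
)

// convert shows the shape of the removed API: build a Converter, then turn a
// hydrated config snapshot into a ProxyState in one call.
func convert(logger hclog.Logger, cfgSnap *proxycfg.ConfigSnapshot) (*proxytracker.ProxyState, error) {
	// Passing a nil ConfigFetcher here is an assumption for the sketch only.
	conv := proxystateconverter.NewConverter(logger, nil)
	return conv.ProxyStateFromSnapshot(cfgSnap)
}
```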
@@ -1,674 +0,0 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package proxystateconverter

import (
	"errors"
	"fmt"

	"github.com/hashicorp/consul/agent/connect"
	"github.com/hashicorp/consul/agent/proxycfg"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/agent/xds/response"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/consul/proto-public/pbmesh/v2beta1/pbproxystate"
	"github.com/hashicorp/go-bexpr"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func makeLbEndpoint(addr string, port int, health pbproxystate.HealthStatus, weight int) *pbproxystate.Endpoint {
	ep := &pbproxystate.Endpoint{
		Address: &pbproxystate.Endpoint_HostPort{
			HostPort: &pbproxystate.HostPortAddress{
				Host: addr,
				Port: uint32(port),
			},
		},
	}
	ep.HealthStatus = health
	ep.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: uint32(weight)}
	return ep
}

// endpointsFromSnapshot returns the mesh API representation of the "routes" in the snapshot.
func (s *Converter) endpointsFromSnapshot(cfgSnap *proxycfg.ConfigSnapshot) error {

	if cfgSnap == nil {
		return errors.New("nil config given")
	}

	switch cfgSnap.Kind {
	case structs.ServiceKindConnectProxy:
		return s.endpointsFromSnapshotConnectProxy(cfgSnap)
	//case structs.ServiceKindTerminatingGateway:
	//	return s.endpointsFromSnapshotTerminatingGateway(cfgSnap)
	//case structs.ServiceKindMeshGateway:
	//	return s.endpointsFromSnapshotMeshGateway(cfgSnap)
	//case structs.ServiceKindIngressGateway:
	//	return s.endpointsFromSnapshotIngressGateway(cfgSnap)
	//case structs.ServiceKindAPIGateway:
	//	return s.endpointsFromSnapshotAPIGateway(cfgSnap)
	default:
		return fmt.Errorf("Invalid service kind: %v", cfgSnap.Kind)
	}
}

// endpointsFromSnapshotConnectProxy returns the xDS API representation of the "endpoints"
// (upstream instances) in the snapshot.
func (s *Converter) endpointsFromSnapshotConnectProxy(cfgSnap *proxycfg.ConfigSnapshot) error {
	eps := make(map[string]*pbproxystate.Endpoints)

	// NOTE: Any time we skip a chain below we MUST also skip that discovery chain in clusters.go
	// so that the sets of endpoints generated matches the sets of clusters.
	for uid, chain := range cfgSnap.ConnectProxy.DiscoveryChain {
		upstream, skip := cfgSnap.ConnectProxy.GetUpstream(uid, &cfgSnap.ProxyID.EnterpriseMeta)
		if skip {
			// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
			continue
		}

		var upstreamConfigMap map[string]interface{}
		if upstream != nil {
			upstreamConfigMap = upstream.Config
		}

		es, err := s.endpointsFromDiscoveryChain(
			uid,
			chain,
			cfgSnap,
			cfgSnap.Locality,
			upstreamConfigMap,
			cfgSnap.ConnectProxy.WatchedUpstreamEndpoints[uid],
			cfgSnap.ConnectProxy.WatchedGatewayEndpoints[uid],
			false,
		)
		if err != nil {
			return err
		}

		for clusterName, endpoints := range es {
			eps[clusterName] = &pbproxystate.Endpoints{
				Endpoints: endpoints,
			}

		}
	}

	// NOTE: Any time we skip an upstream below we MUST also skip that same
	// upstream in clusters.go so that the sets of endpoints generated matches
	// the sets of clusters.
	for _, uid := range cfgSnap.ConnectProxy.PeeredUpstreamIDs() {
		upstream, skip := cfgSnap.ConnectProxy.GetUpstream(uid, &cfgSnap.ProxyID.EnterpriseMeta)
		if skip {
			// Discovery chain is not associated with a known explicit or implicit upstream so it is skipped.
			continue
		}

		tbs, ok := cfgSnap.ConnectProxy.UpstreamPeerTrustBundles.Get(uid.Peer)
		if !ok {
			// this should never happen since we loop through upstreams with
			// set trust bundles
			return fmt.Errorf("trust bundle not ready for peer %s", uid.Peer)
		}

		clusterName := generatePeeredClusterName(uid, tbs)

		mgwMode := structs.MeshGatewayModeDefault
		if upstream != nil {
			mgwMode = upstream.MeshGateway.Mode
		}
		peerServiceEndpoints, err := s.makeEndpointsForPeerService(cfgSnap, uid, mgwMode)
		if err != nil {
			return err
		}

		if peerServiceEndpoints != nil {
			pbEndpoints := &pbproxystate.Endpoints{
				Endpoints: peerServiceEndpoints,
			}

			eps[clusterName] = pbEndpoints
		}
	}

	// Looping over explicit upstreams is only needed for prepared queries because they do not have discovery chains
	for _, u := range cfgSnap.Proxy.Upstreams {
		if u.DestinationType != structs.UpstreamDestTypePreparedQuery {
			continue
		}
		uid := proxycfg.NewUpstreamID(&u)

		dc := u.Datacenter
		if dc == "" {
			dc = cfgSnap.Datacenter
		}
		clusterName := connect.UpstreamSNI(&u, "", dc, cfgSnap.Roots.TrustDomain)

		endpoints, ok := cfgSnap.ConnectProxy.PreparedQueryEndpoints[uid]
		if ok {
			epts := makeEndpointsForLoadAssignment(
				cfgSnap,
				nil,
				[]loadAssignmentEndpointGroup{
					{Endpoints: endpoints},
				},
				cfgSnap.Locality,
			)
			pbEndpoints := &pbproxystate.Endpoints{
				Endpoints: epts,
			}

			eps[clusterName] = pbEndpoints
		}
	}

	// Loop over potential destinations in the mesh, then grab the gateway nodes associated with each
	cfgSnap.ConnectProxy.DestinationsUpstream.ForEachKey(func(uid proxycfg.UpstreamID) bool {
		svcConfig, ok := cfgSnap.ConnectProxy.DestinationsUpstream.Get(uid)
		if !ok || svcConfig.Destination == nil {
			return true
		}

		for _, address := range svcConfig.Destination.Addresses {
			clusterName := clusterNameForDestination(cfgSnap, uid.Name, address, uid.NamespaceOrDefault(), uid.PartitionOrDefault())

			endpoints, ok := cfgSnap.ConnectProxy.DestinationGateways.Get(uid)
			if ok {
				epts := makeEndpointsForLoadAssignment(
					cfgSnap,
					nil,
					[]loadAssignmentEndpointGroup{
						{Endpoints: endpoints},
					},
					proxycfg.GatewayKey{ /*empty so it never matches*/ },
				)
				pbEndpoints := &pbproxystate.Endpoints{
					Endpoints: epts,
				}
				eps[clusterName] = pbEndpoints
			}
		}

		return true
	})

	s.proxyState.Endpoints = eps
	return nil
}

func (s *Converter) makeEndpointsForPeerService(
	cfgSnap *proxycfg.ConfigSnapshot,
	uid proxycfg.UpstreamID,
	upstreamGatewayMode structs.MeshGatewayMode,
) ([]*pbproxystate.Endpoint, error) {
	var eps []*pbproxystate.Endpoint

	upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
	if err != nil {
		return eps, err
	}

	if upstreamGatewayMode == structs.MeshGatewayModeNone {
		s.Logger.Warn(fmt.Sprintf("invalid mesh gateway mode 'none', defaulting to 'remote' for %q", uid))
	}

	// If an upstream is configured with local mesh gw mode, we make a load assignment
	// from the gateway endpoints instead of those of the upstreams.
	if upstreamGatewayMode == structs.MeshGatewayModeLocal {
		localGw, ok := cfgSnap.ConnectProxy.WatchedLocalGWEndpoints.Get(cfgSnap.Locality.String())
		if !ok {
			// local GW is not ready; return early
			return eps, nil
		}
		eps = makeEndpointsForLoadAssignment(
			cfgSnap,
			nil,
			[]loadAssignmentEndpointGroup{
				{Endpoints: localGw},
			},
			cfgSnap.Locality,
		)
		return eps, nil
	}

	// Also skip peer instances with a hostname as their address. EDS
	// cannot resolve hostnames, so we provide them through CDS instead.
	if _, ok := upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid]; ok {
		return eps, nil
	}

	endpoints, ok := upstreamsSnapshot.PeerUpstreamEndpoints.Get(uid)
	if !ok {
		return nil, nil
	}
	eps = makeEndpointsForLoadAssignment(
		cfgSnap,
		nil,
		[]loadAssignmentEndpointGroup{
			{Endpoints: endpoints},
		},
		proxycfg.GatewayKey{ /*empty so it never matches*/ },
	)
	return eps, nil
}

func (s *Converter) filterSubsetEndpoints(subset *structs.ServiceResolverSubset, endpoints structs.CheckServiceNodes) (structs.CheckServiceNodes, error) {
	// locally execute the subsets filter
	if subset.Filter != "" {
		filter, err := bexpr.CreateFilter(subset.Filter, nil, endpoints)
		if err != nil {
			return nil, err
		}

		raw, err := filter.Execute(endpoints)
		if err != nil {
			return nil, err
		}
		return raw.(structs.CheckServiceNodes), nil
	}
	return endpoints, nil
}

// TODO(proxystate): Terminating Gateway will be added in the future.
// Functions to add from agent/xds/endpoints.go:
// func endpointsFromSnapshotTerminatingGateway

// TODO(proxystate): Mesh Gateway will be added in the future.
// Functions to add from agent/xds/endpoints.go:
// func endpointsFromSnapshotMeshGateway

// TODO(proxystate): Cluster Peering will be added in the future.
// Functions to add from agent/xds/endpoints.go:
// func makeEndpointsForOutgoingPeeredServices

// TODO(proxystate): Mesh Gateway will be added in the future.
// Functions to add from agent/xds/endpoints.go:
// func endpointsFromServicesAndResolvers

// TODO(proxystate): Mesh Gateway will be added in the future.
// Functions to add from agent/xds/endpoints.go:
// func makePeerServerEndpointsForMeshGateway

// TODO(proxystate): Ingress Gateway will be added in the future.
// Functions to add from agent/xds/endpoints.go:
// func endpointsFromSnapshotIngressGateway

// TODO(proxystate): API Gateway will be added in the future.
// Functions to add from agent/xds/endpoints.go:
// func endpointsFromSnapshotAPIGateway

// used in clusters.go
func makeHostPortEndpoint(host string, port int) *pbproxystate.Endpoint {
	if port >= 0 && port <= 65535 {
		return &pbproxystate.Endpoint{
			Address: &pbproxystate.Endpoint_HostPort{
				HostPort: &pbproxystate.HostPortAddress{
					Host: host,
					Port: uint32(port),
				},
			},
		}
	}
	return nil
}

func makeUnixSocketEndpoint(path string) *pbproxystate.Endpoint {
	return &pbproxystate.Endpoint{
		Address: &pbproxystate.Endpoint_UnixSocket{
			UnixSocket: &pbproxystate.UnixSocketAddress{
				Path: path,
				// envoy's mode is particular to a pipe address and is uint32.
				// it also says "The mode for the Pipe. Not applicable for abstract sockets."
				// https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/address.proto#config-core-v3-pipe
				Mode: "0",
			},
		},
	}
}

func (s *Converter) makeUpstreamLoadAssignmentEndpointForPeerService(
	cfgSnap *proxycfg.ConfigSnapshot,
	uid proxycfg.UpstreamID,
	upstreamGatewayMode structs.MeshGatewayMode,
) ([]*pbproxystate.Endpoint, error) {
	var eps []*pbproxystate.Endpoint

	upstreamsSnapshot, err := cfgSnap.ToConfigSnapshotUpstreams()
	if err != nil {
		return eps, err
	}

	if upstreamGatewayMode == structs.MeshGatewayModeNone {
		s.Logger.Warn(fmt.Sprintf("invalid mesh gateway mode 'none', defaulting to 'remote' for %q", uid))
	}

	// If an upstream is configured with local mesh gw mode, we make a load assignment
	// from the gateway endpoints instead of those of the upstreams.
	if upstreamGatewayMode == structs.MeshGatewayModeLocal {
		localGw, ok := cfgSnap.ConnectProxy.WatchedLocalGWEndpoints.Get(cfgSnap.Locality.String())
		if !ok {
			// local GW is not ready; return early
			return eps, nil
		}
		eps = makeEndpointsForLoadAssignment(
			cfgSnap,
			nil,
			[]loadAssignmentEndpointGroup{
				{Endpoints: localGw},
			},
			cfgSnap.Locality,
		)
		return eps, nil
	}

	// Also skip peer instances with a hostname as their address. EDS
	// cannot resolve hostnames, so we provide them through CDS instead.
	if _, ok := upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid]; ok {
		return eps, nil
	}

	endpoints, ok := upstreamsSnapshot.PeerUpstreamEndpoints.Get(uid)
	if !ok {
		return nil, nil
	}
	eps = makeEndpointsForLoadAssignment(
		cfgSnap,
		nil,
		[]loadAssignmentEndpointGroup{
			{Endpoints: endpoints},
		},
		proxycfg.GatewayKey{ /*empty so it never matches*/ },
	)
	return eps, nil
}

func (s *Converter) endpointsFromDiscoveryChain(
	uid proxycfg.UpstreamID,
	chain *structs.CompiledDiscoveryChain,
	cfgSnap *proxycfg.ConfigSnapshot,
	gatewayKey proxycfg.GatewayKey,
	upstreamConfigMap map[string]interface{},
	upstreamEndpoints map[string]structs.CheckServiceNodes,
	gatewayEndpoints map[string]structs.CheckServiceNodes,
	forMeshGateway bool,
) (map[string][]*pbproxystate.Endpoint, error) {
	if chain == nil {
		if forMeshGateway {
			return nil, fmt.Errorf("missing discovery chain for %s", uid)
		}
		return nil, nil
	}

	if upstreamConfigMap == nil {
		upstreamConfigMap = make(map[string]interface{}) // TODO:needed?
	}

	clusterEndpoints := make(map[string][]*pbproxystate.Endpoint)

	// TODO(proxystate): escape hatches will be implemented in the future
	//var escapeHatchCluster *pbproxystate.Cluster
	//if !forMeshGateway {

	//cfg, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
	//if err != nil {
	//	// Don't hard fail on a config typo, just warn. The parse func returns
	//	// default config if there is an error so it's safe to continue.
	//	s.Logger.Warn("failed to parse", "upstream", uid,
	//		"error", err)
	//}

	//if cfg.EnvoyClusterJSON != "" {
	//	if chain.Default {
	//		// If you haven't done anything to setup the discovery chain, then
	//		// you can use the envoy_cluster_json escape hatch.
	//		escapeHatchCluster, err = makeClusterFromUserConfig(cfg.EnvoyClusterJSON)
	//		if err != nil {
	//			return ce, nil
	//		}
	//	} else {
	//		s.Logger.Warn("ignoring escape hatch setting, because a discovery chain is configued for",
	//			"discovery chain", chain.ServiceName, "upstream", uid,
	//			"envoy_cluster_json", chain.ServiceName)
	//	}
	//}
	//}

	mgwMode := structs.MeshGatewayModeDefault
	if upstream, _ := cfgSnap.ConnectProxy.GetUpstream(uid, &cfgSnap.ProxyID.EnterpriseMeta); upstream != nil {
		mgwMode = upstream.MeshGateway.Mode
	}

	// Find all resolver nodes.
	for _, node := range chain.Nodes {
		switch {
		case node == nil:
			return nil, fmt.Errorf("impossible to process a nil node")
		case node.Type != structs.DiscoveryGraphNodeTypeResolver:
			continue
		case node.Resolver == nil:
			return nil, fmt.Errorf("impossible to process a non-resolver node")
		}
		rawUpstreamConfig, err := structs.ParseUpstreamConfigNoDefaults(upstreamConfigMap)
		if err != nil {
			return nil, err
		}
		upstreamConfig := finalizeUpstreamConfig(rawUpstreamConfig, chain, node.Resolver.ConnectTimeout)

		mappedTargets, err := s.mapDiscoChainTargets(cfgSnap, chain, node, upstreamConfig, forMeshGateway)
		if err != nil {
			return nil, err
		}

		targetGroups, err := mappedTargets.groupedTargets()
		if err != nil {
			return nil, err
		}

		for _, groupedTarget := range targetGroups {
			clusterName := groupedTarget.ClusterName
			// TODO(proxystate): escape hatches will be implemented in the future
			//if escapeHatchCluster != nil {
			//	clusterName = escapeHatchCluster.Name
			//}
			switch len(groupedTarget.Targets) {
			case 0:
				continue
			case 1:
				// We expect one target so this passes through to continue setting the load assignment up.
			default:
				return nil, fmt.Errorf("cannot have more than one target")
			}
			ti := groupedTarget.Targets[0]
			s.Logger.Debug("generating endpoints for", "cluster", clusterName, "targetID", ti.TargetID)
			targetUID := proxycfg.NewUpstreamIDFromTargetID(ti.TargetID)
			if targetUID.Peer != "" {
				peerServiceEndpoints, err := s.makeEndpointsForPeerService(cfgSnap, targetUID, mgwMode)
				if err != nil {
					return nil, err
				}
				if peerServiceEndpoints != nil {
					clusterEndpoints[clusterName] = peerServiceEndpoints
				}
				continue
			}

			endpointGroup, valid := makeLoadAssignmentEndpointGroup(
				chain.Targets,
				upstreamEndpoints,
				gatewayEndpoints,
				ti.TargetID,
				gatewayKey,
				forMeshGateway,
			)
			if !valid {
				continue // skip the cluster if we're still populating the snapshot
			}

			epts := makeEndpointsForLoadAssignment(
				cfgSnap,
				ti.PrioritizeByLocality,
				[]loadAssignmentEndpointGroup{endpointGroup},
				gatewayKey,
			)
			clusterEndpoints[clusterName] = epts
		}
	}

	return clusterEndpoints, nil
}

// TODO(proxystate): Mesh Gateway will be added in the future.
// Functions to add from agent/xds/endpoints.go:
// func makeExportedUpstreamEndpointsForMeshGateway

type loadAssignmentEndpointGroup struct {
	Endpoints      structs.CheckServiceNodes
	OnlyPassing    bool
	OverrideHealth pbproxystate.HealthStatus
}

func makeEndpointsForLoadAssignment(cfgSnap *proxycfg.ConfigSnapshot,
	policy *structs.DiscoveryPrioritizeByLocality,
	endpointGroups []loadAssignmentEndpointGroup,
	localKey proxycfg.GatewayKey) []*pbproxystate.Endpoint {
	pbEndpoints := make([]*pbproxystate.Endpoint, 0, len(endpointGroups))

	// TODO(proxystate): this will be added with property overrides having golden files with this
	//if len(endpointGroups) > 1 {
	//	cla.Policy = &envoy_endpoint_v3.ClusterLoadAssignment_Policy{
	//		// We choose such a large value here that the failover math should
	//		// in effect not happen until zero instances are healthy.
	//		OverprovisioningFactor: response.MakeUint32Value(100000),
	//	}
	//}

	var priority uint32

	for _, endpointGroup := range endpointGroups {
		endpointsByLocality, err := groupedEndpoints(cfgSnap.ServiceLocality, policy, endpointGroup.Endpoints)

		if err != nil {
			continue
		}

		for _, endpoints := range endpointsByLocality {
			for _, ep := range endpoints {
				// TODO (mesh-gateway) - should we respect the translate_wan_addrs configuration here or just always use the wan for cross-dc?
				_, addr, port := ep.BestAddress(!localKey.Matches(ep.Node.Datacenter, ep.Node.PartitionOrDefault()))
				healthStatus, weight := calculateEndpointHealthAndWeight(ep, endpointGroup.OnlyPassing)

				if endpointGroup.OverrideHealth != pbproxystate.HealthStatus_HEALTH_STATUS_UNKNOWN {
					healthStatus = endpointGroup.OverrideHealth
				}

				endpoint := makeHostPortEndpoint(addr, port)
				endpoint.HealthStatus = healthStatus
				endpoint.LoadBalancingWeight = response.MakeUint32Value(weight)

				pbEndpoints = append(pbEndpoints, endpoint)
			}

			// TODO(proxystate): what do we do about priority downstream?
			//cla.Endpoints = append(cla.Endpoints, &envoy_endpoint_v3.LocalityLbEndpoints{
			//	Priority:    priority,
			//	LbEndpoints: es,
			//})

			priority++
		}
	}

	return pbEndpoints
}

func makeLoadAssignmentEndpointGroup(
	targets map[string]*structs.DiscoveryTarget,
	targetHealth map[string]structs.CheckServiceNodes,
	gatewayHealth map[string]structs.CheckServiceNodes,
	targetID string,
	localKey proxycfg.GatewayKey,
	forMeshGateway bool,
) (loadAssignmentEndpointGroup, bool) {
	realEndpoints, ok := targetHealth[targetID]
	if !ok {
		// skip the cluster if we're still populating the snapshot
		return loadAssignmentEndpointGroup{}, false
	}
	target := targets[targetID]

	var gatewayKey proxycfg.GatewayKey

	switch target.MeshGateway.Mode {
	case structs.MeshGatewayModeRemote:
		gatewayKey.Datacenter = target.Datacenter
		gatewayKey.Partition = target.Partition
	case structs.MeshGatewayModeLocal:
		gatewayKey = localKey
	}

	if forMeshGateway || gatewayKey.IsEmpty() || localKey.Matches(target.Datacenter, target.Partition) {
		// Gateways are not needed if the request isn't for a remote DC or partition.
		return loadAssignmentEndpointGroup{
			Endpoints:   realEndpoints,
			OnlyPassing: target.Subset.OnlyPassing,
		}, true
	}

	// If using a mesh gateway we need to pull those endpoints instead.
	gatewayEndpoints, ok := gatewayHealth[gatewayKey.String()]
	if !ok {
		// skip the cluster if we're still populating the snapshot
		return loadAssignmentEndpointGroup{}, false
	}

	// But we will use the health from the actual backend service.
	overallHealth := pbproxystate.HealthStatus_HEALTH_STATUS_UNHEALTHY
	for _, ep := range realEndpoints {
		health, _ := calculateEndpointHealthAndWeight(ep, target.Subset.OnlyPassing)
		if health == pbproxystate.HealthStatus_HEALTH_STATUS_HEALTHY {
			overallHealth = pbproxystate.HealthStatus_HEALTH_STATUS_HEALTHY
			break
		}
	}

	return loadAssignmentEndpointGroup{
		Endpoints:      gatewayEndpoints,
		OverrideHealth: overallHealth,
	}, true
}

func calculateEndpointHealthAndWeight(
	ep structs.CheckServiceNode,
	onlyPassing bool,
) (pbproxystate.HealthStatus, int) {
	healthStatus := pbproxystate.HealthStatus_HEALTH_STATUS_HEALTHY
	weight := 1
	if ep.Service.Weights != nil {
		weight = ep.Service.Weights.Passing
	}

	for _, chk := range ep.Checks {
		if chk.Status == api.HealthCritical {
			healthStatus = pbproxystate.HealthStatus_HEALTH_STATUS_UNHEALTHY
		}
		if onlyPassing && chk.Status != api.HealthPassing {
			healthStatus = pbproxystate.HealthStatus_HEALTH_STATUS_UNHEALTHY
		}
		if chk.Status == api.HealthWarning && ep.Service.Weights != nil {
			weight = ep.Service.Weights.Warning
		}
	}
	// Make weights fit Envoy's limits. A zero weight means that either Warning
	// (likely) or Passing (weirdly) weight has been set to 0 effectively making
	// this instance unhealthy and should not be sent traffic.
	if weight < 1 {
		healthStatus = pbproxystate.HealthStatus_HEALTH_STATUS_UNHEALTHY
		weight = 1
	}
	if weight > 128 {
		weight = 128
	}
	return healthStatus, weight
}
Some files were not shown because too many files have changed in this diff
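To make the weight clamping in the removed calculateEndpointHealthAndWeight concrete, here is a small self-contained restatement of its rules using local stand-in types rather than Consul's structs, api, and pbproxystate packages. It is an illustration only, not the original helper: a warning weight of 0 marks the instance unhealthy but still reports weight 1, and weights are capped at 128 as in the code above.

```go
package main

import "fmt"

// check is a stand-in for structs.HealthCheck; only the status string matters here.
type check struct{ status string }

// healthAndWeight restates the rules of the removed helper: critical (or any
// non-passing check when onlyPassing is set) marks the endpoint unhealthy,
// warning switches to the warning weight, and the result is clamped to [1, 128].
func healthAndWeight(passing, warning int, checks []check, onlyPassing bool) (healthy bool, weight int) {
	healthy, weight = true, passing
	for _, c := range checks {
		if c.status == "critical" || (onlyPassing && c.status != "passing") {
			healthy = false
		}
		if c.status == "warning" {
			weight = warning
		}
	}
	if weight < 1 { // a zero weight effectively means "send no traffic"
		healthy, weight = false, 1
	}
	if weight > 128 { // the cap applied above to fit Envoy's limits
		weight = 128
	}
	return healthy, weight
}

func main() {
	fmt.Println(healthAndWeight(10, 0, []check{{status: "warning"}}, false)) // false 1
	fmt.Println(healthAndWeight(500, 1, nil, false))                         // true 128
}
```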