xds controller: set up watches for and compute leaf cert references in ProxyStateTemplate, and wire up leaf cert manager dependency (#18756)

* Refactors the leafcert package so it no longer depends on agent/consul or agent/cache, avoiding import cycles. This lets the xds controller import the leafcert package directly to use the leafcert manager.
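The mechanics of the refactor are visible in the agent/cache and agent/cacheshim diffs below: the shared cache types move into the new agent/cacheshim package, and agent/cache keeps its public API by re-exporting them as type aliases, so existing callers compile unchanged while leafcert (and therefore the xds controller) only depends on the shim. Condensed, the aliasing side looks like this:

```go
// agent/cache (condensed): keep the public names by aliasing the shim types so
// callers of agent/cache are unaffected, while leafcert imports only cacheshim.
package cache

import "github.com/hashicorp/consul/agent/cacheshim"

type ResultMeta = cacheshim.ResultMeta
type Request = cacheshim.Request
type RequestInfo = cacheshim.RequestInfo
type UpdateEvent = cacheshim.UpdateEvent
type Callback = cacheshim.Callback
```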

The leaf cert logic in the controller:
* Sets up watches for leaf certs that are referenced in the ProxyStateTemplate (setting up the watch also triggers generation of the leaf cert).
* Gets the leaf cert from the leaf cert cache (see the sketch after this list).
* Stores the leaf cert in the ProxyState that is pushed to xds.
* For the cert watches, this PR also uses a bimapper plus a thin wrapper to map leaf cert events to the related ProxyStateTemplates.
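The fetch-and-store step, roughly. This is an illustrative helper rather than the PR's literal reconciler code: the Get signature and ConnectCALeafRequest fields match the leafcert package in this PR, but the pbproxystate.LeafCertificate field names and import path are assumptions.

```go
package xds

import (
	"context"

	"github.com/hashicorp/consul/agent/leafcert"
	"github.com/hashicorp/consul/proto-public/pbmesh/v1alpha1/pbproxystate"
)

// storeLeaf fetches one leaf cert referenced by a ProxyStateTemplate and stores
// it in the ProxyState's LeafCertificates map (illustrative only).
func storeLeaf(
	ctx context.Context,
	certs *leafcert.Manager,
	leafCerts map[string]*pbproxystate.LeafCertificate,
	workloadIdentity, token, datacenter string,
) error {
	cert, _, err := certs.Get(ctx, &leafcert.ConnectCALeafRequest{
		WorkloadIdentity: workloadIdentity, // referenced by the ProxyStateTemplate
		Token:            token,            // token returned by ProxyConnectedToServer
		Datacenter:       datacenter,
	})
	if err != nil {
		return err
	}
	leafCerts[workloadIdentity] = &pbproxystate.LeafCertificate{
		Cert: cert.CertPEM,
		Key:  cert.PrivateKeyPEM,
	}
	return nil
}
```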

Since bimapper maps between two resource types using a resource.Reference or resource.ID, I've created an internal type for a leaf certificate to use as the resource.Reference, because a leaf certificate is not a v2 resource (an illustrative shape is sketched below).
The wrapper allows mapping events to resources (as opposed to mapping resources to resources).
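A minimal sketch of that internal reference type, assuming hypothetical Group/Kind values and a hypothetical leafReference helper (the controller diff below only shows the type being passed to bimapper as xds.InternalLeafType):

```go
package xds

import "github.com/hashicorp/consul/proto-public/pbresource"

// internalLeafType stands in for the internal leaf cert type this PR introduces;
// the Group/GroupVersion/Kind values here are assumptions, not the PR's literal ones.
var internalLeafType = &pbresource.Type{
	Group:        "internal",
	GroupVersion: "v1alpha1",
	Kind:         "leaf",
}

// leafReference builds the synthetic resource.Reference that lets bimapper track
// ProxyStateTemplate -> leaf cert relationships, keyed by workload identity and
// tenancy (illustrative shape only).
func leafReference(workloadIdentity, partition, namespace string) *pbresource.Reference {
	return &pbresource.Reference{
		Type: internalLeafType,
		Name: workloadIdentity,
		Tenancy: &pbresource.Tenancy{
			Partition: partition,
			Namespace: namespace,
		},
	}
}
```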

The controller tests:
* Unit: ensure that leaf cert references are resolved.
* Lifecycle: ensure that when the CA is updated, the leaf cert is rotated as well (sketched below).
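A minimal sketch of the lifecycle idea, built on the leafcert test helpers this PR exports (NewTestManager, TestSigner.UpdateCA). The real test drives the xds controller; this only shows the CA-rotation assertion and assumes the manager's proactive rotation lands within the retry window.

```go
package leafcert_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/consul/agent/leafcert"
	"github.com/hashicorp/consul/sdk/testutil/retry"
)

func TestLeafRotatesWhenCAChanges(t *testing.T) {
	m, signer := leafcert.NewTestManager(t, nil)
	_ = signer.UpdateCA(t, nil)

	req := &leafcert.ConnectCALeafRequest{
		Datacenter:       "dc1",
		WorkloadIdentity: "web",
	}
	first, _, err := m.Get(context.Background(), req)
	require.NoError(t, err)

	// Rotating the CA should eventually cause a new leaf to be issued.
	_ = signer.UpdateCA(t, nil)
	retry.Run(t, func(r *retry.R) {
		second, _, err := m.Get(context.Background(), req)
		require.NoError(r, err)
		require.NotEqual(r, first.CertPEM, second.CertPEM)
	})
}
```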

Also adds a new SPIFFE ID type, and adds workload identity and workload identity URI fields to leaf certs. This is so certs are generated with the new workload-identity-based SPIFFE ID.
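For reference, the workload identity SPIFFE ID has the form spiffe://<trust-domain>/ap/<partition>/ns/<namespace>/identity/<workload-identity>, as exercised by the connect tests added below:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/agent/connect"
)

func main() {
	id := connect.SpiffeIDWorkloadIdentity{
		TrustDomain:      "1234.consul",
		Partition:        "default",
		Namespace:        "default",
		WorkloadIdentity: "web",
	}
	fmt.Println(id.URI().String())
	// spiffe://1234.consul/ap/default/ns/default/identity/web
}
```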

* Pulls out some leaf cert test helpers into a helpers file so they can be used in the xds controller tests.
* Wires up the leaf cert manager dependency.
* Supports getting the token from the proxy tracker (interface sketched below).
* Adds the workload identity SPIFFE ID type to the authorize and sign functions.
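The proxy tracker item is a signature change on the ProxyUpdater interface (shown here as it appears in the server and xds controller diffs below, with imports elided); the token it returns is what the controller uses as ConnectCALeafRequest.Token when requesting leaf certs.

```go
type ProxyUpdater interface {
	// PushChange allows pushing a computed ProxyState to xds for xds resource
	// generation to send to a proxy.
	PushChange(id *pbresource.ID, snapshot proxysnapshot.ProxySnapshot) error

	// ProxyConnectedToServer returns whether this id is connected to this server.
	// If it is connected, it also returns the token as the first return value.
	ProxyConnectedToServer(id *pbresource.ID) (string, bool)

	// EventChannel returns a channel of events that are consumed by the Custom Watcher.
	EventChannel() chan controller.Event
}
```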



---------

Co-authored-by: John Murret <john.murret@hashicorp.com>
Nitya Dhanushkodi 2023-09-12 12:56:43 -07:00 committed by GitHub
parent 89e6725eee
commit 78b170ad50
42 changed files with 1302 additions and 399 deletions

View File

@ -22,6 +22,8 @@ import (
"sync/atomic"
"time"
"github.com/hashicorp/consul/lib/stringslice"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/go-connlimit"
@ -71,7 +73,6 @@ import (
"github.com/hashicorp/consul/lib/file"
"github.com/hashicorp/consul/lib/mutex"
"github.com/hashicorp/consul/lib/routine"
"github.com/hashicorp/consul/lib/stringslice"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/consul/proto-public/pbresource"
"github.com/hashicorp/consul/proto/private/pboperator"

View File

@ -8,11 +8,12 @@ import (
"fmt"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/cacheshim"
"github.com/hashicorp/consul/agent/structs"
)
// Recommended name for registration.
const ConnectCARootName = "connect-ca-root"
const ConnectCARootName = cacheshim.ConnectCARootName
// ConnectCARoot supports fetching the Connect CA roots. This is a
// straightforward cache type since it only has to block on the given

28
agent/cache/cache.go vendored
View File

@ -32,6 +32,7 @@ import (
"golang.org/x/time/rate"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cacheshim"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/lib/ttlcache"
)
@ -172,32 +173,7 @@ type typeEntry struct {
// ResultMeta is returned from Get calls along with the value and can be used
// to expose information about the cache status for debugging or testing.
type ResultMeta struct {
// Hit indicates whether or not the request was a cache hit
Hit bool
// Age identifies how "stale" the result is. It's semantics differ based on
// whether or not the cache type performs background refresh or not as defined
// in https://www.consul.io/api/index.html#agent-caching.
//
// For background refresh types, Age is 0 unless the background blocking query
// is currently in a failed state and so not keeping up with the server's
// values. If it is non-zero it represents the time since the first failure to
// connect during background refresh, and is reset after a background request
// does manage to reconnect and either return successfully, or block for at
// least the yamux keepalive timeout of 30 seconds (which indicates the
// connection is OK but blocked as expected).
//
// For simple cache types, Age is the time since the result being returned was
// fetched from the servers.
Age time.Duration
// Index is the internal ModifyIndex for the cache entry. Not all types
// support blocking and all that do will likely have this in their result type
// already but this allows generic code to reason about whether cache values
// have changed.
Index uint64
}
type ResultMeta = cacheshim.ResultMeta
// Options are options for the Cache.
type Options struct {

View File

@ -4,7 +4,7 @@
package cache
import (
"time"
"github.com/hashicorp/consul/agent/cacheshim"
)
// Request is a cacheable request.
@ -13,10 +13,7 @@ import (
// the agent/structs package.
//
//go:generate mockery --name Request --inpackage
type Request interface {
// CacheInfo returns information used for caching this request.
CacheInfo() RequestInfo
}
type Request = cacheshim.Request
// RequestInfo represents cache information for a request. The caching
// framework uses this to control the behavior of caching and to determine
@ -24,53 +21,4 @@ type Request interface {
//
// TODO(peering): finish ensuring everything that sets a Datacenter sets or doesn't set PeerName.
// TODO(peering): also make sure the peer name is present in the cache key likely in lieu of the datacenter somehow.
type RequestInfo struct {
// Key is a unique cache key for this request. This key should
// be globally unique to identify this request, since any conflicting
// cache keys could result in invalid data being returned from the cache.
// The Key does not need to include ACL or DC information, since the
// cache already partitions by these values prior to using this key.
Key string
// Token is the ACL token associated with this request.
//
// Datacenter is the datacenter that the request is targeting.
//
// PeerName is the peer that the request is targeting.
//
// All of these values are used to partition the cache. The cache framework
// today partitions data on these values to simplify behavior: by
// partitioning ACL tokens, the cache doesn't need to be smart about
// filtering results. By filtering datacenter/peer results, the cache can
// service the multi-DC/multi-peer nature of Consul. This comes at the expense of
// working set size, but in general the effect is minimal.
Token string
Datacenter string
PeerName string
// MinIndex is the minimum index being queried. This is used to
// determine if we already have data satisfying the query or if we need
// to block until new data is available. If no index is available, the
// default value (zero) is acceptable.
MinIndex uint64
// Timeout is the timeout for waiting on a blocking query. When the
// timeout is reached, the last known value is returned (or maybe nil
// if there was no prior value). This "last known value" behavior matches
// normal Consul blocking queries.
Timeout time.Duration
// MaxAge if set limits how stale a cache entry can be. If it is non-zero and
// there is an entry in cache that is older than specified, it is treated as a
// cache miss and re-fetched. It is ignored for cachetypes with Refresh =
// true.
MaxAge time.Duration
// MustRevalidate forces a new lookup of the cache even if there is an
// existing one that has not expired. It is implied by HTTP requests with
// `Cache-Control: max-age=0` but we can't distinguish that case from the
// unset case for MaxAge. Later we may support revalidating the index without
// a full re-fetch but for now the only option is to refetch. It is ignored
// for cachetypes with Refresh = true.
MustRevalidate bool
}
type RequestInfo = cacheshim.RequestInfo

23
agent/cache/watch.go vendored
View File

@ -9,26 +9,17 @@ import (
"reflect"
"time"
"github.com/hashicorp/consul/lib"
"google.golang.org/protobuf/proto"
"github.com/hashicorp/consul/agent/cacheshim"
"github.com/hashicorp/consul/lib"
)
// UpdateEvent is a struct summarizing an update to a cache entry
type UpdateEvent struct {
// CorrelationID is used by the Notify API to allow correlation of updates
// with specific requests. We could return the full request object and
// cachetype for consumers to match against the calls they made but in
// practice it's cleaner for them to choose the minimal necessary unique
// identifier given the set of things they are watching. They might even
// choose to assign random IDs for example.
CorrelationID string
Result interface{}
Meta ResultMeta
Err error
}
type UpdateEvent = cacheshim.UpdateEvent
// Callback is the function type accepted by NotifyCallback.
type Callback func(ctx context.Context, event UpdateEvent)
type Callback = cacheshim.Callback
// Notify registers a desire to be updated about changes to a cache result.
//
@ -126,7 +117,7 @@ func (c *Cache) notifyBlockingQuery(ctx context.Context, r getOptions, correlati
// Check the index of the value returned in the cache entry to be sure it
// changed
if index == 0 || index < meta.Index {
cb(ctx, UpdateEvent{correlationID, res, meta, err})
cb(ctx, UpdateEvent{CorrelationID: correlationID, Result: res, Meta: meta, Err: err})
// Update index for next request
index = meta.Index
@ -186,7 +177,7 @@ func (c *Cache) notifyPollingQuery(ctx context.Context, r getOptions, correlatio
// Check for a change in the value or an index change
if index < meta.Index || !isEqual(lastValue, res) {
cb(ctx, UpdateEvent{correlationID, res, meta, err})
cb(ctx, UpdateEvent{CorrelationID: correlationID, Result: res, Meta: meta, Err: err})
// Update index and lastValue
lastValue = res

118
agent/cacheshim/cache.go Normal file
View File

@ -0,0 +1,118 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cacheshim
import (
"context"
"time"
)
// cacheshim defines any shared cache types for any packages that don't want to have a dependency on the agent cache.
// This was created as part of a refactor to remove agent/leafcert package's dependency on agent/cache.
type ResultMeta struct {
// Hit indicates whether or not the request was a cache hit
Hit bool
// Age identifies how "stale" the result is. It's semantics differ based on
// whether or not the cache type performs background refresh or not as defined
// in https://www.consul.io/api/index.html#agent-caching.
//
// For background refresh types, Age is 0 unless the background blocking query
// is currently in a failed state and so not keeping up with the server's
// values. If it is non-zero it represents the time since the first failure to
// connect during background refresh, and is reset after a background request
// does manage to reconnect and either return successfully, or block for at
// least the yamux keepalive timeout of 30 seconds (which indicates the
// connection is OK but blocked as expected).
//
// For simple cache types, Age is the time since the result being returned was
// fetched from the servers.
Age time.Duration
// Index is the internal ModifyIndex for the cache entry. Not all types
// support blocking and all that do will likely have this in their result type
// already but this allows generic code to reason about whether cache values
// have changed.
Index uint64
}
type Request interface {
// CacheInfo returns information used for caching this request.
CacheInfo() RequestInfo
}
type RequestInfo struct {
// Key is a unique cache key for this request. This key should
// be globally unique to identify this request, since any conflicting
// cache keys could result in invalid data being returned from the cache.
// The Key does not need to include ACL or DC information, since the
// cache already partitions by these values prior to using this key.
Key string
// Token is the ACL token associated with this request.
//
// Datacenter is the datacenter that the request is targeting.
//
// PeerName is the peer that the request is targeting.
//
// All of these values are used to partition the cache. The cache framework
// today partitions data on these values to simplify behavior: by
// partitioning ACL tokens, the cache doesn't need to be smart about
// filtering results. By filtering datacenter/peer results, the cache can
// service the multi-DC/multi-peer nature of Consul. This comes at the expense of
// working set size, but in general the effect is minimal.
Token string
Datacenter string
PeerName string
// MinIndex is the minimum index being queried. This is used to
// determine if we already have data satisfying the query or if we need
// to block until new data is available. If no index is available, the
// default value (zero) is acceptable.
MinIndex uint64
// Timeout is the timeout for waiting on a blocking query. When the
// timeout is reached, the last known value is returned (or maybe nil
// if there was no prior value). This "last known value" behavior matches
// normal Consul blocking queries.
Timeout time.Duration
// MaxAge if set limits how stale a cache entry can be. If it is non-zero and
// there is an entry in cache that is older than specified, it is treated as a
// cache miss and re-fetched. It is ignored for cachetypes with Refresh =
// true.
MaxAge time.Duration
// MustRevalidate forces a new lookup of the cache even if there is an
// existing one that has not expired. It is implied by HTTP requests with
// `Cache-Control: max-age=0` but we can't distinguish that case from the
// unset case for MaxAge. Later we may support revalidating the index without
// a full re-fetch but for now the only option is to refetch. It is ignored
// for cachetypes with Refresh = true.
MustRevalidate bool
}
type UpdateEvent struct {
// CorrelationID is used by the Notify API to allow correlation of updates
// with specific requests. We could return the full request object and
// cachetype for consumers to match against the calls they made but in
// practice it's cleaner for them to choose the minimal necessary unique
// identifier given the set of things they are watching. They might even
// choose to assign random IDs for example.
CorrelationID string
Result interface{}
Meta ResultMeta
Err error
}
type Callback func(ctx context.Context, event UpdateEvent)
type Cache interface {
Get(ctx context.Context, t string, r Request) (interface{}, ResultMeta, error)
NotifyCallback(ctx context.Context, t string, r Request, correlationID string, cb Callback) error
Notify(ctx context.Context, t string, r Request, correlationID string, ch chan<- UpdateEvent) error
}
const ConnectCARootName = "connect-ca-root"

View File

@ -23,6 +23,8 @@ type CertURI interface {
}
var (
spiffeIDWorkloadIdentityRegexp = regexp.MustCompile(
`^(?:/ap/([^/]+))/ns/([^/]+)/identity/([^/]+)$`)
spiffeIDServiceRegexp = regexp.MustCompile(
`^(?:/ap/([^/]+))?/ns/([^/]+)/dc/([^/]+)/svc/([^/]+)$`)
spiffeIDAgentRegexp = regexp.MustCompile(
@ -94,6 +96,32 @@ func ParseCertURI(input *url.URL) (CertURI, error) {
Datacenter: dc,
Service: service,
}, nil
} else if v := spiffeIDWorkloadIdentityRegexp.FindStringSubmatch(path); v != nil {
// Determine the values. We assume they're reasonable to save cycles,
// but if the raw path is not empty that means that something is
// URL encoded so we go to the slow path.
ap := v[1]
ns := v[2]
workloadIdentity := v[3]
if input.RawPath != "" {
var err error
if ap, err = url.PathUnescape(v[1]); err != nil {
return nil, fmt.Errorf("Invalid admin partition: %s", err)
}
if ns, err = url.PathUnescape(v[2]); err != nil {
return nil, fmt.Errorf("Invalid namespace: %s", err)
}
if workloadIdentity, err = url.PathUnescape(v[3]); err != nil {
return nil, fmt.Errorf("Invalid workload identity: %s", err)
}
}
return &SpiffeIDWorkloadIdentity{
TrustDomain: input.Host,
Partition: ap,
Namespace: ns,
WorkloadIdentity: workloadIdentity,
}, nil
} else if v := spiffeIDAgentRegexp.FindStringSubmatch(path); v != nil {
// Determine the values. We assume they're reasonable to save cycles,
// but if the raw path is not empty that means that something is

View File

@ -54,33 +54,13 @@ func (id SpiffeIDService) uriPath() string {
return path
}
// SpiffeIDWorkloadIdentity is the structure to represent the SPIFFE ID for a workload identity.
type SpiffeIDWorkloadIdentity struct {
Host string
Partition string
Namespace string
Identity string
}
func (id SpiffeIDWorkloadIdentity) URI() *url.URL {
var result url.URL
result.Scheme = "spiffe"
result.Host = id.Host
result.Path = fmt.Sprintf("/ap/%s/ns/%s/identity/%s",
id.Partition,
id.Namespace,
id.Identity,
)
return &result
}
// SpiffeIDFromIdentityRef creates the SPIFFE ID from a workload identity.
// TODO (ishustava): make sure ref type is workload identity.
func SpiffeIDFromIdentityRef(trustDomain string, ref *pbresource.Reference) string {
return SpiffeIDWorkloadIdentity{
Host: trustDomain,
TrustDomain: trustDomain,
Partition: ref.Tenancy.Partition,
Namespace: ref.Tenancy.Namespace,
Identity: ref.Name,
WorkloadIdentity: ref.Name,
}.URI().String()
}

View File

@ -51,14 +51,20 @@ func (id SpiffeIDSigning) CanSign(cu CertURI) bool {
// worry about Unicode domains if we start allowing customisation beyond the
// built-in cluster ids.
return strings.ToLower(other.Host) == id.Host()
case *SpiffeIDWorkloadIdentity:
// The trust domain component of the workload identity SPIFFE ID must be an exact match for now under
// ascii case folding (since hostnames are case-insensitive). Later we might
// worry about Unicode domains if we start allowing customisation beyond the
// built-in cluster ids.
return strings.ToLower(other.TrustDomain) == id.Host()
case *SpiffeIDMeshGateway:
// The host component of the service must be an exact match for now under
// The host component of the mesh gateway SPIFFE ID must be an exact match for now under
// ascii case folding (since hostnames are case-insensitive). Later we might
// worry about Unicode domains if we start allowing customisation beyond the
// built-in cluster ids.
return strings.ToLower(other.Host) == id.Host()
case *SpiffeIDServer:
// The host component of the service must be an exact match for now under
// The host component of the server SPIFFE ID must be an exact match for now under
// ascii case folding (since hostnames are case-insensitive). Later we might
// worry about Unicode domains if we start allowing customisation beyond the
// built-in cluster ids.

View File

@ -98,6 +98,30 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) {
input: &SpiffeIDService{Host: TestClusterID + ".fake", Namespace: "default", Datacenter: "dc1", Service: "web"},
want: false,
},
{
name: "workload - good",
id: testSigning,
input: &SpiffeIDWorkloadIdentity{TrustDomain: TestClusterID + ".consul", Namespace: "default", WorkloadIdentity: "web"},
want: true,
},
{
name: "workload - good mixed case",
id: testSigning,
input: &SpiffeIDWorkloadIdentity{TrustDomain: strings.ToUpper(TestClusterID) + ".CONsuL", Namespace: "defAUlt", WorkloadIdentity: "WEB"},
want: true,
},
{
name: "workload - different cluster",
id: testSigning,
input: &SpiffeIDWorkloadIdentity{TrustDomain: "55555555-4444-3333-2222-111111111111.consul", Namespace: "default", WorkloadIdentity: "web"},
want: false,
},
{
name: "workload - different TLD",
id: testSigning,
input: &SpiffeIDWorkloadIdentity{TrustDomain: TestClusterID + ".fake", Namespace: "default", WorkloadIdentity: "web"},
want: false,
},
{
name: "mesh gateway - good",
id: testSigning,

View File

@ -51,6 +51,61 @@ func TestParseCertURIFromString(t *testing.T) {
},
ParseError: "",
},
{
Name: "basic workload ID",
URI: "spiffe://1234.consul/ap/default/ns/default/identity/web",
Struct: &SpiffeIDWorkloadIdentity{
TrustDomain: "1234.consul",
Partition: defaultEntMeta.PartitionOrDefault(),
Namespace: "default",
WorkloadIdentity: "web",
},
ParseError: "",
},
{
Name: "basic workload ID with nondefault partition",
URI: "spiffe://1234.consul/ap/bizdev/ns/default/identity/web",
Struct: &SpiffeIDWorkloadIdentity{
TrustDomain: "1234.consul",
Partition: "bizdev",
Namespace: "default",
WorkloadIdentity: "web",
},
ParseError: "",
},
{
Name: "workload ID error - missing identity",
URI: "spiffe://1234.consul/ns/default",
Struct: &SpiffeIDWorkloadIdentity{
TrustDomain: "1234.consul",
Partition: defaultEntMeta.PartitionOrDefault(),
Namespace: "default",
WorkloadIdentity: "web",
},
ParseError: "SPIFFE ID is not in the expected format",
},
{
Name: "workload ID error - missing partition",
URI: "spiffe://1234.consul/ns/default/identity/web",
Struct: &SpiffeIDWorkloadIdentity{
TrustDomain: "1234.consul",
Partition: defaultEntMeta.PartitionOrDefault(),
Namespace: "default",
WorkloadIdentity: "web",
},
ParseError: "SPIFFE ID is not in the expected format",
},
{
Name: "workload ID error - missing namespace",
URI: "spiffe://1234.consul/ap/default/identity/web",
Struct: &SpiffeIDWorkloadIdentity{
TrustDomain: "1234.consul",
Partition: defaultEntMeta.PartitionOrDefault(),
Namespace: "default",
WorkloadIdentity: "web",
},
ParseError: "SPIFFE ID is not in the expected format",
},
{
Name: "basic agent ID",
URI: "spiffe://1234.consul/agent/client/dc/dc1/id/uuid",

View File

@ -0,0 +1,46 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package connect
import (
"fmt"
"net/url"
"github.com/hashicorp/consul/acl"
)
// SpiffeIDWorkloadIdentity is the structure to represent the SPIFFE ID for a workload.
type SpiffeIDWorkloadIdentity struct {
TrustDomain string
Partition string
Namespace string
WorkloadIdentity string
}
func (id SpiffeIDWorkloadIdentity) NamespaceOrDefault() string {
return acl.NamespaceOrDefault(id.Namespace)
}
// URI returns the *url.URL for this SPIFFE ID.
func (id SpiffeIDWorkloadIdentity) URI() *url.URL {
var result url.URL
result.Scheme = "spiffe"
result.Host = id.TrustDomain
result.Path = id.uriPath()
return &result
}
func (id SpiffeIDWorkloadIdentity) uriPath() string {
// Although CE has no support for partitions, it still needs to be able to
// handle exportedPartition from peered Consul Enterprise clusters in order
// to generate the correct SpiffeID.
// We intentionally avoid using pbpartition.DefaultName here to be CE friendly.
path := fmt.Sprintf("/ap/%s/ns/%s/identity/%s",
id.PartitionOrDefault(),
id.NamespaceOrDefault(),
id.WorkloadIdentity,
)
return path
}

View File

@ -0,0 +1,31 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
//go:build !consulent
// +build !consulent
package connect
import (
"strings"
"github.com/hashicorp/consul/acl"
)
// GetEnterpriseMeta will synthesize an EnterpriseMeta struct from the SpiffeIDWorkloadIdentity.
// in CE this just returns an empty (but never nil) struct pointer
func (id SpiffeIDWorkloadIdentity) GetEnterpriseMeta() *acl.EnterpriseMeta {
return &acl.EnterpriseMeta{}
}
// PartitionOrDefault breaks from CE's pattern of returning empty strings.
// Although CE has no support for partitions, it still needs to be able to
// handle exportedPartition from peered Consul Enterprise clusters in order
// to generate the correct SpiffeID.
func (id SpiffeIDWorkloadIdentity) PartitionOrDefault() string {
if id.Partition == "" {
return "default"
}
return strings.ToLower(id.Partition)
}

View File

@ -0,0 +1,41 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
//go:build !consulent
// +build !consulent
package connect
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestSpiffeIDWorkloadURI(t *testing.T) {
t.Run("default partition; default namespace", func(t *testing.T) {
wl := &SpiffeIDWorkloadIdentity{
TrustDomain: "1234.consul",
WorkloadIdentity: "web",
}
require.Equal(t, "spiffe://1234.consul/ap/default/ns/default/identity/web", wl.URI().String())
})
t.Run("namespaces are ignored", func(t *testing.T) {
wl := &SpiffeIDWorkloadIdentity{
TrustDomain: "1234.consul",
WorkloadIdentity: "web",
Namespace: "other",
}
require.Equal(t, "spiffe://1234.consul/ap/default/ns/default/identity/web", wl.URI().String())
})
t.Run("partitions are not ignored", func(t *testing.T) {
wl := &SpiffeIDWorkloadIdentity{
TrustDomain: "1234.consul",
WorkloadIdentity: "web",
Partition: "other",
}
require.Equal(t, "spiffe://1234.consul/ap/other/ns/default/identity/web", wl.URI().String())
})
}

View File

@ -4,7 +4,6 @@
package consul
import (
"errors"
"fmt"
"time"
@ -26,10 +25,11 @@ var (
// variable points to. Clients need to compare using `err.Error() ==
// consul.ErrRateLimited.Error()` which is very sad. Short of replacing our
// RPC mechanism it's hard to know how to make that much better though.
ErrConnectNotEnabled = errors.New("Connect must be enabled in order to use this endpoint")
ErrRateLimited = errors.New("Rate limit reached, try again later") // Note: we depend on this error message in the gRPC ConnectCA.Sign endpoint (see: isRateLimitError).
ErrNotPrimaryDatacenter = errors.New("not the primary datacenter")
ErrStateReadOnly = errors.New("CA Provider State is read-only")
ErrConnectNotEnabled = structs.ErrConnectNotEnabled
ErrRateLimited = structs.ErrRateLimited
ErrNotPrimaryDatacenter = structs.ErrNotPrimaryDatacenter
ErrStateReadOnly = structs.ErrStateReadOnly
)
const (

View File

@ -1436,6 +1436,7 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au
if err != nil {
return nil, err
}
c.logger.Trace("authorizing and signing cert", "spiffeID", spiffeID)
// Perform authorization.
var authzContext acl.AuthorizerContext
@ -1454,6 +1455,8 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different datacenter: %s, "+
"we are %s", v.Datacenter, dc)
}
case *connect.SpiffeIDWorkloadIdentity:
// TODO: Check for identity:write on the token when identity permissions are supported.
case *connect.SpiffeIDAgent:
v.GetEnterpriseMeta().FillAuthzContext(&authzContext)
if err := allow.NodeWriteAllowed(v.Agent, &authzContext); err != nil {
@ -1487,6 +1490,7 @@ func (c *CAManager) AuthorizeAndSignCertificate(csr *x509.CertificateRequest, au
"we are %s", v.Datacenter, dc)
}
default:
c.logger.Trace("spiffe ID type is not expected", "spiffeID", spiffeID, "spiffeIDType", v)
return nil, connect.InvalidCSRError("SPIFFE ID in CSR must be a service, mesh-gateway, or agent ID")
}
@ -1513,6 +1517,7 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
agentID, isAgent := spiffeID.(*connect.SpiffeIDAgent)
serverID, isServer := spiffeID.(*connect.SpiffeIDServer)
mgwID, isMeshGateway := spiffeID.(*connect.SpiffeIDMeshGateway)
wID, isWorkloadIdentity := spiffeID.(*connect.SpiffeIDWorkloadIdentity)
var entMeta acl.EnterpriseMeta
switch {
@ -1522,7 +1527,12 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
"we are %s", serviceID.Host, signingID.Host())
}
entMeta.Merge(serviceID.GetEnterpriseMeta())
case isWorkloadIdentity:
if !signingID.CanSign(spiffeID) {
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+
"we are %s", wID.TrustDomain, signingID.Host())
}
entMeta.Merge(wID.GetEnterpriseMeta())
case isMeshGateway:
if !signingID.CanSign(spiffeID) {
return nil, connect.InvalidCSRError("SPIFFE ID in CSR from a different trust domain: %s, "+
@ -1645,6 +1655,9 @@ func (c *CAManager) SignCertificate(csr *x509.CertificateRequest, spiffeID conne
case isService:
reply.Service = serviceID.Service
reply.ServiceURI = cert.URIs[0].String()
case isWorkloadIdentity:
reply.WorkloadIdentity = wID.WorkloadIdentity
reply.WorkloadIdentityURI = cert.URIs[0].String()
case isMeshGateway:
reply.Kind = structs.ServiceKindMeshGateway
reply.KindURI = cert.URIs[0].String()

View File

@ -12,6 +12,7 @@ import (
"github.com/hashicorp/consul/agent/consul/stream"
"github.com/hashicorp/consul/agent/grpc-external/limiter"
"github.com/hashicorp/consul/agent/hcp"
"github.com/hashicorp/consul/agent/leafcert"
"github.com/hashicorp/consul/agent/pool"
"github.com/hashicorp/consul/agent/router"
"github.com/hashicorp/consul/agent/rpc/middleware"
@ -21,6 +22,7 @@ import (
)
type Deps struct {
LeafCertManager *leafcert.Manager
EventPublisher *stream.EventPublisher
Logger hclog.InterceptLogger
TLSConfigurator *tlsutil.Configurator

View File

@ -489,8 +489,9 @@ type ProxyUpdater interface {
// PushChange allows pushing a computed ProxyState to xds for xds resource generation to send to a proxy.
PushChange(id *pbresource.ID, snapshot proxysnapshot.ProxySnapshot) error
// ProxyConnectedToServer returns whether this id is connected to this server.
ProxyConnectedToServer(id *pbresource.ID) bool
// ProxyConnectedToServer returns whether this id is connected to this server. If it is connected, it also returns
// the token as the first argument.
ProxyConnectedToServer(id *pbresource.ID) (string, bool)
EventChannel() chan controller.Event
}
@ -918,6 +919,8 @@ func (s *Server) registerControllers(deps Deps, proxyUpdater ProxyUpdater) {
return s.getTrustDomain(caConfig)
},
LeafCertManager: deps.LeafCertManager,
LocalDatacenter: s.config.Datacenter,
ProxyUpdater: proxyUpdater,
})

View File

@ -7,13 +7,12 @@ import (
"context"
"errors"
"github.com/hashicorp/consul/agent/cache"
cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/cacheshim"
"github.com/hashicorp/consul/agent/structs"
)
// NewCachedRootsReader returns a RootsReader that sources data from the agent cache.
func NewCachedRootsReader(cache *cache.Cache, dc string) RootsReader {
func NewCachedRootsReader(cache cacheshim.Cache, dc string) RootsReader {
return &agentCacheRootsReader{
cache: cache,
datacenter: dc,
@ -21,7 +20,7 @@ func NewCachedRootsReader(cache *cache.Cache, dc string) RootsReader {
}
type agentCacheRootsReader struct {
cache *cache.Cache
cache cacheshim.Cache
datacenter string
}
@ -30,7 +29,7 @@ var _ RootsReader = (*agentCacheRootsReader)(nil)
func (r *agentCacheRootsReader) Get() (*structs.IndexedCARoots, error) {
// Background is fine here because this isn't a blocking query as no index is set.
// Therefore this will just either be a cache hit or return once the non-blocking query returns.
rawRoots, _, err := r.cache.Get(context.Background(), cachetype.ConnectCARootName, &structs.DCSpecificRequest{
rawRoots, _, err := r.cache.Get(context.Background(), cacheshim.ConnectCARootName, &structs.DCSpecificRequest{
Datacenter: r.datacenter,
})
if err != nil {
@ -43,8 +42,8 @@ func (r *agentCacheRootsReader) Get() (*structs.IndexedCARoots, error) {
return roots, nil
}
func (r *agentCacheRootsReader) Notify(ctx context.Context, correlationID string, ch chan<- cache.UpdateEvent) error {
return r.cache.Notify(ctx, cachetype.ConnectCARootName, &structs.DCSpecificRequest{
func (r *agentCacheRootsReader) Notify(ctx context.Context, correlationID string, ch chan<- cacheshim.UpdateEvent) error {
return r.cache.Notify(ctx, cacheshim.ConnectCARootName, &structs.DCSpecificRequest{
Datacenter: r.datacenter,
}, correlationID, ch)
}

View File

@ -11,7 +11,6 @@ import (
"time"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib"
)
@ -231,6 +230,15 @@ func (m *Manager) generateNewLeaf(
var ipAddresses []net.IP
switch {
case req.WorkloadIdentity != "":
id = &connect.SpiffeIDWorkloadIdentity{
TrustDomain: roots.TrustDomain,
Partition: req.TargetPartition(),
Namespace: req.TargetNamespace(),
WorkloadIdentity: req.WorkloadIdentity,
}
dnsNames = append(dnsNames, req.DNSSAN...)
case req.Service != "":
id = &connect.SpiffeIDService{
Host: roots.TrustDomain,
@ -273,7 +281,7 @@ func (m *Manager) generateNewLeaf(
dnsNames = append(dnsNames, connect.PeeringServerSAN(req.Datacenter, roots.TrustDomain))
default:
return nil, newState, errors.New("URI must be either service, agent, server, or kind")
return nil, newState, errors.New("URI must be either workload identity, service, agent, server, or kind")
}
// Create a new private key
@ -308,7 +316,7 @@ func (m *Manager) generateNewLeaf(
reply, err := m.certSigner.SignCert(context.Background(), &args)
if err != nil {
if err.Error() == consul.ErrRateLimited.Error() {
if err.Error() == structs.ErrRateLimited.Error() {
if firstTime {
// This was a first fetch - we have no good value in cache. In this case
// we just return the error to the caller rather than rely on surprising

View File

@ -15,7 +15,7 @@ import (
"golang.org/x/sync/singleflight"
"golang.org/x/time/rate"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/cacheshim"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib/ttlcache"
)
@ -104,7 +104,7 @@ type Deps struct {
type RootsReader interface {
Get() (*structs.IndexedCARoots, error)
Notify(ctx context.Context, correlationID string, ch chan<- cache.UpdateEvent) error
Notify(ctx context.Context, correlationID string, ch chan<- cacheshim.UpdateEvent) error
}
type CertSigner interface {
@ -237,7 +237,7 @@ func (m *Manager) Stop() {
// index is retrieved, the last known value (maybe nil) is returned. No
// error is returned on timeout. This matches the behavior of Consul blocking
// queries.
func (m *Manager) Get(ctx context.Context, req *ConnectCALeafRequest) (*structs.IssuedCert, cache.ResultMeta, error) {
func (m *Manager) Get(ctx context.Context, req *ConnectCALeafRequest) (*structs.IssuedCert, cacheshim.ResultMeta, error) {
// Lightweight copy this object so that manipulating req doesn't race.
dup := *req
req = &dup
@ -254,10 +254,10 @@ func (m *Manager) Get(ctx context.Context, req *ConnectCALeafRequest) (*structs.
return m.internalGet(ctx, req)
}
func (m *Manager) internalGet(ctx context.Context, req *ConnectCALeafRequest) (*structs.IssuedCert, cache.ResultMeta, error) {
func (m *Manager) internalGet(ctx context.Context, req *ConnectCALeafRequest) (*structs.IssuedCert, cacheshim.ResultMeta, error) {
key := req.Key()
if key == "" {
return nil, cache.ResultMeta{}, fmt.Errorf("a key is required")
return nil, cacheshim.ResultMeta{}, fmt.Errorf("a key is required")
}
if req.MaxQueryTime <= 0 {
@ -310,7 +310,7 @@ func (m *Manager) internalGet(ctx context.Context, req *ConnectCALeafRequest) (*
}
if !shouldReplaceCert {
meta := cache.ResultMeta{
meta := cacheshim.ResultMeta{
Index: existingIndex,
}
@ -347,7 +347,7 @@ func (m *Manager) internalGet(ctx context.Context, req *ConnectCALeafRequest) (*
// other words valid fetches should reset the error. See
// https://github.com/hashicorp/consul/issues/4480.
if !first && lastFetchErr != nil {
return existing, cache.ResultMeta{Index: existingIndex}, lastFetchErr
return existing, cacheshim.ResultMeta{Index: existingIndex}, lastFetchErr
}
notifyCh := m.triggerCertRefreshInGroup(req, cd)
@ -357,14 +357,14 @@ func (m *Manager) internalGet(ctx context.Context, req *ConnectCALeafRequest) (*
select {
case <-ctx.Done():
return nil, cache.ResultMeta{}, ctx.Err()
return nil, cacheshim.ResultMeta{}, ctx.Err()
case <-notifyCh:
// Our fetch returned, retry the get from the cache.
req.MustRevalidate = false
case <-timeoutTimer.C:
// Timeout on the cache read, just return whatever we have.
return existing, cache.ResultMeta{Index: existingIndex}, nil
return existing, cacheshim.ResultMeta{Index: existingIndex}, nil
}
}
}

View File

@ -9,7 +9,6 @@ import (
"crypto/x509"
"encoding/pem"
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
@ -17,9 +16,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/cacheshim"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/sdk/testutil/retry"
@ -34,7 +32,7 @@ func TestManager_changingRoots(t *testing.T) {
t.Parallel()
m, signer := testManager(t, nil)
m, signer := NewTestManager(t, nil)
caRoot := signer.UpdateCA(t, nil)
@ -98,7 +96,7 @@ func TestManager_changingRootsJitterBetweenCalls(t *testing.T) {
const TestOverrideCAChangeInitialDelay = 100 * time.Millisecond
m, signer := testManager(t, func(cfg *Config) {
m, signer := NewTestManager(t, func(cfg *Config) {
// Override the root-change delay so we will timeout first. We can't set it to
// a crazy high value otherwise we'll have to wait that long in the test to
// see if it actually happens on subsequent calls. We instead reduce the
@ -226,7 +224,7 @@ func testObserveLeafCert[T any](m *Manager, req *ConnectCALeafRequest, cb func(*
func TestManager_changingRootsBetweenBlockingCalls(t *testing.T) {
t.Parallel()
m, signer := testManager(t, nil)
m, signer := NewTestManager(t, nil)
caRoot := signer.UpdateCA(t, nil)
@ -297,7 +295,7 @@ func TestManager_CSRRateLimiting(t *testing.T) {
t.Parallel()
m, signer := testManager(t, func(cfg *Config) {
m, signer := NewTestManager(t, func(cfg *Config) {
// Each jitter window will be only 100 ms long to make testing quick but
// highly likely not to fail based on scheduling issues.
cfg.TestOverrideCAChangeInitialDelay = 100 * time.Millisecond
@ -309,12 +307,12 @@ func TestManager_CSRRateLimiting(t *testing.T) {
// First call return rate limit error. This is important as it checks
// behavior when cache is empty and we have to return a nil Value but need to
// save state to do the right thing for retry.
consul.ErrRateLimited, // inc
structs.ErrRateLimited, // inc
// Then succeed on second call
nil,
// Then be rate limited again on several further calls
consul.ErrRateLimited, // inc
consul.ErrRateLimited, // inc
structs.ErrRateLimited, // inc
structs.ErrRateLimited, // inc
// Then fine after that
)
@ -332,7 +330,7 @@ func TestManager_CSRRateLimiting(t *testing.T) {
t.Fatal("shouldn't block longer than one jitter window for success")
case result := <-getCh:
require.Error(t, result.Err)
require.Equal(t, consul.ErrRateLimited.Error(), result.Err.Error())
require.Equal(t, structs.ErrRateLimited.Error(), result.Err.Error())
}
// Second call should return correct cert immediately.
@ -429,7 +427,7 @@ func TestManager_watchRootsDedupingMultipleCallers(t *testing.T) {
t.Parallel()
m, signer := testManager(t, nil)
m, signer := NewTestManager(t, nil)
caRoot := signer.UpdateCA(t, nil)
@ -577,7 +575,7 @@ func TestManager_expiringLeaf(t *testing.T) {
t.Parallel()
m, signer := testManager(t, nil)
m, signer := NewTestManager(t, nil)
caRoot := signer.UpdateCA(t, nil)
@ -637,7 +635,7 @@ func TestManager_expiringLeaf(t *testing.T) {
func TestManager_DNSSANForService(t *testing.T) {
t.Parallel()
m, signer := testManager(t, nil)
m, signer := NewTestManager(t, nil)
_ = signer.UpdateCA(t, nil)
@ -669,7 +667,7 @@ func TestManager_workflow_good(t *testing.T) {
const TestOverrideCAChangeInitialDelay = 1 * time.Nanosecond
m, signer := testManager(t, func(cfg *Config) {
m, signer := NewTestManager(t, func(cfg *Config) {
cfg.TestOverrideCAChangeInitialDelay = TestOverrideCAChangeInitialDelay
})
@ -711,7 +709,7 @@ func TestManager_workflow_good(t *testing.T) {
type reply struct {
cert *structs.IssuedCert
meta cache.ResultMeta
meta cacheshim.ResultMeta
err error
}
@ -818,7 +816,7 @@ func TestManager_workflow_goodNotLocal(t *testing.T) {
const TestOverrideCAChangeInitialDelay = 1 * time.Nanosecond
m, signer := testManager(t, func(cfg *Config) {
m, signer := NewTestManager(t, func(cfg *Config) {
cfg.TestOverrideCAChangeInitialDelay = TestOverrideCAChangeInitialDelay
})
@ -935,7 +933,7 @@ func TestManager_workflow_nonBlockingQuery_after_blockingQuery_shouldNotBlock(t
ctx, cancel := context.WithCancel(context.Background())
t.Cleanup(cancel)
m, signer := testManager(t, nil)
m, signer := NewTestManager(t, nil)
_ = signer.UpdateCA(t, nil)
@ -1020,98 +1018,6 @@ func requireLeafValidUnderCA(t require.TestingT, issued *structs.IssuedCert, ca
require.NoError(t, err)
}
// testManager returns a *Manager that is pre-configured to use a mock RPC
// implementation that can sign certs, and an in-memory CA roots reader that
// interacts well with it.
func testManager(t *testing.T, mut func(*Config)) (*Manager, *testSigner) {
signer := newTestSigner(t, nil, nil)
deps := Deps{
Logger: testutil.Logger(t),
RootsReader: signer.RootsReader,
CertSigner: signer,
Config: Config{
// Override the root-change spread so we don't have to wait up to 20 seconds
// to see root changes work. Can be changed back for specific tests that
// need to test this, Note it's not 0 since that used default but is
// effectively the same.
TestOverrideCAChangeInitialDelay: 1 * time.Microsecond,
},
}
if mut != nil {
mut(&deps.Config)
}
m := NewManager(deps)
t.Cleanup(m.Stop)
return m, signer
}
type testRootsReader struct {
mu sync.Mutex
index uint64
roots *structs.IndexedCARoots
watcher chan struct{}
}
func newTestRootsReader(t *testing.T) *testRootsReader {
r := &testRootsReader{
watcher: make(chan struct{}),
}
t.Cleanup(func() {
r.mu.Lock()
watcher := r.watcher
r.mu.Unlock()
close(watcher)
})
return r
}
var _ RootsReader = (*testRootsReader)(nil)
func (r *testRootsReader) Set(roots *structs.IndexedCARoots) {
r.mu.Lock()
oldWatcher := r.watcher
r.watcher = make(chan struct{})
r.roots = roots
if roots == nil {
r.index = 1
} else {
r.index = roots.Index
}
r.mu.Unlock()
close(oldWatcher)
}
func (r *testRootsReader) Get() (*structs.IndexedCARoots, error) {
r.mu.Lock()
defer r.mu.Unlock()
return r.roots, nil
}
func (r *testRootsReader) Notify(ctx context.Context, correlationID string, ch chan<- cache.UpdateEvent) error {
r.mu.Lock()
watcher := r.watcher
r.mu.Unlock()
go func() {
<-watcher
r.mu.Lock()
defer r.mu.Unlock()
ch <- cache.UpdateEvent{
CorrelationID: correlationID,
Result: r.roots,
Meta: cache.ResultMeta{Index: r.index},
Err: nil,
}
}()
return nil
}
type testGetResult struct {
Index uint64
Value *structs.IssuedCert

View File

@ -17,12 +17,42 @@ import (
"testing"
"time"
"github.com/hashicorp/consul/agent/cacheshim"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/sdk/testutil"
)
// testSigner implements NetRPC and handles leaf signing operations
type testSigner struct {
// NewTestManager returns a *Manager that is pre-configured to use a mock RPC
// implementation that can sign certs, and an in-memory CA roots reader that
// interacts well with it.
func NewTestManager(t *testing.T, mut func(*Config)) (*Manager, *TestSigner) {
signer := newTestSigner(t, nil, nil)
deps := Deps{
Logger: testutil.Logger(t),
RootsReader: signer.RootsReader,
CertSigner: signer,
Config: Config{
// Override the root-change spread so we don't have to wait up to 20 seconds
// to see root changes work. Can be changed back for specific tests that
// need to test this, Note it's not 0 since that used default but is
// effectively the same.
TestOverrideCAChangeInitialDelay: 1 * time.Microsecond,
},
}
if mut != nil {
mut(&deps.Config)
}
m := NewManager(deps)
t.Cleanup(m.Stop)
return m, signer
}
// TestSigner implements NetRPC and handles leaf signing operations
type TestSigner struct {
caLock sync.Mutex
ca *structs.CARoot
prevRoots []*structs.CARoot // remember prior ones
@ -36,37 +66,37 @@ type testSigner struct {
signCallCapture []*structs.CASignRequest
}
var _ CertSigner = (*testSigner)(nil)
var _ CertSigner = (*TestSigner)(nil)
var ReplyWithExpiredCert = errors.New("reply with expired cert")
func newTestSigner(t *testing.T, idGenerator *atomic.Uint64, rootsReader *testRootsReader) *testSigner {
func newTestSigner(t *testing.T, idGenerator *atomic.Uint64, rootsReader *testRootsReader) *TestSigner {
if idGenerator == nil {
idGenerator = &atomic.Uint64{}
}
if rootsReader == nil {
rootsReader = newTestRootsReader(t)
}
s := &testSigner{
s := &TestSigner{
IDGenerator: idGenerator,
RootsReader: rootsReader,
}
return s
}
func (s *testSigner) SetSignCallErrors(errs ...error) {
func (s *TestSigner) SetSignCallErrors(errs ...error) {
s.signCallLock.Lock()
defer s.signCallLock.Unlock()
s.signCallErrors = append(s.signCallErrors, errs...)
}
func (s *testSigner) GetSignCallErrorCount() uint64 {
func (s *TestSigner) GetSignCallErrorCount() uint64 {
s.signCallLock.Lock()
defer s.signCallLock.Unlock()
return s.signCallErrorCount
}
func (s *testSigner) UpdateCA(t *testing.T, ca *structs.CARoot) *structs.CARoot {
func (s *TestSigner) UpdateCA(t *testing.T, ca *structs.CARoot) *structs.CARoot {
if ca == nil {
ca = connect.TestCA(t, nil)
}
@ -95,17 +125,17 @@ func (s *testSigner) UpdateCA(t *testing.T, ca *structs.CARoot) *structs.CARoot
return ca
}
func (s *testSigner) nextIndex() uint64 {
func (s *TestSigner) nextIndex() uint64 {
return s.IDGenerator.Add(1)
}
func (s *testSigner) getCA() *structs.CARoot {
func (s *TestSigner) getCA() *structs.CARoot {
s.caLock.Lock()
defer s.caLock.Unlock()
return s.ca
}
func (s *testSigner) GetCapture(idx int) *structs.CASignRequest {
func (s *TestSigner) GetCapture(idx int) *structs.CASignRequest {
s.signCallLock.Lock()
defer s.signCallLock.Unlock()
if len(s.signCallCapture) > idx {
@ -115,7 +145,7 @@ func (s *testSigner) GetCapture(idx int) *structs.CASignRequest {
return nil
}
func (s *testSigner) SignCert(ctx context.Context, req *structs.CASignRequest) (*structs.IssuedCert, error) {
func (s *TestSigner) SignCert(ctx context.Context, req *structs.CASignRequest) (*structs.IssuedCert, error) {
useExpiredCert := false
s.signCallLock.Lock()
s.signCallCapture = append(s.signCallCapture, req)
@ -150,8 +180,17 @@ func (s *testSigner) SignCert(ctx context.Context, req *structs.CASignRequest) (
return nil, fmt.Errorf("error parsing CSR URI: %w", err)
}
serviceID, isService := spiffeID.(*connect.SpiffeIDService)
if !isService {
var isService bool
var serviceID *connect.SpiffeIDService
var workloadID *connect.SpiffeIDWorkloadIdentity
switch spiffeID.(type) {
case *connect.SpiffeIDService:
isService = true
serviceID = spiffeID.(*connect.SpiffeIDService)
case *connect.SpiffeIDWorkloadIdentity:
workloadID = spiffeID.(*connect.SpiffeIDWorkloadIdentity)
default:
return nil, fmt.Errorf("unexpected spiffeID type %T", spiffeID)
}
@ -231,6 +270,8 @@ func (s *testSigner) SignCert(ctx context.Context, req *structs.CASignRequest) (
}
index := s.nextIndex()
if isService {
// Service Spiffe ID case
return &structs.IssuedCert{
SerialNumber: connect.EncodeSerialNumber(leafCert.SerialNumber),
CertPEM: leafPEM,
@ -243,4 +284,83 @@ func (s *testSigner) SignCert(ctx context.Context, req *structs.CASignRequest) (
ModifyIndex: index,
},
}, nil
} else {
// Workload identity Spiffe ID case
return &structs.IssuedCert{
SerialNumber: connect.EncodeSerialNumber(leafCert.SerialNumber),
CertPEM: leafPEM,
WorkloadIdentity: workloadID.WorkloadIdentity,
WorkloadIdentityURI: leafCert.URIs[0].String(),
ValidAfter: leafCert.NotBefore,
ValidBefore: leafCert.NotAfter,
RaftIndex: structs.RaftIndex{
CreateIndex: index,
ModifyIndex: index,
},
}, nil
}
}
type testRootsReader struct {
mu sync.Mutex
index uint64
roots *structs.IndexedCARoots
watcher chan struct{}
}
func newTestRootsReader(t *testing.T) *testRootsReader {
r := &testRootsReader{
watcher: make(chan struct{}),
}
t.Cleanup(func() {
r.mu.Lock()
watcher := r.watcher
r.mu.Unlock()
close(watcher)
})
return r
}
var _ RootsReader = (*testRootsReader)(nil)
func (r *testRootsReader) Set(roots *structs.IndexedCARoots) {
r.mu.Lock()
oldWatcher := r.watcher
r.watcher = make(chan struct{})
r.roots = roots
if roots == nil {
r.index = 1
} else {
r.index = roots.Index
}
r.mu.Unlock()
close(oldWatcher)
}
func (r *testRootsReader) Get() (*structs.IndexedCARoots, error) {
r.mu.Lock()
defer r.mu.Unlock()
return r.roots, nil
}
func (r *testRootsReader) Notify(ctx context.Context, correlationID string, ch chan<- cacheshim.UpdateEvent) error {
r.mu.Lock()
watcher := r.watcher
r.mu.Unlock()
go func() {
<-watcher
r.mu.Lock()
defer r.mu.Unlock()
ch <- cacheshim.UpdateEvent{
CorrelationID: correlationID,
Result: r.roots,
Meta: cacheshim.ResultMeta{Index: r.index},
Err: nil,
}
}()
return nil
}

View File

@ -8,7 +8,7 @@ import (
"sync"
"sync/atomic"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/cacheshim"
"github.com/hashicorp/consul/agent/structs"
)
@ -100,7 +100,7 @@ func (r *rootWatcher) rootWatcher(ctx context.Context) {
atomic.AddUint32(&r.testStartCount, 1)
defer atomic.AddUint32(&r.testStopCount, 1)
ch := make(chan cache.UpdateEvent, 1)
ch := make(chan cacheshim.UpdateEvent, 1)
if err := r.rootsReader.Notify(ctx, "roots", ch); err != nil {
// Trigger all inflight watchers. We don't pass the error, but they will

View File

@ -11,7 +11,7 @@ import (
"github.com/mitchellh/hashstructure"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/cacheshim"
"github.com/hashicorp/consul/agent/structs"
)
@ -31,6 +31,7 @@ type ConnectCALeafRequest struct {
// The following flags indicate the entity we are requesting a cert for.
// Only one of these must be specified.
WorkloadIdentity string // Given a WorkloadIdentity name, the request is for a SpiffeIDWorkload.
Service string // Given a Service name, not ID, the request is for a SpiffeIDService.
Agent string // Given an Agent name, not ID, the request is for a SpiffeIDAgent.
Kind structs.ServiceKind // Given "mesh-gateway", the request is for a SpiffeIDMeshGateway. No other kinds supported.
@ -41,6 +42,16 @@ func (r *ConnectCALeafRequest) Key() string {
r.EnterpriseMeta.Normalize()
switch {
case r.WorkloadIdentity != "":
v, err := hashstructure.Hash([]any{
r.WorkloadIdentity,
r.EnterpriseMeta,
r.DNSSAN,
r.IPSAN,
}, nil)
if err == nil {
return fmt.Sprintf("workloadidentity:%d", v)
}
case r.Agent != "":
v, err := hashstructure.Hash([]any{
r.Agent,
@ -94,8 +105,8 @@ func (req *ConnectCALeafRequest) TargetPartition() string {
return req.PartitionOrDefault()
}
func (r *ConnectCALeafRequest) CacheInfo() cache.RequestInfo {
return cache.RequestInfo{
func (r *ConnectCALeafRequest) CacheInfo() cacheshim.RequestInfo {
return cacheshim.RequestInfo{
Token: r.Token,
Key: r.Key(),
Datacenter: r.Datacenter,

View File

@ -8,7 +8,7 @@ import (
"fmt"
"time"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/cacheshim"
"github.com/hashicorp/consul/lib"
)
@ -43,9 +43,9 @@ func (m *Manager) Notify(
ctx context.Context,
req *ConnectCALeafRequest,
correlationID string,
ch chan<- cache.UpdateEvent,
ch chan<- cacheshim.UpdateEvent,
) error {
return m.NotifyCallback(ctx, req, correlationID, func(ctx context.Context, event cache.UpdateEvent) {
return m.NotifyCallback(ctx, req, correlationID, func(ctx context.Context, event cacheshim.UpdateEvent) {
select {
case ch <- event:
case <-ctx.Done():
@ -60,7 +60,7 @@ func (m *Manager) NotifyCallback(
ctx context.Context,
req *ConnectCALeafRequest,
correlationID string,
cb cache.Callback,
cb cacheshim.Callback,
) error {
if req.Key() == "" {
return fmt.Errorf("a key is required")
@ -81,7 +81,7 @@ func (m *Manager) notifyBlockingQuery(
ctx context.Context,
req *ConnectCALeafRequest,
correlationID string,
cb cache.Callback,
cb cacheshim.Callback,
) {
// Always start at 0 index to deliver the initial (possibly currently cached
// value).
@ -106,7 +106,7 @@ func (m *Manager) notifyBlockingQuery(
// Check the index of the value returned in the cache entry to be sure it
// changed
if index == 0 || index < meta.Index {
cb(ctx, cache.UpdateEvent{
cb(ctx, cacheshim.UpdateEvent{
CorrelationID: correlationID,
Result: newValue,
Meta: meta,

View File

@ -185,6 +185,8 @@ func NewBaseDeps(configLoader ConfigLoader, logOut io.Writer, providedLogger hcl
TestOverrideCAChangeInitialDelay: cfg.ConnectTestCALeafRootChangeSpread,
},
})
// Set the leaf cert manager in the embedded deps type so it can be used by consul servers.
d.Deps.LeafCertManager = d.LeafCertManager
agentType := "client"
if cfg.ServerMode {

View File

@ -217,6 +217,11 @@ type IssuedCert struct {
// PrivateKeyPEM is the PEM encoded private key associated with CertPEM.
PrivateKeyPEM string `json:",omitempty"`
// WorkloadIdentity is the name of the workload identity for which the cert was issued.
WorkloadIdentity string `json:",omitempty"`
// WorkloadIdentityURI is the cert URI value.
WorkloadIdentityURI string `json:",omitempty"`
// Service is the name of the service for which the cert was issued.
Service string `json:",omitempty"`
// ServiceURI is the cert URI value.
@ -247,6 +252,12 @@ type IssuedCert struct {
RaftIndex
}
func (i *IssuedCert) Key() string {
return fmt.Sprintf("%s",
i.SerialNumber,
)
}
// CAOp is the operation for a request related to intentions.
type CAOp string

View File

@ -19,6 +19,10 @@ const (
errServiceNotFound = "Service not found: "
errQueryNotFound = "Query not found"
errLeaderNotTracked = "Raft leader not found in server lookup mapping"
errConnectNotEnabled = "Connect must be enabled in order to use this endpoint"
errRateLimited = "Rate limit reached, try again later" // Note: we depend on this error message in the gRPC ConnectCA.Sign endpoint (see: isRateLimitError).
errNotPrimaryDatacenter = "not the primary datacenter"
errStateReadOnly = "CA Provider State is read-only"
)
var (
@ -31,6 +35,10 @@ var (
ErrDCNotAvailable = errors.New(errDCNotAvailable)
ErrQueryNotFound = errors.New(errQueryNotFound)
ErrLeaderNotTracked = errors.New(errLeaderNotTracked)
ErrConnectNotEnabled = errors.New(errConnectNotEnabled)
ErrRateLimited = errors.New(errRateLimited) // Note: we depend on this error message in the gRPC ConnectCA.Sign endpoint (see: isRateLimitError).
ErrNotPrimaryDatacenter = errors.New(errNotPrimaryDatacenter)
ErrStateReadOnly = errors.New(errStateReadOnly)
)
func IsErrNoDCPath(err error) bool {

View File

@ -4,6 +4,9 @@
package controllers
import (
"context"
"github.com/hashicorp/consul/agent/leafcert"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/cache/sidecarproxycache"
@ -20,11 +23,18 @@ type Dependencies struct {
LocalDatacenter string
TrustBundleFetcher xds.TrustBundleFetcher
ProxyUpdater xds.ProxyUpdater
LeafCertManager *leafcert.Manager
}
func Register(mgr *controller.Manager, deps Dependencies) {
mapper := bimapper.New(types.ProxyStateTemplateType, catalog.ServiceEndpointsType)
mgr.Register(xds.Controller(mapper, deps.ProxyUpdater, deps.TrustBundleFetcher))
endpointsMapper := bimapper.New(types.ProxyStateTemplateType, catalog.ServiceEndpointsType)
leafMapper := &xds.LeafMapper{
Mapper: bimapper.New(types.ProxyStateTemplateType, xds.InternalLeafType),
}
leafCancels := &xds.LeafCancels{
Cancels: make(map[string]context.CancelFunc),
}
mgr.Register(xds.Controller(endpointsMapper, deps.ProxyUpdater, deps.TrustBundleFetcher, deps.LeafCertManager, leafMapper, leafCancels, deps.LocalDatacenter))
destinationsCache := sidecarproxycache.NewDestinationsCache()
proxyCfgCache := sidecarproxycache.NewProxyConfigurationCache()

View File

@ -5,7 +5,12 @@ package xds
import (
"context"
"fmt"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cacheshim"
"github.com/hashicorp/consul/agent/leafcert"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/xds/status"
@ -19,23 +24,35 @@ import (
)
const ControllerName = "consul.io/xds-controller"
const defaultTenancy = "default"
func Controller(mapper *bimapper.Mapper, updater ProxyUpdater, fetcher TrustBundleFetcher) controller.Controller {
if mapper == nil || updater == nil || fetcher == nil {
panic("mapper, updater and fetcher are required")
func Controller(endpointsMapper *bimapper.Mapper, updater ProxyUpdater, fetcher TrustBundleFetcher, leafCertManager *leafcert.Manager, leafMapper *LeafMapper, leafCancels *LeafCancels, datacenter string) controller.Controller {
leafCertEvents := make(chan controller.Event, 1000)
if endpointsMapper == nil || fetcher == nil || leafCertManager == nil || leafMapper == nil || datacenter == "" {
panic("endpointsMapper, updater, fetcher, leafCertManager, leafMapper, and datacenter are required")
}
return controller.ForType(types.ProxyStateTemplateType).
WithWatch(catalog.ServiceEndpointsType, mapper.MapLink).
WithWatch(catalog.ServiceEndpointsType, endpointsMapper.MapLink).
WithCustomWatch(proxySource(updater), proxyMapper).
WithCustomWatch(&controller.Source{Source: leafCertEvents}, leafMapper.EventMapLink).
WithPlacement(controller.PlacementEachServer).
WithReconciler(&xdsReconciler{bimapper: mapper, updater: updater, fetchTrustBundle: fetcher})
WithReconciler(&xdsReconciler{endpointsMapper: endpointsMapper, updater: updater, fetchTrustBundle: fetcher, leafCertManager: leafCertManager, leafCancels: leafCancels, leafCertEvents: leafCertEvents, leafMapper: leafMapper, datacenter: datacenter})
}
type xdsReconciler struct {
bimapper *bimapper.Mapper
// Fields for fetching and watching endpoints.
endpointsMapper *bimapper.Mapper
// Fields for proxy management.
updater ProxyUpdater
// Fields for fetching and watching trust bundles.
fetchTrustBundle TrustBundleFetcher
// Fields for fetching and watching leaf certificates.
leafCertManager *leafcert.Manager
leafMapper *LeafMapper
leafCancels *LeafCancels
leafCertEvents chan controller.Event
datacenter string
}
type TrustBundleFetcher func() (*pbproxystate.TrustBundle, error)
@ -47,7 +64,7 @@ type ProxyUpdater interface {
PushChange(id *pbresource.ID, snapshot proxysnapshot.ProxySnapshot) error
// ProxyConnectedToServer returns the token for the proxy and whether this id is connected to this server.
ProxyConnectedToServer(id *pbresource.ID) bool
ProxyConnectedToServer(id *pbresource.ID) (string, bool)
// EventChannel returns a channel of events that are consumed by the Custom Watcher.
EventChannel() chan controller.Event
@ -65,11 +82,25 @@ func (r *xdsReconciler) Reconcile(ctx context.Context, rt controller.Runtime, re
return err
}
if proxyStateTemplate == nil || proxyStateTemplate.Template == nil || !r.updater.ProxyConnectedToServer(req.ID) {
token, proxyConnected := r.updater.ProxyConnectedToServer(req.ID)
if proxyStateTemplate == nil || proxyStateTemplate.Template == nil || !proxyConnected {
rt.Logger.Trace("proxy state template has been deleted or this controller is not responsible for this proxy state template", "id", req.ID)
// If the proxy state was deleted, we should remove references to it in the mapper.
r.bimapper.UntrackItem(req.ID)
// If the proxy state template (PST) was deleted, we should:
// 1. Remove references from endpoints mapper.
// 2. Remove references from leaf mapper.
// 3. Cancel all leaf watches.
// 1. Remove PST from endpoints mapper.
r.endpointsMapper.UntrackItem(req.ID)
// Grab the leafs related to this PST before untracking the PST so we know which ones to cancel.
leafLinks := r.leafMapper.LinkRefsForItem(req.ID)
// 2. Remove PST from leaf mapper.
r.leafMapper.UntrackItem(req.ID)
// 3. Cancel watches for leafs that were related to this PST, as long as they aren't referenced by any other PST.
r.cancelWatches(leafLinks)
return nil
}
@ -80,7 +111,6 @@ func (r *xdsReconciler) Reconcile(ctx context.Context, rt controller.Runtime, re
)
pstResource = proxyStateTemplate.Resource
// Initialize the ProxyState endpoints map.
if proxyStateTemplate.Template.ProxyState == nil {
rt.Logger.Error("proxy state was missing from proxy state template")
// Set the status.
@ -100,6 +130,7 @@ func (r *xdsReconciler) Reconcile(ctx context.Context, rt controller.Runtime, re
return err
}
// Initialize ProxyState maps.
if proxyStateTemplate.Template.ProxyState.TrustBundles == nil {
proxyStateTemplate.Template.ProxyState.TrustBundles = make(map[string]*pbproxystate.TrustBundle)
}
@ -109,6 +140,9 @@ func (r *xdsReconciler) Reconcile(ctx context.Context, rt controller.Runtime, re
if proxyStateTemplate.Template.ProxyState.Endpoints == nil {
proxyStateTemplate.Template.ProxyState.Endpoints = make(map[string]*pbproxystate.Endpoints)
}
if proxyStateTemplate.Template.ProxyState.LeafCertificates == nil {
proxyStateTemplate.Template.ProxyState.LeafCertificates = make(map[string]*pbproxystate.LeafCertificate)
}
// Iterate through the endpoint references.
// For endpoints, the controller should:
@ -156,8 +190,110 @@ func (r *xdsReconciler) Reconcile(ctx context.Context, rt controller.Runtime, re
}
// Step 4: Track relationships between ProxyStateTemplates and ServiceEndpoints.
r.bimapper.TrackItem(req.ID, endpointsInProxyStateTemplate)
r.endpointsMapper.TrackItem(req.ID, endpointsInProxyStateTemplate)
// Iterate through leaf certificate references.
// For each leaf certificate reference, the controller should:
// 1. Set up a watch for the leaf certificate so that the leaf cert manager will generate and store a leaf
// certificate if it's not already in the leaf cert manager cache.
// 1a. Store a cancel function for that leaf certificate watch.
// 2. Get the leaf certificate from the leaf cert manager. (This should succeed if a watch has been set up).
// 3. Put the leaf certificate contents into the ProxyState leaf certificates map.
// 4. Track relationships between ProxyState and leaf certificates using a bimapper.
leafReferencesMap := proxyStateTemplate.Template.RequiredLeafCertificates
var leafsInProxyStateTemplate []resource.ReferenceOrID
for workloadIdentityName, leafRef := range leafReferencesMap {
// leafRef must include the namespace and partition
leafResourceReference := leafResourceRef(leafRef.Name, leafRef.Namespace, leafRef.Partition)
leafKey := keyFromReference(leafResourceReference)
leafRequest := &leafcert.ConnectCALeafRequest{
Token: token,
WorkloadIdentity: leafRef.Name,
EnterpriseMeta: acl.NewEnterpriseMetaWithPartition(leafRef.Partition, leafRef.Namespace),
}
// Step 1: Set up a watch for this leaf if one doesn't already exist.
if _, ok := r.leafCancels.Get(leafKey); !ok {
certWatchContext, cancel := context.WithCancel(ctx)
err = r.leafCertManager.NotifyCallback(certWatchContext, leafRequest, "", func(ctx context.Context, event cacheshim.UpdateEvent) {
cert, ok := event.Result.(*structs.IssuedCert)
if !ok {
panic("wrong type")
}
if cert == nil {
return
}
controllerEvent := controller.Event{
Obj: cert,
}
select {
// This callback function is running in its own goroutine, so blocking inside this goroutine to send the
// update event doesn't affect the controller or other leaf certificates. r.leafCertEvents is a buffered
// channel, which should constantly be consumed by the controller custom events queue. If the controller
// custom events consumer isn't clearing up the leafCertEvents channel, then that would be the main
// issue to address, as opposed to this goroutine blocking.
case r.leafCertEvents <- controllerEvent:
// This context is the certWatchContext, so we will reach this case if the watch is canceled, and exit
// the callback goroutine.
case <-ctx.Done():
}
})
if err != nil {
rt.Logger.Error("error creating leaf watch", "leafRef", leafResourceReference, "error", err)
// Set the status.
statusCondition = status.ConditionRejectedErrorCreatingLeafWatch(keyFromReference(leafResourceReference), err.Error())
status.WriteStatusIfChanged(ctx, rt, pstResource, statusCondition)
cancel()
return err
}
r.leafCancels.Set(leafKey, cancel)
}
// Step 2: Get the leaf certificate.
cert, _, err := r.leafCertManager.Get(ctx, leafRequest)
if err != nil {
rt.Logger.Error("error getting leaf", "leafRef", leafResourceReference, "error", err)
// Set the status.
statusCondition = status.ConditionRejectedErrorGettingLeaf(keyFromReference(leafResourceReference), err.Error())
status.WriteStatusIfChanged(ctx, rt, pstResource, statusCondition)
return err
}
// Create the pbproxystate.LeafCertificate out of the structs.IssuedCert returned from the manager.
psLeaf := generateProxyStateLeafCertificates(cert)
if psLeaf == nil {
// err is nil at this point (the Get above succeeded), so construct an error instead of dereferencing it.
err = fmt.Errorf("leaf certificate is missing the certificate or private key PEM")
rt.Logger.Error("error getting leaf certificate contents", "leafRef", leafResourceReference, "error", err)
// Set the status.
statusCondition = status.ConditionRejectedErrorCreatingProxyStateLeaf(keyFromReference(leafResourceReference), err.Error())
status.WriteStatusIfChanged(ctx, rt, pstResource, statusCondition)
return err
}
// Step 3: Add the leaf certificate to ProxyState.
proxyStateTemplate.Template.ProxyState.LeafCertificates[workloadIdentityName] = psLeaf
// Track all the leaf certificates that are used by this ProxyStateTemplate, so we can use this for step 4.
leafsInProxyStateTemplate = append(leafsInProxyStateTemplate, leafResourceReference)
}
// Get the previously tracked leafs for this ProxyStateTemplate so we can use this to cancel watches in step 5.
prevWatchedLeafs := r.leafMapper.LinkRefsForItem(req.ID)
// Step 4: Track relationships between ProxyStateTemplates and leaf certificates for the current leafs referenced in
// ProxyStateTemplate.
r.leafMapper.TrackItem(req.ID, leafsInProxyStateTemplate)
// Step 5: Compute whether there are leafs that are no longer referenced by this proxy state template, and cancel
// watches for them if they aren't referenced anywhere.
watches := prevWatchesToCancel(prevWatchedLeafs, leafsInProxyStateTemplate)
r.cancelWatches(watches)
// Now that the references have been resolved, push the computed proxy state to the updater.
computedProxyState := proxyStateTemplate.Template.ProxyState
err = r.updater.PushChange(req.ID, &proxytracker.ProxyState{ProxyState: computedProxyState})
@ -174,11 +310,93 @@ func (r *xdsReconciler) Reconcile(ctx context.Context, rt controller.Runtime, re
return nil
}
func resourceIdToReference(id *pbresource.ID) *pbresource.Reference {
// leafResourceRef translates a leaf certificate reference in the ProxyStateTemplate to an internal resource reference. The
// bimapper package uses resource references, so we use an internal type to create a leaf resource reference since leaf
// certificates are not v2 resources.
func leafResourceRef(workloadIdentity, namespace, partition string) *pbresource.Reference {
// Since leaf certificate references aren't resources in the resource API, we don't have the same guarantees that
// namespace and partition are set. So this is to ensure that we always do set values for tenancy.
if namespace == "" {
namespace = defaultTenancy
}
if partition == "" {
partition = defaultTenancy
}
ref := &pbresource.Reference{
Name: id.GetName(),
Type: id.GetType(),
Tenancy: id.GetTenancy(),
Name: workloadIdentity,
Type: InternalLeafType,
Tenancy: &pbresource.Tenancy{
Partition: partition,
Namespace: namespace,
},
}
return ref
}
// InternalLeafType sets up an internal resource type to use for leaf certificates, since they are not yet a v2
// resource. It's exported because it's used by the mesh controller registration which needs to set up the bimapper for
// leaf certificates.
var InternalLeafType = &pbresource.Type{
Group: "internal",
GroupVersion: "v1alpha1",
Kind: "leaf",
}
// keyFromReference is used to create string keys from resource references.
func keyFromReference(ref resource.ReferenceOrID) string {
return fmt.Sprintf("%s/%s/%s",
resource.ToGVK(ref.GetType()),
tenancyToString(ref.GetTenancy()),
ref.GetName())
}
func tenancyToString(tenancy *pbresource.Tenancy) string {
return fmt.Sprintf("%s.%s", tenancy.Partition, tenancy.Namespace)
}
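// Illustrative sketch (not part of this change), showing the string key that
// keyFromReference builds for a leaf certificate reference; this is the key used by the
// LeafCancels map. With default tenancy it renders roughly as
// "<GVK of InternalLeafType>/default.default/foo-workload-identity", where the GVK string
// comes from resource.ToGVK.
func exampleLeafKey() string {
    ref := leafResourceRef("foo-workload-identity", "", "") // namespace and partition default to "default"
    return keyFromReference(ref)
}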
// generateProxyStateLeafCertificates translates a *structs.IssuedCert into a *pbproxystate.LeafCertificate.
func generateProxyStateLeafCertificates(cert *structs.IssuedCert) *pbproxystate.LeafCertificate {
if cert == nil || cert.CertPEM == "" || cert.PrivateKeyPEM == "" {
return nil
}
return &pbproxystate.LeafCertificate{
Cert: cert.CertPEM,
Key: cert.PrivateKeyPEM,
}
}
// cancelWatches cancels the watch for each of the given leafs, as long as that leaf is no longer referenced by any ProxyStateTemplate.
func (r *xdsReconciler) cancelWatches(leafResourceRefs []*pbresource.Reference) {
for _, leaf := range leafResourceRefs {
pstItems := r.leafMapper.ItemRefsForLink(leaf)
if len(pstItems) > 0 {
// Don't delete and cancel watches, since this leaf is referenced elsewhere.
continue
}
cancel, ok := r.leafCancels.Get(keyFromReference(leaf))
if ok {
cancel()
r.leafCancels.Delete(keyFromReference(leaf))
}
}
}
// prevWatchesToCancel returns the items in prevWatchedLeafs that are not present in currentLeafs.
func prevWatchesToCancel(prevWatchedLeafs []*pbresource.Reference, currentLeafs []resource.ReferenceOrID) []*pbresource.Reference {
var prevWatchedLeafsToCancel []*pbresource.Reference
for _, prevLeaf := range prevWatchedLeafs {
prevKey := keyFromReference(prevLeaf)
found := false
for _, newLeaf := range currentLeafs {
newKey := keyFromReference(newLeaf)
if prevKey == newKey {
found = true
break
}
}
if !found {
prevWatchedLeafsToCancel = append(prevWatchedLeafsToCancel, prevLeaf)
}
}
return prevWatchedLeafsToCancel
}
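// Illustrative sketch (not part of this change): with one previously watched leaf dropped
// from the current references, prevWatchesToCancel returns just that leaf; cancelWatches
// would then cancel its watch if no other ProxyStateTemplate still references it.
func examplePrevWatchesToCancel() []*pbresource.Reference {
    prev := []*pbresource.Reference{
        leafResourceRef("old-workload-identity", "default", "default"),
        leafResourceRef("foo-workload-identity", "default", "default"),
    }
    current := []resource.ReferenceOrID{
        leafResourceRef("foo-workload-identity", "default", "default"),
    }
    // Returns only the reference for "old-workload-identity".
    return prevWatchesToCancel(prev, current)
}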

View File

@ -5,12 +5,15 @@ package xds
import (
"context"
"crypto/x509"
"encoding/pem"
"testing"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
svctest "github.com/hashicorp/consul/agent/grpc-external/services/resource/testing"
"github.com/hashicorp/consul/agent/leafcert"
"github.com/hashicorp/consul/internal/catalog"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/mesh/internal/controllers/xds/status"
@ -39,17 +42,24 @@ type xdsControllerTestSuite struct {
mapper *bimapper.Mapper
updater *mockUpdater
fetcher TrustBundleFetcher
leafMapper *LeafMapper
leafCertManager *leafcert.Manager
leafCancels *LeafCancels
leafCertEvents chan controller.Event
signer *leafcert.TestSigner
fooProxyStateTemplate *pbresource.Resource
barProxyStateTemplate *pbresource.Resource
barEndpointRefs map[string]*pbproxystate.EndpointRef
fooEndpointRefs map[string]*pbproxystate.EndpointRef
fooLeafRefs map[string]*pbproxystate.LeafCertificateRef
fooEndpoints *pbresource.Resource
fooService *pbresource.Resource
fooBarEndpoints *pbresource.Resource
fooBarService *pbresource.Resource
expectedFooProxyStateEndpoints map[string]*pbproxystate.Endpoints
expectedBarProxyStateEndpoints map[string]*pbproxystate.Endpoints
expectedFooProxyStateSpiffes map[string]string
expectedTrustBundle map[string]*pbproxystate.TrustBundle
}
@ -63,10 +73,27 @@ func (suite *xdsControllerTestSuite) SetupTest() {
suite.mapper = bimapper.New(types.ProxyStateTemplateType, catalog.ServiceEndpointsType)
suite.updater = newMockUpdater()
suite.leafMapper = &LeafMapper{
bimapper.New(types.ProxyStateTemplateType, InternalLeafType),
}
lcm, signer := leafcert.NewTestManager(suite.T(), nil)
signer.UpdateCA(suite.T(), nil)
suite.signer = signer
suite.leafCertManager = lcm
suite.leafCancels = &LeafCancels{
Cancels: make(map[string]context.CancelFunc),
}
suite.leafCertEvents = make(chan controller.Event, 1000)
suite.ctl = &xdsReconciler{
bimapper: suite.mapper,
endpointsMapper: suite.mapper,
updater: suite.updater,
fetchTrustBundle: suite.fetcher,
leafMapper: suite.leafMapper,
leafCertManager: suite.leafCertManager,
leafCancels: suite.leafCancels,
leafCertEvents: suite.leafCertEvents,
datacenter: "dc1",
}
}
@ -79,11 +106,14 @@ func mockFetcher() (*pbproxystate.TrustBundle, error) {
return &bundle, nil
}
// This test ensures when a ProxyState is deleted, it is no longer tracked in the mapper.
// This test ensures when a ProxyState is deleted, it is no longer tracked in the mappers.
func (suite *xdsControllerTestSuite) TestReconcile_NoProxyStateTemplate() {
// Track the id of a non-existent ProxyStateTemplate.
proxyStateTemplateId := resourcetest.Resource(types.ProxyStateTemplateType, "not-found").ID()
suite.mapper.TrackItem(proxyStateTemplateId, []resource.ReferenceOrID{})
suite.leafMapper.TrackItem(proxyStateTemplateId, []resource.ReferenceOrID{})
require.False(suite.T(), suite.mapper.IsEmpty())
require.False(suite.T(), suite.leafMapper.IsEmpty())
// Run the reconcile, and since no ProxyStateTemplate is stored, this simulates a deletion.
err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
@ -91,8 +121,9 @@ func (suite *xdsControllerTestSuite) TestReconcile_NoProxyStateTemplate() {
})
require.NoError(suite.T(), err)
// Assert that nothing is tracked in the mapper.
// Assert that nothing is tracked in the endpoints mapper.
require.True(suite.T(), suite.mapper.IsEmpty())
require.True(suite.T(), suite.leafMapper.IsEmpty())
}
// This test ensures if the controller was previously tracking a ProxyStateTemplate, and now that proxy has
@ -126,7 +157,7 @@ func (suite *xdsControllerTestSuite) TestReconcile_PushChangeError() {
suite.updater.pushChangeError = true
// Setup a happy path scenario.
suite.setupFooProxyStateTemplateAndEndpoints()
suite.setupFooProxyStateTemplateWithReferences()
// Run the reconcile.
err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
@ -215,7 +246,7 @@ func (suite *xdsControllerTestSuite) TestReconcile_ReadEndpointError() {
func (suite *xdsControllerTestSuite) TestReconcile_ProxyStateTemplateComputesEndpoints() {
// Set up fooEndpoints and fooProxyStateTemplate with a reference to fooEndpoints and store them in the state store.
// This setup saves expected values in the suite so it can be asserted against later.
suite.setupFooProxyStateTemplateAndEndpoints()
suite.setupFooProxyStateTemplateWithReferences()
// Run the reconcile.
err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
@ -231,10 +262,35 @@ func (suite *xdsControllerTestSuite) TestReconcile_ProxyStateTemplateComputesEnd
prototest.AssertDeepEqual(suite.T(), suite.expectedFooProxyStateEndpoints, actualEndpoints)
}
func (suite *xdsControllerTestSuite) TestReconcile_ProxyStateTemplateComputesLeafCerts() {
// Set up fooProxyStateTemplate with references to fooEndpoints and a leaf certificate, and store them in the state store.
// This setup saves expected values in the suite so it can be asserted against later.
suite.setupFooProxyStateTemplateWithReferences()
// Run the reconcile.
err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
ID: suite.fooProxyStateTemplate.Id,
})
require.NoError(suite.T(), err)
// Assert on the status.
suite.client.RequireStatusCondition(suite.T(), suite.fooProxyStateTemplate.Id, ControllerName, status.ConditionAccepted())
// Assert that the actual computed leaf certs match the expected leaf cert SPIFFE IDs.
actualLeafs := suite.updater.GetLeafs(suite.fooProxyStateTemplate.Id.Name)
for k, l := range actualLeafs {
pem, _ := pem.Decode([]byte(l.Cert))
cert, err := x509.ParseCertificate(pem.Bytes)
require.NoError(suite.T(), err)
require.Equal(suite.T(), cert.URIs[0].String(), suite.expectedFooProxyStateSpiffes[k])
}
}
// This test is a happy path creation test to make sure pbproxystate.Template.TrustBundles are created in the computed
// pbmesh.ProxyState from the TrustBundleFetcher.
func (suite *xdsControllerTestSuite) TestReconcile_ProxyStateTemplateSetsTrustBundles() {
// This test is a happy path creation test to make sure pbproxystate.Template.TrustBundles are created in the computed
// pbmesh.ProxyState from the TrustBundleFetcher.
suite.setupFooProxyStateTemplateAndEndpoints()
suite.setupFooProxyStateTemplateWithReferences()
// Run the reconcile.
err := suite.ctl.Reconcile(context.Background(), suite.runtime, controller.Request{
@ -287,16 +343,14 @@ func (suite *xdsControllerTestSuite) TestReconcile_MultipleProxyStateTemplatesCo
}
// Sets up a full controller, and tests that reconciles are getting triggered for the events it should.
func (suite *xdsControllerTestSuite) TestController_ComputeAddUpdateEndpoints() {
func (suite *xdsControllerTestSuite) TestController_ComputeAddUpdateEndpointReferences() {
// Run the controller manager.
mgr := controller.NewManager(suite.client, suite.runtime.Logger)
mgr.Register(Controller(suite.mapper, suite.updater, suite.fetcher))
mgr.Register(Controller(suite.mapper, suite.updater, suite.fetcher, suite.leafCertManager, suite.leafMapper, suite.leafCancels, "dc1"))
mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx)
// Set up fooEndpoints and fooProxyStateTemplate with a reference to fooEndpoints. These need to be stored
// because the controller reconcile looks them up.
suite.setupFooProxyStateTemplateAndEndpoints()
suite.setupFooProxyStateTemplateWithReferences()
// Assert that the expected ProxyState matches the actual ProxyState that PushChange was called with. This needs to
// be in a retry block unlike the Reconcile tests because the controller triggers asynchronously.
@ -387,11 +441,13 @@ func (suite *xdsControllerTestSuite) TestController_ComputeAddUpdateEndpoints()
Id: secondEndpoints.Id,
Port: "mesh",
}
oldVersion := suite.fooProxyStateTemplate.Version
fooProxyStateTemplate := resourcetest.Resource(types.ProxyStateTemplateType, "foo-pst").
WithData(suite.T(), &pbmesh.ProxyStateTemplate{
RequiredEndpoints: suite.fooEndpointRefs,
ProxyState: &pbmesh.ProxyState{},
RequiredLeafCertificates: suite.fooLeafRefs,
}).
Write(suite.T(), suite.client)
@ -433,18 +489,147 @@ func (suite *xdsControllerTestSuite) TestController_ComputeAddUpdateEndpoints()
}
// Sets up a full controller, and tests that reconciles are getting triggered for the leaf cert events it should.
// This test ensures when a CA is updated, the controller is triggered to update the leaf cert when it changes.
func (suite *xdsControllerTestSuite) TestController_ComputeAddUpdateDeleteLeafReferences() {
// Run the controller manager.
mgr := controller.NewManager(suite.client, suite.runtime.Logger)
mgr.Register(Controller(suite.mapper, suite.updater, suite.fetcher, suite.leafCertManager, suite.leafMapper, suite.leafCancels, "dc1"))
mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx)
suite.setupFooProxyStateTemplateWithReferences()
leafCertRef := suite.fooLeafRefs["foo-workload-identity"]
fooLeafResRef := leafResourceRef(leafCertRef.Name, leafCertRef.Namespace, leafCertRef.Partition)
// oldLeaf will store the original leaf from before we trigger a CA update.
var oldLeaf *x509.Certificate
// Assert that the expected ProxyState matches the actual ProxyState that PushChange was called with. This needs to
// be in a retry block unlike the Reconcile tests because the controller triggers asynchronously.
retry.Run(suite.T(), func(r *retry.R) {
actualEndpoints := suite.updater.GetEndpoints(suite.fooProxyStateTemplate.Id.Name)
actualLeafs := suite.updater.GetLeafs(suite.fooProxyStateTemplate.Id.Name)
// Assert on the status.
suite.client.RequireStatusCondition(r, suite.fooProxyStateTemplate.Id, ControllerName, status.ConditionAccepted())
// Assert that the endpoints computed in the controller match the expected endpoints.
prototest.AssertDeepEqual(r, suite.expectedFooProxyStateEndpoints, actualEndpoints)
// Assert that the leafs computed in the controller match the expected leafs.
require.Len(r, actualLeafs, 1)
for k, l := range actualLeafs {
pem, _ := pem.Decode([]byte(l.Cert))
cert, err := x509.ParseCertificate(pem.Bytes)
oldLeaf = cert
require.NoError(r, err)
require.Equal(r, cert.URIs[0].String(), suite.expectedFooProxyStateSpiffes[k])
// Check the state of the cancel functions map.
_, ok := suite.leafCancels.Get(keyFromReference(fooLeafResRef))
require.True(r, ok)
}
})
// Update the CA, and ensure the leaf cert is different from the leaf certificate from the step above.
suite.signer.UpdateCA(suite.T(), nil)
retry.Run(suite.T(), func(r *retry.R) {
actualLeafs := suite.updater.GetLeafs(suite.fooProxyStateTemplate.Id.Name)
require.Len(r, actualLeafs, 1)
for k, l := range actualLeafs {
pem, _ := pem.Decode([]byte(l.Cert))
cert, err := x509.ParseCertificate(pem.Bytes)
require.NoError(r, err)
// Ensure the leaf was actually updated by checking that the leaf we just got is different from the old leaf.
require.NotEqual(r, oldLeaf.Raw, cert.Raw)
require.Equal(r, suite.expectedFooProxyStateSpiffes[k], cert.URIs[0].String())
// Check the state of the cancel functions map. Even though we've updated the leaf cert, we should still
// have a watch going for it.
_, ok := suite.leafCancels.Get(keyFromReference(fooLeafResRef))
require.True(r, ok)
}
})
// Delete the leaf references on the fooProxyStateTemplate
delete(suite.fooLeafRefs, "foo-workload-identity")
oldVersion := suite.fooProxyStateTemplate.Version
fooProxyStateTemplate := resourcetest.Resource(types.ProxyStateTemplateType, "foo-pst").
WithData(suite.T(), &pbmesh.ProxyStateTemplate{
RequiredEndpoints: suite.fooEndpointRefs,
ProxyState: &pbmesh.ProxyState{},
RequiredLeafCertificates: suite.fooLeafRefs,
}).
Write(suite.T(), suite.client)
retry.Run(suite.T(), func(r *retry.R) {
suite.client.RequireVersionChanged(r, fooProxyStateTemplate.Id, oldVersion)
})
// Ensure the leaf certificate watches were cancelled since we deleted the leaf reference.
retry.Run(suite.T(), func(r *retry.R) {
_, ok := suite.leafCancels.Get(keyFromReference(fooLeafResRef))
require.False(r, ok)
})
}
// Sets up a full controller, and tests that reconciles are getting triggered for the leaf cert events it should.
// This test ensures that when a ProxyStateTemplate is deleted, the leaf watches are cancelled.
func (suite *xdsControllerTestSuite) TestController_ComputeLeafReferencesDeletePST() {
// Run the controller manager.
mgr := controller.NewManager(suite.client, suite.runtime.Logger)
mgr.Register(Controller(suite.mapper, suite.updater, suite.fetcher, suite.leafCertManager, suite.leafMapper, suite.leafCancels, "dc1"))
mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx)
suite.setupFooProxyStateTemplateWithReferences()
leafCertRef := suite.fooLeafRefs["foo-workload-identity"]
fooLeafResRef := leafResourceRef(leafCertRef.Name, leafCertRef.Namespace, leafCertRef.Partition)
// Assert that the expected ProxyState matches the actual ProxyState that PushChange was called with. This needs to
// be in a retry block unlike the Reconcile tests because the controller triggers asynchronously.
retry.Run(suite.T(), func(r *retry.R) {
actualEndpoints := suite.updater.GetEndpoints(suite.fooProxyStateTemplate.Id.Name)
actualLeafs := suite.updater.GetLeafs(suite.fooProxyStateTemplate.Id.Name)
// Assert on the status.
suite.client.RequireStatusCondition(r, suite.fooProxyStateTemplate.Id, ControllerName, status.ConditionAccepted())
// Assert that the endpoints computed in the controller match the expected endpoints.
prototest.AssertDeepEqual(r, suite.expectedFooProxyStateEndpoints, actualEndpoints)
// Assert that the leafs computed in the controller match the expected leafs.
require.Len(r, actualLeafs, 1)
for k, l := range actualLeafs {
pem, _ := pem.Decode([]byte(l.Cert))
cert, err := x509.ParseCertificate(pem.Bytes)
require.NoError(r, err)
require.Equal(r, cert.URIs[0].String(), suite.expectedFooProxyStateSpiffes[k])
// Check the state of the cancel functions map.
_, ok := suite.leafCancels.Get(keyFromReference(fooLeafResRef))
require.True(r, ok)
}
})
// Delete the fooProxyStateTemplate
req := &pbresource.DeleteRequest{
Id: suite.fooProxyStateTemplate.Id,
}
suite.client.Delete(suite.ctx, req)
// Ensure the leaf certificate watches were cancelled since we deleted the leaf reference.
retry.Run(suite.T(), func(r *retry.R) {
_, ok := suite.leafCancels.Get(keyFromReference(fooLeafResRef))
require.False(r, ok)
})
}
// Sets up a full controller, and tests that reconciles are getting triggered for the events it should.
func (suite *xdsControllerTestSuite) TestController_ComputeEndpointForProxyConnections() {
// Run the controller manager.
mgr := controller.NewManager(suite.client, suite.runtime.Logger)
mgr.Register(Controller(suite.mapper, suite.updater, suite.fetcher))
mgr.Register(Controller(suite.mapper, suite.updater, suite.fetcher, suite.leafCertManager, suite.leafMapper, suite.leafCancels, "dc1"))
mgr.SetRaftLeader(true)
go mgr.Run(suite.ctx)
// Set up fooEndpoints and fooProxyStateTemplate with a reference to fooEndpoints. These need to be stored
// because the controller reconcile looks them up.
suite.setupFooProxyStateTemplateAndEndpoints()
suite.setupFooProxyStateTemplateWithReferences()
// Assert that the expected ProxyState matches the actual ProxyState that PushChange was called with. This needs to
// be in a retry block unlike the Reconcile tests because the controller triggers asynchronously.
@ -464,9 +649,12 @@ func (suite *xdsControllerTestSuite) TestController_ComputeEndpointForProxyConne
require.NotNil(suite.T(), proxyStateTemp)
}
// Setup: fooProxyStateTemplate with an EndpointsRef to fooEndpoints
// Saves all related resources to the suite so they can be modified if needed.
func (suite *xdsControllerTestSuite) setupFooProxyStateTemplateAndEndpoints() {
// Setup: fooProxyStateTemplate with:
// - an EndpointsRef to fooEndpoints
// - a LeafCertificateRef to "foo-workload-identity"
//
// Saves all related resources to the suite so they can be looked up by the controller or modified if needed.
func (suite *xdsControllerTestSuite) setupFooProxyStateTemplateWithReferences() {
fooService := resourcetest.Resource(catalog.ServiceType, "foo-service").
WithData(suite.T(), &pbcatalog.Service{}).
Write(suite.T(), suite.client)
@ -501,9 +689,15 @@ func (suite *xdsControllerTestSuite) setupFooProxyStateTemplateAndEndpoints() {
Port: "mesh",
}
fooRequiredLeafs := make(map[string]*pbproxystate.LeafCertificateRef)
fooRequiredLeafs["foo-workload-identity"] = &pbproxystate.LeafCertificateRef{
Name: "foo-workload-identity",
}
fooProxyStateTemplate := resourcetest.Resource(types.ProxyStateTemplateType, "foo-pst").
WithData(suite.T(), &pbmesh.ProxyStateTemplate{
RequiredEndpoints: fooRequiredEndpoints,
RequiredLeafCertificates: fooRequiredLeafs,
ProxyState: &pbmesh.ProxyState{},
}).
Write(suite.T(), suite.client)
@ -512,6 +706,9 @@ func (suite *xdsControllerTestSuite) setupFooProxyStateTemplateAndEndpoints() {
suite.client.RequireResourceExists(r, fooProxyStateTemplate.Id)
})
expectedFooLeafSpiffes := map[string]string{
"foo-workload-identity": "spiffe://11111111-2222-3333-4444-555555555555.consul/ap/default/ns/default/identity/foo-workload-identity",
}
expectedFooProxyStateEndpoints := map[string]*pbproxystate.Endpoints{
"test-cluster-1": {Endpoints: []*pbproxystate.Endpoint{
{
@ -545,9 +742,11 @@ func (suite *xdsControllerTestSuite) setupFooProxyStateTemplateAndEndpoints() {
suite.fooService = fooService
suite.fooEndpoints = fooEndpoints
suite.fooEndpointRefs = fooRequiredEndpoints
suite.fooLeafRefs = fooRequiredLeafs
suite.fooProxyStateTemplate = fooProxyStateTemplate
suite.expectedFooProxyStateEndpoints = expectedFooProxyStateEndpoints
suite.expectedTrustBundle = expectedTrustBundle
suite.expectedFooProxyStateSpiffes = expectedFooLeafSpiffes
}
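// Illustrative sketch (not part of this change): the expected SPIFFE ID above follows the
// workload identity format spiffe://<trust-domain>/ap/<partition>/ns/<namespace>/identity/<name>.
// This hypothetical helper (assuming the fmt import) only mirrors that shape; the real URI
// is produced by the signer.
func workloadIdentitySpiffeURI(trustDomain, partition, namespace, identity string) string {
    return fmt.Sprintf("spiffe://%s/ap/%s/ns/%s/identity/%s", trustDomain, partition, namespace, identity)
}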
// Setup:

View File

@ -0,0 +1,34 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package xds
import (
"context"
"sync"
)
// LeafCancels holds the cancel functions for leaf certificates being watched by this controller instance.
type LeafCancels struct {
sync.Mutex
// Cancels is a map from a string key constructed from the leaf certificate's resource reference (via keyFromReference)
// to a cancel function for the leaf watch.
Cancels map[string]context.CancelFunc
}
func (l *LeafCancels) Get(key string) (context.CancelFunc, bool) {
l.Lock()
defer l.Unlock()
v, ok := l.Cancels[key]
return v, ok
}
func (l *LeafCancels) Set(key string, value context.CancelFunc) {
l.Lock()
defer l.Unlock()
l.Cancels[key] = value
}
func (l *LeafCancels) Delete(key string) {
l.Lock()
defer l.Unlock()
delete(l.Cancels, key)
}
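// Illustrative sketch (not part of this change), assuming the xds package's leafResourceRef
// and keyFromReference helpers: a watch's cancel function is stored under the leaf's string
// key, looked up later, invoked, and then removed.
func exampleLeafCancelsUsage(ctx context.Context) {
    cancels := &LeafCancels{Cancels: make(map[string]context.CancelFunc)}

    key := keyFromReference(leafResourceRef("foo-workload-identity", "default", "default"))
    watchCtx, cancel := context.WithCancel(ctx)
    _ = watchCtx // a real caller would hand this context to the leaf cert watch
    cancels.Set(key, cancel)

    if c, ok := cancels.Get(key); ok {
        c() // stop the watch
        cancels.Delete(key)
    }
}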

View File

@ -0,0 +1,39 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package xds
import (
"context"
"fmt"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/internal/controller"
"github.com/hashicorp/consul/internal/resource/mappers/bimapper"
)
// LeafMapper is a thin wrapper around a bimapper.Mapper that maps leaf certificate events to requests for PSTs (as opposed to mapping from a resource to requests for PSTs).
type LeafMapper struct {
*bimapper.Mapper
}
func (m *LeafMapper) EventMapLink(_ context.Context, _ controller.Runtime, event controller.Event) ([]controller.Request, error) {
// Get cert from event.
cert, ok := event.Obj.(*structs.IssuedCert)
if !ok {
return nil, fmt.Errorf("got invalid event type; expected *structs.IssuedCert")
}
// The LeafMapper has mappings from leaf certificate resource references to PSTs. So we need to translate the
// contents of the certificate from the event to a leaf resource reference.
leafRef := leafResourceRef(cert.WorkloadIdentity, cert.EnterpriseMeta.NamespaceOrDefault(), cert.EnterpriseMeta.PartitionOrDefault())
// Get all the ProxyStateTemplates that reference this leaf.
itemIDs := m.ItemIDsForLink(leafRef)
out := make([]controller.Request, 0, len(itemIDs))
for _, item := range itemIDs {
out = append(out, controller.Request{ID: item})
}
return out, nil
}
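// Illustrative sketch (not part of this change), assuming the xds package's types,
// pbresource, and resource imports: once a ProxyStateTemplate is tracked against a leaf
// reference, an IssuedCert event for that workload identity maps back to a reconcile
// request for that ProxyStateTemplate.
func exampleLeafMapperFlow(ctx context.Context, rt controller.Runtime, pstID *pbresource.ID) ([]controller.Request, error) {
    m := &LeafMapper{bimapper.New(types.ProxyStateTemplateType, InternalLeafType)}
    m.TrackItem(pstID, []resource.ReferenceOrID{
        leafResourceRef("foo-workload-identity", "default", "default"),
    })

    // An empty EnterpriseMeta defaults to the "default" namespace and partition, so this
    // event maps to the same leaf reference tracked above.
    event := controller.Event{Obj: &structs.IssuedCert{WorkloadIdentity: "foo-workload-identity"}}
    return m.EventMapLink(ctx, rt, event)
}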

View File

@ -55,13 +55,13 @@ func (m *mockUpdater) PushChange(id *pbresource.ID, snapshot proxysnapshot.Proxy
return nil
}
func (m *mockUpdater) ProxyConnectedToServer(_ *pbresource.ID) bool {
func (m *mockUpdater) ProxyConnectedToServer(_ *pbresource.ID) (string, bool) {
m.lock.Lock()
defer m.lock.Unlock()
if m.notConnected {
return false
return "", false
}
return true
return "atoken", true
}
func (m *mockUpdater) EventChannel() chan controller.Event {
@ -91,6 +91,16 @@ func (p *mockUpdater) GetEndpoints(name string) map[string]*pbproxystate.Endpoin
return nil
}
func (p *mockUpdater) GetLeafs(name string) map[string]*pbproxystate.LeafCertificate {
p.lock.Lock()
defer p.lock.Unlock()
ps, ok := p.latestPs[name]
if ok {
return ps.(*proxytracker.ProxyState).LeafCertificates
}
return nil
}
func (p *mockUpdater) GetTrustBundle(name string) map[string]*pbproxystate.TrustBundle {
p.lock.Lock()
defer p.lock.Unlock()

View File

@ -20,6 +20,9 @@ const (
StatusReasonCreatingProxyStateEndpointsFailed = "ProxyStateEndpointsNotComputed"
StatusReasonPushChangeFailed = "ProxyStatePushChangeFailed"
StatusReasonTrustBundleFetchFailed = "ProxyStateTrustBundleFetchFailed"
StatusReasonLeafWatchSetupFailed = "ProxyStateLeafWatchSetupError"
StatusReasonLeafFetchFailed = "ProxyStateLeafFetchError"
StatusReasonLeafEmpty = "ProxyStateLeafEmptyError"
)
func KeyFromID(id *pbresource.ID) string {
@ -45,6 +48,30 @@ func ConditionRejectedNilProxyState(pstRef string) *pbresource.Condition {
Message: fmt.Sprintf("nil proxy state is not valid %q.", pstRef),
}
}
func ConditionRejectedErrorCreatingLeafWatch(leafRef string, err string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionProxyStateAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonLeafWatchSetupFailed,
Message: fmt.Sprintf("error creating leaf watch %q: %s", leafRef, err),
}
}
func ConditionRejectedErrorGettingLeaf(leafRef string, err string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionProxyStateAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonLeafFetchFailed,
Message: fmt.Sprintf("error getting leaf from leaf certificate manager %q: %s", leafRef, err),
}
}
func ConditionRejectedErrorCreatingProxyStateLeaf(leafRef string, err string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionProxyStateAccepted,
State: pbresource.Condition_STATE_FALSE,
Reason: StatusReasonLeafEmpty,
Message: fmt.Sprintf("error getting leaf certificate contents %q: %s", leafRef, err),
}
}
func ConditionRejectedErrorReadingEndpoints(endpointRef string, err string) *pbresource.Condition {
return &pbresource.Condition{
Type: StatusConditionProxyStateAccepted,

View File

@ -251,12 +251,15 @@ func (pt *ProxyTracker) ShutdownChannel() chan struct{} {
}
// ProxyConnectedToServer returns the token for the connected proxy and whether this id is connected to this server.
func (pt *ProxyTracker) ProxyConnectedToServer(proxyID *pbresource.ID) bool {
func (pt *ProxyTracker) ProxyConnectedToServer(proxyID *pbresource.ID) (string, bool) {
pt.mu.Lock()
defer pt.mu.Unlock()
proxyReferenceKey := resource.NewReferenceKey(proxyID)
_, ok := pt.proxies[proxyReferenceKey]
return ok
proxyData, ok := pt.proxies[proxyReferenceKey]
if ok {
return proxyData.token, ok
}
return "", ok
}
// Shutdown removes all state and closes all channels.

View File

@ -277,7 +277,8 @@ func TestProxyTracker_ProxyConnectedToServer(t *testing.T) {
})
resourceID := resourcetest.Resource(types.ProxyStateTemplateType, "test").ID()
tc.preProcessingFunc(pt, resourceID, lim, session1, session1TermCh)
require.Equal(t, tc.shouldExist, pt.ProxyConnectedToServer(resourceID))
_, ok := pt.ProxyConnectedToServer(resourceID)
require.Equal(t, tc.shouldExist, ok)
}
}

View File

@ -222,11 +222,11 @@ func (m *Mapper) ItemsForLink(link *pbresource.ID) []*pbresource.ID {
}
// ItemIDsForLink returns item ids for items related to the provided link.
func (m *Mapper) ItemIDsForLink(link *pbresource.ID) []*pbresource.ID {
if !resource.EqualType(link.Type, m.linkType) {
func (m *Mapper) ItemIDsForLink(link resource.ReferenceOrID) []*pbresource.ID {
if !resource.EqualType(link.GetType(), m.linkType) {
panic(fmt.Sprintf("expected link type %q got %q",
resource.TypeToString(m.linkType),
resource.TypeToString(link.Type),
resource.TypeToString(link.GetType()),
))
}
@ -234,11 +234,11 @@ func (m *Mapper) ItemIDsForLink(link *pbresource.ID) []*pbresource.ID {
}
// ItemRefsForLink returns item references for items related to the provided link.
func (m *Mapper) ItemRefsForLink(link *pbresource.ID) []*pbresource.Reference {
if !resource.EqualType(link.Type, m.linkType) {
func (m *Mapper) ItemRefsForLink(link resource.ReferenceOrID) []*pbresource.Reference {
if !resource.EqualType(link.GetType(), m.linkType) {
panic(fmt.Sprintf("expected link type %q got %q",
resource.TypeToString(m.linkType),
resource.TypeToString(link.Type),
resource.TypeToString(link.GetType()),
))
}
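// Illustrative sketch (not part of this change): widening the signature to
// resource.ReferenceOrID lets callers look up items from a *pbresource.Reference, which is
// how the xds controller resolves leaf certificate links that have no resource ID. Assumes
// a Mapper whose link type matches the reference's type.
func exampleItemRefsForLeafLink(m *Mapper) []*pbresource.Reference {
    leafRef := &pbresource.Reference{
        Name:    "foo-workload-identity",
        Type:    &pbresource.Type{Group: "internal", GroupVersion: "v1alpha1", Kind: "leaf"},
        Tenancy: &pbresource.Tenancy{Partition: "default", Namespace: "default"},
    }
    return m.ItemRefsForLink(leafRef)
}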

View File

@ -89,6 +89,8 @@ func IssuedCertToStructsIssuedCert(s *IssuedCert, t *structs.IssuedCert) {
t.SerialNumber = s.SerialNumber
t.CertPEM = s.CertPEM
t.PrivateKeyPEM = s.PrivateKeyPEM
t.WorkloadIdentity = s.WorkloadIdentity
t.WorkloadIdentityURI = s.WorkloadIdentityURI
t.Service = s.Service
t.ServiceURI = s.ServiceURI
t.Agent = s.Agent
@ -108,6 +110,8 @@ func IssuedCertFromStructsIssuedCert(t *structs.IssuedCert, s *IssuedCert) {
s.SerialNumber = t.SerialNumber
s.CertPEM = t.CertPEM
s.PrivateKeyPEM = t.PrivateKeyPEM
s.WorkloadIdentity = t.WorkloadIdentity
s.WorkloadIdentityURI = t.WorkloadIdentityURI
s.Service = t.Service
s.ServiceURI = t.ServiceURI
s.Agent = t.Agent

View File

@ -383,6 +383,10 @@ type IssuedCert struct {
// ServerURI is the URI value of a cert issued for a server agent.
// The same URI is shared by all servers in a Consul datacenter.
ServerURI string `protobuf:"bytes,14,opt,name=ServerURI,proto3" json:"ServerURI,omitempty"`
// WorkloadIdentity is the name of the workload identity for which the cert was issued.
WorkloadIdentity string `protobuf:"bytes,15,opt,name=WorkloadIdentity,proto3" json:"WorkloadIdentity,omitempty"`
// WorkloadIdentityURI is the URI value of a cert issued for a workload identity.
WorkloadIdentityURI string `protobuf:"bytes,16,opt,name=WorkloadIdentityURI,proto3" json:"WorkloadIdentityURI,omitempty"`
// ValidAfter and ValidBefore are the validity periods for the
// certificate.
// mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto
@ -498,6 +502,20 @@ func (x *IssuedCert) GetServerURI() string {
return ""
}
func (x *IssuedCert) GetWorkloadIdentity() string {
if x != nil {
return x.WorkloadIdentity
}
return ""
}
func (x *IssuedCert) GetWorkloadIdentityURI() string {
if x != nil {
return x.WorkloadIdentityURI
}
return ""
}
func (x *IssuedCert) GetValidAfter() *timestamppb.Timestamp {
if x != nil {
return x.ValidAfter
@ -592,7 +610,7 @@ var file_private_pbconnect_connect_proto_rawDesc = []byte{
0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65,
0x78, 0x52, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xc7, 0x04, 0x0a,
0x78, 0x52, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xa5, 0x05, 0x0a,
0x0a, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x43, 0x65, 0x72, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x53,
0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0c, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12,
@ -611,43 +629,49 @@ var file_private_pbconnect_connect_proto_rawDesc = []byte{
0x18, 0x0a, 0x07, 0x4b, 0x69, 0x6e, 0x64, 0x55, 0x52, 0x49, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09,
0x52, 0x07, 0x4b, 0x69, 0x6e, 0x64, 0x55, 0x52, 0x49, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x65, 0x72,
0x76, 0x65, 0x72, 0x55, 0x52, 0x49, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x53, 0x65,
0x72, 0x76, 0x65, 0x72, 0x55, 0x52, 0x49, 0x12, 0x3a, 0x0a, 0x0a, 0x56, 0x61, 0x6c, 0x69, 0x64,
0x41, 0x66, 0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x66,
0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x0b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x42, 0x65, 0x66, 0x6f,
0x72, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72,
0x65, 0x12, 0x58, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d,
0x65, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x73, 0x68,
0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74,
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74,
0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74,
0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x49, 0x0a, 0x09, 0x52,
0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b,
0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75,
0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
0x6e, 0x2e, 0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, 0x61, 0x66,
0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x92, 0x02, 0x0a, 0x25, 0x63, 0x6f, 0x6d, 0x2e, 0x68,
0x72, 0x76, 0x65, 0x72, 0x55, 0x52, 0x49, 0x12, 0x2a, 0x0a, 0x10, 0x57, 0x6f, 0x72, 0x6b, 0x6c,
0x6f, 0x61, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28,
0x09, 0x52, 0x10, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74,
0x69, 0x74, 0x79, 0x12, 0x30, 0x0a, 0x13, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x49,
0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x55, 0x52, 0x49, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09,
0x52, 0x13, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69,
0x74, 0x79, 0x55, 0x52, 0x49, 0x12, 0x3a, 0x0a, 0x0a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x66,
0x74, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x41, 0x66, 0x74, 0x65,
0x72, 0x12, 0x3c, 0x0a, 0x0b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65,
0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
0x6d, 0x70, 0x52, 0x0b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12,
0x58, 0x0a, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74,
0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63,
0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72,
0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x45, 0x6e, 0x74, 0x65, 0x72,
0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x0e, 0x45, 0x6e, 0x74, 0x65, 0x72,
0x70, 0x72, 0x69, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x49, 0x0a, 0x09, 0x52, 0x61, 0x66,
0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x68,
0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e,
0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
0x42, 0x0c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73,
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x2f, 0x70, 0x62, 0x63, 0x6f,
0x6e, 0x6e, 0x65, 0x63, 0x74, 0xa2, 0x02, 0x04, 0x48, 0x43, 0x49, 0x43, 0xaa, 0x02, 0x21, 0x48,
0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e,
0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
0xca, 0x02, 0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e,
0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x43, 0x6f, 0x6e,
0x6e, 0x65, 0x63, 0x74, 0xe2, 0x02, 0x2d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
0x5c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61,
0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70,
0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e,
0x61, 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
0x52, 0x61, 0x66, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x09, 0x52, 0x61, 0x66, 0x74, 0x49,
0x6e, 0x64, 0x65, 0x78, 0x42, 0x92, 0x02, 0x0a, 0x25, 0x63, 0x6f, 0x6d, 0x2e, 0x68, 0x61, 0x73,
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x69, 0x6e,
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x42, 0x0c,
0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69,
0x63, 0x6f, 0x72, 0x70, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x2f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x2f, 0x70, 0x62, 0x63, 0x6f, 0x6e, 0x6e,
0x65, 0x63, 0x74, 0xa2, 0x02, 0x04, 0x48, 0x43, 0x49, 0x43, 0xaa, 0x02, 0x21, 0x48, 0x61, 0x73,
0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x2e, 0x49, 0x6e,
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0xca, 0x02,
0x21, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43, 0x6f, 0x6e, 0x73, 0x75,
0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
0x63, 0x74, 0xe2, 0x02, 0x2d, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x5c, 0x43,
0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x5c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5c, 0x43,
0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
0x74, 0x61, 0xea, 0x02, 0x24, 0x48, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x3a, 0x3a,
0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6c, 0x3a, 0x3a, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
0x3a, 0x3a, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (

View File

@ -172,6 +172,11 @@ message IssuedCert {
// The same URI is shared by all servers in a Consul datacenter.
string ServerURI = 14;
// WorkloadIdentity is the name of the workload identity for which the cert was issued.
string WorkloadIdentity = 15;
// WorkloadIdentityURI is the URI value of a cert issued for a workload identity.
string WorkloadIdentityURI = 16;
// ValidAfter and ValidBefore are the validity periods for the
// certificate.
// mog: func-to=structs.TimeFromProto func-from=structs.TimeToProto