package proxycfg

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/mitchellh/mapstructure"

	"github.com/hashicorp/consul/acl"
	cachetype "github.com/hashicorp/consul/agent/cache-types"
	"github.com/hashicorp/consul/agent/structs"
	"github.com/hashicorp/consul/proto/pbpeering"
)
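
// handlerUpstreams holds the shared upstream-watching logic used by the
// connect-proxy and ingress-gateway snapshot handlers.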
type handlerUpstreams struct {
	handlerState
}
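
// handleUpdateUpstreams applies a single watch event (leaf cert, mesh config
// entry, discovery chain, upstream or mesh gateway endpoints, or peering data)
// to the upstream portion of the config snapshot.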
func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error {
	if u.Err != nil {
		return fmt.Errorf("error filling agent cache: %v", u.Err)
	}

	upstreamsSnapshot, err := snap.ToConfigSnapshotUpstreams()
	if err != nil {
		return err
	}

	switch {
	case u.CorrelationID == leafWatchID:
		leaf, ok := u.Result.(*structs.IssuedCert)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		upstreamsSnapshot.Leaf = leaf

	case u.CorrelationID == meshConfigEntryID:
		resp, ok := u.Result.(*structs.ConfigEntryResponse)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}

		if resp.Entry != nil {
			meshConf, ok := resp.Entry.(*structs.MeshConfigEntry)
			if !ok {
				return fmt.Errorf("invalid type for config entry: %T", resp.Entry)
			}
			upstreamsSnapshot.MeshConfig = meshConf
		} else {
			upstreamsSnapshot.MeshConfig = nil
		}
		upstreamsSnapshot.MeshConfigSet = true

	case strings.HasPrefix(u.CorrelationID, "discovery-chain:"):
		resp, ok := u.Result.(*structs.DiscoveryChainResponse)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		uidString := strings.TrimPrefix(u.CorrelationID, "discovery-chain:")
		uid := UpstreamIDFromString(uidString)

		switch snap.Kind {
		case structs.ServiceKindIngressGateway:
			if _, ok := snap.IngressGateway.UpstreamsSet[uid]; !ok {
				// Discovery chain is not associated with a known explicit or implicit upstream so it is purged/skipped.
				// The associated watch was likely cancelled.
				delete(upstreamsSnapshot.DiscoveryChain, uid)
				s.logger.Trace("discovery-chain watch fired for unknown upstream", "upstream", uid)
				return nil
			}

		case structs.ServiceKindConnectProxy:
			explicit := snap.ConnectProxy.UpstreamConfig[uid].HasLocalPortOrSocket()
			implicit := snap.ConnectProxy.IsImplicitUpstream(uid)
			if !implicit && !explicit {
				// Discovery chain is not associated with a known explicit or implicit upstream so it is purged/skipped.
				// The associated watch was likely cancelled.
				delete(upstreamsSnapshot.DiscoveryChain, uid)
				s.logger.Trace("discovery-chain watch fired for unknown upstream", "upstream", uid)
				return nil
			}

		default:
			return fmt.Errorf("discovery-chain watch fired for unsupported kind: %s", snap.Kind)
		}

		upstreamsSnapshot.DiscoveryChain[uid] = resp.Chain

		if err := s.resetWatchesFromChain(ctx, uid, resp.Chain, upstreamsSnapshot); err != nil {
			return err
		}

	case strings.HasPrefix(u.CorrelationID, upstreamPeerWatchIDPrefix):
		resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		uidString := strings.TrimPrefix(u.CorrelationID, upstreamPeerWatchIDPrefix)
		uid := UpstreamIDFromString(uidString)

		s.setPeerEndpoints(upstreamsSnapshot, uid, resp.Nodes)

	case strings.HasPrefix(u.CorrelationID, peerTrustBundleIDPrefix):
		resp, ok := u.Result.(*pbpeering.TrustBundleReadResponse)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		peer := strings.TrimPrefix(u.CorrelationID, peerTrustBundleIDPrefix)
		if resp.Bundle != nil {
			upstreamsSnapshot.UpstreamPeerTrustBundles.Set(peer, resp.Bundle)
		}

	case strings.HasPrefix(u.CorrelationID, "upstream-target:"):
		resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		correlationID := strings.TrimPrefix(u.CorrelationID, "upstream-target:")
		targetID, uidString, ok := removeColonPrefix(correlationID)
		if !ok {
			return fmt.Errorf("invalid correlation id %q", u.CorrelationID)
		}

		uid := UpstreamIDFromString(uidString)

		if _, ok := upstreamsSnapshot.WatchedUpstreamEndpoints[uid]; !ok {
			upstreamsSnapshot.WatchedUpstreamEndpoints[uid] = make(map[string]structs.CheckServiceNodes)
		}
		upstreamsSnapshot.WatchedUpstreamEndpoints[uid][targetID] = resp.Nodes

		if s.kind != structs.ServiceKindConnectProxy || s.proxyCfg.Mode != structs.ProxyModeTransparent {
			return nil
		}

		// Clear out this target's existing passthrough upstreams and indices so that they can be repopulated below.
		if _, ok := upstreamsSnapshot.PassthroughUpstreams[uid]; ok {
			for addr := range upstreamsSnapshot.PassthroughUpstreams[uid][targetID] {
				if indexed := upstreamsSnapshot.PassthroughIndices[addr]; indexed.targetID == targetID && indexed.upstreamID == uid {
					delete(upstreamsSnapshot.PassthroughIndices, addr)
				}
			}
			upstreamsSnapshot.PassthroughUpstreams[uid][targetID] = make(map[string]struct{})
		}

		passthroughs := make(map[string]struct{})

		for _, node := range resp.Nodes {
			if !node.Service.Proxy.TransparentProxy.DialedDirectly {
				continue
			}

			// Make sure to use an external address when crossing partition or DC boundaries.
			isRemote := !snap.Locality.Matches(node.Node.Datacenter, node.Node.PartitionOrDefault())
			// If the node is peered it must be remote.
			if node.Node.PeerOrEmpty() != "" {
				isRemote = true
			}
			csnIdx, addr, _ := node.BestAddress(isRemote)

			existing := upstreamsSnapshot.PassthroughIndices[addr]
			if existing.idx > csnIdx {
				// The last known instance with this address had a higher index so it takes precedence.
				continue
			}

			// The current instance has a higher Raft index so we ensure the passthrough address is only
			// associated with this upstream target. Older associations are cleaned up as needed.
			delete(upstreamsSnapshot.PassthroughUpstreams[existing.upstreamID][existing.targetID], addr)
			if len(upstreamsSnapshot.PassthroughUpstreams[existing.upstreamID][existing.targetID]) == 0 {
				delete(upstreamsSnapshot.PassthroughUpstreams[existing.upstreamID], existing.targetID)
			}
			if len(upstreamsSnapshot.PassthroughUpstreams[existing.upstreamID]) == 0 {
				delete(upstreamsSnapshot.PassthroughUpstreams, existing.upstreamID)
			}

			upstreamsSnapshot.PassthroughIndices[addr] = indexedTarget{idx: csnIdx, upstreamID: uid, targetID: targetID}
			passthroughs[addr] = struct{}{}
		}
		if len(passthroughs) > 0 {
			upstreamsSnapshot.PassthroughUpstreams[uid] = map[string]map[string]struct{}{
				targetID: passthroughs,
			}
		}

	case strings.HasPrefix(u.CorrelationID, "mesh-gateway:"):
		resp, ok := u.Result.(*structs.IndexedCheckServiceNodes)
		if !ok {
			return fmt.Errorf("invalid type for response: %T", u.Result)
		}
		correlationID := strings.TrimPrefix(u.CorrelationID, "mesh-gateway:")
		key, uidString, ok := removeColonPrefix(correlationID)
		if !ok {
			return fmt.Errorf("invalid correlation id %q", u.CorrelationID)
		}
		uid := UpstreamIDFromString(uidString)

		if _, ok = upstreamsSnapshot.WatchedGatewayEndpoints[uid]; !ok {
			upstreamsSnapshot.WatchedGatewayEndpoints[uid] = make(map[string]structs.CheckServiceNodes)
		}
		upstreamsSnapshot.WatchedGatewayEndpoints[uid][key] = resp.Nodes

	default:
		return fmt.Errorf("unknown correlation ID: %s", u.CorrelationID)
	}
	return nil
}
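
// removeColonPrefix splits s on its first colon, returning the prefix, the
// remainder, and whether a colon was found.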
func removeColonPrefix(s string) (string, string, bool) {
	idx := strings.Index(s, ":")
	if idx == -1 {
		return "", "", false
	}
	return s[0:idx], s[idx+1:], true
}
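
// setPeerEndpoints stores endpoints for a peered upstream in the snapshot,
// preferring hostname-based endpoints when any are present and tracking
// whether the stored set uses hostnames.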
func (s *handlerUpstreams) setPeerEndpoints(upstreamsSnapshot *ConfigSnapshotUpstreams, uid UpstreamID, nodes structs.CheckServiceNodes) {
	filteredNodes := hostnameEndpoints(
		s.logger,
		GatewayKey{ /*empty so it never matches*/ },
		nodes,
	)
	if len(filteredNodes) > 0 {
		if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, filteredNodes); set {
			upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames[uid] = struct{}{}
		}
	} else {
		if set := upstreamsSnapshot.PeerUpstreamEndpoints.Set(uid, nodes); set {
			delete(upstreamsSnapshot.PeerUpstreamEndpointsUseHostnames, uid)
		}
	}
}
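
// resetWatchesFromChain reconciles the watches for an upstream against the
// given compiled discovery chain: it starts watches for each chain target and
// any required mesh gateways, and cancels watches that are no longer needed.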
func (s *handlerUpstreams) resetWatchesFromChain(
	ctx context.Context,
	uid UpstreamID,
	chain *structs.CompiledDiscoveryChain,
	snap *ConfigSnapshotUpstreams,
) error {
	s.logger.Trace("resetting watches for discovery chain", "id", uid)
	if chain == nil {
		return fmt.Errorf("not possible to arrive here with no discovery chain")
	}

	// Initialize relevant sub maps.
	if _, ok := snap.WatchedUpstreams[uid]; !ok {
		snap.WatchedUpstreams[uid] = make(map[string]context.CancelFunc)
	}
	if _, ok := snap.WatchedUpstreamEndpoints[uid]; !ok {
		snap.WatchedUpstreamEndpoints[uid] = make(map[string]structs.CheckServiceNodes)
	}
	if _, ok := snap.WatchedGateways[uid]; !ok {
		snap.WatchedGateways[uid] = make(map[string]context.CancelFunc)
	}
	if _, ok := snap.WatchedGatewayEndpoints[uid]; !ok {
		snap.WatchedGatewayEndpoints[uid] = make(map[string]structs.CheckServiceNodes)
	}

	// We could invalidate this selectively based on a hash of the relevant
	// resolver information, but for now just reset anything about this
	// upstream when the chain changes in any way.
	//
	// TODO(rb): content hash based add/remove
	for targetID, cancelFn := range snap.WatchedUpstreams[uid] {
		s.logger.Trace("stopping watch of target",
			"upstream", uid,
			"chain", chain.ServiceName,
			"target", targetID,
		)
		delete(snap.WatchedUpstreams[uid], targetID)
		delete(snap.WatchedUpstreamEndpoints[uid], targetID)
		cancelFn()

		targetUID := NewUpstreamIDFromTargetID(targetID)
		if targetUID.Peer != "" {
			snap.PeerUpstreamEndpoints.CancelWatch(targetUID)
			snap.UpstreamPeerTrustBundles.CancelWatch(targetUID.Peer)
		}
	}

	var (
		watchedChainEndpoints bool
		needGateways          = make(map[string]struct{})
	)

	chainID := chain.ID()
	for _, target := range chain.Targets {
		if target.ID == chainID {
			watchedChainEndpoints = true
		}

		opts := targetWatchOpts{
			upstreamID: uid,
			chainID:    target.ID,
			service:    target.Service,
			filter:     target.Subset.Filter,
			datacenter: target.Datacenter,
			peer:       target.Peer,
			entMeta:    target.GetEnterpriseMetadata(),
		}
		err := s.watchUpstreamTarget(ctx, snap, opts)
		if err != nil {
			return fmt.Errorf("failed to watch target %q for upstream %q: %w", target.ID, uid, err)
		}

		// We'll get endpoints from the gateway query, but the health still has
		// to come from the backing service query.
		var gk GatewayKey

		switch target.MeshGateway.Mode {
		case structs.MeshGatewayModeRemote:
			gk = GatewayKey{
				Partition:  target.Partition,
				Datacenter: target.Datacenter,
			}
		case structs.MeshGatewayModeLocal:
			gk = GatewayKey{
				Partition:  s.source.NodePartitionOrDefault(),
				Datacenter: s.source.Datacenter,
			}
		}
		if s.source.Datacenter != target.Datacenter || s.proxyID.PartitionOrDefault() != target.Partition {
			needGateways[gk.String()] = struct{}{}
		}
	}

	// If the discovery chain's targets do not lead to watching all endpoints
	// for the upstream, then create a separate watch for those too.
	// This is needed in transparent mode because if there is some service A that
	// redirects to service B, the dialing proxy needs to associate A's virtual IP
	// with A's discovery chain.
	//
	// Outside of transparent mode we only watch the chain target, B,
	// since A is a virtual service and traffic will not be sent to it.
	if !watchedChainEndpoints && s.proxyCfg.Mode == structs.ProxyModeTransparent {
		chainEntMeta := acl.NewEnterpriseMetaWithPartition(chain.Partition, chain.Namespace)

		opts := targetWatchOpts{
			upstreamID: uid,
			chainID:    chainID,
			service:    chain.ServiceName,
			filter:     "",
			datacenter: chain.Datacenter,
			entMeta:    &chainEntMeta,
		}
		err := s.watchUpstreamTarget(ctx, snap, opts)
		if err != nil {
			return fmt.Errorf("failed to watch target %q for upstream %q: %w", chainID, uid, err)
		}
	}

	for key := range needGateways {
		if _, ok := snap.WatchedGateways[uid][key]; ok {
			continue
		}
		gwKey := gatewayKeyFromString(key)

		s.logger.Trace("initializing watch of mesh gateway",
			"upstream", uid,
			"chain", chain.ServiceName,
			"datacenter", gwKey.Datacenter,
			"partition", gwKey.Partition,
		)

		ctx, cancel := context.WithCancel(ctx)
		opts := gatewayWatchOpts{
			internalServiceDump: s.dataSources.InternalServiceDump,
			notifyCh:            s.ch,
			source:              *s.source,
			token:               s.token,
			key:                 gwKey,
			upstreamID:          uid,
		}
		err := watchMeshGateway(ctx, opts)
		if err != nil {
			cancel()
			return err
		}

		snap.WatchedGateways[uid][key] = cancel
	}

	for key, cancelFn := range snap.WatchedGateways[uid] {
		if _, ok := needGateways[key]; ok {
			continue
		}
		gwKey := gatewayKeyFromString(key)

		s.logger.Trace("stopping watch of mesh gateway",
			"upstream", uid,
			"chain", chain.ServiceName,
			"datacenter", gwKey.Datacenter,
			"partition", gwKey.Partition,
		)
		delete(snap.WatchedGateways[uid], key)
		delete(snap.WatchedGatewayEndpoints[uid], key)
		cancelFn()
	}

	return nil
}
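
// targetWatchOpts describes a request to watch the service instances behind a
// single discovery chain target.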
type targetWatchOpts struct {
	upstreamID UpstreamID
	chainID    string
	service    string
	filter     string
	datacenter string
	peer       string
	entMeta    *acl.EnterpriseMeta
}
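
// watchUpstreamTarget starts a health watch on the given target's service
// instances. For peered targets it also initializes watches on the peer's
// upstream endpoints and trust bundle.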
func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *ConfigSnapshotUpstreams, opts targetWatchOpts) error {
	s.logger.Trace("initializing watch of target",
		"upstream", opts.upstreamID,
		"chain", opts.service,
		"target", opts.chainID,
	)

	var finalMeta acl.EnterpriseMeta
	finalMeta.Merge(opts.entMeta)
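
	// The correlation ID ties the watch event back to this target: local
	// targets use "upstream-target:<target-id>:<upstream-id>", while peered
	// targets use the upstream peer watch prefix with the peer-qualified
	// upstream ID.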
	uid := opts.upstreamID
	correlationID := "upstream-target:" + opts.chainID + ":" + uid.String()

	if opts.peer != "" {
		uid = NewUpstreamIDFromTargetID(opts.chainID)
		correlationID = upstreamPeerWatchIDPrefix + uid.String()
	}

	ctx, cancel := context.WithCancel(ctx)
	err := s.dataSources.Health.Notify(ctx, &structs.ServiceSpecificRequest{
		PeerName:   opts.peer,
		Datacenter: opts.datacenter,
		QueryOptions: structs.QueryOptions{
			Token:  s.token,
			Filter: opts.filter,
		},
		ServiceName: opts.service,
		Connect:     true,
		// Note that Identifier doesn't type-prefix for service any more as it's
		// the default and makes metrics and other things much cleaner. It's
		// simpler for us if we have the type to make things unambiguous.
		Source:         *s.source,
		EnterpriseMeta: finalMeta,
	}, correlationID, s.ch)

	if err != nil {
		cancel()
		return err
	}
	snap.WatchedUpstreams[opts.upstreamID][opts.chainID] = cancel

	if uid.Peer == "" {
		return nil
	}

	if ok := snap.PeerUpstreamEndpoints.IsWatched(uid); !ok {
		snap.PeerUpstreamEndpoints.InitWatch(uid, cancel)
	}

	// Check whether a watch for this peer exists to avoid duplicates.
	if ok := snap.UpstreamPeerTrustBundles.IsWatched(uid.Peer); !ok {
		peerCtx, cancel := context.WithCancel(ctx)
		if err := s.dataSources.TrustBundle.Notify(peerCtx, &cachetype.TrustBundleReadRequest{
			Request: &pbpeering.TrustBundleReadRequest{
				Name:      uid.Peer,
				Partition: uid.PartitionOrDefault(),
			},
			QueryOptions: structs.QueryOptions{Token: s.token},
		}, peerTrustBundleIDPrefix+uid.Peer, s.ch); err != nil {
			cancel()
			return fmt.Errorf("error while watching trust bundle for peer %q: %w", uid.Peer, err)
		}

		snap.UpstreamPeerTrustBundles.InitWatch(uid.Peer, cancel)
	}

	return nil
}
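
// discoveryChainWatchOpts describes the inputs needed to watch the compiled
// discovery chain for an upstream.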
type discoveryChainWatchOpts struct {
	id          UpstreamID
	name        string
	namespace   string
	partition   string
	datacenter  string
	cfg         reducedUpstreamConfig
	meshGateway structs.MeshGatewayConfig
}
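
// watchDiscoveryChain starts watching the compiled discovery chain for the
// given upstream unless a watch is already active.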
func (s *handlerUpstreams) watchDiscoveryChain(ctx context.Context, snap *ConfigSnapshot, opts discoveryChainWatchOpts) error {
	var watchedDiscoveryChains map[UpstreamID]context.CancelFunc
	switch s.kind {
	case structs.ServiceKindIngressGateway:
		watchedDiscoveryChains = snap.IngressGateway.WatchedDiscoveryChains
	case structs.ServiceKindConnectProxy:
		watchedDiscoveryChains = snap.ConnectProxy.WatchedDiscoveryChains
	default:
		return fmt.Errorf("unsupported kind %s", s.kind)
	}

	if _, ok := watchedDiscoveryChains[opts.id]; ok {
		return nil
	}

	ctx, cancel := context.WithCancel(ctx)
	err := s.dataSources.CompiledDiscoveryChain.Notify(ctx, &structs.DiscoveryChainRequest{
		Datacenter:             s.source.Datacenter,
		QueryOptions:           structs.QueryOptions{Token: s.token},
		Name:                   opts.name,
		EvaluateInDatacenter:   opts.datacenter,
		EvaluateInNamespace:    opts.namespace,
		EvaluateInPartition:    opts.partition,
		OverrideProtocol:       opts.cfg.Protocol,
		OverrideConnectTimeout: opts.cfg.ConnectTimeout(),
		OverrideMeshGateway:    opts.meshGateway,
	}, "discovery-chain:"+opts.id.String(), s.ch)
	if err != nil {
		cancel()
		return err
	}

	watchedDiscoveryChains[opts.id] = cancel
	return nil
}

// reducedUpstreamConfig represents the basic opaque config values that are now
// managed with the discovery chain but for backwards compatibility reasons
// should still affect how the proxy is configured.
//
// The full-blown config is agent/xds.UpstreamConfig
type reducedUpstreamConfig struct {
	Protocol         string `mapstructure:"protocol"`
	ConnectTimeoutMs int    `mapstructure:"connect_timeout_ms"`
}

func (c *reducedUpstreamConfig) ConnectTimeout() time.Duration {
	return time.Duration(c.ConnectTimeoutMs) * time.Millisecond
}
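
// parseReducedUpstreamConfig weakly decodes an upstream's opaque config map
// into a reducedUpstreamConfig.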
func parseReducedUpstreamConfig(m map[string]interface{}) (reducedUpstreamConfig, error) {
	var cfg reducedUpstreamConfig
	err := mapstructure.WeakDecode(m, &cfg)
	return cfg, err
}