mirror of https://github.com/status-im/consul.git
Merge pull request #9489 from hashicorp/dnephin/proxycfg-state-2
proxycfg: split state into a handler for each kind
This commit is contained in commit 19d3eeff3c.
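The refactor replaces the per-kind switch statements inside state (initWatches, handleUpdate) with a kindHandler interface and one handler type per service kind, chosen once in newState from the service registration. A minimal sketch of that dispatch pattern follows, using simplified placeholder types (ConfigSnapshot and UpdateEvent here are stand-ins, not the real proxycfg structs):

package main

import (
	"context"
	"errors"
	"fmt"
)

// Simplified stand-ins for the real proxycfg types.
type ConfigSnapshot struct{ Kind string }
type UpdateEvent struct{ CorrelationID string }

// kindHandler mirrors the interface added by this commit: each service kind
// supplies its own initialize and handleUpdate implementation.
type kindHandler interface {
	initialize(ctx context.Context) (ConfigSnapshot, error)
	handleUpdate(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error
}

type handlerConnectProxy struct{}

func (h *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, error) {
	// The real handler registers cache watches here and seeds the snapshot maps.
	return ConfigSnapshot{Kind: "connect-proxy"}, nil
}

func (h *handlerConnectProxy) handleUpdate(ctx context.Context, u UpdateEvent, snap *ConfigSnapshot) error {
	// The real handler routes on u.CorrelationID and mutates the snapshot.
	fmt.Println("handled update:", u.CorrelationID)
	return nil
}

// The handler is selected once, the way newState now does it, instead of
// switching on the kind inside every method of state.
func newHandler(kind string) (kindHandler, error) {
	switch kind {
	case "connect-proxy":
		return &handlerConnectProxy{}, nil
	default:
		return nil, errors.New("unsupported service kind")
	}
}

func main() {
	h, err := newHandler("connect-proxy")
	if err != nil {
		panic(err)
	}
	snap, _ := h.initialize(context.Background())
	_ = h.handleUpdate(context.Background(), UpdateEvent{CorrelationID: "roots"}, &snap)
}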
@@ -189,14 +189,8 @@ func (m *Manager) ensureProxyServiceLocked(ns *structs.NodeService, token string
 state.Close()
 }
 
-var err error
-state, err = newState(ns, token)
-if err != nil {
-return err
-}
-
 // TODO: move to a function that translates ManagerConfig->stateConfig
-state.stateConfig = stateConfig{
+stateConfig := stateConfig{
 logger: m.Logger.With("service_id", sid.String()),
 cache: m.Cache,
 health: m.Health,

@@ -205,7 +199,13 @@ func (m *Manager) ensureProxyServiceLocked(ns *structs.NodeService, token string
 intentionDefaultAllow: m.IntentionDefaultAllow,
 }
 if m.TLSConfigurator != nil {
-state.serverSNIFn = m.TLSConfigurator.ServerSNI
+stateConfig.serverSNIFn = m.TLSConfigurator.ServerSNI
 }
 
+var err error
+state, err = newState(ns, token, stateConfig)
+if err != nil {
+return err
+}
+
 ch, err := state.Watch()
@@ -67,10 +67,9 @@ type stateConfig struct {
 // connect-proxy service. When a proxy registration is changed, the entire state
 // is discarded and a new one created.
 type state struct {
-// TODO: un-embedd once refactor is complete
-stateConfig
-// TODO: un-embed once refactor is complete
-serviceInstance
+logger hclog.Logger
+serviceInstance serviceInstance
+handler kindHandler
 
 // cancel is set by Watch and called by Close to stop the goroutine started
 // in Watch.

@@ -136,34 +135,46 @@ func copyProxyConfig(ns *structs.NodeService) (structs.ConnectProxyConfig, error
 //
 // The returned state needs its required dependencies to be set before Watch
 // can be called.
-func newState(ns *structs.NodeService, token string) (*state, error) {
-switch ns.Kind {
-case structs.ServiceKindConnectProxy:
-case structs.ServiceKindTerminatingGateway:
-case structs.ServiceKindMeshGateway:
-case structs.ServiceKindIngressGateway:
-default:
-return nil, errors.New("not a connect-proxy, terminating-gateway, mesh-gateway, or ingress-gateway")
-}
+func newState(ns *structs.NodeService, token string, config stateConfig) (*state, error) {
+// 10 is fairly arbitrary here but allow for the 3 mandatory and a
+// reasonable number of upstream watches to all deliver their initial
+// messages in parallel without blocking the cache.Notify loops. It's not a
+// huge deal if we do for a short period so we don't need to be more
+// conservative to handle larger numbers of upstreams correctly but gives
+// some head room for normal operation to be non-blocking in most typical
+// cases.
+ch := make(chan cache.UpdateEvent, 10)
 
 s, err := newServiceInstanceFromNodeService(ns, token)
 if err != nil {
 return nil, err
 }
 
-return &state{
-serviceInstance: s,
+var handler kindHandler
+h := handlerState{stateConfig: config, serviceInstance: s, ch: ch}
 
-// 10 is fairly arbitrary here but allow for the 3 mandatory and a
-// reasonable number of upstream watches to all deliver their initial
-// messages in parallel without blocking the cache.Notify loops. It's not a
-// huge deal if we do for a short period so we don't need to be more
-// conservative to handle larger numbers of upstreams correctly but gives
-// some head room for normal operation to be non-blocking in most typical
-// cases.
-ch: make(chan cache.UpdateEvent, 10),
-snapCh: make(chan ConfigSnapshot, 1),
-reqCh: make(chan chan *ConfigSnapshot, 1),
+switch ns.Kind {
+case structs.ServiceKindConnectProxy:
+handler = &handlerConnectProxy{handlerState: h}
+case structs.ServiceKindTerminatingGateway:
+h.stateConfig.logger = config.logger.Named(logging.TerminatingGateway)
+handler = &handlerTerminatingGateway{handlerState: h}
+case structs.ServiceKindMeshGateway:
+h.stateConfig.logger = config.logger.Named(logging.MeshGateway)
+handler = &handlerMeshGateway{handlerState: h}
+case structs.ServiceKindIngressGateway:
+handler = &handlerIngressGateway{handlerState: h}
+default:
+return nil, errors.New("not a connect-proxy, terminating-gateway, mesh-gateway, or ingress-gateway")
+}
+
+return &state{
+logger: config.logger.With("proxy", s.proxyID, "kind", s.kind),
+serviceInstance: s,
+handler: handler,
+ch: ch,
+snapCh: make(chan ConfigSnapshot, 1),
+reqCh: make(chan chan *ConfigSnapshot, 1),
 }, nil
 }
 
@@ -196,6 +207,11 @@ func newServiceInstanceFromNodeService(ns *structs.NodeService, token string) (s
 }, nil
 }
 
+type kindHandler interface {
+initialize(ctx context.Context) (ConfigSnapshot, error)
+handleUpdate(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error
+}
+
 // Watch initialized watches on all necessary cache data for the current proxy
 // registration state and returns a chan to observe updates to the
 // ConfigSnapshot that contains all necessary config state. The chan is closed

@@ -204,8 +220,7 @@ func (s *state) Watch() (<-chan ConfigSnapshot, error) {
 var ctx context.Context
 ctx, s.cancel = context.WithCancel(context.Background())
 
-snap := s.initialConfigSnapshot()
-err := s.initWatches(ctx, &snap)
+snap, err := s.handler.initialize(ctx)
 if err != nil {
 s.cancel()
 return nil, err

@@ -224,23 +239,29 @@ func (s *state) Close() error {
 return nil
 }
 
-// initWatches sets up the watches needed for the particular service
-func (s *state) initWatches(ctx context.Context, snap *ConfigSnapshot) error {
-switch s.kind {
-case structs.ServiceKindConnectProxy:
-return s.initWatchesConnectProxy(ctx, snap)
-case structs.ServiceKindTerminatingGateway:
-return s.initWatchesTerminatingGateway(ctx)
-case structs.ServiceKindMeshGateway:
-return s.initWatchesMeshGateway(ctx)
-case structs.ServiceKindIngressGateway:
-return s.initWatchesIngressGateway(ctx)
-default:
-return fmt.Errorf("Unsupported service kind")
-}
+type handlerState struct {
+stateConfig // TODO: un-embed
+serviceInstance // TODO: un-embed
+ch chan cache.UpdateEvent
 }
 
-func (s *state) watchMeshGateway(ctx context.Context, dc string, upstreamID string) error {
+type handlerMeshGateway struct {
+handlerState
+}
+
+type handlerTerminatingGateway struct {
+handlerState
+}
+
+type handlerConnectProxy struct {
+handlerState
+}
+
+type handlerIngressGateway struct {
+handlerState
+}
+
+func (s *handlerUpstreams) watchMeshGateway(ctx context.Context, dc string, upstreamID string) error {
 return s.cache.Notify(ctx, cachetype.InternalServiceDumpName, &structs.ServiceDumpRequest{
 Datacenter: dc,
 QueryOptions: structs.QueryOptions{Token: s.token},
@@ -251,9 +272,42 @@ func (s *state) watchMeshGateway(ctx context.Context, dc string, upstreamID stri
 }, "mesh-gateway:"+dc+":"+upstreamID, s.ch)
 }
 
-// initWatchesConnectProxy sets up the watches needed based on current proxy registration
+type handlerUpstreams struct {
+handlerState
+}
+
+func (s *handlerUpstreams) watchConnectProxyService(ctx context.Context, correlationId string, target *structs.DiscoveryTarget) error {
+return s.stateConfig.cache.Notify(ctx, cachetype.HealthServicesName, &structs.ServiceSpecificRequest{
+Datacenter: target.Datacenter,
+QueryOptions: structs.QueryOptions{
+Token: s.serviceInstance.token,
+Filter: target.Subset.Filter,
+},
+ServiceName: target.Service,
+Connect: true,
+// Note that Identifier doesn't type-prefix for service any more as it's
+// the default and makes metrics and other things much cleaner. It's
+// simpler for us if we have the type to make things unambiguous.
+Source: *s.stateConfig.source,
+EnterpriseMeta: *target.GetEnterpriseMetadata(),
+}, correlationId, s.ch)
+}
+
+// initialize sets up the watches needed based on current proxy registration
 // state.
-func (s *state) initWatchesConnectProxy(ctx context.Context, snap *ConfigSnapshot) error {
+func (s *handlerConnectProxy) initialize(ctx context.Context) (ConfigSnapshot, error) {
+snap := newConfigSnapshotFromServiceInstance(s.serviceInstance, s.stateConfig)
+snap.ConnectProxy.DiscoveryChain = make(map[string]*structs.CompiledDiscoveryChain)
+snap.ConnectProxy.WatchedDiscoveryChains = make(map[string]context.CancelFunc)
+snap.ConnectProxy.WatchedUpstreams = make(map[string]map[string]context.CancelFunc)
+snap.ConnectProxy.WatchedUpstreamEndpoints = make(map[string]map[string]structs.CheckServiceNodes)
+snap.ConnectProxy.WatchedGateways = make(map[string]map[string]context.CancelFunc)
+snap.ConnectProxy.WatchedGatewayEndpoints = make(map[string]map[string]structs.CheckServiceNodes)
+snap.ConnectProxy.WatchedServiceChecks = make(map[structs.ServiceID][]structs.CheckType)
+snap.ConnectProxy.PreparedQueryEndpoints = make(map[string]structs.CheckServiceNodes)
+snap.ConnectProxy.UpstreamConfig = make(map[string]*structs.Upstream)
+snap.ConnectProxy.PassthroughUpstreams = make(map[string]ServicePassthroughAddrs)
+
 // Watch for root changes
 err := s.cache.Notify(ctx, cachetype.ConnectCARootName, &structs.DCSpecificRequest{
 Datacenter: s.source.Datacenter,

@@ -261,7 +315,7 @@ func (s *state) initWatchesConnectProxy(ctx context.Context, snap *ConfigSnapsho
 Source: *s.source,
 }, rootsWatchID, s.ch)
 if err != nil {
-return err
+return snap, err
 }
 
 // Watch the leaf cert

@@ -272,7 +326,7 @@ func (s *state) initWatchesConnectProxy(ctx context.Context, snap *ConfigSnapsho
 EnterpriseMeta: s.proxyID.EnterpriseMeta,
 }, leafWatchID, s.ch)
 if err != nil {
-return err
+return snap, err
 }
 
 // Watch for intention updates

@@ -290,7 +344,7 @@ func (s *state) initWatchesConnectProxy(ctx context.Context, snap *ConfigSnapsho
 },
 }, intentionsWatchID, s.ch)
 if err != nil {
-return err
+return snap, err
 }
 
 // Watch for service check updates

@@ -299,7 +353,7 @@ func (s *state) initWatchesConnectProxy(ctx context.Context, snap *ConfigSnapsho
 EnterpriseMeta: s.proxyID.EnterpriseMeta,
 }, svcChecksWatchIDPrefix+structs.ServiceIDString(s.proxyCfg.DestinationServiceID, &s.proxyID.EnterpriseMeta), s.ch)
 if err != nil {
-return err
+return snap, err
 }
 
 // default the namespace to the namespace of this proxy service

@@ -314,7 +368,7 @@ func (s *state) initWatchesConnectProxy(ctx context.Context, snap *ConfigSnapsho
 EnterpriseMeta: structs.NewEnterpriseMeta(s.proxyID.NamespaceOrEmpty()),
 }, intentionUpstreamsID, s.ch)
 if err != nil {
-return err
+return snap, err
 }
 
 err = s.cache.Notify(ctx, cachetype.ConfigEntryName, &structs.ConfigEntryQuery{

@@ -325,7 +379,7 @@ func (s *state) initWatchesConnectProxy(ctx context.Context, snap *ConfigSnapsho
 EnterpriseMeta: *structs.DefaultEnterpriseMeta(),
 }, meshConfigEntryID, s.ch)
 if err != nil {
-return err
+return snap, err
 }
 }
 
@@ -381,7 +435,7 @@ func (s *state) initWatchesConnectProxy(ctx context.Context, snap *ConfigSnapsho
 Source: *s.source,
 }, "upstream:"+u.Identifier(), s.ch)
 if err != nil {
-return err
+return snap, err
 }
 
 case structs.UpstreamDestTypeService:

@@ -399,17 +453,18 @@ func (s *state) initWatchesConnectProxy(ctx context.Context, snap *ConfigSnapsho
 OverrideConnectTimeout: cfg.ConnectTimeout(),
 }, "discovery-chain:"+u.Identifier(), s.ch)
 if err != nil {
-return fmt.Errorf("failed to watch discovery chain for %s: %v", u.Identifier(), err)
+return snap, fmt.Errorf("failed to watch discovery chain for %s: %v", u.Identifier(), err)
 }
 
 default:
-return fmt.Errorf("unknown upstream type: %q", u.DestinationType)
+return snap, fmt.Errorf("unknown upstream type: %q", u.DestinationType)
 }
 }
-return nil
+
+return snap, nil
 }
 
-// reducedProxyConfig represents the basic opaque config values that are now
+// reducedUpstreamConfig represents the basic opaque config values that are now
 // managed with the discovery chain but for backwards compatibility reasons
 // should still affect how the proxy is configured.
 //

@@ -430,7 +485,8 @@ func parseReducedUpstreamConfig(m map[string]interface{}) (reducedUpstreamConfig
 }
 
 // initWatchesTerminatingGateway sets up the initial watches needed based on the terminating-gateway registration
-func (s *state) initWatchesTerminatingGateway(ctx context.Context) error {
+func (s *handlerTerminatingGateway) initialize(ctx context.Context) (ConfigSnapshot, error) {
+snap := newConfigSnapshotFromServiceInstance(s.serviceInstance, s.stateConfig)
 // Watch for root changes
 err := s.cache.Notify(ctx, cachetype.ConnectCARootName, &structs.DCSpecificRequest{
 Datacenter: s.source.Datacenter,

@@ -438,9 +494,8 @@ func (s *state) initWatchesTerminatingGateway(ctx context.Context) error {
 Source: *s.source,
 }, rootsWatchID, s.ch)
 if err != nil {
-s.logger.Named(logging.TerminatingGateway).
-Error("failed to register watch for root changes", "error", err)
-return err
+s.logger.Error("failed to register watch for root changes", "error", err)
+return snap, err
 }
 
 // Watch for the terminating-gateway's linked services

@@ -451,16 +506,29 @@ func (s *state) initWatchesTerminatingGateway(ctx context.Context) error {
 EnterpriseMeta: s.proxyID.EnterpriseMeta,
 }, gatewayServicesWatchID, s.ch)
 if err != nil {
-s.logger.Named(logging.TerminatingGateway).
-Error("failed to register watch for linked services", "error", err)
-return err
+s.logger.Error("failed to register watch for linked services", "error", err)
+return snap, err
 }
 
-return nil
+snap.TerminatingGateway.WatchedServices = make(map[structs.ServiceName]context.CancelFunc)
+snap.TerminatingGateway.WatchedIntentions = make(map[structs.ServiceName]context.CancelFunc)
+snap.TerminatingGateway.Intentions = make(map[structs.ServiceName]structs.Intentions)
+snap.TerminatingGateway.WatchedLeaves = make(map[structs.ServiceName]context.CancelFunc)
+snap.TerminatingGateway.ServiceLeaves = make(map[structs.ServiceName]*structs.IssuedCert)
+snap.TerminatingGateway.WatchedConfigs = make(map[structs.ServiceName]context.CancelFunc)
+snap.TerminatingGateway.ServiceConfigs = make(map[structs.ServiceName]*structs.ServiceConfigResponse)
+snap.TerminatingGateway.WatchedResolvers = make(map[structs.ServiceName]context.CancelFunc)
+snap.TerminatingGateway.ServiceResolvers = make(map[structs.ServiceName]*structs.ServiceResolverConfigEntry)
+snap.TerminatingGateway.ServiceResolversSet = make(map[structs.ServiceName]bool)
+snap.TerminatingGateway.ServiceGroups = make(map[structs.ServiceName]structs.CheckServiceNodes)
+snap.TerminatingGateway.GatewayServices = make(map[structs.ServiceName]structs.GatewayService)
+snap.TerminatingGateway.HostnameServices = make(map[structs.ServiceName]structs.CheckServiceNodes)
+return snap, nil
 }
 
 // initWatchesMeshGateway sets up the watches needed based on the current mesh gateway registration
-func (s *state) initWatchesMeshGateway(ctx context.Context) error {
+func (s *handlerMeshGateway) initialize(ctx context.Context) (ConfigSnapshot, error) {
+snap := newConfigSnapshotFromServiceInstance(s.serviceInstance, s.stateConfig)
 // Watch for root changes
 err := s.cache.Notify(ctx, cachetype.ConnectCARootName, &structs.DCSpecificRequest{
 Datacenter: s.source.Datacenter,
@@ -468,7 +536,7 @@ func (s *state) initWatchesMeshGateway(ctx context.Context) error {
 Source: *s.source,
 }, rootsWatchID, s.ch)
 if err != nil {
-return err
+return snap, err
 }
 
 // Watch for all services

@@ -480,7 +548,7 @@ func (s *state) initWatchesMeshGateway(ctx context.Context) error {
 }, serviceListWatchID, s.ch)
 
 if err != nil {
-return err
+return snap, err
 }
 
 if s.meta[structs.MetaWANFederationKey] == "1" {

@@ -493,7 +561,7 @@ func (s *state) initWatchesMeshGateway(ctx context.Context) error {
 Source: *s.source,
 }, federationStateListGatewaysWatchID, s.ch)
 if err != nil {
-return err
+return snap, err
 }
 
 err = s.health.Notify(ctx, structs.ServiceSpecificRequest{

@@ -502,7 +570,7 @@ func (s *state) initWatchesMeshGateway(ctx context.Context) error {
 ServiceName: structs.ConsulServiceName,
 }, consulServerListWatchID, s.ch)
 if err != nil {
-return err
+return snap, err
 }
 }
 

@@ -515,7 +583,7 @@ func (s *state) initWatchesMeshGateway(ctx context.Context) error {
 QueryOptions: structs.QueryOptions{Token: s.token, MaxAge: 30 * time.Second},
 }, datacentersWatchID, s.ch)
 if err != nil {
-return err
+return snap, err
 }
 
 // Once we start getting notified about the datacenters we will setup watches on the

@@ -529,17 +597,25 @@ func (s *state) initWatchesMeshGateway(ctx context.Context) error {
 Kind: structs.ServiceResolver,
 EnterpriseMeta: *structs.WildcardEnterpriseMeta(),
 }, serviceResolversWatchID, s.ch)
-
 if err != nil {
 s.logger.Named(logging.MeshGateway).
 Error("failed to register watch for service-resolver config entries", "error", err)
-return err
+return snap, err
 }
 
-return err
+snap.MeshGateway.WatchedServices = make(map[structs.ServiceName]context.CancelFunc)
+snap.MeshGateway.WatchedDatacenters = make(map[string]context.CancelFunc)
+snap.MeshGateway.ServiceGroups = make(map[structs.ServiceName]structs.CheckServiceNodes)
+snap.MeshGateway.GatewayGroups = make(map[string]structs.CheckServiceNodes)
+snap.MeshGateway.ServiceResolvers = make(map[structs.ServiceName]*structs.ServiceResolverConfigEntry)
+snap.MeshGateway.HostnameDatacenters = make(map[string]structs.CheckServiceNodes)
+// there is no need to initialize the map of service resolvers as we
+// fully rebuild it every time we get updates
+return snap, err
 }
 
-func (s *state) initWatchesIngressGateway(ctx context.Context) error {
+func (s *handlerIngressGateway) initialize(ctx context.Context) (ConfigSnapshot, error) {
+snap := newConfigSnapshotFromServiceInstance(s.serviceInstance, s.stateConfig)
 // Watch for root changes
 err := s.cache.Notify(ctx, cachetype.ConnectCARootName, &structs.DCSpecificRequest{
 Datacenter: s.source.Datacenter,

@@ -547,7 +623,7 @@ func (s *state) initWatchesIngressGateway(ctx context.Context) error {
 Source: *s.source,
 }, rootsWatchID, s.ch)
 if err != nil {
-return err
+return snap, err
 }
 
 // Watch this ingress gateway's config entry

@@ -559,7 +635,7 @@ func (s *state) initWatchesIngressGateway(ctx context.Context) error {
 EnterpriseMeta: s.proxyID.EnterpriseMeta,
 }, gatewayConfigWatchID, s.ch)
 if err != nil {
-return err
+return snap, err
 }
 
 // Watch the ingress-gateway's list of upstreams

@@ -570,14 +646,21 @@ func (s *state) initWatchesIngressGateway(ctx context.Context) error {
 EnterpriseMeta: s.proxyID.EnterpriseMeta,
 }, gatewayServicesWatchID, s.ch)
 if err != nil {
-return err
+return snap, err
 }
 
-return nil
+snap.IngressGateway.WatchedDiscoveryChains = make(map[string]context.CancelFunc)
+snap.IngressGateway.DiscoveryChain = make(map[string]*structs.CompiledDiscoveryChain)
+snap.IngressGateway.WatchedUpstreams = make(map[string]map[string]context.CancelFunc)
+snap.IngressGateway.WatchedUpstreamEndpoints = make(map[string]map[string]structs.CheckServiceNodes)
+snap.IngressGateway.WatchedGateways = make(map[string]map[string]context.CancelFunc)
+snap.IngressGateway.WatchedGatewayEndpoints = make(map[string]map[string]structs.CheckServiceNodes)
+return snap, nil
 }
 
-func (s *state) initialConfigSnapshot() ConfigSnapshot {
-snap := ConfigSnapshot{
+func newConfigSnapshotFromServiceInstance(s serviceInstance, config stateConfig) ConfigSnapshot {
+// TODO: use serviceInstance type in ConfigSnapshot
+return ConfigSnapshot{
 Kind: s.kind,
 Service: s.service,
 ProxyID: s.proxyID,
@@ -586,56 +669,10 @@ func (s *state) initialConfigSnapshot() ConfigSnapshot {
 ServiceMeta: s.meta,
 TaggedAddresses: s.taggedAddresses,
 Proxy: s.proxyCfg,
-Datacenter: s.source.Datacenter,
-ServerSNIFn: s.serverSNIFn,
-IntentionDefaultAllow: s.intentionDefaultAllow,
+Datacenter: config.source.Datacenter,
+ServerSNIFn: config.serverSNIFn,
+IntentionDefaultAllow: config.intentionDefaultAllow,
 }
-
-switch s.kind {
-case structs.ServiceKindConnectProxy:
-snap.ConnectProxy.DiscoveryChain = make(map[string]*structs.CompiledDiscoveryChain)
-snap.ConnectProxy.WatchedDiscoveryChains = make(map[string]context.CancelFunc)
-snap.ConnectProxy.WatchedUpstreams = make(map[string]map[string]context.CancelFunc)
-snap.ConnectProxy.WatchedUpstreamEndpoints = make(map[string]map[string]structs.CheckServiceNodes)
-snap.ConnectProxy.WatchedGateways = make(map[string]map[string]context.CancelFunc)
-snap.ConnectProxy.WatchedGatewayEndpoints = make(map[string]map[string]structs.CheckServiceNodes)
-snap.ConnectProxy.WatchedServiceChecks = make(map[structs.ServiceID][]structs.CheckType)
-snap.ConnectProxy.PreparedQueryEndpoints = make(map[string]structs.CheckServiceNodes)
-snap.ConnectProxy.UpstreamConfig = make(map[string]*structs.Upstream)
-snap.ConnectProxy.PassthroughUpstreams = make(map[string]ServicePassthroughAddrs)
-case structs.ServiceKindTerminatingGateway:
-snap.TerminatingGateway.WatchedServices = make(map[structs.ServiceName]context.CancelFunc)
-snap.TerminatingGateway.WatchedIntentions = make(map[structs.ServiceName]context.CancelFunc)
-snap.TerminatingGateway.Intentions = make(map[structs.ServiceName]structs.Intentions)
-snap.TerminatingGateway.WatchedLeaves = make(map[structs.ServiceName]context.CancelFunc)
-snap.TerminatingGateway.ServiceLeaves = make(map[structs.ServiceName]*structs.IssuedCert)
-snap.TerminatingGateway.WatchedConfigs = make(map[structs.ServiceName]context.CancelFunc)
-snap.TerminatingGateway.ServiceConfigs = make(map[structs.ServiceName]*structs.ServiceConfigResponse)
-snap.TerminatingGateway.WatchedResolvers = make(map[structs.ServiceName]context.CancelFunc)
-snap.TerminatingGateway.ServiceResolvers = make(map[structs.ServiceName]*structs.ServiceResolverConfigEntry)
-snap.TerminatingGateway.ServiceResolversSet = make(map[structs.ServiceName]bool)
-snap.TerminatingGateway.ServiceGroups = make(map[structs.ServiceName]structs.CheckServiceNodes)
-snap.TerminatingGateway.GatewayServices = make(map[structs.ServiceName]structs.GatewayService)
-snap.TerminatingGateway.HostnameServices = make(map[structs.ServiceName]structs.CheckServiceNodes)
-case structs.ServiceKindMeshGateway:
-snap.MeshGateway.WatchedServices = make(map[structs.ServiceName]context.CancelFunc)
-snap.MeshGateway.WatchedDatacenters = make(map[string]context.CancelFunc)
-snap.MeshGateway.ServiceGroups = make(map[structs.ServiceName]structs.CheckServiceNodes)
-snap.MeshGateway.GatewayGroups = make(map[string]structs.CheckServiceNodes)
-snap.MeshGateway.ServiceResolvers = make(map[structs.ServiceName]*structs.ServiceResolverConfigEntry)
-snap.MeshGateway.HostnameDatacenters = make(map[string]structs.CheckServiceNodes)
-// there is no need to initialize the map of service resolvers as we
-// fully rebuild it every time we get updates
-case structs.ServiceKindIngressGateway:
-snap.IngressGateway.WatchedDiscoveryChains = make(map[string]context.CancelFunc)
-snap.IngressGateway.DiscoveryChain = make(map[string]*structs.CompiledDiscoveryChain)
-snap.IngressGateway.WatchedUpstreams = make(map[string]map[string]context.CancelFunc)
-snap.IngressGateway.WatchedUpstreamEndpoints = make(map[string]map[string]structs.CheckServiceNodes)
-snap.IngressGateway.WatchedGateways = make(map[string]map[string]context.CancelFunc)
-snap.IngressGateway.WatchedGatewayEndpoints = make(map[string]map[string]structs.CheckServiceNodes)
-}
-
-return snap
 }
 
 func (s *state) run(ctx context.Context, snap *ConfigSnapshot) {

@@ -659,7 +696,7 @@ func (s *state) run(ctx context.Context, snap *ConfigSnapshot) {
 case u := <-s.ch:
 s.logger.Trace("A blocking query returned; handling snapshot update")
 
-if err := s.handleUpdate(ctx, u, snap); err != nil {
+if err := s.handler.handleUpdate(ctx, u, snap); err != nil {
 s.logger.Error("Failed to handle update from watch",
 "id", u.CorrelationID, "error", err,
 )

@@ -671,9 +708,7 @@ func (s *state) run(ctx context.Context, snap *ConfigSnapshot) {
 // etc on future updates.
 snapCopy, err := snap.Clone()
 if err != nil {
-s.logger.Error("Failed to copy config snapshot for proxy",
-"error", err,
-)
+s.logger.Error("Failed to copy config snapshot for proxy", "error", err)
 continue
 }
 
@@ -719,9 +754,7 @@ func (s *state) run(ctx context.Context, snap *ConfigSnapshot) {
 // etc on future updates.
 snapCopy, err := snap.Clone()
 if err != nil {
-s.logger.Error("Failed to copy config snapshot for proxy",
-"error", err,
-)
+s.logger.Error("Failed to copy config snapshot for proxy", "error", err)
 continue
 }
 replyCh <- snapCopy

@@ -748,22 +781,7 @@ func (s *state) run(ctx context.Context, snap *ConfigSnapshot) {
 }
 }
 
-func (s *state) handleUpdate(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
-switch s.kind {
-case structs.ServiceKindConnectProxy:
-return s.handleUpdateConnectProxy(ctx, u, snap)
-case structs.ServiceKindTerminatingGateway:
-return s.handleUpdateTerminatingGateway(ctx, u, snap)
-case structs.ServiceKindMeshGateway:
-return s.handleUpdateMeshGateway(ctx, u, snap)
-case structs.ServiceKindIngressGateway:
-return s.handleUpdateIngressGateway(ctx, u, snap)
-default:
-return fmt.Errorf("Unsupported service kind")
-}
-}
-
-func (s *state) handleUpdateConnectProxy(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
+func (s *handlerConnectProxy) handleUpdate(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
 if u.Err != nil {
 return fmt.Errorf("error filling agent cache: %v", u.Err)
 }

@@ -838,7 +856,8 @@ func (s *state) handleUpdateConnectProxy(ctx context.Context, u cache.UpdateEven
 cfg: cfg,
 meshGateway: meshGateway,
 }
-err = s.watchDiscoveryChain(ctx, snap, watchOpts)
+up := &handlerUpstreams{handlerState: s.handlerState}
+err = up.watchDiscoveryChain(ctx, snap, watchOpts)
 if err != nil {
 return fmt.Errorf("failed to watch discovery chain for %s: %v", svc.String(), err)
 }

@@ -922,12 +941,12 @@ func (s *state) handleUpdateConnectProxy(ctx context.Context, u cache.UpdateEven
 snap.ConnectProxy.MeshConfigSet = true
 
 default:
-return s.handleUpdateUpstreams(ctx, u, snap)
+return (*handlerUpstreams)(s).handleUpdateUpstreams(ctx, u, snap)
 }
 return nil
 }
 
-func (s *state) handleUpdateUpstreams(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
+func (s *handlerUpstreams) handleUpdateUpstreams(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
 if u.Err != nil {
 return fmt.Errorf("error filling agent cache: %v", u.Err)
 }

@@ -1044,7 +1063,7 @@ func removeColonPrefix(s string) (string, string, bool) {
 return s[0:idx], s[idx+1:], true
 }
 
-func (s *state) resetWatchesFromChain(
+func (s *handlerUpstreams) resetWatchesFromChain(
 ctx context.Context,
 id string,
 chain *structs.CompiledDiscoveryChain,

@@ -1191,7 +1210,7 @@ type targetWatchOpts struct {
 entMeta *structs.EnterpriseMeta
 }
 
-func (s *state) watchUpstreamTarget(ctx context.Context, snap *ConfigSnapshotUpstreams, opts targetWatchOpts) error {
+func (s *handlerUpstreams) watchUpstreamTarget(ctx context.Context, snap *ConfigSnapshotUpstreams, opts targetWatchOpts) error {
 s.logger.Trace("initializing watch of target",
 "upstream", opts.upstreamID,
 "chain", opts.service,

@@ -1228,11 +1247,11 @@ func (s *state) watchUpstreamTarget(ctx context.Context, snap *ConfigSnapshotUps
 return nil
 }
 
-func (s *state) handleUpdateTerminatingGateway(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
+func (s *handlerTerminatingGateway) handleUpdate(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
 if u.Err != nil {
 return fmt.Errorf("error filling agent cache: %v", u.Err)
 }
-logger := s.logger.Named(logging.TerminatingGateway)
+logger := s.logger
 
 switch {
 case u.CorrelationID == rootsWatchID:

@@ -1455,7 +1474,8 @@ func (s *state) handleUpdateTerminatingGateway(ctx context.Context, u cache.Upda
 
 if len(resp.Nodes) > 0 {
 snap.TerminatingGateway.ServiceGroups[sn] = resp.Nodes
-snap.TerminatingGateway.HostnameServices[sn] = s.hostnameEndpoints(logging.TerminatingGateway, snap.Datacenter, resp.Nodes)
+snap.TerminatingGateway.HostnameServices[sn] = hostnameEndpoints(
+s.logger, snap.Datacenter, resp.Nodes)
 }
 
 // Store leaf cert for watched service

@@ -1513,7 +1533,7 @@ func (s *state) handleUpdateTerminatingGateway(ctx context.Context, u cache.Upda
 return nil
 }
 
-func (s *state) handleUpdateMeshGateway(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
+func (s *handlerMeshGateway) handleUpdate(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
 if u.Err != nil {
 return fmt.Errorf("error filling agent cache: %v", u.Err)
 }

@@ -1535,7 +1555,8 @@ func (s *state) handleUpdateMeshGateway(ctx context.Context, u cache.UpdateEvent
 snap.MeshGateway.FedStateGateways = dcIndexedNodes.DatacenterNodes
 
 for dc, nodes := range dcIndexedNodes.DatacenterNodes {
-snap.MeshGateway.HostnameDatacenters[dc] = s.hostnameEndpoints(logging.MeshGateway, snap.Datacenter, nodes)
+snap.MeshGateway.HostnameDatacenters[dc] = hostnameEndpoints(
+s.logger.Named(logging.MeshGateway), snap.Datacenter, nodes)
 }
 
 for dc := range snap.MeshGateway.HostnameDatacenters {
@@ -1704,7 +1725,8 @@ func (s *state) handleUpdateMeshGateway(ctx context.Context, u cache.UpdateEvent
 
 if len(resp.Nodes) > 0 {
 snap.MeshGateway.GatewayGroups[dc] = resp.Nodes
-snap.MeshGateway.HostnameDatacenters[dc] = s.hostnameEndpoints(logging.MeshGateway, snap.Datacenter, resp.Nodes)
+snap.MeshGateway.HostnameDatacenters[dc] = hostnameEndpoints(
+s.logger.Named(logging.MeshGateway), snap.Datacenter, resp.Nodes)
 }
 default:
 // do nothing for now

@@ -1714,7 +1736,7 @@ func (s *state) handleUpdateMeshGateway(ctx context.Context, u cache.UpdateEvent
 return nil
 }
 
-func (s *state) handleUpdateIngressGateway(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
+func (s *handlerIngressGateway) handleUpdate(ctx context.Context, u cache.UpdateEvent, snap *ConfigSnapshot) error {
 if u.Err != nil {
 return fmt.Errorf("error filling agent cache: %v", u.Err)
 }

@@ -1762,7 +1784,8 @@ func (s *state) handleUpdateIngressGateway(ctx context.Context, u cache.UpdateEv
 namespace: u.DestinationNamespace,
 datacenter: s.source.Datacenter,
 }
-err := s.watchDiscoveryChain(ctx, snap, watchOpts)
+up := &handlerUpstreams{handlerState: s.handlerState}
+err := up.watchDiscoveryChain(ctx, snap, watchOpts)
 if err != nil {
 return fmt.Errorf("failed to watch discovery chain for %s: %v", u.Identifier(), err)
 }

@@ -1790,7 +1813,7 @@ func (s *state) handleUpdateIngressGateway(ctx context.Context, u cache.UpdateEv
 }
 
 default:
-return s.handleUpdateUpstreams(ctx, u, snap)
+return (*handlerUpstreams)(s).handleUpdateUpstreams(ctx, u, snap)
 }
 
 return nil

@@ -1823,7 +1846,7 @@ type discoveryChainWatchOpts struct {
 meshGateway structs.MeshGatewayConfig
 }
 
-func (s *state) watchDiscoveryChain(ctx context.Context, snap *ConfigSnapshot, opts discoveryChainWatchOpts) error {
+func (s *handlerUpstreams) watchDiscoveryChain(ctx context.Context, snap *ConfigSnapshot, opts discoveryChainWatchOpts) error {
 if _, ok := snap.ConnectProxy.WatchedDiscoveryChains[opts.id]; ok {
 return nil
 }

@@ -1857,7 +1880,7 @@ func (s *state) watchDiscoveryChain(ctx context.Context, snap *ConfigSnapshot, o
 return nil
 }
 
-func (s *state) generateIngressDNSSANs(snap *ConfigSnapshot) []string {
+func (s *handlerIngressGateway) generateIngressDNSSANs(snap *ConfigSnapshot) []string {
 // Update our leaf cert watch with wildcard entries for our DNS domains as well as any
 // configured custom hostnames from the service.
 if !snap.IngressGateway.TLSEnabled {

@@ -1894,7 +1917,7 @@ func (s *state) generateIngressDNSSANs(snap *ConfigSnapshot) []string {
 return dnsNames
 }
 
-func (s *state) watchIngressLeafCert(ctx context.Context, snap *ConfigSnapshot) error {
+func (s *handlerIngressGateway) watchIngressLeafCert(ctx context.Context, snap *ConfigSnapshot) error {
 if !snap.IngressGateway.TLSSet || !snap.IngressGateway.HostsSet {
 return nil
 }

@@ -1943,19 +1966,20 @@ func (s *state) Changed(ns *structs.NodeService, token string) bool {
 s.logger.Warn("Failed to parse proxy config and will treat the new service as unchanged")
 }
 
-return ns.Kind != s.kind ||
-s.proxyID != ns.CompoundServiceID() ||
-s.address != ns.Address ||
-s.port != ns.Port ||
-!reflect.DeepEqual(s.proxyCfg, proxyCfg) ||
-s.token != token
+i := s.serviceInstance
+return ns.Kind != i.kind ||
+i.proxyID != ns.CompoundServiceID() ||
+i.address != ns.Address ||
+i.port != ns.Port ||
+!reflect.DeepEqual(i.proxyCfg, proxyCfg) ||
+i.token != token
 }
 
 // hostnameEndpoints returns all CheckServiceNodes that have hostnames instead of IPs as the address.
 // Envoy cannot resolve hostnames provided through EDS, so we exclusively use CDS for these clusters.
 // If there is a mix of hostnames and addresses we exclusively use the hostnames, since clusters cannot discover
 // services with both EDS and DNS.
-func (s *state) hostnameEndpoints(loggerName string, localDC string, nodes structs.CheckServiceNodes) structs.CheckServiceNodes {
+func hostnameEndpoints(logger hclog.Logger, localDC string, nodes structs.CheckServiceNodes) structs.CheckServiceNodes {
 var (
 hasIP bool
 hasHostname bool

@@ -1976,9 +2000,8 @@ func (s *state) hostnameEndpoints(loggerName string, localDC string, nodes struc
 dc := nodes[0].Node.Datacenter
 sn := nodes[0].Service.CompoundServiceName()
 
-s.logger.Named(loggerName).
-Warn("service contains instances with mix of hostnames and IP addresses; only hostnames will be passed to Envoy",
-"dc", dc, "service", sn.String())
+logger.Warn("service contains instances with mix of hostnames and IP addresses; only hostnames will be passed to Envoy",
+"dc", dc, "service", sn.String())
 }
 return resp
 }
@@ -7,6 +7,7 @@ import (
 "testing"
 "time"
 
+"github.com/hashicorp/go-hclog"
 "github.com/stretchr/testify/require"
 
 "github.com/hashicorp/consul/agent/cache"

@@ -115,7 +116,7 @@ func TestStateChanged(t *testing.T) {
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
 require := require.New(t)
-state, err := newState(tt.ns, tt.token)
+state, err := newState(tt.ns, tt.token, stateConfig{logger: hclog.New(nil)})
 require.NoError(err)
 otherNS, otherToken := tt.mutate(*tt.ns, tt.token)
 require.Equal(tt.want, state.Changed(otherNS, otherToken))

@@ -2125,7 +2126,19 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 
 for name, tc := range cases {
 t.Run(name, func(t *testing.T) {
-state, err := newState(&tc.ns, "")
+cn := newTestCacheNotifier()
+state, err := newState(&tc.ns, "", stateConfig{
+logger: testutil.Logger(t),
+cache: cn,
+health: &health.Client{Cache: cn, CacheName: cachetype.HealthServicesName},
+source: &structs.QuerySource{
+Datacenter: tc.sourceDC,
+},
+dnsConfig: DNSConfig{
+Domain: "consul.",
+AltDomain: "alt.consul.",
+},
+})
 
 // verify building the initial state worked
 require.NoError(t, err)

@@ -2134,30 +2147,12 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 // setup the test logger to use the t.Log
 state.logger = testutil.Logger(t)
 
-// setup a new testing cache notifier
-cn := newTestCacheNotifier()
-state.cache = cn
-state.health = &health.Client{Cache: cn, CacheName: cachetype.HealthServicesName}
-
-// setup the local datacenter information
-state.source = &structs.QuerySource{
-Datacenter: tc.sourceDC,
-}
-
-state.dnsConfig = DNSConfig{
-Domain: "consul.",
-AltDomain: "alt.consul.",
-}
-
 // setup the ctx as initWatches expects this to be there
 var ctx context.Context
 ctx, state.cancel = context.WithCancel(context.Background())
 
-// get the initial configuration snapshot
-snap := state.initialConfigSnapshot()
-
-// ensure the initial watch setup did not error
-require.NoError(t, state.initWatches(ctx, &snap))
+snap, err := state.handler.initialize(ctx)
+require.NoError(t, err)
 
 //--------------------------------------------------------------------
 //

@@ -2184,7 +2179,7 @@ func TestState_WatchesAndUpdates(t *testing.T) {
 // therefore we just tell it about the updates
 for eveIdx, event := range stage.events {
 require.True(t, t.Run(fmt.Sprintf("update-%d", eveIdx), func(t *testing.T) {
-require.NoError(t, state.handleUpdate(ctx, event, &snap))
+require.NoError(t, state.handler.handleUpdate(ctx, event, &snap))
 }))
 }
 