proxycfg: extract two types from state struct

These two new struct types will allow us to make a polymorphic handler for each kind, instead of
having all the logic for each proxy kind on the state struct.
This commit is contained in:
Daniel Nephin 2020-12-23 17:29:20 -05:00
parent 9c40aa729f
commit 016c5611d1
2 changed files with 56 additions and 36 deletions

View File

@ -195,13 +195,15 @@ func (m *Manager) ensureProxyServiceLocked(ns *structs.NodeService, token string
return err return err
} }
// Set the necessary dependencies // TODO: move to a function that translates ManagerConfig->stateConfig
state.logger = m.Logger.With("service_id", sid.String()) state.stateConfig = stateConfig{
state.cache = m.Cache logger: m.Logger.With("service_id", sid.String()),
state.health = m.Health cache: m.Cache,
state.source = m.Source health: m.Health,
state.dnsConfig = m.DNSConfig source: m.Source,
state.intentionDefaultAllow = m.IntentionDefaultAllow dnsConfig: m.DNSConfig,
intentionDefaultAllow: m.IntentionDefaultAllow,
}
if m.TLSConfigurator != nil { if m.TLSConfigurator != nil {
state.serverSNIFn = m.TLSConfigurator.ServerSNI state.serverSNIFn = m.TLSConfigurator.ServerSNI
} }

View File

@ -49,16 +49,11 @@ const (
intentionUpstreamsID = "intention-upstreams" intentionUpstreamsID = "intention-upstreams"
meshConfigEntryID = "mesh" meshConfigEntryID = "mesh"
svcChecksWatchIDPrefix = cachetype.ServiceHTTPChecksName + ":" svcChecksWatchIDPrefix = cachetype.ServiceHTTPChecksName + ":"
serviceIDPrefix = string(structs.UpstreamDestTypeService) + ":"
preparedQueryIDPrefix = string(structs.UpstreamDestTypePreparedQuery) + ":" preparedQueryIDPrefix = string(structs.UpstreamDestTypePreparedQuery) + ":"
defaultPreparedQueryPollInterval = 30 * time.Second defaultPreparedQueryPollInterval = 30 * time.Second
) )
// state holds all the state needed to maintain the config for a registered type stateConfig struct {
// connect-proxy service. When a proxy registration is changed, the entire state
// is discarded and a new one created.
type state struct {
// logger, source and cache are required to be set before calling Watch.
logger hclog.Logger logger hclog.Logger
source *structs.QuerySource source *structs.QuerySource
cache CacheNotifier cache CacheNotifier
@ -66,21 +61,21 @@ type state struct {
dnsConfig DNSConfig dnsConfig DNSConfig
serverSNIFn ServerSNIFunc serverSNIFn ServerSNIFunc
intentionDefaultAllow bool intentionDefaultAllow bool
}
// state holds all the state needed to maintain the config for a registered
// connect-proxy service. When a proxy registration is changed, the entire state
// is discarded and a new one created.
type state struct {
// TODO: un-embed once refactor is complete
stateConfig
// TODO: un-embed once refactor is complete
serviceInstance
// cancel is set by Watch and called by Close to stop the goroutine started // cancel is set by Watch and called by Close to stop the goroutine started
// in Watch. // in Watch.
cancel func() cancel func()
kind structs.ServiceKind
service string
proxyID structs.ServiceID
address string
port int
meta map[string]string
taggedAddresses map[string]structs.ServiceAddress
proxyCfg structs.ConnectProxyConfig
token string
ch chan cache.UpdateEvent ch chan cache.UpdateEvent
snapCh chan ConfigSnapshot snapCh chan ConfigSnapshot
reqCh chan chan *ConfigSnapshot reqCh chan chan *ConfigSnapshot
@ -93,6 +88,18 @@ type DNSConfig struct {
type ServerSNIFunc func(dc, nodeName string) string type ServerSNIFunc func(dc, nodeName string) string
type serviceInstance struct {
kind structs.ServiceKind
service string
proxyID structs.ServiceID
address string
port int
meta map[string]string
taggedAddresses map[string]structs.ServiceAddress
proxyCfg structs.ConnectProxyConfig
token string
}
func copyProxyConfig(ns *structs.NodeService) (structs.ConnectProxyConfig, error) { func copyProxyConfig(ns *structs.NodeService) (structs.ConnectProxyConfig, error) {
if ns == nil { if ns == nil {
return structs.ConnectProxyConfig{}, nil return structs.ConnectProxyConfig{}, nil
@ -139,11 +146,33 @@ func newState(ns *structs.NodeService, token string) (*state, error) {
return nil, errors.New("not a connect-proxy, terminating-gateway, mesh-gateway, or ingress-gateway") return nil, errors.New("not a connect-proxy, terminating-gateway, mesh-gateway, or ingress-gateway")
} }
proxyCfg, err := copyProxyConfig(ns) s, err := newServiceInstanceFromNodeService(ns, token)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &state{
serviceInstance: s,
// 10 is fairly arbitrary here but allow for the 3 mandatory and a
// reasonable number of upstream watches to all deliver their initial
// messages in parallel without blocking the cache.Notify loops. It's not a
// huge deal if we do for a short period so we don't need to be more
// conservative to handle larger numbers of upstreams correctly but gives
// some head room for normal operation to be non-blocking in most typical
// cases.
ch: make(chan cache.UpdateEvent, 10),
snapCh: make(chan ConfigSnapshot, 1),
reqCh: make(chan chan *ConfigSnapshot, 1),
}, nil
}
func newServiceInstanceFromNodeService(ns *structs.NodeService, token string) (serviceInstance, error) {
proxyCfg, err := copyProxyConfig(ns)
if err != nil {
return serviceInstance{}, err
}
taggedAddresses := make(map[string]structs.ServiceAddress) taggedAddresses := make(map[string]structs.ServiceAddress)
for k, v := range ns.TaggedAddresses { for k, v := range ns.TaggedAddresses {
taggedAddresses[k] = v taggedAddresses[k] = v
@ -154,7 +183,7 @@ func newState(ns *structs.NodeService, token string) (*state, error) {
meta[k] = v meta[k] = v
} }
return &state{ return serviceInstance{
kind: ns.Kind, kind: ns.Kind,
service: ns.Service, service: ns.Service,
proxyID: ns.CompoundServiceID(), proxyID: ns.CompoundServiceID(),
@ -164,17 +193,6 @@ func newState(ns *structs.NodeService, token string) (*state, error) {
taggedAddresses: taggedAddresses, taggedAddresses: taggedAddresses,
proxyCfg: proxyCfg, proxyCfg: proxyCfg,
token: token, token: token,
// 10 is fairly arbitrary here but allow for the 3 mandatory and a
// reasonable number of upstream watches to all deliver their initial
// messages in parallel without blocking the cache.Notify loops. It's not a
// huge deal if we do for a short period so we don't need to be more
// conservative to handle larger numbers of upstreams correctly but gives
// some head room for normal operation to be non-blocking in most typical
// cases.
ch: make(chan cache.UpdateEvent, 10),
snapCh: make(chan ConfigSnapshot, 1),
reqCh: make(chan chan *ConfigSnapshot, 1),
}, nil }, nil
} }