diff --git a/agent/consul/state/config_entry.go b/agent/consul/state/config_entry.go index e5eb4210b2..b56e8276cf 100644 --- a/agent/consul/state/config_entry.go +++ b/agent/consul/state/config_entry.go @@ -83,7 +83,10 @@ func (s *Restore) ConfigEntry(c structs.ConfigEntry) error { func (s *Store) ConfigEntry(ws memdb.WatchSet, kind, name string) (uint64, structs.ConfigEntry, error) { tx := s.db.Txn(false) defer tx.Abort() + return s.configEntryTxn(tx, ws, kind, name) +} +func (s *Store) configEntryTxn(tx *memdb.Txn, ws memdb.WatchSet, kind, name string) (uint64, structs.ConfigEntry, error) { // Get the index idx := maxIndexTxn(tx, configTableName) diff --git a/agent/structs/config_entry.go b/agent/structs/config_entry.go index 1dff93ca8d..6607d37bd3 100644 --- a/agent/structs/config_entry.go +++ b/agent/structs/config_entry.go @@ -16,6 +16,9 @@ import ( const ( ServiceDefaults string = "service-defaults" ProxyDefaults string = "proxy-defaults" + ServiceRouter string = "service-router" + ServiceSplitter string = "service-splitter" + ServiceResolver string = "service-resolver" ProxyConfigGlobal string = "global" @@ -231,6 +234,10 @@ func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) { "Config": "", }) + // TODO(rb): see if any changes are needed here for the discovery chain + + // TODO(rb): maybe do an initial kind/Kind switch and do kind-specific decoding? + var entry ConfigEntry kindVal, ok := raw["Kind"] @@ -340,6 +347,12 @@ func MakeConfigEntry(kind, name string) (ConfigEntry, error) { return &ServiceConfigEntry{Name: name}, nil case ProxyDefaults: return &ProxyConfigEntry{Name: name}, nil + case ServiceRouter: + return &ServiceRouterConfigEntry{Name: name}, nil + case ServiceSplitter: + return &ServiceSplitterConfigEntry{Name: name}, nil + case ServiceResolver: + return &ServiceResolverConfigEntry{Name: name}, nil default: return nil, fmt.Errorf("invalid config entry kind: %s", kind) } @@ -349,6 +362,8 @@ func ValidateConfigEntryKind(kind string) bool { switch kind { case ServiceDefaults, ProxyDefaults: return true + case ServiceRouter, ServiceSplitter, ServiceResolver: + return true default: return false } diff --git a/agent/structs/config_entry_discoverychain.go b/agent/structs/config_entry_discoverychain.go new file mode 100644 index 0000000000..70be8e5c9c --- /dev/null +++ b/agent/structs/config_entry_discoverychain.go @@ -0,0 +1,746 @@ +package structs + +import ( + "fmt" + "math" + "sort" + "time" + + "github.com/hashicorp/consul/acl" +) + +// ServiceRouterConfigEntry defines L7 (e.g. http) routing rules for a named +// service exposed in Connect. +// +// This config entry represents the topmost part of the discovery chain. Only +// one router config will be used per resolved discovery chain and is not +// otherwise discovered recursively (unlike splitter and resolver config +// entries). +// +// Router config entries will be restricted to only services that define their +// protocol as http-based (in centralized configuration). +type ServiceRouterConfigEntry struct { + Kind string + Name string + + // Routes is the list of routes to consider when processing L7 requests. + // The first rule to match in the list is terminal and stops further + // evaluation. + // + // Traffic that fails to match any of the provided routes will be routed to + // the default service. 
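	//
	// As a purely illustrative sketch (path and service names here are
	// hypothetical, not prescribed by this change), a single prefix rule
	// built from the types below might look like:
	//
	//	ServiceRoute{
	//		Match: &ServiceRouteMatch{
	//			HTTP: &ServiceRouteHTTPMatch{PathPrefix: "/admin"},
	//		},
	//		Destination: &ServiceRouteDestination{Service: "admin"},
	//	}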
+ Routes []ServiceRoute + + RaftIndex +} + +func (e *ServiceRouterConfigEntry) GetKind() string { + return ServiceRouter +} + +func (e *ServiceRouterConfigEntry) GetName() string { + if e == nil { + return "" + } + + return e.Name +} + +func (e *ServiceRouterConfigEntry) Normalize() error { + if e == nil { + return fmt.Errorf("config entry is nil") + } + + e.Kind = ServiceRouter + + // TODO(rb): anything to normalize? + + return nil +} + +func (e *ServiceRouterConfigEntry) Validate() error { + if e.Name == "" { + return fmt.Errorf("Name is required") + } + + // TODO(rb): enforce corresponding service has protocol=http + + // TODO(rb): validate the entire compiled chain? how? + + // TODO(rb): validate more + + // Technically you can have no explicit routes at all where just the + // catch-all is configured for you, but at that point maybe you should just + // delete it so it will default? + + return nil +} + +func (e *ServiceRouterConfigEntry) CanRead(rule acl.Authorizer) bool { + return canReadDiscoveryChain(e, rule) +} + +func (e *ServiceRouterConfigEntry) CanWrite(rule acl.Authorizer) bool { + return canWriteDiscoveryChain(e, rule) +} + +func (e *ServiceRouterConfigEntry) GetRaftIndex() *RaftIndex { + if e == nil { + return &RaftIndex{} + } + + return &e.RaftIndex +} + +func (e *ServiceRouterConfigEntry) ListRelatedServices() []string { + found := make(map[string]struct{}) + + // We always inject a default catch-all route to the same service as the router. + found[e.Name] = struct{}{} + + for _, route := range e.Routes { + if route.Destination != nil && route.Destination.Service != "" { + found[route.Destination.Service] = struct{}{} + } + } + + out := make([]string, 0, len(found)) + for svc, _ := range found { + out = append(out, svc) + } + sort.Strings(out) + return out +} + +// ServiceRoute is a single routing rule that routes traffic to the destination +// when the match criteria applies. +type ServiceRoute struct { + Match *ServiceRouteMatch `json:",omitempty"` + Destination *ServiceRouteDestination `json:",omitempty"` +} + +// ServiceRouteMatch is a set of criteria that can match incoming L7 requests. +type ServiceRouteMatch struct { + HTTP *ServiceRouteHTTPMatch `json:",omitempty"` + + // If we have non-http match criteria for other protocols in the future + // (gRPC, redis, etc) they can go here. +} + +// ServiceRouteHTTPMatch is a set of http-specific match criteria. +type ServiceRouteHTTPMatch struct { + PathExact string `json:",omitempty"` + PathPrefix string `json:",omitempty"` + PathRegex string `json:",omitempty"` + + Header []ServiceRouteHTTPMatchHeader `json:",omitempty"` + QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty"` + + Methods []string `json:",omitempty"` +} + +type ServiceRouteHTTPMatchHeader struct { + Name string + Present bool `json:",omitempty"` + Exact string `json:",omitempty"` + Prefix string `json:",omitempty"` + Suffix string `json:",omitempty"` + Regex string `json:",omitempty"` + Invert bool `json:",omitempty"` +} + +type ServiceRouteHTTPMatchQueryParam struct { + Name string + Value string `json:",omitempty"` + Regex bool `json:",omitempty"` +} + +// ServiceRouteDestination describes how to proxy the actual matching request +// to a service. +type ServiceRouteDestination struct { + // Service is the service to resolve instead of the default service. If + // empty then the default discovery chain service name is used. 
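	//
	// As a purely illustrative example combining the fields below (values
	// are hypothetical), a destination that rewrites the matched prefix and
	// retries connection failures might be:
	//
	//	ServiceRouteDestination{
	//		Service:               "admin",
	//		PrefixRewrite:         "/",
	//		NumRetries:            3,
	//		RetryOnConnectFailure: true,
	//	}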
+ Service string `json:",omitempty"` + + // ServiceSubset is a named subset of the given service to resolve instead + // of one defined as that service's DefaultSubset. If empty the default + // subset is used. + // + // If this field is specified then this route is ineligible for further + // splitting. + ServiceSubset string `json:",omitempty"` + + // Namespace is the namespace to resolve the service from instead of the + // current namespace. If empty the current namespace is assumed. + // + // If this field is specified then this route is ineligible for further + // splitting. + Namespace string `json:",omitempty"` + + // PrefixRewrite allows for the proxied request to have its matching path + // prefix modified before being sent to the destination. Described more + // below in the envoy implementation section. + PrefixRewrite string `json:",omitempty"` + + // RequestTimeout is the total amount of time permitted for the entire + // downstream request (and retries) to be processed. + RequestTimeout time.Duration `json:",omitempty"` + + // NumRetries is the number of times to retry the request when a retryable + // result occurs. This seems fairly proxy agnostic. + NumRetries uint32 `json:",omitempty"` + + // RetryOnConnectFailure allows for connection failure errors to trigger a + // retry. This should be expressible in other proxies as it's just a layer + // 4 failure bubbling up to layer 7. + RetryOnConnectFailure bool `json:",omitempty"` + + // RetryOnStatusCodes is a flat list of http response status codes that are + // eligible for retry. This again should be feasible in any sane proxy. + RetryOnStatusCodes []uint32 `json:",omitempty"` +} + +// ServiceSplitterConfigEntry defines how incoming requests are split across +// different subsets of a single service (like during staged canary rollouts), +// or perhaps across different services (like during a v2 rewrite or other type +// of codebase migration). +// +// This config entry represents the next hop of the discovery chain after +// routing. If no splitter config is defined the chain assumes 100% of traffic +// goes to the default service and discovery continues on to the resolution +// hop. +// +// Splitter configs are recursively collected while walking the discovery +// chain. +// +// Splitter config entries will be restricted to only services that define +// their protocol as http-based (in centralized configuration). +type ServiceSplitterConfigEntry struct { + Kind string + Name string + + // Splits is the configurations for the details of the traffic splitting. + // + // The sum of weights across all splits must add up to 100. + // + // If the split is within epsilon of 100 then the remainder is attributed + // to the FIRST split. 
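	//
	// For illustration only (subset names are hypothetical), a 90/10 canary
	// split within the same service could be expressed as:
	//
	//	[]ServiceSplit{
	//		{Weight: 90, ServiceSubset: "v1"},
	//		{Weight: 10, ServiceSubset: "v2"},
	//	}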
+ Splits []ServiceSplit + + RaftIndex +} + +func (e *ServiceSplitterConfigEntry) GetKind() string { + return ServiceSplitter +} + +func (e *ServiceSplitterConfigEntry) GetName() string { + if e == nil { + return "" + } + + return e.Name +} + +func (e *ServiceSplitterConfigEntry) Normalize() error { + if e == nil { + return fmt.Errorf("config entry is nil") + } + + e.Kind = ServiceSplitter + + // This slightly massages inputs to enforce that the smallest representable + // weight is 1/10000 or .01% + + if len(e.Splits) > 0 { + sumScaled := 0 + for i, split := range e.Splits { + weightScaled := scaleWeight(split.Weight) + e.Splits[i].Weight = float32(float32(weightScaled) / 100.0) + + sumScaled += weightScaled + } + } + + return nil +} + +func (e *ServiceSplitterConfigEntry) Validate() error { + if e.Name == "" { + return fmt.Errorf("Name is required") + } + + if len(e.Splits) == 0 { + return fmt.Errorf("no splits configured") + } + + const maxScaledWeight = 100 * 100 + + copyAsKey := func(s ServiceSplit) ServiceSplit { + s.Weight = 0 + return s + } + + // Make sure we didn't refer to the same thing twice. + found := make(map[ServiceSplit]struct{}) + for _, split := range e.Splits { + splitKey := copyAsKey(split) + if splitKey.Service == "" { + splitKey.Service = e.Name + } + if _, ok := found[splitKey]; ok { + return fmt.Errorf( + "split destination occurs more than once: service=%q, subset=%q, namespace=%q", + splitKey.Service, splitKey.ServiceSubset, splitKey.Namespace, + ) + } + found[splitKey] = struct{}{} + } + + sumScaled := 0 + for _, split := range e.Splits { + sumScaled += scaleWeight(split.Weight) + } + + if sumScaled != maxScaledWeight { + return fmt.Errorf("the sum of all split weights must be 100, not %f", float32(sumScaled)/100) + } + + // TODO(rb): enforce corresponding service has protocol=http + + // TODO(rb): validate the entire compiled chain? how? + + return nil +} + +// scaleWeight assumes the input is a value between 0 and 100 representing +// shares out of a percentile range. The function will convert to a unit +// representing 0.01% units in the same manner as you may convert $0.98 to 98 +// cents. +func scaleWeight(v float32) int { + return int(math.Round(float64(v * 100.0))) +} + +func (e *ServiceSplitterConfigEntry) CanRead(rule acl.Authorizer) bool { + return canReadDiscoveryChain(e, rule) +} + +func (e *ServiceSplitterConfigEntry) CanWrite(rule acl.Authorizer) bool { + return canWriteDiscoveryChain(e, rule) +} + +func (e *ServiceSplitterConfigEntry) GetRaftIndex() *RaftIndex { + if e == nil { + return &RaftIndex{} + } + + return &e.RaftIndex +} + +func (e *ServiceSplitterConfigEntry) ListRelatedServices() []string { + found := make(map[string]struct{}) + + for _, split := range e.Splits { + if split.Service != "" { + found[split.Service] = struct{}{} + } + } + + out := make([]string, 0, len(found)) + for svc, _ := range found { + out = append(out, svc) + } + sort.Strings(out) + return out +} + +// ServiceSplit defines how much traffic to send to which set of service +// instances during a traffic split. +type ServiceSplit struct { + // A value between 0 and 100 reflecting what portion of traffic should be + // directed to this split. + // + // The smallest representable weight is 1/10000 or .01% + // + // If the split is within epsilon of 100 then the remainder is attributed + // to the FIRST split. + Weight float32 + + // Service is the service to resolve instead of the default (optional). 
+ Service string `json:",omitempty"` + + // ServiceSubset is a named subset of the given service to resolve instead + // of one defined as that service's DefaultSubset. If empty the default + // subset is used (optional). + // + // If this field is specified then this route is ineligible for further + // splitting. + ServiceSubset string `json:",omitempty"` + + // Namespace is the namespace to resolve the service from instead of the + // current namespace. If empty the current namespace is assumed (optional). + // + // If this field is specified then this route is ineligible for further + // splitting. + Namespace string `json:",omitempty"` +} + +// ServiceResolverConfigEntry defines which instances of a service should +// satisfy discovery requests for a given named service. +// +// This config entry represents the next hop of the discovery chain after +// splitting. If no resolver config is defined the chain assumes 100% of +// traffic goes to the healthy instances of the default service in the current +// datacenter+namespace and discovery terminates. +// +// Resolver configs are recursively collected while walking the chain. +// +// Resolver config entries will be valid for services defined with any protocol +// (in centralized configuration). +type ServiceResolverConfigEntry struct { + Kind string + Name string + + // DefaultSubset is the subset to use when no explicit subset is + // requested. If empty the unnamed subset is used. + DefaultSubset string `json:",omitempty"` + + // Subsets is a map of subset name to subset definition for all + // usable named subsets of this service. The map key is the name + // of the subset and all names must be valid DNS subdomain elements + // so they can be used in SNI FQDN headers for the Connect Gateways + // feature. + // + // This may be empty, in which case only the unnamed default subset + // will be usable. + Subsets map[string]ServiceResolverSubset `json:",omitempty"` + + // Redirect is a service/subset/datacenter/namespace to resolve + // instead of the requested service (optional). + // + // When configured, all occurrences of this resolver in any discovery + // chain evaluation will be substituted for the supplied redirect + // EXCEPT when the redirect has already been applied. + // + // When substituting the supplied redirect into the discovery chain + // all other fields beside Kind/Name/Redirect will be ignored. + Redirect *ServiceResolverRedirect `json:",omitempty"` + + // Failover controls when and how to reroute traffic to an alternate pool + // of service instances. + // + // The map is keyed by the service subset it applies to, and the special + // string "*" is a wildcard that applies to any subset not otherwise + // specified here. + Failover map[string]ServiceResolverFailover `json:",omitempty"` + + // ConnectTimeout is the timeout for establishing new network connections + // to this service. + ConnectTimeout time.Duration `json:",omitempty"` + + RaftIndex +} + +func (e *ServiceResolverConfigEntry) GetKind() string { + return ServiceResolver +} + +func (e *ServiceResolverConfigEntry) GetName() string { + if e == nil { + return "" + } + + return e.Name +} + +func (e *ServiceResolverConfigEntry) Normalize() error { + if e == nil { + return fmt.Errorf("config entry is nil") + } + + e.Kind = ServiceResolver + + // TODO(rb): anything to normalize? 
+ + return nil +} + +func (e *ServiceResolverConfigEntry) Validate() error { + if e.Name == "" { + return fmt.Errorf("Name is required") + } + + if len(e.Subsets) > 0 { + for name, _ := range e.Subsets { + if name == "" { + return fmt.Errorf("Subset defined with empty name") + } + } + } + + isSubset := func(subset string) bool { + if len(e.Subsets) > 0 { + _, ok := e.Subsets[subset] + return ok + } + return false + } + + if e.DefaultSubset != "" && !isSubset(e.DefaultSubset) { + return fmt.Errorf("DefaultSubset %q is not a valid subset", e.DefaultSubset) + } + + if e.Redirect != nil { + r := e.Redirect + + if r.Service == "" && r.ServiceSubset == "" && r.Namespace == "" && r.Datacenter == "" { + return fmt.Errorf("Redirect is empty") + } + + if r.Service == "" { + if r.ServiceSubset != "" { + return fmt.Errorf("Redirect.ServiceSubset defined without Redirect.Service") + } + if r.Namespace != "" { + return fmt.Errorf("Redirect.Namespace defined without Redirect.Service") + } + } else if r.Service == e.Name { + // TODO(rb): prevent self loops? + if r.ServiceSubset != "" && !isSubset(r.ServiceSubset) { + return fmt.Errorf("Redirect.ServiceSubset %q is not a valid subset of %q", r.ServiceSubset, r.Service) + } + } else { + // TODO(rb): handle validating subsets for other services + } + } + + if len(e.Failover) > 0 { + for subset, f := range e.Failover { + if subset != "*" && !isSubset(subset) { + return fmt.Errorf("Bad Failover[%q]: not a valid subset", subset) + } + + if f.Service == "" && f.ServiceSubset == "" && len(f.Datacenters) == 0 { + return fmt.Errorf("Bad Failover[%q] one of Service, ServiceSubset, or Datacenters is required", subset) + } + + if f.ServiceSubset != "" { + if f.Service == "" || f.Service == e.Name { + if !isSubset(f.ServiceSubset) { + return fmt.Errorf("Bad Failover[%q].ServiceSubset %q is not a valid subset of %q", subset, f.ServiceSubset, f.Service) + } + } else { + // TODO(rb): handle validating subsets for other services + } + } + + if f.OverprovisioningFactor < 0 { + return fmt.Errorf("Bad Failover[%q].OverprovisioningFactor '%d', must be >= 0", subset, f.OverprovisioningFactor) + } + + // TODO(rb): more extensive validation will require graph traversal + + for _, dc := range f.Datacenters { + if dc == "" { + return fmt.Errorf("Bad Failover[%q].Datacenters: found empty datacenter", subset) + } + } + } + } + + if e.ConnectTimeout < 0 { + return fmt.Errorf("Bad ConnectTimeout '%s', must be >= 0", e.ConnectTimeout) + } + + // TODO(rb): validate the entire compiled chain? how? 
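	// As a purely illustrative reference (names are hypothetical), an entry
	// that satisfies the checks above:
	//
	//	ServiceResolverConfigEntry{
	//		Kind:          ServiceResolver,
	//		Name:          "web",
	//		DefaultSubset: "v1",
	//		Subsets: map[string]ServiceResolverSubset{
	//			"v1": {Filter: "ServiceMeta.version == v1"},
	//			"v2": {Filter: "ServiceMeta.version == v2"},
	//		},
	//		Failover: map[string]ServiceResolverFailover{
	//			"*": {Datacenters: []string{"dc2"}},
	//		},
	//		ConnectTimeout: 5 * time.Second,
	//	}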
+ + // TODO(rb): validate more + + return nil +} + +func (e *ServiceResolverConfigEntry) CanRead(rule acl.Authorizer) bool { + return canReadDiscoveryChain(e, rule) +} + +func (e *ServiceResolverConfigEntry) CanWrite(rule acl.Authorizer) bool { + return canWriteDiscoveryChain(e, rule) +} + +func (e *ServiceResolverConfigEntry) GetRaftIndex() *RaftIndex { + if e == nil { + return &RaftIndex{} + } + + return &e.RaftIndex +} + +func (e *ServiceResolverConfigEntry) ListRelatedServices() []string { + found := make(map[string]struct{}) + + if e.Redirect != nil { + if e.Redirect.Service != "" { + found[e.Redirect.Service] = struct{}{} + } + } + + if len(e.Failover) > 0 { + for _, failover := range e.Failover { + if failover.Service != "" { + found[failover.Service] = struct{}{} + } + } + } + + out := make([]string, 0, len(found)) + for svc, _ := range found { + out = append(out, svc) + } + sort.Strings(out) + return out +} + +// ServiceResolverSubset defines a way to select a portion of the Consul +// catalog during service discovery. Anything that affects the ultimate catalog +// query performed OR post-processing on the results of that sort of query +// should be defined here. +type ServiceResolverSubset struct { + // Filter specifies the go-bexpr filter expression to be used for selecting + // instances of the requested service. + Filter string `json:",omitempty"` + + // OnlyPassing - Specifies the behavior of the resolver's health check + // filtering. If this is set to false, the results will include instances + // with checks in the passing as well as the warning states. If this is set + // to true, only instances with checks in the passing state will be + // returned. (behaves identically to the similarly named field on prepared + // queries). + OnlyPassing bool `json:",omitempty"` +} + +type ServiceResolverRedirect struct { + // Service is a service to resolve instead of the current service + // (optional). + Service string `json:",omitempty"` + + // ServiceSubset is a named subset of the given service to resolve instead + // of one defined as that service's DefaultSubset If empty the default + // subset is used (optional). + // + // If this is specified at least one of Service, Datacenter, or Namespace + // should be configured. + ServiceSubset string `json:",omitempty"` + + // Namespace is the namespace to resolve the service from instead of the + // current one (optional). + Namespace string `json:",omitempty"` + + // Datacenter is the datacenter to resolve the service from instead of the + // current one (optional). + Datacenter string `json:",omitempty"` +} + +// There are some restrictions on what is allowed in here: +// +// - Service, ServiceSubset, Namespace, NearestN, and Datacenters cannot all be +// empty at once. +// +// - Both 'NearestN' and 'Datacenters' may be specified at once. +// +type ServiceResolverFailover struct { + // Service is the service to resolve instead of the default as the failover + // group of instances (optional). + // + // This is a DESTINATION during failover. + Service string `json:",omitempty"` + + // ServiceSubset is the named subset of the requested service to resolve as + // the failover group of instances. If empty the default subset for the + // requested service is used (optional). + // + // This is a DESTINATION during failover. + ServiceSubset string `json:",omitempty"` + + // Namespace is the namespace to resolve the requested service from to form + // the failover group of instances. If empty the current namespace is used + // (optional). 
+ // + // This is a DESTINATION during failover. + Namespace string `json:",omitempty"` + + // NearestN is set to the number of remote datacenters to try, based on + // network coordinates. + // + // This is a DESTINATION during failover. + // + // TODO(rb): bring this back after normal DC failover works + // NearestN int `json:",omitempty"` + + // Datacenters is a fixed list of datacenters to try after NearestN. We + // never try a datacenter multiple times, so those are subtracted from this + // list before proceeding. + // + // This is a DESTINATION during failover. + Datacenters []string `json:",omitempty"` + + // OverprovisioningFactor is a pass through for envoy's + // overprovisioning_factor value. + // + // If omitted the overprovisioning factor value will be set so high as to + // imply binary failover (all or nothing). + OverprovisioningFactor int `json:",omitempty"` +} + +type discoveryChainConfigEntry interface { + ConfigEntry + // ListRelatedServices returns a list of other names of services referenced + // in this config entry. + ListRelatedServices() []string +} + +func canReadDiscoveryChain(entry discoveryChainConfigEntry, rule acl.Authorizer) bool { + if rule.OperatorRead() { + return true + } + + name := entry.GetName() + + if !rule.ServiceRead(name) { + return false + } + + for _, svc := range entry.ListRelatedServices() { + if svc == name { + continue + } + if !rule.ServiceRead(svc) { + return false + } + } + return true +} + +func canWriteDiscoveryChain(entry discoveryChainConfigEntry, rule acl.Authorizer) bool { + if rule.OperatorWrite() { + return true + } + + name := entry.GetName() + + if !rule.ServiceWrite(name, nil) { + return false + } + + for _, svc := range entry.ListRelatedServices() { + if svc == name { + continue + } + + // You only need read on related services to redirect traffic flow for + // your own service. 
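		// For example (hypothetical names): writing a "web" resolver that
		// redirects to "web-canary" requires service:write on "web" but only
		// service:read on "web-canary".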
+ if !rule.ServiceRead(svc) { + return false + } + } + return true +} diff --git a/agent/structs/config_entry_discoverychain_test.go b/agent/structs/config_entry_discoverychain_test.go new file mode 100644 index 0000000000..9099fa10b3 --- /dev/null +++ b/agent/structs/config_entry_discoverychain_test.go @@ -0,0 +1,463 @@ +package structs + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestServiceResolverConfigEntry(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + name string + entry *ServiceResolverConfigEntry + normalizeErr string + validateErr string + // check is called between normalize and validate + check func(t *testing.T, entry *ServiceResolverConfigEntry) + }{ + { + name: "nil", + entry: nil, + normalizeErr: "config entry is nil", + }, + { + name: "no name", + entry: &ServiceResolverConfigEntry{}, + validateErr: "Name is required", + }, + { + name: "empty", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + }, + }, + { + name: "empty subset name", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Subsets: map[string]ServiceResolverSubset{ + "": {OnlyPassing: true}, + }, + }, + validateErr: "Subset defined with empty name", + }, + { + name: "default subset does not exist", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + DefaultSubset: "gone", + Subsets: map[string]ServiceResolverSubset{ + "v1": {Filter: "ServiceMeta.version == v1"}, + }, + }, + validateErr: `DefaultSubset "gone" is not a valid subset`, + }, + { + name: "default subset does exist", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + DefaultSubset: "v1", + Subsets: map[string]ServiceResolverSubset{ + "v1": {Filter: "ServiceMeta.version == v1"}, + }, + }, + }, + { + name: "empty redirect", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Redirect: &ServiceResolverRedirect{}, + }, + validateErr: "Redirect is empty", + }, + { + name: "redirect subset with no service", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Redirect: &ServiceResolverRedirect{ + ServiceSubset: "next", + }, + }, + validateErr: "Redirect.ServiceSubset defined without Redirect.Service", + }, + { + name: "redirect namespace with no service", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Redirect: &ServiceResolverRedirect{ + Namespace: "alternate", + }, + }, + validateErr: "Redirect.Namespace defined without Redirect.Service", + }, + { + name: "self redirect with invalid subset", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Redirect: &ServiceResolverRedirect{ + Service: "test", + ServiceSubset: "gone", + }, + }, + validateErr: `Redirect.ServiceSubset "gone" is not a valid subset of "test"`, + }, + { + name: "self redirect with valid subset", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Redirect: &ServiceResolverRedirect{ + Service: "test", + ServiceSubset: "v1", + }, + Subsets: map[string]ServiceResolverSubset{ + "v1": {Filter: "ServiceMeta.version == v1"}, + }, + }, + }, + { + name: "simple wildcard failover", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Failover: map[string]ServiceResolverFailover{ + "*": ServiceResolverFailover{ + Datacenters: []string{"dc2"}, + }, + }, + }, + }, + { + name: "failover for missing subset", + entry: &ServiceResolverConfigEntry{ + Kind: 
ServiceResolver, + Name: "test", + Failover: map[string]ServiceResolverFailover{ + "gone": ServiceResolverFailover{ + Datacenters: []string{"dc2"}, + }, + }, + }, + validateErr: `Bad Failover["gone"]: not a valid subset`, + }, + { + name: "failover for present subset", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Subsets: map[string]ServiceResolverSubset{ + "v1": {Filter: "ServiceMeta.version == v1"}, + }, + Failover: map[string]ServiceResolverFailover{ + "v1": ServiceResolverFailover{ + Datacenters: []string{"dc2"}, + }, + }, + }, + }, + { + name: "failover empty", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Subsets: map[string]ServiceResolverSubset{ + "v1": {Filter: "ServiceMeta.version == v1"}, + }, + Failover: map[string]ServiceResolverFailover{ + "v1": ServiceResolverFailover{}, + }, + }, + validateErr: `Bad Failover["v1"] one of Service, ServiceSubset, or Datacenters is required`, + }, + { + name: "failover to self using invalid subset", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Subsets: map[string]ServiceResolverSubset{ + "v1": {Filter: "ServiceMeta.version == v1"}, + }, + Failover: map[string]ServiceResolverFailover{ + "v1": ServiceResolverFailover{ + Service: "test", + ServiceSubset: "gone", + }, + }, + }, + validateErr: `Bad Failover["v1"].ServiceSubset "gone" is not a valid subset of "test"`, + }, + { + name: "failover to self using valid subset", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Subsets: map[string]ServiceResolverSubset{ + "v1": {Filter: "ServiceMeta.version == v1"}, + "v2": {Filter: "ServiceMeta.version == v2"}, + }, + Failover: map[string]ServiceResolverFailover{ + "v1": ServiceResolverFailover{ + Service: "test", + ServiceSubset: "v2", + }, + }, + }, + }, + { + name: "failover with invalid overprovisioning factor", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Failover: map[string]ServiceResolverFailover{ + "*": ServiceResolverFailover{ + Service: "backup", + OverprovisioningFactor: -1, + }, + }, + }, + validateErr: `Bad Failover["*"].OverprovisioningFactor '-1', must be >= 0`, + }, + { + name: "failover with empty datacenters in list", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Failover: map[string]ServiceResolverFailover{ + "*": ServiceResolverFailover{ + Service: "backup", + Datacenters: []string{"", "dc2", "dc3"}, + }, + }, + }, + validateErr: `Bad Failover["*"].Datacenters: found empty datacenter`, + }, + { + name: "bad connect timeout", + entry: &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + ConnectTimeout: -1 * time.Second, + }, + validateErr: "Bad ConnectTimeout", + }, + } { + tc := tc + t.Run(tc.name, func(t *testing.T) { + err := tc.entry.Normalize() + if tc.normalizeErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.normalizeErr) + return + } + require.NoError(t, err) + + if tc.check != nil { + tc.check(t, tc.entry) + } + + err = tc.entry.Validate() + if tc.validateErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.validateErr) + return + } + require.NoError(t, err) + }) + } +} + +func TestServiceSplitterConfigEntry(t *testing.T) { + t.Parallel() + + makesplitter := func(splits ...ServiceSplit) *ServiceSplitterConfigEntry { + return &ServiceSplitterConfigEntry{ + Kind: ServiceSplitter, + Name: "test", + Splits: splits, + } + } + + makesplit := func(weight float32, 
service, serviceSubset, namespace string) ServiceSplit { + return ServiceSplit{ + Weight: weight, + Service: service, + ServiceSubset: serviceSubset, + Namespace: namespace, + } + } + + for _, tc := range []struct { + name string + entry *ServiceSplitterConfigEntry + normalizeErr string + validateErr string + // check is called between normalize and validate + check func(t *testing.T, entry *ServiceSplitterConfigEntry) + }{ + { + name: "nil", + entry: nil, + normalizeErr: "config entry is nil", + }, + { + name: "no name", + entry: &ServiceSplitterConfigEntry{}, + validateErr: "Name is required", + }, + { + name: "empty", + entry: makesplitter(), + validateErr: "no splits configured", + }, + { + name: "1 split", + entry: makesplitter( + makesplit(100, "test", "", ""), + ), + check: func(t *testing.T, entry *ServiceSplitterConfigEntry) { + require.Equal(t, float32(100), entry.Splits[0].Weight) + }, + }, + { + name: "1 split not enough weight", + entry: makesplitter( + makesplit(99.99, "test", "", ""), + ), + check: func(t *testing.T, entry *ServiceSplitterConfigEntry) { + require.Equal(t, float32(99.99), entry.Splits[0].Weight) + }, + validateErr: "the sum of all split weights must be 100", + }, + { + name: "1 split too much weight", + entry: makesplitter( + makesplit(100.01, "test", "", ""), + ), + check: func(t *testing.T, entry *ServiceSplitterConfigEntry) { + require.Equal(t, float32(100.01), entry.Splits[0].Weight) + }, + validateErr: "the sum of all split weights must be 100", + }, + { + name: "2 splits", + entry: makesplitter( + makesplit(99, "test", "v1", ""), + makesplit(1, "test", "v2", ""), + ), + check: func(t *testing.T, entry *ServiceSplitterConfigEntry) { + require.Equal(t, float32(99), entry.Splits[0].Weight) + require.Equal(t, float32(1), entry.Splits[1].Weight) + }, + }, + { + name: "2 splits - rounded up to smallest units", + entry: makesplitter( + makesplit(99.999, "test", "v1", ""), + makesplit(0.001, "test", "v2", ""), + ), + check: func(t *testing.T, entry *ServiceSplitterConfigEntry) { + require.Equal(t, float32(100), entry.Splits[0].Weight) + require.Equal(t, float32(0), entry.Splits[1].Weight) + }, + }, + { + name: "2 splits not enough weight", + entry: makesplitter( + makesplit(99.98, "test", "v1", ""), + makesplit(0.01, "test", "v2", ""), + ), + check: func(t *testing.T, entry *ServiceSplitterConfigEntry) { + require.Equal(t, float32(99.98), entry.Splits[0].Weight) + require.Equal(t, float32(0.01), entry.Splits[1].Weight) + }, + validateErr: "the sum of all split weights must be 100", + }, + { + name: "2 splits too much weight", + entry: makesplitter( + makesplit(100, "test", "v1", ""), + makesplit(0.01, "test", "v2", ""), + ), + check: func(t *testing.T, entry *ServiceSplitterConfigEntry) { + require.Equal(t, float32(100), entry.Splits[0].Weight) + require.Equal(t, float32(0.01), entry.Splits[1].Weight) + }, + validateErr: "the sum of all split weights must be 100", + }, + { + name: "3 splits", + entry: makesplitter( + makesplit(34, "test", "v1", ""), + makesplit(33, "test", "v2", ""), + makesplit(33, "test", "v3", ""), + ), + check: func(t *testing.T, entry *ServiceSplitterConfigEntry) { + require.Equal(t, float32(34), entry.Splits[0].Weight) + require.Equal(t, float32(33), entry.Splits[1].Weight) + require.Equal(t, float32(33), entry.Splits[2].Weight) + }, + }, + { + name: "3 splits one duplicated same weights", + entry: makesplitter( + makesplit(34, "test", "v1", ""), + makesplit(33, "test", "v2", ""), + makesplit(33, "test", "v2", ""), + ), + check: func(t 
*testing.T, entry *ServiceSplitterConfigEntry) { + require.Equal(t, float32(34), entry.Splits[0].Weight) + require.Equal(t, float32(33), entry.Splits[1].Weight) + require.Equal(t, float32(33), entry.Splits[2].Weight) + }, + validateErr: "split destination occurs more than once", + }, + { + name: "3 splits one duplicated diff weights", + entry: makesplitter( + makesplit(34, "test", "v1", ""), + makesplit(33, "test", "v2", ""), + makesplit(33, "test", "v1", ""), + ), + check: func(t *testing.T, entry *ServiceSplitterConfigEntry) { + require.Equal(t, float32(34), entry.Splits[0].Weight) + require.Equal(t, float32(33), entry.Splits[1].Weight) + require.Equal(t, float32(33), entry.Splits[2].Weight) + }, + validateErr: "split destination occurs more than once", + }, + } { + tc := tc + t.Run(tc.name, func(t *testing.T) { + err := tc.entry.Normalize() + if tc.normalizeErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.normalizeErr) + return + } + require.NoError(t, err) + + if tc.check != nil { + tc.check(t, tc.entry) + } + + err = tc.entry.Validate() + if tc.validateErr != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.validateErr) + return + } + require.NoError(t, err) + }) + } +} diff --git a/api/config_entry.go b/api/config_entry.go index 0c18963fd6..c0a1ded2c0 100644 --- a/api/config_entry.go +++ b/api/config_entry.go @@ -12,8 +12,12 @@ import ( ) const ( - ServiceDefaults string = "service-defaults" - ProxyDefaults string = "proxy-defaults" + ServiceDefaults string = "service-defaults" + ProxyDefaults string = "proxy-defaults" + ServiceRouter string = "service-router" + ServiceSplitter string = "service-splitter" + ServiceResolver string = "service-resolver" + ProxyConfigGlobal string = "global" ) @@ -83,11 +87,33 @@ func makeConfigEntry(kind, name string) (ConfigEntry, error) { return &ServiceConfigEntry{Name: name}, nil case ProxyDefaults: return &ProxyConfigEntry{Name: name}, nil + case ServiceRouter: + return &ServiceRouterConfigEntry{Name: name}, nil + case ServiceSplitter: + return &ServiceSplitterConfigEntry{Name: name}, nil + case ServiceResolver: + return &ServiceResolverConfigEntry{Name: name}, nil default: return nil, fmt.Errorf("invalid config entry kind: %s", kind) } } +func MakeConfigEntry(kind, name string) (ConfigEntry, error) { + return makeConfigEntry(kind, name) +} + +// DEPRECATED: TODO(rb): remove? +// +// DecodeConfigEntry only successfully works on config entry kinds +// "service-defaults" and "proxy-defaults" (as of Consul 1.5). +// +// This is because by parsing HCL into map[string]interface{} and then trying +// to decode it with mapstructure we run into the problem where hcl generically +// decodes many things into map[string][]interface{} at intermediate nodes in +// the resulting structure (for nested structs not otherwise in an enclosing +// slice). This breaks decoding. +// +// Until a better solution is arrived at don't use this method. 
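//
// For example (hypothetical payload), an HCL body such as:
//
//	Kind = "service-resolver"
//	Name = "web"
//	Redirect {
//		Service = "other"
//	}
//
// reaches this function with the nested Redirect block wrapped in a slice of
// objects rather than as a single nested map, which mapstructure cannot
// decode into the *ServiceResolverRedirect field.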
func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) { var entry ConfigEntry diff --git a/api/config_entry_discoverychain.go b/api/config_entry_discoverychain.go new file mode 100644 index 0000000000..0ebeaa66e9 --- /dev/null +++ b/api/config_entry_discoverychain.go @@ -0,0 +1,129 @@ +package api + +import "time" + +type ServiceRouterConfigEntry struct { + Kind string + Name string + + Routes []ServiceRoute + + CreateIndex uint64 + ModifyIndex uint64 +} + +func (e *ServiceRouterConfigEntry) GetKind() string { return e.Kind } +func (e *ServiceRouterConfigEntry) GetName() string { return e.Name } +func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *ServiceRouterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } + +type ServiceRoute struct { + Match *ServiceRouteMatch `json:",omitempty"` + Destination *ServiceRouteDestination `json:",omitempty"` +} + +type ServiceRouteMatch struct { + HTTP *ServiceRouteHTTPMatch `json:",omitempty"` +} + +type ServiceRouteHTTPMatch struct { + PathExact string `json:",omitempty"` + PathPrefix string `json:",omitempty"` + PathRegex string `json:",omitempty"` + + Header []ServiceRouteHTTPMatchHeader `json:",omitempty"` + QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty"` + + Methods []string `json:",omitempty"` +} + +type ServiceRouteHTTPMatchHeader struct { + Name string + Present bool `json:",omitempty"` + Exact string `json:",omitempty"` + Prefix string `json:",omitempty"` + Suffix string `json:",omitempty"` + Regex string `json:",omitempty"` + Invert bool `json:",omitempty"` +} + +type ServiceRouteHTTPMatchQueryParam struct { + Name string + Value string `json:",omitempty"` + Regex bool `json:",omitempty"` +} + +type ServiceRouteDestination struct { + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty"` + Namespace string `json:",omitempty"` + PrefixRewrite string `json:",omitempty"` + RequestTimeout time.Duration `json:",omitempty"` + NumRetries uint32 `json:",omitempty"` + RetryOnConnectFailure bool `json:",omitempty"` + RetryOnStatusCodes []uint32 `json:",omitempty"` +} + +type ServiceSplitterConfigEntry struct { + Kind string + Name string + + Splits []ServiceSplit + + CreateIndex uint64 + ModifyIndex uint64 +} + +func (e *ServiceSplitterConfigEntry) GetKind() string { return e.Kind } +func (e *ServiceSplitterConfigEntry) GetName() string { return e.Name } +func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } + +type ServiceSplit struct { + Weight float32 + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty"` + Namespace string `json:",omitempty"` +} + +type ServiceResolverConfigEntry struct { + Kind string + Name string + + DefaultSubset string `json:",omitempty"` + Subsets map[string]ServiceResolverSubset `json:",omitempty"` + Redirect *ServiceResolverRedirect `json:",omitempty"` + Failover map[string]ServiceResolverFailover `json:",omitempty"` + ConnectTimeout time.Duration `json:",omitempty"` + + CreateIndex uint64 + ModifyIndex uint64 +} + +func (e *ServiceResolverConfigEntry) GetKind() string { return ServiceResolver } +func (e *ServiceResolverConfigEntry) GetName() string { return e.Name } +func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } + +type ServiceResolverSubset struct 
{ + Filter string `json:",omitempty"` + OnlyPassing bool `json:",omitempty"` +} + +type ServiceResolverRedirect struct { + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty"` + Namespace string `json:",omitempty"` + Datacenter string `json:",omitempty"` +} + +type ServiceResolverFailover struct { + Service string `json:",omitempty"` + ServiceSubset string `json:",omitempty"` + Namespace string `json:",omitempty"` + Datacenters []string `json:",omitempty"` + OverprovisioningFactor int `json:",omitempty"` + + // TODO(rb): bring this back after normal DC failover works + // NearestN int `json:",omitempty"` +} diff --git a/api/config_entry_discoverychain_test.go b/api/config_entry_discoverychain_test.go new file mode 100644 index 0000000000..dcfaccefed --- /dev/null +++ b/api/config_entry_discoverychain_test.go @@ -0,0 +1,192 @@ +package api + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestAPI_ConfigEntry_DiscoveryChain(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + config_entries := c.ConfigEntries() + + t.Run("Service Router", func(t *testing.T) { + // use one mega object to avoid multiple trips + makeEntry := func() *ServiceRouterConfigEntry { + return &ServiceRouterConfigEntry{ + Kind: ServiceRouter, + Name: "test", + Routes: []ServiceRoute{ + { + Match: &ServiceRouteMatch{ + HTTP: &ServiceRouteHTTPMatch{ + PathPrefix: "/prefix", + Header: []ServiceRouteHTTPMatchHeader{ + {Name: "x-debug", Exact: "1"}, + }, + QueryParam: []ServiceRouteHTTPMatchQueryParam{ + {Name: "debug", Value: "1"}, + }, + Methods: []string{"GET", "POST"}, + }, + }, + Destination: &ServiceRouteDestination{ + Service: "other", + ServiceSubset: "v2", + Namespace: "sec", + PrefixRewrite: "/", + RequestTimeout: 5 * time.Second, + NumRetries: 5, + RetryOnConnectFailure: true, + RetryOnStatusCodes: []uint32{500, 503, 401}, + }, + }, + }, + } + } + + // set it + _, wm, err := config_entries.Set(makeEntry(), nil) + require.NoError(t, err) + require.NotNil(t, wm) + require.NotEqual(t, 0, wm.RequestTime) + + // get it + entry, qm, err := config_entries.Get(ServiceRouter, "test", nil) + require.NoError(t, err) + require.NotNil(t, qm) + require.NotEqual(t, 0, qm.RequestTime) + + // verify it + readRouter, ok := entry.(*ServiceRouterConfigEntry) + require.True(t, ok) + readRouter.ModifyIndex = 0 // reset for Equals() + readRouter.CreateIndex = 0 // reset for Equals() + + goldenEntry := makeEntry() + require.Equal(t, goldenEntry, readRouter) + + // TODO(rb): cas? + // TODO(rb): list? 
+ }) + + t.Run("Service Splitter", func(t *testing.T) { + // use one mega object to avoid multiple trips + makeEntry := func() *ServiceSplitterConfigEntry { + return &ServiceSplitterConfigEntry{ + Kind: ServiceSplitter, + Name: "test", + Splits: []ServiceSplit{ + { + Weight: 90, + Service: "a", + ServiceSubset: "b", + Namespace: "c", + }, + { + Weight: 10, + Service: "x", + ServiceSubset: "y", + Namespace: "z", + }, + }, + } + } + + // set it + _, wm, err := config_entries.Set(makeEntry(), nil) + require.NoError(t, err) + require.NotNil(t, wm) + require.NotEqual(t, 0, wm.RequestTime) + + // get it + entry, qm, err := config_entries.Get(ServiceSplitter, "test", nil) + require.NoError(t, err) + require.NotNil(t, qm) + require.NotEqual(t, 0, qm.RequestTime) + + // verify it + readSplitter, ok := entry.(*ServiceSplitterConfigEntry) + require.True(t, ok) + readSplitter.ModifyIndex = 0 // reset for Equals() + readSplitter.CreateIndex = 0 // reset for Equals() + + goldenEntry := makeEntry() + require.Equal(t, goldenEntry, readSplitter) + + // TODO(rb): cas? + // TODO(rb): list? + }) + + for name, tc := range map[string]func() *ServiceResolverConfigEntry{ + "with-redirect": func() *ServiceResolverConfigEntry { + return &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + Redirect: &ServiceResolverRedirect{ + Service: "a", + ServiceSubset: "b", + Namespace: "c", + Datacenter: "d", + }, + } + }, + "no-redirect": func() *ServiceResolverConfigEntry { + return &ServiceResolverConfigEntry{ + Kind: ServiceResolver, + Name: "test", + DefaultSubset: "v1", + Subsets: map[string]ServiceResolverSubset{ + "v1": ServiceResolverSubset{ + Filter: "ServiceMeta.version == v1", + }, + "v2": ServiceResolverSubset{ + Filter: "ServiceMeta.version == v2", + }, + }, + Failover: map[string]ServiceResolverFailover{ + "*": ServiceResolverFailover{ + Datacenters: []string{"dc2"}, + }, + "v1": ServiceResolverFailover{ + Service: "alternate", + }, + }, + ConnectTimeout: 5 * time.Second, + } + }, + } { + // use one mega object to avoid multiple trips + makeEntry := tc + t.Run("Service Resolver - "+name, func(t *testing.T) { + + // set it + _, wm, err := config_entries.Set(makeEntry(), nil) + require.NoError(t, err) + require.NotNil(t, wm) + require.NotEqual(t, 0, wm.RequestTime) + + // get it + entry, qm, err := config_entries.Get(ServiceResolver, "test", nil) + require.NoError(t, err) + require.NotNil(t, qm) + require.NotEqual(t, 0, qm.RequestTime) + + // verify it + readResolver, ok := entry.(*ServiceResolverConfigEntry) + require.True(t, ok) + readResolver.ModifyIndex = 0 // reset for Equals() + readResolver.CreateIndex = 0 // reset for Equals() + + goldenEntry := makeEntry() + require.Equal(t, goldenEntry, readResolver) + + // TODO(rb): cas? + // TODO(rb): list? + }) + } +} diff --git a/command/config/config.go b/command/config/config.go index 3e07dd1907..be05664381 100644 --- a/command/config/config.go +++ b/command/config/config.go @@ -31,7 +31,7 @@ Usage: consul config [options] [args] Configuration system. Here are some simple examples, and more detailed examples are available in the subcommands or the documentation. 
- Write a config:: + Write a config: $ consul config write web.serviceconf.hcl diff --git a/command/config/read/config_read.go b/command/config/read/config_read.go index ec3fbf54b2..6047979b30 100644 --- a/command/config/read/config_read.go +++ b/command/config/read/config_read.go @@ -26,6 +26,8 @@ type cmd struct { } func (c *cmd) init() { + // TODO(rb): needs a way to print the metadata so you know the modify index to use for 'config write -cas' + c.flags = flag.NewFlagSet("", flag.ContinueOnError) c.flags.StringVar(&c.kind, "kind", "", "The kind of configuration to read.") c.flags.StringVar(&c.name, "name", "", "The name of configuration to read.") diff --git a/command/config/write/config_write.go b/command/config/write/config_write.go index 418b49921b..6df3e18c5e 100644 --- a/command/config/write/config_write.go +++ b/command/config/write/config_write.go @@ -44,6 +44,43 @@ func (c *cmd) init() { c.help = flags.Usage(help, c.flags) } +type genericConfig struct { + Kind1 string `hcl:"Kind"` + Kind2 string `hcl:"kind"` +} + +func (c *genericConfig) Kind() string { + if c.Kind1 != "" { + return c.Kind1 + } + return c.Kind2 +} + +func decodeConfigEntryFromHCL(data string) (api.ConfigEntry, error) { + // For why this is necessary see the comment block on api.DecodeConfigEntry. + var generic genericConfig + err := hcl.Decode(&generic, data) + if err != nil { + return nil, err + } + + kindVal := generic.Kind() + if kindVal == "" { + return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level") + } + + entry, err := api.MakeConfigEntry(kindVal, "") + if err != nil { + return nil, err + } + + err = hcl.Decode(entry, data) + if err != nil { + return nil, err + } + return entry, nil +} + func (c *cmd) Run(args []string) int { if err := c.flags.Parse(args); err != nil { return 1 @@ -61,15 +98,7 @@ func (c *cmd) Run(args []string) int { return 1 } - // parse the data - var raw map[string]interface{} - err = hcl.Decode(&raw, data) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to decode config entry input: %v", err)) - return 1 - } - - entry, err := api.DecodeConfigEntry(raw) + entry, err := decodeConfigEntryFromHCL(string(data)) if err != nil { c.UI.Error(fmt.Sprintf("Failed to decode config entry input: %v", err)) return 1
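The api package additions above also make the new kinds writable from client
code, not just via "consul config write". A minimal sketch (service and
datacenter names are hypothetical; client is assumed to be an *api.Client
obtained from api.NewClient):

	entry := &api.ServiceResolverConfigEntry{
		Kind: api.ServiceResolver,
		Name: "web",
		Redirect: &api.ServiceResolverRedirect{
			Service:    "web-legacy",
			Datacenter: "dc2",
		},
	}
	// Set returns (ok, write meta, error), matching the usage in the api tests above.
	_, _, err := client.ConfigEntries().Set(entry, nil)
	if err != nil {
		// handle the error
	}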