consul/agent/http.go

package agent
import (
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/http/pprof"
"net/netip"
"net/url"
"reflect"
"regexp"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/NYTimes/gziphandler"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/go-cleanhttp"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/cache"
"github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/consul"
"github.com/hashicorp/consul/agent/consul/rate"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/agent/uiserver"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/logging"
"github.com/hashicorp/consul/proto/pbcommon"
)
var HTTPSummaries = []prometheus.SummaryDefinition{
{
Name: []string{"api", "http"},
Help: "Samples how long it takes to service the given HTTP request for the given verb and path.",
},
}
// MethodNotAllowedError should be returned by a handler when the HTTP method is not allowed.
type MethodNotAllowedError struct {
Method string
Allow []string
}
func (e MethodNotAllowedError) Error() string {
return fmt.Sprintf("method %s not allowed", e.Method)
}
// CodeWithPayloadError allows returning a non-200 HTTP status code while
// still returning the handler's payload instead of a plain-text error body.
type CodeWithPayloadError struct {
Reason string
StatusCode int
ContentType string
}
func (e CodeWithPayloadError) Error() string {
return e.Reason
}
// HTTPError is returned by the handler when a specific http error
// code is needed alongside a plain text response.
type HTTPError struct {
StatusCode int
Reason string
}
func (h HTTPError) Error() string {
return h.Reason
}
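// To illustrate how these error types drive the response written by wrap(),
// a minimal hypothetical endpoint (not part of this file) could look like:
//
//	func (s *HTTPHandlers) ExampleGet(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
//		item, ok := lookupExample(req) // hypothetical helper
//		if !ok {
//			// wrap() turns this into a 404 with the reason as the plain-text body.
//			return nil, HTTPError{StatusCode: http.StatusNotFound, Reason: "example not found"}
//		}
//		// A nil error with a non-nil object is marshaled as a 200 JSON response.
//		return item, nil
//	}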
// HTTPHandlers provides an HTTP api for an agent.
type HTTPHandlers struct {
agent *Agent
denylist *Denylist
configReloaders []ConfigReloader
h http.Handler
metricsProxyCfg atomic.Value
// proxyTransport is used by UIMetricsProxy to keep
// a managed pool of connections.
proxyTransport http.RoundTripper
}
// endpoint is a Consul-specific HTTP handler that takes the usual arguments in
// but returns a response object and error, both of which are handled in a
// common manner by Consul's HTTP server.
type endpoint func(resp http.ResponseWriter, req *http.Request) (interface{}, error)
// unboundEndpoint is an endpoint method on a server.
type unboundEndpoint func(s *HTTPHandlers, resp http.ResponseWriter, req *http.Request) (interface{}, error)
// endpoints is a map from URL pattern to unbound endpoint.
var endpoints map[string]unboundEndpoint
// allowedMethods is a map from endpoint prefix to supported HTTP methods.
// An empty slice means the endpoint handles OPTIONS requests and method-not-allowed errors itself.
var allowedMethods map[string][]string = make(map[string][]string)
// registerEndpoint registers a new endpoint, which should be done at package
// init() time.
func registerEndpoint(pattern string, methods []string, fn unboundEndpoint) {
if endpoints == nil {
endpoints = make(map[string]unboundEndpoint)
}
if endpoints[pattern] != nil || allowedMethods[pattern] != nil {
panic(fmt.Errorf("Pattern %q is already registered", pattern))
}
endpoints[pattern] = fn
allowedMethods[pattern] = methods
}
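// Endpoints elsewhere in this package are wired up at init() time roughly as
// in the sketch below (the pattern and handler here are hypothetical):
//
//	func init() {
//		registerEndpoint("/v1/example/thing", []string{"GET", "PUT"}, (*HTTPHandlers).ExampleThing)
//	}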
// wrappedMux hangs on to the underlying mux for unit tests.
type wrappedMux struct {
mux *http.ServeMux
handler http.Handler
}
// ServeHTTP implements the http.Handler interface.
func (w *wrappedMux) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
w.handler.ServeHTTP(resp, req)
}
// ReloadConfig updates any internal state when the config is changed at
// runtime.
func (s *HTTPHandlers) ReloadConfig(newCfg *config.RuntimeConfig) error {
for _, r := range s.configReloaders {
if err := r(newCfg); err != nil {
return err
}
}
return nil
}
// handler is used to initialize the Handler. In agent code we only ever call
// this once during agent initialization, so it was always intended as a
// single-pass init method. However, many tests rely on it as a cheaper way to
// get a handler to call ServeHTTP against and end up calling it multiple
// times on a single agent instance. Until this method had to manage state
// that might be affected by a reload or otherwise vary over time, that was
// not problematic, although it was wasteful to redo all this setup work
// multiple times in one test.
//
// Now that uiserver and possibly other components need to handle reloadable
// state, having tests randomly clobber that state with the original config on
// each call gets confusing fast. So handler memoizes its result: it may be
// called multiple times on the same agent, but it only does the work the
// first time and returns the same handler on subsequent calls.
//
// The `enableDebug` argument used in the first call is the one that takes
// effect; a later change does nothing. The same goes for the initial config:
// for example, if the config is reloaded with the UI enabled but it was not
// enabled originally, the returned http.Handler will still have it disabled.
//
// The first call must not be concurrent with any other call. Subsequent calls
// may be concurrent with HTTP requests since no state is modified.
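// In other words, assuming an *HTTPHandlers value named srv, both calls below
// return the identical handler and only the first one does the setup work:
//
//	h1 := srv.handler(true)
//	h2 := srv.handler(false) // enableDebug is ignored on this second call; h2 == h1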
func (s *HTTPHandlers) handler(enableDebug bool) http.Handler {
// Memoize multiple calls.
if s.h != nil {
return s.h
}
mux := http.NewServeMux()
// handleFuncMetrics takes the given pattern and handler and wraps them to
// produce metrics based on the pattern and request.
handleFuncMetrics := func(pattern string, handler http.HandlerFunc) {
// Transform the pattern into a valid label by replacing '/' with '_'.
// Omit the leading slash.
// Distinguish things like /v1/query from /v1/query/<query_id> by having
// an extra underscore.
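// For example, the pattern "/v1/query" becomes the label "v1_query" while
// "/v1/query/" becomes "v1_query_".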
path_label := strings.Replace(pattern[1:], "/", "_", -1)
// Register the wrapper.
wrapper := func(resp http.ResponseWriter, req *http.Request) {
start := time.Now()
handler(resp, req)
labels := []metrics.Label{{Name: "method", Value: req.Method}, {Name: "path", Value: path_label}}
metrics.MeasureSinceWithLabels([]string{"api", "http"}, start, labels)
}
var gzipHandler http.Handler
minSize := gziphandler.DefaultMinSize
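// These streaming endpoints set the minimum size to zero so the gzip writer
// does not hold back small writes while waiting to reach the default
// threshold before compressing.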
if pattern == "/v1/agent/monitor" || pattern == "/v1/agent/metrics/stream" {
minSize = 0
}
gzipWrapper, err := gziphandler.GzipHandlerWithOpts(gziphandler.MinSize(minSize))
if err == nil {
gzipHandler = gzipWrapper(http.HandlerFunc(wrapper))
} else {
gzipHandler = gziphandler.GzipHandler(http.HandlerFunc(wrapper))
}
mux.Handle(pattern, gzipHandler)
}
// handlePProf takes the given pattern and pprof handler
// and wraps it to add authorization and metrics
handlePProf := func(pattern string, handler http.HandlerFunc) {
wrapper := func(resp http.ResponseWriter, req *http.Request) {
var token string
s.parseToken(req, &token)
authz, err := s.agent.delegate.ResolveTokenAndDefaultMeta(token, nil, nil)
if err != nil {
resp.WriteHeader(http.StatusForbidden)
return
}
// If the token provided does not have the necessary permissions,
// write a forbidden response
// TODO(partitions): should this be possible in a partition?
// TODO(acl-error-enhancements): We should return error details somehow here.
if authz.OperatorRead(nil) != acl.Allow {
resp.WriteHeader(http.StatusForbidden)
return
}
// Call the pprof handler
handler(resp, req)
}
handleFuncMetrics(pattern, http.HandlerFunc(wrapper))
}
mux.HandleFunc("/", s.Index)
for pattern, fn := range endpoints {
thisFn := fn
methods := allowedMethods[pattern]
bound := func(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
return thisFn(s, resp, req)
}
handleFuncMetrics(pattern, s.wrap(bound, methods))
}
// If enableDebug is set or ACLs are enabled, register the wrapped pprof handlers
if enableDebug || !s.checkACLDisabled() {
handlePProf("/debug/pprof/", pprof.Index)
handlePProf("/debug/pprof/cmdline", pprof.Cmdline)
handlePProf("/debug/pprof/profile", pprof.Profile)
handlePProf("/debug/pprof/symbol", pprof.Symbol)
handlePProf("/debug/pprof/trace", pprof.Trace)
}
if s.IsUIEnabled() {
// Note that we _don't_ support reloading ui_config.{enabled, content_dir,
// content_path} since this only runs at initial startup.
uiHandler := uiserver.NewHandler(
s.agent.config,
s.agent.logger.Named(logging.HTTP),
s.uiTemplateDataTransform,
)
s.configReloaders = append(s.configReloaders, uiHandler.ReloadConfig)
// Wrap it to add the headers specified by the http_config.response_headers
// user config
uiHandlerWithHeaders := serveHandlerWithHeaders(
uiHandler,
s.agent.config.HTTPResponseHeaders,
)
mux.Handle("/robots.txt", uiHandlerWithHeaders)
mux.Handle(
s.agent.config.UIConfig.ContentPath,
http.StripPrefix(
s.agent.config.UIConfig.ContentPath,
uiHandlerWithHeaders,
),
)
}
// Initialize (reloadable) metrics proxy config
s.metricsProxyCfg.Store(s.agent.config.UIConfig.MetricsProxy)
s.configReloaders = append(s.configReloaders, func(cfg *config.RuntimeConfig) error {
s.metricsProxyCfg.Store(cfg.UIConfig.MetricsProxy)
return nil
})
// Wrap the whole mux with a handler that bans URLs with non-printable
// characters, unless disabled explicitly to deal with old keys that fail this
// check.
h := cleanhttp.PrintablePathCheckHandler(mux, nil)
if s.agent.config.DisableHTTPUnprintableCharFilter {
h = mux
}
h = s.enterpriseHandler(h)
h = withRemoteAddrHandler(h)
s.h = &wrappedMux{
mux: mux,
handler: h,
}
return s.h
}
// withRemoteAddrHandler injects the remote address into the request's context.
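// If the RemoteAddr cannot be parsed as an ip:port pair (for example when the
// request arrives over a unix socket), the request is passed through unchanged.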
func withRemoteAddrHandler(next http.Handler) http.Handler {
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
addrPort, err := netip.ParseAddrPort(req.RemoteAddr)
if err == nil {
remoteAddr := net.TCPAddrFromAddrPort(addrPort)
ctx := consul.ContextWithRemoteAddr(req.Context(), remoteAddr)
req = req.WithContext(ctx)
}
next.ServeHTTP(resp, req)
})
}
// nodeName returns the node name of the agent
func (s *HTTPHandlers) nodeName() string {
return s.agent.config.NodeName
}
// aclEndpointRE is used to find old ACL endpoints that take tokens in the URL
// so that we can redact them. The ACL endpoints that take the token in the URL
// are all of the form /v1/acl/<verb>/<token>, and can optionally include query
// parameters which are indicated by a question mark. We capture the part before
// the token, the token, and any query parameters after, and then reassemble as
// $1<hidden>$4 (the token in $3 isn't used), which will give:
//
// /v1/acl/clone/foo -> /v1/acl/clone/<hidden>
// /v1/acl/clone/foo?token=bar -> /v1/acl/clone/<hidden>?token=<hidden>
//
// The query parameter in the example above is obfuscated like any other
// token, but only after this regular expression is applied, so the regular
// expression substitution on its own results in:
//
// /v1/acl/clone/foo?token=bar -> /v1/acl/clone/<hidden>?token=bar
// (where $1 is "/v1/acl/clone/", $3 is "foo", and $4 is "?token=bar")
//
// And then the loop that looks for parameters called "token" does the last
// step to get to the final redacted form.
var (
aclEndpointRE = regexp.MustCompile("^(/v1/acl/(create|update|destroy|info|clone|list)/)([^?]+)([?]?.*)$")
)
// wrap is used to wrap endpoint functions with the common HTTP plumbing:
// setting response headers, redacting tokens from logs, method checking,
// error-to-status mapping, and response encoding.
func (s *HTTPHandlers) wrap(handler endpoint, methods []string) http.HandlerFunc {
httpLogger := s.agent.logger.Named(logging.HTTP)
return func(resp http.ResponseWriter, req *http.Request) {
setHeaders(resp, s.agent.config.HTTPResponseHeaders)
setTranslateAddr(resp, s.agent.config.TranslateWANAddrs)
setACLDefaultPolicy(resp, s.agent.config.ACLResolverSettings.ACLDefaultPolicy)
// Obfuscate any tokens from appearing in the logs
formVals, err := url.ParseQuery(req.URL.RawQuery)
if err != nil {
httpLogger.Error("Failed to decode query",
"from", req.RemoteAddr,
"error", err,
)
resp.WriteHeader(http.StatusInternalServerError)
return
}
logURL := req.URL.String()
if tokens, ok := formVals["token"]; ok {
for _, token := range tokens {
if token == "" {
logURL += "<hidden>"
continue
}
logURL = strings.Replace(logURL, token, "<hidden>", -1)
}
}
logURL = aclEndpointRE.ReplaceAllString(logURL, "$1<hidden>$4")
if s.denylist.Block(req.URL.Path) {
errMsg := "Endpoint is blocked by agent configuration"
httpLogger.Error("Request error",
"method", req.Method,
"url", logURL,
"from", req.RemoteAddr,
"error", errMsg,
)
resp.WriteHeader(http.StatusForbidden)
fmt.Fprint(resp, errMsg)
return
}
isForbidden := func(err error) bool {
if acl.IsErrPermissionDenied(err) || acl.IsErrNotFound(err) {
return true
}
if e, ok := status.FromError(err); ok && e.Code() == codes.PermissionDenied {
return true
}
return false
}
isTooManyRequests := func(err error) bool {
if err == nil {
return false
}
// Client-side RPC limits.
if structs.IsErrRPCRateExceeded(err) {
return true
}
// Connect CA rate limiter.
if err.Error() == consul.ErrRateLimited.Error() {
return true
}
// gRPC server rate limit interceptor.
if status.Code(err) == codes.ResourceExhausted {
return true
}
// net/rpc server rate limit interceptor.
return strings.Contains(err.Error(), rate.ErrRetryElsewhere.Error())
}
isServiceUnavailable := func(err error) bool {
if err == nil {
return false
}
// gRPC server rate limit interceptor.
if status.Code(err) == codes.Unavailable {
return true
}
// net/rpc server rate limit interceptor.
return strings.Contains(err.Error(), rate.ErrRetryLater.Error())
}
isMethodNotAllowed := func(err error) bool {
_, ok := err.(MethodNotAllowedError)
return ok
}
addAllowHeader := func(methods []string) {
resp.Header().Add("Allow", strings.Join(methods, ","))
}
isHTTPError := func(err error) bool {
_, ok := err.(HTTPError)
return ok
}
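// handleErr maps a classified error onto a status code: forbidden errors
// become 403, rate-limit errors 429, unavailable errors 503, disallowed
// methods 405 (with an Allow header), HTTPError carries its own code, and
// anything else falls back to 500. The error message is written as the body.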
handleErr := func(err error) {
if req.Context().Err() != nil {
httpLogger.Info("Request cancelled",
"method", req.Method,
"url", logURL,
"from", req.RemoteAddr,
"error", err)
} else {
httpLogger.Error("Request error",
"method", req.Method,
"url", logURL,
"from", req.RemoteAddr,
"error", err)
}
// If the error came from gRPC, unpack it to get the real message.
msg := err.Error()
if s, ok := status.FromError(err); ok {
msg = s.Message()
}
switch {
case isForbidden(err):
resp.WriteHeader(http.StatusForbidden)
case isTooManyRequests(err):
resp.WriteHeader(http.StatusTooManyRequests)
case isServiceUnavailable(err):
resp.WriteHeader(http.StatusServiceUnavailable)
case isMethodNotAllowed(err):
// RFC2616 states that for 405 Method Not Allowed the response
// MUST include an Allow header containing the list of valid
// methods for the requested resource.
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
addAllowHeader(err.(MethodNotAllowedError).Allow)
resp.WriteHeader(http.StatusMethodNotAllowed) // 405
case isHTTPError(err):
err := err.(HTTPError)
code := http.StatusInternalServerError
if err.StatusCode != 0 {
code = err.StatusCode
}
if msg == "" {
msg = "An unexpected error occurred"
}
resp.WriteHeader(code)
default:
resp.WriteHeader(http.StatusInternalServerError)
}
fmt.Fprint(resp, msg)
}
start := time.Now()
defer func() {
httpLogger.Debug("Request finished",
"method", req.Method,
"url", logURL,
"from", req.RemoteAddr,
"latency", time.Since(start).String(),
)
}()
var obj interface{}
// if this endpoint has declared methods, respond appropriately to OPTIONS requests. Otherwise let the endpoint handle that.
if req.Method == "OPTIONS" && len(methods) > 0 {
addAllowHeader(append([]string{"OPTIONS"}, methods...))
return
}
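// For example, an OPTIONS request to an endpoint registered with
// []string{"GET", "PUT"} is answered with "Allow: OPTIONS,GET,PUT" and an
// empty 200 body.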
// if this endpoint has declared methods, check the request method. Otherwise let the endpoint handle that.
methodFound := len(methods) == 0
for _, method := range methods {
if method == req.Method {
methodFound = true
break
}
}
if !methodFound {
err = MethodNotAllowedError{req.Method, append([]string{"OPTIONS"}, methods...)}
} else {
err = s.checkWriteAccess(req)
// Give the user a hint that they might be doing something wrong if they issue a GET request
// with a non-empty body (e.g., parameters placed in body rather than query string).
if req.Method == http.MethodGet {
if req.ContentLength > 0 {
httpLogger.Warn("GET request has a non-empty body that will be ignored; "+
"check whether parameters meant for the query string were accidentally placed in the body",
"url", logURL,
"from", req.RemoteAddr)
}
}
if err == nil {
// Invoke the handler
obj, err = handler(resp, req)
}
}
contentType := "application/json"
httpCode := http.StatusOK
if err != nil {
if errPayload, ok := err.(CodeWithPayloadError); ok {
httpCode = errPayload.StatusCode
if errPayload.ContentType != "" {
contentType = errPayload.ContentType
}
if errPayload.Reason != "" {
resp.Header().Add("X-Consul-Reason", errPayload.Reason)
}
} else {
handleErr(err)
return
}
}
if obj == nil {
return
}
var buf []byte
if contentType == "application/json" {
buf, err = s.marshalJSON(req, obj)
if err != nil {
handleErr(err)
return
}
} else {
if strings.HasPrefix(contentType, "text/") {
if val, ok := obj.(string); ok {
buf = []byte(val)
}
}
}
resp.Header().Set("Content-Type", contentType)
resp.WriteHeader(httpCode)
resp.Write(buf)
}
}
// marshalJSON marshals the object into JSON, respecting the user's pretty-ness
// configuration.
func (s *HTTPHandlers) marshalJSON(req *http.Request, obj interface{}) ([]byte, error) {
if _, ok := req.URL.Query()["pretty"]; ok || s.agent.config.DevMode {
buf, err := json.MarshalIndent(obj, "", " ")
if err != nil {
return nil, err
}
buf = append(buf, "\n"...)
return buf, nil
}
buf, err := json.Marshal(obj)
if err != nil {
return nil, err
}
return buf, nil
}
// IsUIEnabled returns true if the UI is enabled.
func (s *HTTPHandlers) IsUIEnabled() bool {
// Note that we _don't_ support reloading ui_config.{enabled,content_dir}
// since this only runs at initial startup.
return s.agent.config.UIConfig.Dir != "" || s.agent.config.UIConfig.Enabled
}
// Index renders a simple index page.
func (s *HTTPHandlers) Index(resp http.ResponseWriter, req *http.Request) {
// Send special headers too since this endpoint isn't wrapped with something
// that sends them.
setHeaders(resp, s.agent.config.HTTPResponseHeaders)
// Check if this is a non-index path
if req.URL.Path != "/" {
resp.WriteHeader(http.StatusNotFound)
if strings.Contains(req.URL.Path, "/v1/") {
fmt.Fprintln(resp, "Invalid URL path: not a recognized HTTP API endpoint")
} else {
fmt.Fprintln(resp, "Invalid URL path: if attempting to use the HTTP API, ensure the path starts with '/v1/'")
}
return
}
// Give them something helpful if there's no UI so they at least know
// what this server is.
if !s.IsUIEnabled() {
fmt.Fprint(resp, "Consul Agent: UI disabled. To enable, set ui_config.enabled=true in the agent configuration and restart.")
return
}
// Redirect to the UI endpoint
http.Redirect(
resp,
req,
s.agent.config.UIConfig.ContentPath,
http.StatusMovedPermanently,
) // 301
}
func decodeBody(body io.Reader, out interface{}) error {
return lib.DecodeJSON(body, out)
}
// decodeBodyDeprecated is used to decode a JSON request body.
// Deprecated: please use decodeBody above.
func decodeBodyDeprecated(req *http.Request, out interface{}, cb func(interface{}) error) error {
// This generally only happens in tests since real HTTP requests set
// a non-nil body with no content. We guard against it anyway to prevent
// a panic. The EOF response is the same behavior as an empty reader.
if req.Body == nil {
return io.EOF
}
var raw interface{}
dec := json.NewDecoder(req.Body)
if err := dec.Decode(&raw); err != nil {
return err
}
// Invoke the callback prior to decode
if cb != nil {
if err := cb(raw); err != nil {
return err
}
}
decodeConf := &mapstructure.DecoderConfig{
DecodeHook: mapstructure.ComposeDecodeHookFunc(
mapstructure.StringToTimeDurationHookFunc(),
mapstructure.StringToTimeHookFunc(time.RFC3339),
stringToReadableDurationFunc(),
),
Result: &out,
}
decoder, err := mapstructure.NewDecoder(decodeConf)
if err != nil {
return err
}
return decoder.Decode(raw)
}
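// A minimal sketch of the deprecated call pattern inside an endpoint, assuming
// a hypothetical request struct:
//
//	var args structs.ExampleRequest
//	if err := decodeBodyDeprecated(req, &args, nil); err != nil {
//		return nil, HTTPError{StatusCode: http.StatusBadRequest, Reason: "Request decode failed: " + err.Error()}
//	}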
// stringToReadableDurationFunc is a mapstructure hook for decoding a string
// into an api.ReadableDuration for backwards compatibility.
func stringToReadableDurationFunc() mapstructure.DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
var v api.ReadableDuration
if t != reflect.TypeOf(v) {
return data, nil
}
switch {
case f.Kind() == reflect.String:
if dur, err := time.ParseDuration(data.(string)); err != nil {
return nil, err
} else {
v = api.ReadableDuration(dur)
}
return v, nil
default:
return data, nil
}
}
}
// setTranslateAddr is used to set the address translation header. This is only
// present if the feature is active.
func setTranslateAddr(resp http.ResponseWriter, active bool) {
if active {
resp.Header().Set("X-Consul-Translate-Addresses", "true")
}
}
// setIndex is used to set the index response header
func setIndex(resp http.ResponseWriter, index uint64) {
// If we ever return X-Consul-Index of 0 blocking clients will go into a busy
// loop and hammer us since ?index=0 will never block. It's always safe to
// return index=1 since the very first Raft write is always an internal one
// writing the raft config for the cluster so no user-facing blocking query
// will ever legitimately have an X-Consul-Index of 1.
if index == 0 {
index = 1
}
resp.Header().Set("X-Consul-Index", strconv.FormatUint(index, 10))
}
// setKnownLeader is used to set the known leader header
func setKnownLeader(resp http.ResponseWriter, known bool) {
s := "true"
if !known {
s = "false"
}
resp.Header().Set("X-Consul-KnownLeader", s)
}
func setConsistency(resp http.ResponseWriter, consistency string) {
if consistency != "" {
resp.Header().Set("X-Consul-Effective-Consistency", consistency)
}
}
func setACLDefaultPolicy(resp http.ResponseWriter, aclDefaultPolicy string) {
if aclDefaultPolicy != "" {
resp.Header().Set("X-Consul-Default-ACL-Policy", aclDefaultPolicy)
}
}
// setLastContact is used to set the last contact header
func setLastContact(resp http.ResponseWriter, last time.Duration) {
if last < 0 {
last = 0
}
lastMsec := uint64(last / time.Millisecond)
resp.Header().Set("X-Consul-LastContact", strconv.FormatUint(lastMsec, 10))
}
// setMeta is used to set the query response meta data
func setMeta(resp http.ResponseWriter, m *structs.QueryMeta) error {
lastContact, err := m.GetLastContact()
if err != nil {
return err
}
setLastContact(resp, lastContact)
setIndex(resp, m.GetIndex())
setKnownLeader(resp, m.GetKnownLeader())
setConsistency(resp, m.GetConsistencyLevel())
setQueryBackend(resp, m.GetBackend())
setResultsFilteredByACLs(resp, m.GetResultsFilteredByACLs())
return nil
}
func setQueryBackend(resp http.ResponseWriter, backend structs.QueryBackend) {
if b := backend.String(); b != "" {
resp.Header().Set("X-Consul-Query-Backend", b)
}
}
// setCacheMeta sets http response headers to indicate cache status.
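// A hit whose entry is ten seconds old yields "X-Cache: HIT" and "Age: 10";
// a miss yields only "X-Cache: MISS" (values illustrative).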
func setCacheMeta(resp http.ResponseWriter, m *cache.ResultMeta) {
if m == nil {
return
}
str := "MISS"
if m.Hit {
str = "HIT"
}
resp.Header().Set("X-Cache", str)
if m.Hit {
resp.Header().Set("Age", fmt.Sprintf("%.0f", m.Age.Seconds()))
}
}
// setResultsFilteredByACLs sets an HTTP response header to indicate that the
// query results were filtered by enforcing ACLs. If the given filtered value
// is false the header will be omitted, as it's ambiguous whether the results
// were not filtered or whether the endpoint doesn't yet support this header.
func setResultsFilteredByACLs(resp http.ResponseWriter, filtered bool) {
if filtered {
resp.Header().Set("X-Consul-Results-Filtered-By-ACLs", "true")
}
}
// setHeaders is used to set canonical response header fields
func setHeaders(resp http.ResponseWriter, headers map[string]string) {
for field, value := range headers {
resp.Header().Set(http.CanonicalHeaderKey(field), value)
}
}
// serveHandlerWithHeaders is used to serve a http.Handler with the specified headers
func serveHandlerWithHeaders(h http.Handler, headers map[string]string) http.HandlerFunc {
return func(resp http.ResponseWriter, req *http.Request) {
setHeaders(resp, headers)
h.ServeHTTP(resp, req)
}
}
// parseWait is used to parse the ?wait and ?index query params
// Returns true on error
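// For example (illustrative values), ?index=1000&wait=60s asks the endpoint
// to block until the index moves past 1000 or roughly 60 seconds elapse.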
func parseWait(resp http.ResponseWriter, req *http.Request, b QueryOptionsCompat) bool {
query := req.URL.Query()
if wait := query.Get("wait"); wait != "" {
dur, err := time.ParseDuration(wait)
if err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Invalid wait time")
return true
}
b.SetMaxQueryTime(dur)
}
if idx := query.Get("index"); idx != "" {
index, err := strconv.ParseUint(idx, 10, 64)
if err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Invalid index")
return true
}
b.SetMinQueryIndex(index)
}
return false
}
// parseCacheControl parses the Cache-Control HTTP header value. So far we only
// support the max-age, must-revalidate, and stale-if-error directives.
func parseCacheControl(resp http.ResponseWriter, req *http.Request, b QueryOptionsCompat) bool {
raw := strings.ToLower(req.Header.Get("Cache-Control"))
if raw == "" {
return false
}
// Didn't want to import a full parser for this. While quoted strings are
// allowed in some directives, max-age does not allow them per
// https://tools.ietf.org/html/rfc7234#section-5.2.2.8 so we assume all
// well-behaved clients use the exact token form of max-age=<delta-seconds>
// where delta-seconds is a non-negative decimal integer.
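	// A header we expect to handle looks like (values illustrative):
	//   Cache-Control: max-age=30, must-revalidate, stale-if-error=300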
directives := strings.Split(raw, ",")
parseDurationOrFail := func(raw string) (time.Duration, bool) {
i, err := strconv.Atoi(raw)
if err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Invalid Cache-Control header.")
return 0, true
}
return time.Duration(i) * time.Second, false
}
for _, d := range directives {
d = strings.ToLower(strings.TrimSpace(d))
if d == "must-revalidate" {
b.SetMustRevalidate(true)
}
if strings.HasPrefix(d, "max-age=") {
d, failed := parseDurationOrFail(d[8:])
if failed {
return true
}
b.SetMaxAge(d)
if d == 0 {
			// max-age=0 specifically means that we need to consider the cache stale
			// immediately; however, MaxAge = 0 is indistinguishable from the default
			// where MaxAge is unset, so we set MustRevalidate instead.
b.SetMustRevalidate(true)
}
}
if strings.HasPrefix(d, "stale-if-error=") {
d, failed := parseDurationOrFail(d[15:])
if failed {
return true
}
b.SetStaleIfError(d)
}
}
return false
}
// parseConsistency is used to parse the ?stale, ?consistent, ?leader, ?cached,
// and ?max_stale query params.
// Returns true on error
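// For example, ?stale lets any server answer the read while ?consistent
// forces a leader-verified read; requests that combine them are rejected
// below with a 400.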
func (s *HTTPHandlers) parseConsistency(resp http.ResponseWriter, req *http.Request, b QueryOptionsCompat) bool {
query := req.URL.Query()
defaults := true
if _, ok := query["stale"]; ok {
b.SetAllowStale(true)
defaults = false
}
if _, ok := query["consistent"]; ok {
b.SetRequireConsistent(true)
defaults = false
}
if _, ok := query["leader"]; ok {
// The leader query param forces use of the "default" consistency mode.
		// This allows the "default" consistency mode to be used even when the default
		// has been changed to "stale" via the discovery_max_stale agent config option.
defaults = false
}
if _, ok := query["cached"]; ok && s.agent.config.HTTPUseCache {
b.SetUseCache(true)
defaults = false
}
if maxStale := query.Get("max_stale"); maxStale != "" {
dur, err := time.ParseDuration(maxStale)
if err != nil {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(resp, "Invalid max_stale value %q", maxStale)
return true
}
b.SetMaxStaleDuration(dur)
if dur.Nanoseconds() > 0 {
b.SetAllowStale(true)
defaults = false
}
}
// No specific Consistency has been specified by caller
if defaults {
path := req.URL.Path
if strings.HasPrefix(path, "/v1/catalog") || strings.HasPrefix(path, "/v1/health") {
if s.agent.config.DiscoveryMaxStale.Nanoseconds() > 0 {
b.SetMaxStaleDuration(s.agent.config.DiscoveryMaxStale)
b.SetAllowStale(true)
}
}
}
if b.GetAllowStale() && b.GetRequireConsistent() {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Cannot specify ?stale with ?consistent, conflicting semantics.")
return true
}
if b.GetUseCache() && b.GetRequireConsistent() {
resp.WriteHeader(http.StatusBadRequest)
fmt.Fprint(resp, "Cannot specify ?cached with ?consistent, conflicting semantics.")
return true
}
return false
}
// parseConsistencyReadRequest is used to parse the ?consistent query param.
func parseConsistencyReadRequest(resp http.ResponseWriter, req *http.Request, b *pbcommon.ReadRequest) {
query := req.URL.Query()
if _, ok := query["consistent"]; ok {
b.RequireConsistent = true
}
}
// parseDC is used to parse the ?dc query param
func (s *HTTPHandlers) parseDC(req *http.Request, dc *string) {
if other := req.URL.Query().Get("dc"); other != "" {
*dc = other
} else if *dc == "" {
*dc = s.agent.config.Datacenter
}
}
// parseTokenInternal is used to parse the ?token query param or the X-Consul-Token header or
// Authorization Bearer token (RFC6750).
func (s *HTTPHandlers) parseTokenInternal(req *http.Request, token *string) {
if other := req.URL.Query().Get("token"); other != "" {
*token = other
return
}
if ok := s.parseTokenFromHeaders(req, token); ok {
return
}
*token = ""
}
func (s *HTTPHandlers) parseTokenFromHeaders(req *http.Request, token *string) bool {
if other := req.Header.Get("X-Consul-Token"); other != "" {
*token = other
return true
}
if other := req.Header.Get("Authorization"); other != "" {
// HTTP Authorization headers are in the format: <Scheme>[SPACE]<Value>
// Ref. https://tools.ietf.org/html/rfc7236#section-3
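		// e.g. "Authorization: Bearer <acl-token>" (placeholder value)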
parts := strings.Split(other, " ")
		// The Authorization header is invalid if it contains 1 or 0 parts, e.g.:
// "" || "<Scheme><Value>" || "<Scheme>" || "<Value>"
if len(parts) > 1 {
scheme := parts[0]
// Everything after "<Scheme>" is "<Value>", trimmed
value := strings.TrimSpace(strings.Join(parts[1:], " "))
// <Scheme> must be "Bearer"
if strings.ToLower(scheme) == "bearer" {
// Since Bearer tokens shouldn't contain spaces (rfc6750#section-2.1)
			// "value" is tokenized and only the first item is used
*token = strings.TrimSpace(strings.Split(value, " ")[0])
return true
}
}
}
return false
}
func (s *HTTPHandlers) clearTokenFromHeaders(req *http.Request) {
req.Header.Del("X-Consul-Token")
req.Header.Del("Authorization")
}
// parseTokenWithDefault passes through to parseTokenInternal. If no token was
// specified it populates the token with the agent's UserToken (acl_token in
// the consul configuration).
func (s *HTTPHandlers) parseTokenWithDefault(req *http.Request, token *string) {
s.parseTokenInternal(req, token) // parseTokenInternal modifies *token
	if token != nil && *token == "" {
		*token = s.agent.tokens.UserToken()
	}
}
// parseToken is used to parse the ?token query param or the X-Consul-Token header or
// Authorization Bearer token header (RFC6750). This function is used widely in Consul's endpoints.
func (s *HTTPHandlers) parseToken(req *http.Request, token *string) {
s.parseTokenWithDefault(req, token)
}
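// sourceAddrFromRequest returns the request's effective client IP: the first
// X-Forwarded-For entry if it parses as an IP, otherwise the host portion of
// the connection's remote address. For example (illustrative addresses),
// "X-Forwarded-For: 203.0.113.7, 10.0.0.1" yields "203.0.113.7".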
func sourceAddrFromRequest(req *http.Request) string {
xff := req.Header.Get("X-Forwarded-For")
forwardHosts := strings.Split(xff, ",")
if len(forwardHosts) > 0 {
forwardIp := net.ParseIP(strings.TrimSpace(forwardHosts[0]))
if forwardIp != nil {
return forwardIp.String()
}
}
host, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
return ""
}
ip := net.ParseIP(host)
if ip != nil {
return ip.String()
} else {
return ""
}
}
// parseSource is used to parse the ?near=<node> query parameter, used for
// sorting by RTT based on a source node. We set the source's DC to the target
// DC in the request, if given, or else the agent's DC.
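// For example, ?near=_agent sorts results by RTT from this agent's own node.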
func (s *HTTPHandlers) parseSource(req *http.Request, source *structs.QuerySource) {
s.parseDC(req, &source.Datacenter)
source.Ip = sourceAddrFromRequest(req)
if node := req.URL.Query().Get("near"); node != "" {
if node == "_agent" {
source.Node = s.agent.config.NodeName
} else {
source.Node = node
}
source.NodePartition = s.agent.config.PartitionOrEmpty()
}
}
func (s *HTTPHandlers) parsePeerName(req *http.Request, args *structs.ServiceSpecificRequest) {
if peer := req.URL.Query().Get("peer"); peer != "" {
args.PeerName = peer
}
}
// parseMetaFilter is used to parse the ?node-meta=key:value query parameter, used for
// filtering results to nodes with the given metadata key/value
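// For example (illustrative), ?node-meta=rack:r1&node-meta=env yields
// map[string]string{"rack": "r1", "env": ""}.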
func (s *HTTPHandlers) parseMetaFilter(req *http.Request) map[string]string {
if filterList, ok := req.URL.Query()["node-meta"]; ok {
filters := make(map[string]string)
for _, filter := range filterList {
key, value := parseMetaPair(filter)
filters[key] = value
}
return filters
}
return nil
}
func parseMetaPair(raw string) (string, string) {
pair := strings.SplitN(raw, ":", 2)
if len(pair) == 2 {
return pair[0], pair[1]
}
return pair[0], ""
}
// parse is a convenience method for endpoints that need the standard query
// handling: datacenter, token, filter, consistency, cache-control, and
// wait/index parameters.
func (s *HTTPHandlers) parse(resp http.ResponseWriter, req *http.Request, dc *string, b QueryOptionsCompat) bool {
s.parseDC(req, dc)
var token string
s.parseTokenWithDefault(req, &token)
b.SetToken(token)
var filter string
s.parseFilter(req, &filter)
b.SetFilter(filter)
if s.parseConsistency(resp, req, b) {
return true
}
if parseCacheControl(resp, req, b) {
return true
}
return parseWait(resp, req, b)
}
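// checkWriteAccess allows read methods (GET, HEAD, OPTIONS) unconditionally.
// For write methods it returns nil only when no AllowWriteHTTPFrom networks
// are configured or when the remote address falls within one of them;
// otherwise it returns a 403 HTTPError.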
func (s *HTTPHandlers) checkWriteAccess(req *http.Request) error {
if req.Method == http.MethodGet || req.Method == http.MethodHead || req.Method == http.MethodOptions {
return nil
}
allowed := s.agent.config.AllowWriteHTTPFrom
if len(allowed) == 0 {
return nil
}
ipStr, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
return errors.Wrap(err, "unable to parse remote addr")
}
ip := net.ParseIP(ipStr)
for _, n := range allowed {
if n.Contains(ip) {
return nil
}
}
return HTTPError{StatusCode: http.StatusForbidden, Reason: "Access is restricted"}
}
func (s *HTTPHandlers) parseFilter(req *http.Request, filter *string) {
if other := req.URL.Query().Get("filter"); other != "" {
*filter = other
}
}
func setMetaProtobuf(resp http.ResponseWriter, queryMeta *pbcommon.QueryMeta) {
qm := new(structs.QueryMeta)
pbcommon.QueryMetaToStructs(queryMeta, qm)
setMeta(resp, qm)
}
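// QueryOptionsCompat is the subset of query-option accessors that the parsing
// helpers above need, so the same parsing code can populate different
// concrete option types.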
type QueryOptionsCompat interface {
GetAllowStale() bool
SetAllowStale(bool)
GetRequireConsistent() bool
SetRequireConsistent(bool)
GetUseCache() bool
SetUseCache(bool)
SetFilter(string)
SetToken(string)
SetMustRevalidate(bool)
SetMaxAge(time.Duration)
SetMaxStaleDuration(time.Duration)
SetStaleIfError(time.Duration)
SetMaxQueryTime(time.Duration)
SetMinQueryIndex(uint64)
}