mirror of https://github.com/status-im/consul.git

fix typos reported by golangci-lint:misspell (#5434)

parent 9b1bc8d96d
commit f4a3b9d518
@@ -14,7 +14,7 @@ import (
)

// NOTE: The tests contained herein are designed to test the HTTP API
-// They are not intented to thoroughly test the backing RPC
+// They are not intended to thoroughly test the backing RPC
// functionality as that will be done with other tests.

func TestACL_Disabled_Response(t *testing.T) {
@@ -3674,7 +3674,7 @@ func (a *Agent) registerCache() {

// defaultProxyCommand returns the default Connect managed proxy command.
func defaultProxyCommand(agentCfg *config.RuntimeConfig) ([]string, error) {
-// Get the path to the current exectuable. This is cached once by the
+// Get the path to the current executable. This is cached once by the
// library so this is effectively just a variable read.
execPath, err := os.Executable()
if err != nil {
@@ -18,7 +18,7 @@ import (
"github.com/mitchellh/hashstructure"

"github.com/hashicorp/consul/acl"
-"github.com/hashicorp/consul/agent/cache-types"
+cachetype "github.com/hashicorp/consul/agent/cache-types"
"github.com/hashicorp/consul/agent/checks"
"github.com/hashicorp/consul/agent/config"
"github.com/hashicorp/consul/agent/debug"
@@ -254,11 +254,11 @@ func (s *HTTPServer) AgentService(resp http.ResponseWriter, req *http.Request) (
// Support managed proxies until they are removed entirely. Since built-in
// proxy will now use this endpoint, in order to not break managed proxies in
// the interim until they are removed, we need to mirror the default-setting
-// behaviour they had. Rather than thread that through this whole method as
+// behavior they had. Rather than thread that through this whole method as
// special cases that need to be unwound later (and duplicate logic in the
-// proxy config endpoint) just defer to that and then translater the response.
+// proxy config endpoint) just defer to that and then translate the response.
if managedProxy := s.agent.State.Proxy(id); managedProxy != nil {
-// This is for a managed proxy, use the old endpoint's behaviour
+// This is for a managed proxy, use the old endpoint's behavior
req.URL.Path = "/v1/agent/connect/proxy/" + id
obj, err := s.AgentConnectProxyConfig(resp, req)
if err != nil {
@@ -1362,7 +1362,7 @@ func (s *HTTPServer) AgentConnectCARoots(resp http.ResponseWriter, req *http.Req
// AgentConnectCALeafCert returns the certificate bundle for a service
// instance. This supports blocking queries to update the returned bundle.
func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
-// Get the service name. Note that this is the name of the sevice,
+// Get the service name. Note that this is the name of the service,
// not the ID of the service instance.
serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/")
@@ -1539,7 +1539,7 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http
type agentLocalBlockingFunc func(ws memdb.WatchSet) (string, interface{}, error)

// agentLocalBlockingQuery performs a blocking query in a generic way against
-// local agent state that has no RPC or raft to back it. It uses `hash` paramter
+// local agent state that has no RPC or raft to back it. It uses `hash` parameter
// instead of an `index`. The resp is needed to write the `X-Consul-ContentHash`
// header back on return no Status nor body content is ever written to it.
func (s *HTTPServer) agentLocalBlockingQuery(resp http.ResponseWriter, hash string,
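The comment fixed above describes hash-based blocking against purely local agent state, where there is no Raft index to block on. A rough illustration of that pattern follows; the helper names are invented for the sketch and this is not the code from this endpoint:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// blockOnHash is a sketch of a hash-based blocking query over local,
// non-Raft state: return as soon as the current hash differs from the
// hash the client supplied, otherwise wait for a change or a timeout.
func blockOnHash(ctx context.Context, clientHash string, timeout time.Duration,
	current func() (string, <-chan struct{})) (string, error) {

	deadline := time.After(timeout)
	for {
		hash, changed := current()
		if hash != clientHash {
			return hash, nil // state differs from what the client has seen
		}
		select {
		case <-changed: // local state changed; recompute the hash
		case <-deadline: // blocking window elapsed; reply with the unchanged hash
			return hash, nil
		case <-ctx.Done():
			return "", ctx.Err()
		}
	}
}

func main() {
	ch := make(chan struct{})
	close(ch) // pretend the state changed immediately
	h, _ := blockOnHash(context.Background(), "abc", time.Second,
		func() (string, <-chan struct{}) { return "def", ch })
	fmt.Println(h) // def
}
```

As the fixed comment notes, the real handler also writes the resulting hash back in the `X-Consul-ContentHash` header rather than using an index.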
@@ -363,10 +363,10 @@ func TestAgent_Service(t *testing.T) {
// into a busy-poll!
//
// This test though doesn't catch that because busy poll still has the
-// correct external behaviour. I don't want to instrument the loop to
+// correct external behavior. I don't want to instrument the loop to
// assert it's not executing too fast here as I can't think of a clean way
// and the issue is fixed now so this test doesn't actually catch the
-// error, but does provide an easy way to verify the behaviour by hand:
+// error, but does provide an easy way to verify the behavior by hand:
// 1. Make this test fail e.g. change wantErr to true
// 2. Add a log.Println or similar into the blocking loop/function
// 3. See whether it's called just once or many times in a tight loop.
@@ -3063,7 +3063,7 @@ func TestAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T) {
{
name: "ACL OK for service but and overridden for sidecar",
// This test ensures that if the sidecar embeds it's own token with
-// different privs from the main request token it will be honoured for the
+// different privs from the main request token it will be honored for the
// sidecar registration. We use the test root token since that should have
// permission.
json: `
@@ -3342,7 +3342,7 @@ func TestAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T) {
assert.Equal(sd.Port, svc.Port)
// Ensure that the actual registered service _doesn't_ still have it's
// sidecar info since it's duplicate and we don't want that synced up to
-// the catalog or included in responses particulary - it's just
+// the catalog or included in responses particularly - it's just
// registration syntax sugar.
assert.Nil(svc.Connect.SidecarService)
@@ -4765,7 +4765,7 @@ func TestAgentConnectCALeafCert_good(t *testing.T) {
r.Fatalf("leaf has not updated")
}

-// Got a new leaf. Sanity check it's a whole new key as well as differnt
+// Got a new leaf. Sanity check it's a whole new key as well as different
// cert.
if issued.PrivateKeyPEM == issued2.PrivateKeyPEM {
r.Fatalf("new leaf has same private key as before")
@@ -5057,10 +5057,10 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) {
// into a busy-poll!
//
// This test though doesn't catch that because busy poll still has the
-// correct external behaviour. I don't want to instrument the loop to
+// correct external behavior. I don't want to instrument the loop to
// assert it's not executing too fast here as I can't think of a clean way
// and the issue is fixed now so this test doesn't actually catch the
-// error, but does provide an easy way to verify the behaviour by hand:
+// error, but does provide an easy way to verify the behavior by hand:
// 1. Make this test fail e.g. change wantErr to true
// 2. Add a log.Println or similar into the blocking loop/function
// 3. See whether it's called just once or many times in a tight loop.
@@ -81,7 +81,7 @@ type ConnectCALeaf struct {
// delay in tests they can set it to 1 nanosecond. We may separately allow
// configuring the jitter limit by users later but this is different and for
// tests only since we need to set a deterministic time delay in order to test
-// the behaviour here fully and determinstically.
+// the behavior here fully and determinstically.
TestOverrideCAChangeInitialDelay time.Duration
}
@@ -233,7 +233,7 @@ func (c *ConnectCALeaf) rootWatcher(ctx context.Context) {

// calculateSoftExpiry encapsulates our logic for when to renew a cert based on
// it's age. It returns a pair of times min, max which makes it easier to test
-// the logic without non-determinisic jitter to account for. The caller should
+// the logic without non-deterministic jitter to account for. The caller should
// choose a time randomly in between these.
//
// We want to balance a few factors here:
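calculateSoftExpiry, whose comment is fixed above, returns a deterministic [min, max] window so the random jitter stays testable. A toy version of that idea follows; the 10% to 20% figures are invented for the example and are not Consul's actual renewal policy:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// softExpiryWindow derives a [min, max] renewal window from a certificate's
// lifetime (here the last 10% to 20% before expiry) so the caller can pick a
// random instant inside it. The percentages are assumptions for the sketch.
func softExpiryWindow(notBefore, notAfter time.Time) (min, max time.Time) {
	lifetime := notAfter.Sub(notBefore)
	min = notAfter.Add(-time.Duration(float64(lifetime) * 0.2))
	max = notAfter.Add(-time.Duration(float64(lifetime) * 0.1))
	return min, max
}

func main() {
	issued := time.Now()
	expires := issued.Add(72 * time.Hour)
	min, max := softExpiryWindow(issued, expires)
	// The caller adds the jitter: a uniformly random time in [min, max].
	renewAt := min.Add(time.Duration(rand.Int63n(int64(max.Sub(min)))))
	fmt.Println("renew between", min, "and", max, "-> chose", renewAt)
}
```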
@@ -316,7 +316,7 @@ RETRY_GET:
// without waiting a whole timeout to see it, but clients that just look up
// cache with an older index than the last valid result will still see the
// result and not the error here. I.e. the error is not "cached" without a
-// new fetch attempt occuring, but the last good value can still be fetched
+// new fetch attempt occurring, but the last good value can still be fetched
// from cache.
return entry.Value, meta, nil
}
@@ -9,7 +9,7 @@ import (
"github.com/hashicorp/consul/lib"
)

-// UpdateEvent is a struct summarising an update to a cache entry
+// UpdateEvent is a struct summarizing an update to a cache entry
type UpdateEvent struct {
// CorrelationID is used by the Notify API to allow correlation of updates
// with specific requests. We could return the full request object and
@@ -30,10 +30,10 @@ type UpdateEvent struct {
// cache actively. It will continue to perform blocking Get requests until the
// context is canceled.
//
-// The passed context must be cancelled or timeout in order to free resources
+// The passed context must be canceled or timeout in order to free resources
// and stop maintaining the value in cache. Typically request-scoped resources
// do this but if a long-lived context like context.Background is used, then the
-// caller must arrange for it to be cancelled when the watch is no longer
+// caller must arrange for it to be canceled when the watch is no longer
// needed.
//
// The passed chan may be buffered or unbuffered, if the caller doesn't consume
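The comments above spell out the lifetime rule for Notify watchers: the watch only stops once the passed context is canceled. A minimal, self-contained sketch of that contract; the `notify` stand-in below is not the cache's real method, which also takes a cache type and request:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// UpdateEvent mirrors the shape described above: a correlation ID plus the
// new result (details trimmed for the sketch).
type UpdateEvent struct {
	CorrelationID string
	Result        interface{}
}

// notify keeps pushing updates to ch until ctx is canceled, which is the
// only way the background watcher goroutine stops.
func notify(ctx context.Context, correlationID string, ch chan<- UpdateEvent) {
	go func() {
		for i := 0; ; i++ {
			select {
			case <-ctx.Done():
				return // canceling the context frees the watcher
			case ch <- UpdateEvent{CorrelationID: correlationID, Result: i}:
			}
			time.Sleep(50 * time.Millisecond)
		}
	}()
}

func main() {
	// A buffered chan means the watcher never blocks if the reader is slow.
	ch := make(chan UpdateEvent, 8)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // without this the background watcher would leak

	notify(ctx, "leaf-cert", ch)
	fmt.Println((<-ch).CorrelationID, (<-ch).Result)
}
```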
@@ -80,7 +80,7 @@ func (c *Cache) notifyBlockingQuery(ctx context.Context, t string, r Request, co
failures := uint(0)

for {
-// Check context hasn't been cancelled
+// Check context hasn't been canceled
if ctx.Err() != nil {
return
}
@@ -88,7 +88,7 @@ func (c *Cache) notifyBlockingQuery(ctx context.Context, t string, r Request, co
// Blocking request
res, meta, err := c.getWithIndex(t, r, index)

-// Check context hasn't been cancelled
+// Check context hasn't been canceled
if ctx.Err() != nil {
return
}
@@ -139,7 +139,7 @@ func (c *Cache) notifyPollingQuery(ctx context.Context, t string, r Request, cor
var lastValue interface{} = nil

for {
-// Check context hasn't been cancelled
+// Check context hasn't been canceled
if ctx.Err() != nil {
return
}
@@ -147,7 +147,7 @@ func (c *Cache) notifyPollingQuery(ctx context.Context, t string, r Request, cor
// Make the request
res, meta, err := c.getWithIndex(t, r, index)

-// Check context hasn't been cancelled
+// Check context hasn't been canceled
if ctx.Err() != nil {
return
}
@@ -97,7 +97,7 @@ func TestCacheNotify(t *testing.T) {
})

// We could wait for a full timeout but we can't directly observe it so
-// simulate the behaviour by triggering a response with the same value and
+// simulate the behavior by triggering a response with the same value and
// index as the last one.
close(trigger[1])
@@ -122,7 +122,7 @@ func TestCacheNotify(t *testing.T) {
Err: nil,
})

-// Sanity check closing chan before context is cancelled doesn't panic
+// Sanity check closing chan before context is canceled doesn't panic
//close(ch)

// Close context
@@ -136,7 +136,7 @@ func TestCacheNotify(t *testing.T) {
// will be cleaned.
close(trigger[3])

-// I want to test that cancelling the context cleans up goroutines (which it
+// I want to test that canceling the context cleans up goroutines (which it
// does from manual verification with debugger etc). I had a check based on a
// similar approach to https://golang.org/src/net/http/main_test.go#L60 but it
// was just too flaky because it relies on the timing of the error backoff
@@ -490,7 +490,7 @@ type Upstream struct {
LocalBindPort *int `json:"local_bind_port,omitempty" hcl:"local_bind_port" mapstructure:"local_bind_port"`

// Config is an opaque config that is specific to the proxy process being run.
-// It can be used to pass abritrary configuration for this specific upstream
+// It can be used to pass arbitrary configuration for this specific upstream
// to the proxy.
Config map[string]interface{} `json:"config,omitempty" hcl:"config" mapstructure:"config"`
}
@@ -98,7 +98,7 @@ type RuntimeConfig struct {
// ACL's to be used to service requests. This
// is the default. If the ACL is not in the cache,
// this acts like deny.
-// * async-cache - Same behaviour as extend-cache, but perform ACL
+// * async-cache - Same behavior as extend-cache, but perform ACL
// Lookups asynchronously when cache TTL is expired.
//
// hcl: acl.down_policy = ("allow"|"deny"|"extend-cache"|"async-cache")
@@ -361,7 +361,7 @@ type RuntimeConfig struct {
// flag: -datacenter string
Datacenter string

-// Defines the maximum stale value for discovery path. Defauls to "0s".
+// Defines the maximum stale value for discovery path. Defaults to "0s".
// Discovery paths are /v1/heath/ paths
//
// If not set to 0, it will try to perform stale read and perform only a
@@ -368,7 +368,7 @@ func (c *ConsulProvider) Sign(csr *x509.CertificateRequest) (string, error) {
sn := &big.Int{}
sn.SetUint64(idx + 1)
// Sign the certificate valid from 1 minute in the past, this helps it be
-// accepted right away even when nodes are not in close time sync accross the
+// accepted right away even when nodes are not in close time sync across the
// cluster. A minute is more than enough for typical DC clock drift.
effectiveNow := time.Now().Add(-1 * time.Minute)
template := x509.Certificate{
@@ -459,7 +459,7 @@ func (c *ConsulProvider) SignIntermediate(csr *x509.CertificateRequest) (string,
sn := &big.Int{}
sn.SetUint64(idx + 1)
// Sign the certificate valid from 1 minute in the past, this helps it be
-// accepted right away even when nodes are not in close time sync accross the
+// accepted right away even when nodes are not in close time sync across the
// cluster. A minute is more than enough for typical DC clock drift.
effectiveNow := time.Now().Add(-1 * time.Minute)
template := x509.Certificate{
@@ -537,7 +537,7 @@ func (c *ConsulProvider) CrossSignCA(cert *x509.Certificate) (string, error) {
template.AuthorityKeyId = keyId

// Sign the certificate valid from 1 minute in the past, this helps it be
-// accepted right away even when nodes are not in close time sync accross the
+// accepted right away even when nodes are not in close time sync across the
// cluster. A minute is more than enough for typical DC clock drift.
effectiveNow := time.Now().Add(-1 * time.Minute)
template.NotBefore = effectiveNow
@@ -624,9 +624,9 @@ func (c *ConsulProvider) generateCA(privateKey string, sn uint64) (string, error
serialNum := &big.Int{}
serialNum.SetUint64(sn)
template := x509.Certificate{
-SerialNumber: serialNum,
-Subject: pkix.Name{CommonName: name},
-URIs: []*url.URL{id.URI()},
+SerialNumber: serialNum,
+Subject: pkix.Name{CommonName: name},
+URIs: []*url.URL{id.URI()},
BasicConstraintsValid: true,
KeyUsage: x509.KeyUsageCertSign |
x509.KeyUsageCRLSign |
@@ -244,7 +244,7 @@ func testKeyID(t testing.T, raw interface{}) []byte {
// crypto/rand will never block and always reads from /dev/urandom on unix OSes
// which does not consume entropy.
//
-// If we find by profiling it's taking a lot of cycles we could optimise/cache
+// If we find by profiling it's taking a lot of cycles we could optimize/cache
// again but we at least need to use different keys for each distinct CA (when
// multiple CAs are generated at once e.g. to test cross-signing) and a
// different one again for the leafs otherwise we risk tests that have false
@@ -34,7 +34,7 @@ var (
)

// ParseCertURIFromString attempts to parse a string representation of a
-// certificate URI as a convenince helper around ParseCertURI.
+// certificate URI as a convenience helper around ParseCertURI.
func ParseCertURIFromString(input string) (CertURI, error) {
// Parse the certificate URI from the string
uriRaw, err := url.Parse(input)
@@ -38,7 +38,7 @@ func (id *SpiffeIDSigning) Authorize(ixn *structs.Intention) (bool, bool) {
// allowed to sign CSRs for that entity (i.e. represents the trust domain for
// that entity).
//
-// I choose to make this a fixed centralised method here for now rather than a
+// I choose to make this a fixed centralized method here for now rather than a
// method on CertURI interface since we don't intend this to be extensible
// outside and it's easier to reason about the security properties when they are
// all in one place with "whitelist" semantics.
@@ -15,7 +15,7 @@ import (
// HTTP API authz endpoint and in the gRPX xDS/ext_authz API for envoy.
//
// The ACL token and the auth request are provided and the auth decision (true
-// means authorised) and reason string are returned.
+// means authorized) and reason string are returned.
//
// If the request input is invalid the error returned will be a BadRequestError,
// if the token doesn't grant necessary access then an acl.ErrPermissionDenied
@@ -31,7 +31,7 @@ type ACL struct {
srv *Server
}

-// fileBootstrapResetIndex retrieves the reset index specified by the adminstrator from
+// fileBootstrapResetIndex retrieves the reset index specified by the administrator from
// the file on disk.
//
// Q: What is the bootstrap reset index?
@@ -36,7 +36,7 @@ var (

const (
// csrLimitWait is the maximum time we'll wait for a slot when CSR concurrency
-// limiting or rate limiting is occuring. It's intentionally short so small
+// limiting or rate limiting is occurring. It's intentionally short so small
// batches of requests can be accommodated when server has capacity (assuming
// signing one cert takes much less than this) but failing requests fast when
// a thundering herd comes along.
@@ -16,7 +16,7 @@ import (
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/testrpc"
"github.com/hashicorp/consul/testutil/retry"
-"github.com/hashicorp/net-rpc-msgpackrpc"
+msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/stretchr/testify/assert"
)
@@ -435,7 +435,7 @@ func TestConnectCASign_rateLimit(t *testing.T) {
// the test here isn't really the exact token bucket response more a sanity
// check that some limiting is being applied. Note that we can't just measure
// the time it took to send them all and infer how many should have succeeded
-// without some complex modelling of the token bucket algorithm.
+// without some complex modeling of the token bucket algorithm.
require.Truef(successCount >= 1, "at least 1 CSRs should have succeeded, got %d", successCount)
require.Truef(limitedCount >= 7, "at least 7 CSRs should have been rate limited, got %d", limitedCount)
}
@@ -16,7 +16,7 @@ import (
"github.com/hashicorp/consul/lib"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/memberlist"
-"github.com/hashicorp/net-rpc-msgpackrpc"
+msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/yamux"
)
@@ -417,7 +417,7 @@ RUN_QUERY:
err := fn(ws, state)
// Note we check queryOpts.MinQueryIndex is greater than zero to determine if
// blocking was requested by client, NOT meta.Index since the state function
-// might return zero if something is not initialised and care wasn't taken to
+// might return zero if something is not initialized and care wasn't taken to
// handle that special case (in practice this happened a lot so fixing it
// systematically here beats trying to remember to add zero checks in every
// state method). We also need to ensure that unless there is an error, we
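The note above is about which number decides whether to keep blocking. A compact sketch of that guard with invented names; it is not the full RUN_QUERY loop:

```go
package main

import "fmt"

// shouldBlock sketches the rule described in the comment: whether to keep
// blocking is decided by the client's MinQueryIndex (blocking was requested
// and nothing newer exists yet), never by the result index alone, which may
// legitimately be zero when state isn't initialized yet.
func shouldBlock(minQueryIndex, resultIndex uint64, timedOut bool, err error) bool {
	if err != nil || timedOut {
		return false
	}
	return minQueryIndex > 0 && resultIndex <= minQueryIndex
}

func main() {
	// Client asked to block past index 10; state still at index 10 -> block again.
	fmt.Println(shouldBlock(10, 10, false, nil)) // true
	// Uninitialized state returns index 0; a non-blocking client must not hang.
	fmt.Println(shouldBlock(0, 0, false, nil)) // false
}
```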
@@ -1046,7 +1046,7 @@ func TestStateStore_ACLToken_Delete(t *testing.T) {
t.Parallel()
s := testACLStateStore(t)

-// deletion of non-existant policies is not an error
+// deletion of non-existent policies is not an error
require.NoError(t, s.ACLTokenDeleteByAccessor(3, "ea58a09c-2100-4aef-816b-8ee0ade77dcd"))
require.NoError(t, s.ACLTokenDeleteBySecret(3, "376d0cae-dd50-4213-9668-2c7797a7fb2d"))
})
@@ -1461,7 +1461,7 @@ func TestStateStore_ACLPolicy_Delete(t *testing.T) {
t.Parallel()
s := testACLStateStore(t)

-// deletion of non-existant policies is not an error
+// deletion of non-existent policies is not an error
require.NoError(t, s.ACLPolicyDeleteByName(3, "not-found"))
require.NoError(t, s.ACLPolicyDeleteByID(3, "376d0cae-dd50-4213-9668-2c7797a7fb2d"))
})
@@ -803,7 +803,7 @@ func (s *Store) ensureServiceTxn(tx *memdb.Txn, idx uint64, node string, svc *st
serviceNode := existing.(*structs.ServiceNode)
entry.CreateIndex = serviceNode.CreateIndex
entry.ModifyIndex = serviceNode.ModifyIndex
-// We cannot return here because: we want to keep existing behaviour (ex: failed node lookup -> ErrMissingNode)
+// We cannot return here because: we want to keep existing behavior (ex: failed node lookup -> ErrMissingNode)
// It might be modified in future, but it requires changing many unit tests
// Enforcing saving the entry also ensures that if we add default values in .ToServiceNode()
// those values will be saved even if node is not really modified for a while.
@@ -360,7 +360,7 @@ func TestStore_IntentionMatch_table(t *testing.T) {
[][][]string{
{
// Note the first two have the same precedence so we rely on arbitrary
-// lexicographical tie-break behaviour.
+// lexicographical tie-break behavior.
{"foo", "bar", "bar", "*"},
{"foo", "bar", "foo", "*"},
{"*", "*", "*", "*"},
@@ -66,7 +66,7 @@ func testRegisterNodeWithMeta(t *testing.T, s *Store, idx uint64, nodeID string,

// testRegisterServiceWithChange registers a service and allow ensuring the consul index is updated
// even if service already exists if using `modifyAccordingIndex`.
-// This is done by setting the transation ID in "version" meta so service will be updated if it already exists
+// This is done by setting the transaction ID in "version" meta so service will be updated if it already exists
func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, serviceID string, modifyAccordingIndex bool) {
meta := make(map[string]string)
if modifyAccordingIndex {
@@ -96,7 +96,7 @@ func testRegisterServiceWithChange(t *testing.T, s *Store, idx uint64, nodeID, s
}
}

-// testRegisterService register a service with given transation idx
+// testRegisterService register a service with given transaction idx
// If the service already exists, transaction number might not be increased
// Use `testRegisterServiceWithChange()` if you want perform a registration that
// ensures the transaction is updated by setting idx in Meta of Service
@@ -143,7 +143,7 @@ func (t *Txn) Apply(args *structs.TxnRequest, reply *structs.TxnResponse) error
}

// Read is used to perform a read-only transaction that doesn't modify the state
-// store. This is much more scaleable since it doesn't go through Raft and
+// store. This is much more scalable since it doesn't go through Raft and
// supports staleness, so this should be preferred if you're just performing
// reads.
func (t *Txn) Read(args *structs.TxnReadRequest, reply *structs.TxnReadResponse) error {
@@ -1015,7 +1015,7 @@ func trimUDPResponse(req, resp *dns.Msg, udpAnswerLimit int) (trimmed bool) {
// will allow our responses to be compliant even if some downstream server
// uncompresses them.
// Even when size is too big for one single record, try to send it anyway
-// (usefull for 512 bytes messages)
+// (useful for 512 bytes messages)
for len(resp.Answer) > 1 && resp.Len() > maxSize {
// More than 100 bytes, find with a binary search
if resp.Len()-maxSize > 100 {
@@ -534,7 +534,7 @@ func (l *State) RemoveAliasCheck(checkID types.CheckID, srcServiceID string) {

// RemoveCheck is used to remove a health check from the local state.
// The agent will make a best effort to ensure it is deregistered
-// todo(fs): RemoveService returns an error for a non-existant service. RemoveCheck should as well.
+// todo(fs): RemoveService returns an error for a non-existent service. RemoveCheck should as well.
// todo(fs): Check code that calls this to handle the error.
func (l *State) RemoveCheck(id types.CheckID) error {
l.Lock()
@@ -774,13 +774,13 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token,
}

// Lock now. We can't lock earlier as l.Service would deadlock and shouldn't
-// anyway to minimise the critical section.
+// anyway to minimize the critical section.
l.Lock()
defer l.Unlock()

pToken := restoredProxyToken

-// Does this proxy instance allready exist?
+// Does this proxy instance already exist?
if existing, ok := l.managedProxies[svc.ID]; ok {
// Keep the existing proxy token so we don't have to restart proxy to
// re-inject token.
@@ -807,14 +807,14 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token,
// Allocate port if needed (min and max inclusive).
rangeLen := l.config.ProxyBindMaxPort - l.config.ProxyBindMinPort + 1
if svc.Port < 1 && l.config.ProxyBindMinPort > 0 && rangeLen > 0 {
-// This should be a really short list so don't bother optimising lookup yet.
+// This should be a really short list so don't bother optimizing lookup yet.
OUTER:
for _, offset := range rand.Perm(rangeLen) {
p := l.config.ProxyBindMinPort + offset
// See if this port was already allocated to another proxy
for _, other := range l.managedProxies {
if other.Proxy.ProxyService.Port == p {
-// allready taken, skip to next random pick in the range
+// already taken, skip to next random pick in the range
continue OUTER
}
}
@@ -90,7 +90,7 @@ func newState(ns *structs.NodeService, token string) (*state, error) {
}, nil
}

-// Watch initialised watches on all necessary cache data for the current proxy
+// Watch initialized watches on all necessary cache data for the current proxy
// registration state and returns a chan to observe updates to the
// ConfigSnapshot that contains all necessary config state. The chan is closed
// when the state is Closed.
@@ -154,7 +154,7 @@ func (p *Daemon) keepAlive(stopCh <-chan struct{}, exitedCh chan<- struct{}) {
// Timer is up, good!

case <-stopCh:
-// During our backoff wait, we've been signalled to
+// During our backoff wait, we've been signaled to
// quit, so just quit.
timer.Stop()
return
@@ -198,7 +198,7 @@ func (p *Daemon) keepAlive(stopCh <-chan struct{}, exitedCh chan<- struct{}) {
// We want a busy loop, but not too busy. 1 second between detecting a
// process death seems reasonable.
//
-// SUBTELTY: we must NOT select on stopCh here since the Stop function
+// SUBTLETY: we must NOT select on stopCh here since the Stop function
// assumes that as soon as this method returns and closes exitedCh, that
// the process is no longer running. If we are polling then we don't
// know that is true until we've polled again so we have to keep polling
@@ -445,7 +445,7 @@ func (p *Daemon) UnmarshalSnapshot(m map[string]interface{}) error {
return nil
}

-// daemonSnapshot is the structure of the marshalled data for snapshotting.
+// daemonSnapshot is the structure of the marshaled data for snapshotting.
//
// Note we don't have to store the ProxyId because this is stored directly
// within the manager snapshot and is restored automatically.
@@ -175,7 +175,7 @@ func TestDaemonLaunchesNewProcessGroup(t *testing.T) {
require.NoError(err)
// Yep the minus PGid is how you kill a whole process group in unix... no idea
// how this works on windows. We TERM no KILL since we rely on the child
-// catching the signal and deleting it's file to detect correct behaviour.
+// catching the signal and deleting it's file to detect correct behavior.
require.NoError(syscall.Kill(-pgid, syscall.SIGTERM))

_, err = parentCmd.Process.Wait()
@@ -326,7 +326,7 @@ func TestDaemonStop_killAdopted(t *testing.T) {

path := filepath.Join(td, "file")

-// In this test we want to ensure that gracefull/ungraceful stop works with
+// In this test we want to ensure that graceful/ungraceful stop works with
// processes that were adopted by current process but not started by it. (i.e.
// we have to poll them not use Wait).
//
@@ -361,7 +361,7 @@ func TestDaemonStop_killAdopted(t *testing.T) {
gracefulWait: 200 * time.Millisecond,
// Can't just set process as it will bypass intializing stopCh etc.
}
-// Adopt the pid from a fake state snapshot (this correctly initialises Daemon
+// Adopt the pid from a fake state snapshot (this correctly initializes Daemon
// for adoption)
fakeSnap := map[string]interface{}{
"Pid": childCmd.Process.Pid,
@@ -375,7 +375,7 @@ func TestDaemonStop_killAdopted(t *testing.T) {
require.NoError(d.Start())

// Wait for the file to exist (child was already running so this doesn't
-// gaurantee that Daemon is in "polling" state)
+// guarantee that Daemon is in "polling" state)
retry.Run(t, func(r *retry.R) {
_, err := os.Stat(path)
if err == nil {
@@ -269,7 +269,7 @@ func (m *Manager) Run() {

// Start the timer for snapshots. We don't use a ticker because disk
// IO can be slow and we don't want overlapping notifications. So we only
-// reset the timer once the snapshot is complete rather than continously.
+// reset the timer once the snapshot is complete rather than continuously.
snapshotTimer := time.NewTimer(m.SnapshotPeriod)
defer snapshotTimer.Stop()
@@ -74,8 +74,8 @@ type Proxy interface {
// for any proxies: proxy ID.
//
// UnmarshalSnapshot is called to restore the receiving Proxy from its
-// marshalled state. If UnmarshalSnapshot returns an error, the snapshot
-// is ignored and the marshalled snapshot will be lost. The manager will
+// marshaled state. If UnmarshalSnapshot returns an error, the snapshot
+// is ignored and the marshaled snapshot will be lost. The manager will
// log.
//
// This should save/restore enough state to be able to regain management
@@ -261,7 +261,7 @@ func TestHelperProcess(t *testing.T) {
log.Println("Started child")

// Wait "forever" (calling test chooses when we exit with signal/Wait to
-// minimise coordination).
+// minimize coordination).
for {
time.Sleep(time.Hour)
}
@@ -26,7 +26,7 @@ func (a *Agent) sidecarServiceID(serviceID string) string {
//
// The third return argument is the effective Token to use for the sidecar
// registration. This will be the same as the token parameter passed unless the
-// SidecarService definition contains a distint one.
+// SidecarService definition contains a distinct one.
func (a *Agent) sidecarServiceFromNodeService(ns *structs.NodeService, token string) (*structs.NodeService, []*structs.CheckType, string, error) {
if ns.Connect.SidecarService == nil {
return nil, nil, "", nil
@@ -92,7 +92,7 @@ type ConnectManagedProxy struct {
// Config is the arbitrary configuration data provided with the registration.
Config map[string]interface{}

-// Upstreams are the dependencies the proxy should setup outgoing listners for.
+// Upstreams are the dependencies the proxy should setup outgoing listeners for.
Upstreams Upstreams

// ProxyService is a pointer to the local proxy's service record for
@@ -9,7 +9,7 @@ import (
// ConnectProxyConfig describes the configuration needed for any proxy managed
// or unmanaged. It describes a single logical service's listener and optionally
// upstreams and sidecar-related config for a single instance. To describe a
-// centralised proxy that routed traffic for multiple services, a different one
+// centralized proxy that routed traffic for multiple services, a different one
// of these would be needed for each, sharing the same LogicalProxyID.
type ConnectProxyConfig struct {
// DestinationServiceName is required and is the name of the service to accept
@@ -119,7 +119,7 @@ type Upstream struct {
LocalBindPort int

// Config is an opaque config that is specific to the proxy process being run.
-// It can be used to pass abritrary configuration for this specific upstream
+// It can be used to pass arbitrary configuration for this specific upstream
// to the proxy.
Config map[string]interface{}
}
@@ -27,7 +27,7 @@ type ServiceDefinition struct {
Token string
EnableTagOverride bool
// DEPRECATED (ProxyDestination) - remove this when removing ProxyDestination
-// ProxyDestination is deprecated in favour of Proxy.DestinationServiceName
+// ProxyDestination is deprecated in favor of Proxy.DestinationServiceName
ProxyDestination string `json:",omitempty"`

// Proxy is the configuration set for Kind = connect-proxy. It is mandatory in
@@ -145,7 +145,7 @@ type QueryOptions struct {
// If there is a cached response that is older than the MaxAge, it is treated
// as a cache miss and a new fetch invoked. If the fetch fails, the error is
// returned. Clients that wish to allow for stale results on error can set
-// StaleIfError to a longer duration to change this behaviour. It is ignored
+// StaleIfError to a longer duration to change this behavior. It is ignored
// if the endpoint supports background refresh caching. See
// https://www.consul.io/api/index.html#agent-caching for more details.
MaxAge time.Duration
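The MaxAge/StaleIfError semantics described in this comment can be summarized in a few lines. This is an illustrative decision function, not the agent's cache code:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// decide sketches the rule above: a cached value older than MaxAge is treated
// as a miss and refetched; if the refetch fails, the stale value may still be
// served as long as its age is within StaleIfError.
func decide(age, maxAge, staleIfError time.Duration, refetch func() error) (servedStale bool, err error) {
	if age <= maxAge {
		return false, nil // cached value is fresh enough, use it as-is
	}
	if err := refetch(); err != nil {
		if age <= staleIfError {
			return true, nil // fetch failed but the stale value is acceptable
		}
		return false, err // too stale to fall back on, surface the error
	}
	return false, nil // refetched successfully
}

func main() {
	failing := func() error { return errors.New("backend unavailable") }
	stale, err := decide(30*time.Second, 10*time.Second, time.Minute, failing)
	fmt.Println(stale, err) // true <nil>: serve the stale entry instead of the error
}
```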
@@ -46,7 +46,7 @@ func makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot) (*envoy.Cluster, error) {
var c *envoy.Cluster
var err error

-// If we have overriden local cluster config try to parse it into an Envoy cluster
+// If we have overridden local cluster config try to parse it into an Envoy cluster
if clusterJSONRaw, ok := cfgSnap.Proxy.Config["envoy_local_cluster_json"]; ok {
if clusterJSON, ok := clusterJSONRaw.(string); ok {
c, err = makeClusterFromUserConfig(clusterJSON)
@@ -90,7 +90,7 @@ func makeUpstreamCluster(upstream structs.Upstream, cfgSnap *proxycfg.ConfigSnap
var c *envoy.Cluster
var err error

-// If we have overriden cluster config attempt to parse it into an Envoy cluster
+// If we have overridden cluster config attempt to parse it into an Envoy cluster
if clusterJSONRaw, ok := upstream.Config["envoy_cluster_json"]; ok {
if clusterJSON, ok := clusterJSONRaw.(string); ok {
c, err = makeClusterFromUserConfig(clusterJSON)
@@ -112,7 +112,7 @@ func makeListenerFromUserConfig(configJSON string) (*envoy.Listener, error) {
}

// Ensure that the first filter in each filter chain of a public listener is the
-// authz filter to prevent unauthorised access and that every filter chain uses
+// authz filter to prevent unauthorized access and that every filter chain uses
// our TLS certs. We might allow users to work around this later if there is a
// good use case but this is actually a feature for now as it allows them to
// specify custom listener params in config but still get our certs delivered
@@ -234,7 +234,7 @@ func (s *Server) process(stream ADSStream, reqCh <-chan *envoy.DiscoveryRequest)
case req, ok = <-reqCh:
if !ok {
// reqCh is closed when stream.Recv errors which is how we detect client
-// going away. AFAICT the stream.Context() is only cancelled once the
+// going away. AFAICT the stream.Context() is only canceled once the
// RPC method returns which it can't until we return from this one so
// there's no point in blocking on that.
return nil
@@ -93,7 +93,7 @@ type QueryOptions struct {
// If there is a cached response that is older than the MaxAge, it is treated
// as a cache miss and a new fetch invoked. If the fetch fails, the error is
// returned. Clients that wish to allow for stale results on error can set
-// StaleIfError to a longer duration to change this behaviour. It is ignored
+// StaleIfError to a longer duration to change this behavior. It is ignored
// if the endpoint supports background refresh caching. See
// https://www.consul.io/api/index.html#agent-caching for more details.
MaxAge time.Duration
@@ -14,7 +14,7 @@ import (
// even with a very large number of intentions, the size of the data gzipped
// over HTTP will be relatively small.
//
-// The Finder will only downlaod the intentions one time. This struct is
+// The Finder will only download the intentions one time. This struct is
// not expected to be used over a long period of time. Though it may be
// reused multile times, the intentions list is only downloaded once.
type Finder struct {
@@ -66,7 +66,7 @@ func (c *Conn) CopyBytes() error {
return err
}

-// Stats returns number of bytes transmitted and recieved. Transmit means bytes
+// Stats returns number of bytes transmitted and received. Transmit means bytes
// written to dst, receive means bytes written to src.
func (c *Conn) Stats() (txBytes, rxBytes uint64) {
return c.srcW.Written(), c.dstW.Written()
@@ -188,7 +188,7 @@ func ConsulResolverFromAddrFunc(client *api.Client) func(addr string) (Resolver,
// For now we force use of `.consul` TLD regardless of the configured domain
// on the cluster. That's because we don't know that domain here and it
// would be really complicated to discover it inline here. We do however
-// need to be able to distingush a hostname with the optional datacenter
+// need to be able to distinguish a hostname with the optional datacenter
// segment which we can't do unambiguously if we allow arbitrary trailing
// domains.
domain := ".consul"
@@ -156,7 +156,7 @@ func (s *Service) ServerTLSConfig() *tls.Config {
}

// Dial connects to a remote Connect-enabled server. The passed Resolver is used
-// to discover a single candidate instance which will be dialled and have it's
+// to discover a single candidate instance which will be dialed and have it's
// TLS certificate verified against the expected identity. Failures are returned
// directly with no retries. Repeated dials may use different instances
// depending on the Resolver implementation.
@@ -76,7 +76,7 @@ func TestPeerCertificates(t testing.T, service string, ca *structs.CARoot) []*x5
}

// TestServer runs a service listener that can be used to test clients. It's
-// behaviour can be controlled by the struct members.
+// behavior can be controlled by the struct members.
type TestServer struct {
// The service name to serve.
Service string
@@ -175,7 +175,7 @@ func extractCertURI(certs []*x509.Certificate) (*url.URL, error) {
return cert.URIs[0], nil
}

-// verifyServerCertMatchesURI is used on tls connections dialled to a connect
+// verifyServerCertMatchesURI is used on tls connections dialed to a connect
// server to ensure that the certificate it presented has the correct identity.
func verifyServerCertMatchesURI(certs []*x509.Certificate,
expected connect.CertURI) error {
@@ -253,7 +253,7 @@ func newServerSideVerifier(client *api.Client, serviceName string) verifierFunc
// clientSideVerifier is a verifierFunc that performs verification of certificates
// on the client end of the connection. For now it is just basic TLS
// verification since the identity check needs additional state and becomes
-// clunky to customise the callback for every outgoing request. That is done
+// clunky to customize the callback for every outgoing request. That is done
// within Service.Dial for now.
func clientSideVerifier(tlsCfg *tls.Config, rawCerts [][]byte) error {
_, err := verifyChain(tlsCfg, rawCerts, true)
@@ -303,10 +303,10 @@ func verifyChain(tlsCfg *tls.Config, rawCerts [][]byte, client bool) (*x509.Cert

// dynamicTLSConfig represents the state for returning a tls.Config that can
// have root and leaf certificates updated dynamically with all existing clients
-// and servers automatically picking up the changes. It requires initialising
+// and servers automatically picking up the changes. It requires initializing
// with a valid base config from which all the non-certificate and verification
// params are used. The base config passed should not be modified externally as
-// it is assumed to be serialised by the embedded mutex.
+// it is assumed to be serialized by the embedded mutex.
type dynamicTLSConfig struct {
base *tls.Config
@@ -7,7 +7,7 @@ import (

// StopChannelContext implements the context.Context interface
// You provide the channel to select on to determine whether
-// the context should be cancelled and other code such
+// the context should be canceled and other code such
// as the rate.Limiter will automatically use the channel
// appropriately
type StopChannelContext struct {
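For context, an adapter like StopChannelContext can be written by wrapping a stop channel in the four context.Context methods, so APIs that take a Context (such as rate.Limiter.Wait) observe the shutdown signal. The sketch below is a simplified, invented version rather than the type from this file:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// stopChanCtx adapts a plain stop channel into a context.Context.
// The type and field names here are made up for the illustration.
type stopChanCtx struct {
	stop <-chan struct{}
}

func (c *stopChanCtx) Deadline() (time.Time, bool)    { return time.Time{}, false }
func (c *stopChanCtx) Done() <-chan struct{}          { return c.stop }
func (c *stopChanCtx) Value(interface{}) interface{}  { return nil }
func (c *stopChanCtx) Err() error {
	select {
	case <-c.stop:
		return context.Canceled // a closed channel means the context is canceled
	default:
		return nil
	}
}

func main() {
	stop := make(chan struct{})
	ctx := &stopChanCtx{stop: stop}
	fmt.Println(ctx.Err()) // <nil>
	close(stop)
	fmt.Println(ctx.Err()) // context canceled
}
```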
@@ -41,13 +41,13 @@ type LogFile struct {
}

func (l *LogFile) openNew() error {
-// Extract the file extention
+// Extract the file extension
fileExt := filepath.Ext(l.fileName)
// If we have no file extension we append .log
if fileExt == "" {
fileExt = ".log"
}
-// Remove the file extention from the filename
+// Remove the file extension from the filename
fileName := strings.TrimSuffix(l.fileName, fileExt)
// New file name has the format : filename-timestamp.extension
createTime := now()
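The rotation naming that openNew builds toward ("filename-timestamp.extension") can be shown end to end. A small sketch; the timestamp format is an assumption since this hunk does not show it:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
	"time"
)

// rotatedName splits "consul.log" into name and extension, then produces
// "consul-<unix-nano>.log". UnixNano is only an assumed timestamp format.
func rotatedName(fileName string, createTime time.Time) string {
	fileExt := filepath.Ext(fileName)
	if fileExt == "" {
		fileExt = ".log" // no extension supplied, default to .log
	}
	name := strings.TrimSuffix(fileName, fileExt)
	return fmt.Sprintf("%s-%d%s", name, createTime.UnixNano(), fileExt)
}

func main() {
	fmt.Println(rotatedName("consul.log", time.Now()))
}
```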
@@ -302,7 +302,7 @@ func (c *Configurator) IncomingHTTPSConfig() (*tls.Config, error) {
}

// IncomingTLSConfig generates a *tls.Config for outgoing TLS connections for
-// checks. This function is seperated because there is an extra flag to
+// checks. This function is separated because there is an extra flag to
// consider for checks. EnableAgentTLSForChecks and InsecureSkipVerify has to
// be checked for checks.
func (c *Configurator) OutgoingTLSConfigForCheck(id string) (*tls.Config, error) {
@@ -63,7 +63,7 @@ type BlockingParamVal interface {
// Next is called when deciding which value to use on the next blocking call.
// It assumes the BlockingParamVal value it is called on is the most recent one
// returned and passes the previous one which may be nil as context. This
-// allows types to customise logic around ordering without assuming there is
+// allows types to customize logic around ordering without assuming there is
// an order. For example WaitIndexVal can check that the index didn't go
// backwards and if it did then reset to 0. Most other cases should just
// return themselves (the most recent value) to be used in the next request.
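The WaitIndexVal behavior mentioned above ("reset to 0 if the index went backwards") is easy to show in isolation. A simplified sketch that drops the BlockingParamVal interface plumbing:

```go
package main

import "fmt"

// waitIndex sketches Next for an index-based blocking parameter: if the new
// index went backwards relative to the previous one (e.g. after a leader
// change or snapshot restore), reset to 0 so the next query starts from
// scratch rather than blocking forever on an index that will never be reached.
type waitIndex uint64

func (w waitIndex) Next(previous waitIndex) waitIndex {
	if previous != 0 && w < previous {
		return 0 // index moved backwards; don't trust it
	}
	return w // normal case: reuse the most recent index
}

func main() {
	fmt.Println(waitIndex(42).Next(40)) // 42
	fmt.Println(waitIndex(5).Next(40))  // 0
}
```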