Regenerate files according to 1.19.2 formatter

This commit is contained in:
Chris S. Kim 2022-10-21 15:58:06 -04:00 committed by Chris S. Kim
parent 0aaf49fed7
commit bde57c0dd0
57 changed files with 342 additions and 735 deletions

View File

@ -63,7 +63,6 @@ func IsErrPermissionDenied(err error) bool {
// Arguably this should be some sort of union type.
// The usage of Cause and the rest of the fields is entirely disjoint.
//
type PermissionDeniedError struct {
Cause string

View File

@ -225,7 +225,7 @@ func (ac *AutoConfig) introToken() (string, error) {
// recordInitialConfiguration is responsible for recording the AutoConfigResponse from
// the AutoConfig.InitialConfiguration RPC. It is an all-in-one function to do the following
// * update the Agent token in the token store
// - update the Agent token in the token store
func (ac *AutoConfig) recordInitialConfiguration(resp *pbautoconf.AutoConfigResponse) error {
ac.autoConfigResponse = resp

View File

@ -262,10 +262,10 @@ func (c *ConnectCALeaf) rootWatcher(ctx context.Context) {
//
// Somewhat arbitrarily the current strategy looks like this:
//
// 0 60% 90%
// Issued [------------------------------|===============|!!!!!] Expires
// 72h TTL: 0 ~43h ~65h
// 1h TTL: 0 36m 54m
// 0 60% 90%
// Issued [------------------------------|===============|!!!!!] Expires
// 72h TTL: 0 ~43h ~65h
// 1h TTL: 0 36m 54m
//
// Where |===| is the soft renewal period where we jitter for the first attempt
// and |!!!| is the danger zone where we just try immediately.

View File

@ -3,6 +3,7 @@ package cachetype
// RPC is an interface that an RPC client must implement. This is a helper
// interface that is implemented by the agent delegate so that Type
// implementations can request RPC access.
//
//go:generate mockery --name RPC --inpackage
type RPC interface {
RPC(method string, args interface{}, reply interface{}) error

View File

@ -8,6 +8,7 @@ import (
//
// This interface is typically implemented by request structures in
// the agent/structs package.
//
//go:generate mockery --name Request --inpackage
type Request interface {
// CacheInfo returns information used for caching this request.

1
agent/cache/type.go vendored
View File

@ -5,6 +5,7 @@ import (
)
// Type implements the logic to fetch certain types of data.
//
//go:generate mockery --name Type --inpackage
type Type interface {
// Fetch fetches a single unique item.

View File

@ -47,7 +47,7 @@ type FileWatcherEvent struct {
Filenames []string
}
//NewFileWatcher create a file watcher that will watch all the files/folders from configFiles
// NewFileWatcher create a file watcher that will watch all the files/folders from configFiles
// if success a fileWatcher will be returned and a nil error
// otherwise an error and a nil fileWatcher are returned
func NewFileWatcher(configFiles []string, logger hclog.Logger) (Watcher, error) {

View File

@ -6,9 +6,12 @@ import (
)
// KindName is a value type useful for maps. You can use:
// map[KindName]Payload
//
// map[KindName]Payload
//
// instead of:
// map[string]map[string]Payload
//
// map[string]map[string]Payload
type KindName struct {
Kind string
Name string

View File

@ -45,9 +45,10 @@ func CompactUID() (string, error) {
// specific purpose.
//
// Format is:
// {provider}-{uniqueID_first8}.{pri|sec}.ca.<trust_domain_first_8>.consul
//
// trust domain is truncated to keep the whole name short
// {provider}-{uniqueID_first8}.{pri|sec}.ca.<trust_domain_first_8>.consul
//
// trust domain is truncated to keep the whole name short
func CACN(provider, uniqueID, trustDomain string, primaryDC bool) string {
providerSan := invalidDNSNameChars.ReplaceAllString(strings.ToLower(provider), "")
typ := "pri"

View File

@ -225,19 +225,19 @@ type ACLResolverSettings struct {
// - Resolving roles remotely via an ACL.RoleResolve RPC
//
// Remote Resolution:
// Remote resolution can be done synchronously or asynchronously depending
// on the ACLDownPolicy in the Config passed to the resolver.
//
// When the down policy is set to async-cache and we have already cached values
// then go routines will be spawned to perform the RPCs in the background
// and then will update the cache with either the positive or negative result.
// Remote resolution can be done synchronously or asynchronously depending
// on the ACLDownPolicy in the Config passed to the resolver.
//
// When the down policy is set to extend-cache or the token/policy/role is not already
// cached then the same go routines are spawned to do the RPCs in the background.
// However in this mode channels are created to receive the results of the RPC
// and are registered with the resolver. Those channels are immediately read/blocked
// upon.
// When the down policy is set to async-cache and we have already cached values
// then go routines will be spawned to perform the RPCs in the background
// and then will update the cache with either the positive or negative result.
//
// When the down policy is set to extend-cache or the token/policy/role is not already
// cached then the same go routines are spawned to do the RPCs in the background.
// However in this mode channels are created to receive the results of the RPC
// and are registered with the resolver. Those channels are immediately read/blocked
// upon.
type ACLResolver struct {
config ACLResolverSettings
logger hclog.Logger

View File

@ -13,8 +13,8 @@ import (
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/go-bexpr"
"github.com/hashicorp/go-hclog"
memdb "github.com/hashicorp/go-memdb"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/acl/resolver"
@ -108,19 +108,18 @@ type ACL struct {
// fileBootstrapResetIndex retrieves the reset index specified by the administrator from
// the file on disk.
//
// Q: What is the bootstrap reset index?
// A: If you happen to lose access to all tokens capable of ACL management you need a way
// to get back into your system. This allows an admin to write the current
// bootstrap "index" into a special file on disk to override the mechanism preventing
// a second token bootstrap. The index will be retrieved by an API call to /v1/acl/bootstrap
// When already bootstrapped this API will return the reset index necessary within
// the error response. Once set in the file, the bootstrap API can be used again to
// get a new token.
//
// Q: Why is the reset index not in the config?
// A: We want to be able to remove the reset index once we have used it. This prevents
// accidentally allowing bootstrapping yet again after a snapshot restore.
// Q: What is the bootstrap reset index?
// A: If you happen to lose access to all tokens capable of ACL management you need a way
// to get back into your system. This allows an admin to write the current
// bootstrap "index" into a special file on disk to override the mechanism preventing
// a second token bootstrap. The index will be retrieved by an API call to /v1/acl/bootstrap
// When already bootstrapped this API will return the reset index necessary within
// the error response. Once set in the file, the bootstrap API can be used again to
// get a new token.
//
// Q: Why is the reset index not in the config?
// A: We want to be able to remove the reset index once we have used it. This prevents
// accidentally allowing bootstrapping yet again after a snapshot restore.
func (a *ACL) fileBootstrapResetIndex() uint64 {
// Determine the file path to check
path := filepath.Join(a.srv.config.DataDir, aclBootstrapReset)

View File

@ -27,7 +27,6 @@ import (
//
// - POST /apis/authentication.k8s.io/v1/tokenreviews
// - GET /api/v1/namespaces/<NAMESPACE>/serviceaccounts/<NAME>
//
type TestAPIServer struct {
srv *httptest.Server
caCert string

View File

@ -127,7 +127,7 @@ type clientOrServer interface {
// joinLAN is a convenience function for
//
// member.JoinLAN("127.0.0.1:"+leader.config.SerfLANConfig.MemberlistConfig.BindPort)
// member.JoinLAN("127.0.0.1:"+leader.config.SerfLANConfig.MemberlistConfig.BindPort)
func joinLAN(t *testing.T, member clientOrServer, leader *Server) {
t.Helper()
joinLANWithOptions(t, member, leader, true)
@ -184,7 +184,7 @@ func joinLANWithOptions(t *testing.T, member clientOrServer, leader *Server, doM
// joinWAN is a convenience function for
//
// member.JoinWAN("127.0.0.1:"+leader.config.SerfWANConfig.MemberlistConfig.BindPort)
// member.JoinWAN("127.0.0.1:"+leader.config.SerfWANConfig.MemberlistConfig.BindPort)
func joinWAN(t *testing.T, member, leader *Server) {
t.Helper()
joinWANWithOptions(t, member, leader, true)

View File

@ -14,9 +14,9 @@ import (
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
connlimit "github.com/hashicorp/go-connlimit"
"github.com/hashicorp/go-connlimit"
"github.com/hashicorp/go-hclog"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-raftchunking"
"github.com/hashicorp/memberlist"
"github.com/hashicorp/raft"
@ -992,19 +992,19 @@ type blockingQueryResponseMeta interface {
//
// The query function must follow these rules:
//
// 1. to access data it must use the passed in state.Store.
// 2. it must set the responseMeta.Index to an index greater than
// opts.GetMinQueryIndex if the results returned by the query have changed.
// 3. any channels added to the memdb.WatchSet must unblock when the results
// returned by the query have changed.
// 1. to access data it must use the passed in state.Store.
// 2. it must set the responseMeta.Index to an index greater than
// opts.GetMinQueryIndex if the results returned by the query have changed.
// 3. any channels added to the memdb.WatchSet must unblock when the results
// returned by the query have changed.
//
// To ensure optimal performance of the query, the query function should make a
// best-effort attempt to follow these guidelines:
//
// 1. only set responseMeta.Index to an index greater than
// opts.GetMinQueryIndex when the results returned by the query have changed.
// 2. any channels added to the memdb.WatchSet should only unblock when the
// results returned by the query have changed.
// 1. only set responseMeta.Index to an index greater than
// opts.GetMinQueryIndex when the results returned by the query have changed.
// 2. any channels added to the memdb.WatchSet should only unblock when the
// results returned by the query have changed.
func (s *Server) blockingQuery(
opts blockingQueryOptions,
responseMeta blockingQueryResponseMeta,
@ -1142,7 +1142,7 @@ func (s *Server) consistentRead() error {
defer metrics.MeasureSince([]string{"rpc", "consistentRead"}, time.Now())
future := s.raft.VerifyLeader()
if err := future.Error(); err != nil {
return err //fail fast if leader verification fails
return err // fail fast if leader verification fails
}
// poll consistent read readiness, wait for up to RPCHoldTimeout milliseconds
if s.isReadyForConsistentReads() {
@ -1197,16 +1197,16 @@ func (s *Server) rpcQueryTimeout(queryTimeout time.Duration) time.Duration {
//
// Notes:
//
// * The definition of "unauthenticated" here is incomplete, as it doesn't
// account for the fact that operators can modify the anonymous token with
// custom policies, or set namespace default policies. As these scenarios
// are less common and this flag is a best-effort UX improvement, we think
// the trade-off for reduced complexity is acceptable.
// - The definition of "unauthenticated" here is incomplete, as it doesn't
// account for the fact that operators can modify the anonymous token with
// custom policies, or set namespace default policies. As these scenarios
// are less common and this flag is a best-effort UX improvement, we think
// the trade-off for reduced complexity is acceptable.
//
// * This method assumes that the given token has already been validated (and
// will only check whether it is blank or not). It's a safe assumption because
// ResultsFilteredByACLs is only set to true when applying the already-resolved
// token's policies.
// - This method assumes that the given token has already been validated (and
// will only check whether it is blank or not). It's a safe assumption because
// ResultsFilteredByACLs is only set to true when applying the already-resolved
// token's policies.
func maskResultsFilteredByACLs(token string, meta blockingQueryResponseMeta) {
if token == "" {
meta.SetResultsFilteredByACLs(false)

View File

@ -73,10 +73,9 @@ func verifyCheckServiceNodeSort(t *testing.T, nodes structs.CheckServiceNodes, e
//
// Here's the layout of the nodes:
//
// node3 node2 node5 node4 node1
// | | | | | | | | | | |
// 0 1 2 3 4 5 6 7 8 9 10 (ms)
//
// node3 node2 node5 node4 node1
// | | | | | | | | | | |
// 0 1 2 3 4 5 6 7 8 9 10 (ms)
func seedCoordinates(t *testing.T, codec rpc.ClientCodec, server *Server) {
// Register some nodes.
for i := 0; i < 5; i++ {

View File

@ -1267,8 +1267,8 @@ func (s *Store) ServicesByNodeMeta(ws memdb.WatchSet, filters map[string]string,
// The service_last_extinction is set to the last raft index when a service
// was unregistered (or 0 if no services were ever unregistered). This
// allows blocking queries to
// * return when the last instance of a service is removed
// * block until an instance for this service is available, or another
// - return when the last instance of a service is removed
// - block until an instance for this service is available, or another
// service is unregistered.
func maxIndexForService(tx ReadTxn, serviceName string, serviceExists, checks bool, entMeta *acl.EnterpriseMeta, peerName string) uint64 {
idx, _ := maxIndexAndWatchChForService(tx, serviceName, serviceExists, checks, entMeta, peerName)
@ -1280,8 +1280,8 @@ func maxIndexForService(tx ReadTxn, serviceName string, serviceExists, checks bo
// index. The service_last_extinction is set to the last raft index when a
// service was unregistered (or 0 if no services were ever unregistered). This
// allows blocking queries to
// * return when the last instance of a service is removed
// * block until an instance for this service is available, or another
// - return when the last instance of a service is removed
// - block until an instance for this service is available, or another
// service is unregistered.
//
// It also _may_ return a watch chan to add to a WatchSet. It will only return

View File

@ -54,8 +54,8 @@ func catalogUpdateNodeIndexes(tx WriteTxn, idx uint64, nodeName string, _ *acl.E
// catalogUpdateServicesIndexes upserts the max index for the entire services table with varying levels
// of granularity (no-op if `idx` is lower than what exists for that index key):
// - all services
// - all services in a specified peer (including internal)
// - all services
// - all services in a specified peer (including internal)
func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta, peerName string) error {
// overall services index for snapshot
if err := indexUpdateMaxTxn(tx, idx, tableServices); err != nil {
@ -72,8 +72,8 @@ func catalogUpdateServicesIndexes(tx WriteTxn, idx uint64, _ *acl.EnterpriseMeta
// catalogUpdateServiceKindIndexes upserts the max index for the ServiceKind with varying levels
// of granularity (no-op if `idx` is lower than what exists for that index key):
// - all services of ServiceKind
// - all services of ServiceKind in a specified peer (including internal)
// - all services of ServiceKind
// - all services of ServiceKind in a specified peer (including internal)
func catalogUpdateServiceKindIndexes(tx WriteTxn, idx uint64, kind structs.ServiceKind, _ *acl.EnterpriseMeta, peerName string) error {
base := "service_kind." + kind.Normalized()
// service-kind index

View File

@ -418,7 +418,7 @@ func indexServiceNameFromHealthCheck(hc *structs.HealthCheck) ([]byte, error) {
return b.Bytes(), nil
}
// gatewayServicesTableSchema returns a new table schema used to store information
// gatewayServicesTableSchema returns a new table schema used to store information
// about services associated with terminating gateways.
func gatewayServicesTableSchema() *memdb.TableSchema {
return &memdb.TableSchema{

View File

@ -7,7 +7,7 @@ import (
"fmt"
"strings"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/consul/acl"
"github.com/hashicorp/consul/agent/configentry"
@ -65,6 +65,7 @@ func configIntentionsConvertToList(iter memdb.ResultIterator, _ *acl.EnterpriseM
// found in a list of exported-services config entries. For OSS, namespace is not considered, so a match is one of:
// - the service name matches
// - the service name is a wildcard
//
// This value can be used to filter exported-services config entries for a given service name.
func getExportedServicesMatchServiceNames(serviceName string, entMeta *acl.EnterpriseMeta) []structs.ServiceName {
return []structs.ServiceName{

View File

@ -16,7 +16,6 @@ import (
// data is deleted from the KV store, the "latest" row can go backwards if the
// newest row is removed. The tombstones provide a way to ensure time doesn't
// move backwards within some interval.
//
type TombstoneGC struct {
// ttl sets the TTL for tombstones.
ttl time.Duration

View File

@ -54,33 +54,33 @@ func noopDone() {}
// ServerLocalBlockingQuery performs a blocking query similar to the pre-existing blockingQuery
// method on the agent/consul.Server type. There are a few key differences.
//
// 1. This function makes use of Go 1.18 generics. The function is parameterized with two
// types. The first is the ResultType which can be anything. Having this be parameterized
// instead of using interface{} allows us to simplify the call sites so that no type
// coercion from interface{} to the real type is necessary. The second parameterized type
// is something that VERY loosely resembles a agent/consul/state.Store type. The StateStore
// interface in this package has a single method to get the stores abandon channel so we
// know when a snapshot restore is occurring and can act accordingly. We could have not
// parameterized this type and used a real *state.Store instead but then we would have
// concrete dependencies on the state package and it would make it a little harder to
// test this function.
// 1. This function makes use of Go 1.18 generics. The function is parameterized with two
// types. The first is the ResultType which can be anything. Having this be parameterized
// instead of using interface{} allows us to simplify the call sites so that no type
// coercion from interface{} to the real type is necessary. The second parameterized type
// is something that VERY loosely resembles a agent/consul/state.Store type. The StateStore
// interface in this package has a single method to get the stores abandon channel so we
// know when a snapshot restore is occurring and can act accordingly. We could have not
// parameterized this type and used a real *state.Store instead but then we would have
// concrete dependencies on the state package and it would make it a little harder to
// test this function.
//
// We could have also avoided the need to use a ResultType parameter by taking the route
// the original blockingQuery method did and to just assume all callers close around
// a pointer to their results and can modify it as necessary. That way of doing things
// feels a little gross so I have taken this one a different direction. The old way
// also gets especially gross with how we have to push concerns of spurious wakeup
// suppression down into every call site.
// We could have also avoided the need to use a ResultType parameter by taking the route
// the original blockingQuery method did and to just assume all callers close around
// a pointer to their results and can modify it as necessary. That way of doing things
// feels a little gross so I have taken this one a different direction. The old way
// also gets especially gross with how we have to push concerns of spurious wakeup
// suppression down into every call site.
//
// 2. This method has no internal timeout and can potentially run forever until a state
// change is observed. If there is a desire to have a timeout, that should be built into
// the context.Context passed as the first argument.
// 2. This method has no internal timeout and can potentially run forever until a state
// change is observed. If there is a desire to have a timeout, that should be built into
// the context.Context passed as the first argument.
//
// 3. This method bakes in some newer functionality around hashing of results to prevent sending
// back data when nothing has actually changed. With the old blockingQuery method this has to
// be done within the closure passed to the method which means the same bit of code is duplicated
// in many places. As this functionality isn't necessary in many scenarios whether to opt-in to
// that behavior is an argument to this function.
// 3. This method bakes in some newer functionality around hashing of results to prevent sending
// back data when nothing has actually changed. With the old blockingQuery method this has to
// be done within the closure passed to the method which means the same bit of code is duplicated
// in many places. As this functionality isn't necessary in many scenarios whether to opt-in to
// that behavior is an argument to this function.
//
// Similar to the older method:
//
@ -88,21 +88,20 @@ func noopDone() {}
//
// The query function must follow these rules:
//
// 1. To access data it must use the passed in StoreType (which will be a state.Store when
// everything gets stitched together outside of unit tests).
// 2. It must return an index greater than the minIndex if the results returned by the query
// have changed.
// 3. Any channels added to the memdb.WatchSet must unblock when the results
// returned by the query have changed.
// 1. To access data it must use the passed in StoreType (which will be a state.Store when
// everything gets stitched together outside of unit tests).
// 2. It must return an index greater than the minIndex if the results returned by the query
// have changed.
// 3. Any channels added to the memdb.WatchSet must unblock when the results
// returned by the query have changed.
//
// To ensure optimal performance of the query, the query function should make a
// best-effort attempt to follow these guidelines:
//
// 1. Only return an index greater than the minIndex.
// 2. Any channels added to the memdb.WatchSet should only unblock when the
// results returned by the query have changed. This might be difficult
// to do when blocking on non-existent data.
//
// 1. Only return an index greater than the minIndex.
// 2. Any channels added to the memdb.WatchSet should only unblock when the
// results returned by the query have changed. This might be difficult
// to do when blocking on non-existent data.
func ServerLocalBlockingQuery[ResultType any, StoreType StateStore](
ctx context.Context,
getStore func() StoreType,

View File

@ -7,12 +7,13 @@ import (
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/prometheus"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib/retry"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-memdb"
"golang.org/x/time/rate"
"github.com/hashicorp/consul/agent/consul/state"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib/retry"
)
var StatsGauges = []prometheus.GaugeDefinition{
@ -132,7 +133,6 @@ func (c *Controller) updateDrainRateLimit(numProxies uint32) {
// 0-512 proxies: drain 1 per second
// 513-2815 proxies: linearly scaled by 1/s for every additional 256 proxies
// 2816+ proxies: drain 10 per second
//
func calcRateLimit(numProxies uint32) rate.Limit {
perSecond := math.Floor((float64(numProxies) - 256) / 256)

View File

@ -28,9 +28,9 @@ var ErrCapacityReached = errors.New("active session limit reached")
//
// It is the session-holder's responsibility to:
//
// 1. Call End on the session when finished.
// 2. Receive on the session's Terminated channel and exit (e.g. close the gRPC
// stream) when it is closed.
// 1. Call End on the session when finished.
// 2. Receive on the session's Terminated channel and exit (e.g. close the gRPC
// stream) when it is closed.
//
// The maximum number of concurrent sessions is controlled with SetMaxSessions.
// If there are more than the given maximum sessions already in-flight,
@ -114,9 +114,9 @@ func (l *SessionLimiter) SetDrainRateLimit(limit rate.Limit) {
//
// It is the session-holder's responsibility to:
//
// 1. Call End on the session when finished.
// 2. Receive on the session's Terminated channel and exit (e.g. close the gRPC
// stream) when it is closed.
// 1. Call End on the session when finished.
// 2. Receive on the session's Terminated channel and exit (e.g. close the gRPC
// stream) when it is closed.
func (l *SessionLimiter) BeginSession() (Session, error) {
if !l.hasCapacity() {
return nil, ErrCapacityReached
@ -129,8 +129,8 @@ func (l *SessionLimiter) BeginSession() (Session, error) {
// Note: hasCapacity is *best effort*. As we do not hold l.mu it's possible that:
//
// - max has changed by the time we compare it to inFlight.
// - inFlight < max now, but increases before we create a new session.
// - max has changed by the time we compare it to inFlight.
// - inFlight < max now, but increases before we create a new session.
//
// This is acceptable for our uses, especially because excess sessions will
// eventually be drained.
@ -146,8 +146,8 @@ func (l *SessionLimiter) hasCapacity() bool {
// Note: overCapacity is *best effort*. As we do not hold l.mu it's possible that:
//
// - max has changed by the time we compare it to inFlight.
// - inFlight > max now, but decreases before we terminate a session.
// - max has changed by the time we compare it to inFlight.
// - inFlight > max now, but decreases before we terminate a session.
func (l *SessionLimiter) overCapacity() bool {
max := atomic.LoadUint32(&l.max)
if max == Unlimited {

View File

@ -18,6 +18,7 @@ import (
)
// Client interface exposes HCP operations that can be invoked by Consul
//
//go:generate mockery --name Client --with-expecter --inpackage
type Client interface {
FetchBootstrap(ctx context.Context) (*BootstrapConfig, error)

View File

@ -50,7 +50,7 @@ type MockClient_DiscoverServers_Call struct {
}
// DiscoverServers is a helper method to define mock.On call
// - ctx context.Context
// - ctx context.Context
func (_e *MockClient_Expecter) DiscoverServers(ctx interface{}) *MockClient_DiscoverServers_Call {
return &MockClient_DiscoverServers_Call{Call: _e.mock.On("DiscoverServers", ctx)}
}
@ -96,7 +96,7 @@ type MockClient_FetchBootstrap_Call struct {
}
// FetchBootstrap is a helper method to define mock.On call
// - ctx context.Context
// - ctx context.Context
func (_e *MockClient_Expecter) FetchBootstrap(ctx interface{}) *MockClient_FetchBootstrap_Call {
return &MockClient_FetchBootstrap_Call{Call: _e.mock.On("FetchBootstrap", ctx)}
}
@ -133,8 +133,8 @@ type MockClient_PushServerStatus_Call struct {
}
// PushServerStatus is a helper method to define mock.On call
// - ctx context.Context
// - status *ServerStatus
// - ctx context.Context
// - status *ServerStatus
func (_e *MockClient_Expecter) PushServerStatus(ctx interface{}, status interface{}) *MockClient_PushServerStatus_Call {
return &MockClient_PushServerStatus_Call{Call: _e.mock.On("PushServerStatus", ctx, status)}
}

View File

@ -133,7 +133,7 @@ type MockProvider_Listen_Call struct {
}
// Listen is a helper method to define mock.On call
// - capability string
// - capability string
func (_e *MockProvider_Expecter) Listen(capability interface{}) *MockProvider_Listen_Call {
return &MockProvider_Listen_Call{Call: _e.mock.On("Listen", capability)}
}
@ -269,7 +269,7 @@ type MockProvider_UpdateMeta_Call struct {
}
// UpdateMeta is a helper method to define mock.On call
// - _a0 map[string]string
// - _a0 map[string]string
func (_e *MockProvider_Expecter) UpdateMeta(_a0 interface{}) *MockProvider_UpdateMeta_Call {
return &MockProvider_UpdateMeta_Call{Call: _e.mock.On("UpdateMeta", _a0)}
}

View File

@ -320,8 +320,8 @@ func (s *HTTPHandlers) nodeName() string {
// this regular expression is applied, so the regular expression substitution
// results in:
//
// /v1/acl/clone/foo?token=bar -> /v1/acl/clone/<hidden>?token=bar
// ^---- $1 ----^^- $2 -^^-- $3 --^
// /v1/acl/clone/foo?token=bar -> /v1/acl/clone/<hidden>?token=bar
// ^---- $1 ----^^- $2 -^^-- $3 --^
//
// And then the loop that looks for parameters called "token" does the last
// step to get to the final redacted form.

View File

@ -33,7 +33,6 @@ package agent
import (
"bytes"
"fmt"
"strings"
"testing"
"time"
@ -791,21 +790,7 @@ var translateServiceIDTCs = []translateKeyTestCase{
},
}
// ACLPolicySetRequest:
// Policy structs.ACLPolicy
// ID string
// Name string
// Description string
// Rules string
// Syntax acl.SyntaxVersion
// Datacenters []string
// Hash []uint8
// RaftIndex structs.RaftIndex
// CreateIndex uint64
// ModifyIndex uint64
// Datacenter string
// WriteRequest structs.WriteRequest
// Token string
// structs.ACLPolicySetRequest
func TestDecodeACLPolicyWrite(t *testing.T) {
for _, tc := range hashTestCases {
@ -833,35 +818,7 @@ func TestDecodeACLPolicyWrite(t *testing.T) {
}
}
// ACLTokenSetRequest:
// ACLToken structs.ACLToken
// AccessorID string
// SecretID string
// Description string
// Policies []structs.ACLTokenPolicyLink
// ID string
// Name string
// Roles []structs.ACLTokenRoleLink
// ID string
// Name string
// ServiceIdentities []*structs.ACLServiceIdentity
// ServiceName string
// Datacenters []string
// Type string
// Rules string
// Local bool
// AuthMethod string
// ExpirationTime *time.Time
// ExpirationTTL time.Duration
// CreateTime time.Time
// Hash []uint8
// RaftIndex structs.RaftIndex
// CreateIndex uint64
// ModifyIndex uint64
// Create bool
// Datacenter string
// WriteRequest structs.WriteRequest
// Token string
// structs.ACLTokenSetRequest
func TestDecodeACLToken(t *testing.T) {
for _, tc := range translateValueTestCases {
t.Run(tc.desc, func(t *testing.T) {
@ -1079,76 +1036,7 @@ func TestDecodeAgentRegisterCheck(t *testing.T) {
}
// ServiceDefinition:
// Kind structs.ServiceKind
// ID string
// Name string
// Tags []string
// Address string
// TaggedAddresses map[string]structs.ServiceAddress
// Address string
// Port int
// Meta map[string]string
// Port int
// Check structs.CheckType
// CheckID types.CheckID
// Name string
// Status string
// Notes string
// ScriptArgs []string
// HTTP string
// Header map[string][]string
// Method string
// TCP string
// Interval time.Duration
// AliasNode string
// AliasService string
// DockerContainerID string
// Shell string
// GRPC string
// GRPCUseTLS bool
// TLSServerName string
// TLSSkipVerify bool
// Timeout time.Duration
// TTL time.Duration
// ProxyHTTP string
// ProxyGRPC string
// DeregisterCriticalServiceAfter time.Duration
// OutputMaxSize int
// Checks structs.CheckTypes
// Weights *structs.Weights
// Passing int
// Warning int
// Token string
// EnableTagOverride bool
// Proxy *structs.ConnectProxyConfig
// DestinationServiceName string
// DestinationServiceID string
// LocalServiceAddress string
// LocalServicePort int
// Config map[string]interface {}
// Upstreams structs.Upstreams
// DestinationType string
// DestinationNamespace string
// DestinationName string
// Datacenter string
// LocalBindAddress string
// LocalBindPort int
// Config map[string]interface {}
// MeshGateway structs.MeshGatewayConfig
// Mode structs.MeshGatewayMode
// MeshGateway structs.MeshGatewayConfig
// Expose structs.ExposeConfig
// Checks bool
// Paths []structs.ExposePath
// ListenerPort int
// Path string
// LocalPathPort int
// Protocol string
// ParsedFromCheck bool
// Connect *structs.ServiceConnect
// Native bool
// SidecarService *structs.ServiceDefinition
// structs.ServiceDefinition
func TestDecodeAgentRegisterService(t *testing.T) {
// key translation tests:
// decodeCB fields:
@ -1969,133 +1857,7 @@ func TestDecodeAgentRegisterService(t *testing.T) {
}
// RegisterRequest:
// Datacenter string
// ID types.NodeID
// Node string
// Address string
// TaggedAddresses map[string]string
// NodeMeta map[string]string
// Service *structs.NodeService
// Kind structs.ServiceKind
// ID string
// Service string
// Tags []string
// Address string
// TaggedAddresses map[string]structs.ServiceAddress
// Address string
// Port int
// Meta map[string]string
// Port int
// Weights *structs.Weights
// Passing int
// Warning int
// EnableTagOverride bool
// Proxy structs.ConnectProxyConfig
// DestinationServiceName string
// DestinationServiceID string
// LocalServiceAddress string
// LocalServicePort int
// Config map[string]interface {}
// Upstreams structs.Upstreams
// DestinationType string
// DestinationNamespace string
// DestinationName string
// Datacenter string
// LocalBindAddress string
// LocalBindPort int
// Config map[string]interface {}
// MeshGateway structs.MeshGatewayConfig
// Mode structs.MeshGatewayMode
// MeshGateway structs.MeshGatewayConfig
// Expose structs.ExposeConfig
// Checks bool
// Paths []structs.ExposePath
// ListenerPort int
// Path string
// LocalPathPort int
// Protocol string
// ParsedFromCheck bool
// Connect structs.ServiceConnect
// Native bool
// SidecarService *structs.ServiceDefinition
// Kind structs.ServiceKind
// ID string
// Name string
// Tags []string
// Address string
// TaggedAddresses map[string]structs.ServiceAddress
// Meta map[string]string
// Port int
// Check structs.CheckType
// CheckID types.CheckID
// Name string
// Status string
// Notes string
// ScriptArgs []string
// HTTP string
// Header map[string][]string
// Method string
// TCP string
// Interval time.Duration
// AliasNode string
// AliasService string
// DockerContainerID string
// Shell string
// GRPC string
// GRPCUseTLS bool
// TLSServerName string
// TLSSkipVerify bool
// Timeout time.Duration
// TTL time.Duration
// ProxyHTTP string
// ProxyGRPC string
// DeregisterCriticalServiceAfter time.Duration
// OutputMaxSize int
// Checks structs.CheckTypes
// Weights *structs.Weights
// Token string
// EnableTagOverride bool
// Proxy *structs.ConnectProxyConfig
// Connect *structs.ServiceConnect
// LocallyRegisteredAsSidecar bool
// RaftIndex structs.RaftIndex
// CreateIndex uint64
// ModifyIndex uint64
// Check *structs.HealthCheck
// Node string
// CheckID types.CheckID
// Name string
// Status string
// Notes string
// Output string
// ServiceID string
// ServiceName string
// ServiceTags []string
// Definition structs.HealthCheckDefinition
// HTTP string
// TLSServerName string
// TLSSkipVerify bool
// Header map[string][]string
// Method string
// TCP string
// Interval time.Duration
// OutputMaxSize uint
// Timeout time.Duration
// DeregisterCriticalServiceAfter time.Duration
// ScriptArgs []string
// DockerContainerID string
// Shell string
// GRPC string
// GRPCUseTLS bool
// AliasNode string
// AliasService string
// TTL time.Duration
// RaftIndex structs.RaftIndex
// Checks structs.HealthChecks
// SkipNodeUpdate bool
// WriteRequest structs.WriteRequest
// Token string
// structs.RegisterRequest
func TestDecodeCatalogRegister(t *testing.T) {
for _, tc := range durationTestCases {
t.Run(tc.desc, func(t *testing.T) {
@ -2164,28 +1926,7 @@ func TestDecodeCatalogRegister(t *testing.T) {
}
}
// IntentionRequest:
// Datacenter string
// Op structs.IntentionOp
// Intention *structs.Intention
// ID string
// Description string
// SourceNS string
// SourceName string
// DestinationNS string
// DestinationName string
// SourceType structs.IntentionSourceType
// Action structs.IntentionAction
// Meta map[string]string
// Precedence int
// CreatedAt time.Time mapstructure:'-'
// UpdatedAt time.Time mapstructure:'-'
// Hash []uint8
// RaftIndex structs.RaftIndex
// CreateIndex uint64
// ModifyIndex uint64
// WriteRequest structs.WriteRequest
// Token string
// structs.IntentionRequest
func TestDecodeIntentionCreate(t *testing.T) {
for _, tc := range append(hashTestCases, timestampTestCases...) {
t.Run(tc.desc, func(t *testing.T) {
@ -2300,22 +2041,7 @@ func TestDecodeOperatorAutopilotConfiguration(t *testing.T) {
}
}
// SessionRequest:
// Datacenter string
// Op structs.SessionOp
// Session structs.Session
// ID string
// Name string
// Node string
// Checks []types.CheckID
// LockDelay time.Duration
// Behavior structs.SessionBehavior
// TTL string
// RaftIndex structs.RaftIndex
// CreateIndex uint64
// ModifyIndex uint64
// WriteRequest structs.WriteRequest
// Token string
// structs.SessionRequest
func TestDecodeSessionCreate(t *testing.T) {
// outSession var is shared among test cases b/c of the
// nature/signature of the FixupChecks callback.
@ -2454,137 +2180,7 @@ func TestDecodeSessionCreate(t *testing.T) {
}
}
// TxnOps:
// KV *api.KVTxnOp
// Verb api.KVOp
// Key string
// Value []uint8
// Flags uint64
// Index uint64
// Session string
// Node *api.NodeTxnOp
// Verb api.NodeOp
// Node api.Node
// ID string
// Node string
// Address string
// Datacenter string
// TaggedAddresses map[string]string
// Meta map[string]string
// CreateIndex uint64
// ModifyIndex uint64
// Service *api.ServiceTxnOp
// Verb api.ServiceOp
// Node string
// Service api.AgentService
// Kind api.ServiceKind
// ID string
// Service string
// Tags []string
// Meta map[string]string
// Port int
// Address string
// TaggedAddresses map[string]api.ServiceAddress
// Address string
// Port int
// Weights api.AgentWeights
// Passing int
// Warning int
// EnableTagOverride bool
// CreateIndex uint64
// ModifyIndex uint64
// ContentHash string
// Proxy *api.AgentServiceConnectProxyConfig
// DestinationServiceName string
// DestinationServiceID string
// LocalServiceAddress string
// LocalServicePort int
// Config map[string]interface {}
// Upstreams []api.Upstream
// DestinationType api.UpstreamDestType
// DestinationNamespace string
// DestinationName string
// Datacenter string
// LocalBindAddress string
// LocalBindPort int
// Config map[string]interface {}
// MeshGateway api.MeshGatewayConfig
// Mode api.MeshGatewayMode
// MeshGateway api.MeshGatewayConfig
// Expose api.ExposeConfig
// Checks bool
// Paths []api.ExposePath
// ListenerPort int
// Path string
// LocalPathPort int
// Protocol string
// ParsedFromCheck bool
// Connect *api.AgentServiceConnect
// Native bool
// SidecarService *api.AgentServiceRegistration
// Kind api.ServiceKind
// ID string
// Name string
// Tags []string
// Port int
// Address string
// TaggedAddresses map[string]api.ServiceAddress
// EnableTagOverride bool
// Meta map[string]string
// Weights *api.AgentWeights
// Check *api.AgentServiceCheck
// CheckID string
// Name string
// Args []string
// DockerContainerID string
// Shell string
// Interval string
// Timeout string
// TTL string
// HTTP string
// Header map[string][]string
// Method string
// TCP string
// Status string
// Notes string
// TLSServerName string
// TLSSkipVerify bool
// GRPC string
// GRPCUseTLS bool
// AliasNode string
// AliasService string
// DeregisterCriticalServiceAfter string
// Checks api.AgentServiceChecks
// Proxy *api.AgentServiceConnectProxyConfig
// Connect *api.AgentServiceConnect
// Check *api.CheckTxnOp
// Verb api.CheckOp
// Check api.HealthCheck
// Node string
// CheckID string
// Name string
// Status string
// Notes string
// Output string
// ServiceID string
// ServiceName string
// ServiceTags []string
// Definition api.HealthCheckDefinition
// HTTP string
// Header map[string][]string
// Method string
// Body string
// TLSServerName string
// TLSSkipVerify bool
// TCP string
// IntervalDuration time.Duration
// TimeoutDuration time.Duration
// DeregisterCriticalServiceAfterDuration time.Duration
// Interval api.ReadableDuration
// Timeout api.ReadableDuration
// DeregisterCriticalServiceAfter api.ReadableDuration
// CreateIndex uint64
// ModifyIndex uint64
// structs.TxnOps
func TestDecodeTxnConvertOps(t *testing.T) {
for _, tc := range durationTestCases {
t.Run(tc.desc, func(t *testing.T) {

View File

@ -18,8 +18,8 @@ import (
// The TLS record layer governs the very first byte. The available options start
// at 20 as per:
//
// - v1.2: https://tools.ietf.org/html/rfc5246#appendix-A.1
// - v1.3: https://tools.ietf.org/html/rfc8446#appendix-B.1
// - v1.2: https://tools.ietf.org/html/rfc5246#appendix-A.1
// - v1.3: https://tools.ietf.org/html/rfc8446#appendix-B.1
//
// Note: this indicates that '0' is 'invalid'. Given that we only care about
// the first byte of a long-lived connection this is irrelevant, since you must

View File

@ -12,43 +12,42 @@
// The following diagram depicts the component relationships on a server, as
// this is the more complex mode of operation:
//
// +-------+ 1. +------------+
// | Local | ◀------------▶ | Local |
// | State | | State Sync |
// +-------+ +-----+------+
// ▲ |
// | +---------------+ | 2.
// 4. | 4a. | Local | |
// | +-▶ | Config Source +-+ |
// | | +---------------+ | |
// | | ▼ ▼
// +--------+ 3. +-+-+-----------+ 6. +----------+ 2a. +----------+
// | xDS +---▶ | Catalog +-----▶ | proxycfg +----▶ | proxycfg |
// | Server | ◀---+ Config Source +-----▶ | Manager +--+ | State |
// +--------+ 8. +----+----------+ 7. +----------+ | +----------+
// 5. | |
// ▼ 7a. | +----------+
// +-------+ +-▶ | proxycfg |
// | State | | State |
// | Store | +----------+
// +-------+
//
// 1. local.Sync watches the agent's local state for changes.
// 2. If any sidecar proxy or gateway services are registered to the local agent
// they are sync'd to the proxycfg.Manager.
// 2a. proxycfg.Manager creates a state object for the service and begins
// pre-fetching data (go to 8).
// 3. Client begins a stream and the xDS server calls Watch on its ConfigSource -
// on a client agent this would be a local config source, on a server it would
// be a catalog config source.
// 4. The catalog config source will check if service is registered locally.
// 4a. If the service *is* registered locally it hands off to the local config
// source, which calls Watch on the proxycfg manager (and serves the pre-
// fetched data).
// 5. Otherwise, it fetches the service from the state store.
// 6. It calls Watch on the proxycfg manager.
// 7. It registers the service with the proxycfg manager.
// 7a. See: 2a.
// 8. xDS server receives snapshots of configuration data whenever it changes.
// +-------+ 1. +------------+
// | Local | ◀------------▶ | Local |
// | State | | State Sync |
// +-------+ +-----+------+
// ▲ |
// | +---------------+ | 2.
// 4. | 4a. | Local | |
// | +-▶ | Config Source +-+ |
// | | +---------------+ | |
// | | ▼ ▼
// +--------+ 3. +-+-+-----------+ 6. +----------+ 2a. +----------+
// | xDS +---▶ | Catalog +-----▶ | proxycfg +----▶ | proxycfg |
// | Server | ◀---+ Config Source +-----▶ | Manager +--+ | State |
// +--------+ 8. +----+----------+ 7. +----------+ | +----------+
// 5. | |
// ▼ 7a. | +----------+
// +-------+ +-▶ | proxycfg |
// | State | | State |
// | Store | +----------+
// +-------+
//
// 1. local.Sync watches the agent's local state for changes.
// 2. If any sidecar proxy or gateway services are registered to the local agent
// they are sync'd to the proxycfg.Manager.
// 2a. proxycfg.Manager creates a state object for the service and begins
// pre-fetching data (go to 8).
// 3. Client begins a stream and the xDS server calls Watch on its ConfigSource -
// on a client agent this would be a local config source, on a server it would
// be a catalog config source.
// 4. The catalog config source will check if service is registered locally.
// 4a. If the service *is* registered locally it hands off to the local config
// source, which calls Watch on the proxycfg manager (and serves the pre-
// fetched data).
// 5. Otherwise, it fetches the service from the state store.
// 6. It calls Watch on the proxycfg manager.
// 7. It registers the service with the proxycfg manager.
// 7a. See: 2a.
// 8. xDS server receives snapshots of configuration data whenever it changes.
package proxycfg

View File

@ -401,14 +401,14 @@ func upstreamIDForDC2(uid UpstreamID) UpstreamID {
// routine. This allows the test to be fully synchronous and deterministic while still being able
// to validate the logic of most of the watching and state updating.
//
// The general strategy here is to
// The general strategy here is to:
//
// 1. Initialize a state with a call to newState + setting some of the extra stuff like the CacheNotifier
// We will not be using the CacheNotifier to send notifications but calling handleUpdate ourselves
// 2. Iterate through a list of verification stages performing validation and updates for each.
// a. Ensure that the required watches are in place and validate they are correct
// b. Process a bunch of UpdateEvents by calling handleUpdate
// c. Validate that the ConfigSnapshot has been updated appropriately
// 1. Initialize a state with a call to newState + setting some of the extra stuff like the CacheNotifier
// We will not be using the CacheNotifier to send notifications but calling handleUpdate ourselves
// 2. Iterate through a list of verification stages performing validation and updates for each.
// a. Ensure that the required watches are in place and validate they are correct
// b. Process a bunch of UpdateEvents by calling handleUpdate
// c. Validate that the ConfigSnapshot has been updated appropriately
func TestState_WatchesAndUpdates(t *testing.T) {
t.Parallel()

View File

@ -9,12 +9,13 @@ import (
"testing"
"time"
"github.com/hashicorp/serf/coordinate"
"github.com/hashicorp/serf/serf"
"github.com/hashicorp/consul/agent/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/serf/coordinate"
"github.com/hashicorp/serf/serf"
"github.com/stretchr/testify/require"
)
@ -96,10 +97,10 @@ func (m *mockCluster) AddLANMember(dc, name, role string, coord *coordinate.Coor
//
// Here's the layout of the nodes:
//
// /---- dc1 ----\ /- dc2 -\ /- dc0 -\
// node2 node1 node3 node1 node0
// | | | | | | | | | | |
// 0 1 2 3 4 5 6 7 8 9 10 (ms)
// /---- dc1 ----\ /- dc2 -\ /- dc0 -\
// node2 node1 node3 node1 node0
// | | | | | | | | | | |
// 0 1 2 3 4 5 6 7 8 9 10 (ms)
//
// We also include a node4 in dc1 with no known coordinate, as well as a
// mysterious dcX with no nodes with known coordinates.

View File

@ -1297,7 +1297,6 @@ func (r *ServiceResolverRedirect) isEmpty() bool {
// - Service, ServiceSubset, Namespace, Datacenters, and Targets cannot all be
// empty at once. When Targets is defined, the other fields should not be
// populated.
//
type ServiceResolverFailover struct {
// Service is the service to resolve instead of the default as the failover
// group of instances (optional).

View File

@ -147,7 +147,6 @@ func (q *QueryOptions) SetFilter(filter string) {
q.Filter = filter
}
//
func (m *QueryMeta) GetIndex() uint64 {
if m != nil {
return m.Index

View File

@ -431,10 +431,10 @@ type rbacLocalInfo struct {
// Enterprise). Each intention in this flat list (sorted by precedence) can either
// be an allow rule or a deny rule. Here's a concrete example of this at work:
//
// intern/trusted-app => billing/payment-svc : ALLOW (prec=9)
// intern/* => billing/payment-svc : DENY (prec=8)
// */* => billing/payment-svc : ALLOW (prec=7)
// ::: ACL default policy ::: : DENY (prec=N/A)
// intern/trusted-app => billing/payment-svc : ALLOW (prec=9)
// intern/* => billing/payment-svc : DENY (prec=8)
// */* => billing/payment-svc : ALLOW (prec=7)
// ::: ACL default policy ::: : DENY (prec=N/A)
//
// In contrast, Envoy lets you either configure a filter to be based on an
// allow-list or a deny-list based on the action attribute of the RBAC rules
@ -452,25 +452,25 @@ type rbacLocalInfo struct {
// models. For clarity I'll rewrite the earlier example intentions in an
// abbreviated form:
//
// A : ALLOW
// B : DENY
// C : ALLOW
// <default> : DENY
// A : ALLOW
// B : DENY
// C : ALLOW
// <default> : DENY
//
// 1. Given that the overall intention default is set to deny, we start by
// choosing to build an allow-list in Envoy (this is also the variant that I find
// easier to think about).
// 2. Next we traverse the list in precedence order (top down) and any DENY
// intentions are combined with later intentions using logical operations.
// 3. Now that all of the intentions result in the same action (allow) we have
// successfully removed precedence and we can express this in as a set of Envoy
// RBAC policies.
// 1. Given that the overall intention default is set to deny, we start by
// choosing to build an allow-list in Envoy (this is also the variant that I find
// easier to think about).
// 2. Next we traverse the list in precedence order (top down) and any DENY
// intentions are combined with later intentions using logical operations.
// 3. Now that all of the intentions result in the same action (allow) we have
// successfully removed precedence and we can express this in as a set of Envoy
// RBAC policies.
//
// After this the earlier A/B/C/default list becomes:
//
// A : ALLOW
// C AND NOT(B) : ALLOW
// <default> : DENY
// A : ALLOW
// C AND NOT(B) : ALLOW
// <default> : DENY
//
// Which really is just an allow-list of [A, C AND NOT(B)]
func makeRBACRules(

View File

@ -538,7 +538,6 @@ func TestAPI_ACLToken_Clone(t *testing.T) {
require.Equal(t, cloned, read)
}
//
func TestAPI_AuthMethod_List(t *testing.T) {
t.Parallel()
c, s := makeACLClient(t)

View File

@ -8,8 +8,9 @@ import (
"reflect"
"time"
consulapi "github.com/hashicorp/consul/api"
"github.com/hashicorp/go-hclog"
consulapi "github.com/hashicorp/consul/api"
)
const (
@ -133,7 +134,7 @@ OUTER:
return nil
}
//Deprecated: Use RunwithClientAndHclog
// Deprecated: Use RunwithClientAndHclog
func (p *Plan) RunWithClientAndLogger(client *consulapi.Client, logger *log.Logger) error {
p.client = client

View File

@ -239,14 +239,14 @@ func ExtractNodeIdentities(nodeIdents []string) ([]*api.ACLNodeIdentity, error)
// TestKubernetesJWT_A is a valid service account jwt extracted from a minikube setup.
//
// {
// "iss": "kubernetes/serviceaccount",
// "kubernetes.io/serviceaccount/namespace": "default",
// "kubernetes.io/serviceaccount/secret.name": "admin-token-qlz42",
// "kubernetes.io/serviceaccount/service-account.name": "admin",
// "kubernetes.io/serviceaccount/service-account.uid": "738bc251-6532-11e9-b67f-48e6c8b8ecb5",
// "sub": "system:serviceaccount:default:admin"
// }
// {
// "iss": "kubernetes/serviceaccount",
// "kubernetes.io/serviceaccount/namespace": "default",
// "kubernetes.io/serviceaccount/secret.name": "admin-token-qlz42",
// "kubernetes.io/serviceaccount/service-account.name": "admin",
// "kubernetes.io/serviceaccount/service-account.uid": "738bc251-6532-11e9-b67f-48e6c8b8ecb5",
// "sub": "system:serviceaccount:default:admin"
// }
const TestKubernetesJWT_A = "eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImFkbWluLXRva2VuLXFsejQyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiNzM4YmMyNTEtNjUzMi0xMWU5LWI2N2YtNDhlNmM4YjhlY2I1Iiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50OmRlZmF1bHQ6YWRtaW4ifQ.ixMlnWrAG7NVuTTKu8cdcYfM7gweS3jlKaEsIBNGOVEjPE7rtXtgMkAwjQTdYR08_0QBjkgzy5fQC5ZNyglSwONJ-bPaXGvhoH1cTnRi1dz9H_63CfqOCvQP1sbdkMeRxNTGVAyWZT76rXoCUIfHP4LY2I8aab0KN9FTIcgZRF0XPTtT70UwGIrSmRpxW38zjiy2ymWL01cc5VWGhJqVysmWmYk3wNp0h5N57H_MOrz4apQR4pKaamzskzjLxO55gpbmZFC76qWuUdexAR7DT2fpbHLOw90atN_NlLMY-VrXyW3-Ei5EhYaVreMB9PSpKwkrA4jULITohV-sxpa1LA"
// TestKubernetesJWT_B is a valid service account jwt extracted from a minikube setup.

View File

@ -6,8 +6,9 @@ import (
"sync"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/consul/api"
)
const (
@ -69,11 +70,10 @@ type RegisterMonitor struct {
//
// This is a basic state machine with the following transitions:
//
// * idle => running, stopped
// * running => stopping, stopped
// * stopping => stopped
// * stopped => <>
//
// - idle => running, stopped
// - running => stopping, stopped
// - stopping => stopped
// - stopped => <>
type registerRunState uint8
const (

View File

@ -15,37 +15,37 @@ import (
//
// hcl.Decode-s into:
//
// map[string]interface {}{
// "sub":[]map[string]interface {}{
// map[string]interface {}{
// "v1":[]map[string]interface {}{
// map[string]interface {}{
// "field":"value1"
// }
// }
// },
// map[string]interface {}{
// "v2":[]map[string]interface {}{
// map[string]interface {}{
// "field":"value2"
// }
// }
// }
// }
// }
// map[string]interface {}{
// "sub":[]map[string]interface {}{
// map[string]interface {}{
// "v1":[]map[string]interface {}{
// map[string]interface {}{
// "field":"value1"
// }
// }
// },
// map[string]interface {}{
// "v2":[]map[string]interface {}{
// map[string]interface {}{
// "field":"value2"
// }
// }
// }
// }
// }
//
// but json.Unmarshal-s into the more expected:
//
// map[string]interface {}{
// "sub":map[string]interface {}{
// "v1":map[string]interface {}{
// "field":"value1"
// },
// "v2":map[string]interface {}{
// "field":"value2"
// }
// }
// }
// map[string]interface {}{
// "sub":map[string]interface {}{
// "v1":map[string]interface {}{
// "field":"value1"
// },
// "v2":map[string]interface {}{
// "field":"value2"
// }
// }
// }
//
// The strange part is that the following HCL:
//
@ -53,22 +53,22 @@ import (
//
// hcl.Decode-s into:
//
// map[string]interface {}{
// "sub":[]map[string]interface {}{
// map[string]interface {}{
// "v1":[]map[string]interface {}{
// map[string]interface {}{
// "field":"value1"
// }
// },
// "v2":[]map[string]interface {}{
// map[string]interface {}{
// "field":"value2"
// }
// }
// }
// }
// }
// map[string]interface {}{
// "sub":[]map[string]interface {}{
// map[string]interface {}{
// "v1":[]map[string]interface {}{
// map[string]interface {}{
// "field":"value1"
// }
// },
// "v2":[]map[string]interface {}{
// map[string]interface {}{
// "field":"value2"
// }
// }
// }
// }
// }
//
// Which is the "correct" value assuming you did the patch-slice-of-maps correction.
//

View File

@ -10,10 +10,11 @@ import (
"testing"
"time"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/sdk/testutil"
)
func TestValidateCommand_noTabs(t *testing.T) {
@ -147,7 +148,7 @@ func expectFiles(t *testing.T, caPath, keyPath string) (*x509.Certificate, crypt
// switchToTempDir is meant to be used in a defer statement like:
//
// defer switchToTempDir(t, testDir)()
// defer switchToTempDir(t, testDir)()
//
// This exploits the fact that the body of a defer is evaluated
// EXCEPT for the final function call invocation inline with the code

View File

@ -10,11 +10,12 @@ import (
"strings"
"testing"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/sdk/testutil"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/require"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/sdk/testutil"
caCreate "github.com/hashicorp/consul/command/tls/ca/create"
)
@ -285,7 +286,7 @@ func createCA(t *testing.T, domain string) {
// switchToTempDir is meant to be used in a defer statement like:
//
// defer switchToTempDir(t, testDir)()
// defer switchToTempDir(t, testDir)()
//
// This exploits the fact that the body of a defer is evaluated
// EXCEPT for the final function call invocation inline with the code

View File

@ -3,26 +3,26 @@
//
// Example usage:
//
// $ go run connect/certgen/certgen.go -out-dir /tmp/connect-certs
// $ go run connect/certgen/certgen.go -out-dir /tmp/connect-certs
//
// You can verify a given leaf with a given root using:
//
// $ openssl verify -verbose -CAfile ca1-ca.cert.pem ca1-svc-db.cert.pem
// $ openssl verify -verbose -CAfile ca1-ca.cert.pem ca1-svc-db.cert.pem
//
// Note that to verify via the cross-signed intermediate, openssl requires it to
// be bundled with the _root_ CA bundle and will ignore the cert if it's passed
// with the subject. You can do that with:
//
// $ openssl verify -verbose -CAfile \
// <(cat ca1-ca.cert.pem ca2-xc-by-ca1.cert.pem) \
// ca2-svc-db.cert.pem
// ca2-svc-db.cert.pem: OK
// $ openssl verify -verbose -CAfile \
// <(cat ca1-ca.cert.pem ca2-xc-by-ca1.cert.pem) \
// ca2-svc-db.cert.pem
// ca2-svc-db.cert.pem: OK
//
// Note that the same leaf and root without the intermediate should fail:
//
// $ openssl verify -verbose -CAfile ca1-ca.cert.pem ca2-svc-db.cert.pem
// ca2-svc-db.cert.pem: CN = db
// error 20 at 0 depth lookup:unable to get local issuer certificate
// $ openssl verify -verbose -CAfile ca1-ca.cert.pem ca2-svc-db.cert.pem
// ca2-svc-db.cert.pem: CN = db
// error 20 at 0 depth lookup:unable to get local issuer certificate
//
// NOTE: THIS IS A QUIRK OF OPENSSL; in Connect we distribute the roots alone
// and stable intermediates like the XC cert to the _leaf_.
@ -35,9 +35,10 @@ import (
"log"
"os"
"github.com/mitchellh/go-testing-interface"
"github.com/hashicorp/consul/agent/connect"
"github.com/hashicorp/consul/agent/structs"
"github.com/mitchellh/go-testing-interface"
)
func main() {

View File

@ -49,7 +49,7 @@ func testConnPairSetup(t *testing.T) (net.Conn, net.Conn, func()) {
// testConnPipelineSetup creates a pipeline consisting of two TCP connection
// pairs and a Conn that copies bytes between them. Data flow looks like this:
//
// src1 <---> dst1 <== Conn.CopyBytes ==> src2 <---> dst2
// src1 <---> dst1 <== Conn.CopyBytes ==> src2 <---> dst2
//
// The returned values are the src1 and dst2 which should be able to send and
// receive to each other via the Conn, the Conn itself (not running), and a

View File

@ -11,11 +11,12 @@ import (
"strconv"
"strings"
"github.com/hashicorp/consul/internal/go-sso/oidcauth/internal/strutil"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-hclog"
"github.com/mitchellh/pointerstructure"
"golang.org/x/oauth2"
"github.com/hashicorp/consul/internal/go-sso/oidcauth/internal/strutil"
)
func contextWithHttpClient(ctx context.Context, client *http.Client) context.Context {
@ -63,11 +64,11 @@ func (a *Authenticator) extractClaims(allClaims map[string]interface{}) (*Claims
// claims and claims mappings. The referenced claims must be strings and the
// claims mappings must be of the structure:
//
// {
// "/some/claim/pointer": "metadata_key1",
// "another_claim": "metadata_key2",
// ...
// }
// {
// "/some/claim/pointer": "metadata_key1",
// "another_claim": "metadata_key2",
// ...
// }
func extractStringMetadata(logger hclog.Logger, allClaims map[string]interface{}, claimMappings map[string]string) (map[string]string, error) {
metadata := make(map[string]string)
for source, target := range claimMappings {
@ -90,11 +91,11 @@ func extractStringMetadata(logger hclog.Logger, allClaims map[string]interface{}
// of claims and claims mappings. The referenced claims must be strings and
// the claims mappings must be of the structure:
//
// {
// "/some/claim/pointer": "metadata_key1",
// "another_claim": "metadata_key2",
// ...
// }
// {
// "/some/claim/pointer": "metadata_key1",
// "another_claim": "metadata_key2",
// ...
// }
func extractListMetadata(logger hclog.Logger, allClaims map[string]interface{}, listClaimMappings map[string]string) (map[string][]string, error) {
out := make(map[string][]string)
for source, target := range listClaimMappings {

View File

@ -19,7 +19,8 @@ import (
// Aliases must be lowercase, as keys are compared case-insensitive.
//
// Example alias tag:
// MyField []string `alias:"old_field_name,otherfieldname"`
//
// MyField []string `alias:"old_field_name,otherfieldname"`
//
// This hook should ONLY be used to maintain backwards compatibility with
// deprecated keys. For new structures use mapstructure struct tags to set the
@ -143,7 +144,7 @@ type mapstructureFieldTags struct {
// the target is a slice. This is necessary because this hook would have converted
// the initial slices into single values on the first pass.
//
// Background
// # Background
//
// HCL allows for repeated blocks which forces it to store structures
// as []map[string]interface{} instead of map[string]interface{}. This is an

View File

@ -47,12 +47,11 @@ var typByteSlice = reflect.TypeOf([]byte{})
//
// In particular we're looking to replace two cases the msgpack codec causes:
//
// 1.) String values get turned into byte slices. JSON will base64-encode
// this and we don't want that, so we convert them back to strings.
//
// 2.) Nested maps turn into map[interface{}]interface{}. JSON cannot
// encode this, so we need to turn it back into map[string]interface{}.
// 1.) String values get turned into byte slices. JSON will base64-encode
// this and we don't want that, so we convert them back to strings.
//
// 2.) Nested maps turn into map[interface{}]interface{}. JSON cannot
// encode this, so we need to turn it back into map[string]interface{}.
type mapWalker struct {
lastValue reflect.Value // lastValue of map, required for replacement
loc, lastLoc reflectwalk.Location // locations

View File

@ -12,7 +12,7 @@ type Routine func(ctx context.Context) error
// cancelCh is the ctx.Done()
// When cancel() is called, if the routine is running a blocking call (e.g. some ACL replication RPCs),
// stoppedCh won't be closed till the blocking call returns, while cancelCh will be closed immediately.
// stoppedCh won't be closed till the blocking call returns, while cancelCh will be closed immediately.
// cancelCh is used to properly detect routine running status between cancel() and close(stoppedCh)
type routineTracker struct {
cancel context.CancelFunc
@ -110,9 +110,9 @@ func (m *Manager) execute(ctx context.Context, name string, routine Routine, don
}
// Caveat: The returned stoppedCh indicates that the routine is completed
// It's possible that ctx is canceled, but stoppedCh not yet closed
// Use mgr.IsRunning(name) rather than this stoppedCh to tell whether the
// instance is still running (not cancelled or completed).
// It's possible that ctx is canceled, but stoppedCh not yet closed
// Use mgr.IsRunning(name) rather than this stoppedCh to tell whether the
// instance is still running (not cancelled or completed).
func (m *Manager) Stop(name string) <-chan struct{} {
instance := m.stopInstance(name)
if instance == nil {

View File

@ -12,7 +12,7 @@ import (
//
// Example:
//
// m = TranslateKeys(m, map[string]string{"snake_case": "CamelCase"})
// m = TranslateKeys(m, map[string]string{"snake_case": "CamelCase"})
//
// If the canonical string provided is the empty string, the effect is to stop
// recursing into any key matching the left hand side. In this case the left
@ -27,13 +27,14 @@ import (
// where the clash with key names in other parts of the definition :sob:
//
// Example:
// m - TranslateKeys(m, map[string]string{
// "foo_bar": "FooBar",
// "widget.config": "",
// // Assume widgets is an array, this will prevent recursing into any
// // item's config field
// "widgets.config": "",
// })
//
// m - TranslateKeys(m, map[string]string{
// "foo_bar": "FooBar",
// "widget.config": "",
// // Assume widgets is an array, this will prevent recursing into any
// // item's config field
// "widgets.config": "",
// })
//
// Deprecated: Use lib/decode.HookTranslateKeys instead.
func TranslateKeys(v map[string]interface{}, dict map[string]string) {

View File

@ -15,7 +15,7 @@ var (
now = time.Now
)
//LogFile is used to setup a file based logger that also performs log rotation
// LogFile is used to setup a file based logger that also performs log rotation
type LogFile struct {
//Name of the log file
fileName string

View File

@ -296,6 +296,7 @@ type GetEnvoyBootstrapParamsRequest struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to NodeSpec:
//
// *GetEnvoyBootstrapParamsRequest_NodeId
// *GetEnvoyBootstrapParamsRequest_NodeName
NodeSpec isGetEnvoyBootstrapParamsRequest_NodeSpec `protobuf_oneof:"node_spec"`

View File

@ -281,6 +281,7 @@ type ConfigEntry struct {
EnterpriseMeta *pbcommon.EnterpriseMeta `protobuf:"bytes,3,opt,name=EnterpriseMeta,proto3" json:"EnterpriseMeta,omitempty"`
RaftIndex *pbcommon.RaftIndex `protobuf:"bytes,4,opt,name=RaftIndex,proto3" json:"RaftIndex,omitempty"`
// Types that are assignable to Entry:
//
// *ConfigEntry_MeshConfig
// *ConfigEntry_ServiceResolver
// *ConfigEntry_IngressGateway

View File

@ -107,6 +107,7 @@ type SecretsWriteRequest struct {
// PeerID is the local UUID of the peering this request applies to.
PeerID string `protobuf:"bytes,1,opt,name=PeerID,proto3" json:"PeerID,omitempty"`
// Types that are assignable to Request:
//
// *SecretsWriteRequest_GenerateToken
// *SecretsWriteRequest_ExchangeSecret
// *SecretsWriteRequest_PromotePending

View File

@ -78,6 +78,7 @@ type ReplicationMessage struct {
unknownFields protoimpl.UnknownFields
// Types that are assignable to Payload:
//
// *ReplicationMessage_Open_
// *ReplicationMessage_Request_
// *ReplicationMessage_Response_

View File

@ -324,6 +324,7 @@ type SubscribeRequest struct {
// receive events (e.g. health events for a particular service).
//
// Types that are assignable to Subject:
//
// *SubscribeRequest_WildcardSubject
// *SubscribeRequest_NamedSubject
Subject isSubscribeRequest_Subject `protobuf_oneof:"Subject"`
@ -475,6 +476,7 @@ type Event struct {
// Payload is the actual event content.
//
// Types that are assignable to Payload:
//
// *Event_EndOfSnapshot
// *Event_NewSnapshotToFollow
// *Event_EventBatch

View File

@ -2,14 +2,13 @@
//
// A sample retry operation looks like this:
//
// func TestX(t *testing.T) {
// retry.Run(t, func(r *retry.R) {
// if err := foo(); err != nil {
// r.Fatal("f: ", err)
// }
// })
// }
//
// func TestX(t *testing.T) {
// retry.Run(t, func(r *retry.R) {
// if err := foo(); err != nil {
// r.Fatal("f: ", err)
// }
// })
// }
package retry
import (

View File

@ -11,8 +11,8 @@ type WrappedServer struct {
//
// For example, the following code snippets are equivalent.
//
// server.JoinLAN(t, "1.2.3.4")
// server.Wrap(t).JoinLAN("1.2.3.4")
// server.JoinLAN(t, "1.2.3.4")
// server.Wrap(t).JoinLAN("1.2.3.4")
//
// This is useful when you are calling multiple functions and save the wrapped
// value as another variable to reduce the inclusion of "t".