mirror of https://github.com/status-im/consul.git
Fix a bunch of unparam lint issues
This commit is contained in:
parent e2cfa93f02
commit 010a609912
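Most of the hunks below follow the single pattern that the unparam linter flags: a parameter whose value is never used inside the function (or is the same at every call site) is either dropped from the signature, with every call site updated, or, where the signature's shape must be kept, renamed with a leading underscore as this commit does with `_req` and `_resp`. A minimal sketch of both fixes, using hypothetical names rather than code from this commit:

```go
package main

import (
	"fmt"
	"net/http"
)

// Before (what unparam would report): w is never used.
//
//	func logRequest(w http.ResponseWriter, r *http.Request) {
//		fmt.Println(r.URL.Path)
//	}

// Fix 1: drop the parameter that carries no information, updating call sites.
func logRequest(r *http.Request) {
	fmt.Println(r.URL.Path)
}

// Fix 2: when the signature must keep its shape (here: the handler func
// type used by http.HandleFunc), mark the unused parameter with an
// underscore prefix so readers and the linter know it is intentional.
func handler(_resp http.ResponseWriter, req *http.Request) {
	logRequest(req)
}

func main() {
	http.HandleFunc("/", handler)
	// http.ListenAndServe(":8080", nil) // omitted; example only
}
```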
@@ -20,7 +20,7 @@ type aclBootstrapResponse struct {
 // checkACLDisabled will return a standard response if ACLs are disabled. This
 // returns true if they are disabled and we should not continue.
-func (s *HTTPServer) checkACLDisabled(resp http.ResponseWriter, req *http.Request) bool {
+func (s *HTTPServer) checkACLDisabled(resp http.ResponseWriter, _req *http.Request) bool {
 	if s.agent.delegate.ACLsEnabled() {
 		return false
 	}
@@ -298,7 +298,7 @@ func (s *HTTPServer) ACLPolicyWrite(resp http.ResponseWriter, req *http.Request,
 	return s.aclPolicyWriteInternal(resp, req, policyID, false)
 }
 
-func (s *HTTPServer) aclPolicyWriteInternal(resp http.ResponseWriter, req *http.Request, policyID string, create bool) (interface{}, error) {
+func (s *HTTPServer) aclPolicyWriteInternal(_resp http.ResponseWriter, req *http.Request, policyID string, create bool) (interface{}, error) {
 	args := structs.ACLPolicySetRequest{
 		Datacenter: s.agent.config.Datacenter,
 	}
@@ -498,7 +498,7 @@ func (s *HTTPServer) ACLTokenSet(resp http.ResponseWriter, req *http.Request, to
 	return s.aclTokenSetInternal(resp, req, tokenID, false)
 }
 
-func (s *HTTPServer) aclTokenSetInternal(resp http.ResponseWriter, req *http.Request, tokenID string, create bool) (interface{}, error) {
+func (s *HTTPServer) aclTokenSetInternal(_resp http.ResponseWriter, req *http.Request, tokenID string, create bool) (interface{}, error) {
 	args := structs.ACLTokenSetRequest{
 		Datacenter: s.agent.config.Datacenter,
 		Create:     create,
@@ -2781,7 +2781,7 @@ func (a *Agent) addServiceInternal(req *addServiceRequest, snap map[structs.Chec
 	}
 
 	for i := range checks {
-		if err := a.addCheck(checks[i], chkTypes[i], service, persist, token, source); err != nil {
+		if err := a.addCheck(checks[i], chkTypes[i], service, token, source); err != nil {
 			a.cleanupRegistration(cleanupServices, cleanupChecks)
 			return err
 		}
@@ -3101,7 +3101,7 @@ func (a *Agent) addCheckLocked(check *structs.HealthCheck, chkType *structs.Chec
 		}
 	}()
 
-	err := a.addCheck(check, chkType, service, persist, token, source)
+	err := a.addCheck(check, chkType, service, token, source)
 	if err != nil {
 		a.State.RemoveCheck(cid)
 		return err
@@ -3121,7 +3121,7 @@ func (a *Agent) addCheckLocked(check *structs.HealthCheck, chkType *structs.Chec
 	return nil
 }
 
-func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, service *structs.NodeService, persist bool, token string, source configSource) error {
+func (a *Agent) addCheck(check *structs.HealthCheck, chkType *structs.CheckType, service *structs.NodeService, token string, source configSource) error {
 	if check.CheckID == "" {
 		return fmt.Errorf("CheckID missing")
 	}
@@ -660,7 +660,7 @@ func (s *HTTPServer) AgentCheckUpdate(resp http.ResponseWriter, req *http.Reques
 	return s.agentCheckUpdate(resp, req, checkID, update.Status, update.Output)
 }
 
-func (s *HTTPServer) agentCheckUpdate(resp http.ResponseWriter, req *http.Request, checkID types.CheckID, status string, output string) (interface{}, error) {
+func (s *HTTPServer) agentCheckUpdate(_resp http.ResponseWriter, req *http.Request, checkID types.CheckID, status string, output string) (interface{}, error) {
 	cid := structs.NewCheckID(checkID, nil)
 
 	// Get the provided token, if any, and vet against any ACL policies.
@@ -3278,10 +3278,11 @@ func TestAgent_purgeCheckState(t *testing.T) {
 
 func TestAgent_GetCoordinate(t *testing.T) {
 	t.Parallel()
-	check := func(server bool) {
-		a := NewTestAgent(t, `
-			server = true
-		`)
+	check := func(t *testing.T, server bool) {
+		a := NewTestAgent(t, fmt.Sprintf(`
+			server = %v
+			bootstrap = %v
+		`, server, server))
 		defer a.Shutdown()
 
 		// This doesn't verify the returned coordinate, but it makes
@@ -3293,8 +3294,13 @@ func TestAgent_GetCoordinate(t *testing.T) {
 		}
 	}
 
-	check(true)
-	check(false)
+	t.Run("server", func(t *testing.T) {
+		check(t, true)
+	})
+	t.Run("client", func(t *testing.T) {
+		check(t, false)
+	})
+
 }
 
 func TestAgent_reloadWatches(t *testing.T) {
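The two hunks above are slightly more than a lint fix: the old helper ignored its `server` argument (the config always said `server = true`), so the client case was never really exercised; the new version threads the flag through `fmt.Sprintf` and gives each case a named subtest. A minimal sketch of the pattern, with hypothetical names:

```go
package agent

import "testing"

func TestModes(t *testing.T) {
	// The helper takes its own *testing.T so failures are attributed
	// to the subtest that ran it.
	check := func(t *testing.T, server bool) {
		_ = server // start an agent in the given mode and assert on it ...
	}

	// Named subtests report "TestModes/server" and "TestModes/client"
	// separately instead of one merged pass/fail result.
	t.Run("server", func(t *testing.T) { check(t, true) })
	t.Run("client", func(t *testing.T) { check(t, false) })
}
```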
@@ -94,7 +94,7 @@ func (s *TestAPIServer) SetAllowedServiceAccount(
 	}
 
 	s.allowedServiceAccountJWT = jwt
-	s.replyRead = createReadServiceAccountFound(namespace, name, uid, overrideAnnotation, jwt)
+	s.replyRead = createReadServiceAccountFound(namespace, name, uid, overrideAnnotation)
 	s.replyStatus = createTokenReviewFound(namespace, name, uid, jwt)
 }
@@ -223,10 +223,10 @@ func (s *TestAPIServer) handleReadServiceAccount(
 		}
 		w.WriteHeader(http.StatusForbidden)
 	} else if s.replyRead == nil {
-		out = createReadServiceAccountNotFound(namespace, name)
+		out = createReadServiceAccountNotFound(name)
 		w.WriteHeader(http.StatusNotFound)
 	} else if s.replyRead.Namespace != namespace || s.replyRead.Name != name {
-		out = createReadServiceAccountNotFound(namespace, name)
+		out = createReadServiceAccountNotFound(name)
 		w.WriteHeader(http.StatusNotFound)
 	} else {
 		out = s.replyRead
@@ -449,7 +449,7 @@ func createReadServiceAccountForbidden_NoAuthz() *metav1.Status {
 	)
 }
 
-func createReadServiceAccountNotFound(namespace, name string) *metav1.Status {
+func createReadServiceAccountNotFound(name string) *metav1.Status {
 	/*
 		STATUS: 404
 		{
@@ -478,7 +478,7 @@ func createReadServiceAccountNotFound(namespace, name string) *metav1.Status {
 	)
 }
 
-func createReadServiceAccountFound(namespace, name, uid, overrideAnnotation, jwt string) *corev1.ServiceAccount {
+func createReadServiceAccountFound(namespace, name, uid, overrideAnnotation string) *corev1.ServiceAccount {
 	/*
 		STATUS: 200
 		{
@@ -152,7 +152,7 @@ func (s *Intention) prepareApplyUpdate(ident structs.ACLIdentity, authz acl.Auth
 
 // prepareApplyDelete ensures that the intention specified by the ID in the request exists
 // and that the requester is authorized to delete it
-func (s *Intention) prepareApplyDelete(ident structs.ACLIdentity, authz acl.Authorizer, entMeta *structs.EnterpriseMeta, args *structs.IntentionRequest) error {
+func (s *Intention) prepareApplyDelete(ident structs.ACLIdentity, authz acl.Authorizer, args *structs.IntentionRequest) error {
 	// If this is not a create, then we have to verify the ID.
 	state := s.srv.fsm.State()
 	_, ixn, err := state.IntentionGet(nil, args.Intention.ID)
@@ -217,7 +217,7 @@ func (s *Intention) Apply(
 			return err
 		}
 	case structs.IntentionOpDelete:
-		if err := s.prepareApplyDelete(ident, authz, &entMeta, args); err != nil {
+		if err := s.prepareApplyDelete(ident, authz, args); err != nil {
 			return err
 		}
 	default:
@@ -2145,7 +2145,7 @@ func (s *Store) checkServiceNodesTxn(tx *txn, ws memdb.WatchSet, serviceName str
 		ws.Add(iter.WatchCh())
 	}
 
-	return s.parseCheckServiceNodes(tx, fallbackWS, idx, serviceName, results, err)
+	return s.parseCheckServiceNodes(tx, fallbackWS, idx, results, err)
 }
 
 // CheckServiceTagNodes is used to query all nodes and checks for a given
@@ -2174,7 +2174,7 @@ func (s *Store) CheckServiceTagNodes(ws memdb.WatchSet, serviceName string, tags
 
 	// Get the table index.
 	idx := s.maxIndexForService(tx, serviceName, serviceExists, true, entMeta)
-	return s.parseCheckServiceNodes(tx, ws, idx, serviceName, results, err)
+	return s.parseCheckServiceNodes(tx, ws, idx, results, err)
 }
 
 // GatewayServices is used to query all services associated with a gateway
@@ -2214,7 +2214,7 @@ func (s *Store) GatewayServices(ws memdb.WatchSet, gateway string, entMeta *stru
 // method used to return a rich set of results from a more simple query.
 func (s *Store) parseCheckServiceNodes(
 	tx *txn, ws memdb.WatchSet, idx uint64,
-	serviceName string, services structs.ServiceNodes,
+	services structs.ServiceNodes,
 	err error) (uint64, structs.CheckServiceNodes, error) {
 	if err != nil {
 		return 0, nil, err
@@ -2353,7 +2353,7 @@ func (s *Store) serviceDumpAllTxn(tx *txn, ws memdb.WatchSet, entMeta *structs.E
 		results = append(results, sn)
 	}
 
-	return s.parseCheckServiceNodes(tx, nil, idx, "", results, err)
+	return s.parseCheckServiceNodes(tx, nil, idx, results, err)
 }
 
 func (s *Store) serviceDumpKindTxn(tx *txn, ws memdb.WatchSet, kind structs.ServiceKind, entMeta *structs.EnterpriseMeta) (uint64, structs.CheckServiceNodes, error) {
@@ -2374,7 +2374,7 @@ func (s *Store) serviceDumpKindTxn(tx *txn, ws memdb.WatchSet, kind structs.Serv
 		results = append(results, sn)
 	}
 
-	return s.parseCheckServiceNodes(tx, nil, idx, "", results, err)
+	return s.parseCheckServiceNodes(tx, nil, idx, results, err)
 }
 
 // parseNodes takes an iterator over a set of nodes and returns a struct
@@ -2908,7 +2908,7 @@ func ensureServiceVersion(t *testing.T, s *Store, ws memdb.WatchSet, serviceID s
 }
 
 // Ensure index exist, if expectedIndex = -1, ensure the index does not exists
-func ensureIndexForService(t *testing.T, s *Store, ws memdb.WatchSet, serviceName string, expectedIndex uint64) {
+func ensureIndexForService(t *testing.T, s *Store, serviceName string, expectedIndex uint64) {
 	t.Helper()
 	tx := s.db.Txn(false)
 	defer tx.Abort()
@@ -2993,10 +2993,10 @@ func TestStateStore_IndexIndependence(t *testing.T) {
 
 	s.DeleteCheck(15, "node2", types.CheckID("check_service_shared"), nil)
 	ensureServiceVersion(t, s, ws, "service_shared", 15, 2)
-	ensureIndexForService(t, s, ws, "service_shared", 15)
+	ensureIndexForService(t, s, "service_shared", 15)
 	s.DeleteService(16, "node2", "service_shared", nil)
 	ensureServiceVersion(t, s, ws, "service_shared", 16, 1)
-	ensureIndexForService(t, s, ws, "service_shared", 16)
+	ensureIndexForService(t, s, "service_shared", 16)
 	s.DeleteService(17, "node1", "service_shared", nil)
 	ensureServiceVersion(t, s, ws, "service_shared", 17, 0)
@@ -3007,7 +3007,7 @@ func TestStateStore_IndexIndependence(t *testing.T) {
 	ensureServiceVersion(t, s, ws, "service_shared", 17, 0)
 
 	// No index should exist anymore, it must have been garbage collected
-	ensureIndexForService(t, s, ws, "service_shared", 0)
+	ensureIndexForService(t, s, "service_shared", 0)
 	if !watchFired(ws) {
 		t.Fatalf("bad")
 	}
@@ -195,14 +195,7 @@ func (s *Store) ensureConfigEntryTxn(tx *txn, idx uint64, conf structs.ConfigEnt
 	}
 	raftIndex.ModifyIndex = idx
 
-	err = s.validateProposedConfigEntryInGraph(
-		tx,
-		idx,
-		conf.GetKind(),
-		conf.GetName(),
-		conf,
-		entMeta,
-	)
+	err = s.validateProposedConfigEntryInGraph(tx, conf.GetKind(), conf.GetName(), conf, entMeta)
 	if err != nil {
 		return err // Err is already sufficiently decorated.
 	}
@@ -273,14 +266,7 @@ func (s *Store) DeleteConfigEntry(idx uint64, kind, name string, entMeta *struct
 		}
 	}
 
-	err = s.validateProposedConfigEntryInGraph(
-		tx,
-		idx,
-		kind,
-		name,
-		nil,
-		entMeta,
-	)
+	err = s.validateProposedConfigEntryInGraph(tx, kind, name, nil, entMeta)
 	if err != nil {
 		return err // Err is already sufficiently decorated.
 	}
@@ -329,7 +315,6 @@ func (s *Store) insertConfigEntryWithTxn(tx *txn, idx uint64, conf structs.Confi
 // to the caller that they can correct.
 func (s *Store) validateProposedConfigEntryInGraph(
 	tx *txn,
-	idx uint64,
 	kind, name string,
 	next structs.ConfigEntry,
 	entMeta *structs.EnterpriseMeta,
@@ -365,7 +350,7 @@ func (s *Store) validateProposedConfigEntryInGraph(
 		return fmt.Errorf("unhandled kind %q during validation of %q", kind, name)
 	}
 
-	return s.validateProposedConfigEntryInServiceGraph(tx, idx, kind, name, next, validateAllChains, entMeta)
+	return s.validateProposedConfigEntryInServiceGraph(tx, kind, name, next, validateAllChains, entMeta)
 }
 
 func (s *Store) checkGatewayClash(
@@ -392,7 +377,6 @@ var serviceGraphKinds = []string{
 
 func (s *Store) validateProposedConfigEntryInServiceGraph(
 	tx *txn,
-	idx uint64,
 	kind, name string,
 	next structs.ConfigEntry,
 	validateAllChains bool,
@@ -424,13 +408,13 @@ func (s *Store) validateProposedConfigEntryInServiceGraph(
 			checkChains[sid] = struct{}{}
 
 			iter, err := tx.Get(configTableName, "link", sid)
+			if err != nil {
+				return err
+			}
 			for raw := iter.Next(); raw != nil; raw = iter.Next() {
 				entry := raw.(structs.ConfigEntry)
 				checkChains[structs.NewServiceID(entry.GetName(), entry.GetEnterpriseMeta())] = struct{}{}
 			}
-			if err != nil {
-				return err
-			}
 		}
 
 	overrides := map[structs.ConfigEntryKindName]structs.ConfigEntry{
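The hunk above does more than drop an unused parameter: the old code walked `iter` before checking the error from `tx.Get`, and go-memdb returns a nil iterator on error, so a failed lookup would have panicked before the check ever ran. The error check now sits before the first `iter.Next()`. A runnable sketch of that read pattern against a hypothetical one-table schema:

```go
package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

func main() {
	// A minimal schema: every go-memdb table needs a unique "id" index.
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"services": {
				Name: "services",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "Name"},
					},
				},
			},
		},
	}
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	txn := db.Txn(false) // read-only transaction
	defer txn.Abort()

	// Check the error from Get BEFORE touching the iterator: iter is nil
	// when Get fails, so calling iter.Next() first would panic.
	iter, err := txn.Get("services", "id")
	if err != nil {
		panic(err)
	}
	for raw := iter.Next(); raw != nil; raw = iter.Next() {
		fmt.Println(raw)
	}
}
```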
@@ -438,7 +422,7 @@ func (s *Store) validateProposedConfigEntryInServiceGraph(
 	}
 
 	for chain := range checkChains {
-		if err := s.testCompileDiscoveryChain(tx, nil, chain.ID, overrides, &chain.EnterpriseMeta); err != nil {
+		if err := s.testCompileDiscoveryChain(tx, chain.ID, overrides, &chain.EnterpriseMeta); err != nil {
			return err
		}
	}
@@ -448,7 +432,6 @@ func (s *Store) validateProposedConfigEntryInServiceGraph(
 
 func (s *Store) testCompileDiscoveryChain(
 	tx *txn,
-	ws memdb.WatchSet,
 	chainName string,
 	overrides map[structs.ConfigEntryKindName]structs.ConfigEntry,
 	entMeta *structs.EnterpriseMeta,
agent/dns.go
@@ -738,7 +738,7 @@ func (d *DNSServer) doDispatch(network string, remoteAddr net.Addr, req, resp *d
 
 		// Allow a "." in the node name, just join all the parts
 		node := strings.Join(queryParts, ".")
-		d.nodeLookup(cfg, network, datacenter, node, req, resp, maxRecursionLevel)
+		d.nodeLookup(cfg, datacenter, node, req, resp, maxRecursionLevel)
 	case "query":
 		// ensure we have a query name
 		if len(queryParts) < 1 {
@@ -832,7 +832,7 @@ func (d *DNSServer) computeRCode(err error) int {
 }
 
 // nodeLookup is used to handle a node query
-func (d *DNSServer) nodeLookup(cfg *dnsConfig, network, datacenter, node string, req, resp *dns.Msg, maxRecursionLevel int) {
+func (d *DNSServer) nodeLookup(cfg *dnsConfig, datacenter, node string, req, resp *dns.Msg, maxRecursionLevel int) {
 	// Only handle ANY, A, AAAA, and TXT type requests
 	qType := req.Question[0].Qtype
 	if qType != dns.TypeANY && qType != dns.TypeA && qType != dns.TypeAAAA && qType != dns.TypeTXT {
@@ -882,7 +882,7 @@ func (d *DNSServer) nodeLookup(cfg *dnsConfig, network, datacenter, node string,
 	}
 
 	if cfg.NodeMetaTXT || qType == dns.TypeTXT || qType == dns.TypeANY {
-		metas := d.generateMeta(n.Datacenter, q.Name, n, cfg.NodeTTL)
+		metas := d.generateMeta(q.Name, n, cfg.NodeTTL)
 		*metaTarget = append(*metaTarget, metas...)
 	}
 }
@@ -1771,7 +1771,7 @@ func (d *DNSServer) nodeServiceRecords(dc string, node structs.CheckServiceNode,
 	return d.makeRecordFromFQDN(dc, serviceAddr, node, req, ttl, cfg, maxRecursionLevel)
 }
 
-func (d *DNSServer) generateMeta(dc string, qName string, node *structs.Node, ttl time.Duration) []dns.RR {
+func (d *DNSServer) generateMeta(qName string, node *structs.Node, ttl time.Duration) []dns.RR {
 	extra := make([]dns.RR, 0, len(node.Meta))
 	for key, value := range node.Meta {
 		txt := value
@@ -1813,7 +1813,7 @@ func (d *DNSServer) serviceSRVRecords(cfg *dnsConfig, dc string, nodes structs.C
 		resp.Extra = append(resp.Extra, extra...)
 
 		if cfg.NodeMetaTXT {
-			resp.Extra = append(resp.Extra, d.generateMeta(dc, fmt.Sprintf("%s.node.%s.%s", node.Node.Node, dc, d.domain), node.Node, ttl)...)
+			resp.Extra = append(resp.Extra, d.generateMeta(fmt.Sprintf("%s.node.%s.%s", node.Node.Node, dc, d.domain), node.Node, ttl)...)
 		}
 	}
 }
@@ -4569,7 +4569,7 @@ func testDNSServiceLookupResponseLimits(t *testing.T, answerLimit int, qType uin
 }
 
 func checkDNSService(t *testing.T, generateNumNodes int, aRecordLimit int, qType uint16,
-	expectedResultsCount int, udpSize uint16, udpAnswerLimit int) error {
+	expectedResultsCount int, udpSize uint16) error {
 	a := NewTestAgent(t, `
 		node_name = "test-node"
 		dns_config {
@@ -4710,7 +4710,7 @@ func TestDNS_ServiceLookup_ARecordLimits(t *testing.T) {
 			for idx, qType := range queriesLimited {
 				t.Run(fmt.Sprintf("ARecordLimit %d qType: %d", idx, qType), func(t *testing.T) {
 					t.Parallel()
-					err := checkDNSService(t, test.numNodesTotal, test.aRecordLimit, qType, test.expectedAResults, test.udpSize, test.udpAnswerLimit)
+					err := checkDNSService(t, test.numNodesTotal, test.aRecordLimit, qType, test.expectedAResults, test.udpSize)
 					if err != nil {
 						t.Fatalf("Expected lookup %s to pass: %v", test.name, err)
 					}
@@ -4719,7 +4719,7 @@ func TestDNS_ServiceLookup_ARecordLimits(t *testing.T) {
 			// No limits but the size of records for SRV records, since not subject to randomization issues
 			t.Run("SRV lookup limitARecord", func(t *testing.T) {
 				t.Parallel()
-				err := checkDNSService(t, test.expectedSRVResults, test.aRecordLimit, dns.TypeSRV, test.numNodesTotal, test.udpSize, test.udpAnswerLimit)
+				err := checkDNSService(t, test.expectedSRVResults, test.aRecordLimit, dns.TypeSRV, test.numNodesTotal, test.udpSize)
 				if err != nil {
 					t.Fatalf("Expected service SRV lookup %s to pass: %v", test.name, err)
 				}
@@ -873,7 +873,7 @@ func TestDecodeAgentRegisterCheck(t *testing.T) {
 			if err != nil && !tc.wantErr {
 				t.Fatalf("expected nil error, got %v", err)
 			}
-			if err := checkTypeHeaderTest(out, tc.want, ""); err != nil {
+			if err := checkTypeHeaderTest(out, tc.want); err != nil {
 				t.Fatal(err)
 			}
 		})
@@ -1747,7 +1747,7 @@ func TestDecodeAgentRegisterService(t *testing.T) {
 			if err != nil && !tc.wantErr {
 				t.Fatalf("expected nil error, got %v", err)
 			}
-			if err := checkTypeHeaderTest(out.Check, tc.want, "Check"); err != nil {
+			if err := checkTypeHeaderTest(out.Check, tc.want); err != nil {
 				t.Fatal(err)
 			}
 			if out.Checks == nil {
@@ -1756,7 +1756,7 @@ func TestDecodeAgentRegisterService(t *testing.T) {
 				}
 				return
 			}
-			if err := checkTypeHeaderTest(out.Checks[0], tc.want, "Checks[0]"); err != nil {
+			if err := checkTypeHeaderTest(out.Checks[0], tc.want); err != nil {
 				t.Fatal(err)
 			}
 		})
@@ -2523,7 +2523,7 @@ func checkTypeDurationTest(check interface{}, want time.Duration, prefix string)
 
 // checkTypeDurationTest is a helper func to test the Header map in a CheckType or CheckDefiniton
 // (to reduce repetetive typing).
-func checkTypeHeaderTest(check interface{}, want map[string][]string, prefix string) error {
+func checkTypeHeaderTest(check interface{}, want map[string][]string) error {
 
 	var header map[string][]string
 	switch v := check.(type) {
@@ -308,18 +308,11 @@ func (p *ConnPool) DialTimeout(
 		)
 	}
 
-	return p.dial(
-		dc,
-		nodeName,
-		addr,
-		actualRPCType,
-		RPCTLS,
-	)
+	return p.dial(dc, addr, actualRPCType, RPCTLS)
 }
 
 func (p *ConnPool) dial(
 	dc string,
-	nodeName string,
 	addr net.Addr,
 	actualRPCType RPCType,
 	tlsRPCType RPCType,
@@ -561,7 +554,7 @@ func (p *ConnPool) RPC(
 	// or first time config request. For now though this is fine until
 	// those ongoing requests are implemented.
 	if method == "AutoEncrypt.Sign" || method == "Cluster.AutoConfig" {
-		return p.rpcInsecure(dc, nodeName, addr, method, args, reply)
+		return p.rpcInsecure(dc, addr, method, args, reply)
 	} else {
 		return p.rpc(dc, nodeName, addr, method, args, reply)
 	}
@@ -572,13 +565,13 @@ func (p *ConnPool) RPC(
 // transparent for the consumer. The pool cannot be used because
 // AutoEncrypt.Sign is a one-off call and it doesn't make sense to pool that
 // connection if it is not being reused.
-func (p *ConnPool) rpcInsecure(dc string, nodeName string, addr net.Addr, method string, args interface{}, reply interface{}) error {
+func (p *ConnPool) rpcInsecure(dc string, addr net.Addr, method string, args interface{}, reply interface{}) error {
 	if dc != p.Datacenter {
 		return fmt.Errorf("insecure dialing prohibited between datacenters")
 	}
 
 	var codec rpc.ClientCodec
-	conn, _, err := p.dial(dc, nodeName, addr, 0, RPCTLSInsecure)
+	conn, _, err := p.dial(dc, addr, 0, RPCTLSInsecure)
 	if err != nil {
 		return fmt.Errorf("rpcinsecure error establishing connection: %v", err)
 	}
@@ -294,9 +294,9 @@ func TestManager_BasicLifecycle(t *testing.T) {
 			webProxyCopy, err := copystructure.Copy(webProxy)
 			require.NoError(t, err)
 
-			testManager_BasicLifecycle(t, tt, types,
+			testManager_BasicLifecycle(t, types,
 				rootsCacheKey, leafCacheKey,
-				roots, leaf,
+				roots,
 				webProxyCopy.(*structs.NodeService),
 				expectSnapCopy.(*ConfigSnapshot),
 			)
@@ -313,11 +313,9 @@ type testcase_BasicLifecycle struct {
 
 func testManager_BasicLifecycle(
 	t *testing.T,
-	tt *testcase_BasicLifecycle,
 	types *TestCacheTypes,
 	rootsCacheKey, leafCacheKey string,
 	roots *structs.IndexedCARoots,
-	leaf *structs.IssuedCert,
 	webProxy *structs.NodeService,
 	expectSnap *ConfigSnapshot,
 ) {
@@ -321,9 +321,7 @@ func (s *Server) makeAppCluster(cfgSnap *proxycfg.ConfigSnapshot, name, pathProt
 		Endpoints: []*envoyendpoint.LocalityLbEndpoints{
 			{
 				LbEndpoints: []*envoyendpoint.LbEndpoint{
-					makeEndpoint(name,
-						addr,
-						port),
+					makeEndpoint(addr, port),
 				},
 			},
 		},
@@ -569,7 +569,7 @@ func TestClustersFromSnapshot(t *testing.T) {
 	}
 }
 
-func expectClustersJSONResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) map[string]string {
+func expectClustersJSONResources(snap *proxycfg.ConfigSnapshot) map[string]string {
 	return map[string]string{
 		"local_app": `
 			{
@@ -620,7 +620,7 @@ func expectClustersJSONResources(t *testing.T, snap *proxycfg.ConfigSnapshot, to
 				"healthyPanicThreshold": {}
 			},
 			"connectTimeout": "5s",
-			"tlsContext": ` + expectedUpstreamTLSContextJSON(t, snap, "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul") + `
+			"tlsContext": ` + expectedUpstreamTLSContextJSON(snap, "db.default.dc1.internal.11111111-2222-3333-4444-555555555555.consul") + `
 		}`,
 		"prepared_query:geo-cache": `
 			{
@@ -641,12 +641,12 @@ func expectClustersJSONResources(t *testing.T, snap *proxycfg.ConfigSnapshot, to
 
 			},
 			"connectTimeout": "5s",
-			"tlsContext": ` + expectedUpstreamTLSContextJSON(t, snap, "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul") + `
+			"tlsContext": ` + expectedUpstreamTLSContextJSON(snap, "geo-cache.default.dc1.query.11111111-2222-3333-4444-555555555555.consul") + `
 		}`,
 	}
 }
 
-func expectClustersJSONFromResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64, resourcesJSON map[string]string) string {
+func expectClustersJSONFromResources(snap *proxycfg.ConfigSnapshot, v, n uint64, resourcesJSON map[string]string) string {
 	resJSON := ""
 
 	// Sort resources into specific order because that matters in JSONEq
@@ -674,9 +674,8 @@ func expectClustersJSONFromResources(t *testing.T, snap *proxycfg.ConfigSnapshot
 	}`
 }
 
-func expectClustersJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) string {
-	return expectClustersJSONFromResources(t, snap, token, v, n,
-		expectClustersJSONResources(t, snap, token, v, n))
+func expectClustersJSON(snap *proxycfg.ConfigSnapshot, v, n uint64) string {
+	return expectClustersJSONFromResources(snap, v, n, expectClustersJSONResources(snap))
 }
 
 type customClusterJSONOptions struct {
@@ -288,7 +288,7 @@ func (s *Server) endpointsFromSnapshotIngressGateway(cfgSnap *proxycfg.ConfigSna
 	return resources, nil
 }
 
-func makeEndpoint(clusterName, host string, port int) *envoyendpoint.LbEndpoint {
+func makeEndpoint(host string, port int) *envoyendpoint.LbEndpoint {
 	return &envoyendpoint.LbEndpoint{
 		HostIdentifier: &envoyendpoint.LbEndpoint_Endpoint{
 			Endpoint: &envoyendpoint.Endpoint{
@@ -402,7 +402,7 @@ func makeListenerFromUserConfig(configJSON string) (*envoy.Listener, error) {
 // specify custom listener params in config but still get our certs delivered
 // dynamically and intentions enforced without coming up with some complicated
 // templating/merging solution.
-func injectConnectFilters(cfgSnap *proxycfg.ConfigSnapshot, token string, listener *envoy.Listener, setTLS bool) error {
+func injectConnectFilters(cfgSnap *proxycfg.ConfigSnapshot, token string, listener *envoy.Listener) error {
 	authFilter, err := makeExtAuthFilter(token)
 	if err != nil {
 		return err
@@ -474,7 +474,7 @@ func (s *Server) makePublicListener(cfgSnap *proxycfg.ConfigSnapshot, token stri
 		}
 	}
 
-	err = injectConnectFilters(cfgSnap, token, l, true)
+	err = injectConnectFilters(cfgSnap, token, l)
 	return l, err
 }
@@ -489,7 +489,7 @@ func TestListenersFromSnapshot(t *testing.T) {
 	}
 }
 
-func expectListenerJSONResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) map[string]string {
+func expectListenerJSONResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string) map[string]string {
 	tokenVal := ""
 	if token != "" {
 		tokenVal = fmt.Sprintf(",\n"+`"value": "%s"`, token)
@@ -585,7 +585,7 @@ func expectListenerJSONResources(t *testing.T, snap *proxycfg.ConfigSnapshot, to
 	}
 }
 
-func expectListenerJSONFromResources(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64, resourcesJSON map[string]string) string {
+func expectListenerJSONFromResources(snap *proxycfg.ConfigSnapshot, v, n uint64, resourcesJSON map[string]string) string {
 	resJSON := ""
 	// Sort resources into specific order because that matters in JSONEq
 	// comparison later.
@@ -612,8 +612,7 @@ func expectListenerJSONFromResources(t *testing.T, snap *proxycfg.ConfigSnapshot
 }
 
 func expectListenerJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) string {
-	return expectListenerJSONFromResources(t, snap, token, v, n,
-		expectListenerJSONResources(t, snap, token, v, n))
+	return expectListenerJSONFromResources(snap, v, n, expectListenerJSONResources(t, snap, token))
 }
 
 type customListenerJSONOptions struct {
@@ -141,7 +141,7 @@ func makeUpstreamRouteForDiscoveryChain(
 		routes = make([]*envoyroute.Route, 0, len(startNode.Routes))
 
 		for _, discoveryRoute := range startNode.Routes {
-			routeMatch := makeRouteMatchForDiscoveryRoute(discoveryRoute, chain.Protocol)
+			routeMatch := makeRouteMatchForDiscoveryRoute(discoveryRoute)
 
 			var (
 				routeAction *envoyroute.Route_Route
@@ -240,7 +240,7 @@ func makeUpstreamRouteForDiscoveryChain(
 	return host, nil
 }
 
-func makeRouteMatchForDiscoveryRoute(discoveryRoute *structs.DiscoveryRoute, protocol string) *envoyroute.RouteMatch {
+func makeRouteMatchForDiscoveryRoute(discoveryRoute *structs.DiscoveryRoute) *envoyroute.RouteMatch {
 	match := discoveryRoute.Definition.Match
 	if match == nil || match.IsEmpty() {
 		return makeDefaultRouteMatch()
@@ -145,7 +145,7 @@ func TestServer_StreamAggregatedResources_BasicProtocol(t *testing.T) {
 	snap := proxycfg.TestConfigSnapshot(t)
 	mgr.DeliverConfig(t, sid, snap)
 
-	assertResponseSent(t, envoy.stream.sendCh, expectClustersJSON(t, snap, "", 1, 1))
+	assertResponseSent(t, envoy.stream.sendCh, expectClustersJSON(snap, 1, 1))
 
 	// Envoy then tries to discover endpoints for those clusters. Technically it
 	// includes the cluster names in the ResourceNames field but we ignore that
@@ -160,7 +160,7 @@ func TestServer_StreamAggregatedResources_BasicProtocol(t *testing.T) {
 	// the server for endpoints. Note that this should not be racy if the server
 	// is behaving well since the Cluster send above should be blocked until we
 	// deliver a new config version.
-	assertResponseSent(t, envoy.stream.sendCh, expectEndpointsJSON(t, snap, "", 1, 2))
+	assertResponseSent(t, envoy.stream.sendCh, expectEndpointsJSON(1, 2))
 
 	// And no other response yet
 	assertChanBlocked(t, envoy.stream.sendCh)
@@ -195,8 +195,8 @@ func TestServer_StreamAggregatedResources_BasicProtocol(t *testing.T) {
 	// don't know the order the nonces will be assigned. For now we rely and
 	// require our implementation to always deliver updates in a specific order
 	// which is reasonable anyway to ensure consistency of the config Envoy sees.
-	assertResponseSent(t, envoy.stream.sendCh, expectClustersJSON(t, snap, "", 2, 4))
-	assertResponseSent(t, envoy.stream.sendCh, expectEndpointsJSON(t, snap, "", 2, 5))
+	assertResponseSent(t, envoy.stream.sendCh, expectClustersJSON(snap, 2, 4))
+	assertResponseSent(t, envoy.stream.sendCh, expectEndpointsJSON(2, 5))
 	assertResponseSent(t, envoy.stream.sendCh, expectListenerJSON(t, snap, "", 2, 6))
 
 	// Let's pretend that Envoy doesn't like that new listener config. It will ACK
@@ -232,12 +232,12 @@ func TestServer_StreamAggregatedResources_BasicProtocol(t *testing.T) {
 	snap.ConnectProxy.Leaf = proxycfg.TestLeafForCA(t, snap.Roots.Roots[0])
 	mgr.DeliverConfig(t, sid, snap)
 
-	assertResponseSent(t, envoy.stream.sendCh, expectClustersJSON(t, snap, "", 3, 7))
-	assertResponseSent(t, envoy.stream.sendCh, expectEndpointsJSON(t, snap, "", 3, 8))
+	assertResponseSent(t, envoy.stream.sendCh, expectClustersJSON(snap, 3, 7))
+	assertResponseSent(t, envoy.stream.sendCh, expectEndpointsJSON(3, 8))
 	assertResponseSent(t, envoy.stream.sendCh, expectListenerJSON(t, snap, "", 3, 9))
 }
 
-func expectEndpointsJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, token string, v, n uint64) string {
+func expectEndpointsJSON(v, n uint64) string {
 	return `{
 		"versionInfo": "` + hexString(v) + `",
 		"resources": [
@@ -315,15 +315,15 @@ func expectEndpointsJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, token stri
 	}`
 }
 
-func expectedUpstreamTLSContextJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, sni string) string {
-	return expectedTLSContextJSON(t, snap, false, sni)
+func expectedUpstreamTLSContextJSON(snap *proxycfg.ConfigSnapshot, sni string) string {
+	return expectedTLSContextJSON(snap, false, sni)
 }
 
 func expectedPublicTLSContextJSON(t *testing.T, snap *proxycfg.ConfigSnapshot) string {
-	return expectedTLSContextJSON(t, snap, true, "")
+	return expectedTLSContextJSON(snap, true, "")
 }
 
-func expectedTLSContextJSON(t *testing.T, snap *proxycfg.ConfigSnapshot, requireClientCert bool, sni string) string {
+func expectedTLSContextJSON(snap *proxycfg.ConfigSnapshot, requireClientCert bool, sni string) string {
 	// Assume just one root for now, can get fancier later if needed.
 	caPEM := snap.Roots.Roots[0].RootCert
 	reqClient := ""
@@ -593,7 +593,7 @@ func TestServer_StreamAggregatedResources_ACLTokenDeleted_StreamTerminatedDuring
 	snap := proxycfg.TestConfigSnapshot(t)
 	mgr.DeliverConfig(t, sid, snap)
 
-	assertResponseSent(t, envoy.stream.sendCh, expectClustersJSON(t, snap, token, 1, 1))
+	assertResponseSent(t, envoy.stream.sendCh, expectClustersJSON(snap, 1, 1))
 
 	// Now nuke the ACL token.
 	validToken.Store("")
@@ -685,7 +685,7 @@ func TestServer_StreamAggregatedResources_ACLTokenDeleted_StreamTerminatedInBack
 	snap := proxycfg.TestConfigSnapshot(t)
 	mgr.DeliverConfig(t, sid, snap)
 
-	assertResponseSent(t, envoy.stream.sendCh, expectClustersJSON(t, snap, token, 1, 1))
+	assertResponseSent(t, envoy.stream.sendCh, expectClustersJSON(snap, 1, 1))
 
 	// It also (in parallel) issues the next cluster request (which acts as an ACK
 	// of the version we sent)
@@ -338,7 +338,7 @@ func InitTelemetry(cfg TelemetryConfig) (*metrics.InmemSink, error) {
 	metricsConf.BlockedPrefixes = cfg.BlockedPrefixes
 
 	var sinks metrics.FanoutSink
-	addSink := func(name string, fn func(TelemetryConfig, string) (metrics.MetricSink, error)) error {
+	addSink := func(fn func(TelemetryConfig, string) (metrics.MetricSink, error)) error {
 		s, err := fn(cfg, metricsConf.HostName)
 		if err != nil {
 			return err
@@ -349,19 +349,19 @@ func InitTelemetry(cfg TelemetryConfig) (*metrics.InmemSink, error) {
 		return nil
 	}
 
-	if err := addSink("statsite", statsiteSink); err != nil {
+	if err := addSink(statsiteSink); err != nil {
 		return nil, err
 	}
-	if err := addSink("statsd", statsdSink); err != nil {
+	if err := addSink(statsdSink); err != nil {
 		return nil, err
 	}
-	if err := addSink("dogstatd", dogstatdSink); err != nil {
+	if err := addSink(dogstatdSink); err != nil {
 		return nil, err
 	}
-	if err := addSink("circonus", circonusSink); err != nil {
+	if err := addSink(circonusSink); err != nil {
 		return nil, err
 	}
-	if err := addSink("prometheus", prometheusSink); err != nil {
+	if err := addSink(prometheusSink); err != nil {
 		return nil, err
 	}
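The telemetry change works because each sink constructor already identifies itself; the closure never read the `name` string, so passing the constructor alone is enough. A sketch of this function-valued parameter pattern, with hypothetical sink types rather than the real metrics library:

```go
package main

import "fmt"

type sink struct{ addr string }

// Each constructor knows how to build its own sink, so callers pass the
// function itself rather than a (name, function) pair.
func statsiteSink(addr string) (*sink, error) { return &sink{addr}, nil }
func statsdSink(addr string) (*sink, error)   { return &sink{addr}, nil }

func main() {
	var sinks []*sink
	addSink := func(fn func(string) (*sink, error)) error {
		s, err := fn("127.0.0.1:8125")
		if err != nil {
			return err
		}
		sinks = append(sinks, s)
		return nil
	}

	if err := addSink(statsiteSink); err != nil {
		panic(err)
	}
	if err := addSink(statsdSink); err != nil {
		panic(err)
	}
	fmt.Println(len(sinks), "sinks configured")
}
```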