diff --git a/command/agent/agent.go b/command/agent/agent.go index 138f1799c5..42fda9963c 100644 --- a/command/agent/agent.go +++ b/command/agent/agent.go @@ -1048,7 +1048,7 @@ func (a *Agent) UpdateCheck(checkID types.CheckID, status, output string) error check, ok := a.checkTTLs[checkID] if !ok { - return fmt.Errorf("CheckID does not have associated TTL") + return fmt.Errorf("CheckID %q does not have associated TTL", checkID) } // Set the status through CheckTTL to reset the TTL diff --git a/command/agent/rpc.go b/command/agent/rpc.go index c763dd0bab..9232be5750 100644 --- a/command/agent/rpc.go +++ b/command/agent/rpc.go @@ -154,9 +154,9 @@ type logRecord struct { type Member struct { Name string Addr net.IP - Port uint16 Tags map[string]string Status string + Port uint16 ProtocolMin uint8 ProtocolMax uint8 ProtocolCur uint8 diff --git a/consul/rpc.go b/consul/rpc.go index ea054cdb5d..7261c4a850 100644 --- a/consul/rpc.go +++ b/consul/rpc.go @@ -241,7 +241,7 @@ func (s *Server) forwardDC(method, dc string, args interface{}, reply interface{ } // Select a random addr - offset := rand.Int31() % int32(len(servers)) + offset := rand.Int31n(int32(len(servers))) server := servers[offset] s.remoteLock.RUnlock() diff --git a/consul/server.go b/consul/server.go index 2f77ca23cd..f94efcdef4 100644 --- a/consul/server.go +++ b/consul/server.go @@ -222,7 +222,7 @@ func NewServer(config *Config) (*Server, error) { localConsuls: make(map[string]*agent.Server), logger: logger, reconcileCh: make(chan serf.Member, 32), - remoteConsuls: make(map[string][]*agent.Server), + remoteConsuls: make(map[string][]*agent.Server, 4), rpcServer: rpc.NewServer(), rpcTLS: incomingTLS, tombstoneGC: gc, diff --git a/consul/servers/manager.go b/consul/servers/manager.go index c343d460ad..39077bfa15 100644 --- a/consul/servers/manager.go +++ b/consul/servers/manager.go @@ -50,14 +50,14 @@ const ( newRebalanceConnsPerSecPerServer = 64 ) -// ConsulClusterInfo is an interface wrapper around serf and prevents a -// cyclic import dependency +// ConsulClusterInfo is an interface wrapper around serf in order to prevent +// a cyclic import dependency. type ConsulClusterInfo interface { NumNodes() int } -// Pinger is an interface wrapping client.ConnPool to prevent a -// cyclic import dependency +// Pinger is an interface wrapping client.ConnPool to prevent a cyclic import +// dependency. type Pinger interface { PingConsulServer(s *agent.Server) (bool, error) } @@ -269,8 +269,8 @@ func (m *Manager) NumServers() int { // fail for a particular server are rotated to the end of the list. This // method reshuffles the list periodically in order to redistribute work // across all known consul servers (i.e. guarantee that the order of servers -// in the server list isn't positively correlated with the age of a server in -// the consul cluster). Periodically shuffling the server list prevents +// in the server list is not positively correlated with the age of a server +// in the Consul cluster). Periodically shuffling the server list prevents // long-lived clients from fixating on long-lived servers. // // Unhealthy servers are removed when serf notices the server has been @@ -280,16 +280,16 @@ func (m *Manager) RebalanceServers() { // Obtain a copy of the current serverList l := m.getServerList() - // Early abort if there is no value to shuffling + // Early abort if there is nothing to shuffle if len(l.servers) < 2 { return } l.shuffleServers() - // Iterate through the shuffled server list to find a healthy server. 
- // Don't iterate on the list directly, this loop mutates the server - // list. + // Iterate through the shuffled server list to find an assumed + // healthy server. NOTE: Do not iterate on the list directly because + // this loop mutates the server list in-place. var foundHealthyServer bool for i := 0; i < len(l.servers); i++ { // Always test the first server. Failed servers are cycled @@ -320,8 +320,7 @@ func (m *Manager) RebalanceServers() { // reconcileServerList failed because Serf removed the server // that was at the front of the list that had successfully // been Ping'ed. Between the Ping and reconcile, a Serf - // event had shown up removing the node. Prevent an RPC - // timeout by retrying RebalanceServers(). + // event had shown up removing the node. // // Instead of doing any heroics, "freeze in place" and // continue to use the existing connection until the next @@ -332,9 +331,9 @@ func (m *Manager) RebalanceServers() { } // reconcileServerList returns true when the first server in serverList -// exists in the receiver's serverList. If true, the merged serverList -// is stored as the receiver's serverList. Returns false if the first -// server does not exist in the list (i.e. was removed by Serf during a +// exists in the receiver's serverList. If true, the merged serverList is +// stored as the receiver's serverList. Returns false if the first server +// does not exist in the list (i.e. was removed by Serf during a // PingConsulServer() call. Newly added servers are appended to the list and // other missing servers are removed from the list. func (m *Manager) reconcileServerList(l *serverList) bool { @@ -346,7 +345,7 @@ func (m *Manager) reconcileServerList(l *serverList) bool { newServerCfg := m.getServerList() // If Serf has removed all nodes, or there is no selected server - // (zero nodes in l), abort early. + // (zero nodes in serverList), abort early. if len(newServerCfg.servers) == 0 || len(l.servers) == 0 { return false } @@ -423,12 +422,12 @@ func (m *Manager) RemoveServer(s *agent.Server) { // refreshServerRebalanceTimer is only called once m.rebalanceTimer expires. func (m *Manager) refreshServerRebalanceTimer() time.Duration { l := m.getServerList() - numConsulServers := len(l.servers) + numServers := len(l.servers) // Limit this connection's life based on the size (and health) of the // cluster. Never rebalance a connection more frequently than // connReuseLowWatermarkDuration, and make sure we never exceed // clusterWideRebalanceConnsPerSec operations/s across numLANMembers. - clusterWideRebalanceConnsPerSec := float64(numConsulServers * newRebalanceConnsPerSecPerServer) + clusterWideRebalanceConnsPerSec := float64(numServers * newRebalanceConnsPerSecPerServer) connReuseLowWatermarkDuration := clientRPCMinReuseDuration + lib.RandomStagger(clientRPCMinReuseDuration/clientRPCJitterFraction) numLANMembers := m.clusterInfo.NumNodes() connRebalanceTimeout := lib.RateScaledInterval(clusterWideRebalanceConnsPerSec, connReuseLowWatermarkDuration, numLANMembers) @@ -437,8 +436,8 @@ func (m *Manager) refreshServerRebalanceTimer() time.Duration { return connRebalanceTimeout } -// ResetRebalanceTimer resets the rebalance timer. This method primarily -// exists for testing and should not be used directly. +// ResetRebalanceTimer resets the rebalance timer. This method exists for +// testing and should not be used directly. 
func (m *Manager) ResetRebalanceTimer() { m.listLock.Lock() defer m.listLock.Unlock() @@ -446,11 +445,11 @@ func (m *Manager) ResetRebalanceTimer() { } // Start is used to start and manage the task of automatically shuffling and -// rebalancing the list of consul servers. This maintenance only happens +// rebalancing the list of Consul servers. This maintenance only happens // periodically based on the expiration of the timer. Failed servers are // automatically cycled to the end of the list. New servers are appended to // the list. The order of the server list must be shuffled periodically to -// distribute load across all known and available consul servers. +// distribute load across all known and available Consul servers. func (m *Manager) Start() { for { select { diff --git a/testutil/README.md b/testutil/README.md index dd953343c8..21eb01d2a7 100644 --- a/testutil/README.md +++ b/testutil/README.md @@ -26,7 +26,7 @@ import ( ) func TestMain(t *testing.T) { - // Create a server + // Create a test Consul server srv1 := testutil.NewTestServer(t) defer srv1.Stop()
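Notes on the less obvious hunks above, with small runnable Go sketches (all package and function names in the sketches are ours, not Consul's).

The command/agent/rpc.go hunk moves `Port` below `Status` in `Member` without saying why; one plausible effect is tighter field packing, since the `uint16` now sits next to the `uint8` protocol fields instead of occupying its own padded slot. A sketch that compares the two layouts (both structs are illustrative stand-ins limited to the fields visible in the diff):

```go
package main

import (
	"fmt"
	"net"
	"unsafe"
)

// memberBefore and memberAfter mirror the field orders on either side
// of the rpc.go hunk (only the fields visible in the diff).
type memberBefore struct {
	Name        string
	Addr        net.IP
	Port        uint16
	Tags        map[string]string
	Status      string
	ProtocolMin uint8
	ProtocolMax uint8
	ProtocolCur uint8
}

type memberAfter struct {
	Name        string
	Addr        net.IP
	Tags        map[string]string
	Status      string
	Port        uint16
	ProtocolMin uint8
	ProtocolMax uint8
	ProtocolCur uint8
}

func main() {
	// With Port grouped next to the uint8 protocol fields, the uint16 no
	// longer sits alone in an 8-byte aligned slot on 64-bit platforms.
	fmt.Println(unsafe.Sizeof(memberBefore{}), unsafe.Sizeof(memberAfter{})) // 80 72 on amd64
}
```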
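The consul/rpc.go hunk replaces `rand.Int31() % int32(len(servers))` with `rand.Int31n(int32(len(servers)))`. Besides reading better, `Int31n` draws uniformly from [0, n), while the modulo form is biased toward low indices whenever n does not evenly divide 2^31. A minimal sketch of the selection (the function name is ours):

```go
package main

import (
	"fmt"
	"math/rand"
)

// pickServer returns a uniformly random index into a list of n servers.
// rand.Int31n(n) draws uniformly from [0, n), while rand.Int31() % n is
// biased toward smaller indices whenever n does not evenly divide 2^31.
// Both panic or misbehave on n <= 0, so callers must still guard against
// an empty server list before selecting.
func pickServer(n int) int {
	return int(rand.Int31n(int32(n)))
}

func main() {
	// Tally picks across three servers; the counts come out roughly equal.
	counts := make([]int, 3)
	for i := 0; i < 30000; i++ {
		counts[pickServer(3)]++
	}
	fmt.Println(counts) // e.g. [10032 9951 10017]
}
```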
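The reworked RebalanceServers comments describe a copy-shuffle-probe pattern: work on a copy of the list, shuffle it, then cycle failed servers to the end until a healthy one sits at the front. A self-contained sketch of that pattern, assuming stand-in `server` and `ping` definitions rather than Consul's `agent.Server` and `Pinger`:

```go
package main

import (
	"fmt"
	"math/rand"
)

// server stands in for agent.Server; ping stands in for
// Pinger.PingConsulServer. Both are illustrative, not Consul's API.
type server struct{ name string }

func ping(s *server) bool {
	// Pretend any server whose name ends in '2' is unhealthy.
	return s.name[len(s.name)-1] != '2'
}

// rebalance copies the list, shuffles it, then probes until the entry at
// the front answers a ping, cycling failures to the end. As the comment in
// RebalanceServers warns, never range over the slice directly: the loop
// mutates it in place.
func rebalance(servers []*server) []*server {
	if len(servers) < 2 {
		return servers // nothing to shuffle
	}
	l := append([]*server(nil), servers...) // work on a copy
	rand.Shuffle(len(l), func(i, j int) { l[i], l[j] = l[j], l[i] })

	for i := 0; i < len(l); i++ {
		if ping(l[0]) {
			return l // a healthy server now sits at index 0
		}
		l = append(l[1:], l[0]) // cycle the failed server to the end
	}
	return l // no healthy server found; keep the shuffled order
}

func main() {
	out := rebalance([]*server{{"s1"}, {"s2"}, {"s3"}})
	fmt.Println(out[0].name) // always "s1" or "s3", never "s2"
}
```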
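Finally, refreshServerRebalanceTimer caps cluster-wide rebalance churn: the per-second connection budget scales with server count (`newRebalanceConnsPerSecPerServer = 64`, per the constant in the hunk), and the resulting interval never drops below a jittered reuse floor. A sketch of the arithmetic; the two duration constants and both helper bodies are assumptions modeled on the names in the hunk, not code copied from Consul's lib package:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

const (
	// newRebalanceConnsPerSecPerServer matches the constant in the hunk
	// above; the other two values here are illustrative assumptions.
	newRebalanceConnsPerSecPerServer = 64
	clientRPCMinReuseDuration        = 5 * time.Minute
	clientRPCJitterFraction          = 2
)

// randomStagger sketches lib.RandomStagger: a random duration in [0, interval).
func randomStagger(interval time.Duration) time.Duration {
	return time.Duration(rand.Int63n(int64(interval)))
}

// rateScaledInterval sketches lib.RateScaledInterval: stretch the minimum
// interval so that n clients on this schedule stay under rate ops/sec
// cluster-wide.
func rateScaledInterval(rate float64, min time.Duration, n int) time.Duration {
	interval := time.Duration(float64(n) / rate * float64(time.Second))
	if interval < min {
		return min
	}
	return interval
}

func main() {
	numServers, numLANMembers := 3, 50000

	// 3 servers * 64 conns/s = 192 cluster-wide rebalance conns/s.
	rate := float64(numServers * newRebalanceConnsPerSecPerServer)
	// Jittered floor: between 5 and 7.5 minutes with these constants.
	floor := clientRPCMinReuseDuration +
		randomStagger(clientRPCMinReuseDuration/clientRPCJitterFraction)

	// 50000 members / 192 conns/s ≈ 260s ≈ 4.3 min, below the floor, so
	// the jittered floor wins for a cluster of this size.
	fmt.Println(rateScaledInterval(rate, floor, numLANMembers))
}
```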