mirror of https://github.com/status-im/consul.git
Fix a bunch of typos.
This commit is contained in:
parent e029702e39
commit 5a28ebcaa3
@@ -170,7 +170,7 @@ type PolicyACL struct {
 	eventRules *radix.Tree
 
 	// keyringRules contains the keyring policies. The keyring has
-	// a very simple yes/no without prefix mathing, so here we
+	// a very simple yes/no without prefix matching, so here we
 	// don't need to use a radix tree.
 	keyringRule string
 }

@@ -36,7 +36,7 @@ type QueryOptions struct {
 	WaitIndex uint64
 
 	// WaitTime is used to bound the duration of a wait.
-	// Defaults to that of the Config, but can be overriden.
+	// Defaults to that of the Config, but can be overridden.
 	WaitTime time.Duration
 
 	// Token is used to provide a per-request ACL token

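For context (not part of this commit): WaitIndex and WaitTime together drive blocking queries against the repository's api package. A minimal sketch, assuming a locally running agent and a made-up key name:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	var lastIndex uint64
	for {
		// Block for up to 30s, or until the key's index passes lastIndex.
		pair, meta, err := kv.Get("service/web/config", &api.QueryOptions{
			WaitIndex: lastIndex,
			WaitTime:  30 * time.Second,
		})
		if err != nil {
			log.Fatal(err)
		}
		lastIndex = meta.LastIndex
		if pair != nil {
			fmt.Printf("value: %s\n", pair.Value)
		}
	}
}
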
@@ -143,7 +143,7 @@ func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
 	return k.put(p.Key, params, p.Value, q)
 }
 
-// Acquire is used for a lock acquisiiton operation. The Key,
+// Acquire is used for a lock acquisition operation. The Key,
 // Flags, Value and Session are respected. Returns true
 // on success or false on failures.
 func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {

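For illustration only (not from this commit), a sketch of a lock acquisition through Acquire; the key name, value, and TTL are made up:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// A session ties the lock to this client's liveness.
	sessionID, _, err := client.Session().Create(&api.SessionEntry{TTL: "15s"}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Acquire returns true only if no other session holds the key.
	pair := &api.KVPair{Key: "locks/leader", Value: []byte("node-1"), Session: sessionID}
	acquired, _, err := client.KV().Acquire(pair, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acquired:", acquired)
}
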
@@ -237,7 +237,7 @@ func TestLock_Destroy(t *testing.T) {
 		t.Fatalf("err: %v", err)
 	}
 
-	// Should relese
+	// Should release
 	err = l2.Unlock()
 	if err != nil {
 		t.Fatalf("err: %v", err)

@@ -66,7 +66,7 @@ type SemaphoreOptions struct {
 	Prefix      string // Must be set and have write permissions
 	Limit       int    // Must be set, and be positive
 	Value       []byte // Optional, value to associate with the contender entry
-	Session     string // OPtional, created if not specified
+	Session     string // Optional, created if not specified
 	SessionName string // Optional, defaults to DefaultLockSessionName
 	SessionTTL  string // Optional, defaults to DefaultLockSessionTTL
 }

@@ -123,7 +123,7 @@ func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
 }
 
 // Acquire attempts to reserve a slot in the semaphore, blocking until
-// success, interrupted via the stopCh or an error is encounted.
+// success, interrupted via the stopCh or an error is encountered.
 // Providing a non-nil stopCh can be used to abort the attempt.
 // On success, a channel is returned that represents our slot.
 // This channel could be closed at any time due to session invalidation,

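Not part of this commit: a minimal usage sketch of SemaphoreOpts and Acquire, with a made-up prefix and limit:

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// At most 3 contenders hold a slot under this prefix at once.
	sema, err := client.SemaphoreOpts(&api.SemaphoreOptions{
		Prefix: "service/batch-job/sema",
		Limit:  3,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Blocks until a slot is free; the returned channel is closed if the
	// slot is later lost (e.g. session invalidation).
	lostCh, err := sema.Acquire(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer sema.Release()

	select {
	case <-lostCh:
		log.Println("slot lost (session invalidated)")
	default:
		log.Println("holding a semaphore slot")
	}
}
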
@@ -102,7 +102,7 @@ func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta,
 	return out.ID, wm, nil
 }
 
-// Destroy invalides a given session
+// Destroy invalidates a given session
 func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
 	wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
 	if err != nil {

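For illustration (not from this commit): creating and later destroying a session through the endpoints touched here; the session name and TTL are arbitrary:

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Create a named session with a TTL so it self-expires if we vanish.
	id, _, err := client.Session().Create(&api.SessionEntry{Name: "demo", TTL: "30s"}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Destroy invalidates the session and releases anything it holds.
	if _, err := client.Session().Destroy(id, nil); err != nil {
		log.Fatal(err)
	}
}
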
@@ -718,7 +718,7 @@ func (a *Agent) RemoveService(serviceID string, persist bool) error {
 		return fmt.Errorf("ServiceID missing")
 	}
 
-	// Remove service immeidately
+	// Remove service immediately
 	a.state.RemoveService(serviceID)
 
 	// Remove the service from the data dir

@@ -26,7 +26,7 @@ type PortConfig struct {
 	HTTPS   int // HTTPS API
 	RPC     int // CLI RPC
 	SerfLan int `mapstructure:"serf_lan"` // LAN gossip (Client + Server)
-	SerfWan int `mapstructure:"serf_wan"` // WAN gossip (Server onlyg)
+	SerfWan int `mapstructure:"serf_wan"` // WAN gossip (Server only)
 	Server  int // Server internal RPC
 }
 

@@ -99,7 +99,7 @@ type Config struct {
 	Bootstrap bool `mapstructure:"bootstrap"`
 
 	// BootstrapExpect tries to automatically bootstrap the Consul cluster,
-	// by witholding peers until enough servers join.
+	// by withholding peers until enough servers join.
 	BootstrapExpect int `mapstructure:"bootstrap_expect"`
 
 	// Server controls if this agent acts like a Consul server,

@@ -223,7 +223,7 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
 	}
 }
 
-// handleQUery is used to handle DNS queries in the configured domain
+// handleQuery is used to handle DNS queries in the configured domain
 func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) {
 	q := req.Question[0]
 	defer func(s time.Time) {

@@ -117,7 +117,7 @@ func (r *rexecWriter) Flush() {
 // handleRemoteExec is invoked when a new remote exec request is received
 func (a *Agent) handleRemoteExec(msg *UserEvent) {
 	a.logger.Printf("[DEBUG] agent: received remote exec event (ID: %s)", msg.ID)
-	// Decode the event paylaod
+	// Decode the event payload
 	var event remoteExecEvent
 	if err := json.Unmarshal(msg.Payload, &event); err != nil {
 		a.logger.Printf("[ERR] agent: failed to decode remote exec event: %v", err)

@@ -41,7 +41,7 @@ func (ls *logStream) HandleLog(l string) {
 	select {
 	case ls.logCh <- l:
 	default:
-		// We can't log syncronously, since we are already being invoked
+		// We can't log synchronously, since we are already being invoked
 		// from the logWriter, and a log will need to invoke Write() which
 		// already holds the lock. We must therefor do the log async, so
 		// as to not deadlock

@@ -118,7 +118,7 @@ func (s *scadaListener) PushRWC(conn io.ReadWriteCloser) error {
 	return s.Push(wrapped)
 }
 
-// Push is used to add a connection to the queu
+// Push is used to add a connection to the queue
 func (s *scadaListener) Push(conn net.Conn) error {
 	select {
 	case s.pending <- conn:

@@ -16,7 +16,7 @@ const (
 	// threshold. Users often send a value like 5, which they assume
 	// is seconds, but because Go uses nanosecond granularity, ends
 	// up being very small. If we see a value below this threshold,
-	// we multply by time.Second
+	// we multiply by time.Second
 	lockDelayMinThreshold = 1000
 )
 

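A standalone sketch (not the actual Consul code) of the normalization this comment describes: durations below the threshold are interpreted as seconds rather than nanoseconds.

package main

import (
	"fmt"
	"time"
)

const lockDelayMinThreshold = 1000

// normalizeLockDelay treats implausibly small durations (e.g. a bare 5,
// which Go reads as 5ns) as seconds instead.
func normalizeLockDelay(d time.Duration) time.Duration {
	if d != 0 && d < lockDelayMinThreshold {
		return d * time.Second
	}
	return d
}

func main() {
	fmt.Println(normalizeLockDelay(5))                // 5s
	fmt.Println(normalizeLockDelay(15 * time.Second)) // unchanged: 15s
}
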
@@ -17,7 +17,7 @@ var levelPriority = map[string]gsyslog.Priority{
 	"CRIT": gsyslog.LOG_CRIT,
 }
 
-// SyslogWrapper is used to cleaup log messages before
+// SyslogWrapper is used to cleanup log messages before
 // writing them to a Syslogger. Implements the io.Writer
 // interface.
 type SyslogWrapper struct {

@@ -168,7 +168,7 @@ func TestCatalogRegister_ForwardDC(t *testing.T) {
 	testutil.WaitForLeader(t, client.Call, "dc2")
 
 	arg := structs.RegisterRequest{
-		Datacenter: "dc2", // SHould forward through s1
+		Datacenter: "dc2", // Should forward through s1
 		Node:       "foo",
 		Address:    "127.0.0.1",
 		Service: &structs.NodeService{

@@ -20,7 +20,7 @@ const (
 	// open to a server
 	clientRPCCache = 30 * time.Second
 
-	// clientMaxStreams controsl how many idle streams we keep
+	// clientMaxStreams controls how many idle streams we keep
 	// open to a server
 	clientMaxStreams = 32
 )

@@ -180,7 +180,7 @@ type Config struct {
 	// is also monotonic. This prevents deletes from reducing the disk space
 	// used.
 	// In theory, neither of these are intrinsic limitations, however for the
-	// purposes of building a practical system, they are reaonable trade offs.
+	// purposes of building a practical system, they are reasonable trade offs.
 	//
 	// It is also possible to set this to an incredibly long time, thereby
 	// simulating infinite retention. This is not recommended however.

@@ -50,7 +50,7 @@ func FilterKeys(acl acl.ACL, keys []string) []string {
 	return keys[:FilterEntries(&kf)]
 }
 
-// Filter interfae is used with FilterEntries to do an
+// Filter interface is used with FilterEntries to do an
 // in-place filter of a slice.
 type Filter interface {
 	Len() int

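Not Consul's actual implementation (only Len() is visible in this hunk), but a self-contained sketch of the in-place filtering idea that FilterKeys relies on when it re-slices with keys[:FilterEntries(&kf)]; the Keep and Swap methods here are assumptions made for the sketch:

package main

import "fmt"

// stringFilter is a hypothetical filter over a string slice.
type stringFilter struct {
	items []string
	keep  func(string) bool
}

func (f *stringFilter) Len() int        { return len(f.items) }
func (f *stringFilter) Keep(i int) bool { return f.keep(f.items[i]) }
func (f *stringFilter) Swap(i, j int)   { f.items[i], f.items[j] = f.items[j], f.items[i] }

// filterEntries compacts kept entries at the front and returns their count,
// so the caller can re-slice without allocating a new slice.
func filterEntries(f *stringFilter) int {
	n := 0
	for i := 0; i < f.Len(); i++ {
		if f.Keep(i) {
			f.Swap(n, i)
			n++
		}
	}
	return n
}

func main() {
	f := &stringFilter{
		items: []string{"web/config", "secret/token", "web/index"},
		keep:  func(k string) bool { return k != "secret/token" },
	}
	fmt.Println(f.items[:filterEntries(f)]) // [web/config web/index]
}
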
@@ -48,7 +48,7 @@ func (s *Server) monitorLeadership() {
 }
 
 // leaderLoop runs as long as we are the leader to run various
-// maintence activities
+// maintenance activities
 func (s *Server) leaderLoop(stopCh chan struct{}) {
 	// Ensure we revoke leadership on stepdown
 	defer s.revokeLeadership()

@@ -256,7 +256,7 @@ func (s *Server) reconcile() (err error) {
 
 // reconcileReaped is used to reconcile nodes that have failed and been reaped
 // from Serf but remain in the catalog. This is done by looking for SerfCheckID
-// in a crticial state that does not correspond to a known Serf member. We generate
+// in a critical state that does not correspond to a known Serf member. We generate
 // a "reap" event to cause the node to be cleaned up.
 func (s *Server) reconcileReaped(known map[string]struct{}) error {
 	state := s.fsm.State()

@@ -329,7 +329,7 @@ func (p *ConnPool) getNewConn(dc string, addr net.Addr, version int) (*Conn, err
 	return c, nil
 }
 
-// clearConn is used to clear any cached connection, potentially in response to an erro
+// clearConn is used to clear any cached connection, potentially in response to an error
 func (p *ConnPool) clearConn(conn *Conn) {
 	// Ensure returned streams are closed
 	atomic.StoreInt32(&conn.shouldClose, 1)

@@ -231,11 +231,11 @@ func (s *Server) maybeBootstrap() {
 		s.logger.Printf("[ERR] consul: failed to bootstrap peers: %v", err)
 	}
 
-	// Bootstrapping comlete, don't enter this again
+	// Bootstrapping complete, don't enter this again
 	s.config.BootstrapExpect = 0
 }
 
-// nodeFailed is used to handle fail events on both the serf clustes
+// nodeFailed is used to handle fail events on both the serf clusters
 func (s *Server) nodeFailed(me serf.MemberEvent, wan bool) {
 	for _, m := range me.Members {
 		ok, parts := isConsulServer(m)

@@ -40,7 +40,7 @@ const (
 	// open to a server
 	serverRPCCache = 2 * time.Minute
 
-	// serverMaxStreams controsl how many idle streams we keep
+	// serverMaxStreams controls how many idle streams we keep
 	// open to a server
 	serverMaxStreams = 64
 

@@ -566,7 +566,7 @@ func (s *Server) Leave() error {
 }
 
 // numOtherPeers is used to check on the number of known peers
-// excluding the local ndoe
+// excluding the local node
 func (s *Server) numOtherPeers() (int, error) {
 	peers, err := s.raftPeers.Peers()
 	if err != nil {

@@ -8,7 +8,7 @@ import (
 	"github.com/hashicorp/consul/consul/structs"
 )
 
-// initializeSessionTimers is used when a leader is newly electd to create
+// initializeSessionTimers is used when a leader is newly elected to create
 // a new map to track session expiration and to reset all the timers from
 // the previously known set of timers.
 func (s *Server) initializeSessionTimers() error {

@@ -75,7 +75,7 @@ type StateStore struct {
 	// lockDelay is used to mark certain locks as unacquirable.
 	// When a lock is forcefully released (failing health
 	// check, destroyed session, etc), it is subject to the LockDelay
-	// impossed by the session. This prevents another session from
+	// imposed by the session. This prevents another session from
 	// acquiring the lock for some period of time as a protection against
 	// split-brains. This is inspired by the lock-delay in Chubby.
 	// Because this relies on wall-time, we cannot assume all peers

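Not part of this commit: the lock-delay described here is driven by the session's LockDelay field. A minimal sketch with the api client, using an arbitrary name, TTL, and delay:

package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// If this session is force-invalidated, locks it held stay unacquirable
	// for 15s, guarding against split-brain on a lock holder change.
	id, _, err := client.Session().Create(&api.SessionEntry{
		Name:      "guarded-lock",
		TTL:       "30s",
		LockDelay: 15 * time.Second,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("session:", id)
}
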
@@ -979,7 +979,7 @@ func (s *StateStore) ChecksInState(state string) (uint64, structs.HealthChecks)
 	return s.parseHealthChecks(idx, res, err)
 }
 
-// parseHealthChecks is used to handle the resutls of a Get against
+// parseHealthChecks is used to handle the results of a Get against
 // the checkTable
 func (s *StateStore) parseHealthChecks(idx uint64, res []interface{}, err error) (uint64, structs.HealthChecks) {
 	results := make([]*structs.HealthCheck, len(res))

@@ -1054,7 +1054,7 @@ func (s *StateStore) parseCheckServiceNodes(tx *MDBTxn, res []interface{}, err e
 		res, err := s.checkTable.GetTxn(tx, "node", srv.Node, srv.ServiceID)
 		_, checks := s.parseHealthChecks(0, res, err)
 
-		// Get any checks of the node, not assciated with any service
+		// Get any checks of the node, not associated with any service
 		res, err = s.checkTable.GetTxn(tx, "node", srv.Node, "")
 		_, nodeChecks := s.parseHealthChecks(0, res, err)
 		checks = append(checks, nodeChecks...)

@@ -1093,7 +1093,7 @@ func (s *StateStore) NodeInfo(node string) (uint64, structs.NodeDump) {
 }
 
 // NodeDump is used to generate the NodeInfo for all nodes. This is very expensive,
-// and should generally be avoided for programatic access.
+// and should generally be avoided for programmatic access.
 func (s *StateStore) NodeDump() (uint64, structs.NodeDump) {
 	tables := s.queryTables["NodeDump"]
 	tx, err := tables.StartTxn(true)

@@ -1269,7 +1269,7 @@ func (s *StateStore) KVSListKeys(prefix, seperator string) (uint64, []string, er
 		ent := raw.(*structs.DirEntry)
 		after := ent.Key[prefixLen:]
 
-		// Update the hightest index we've seen
+		// Update the highest index we've seen
 		if ent.ModifyIndex > maxIndex {
 			maxIndex = ent.ModifyIndex
 		}

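For context (not from this commit): on the client side, the highest ModifyIndex tracked here surfaces as QueryMeta.LastIndex, which can feed back into WaitIndex for blocking list queries. A sketch with a made-up prefix:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// List top-level keys under "web/"; meta.LastIndex carries the highest
	// index seen, suitable as WaitIndex for the next blocking call.
	keys, meta, err := client.KV().Keys("web/", "/", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(keys, meta.LastIndex)
}
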
@@ -1571,7 +1571,7 @@ func (s *StateStore) ReapTombstones(index uint64) error {
 	defer tx.Abort()
 
 	// Scan the tombstone table for all the entries that are
-	// eligble for GC. This could be improved by indexing on
+	// eligible for GC. This could be improved by indexing on
 	// ModifyTime and doing a less-than-equals scan, however
 	// we don't currently support numeric indexes internally.
 	// Luckily, this is a low frequency operation.

@@ -1779,7 +1779,7 @@ func (s *StateStore) SessionDestroy(index uint64, id string) error {
 	return tx.Commit()
 }
 
-// invalideNode is used to invalide all sessions belonging to a node
+// invalidateNode is used to invalidate all sessions belonging to a node
 // All tables should be locked in the tx.
 func (s *StateStore) invalidateNode(index uint64, tx *MDBTxn, node string) error {
 	sessions, err := s.sessionTable.GetTxn(tx, "node", node)

@@ -1797,7 +1797,7 @@ func (s *StateStore) invalidateNode(index uint64, tx *MDBTxn, node string) error
 	return nil
 }
 
-// invalidateCheck is used to invalide all sessions belonging to a check
+// invalidateCheck is used to invalidate all sessions belonging to a check
 // All tables should be locked in the tx.
 func (s *StateStore) invalidateCheck(index uint64, tx *MDBTxn, node, check string) error {
 	sessionChecks, err := s.sessionCheckTable.GetTxn(tx, "id", node, check)

@@ -1815,7 +1815,7 @@ func (s *StateStore) invalidateCheck(index uint64, tx *MDBTxn, node, check strin
 	return nil
 }
 
-// invalidateSession is used to invalide a session within a given txn
+// invalidateSession is used to invalidate a session within a given txn
 // All tables should be locked in the tx.
 func (s *StateStore) invalidateSession(index uint64, tx *MDBTxn, id string) error {
 	// Get the session

@@ -129,7 +129,7 @@ func (t *TombstoneGC) PendingExpiration() bool {
 	return len(t.expires) > 0
 }
 
-// nextExpires is used to calculate the next experation time
+// nextExpires is used to calculate the next expiration time
 func (t *TombstoneGC) nextExpires() time.Time {
 	expires := time.Now().Add(t.ttl)
 	remain := expires.UnixNano() % int64(t.granularity)

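A standalone sketch of the expiration rounding this function performs: the first two lines mirror what is visible above, while the final rounding step is an assumption made for the sketch. Aligning expirations to a granularity boundary lets many tombstones share a single timer.

package main

import (
	"fmt"
	"time"
)

// nextExpires rounds "now + ttl" up to the next granularity boundary, so
// tombstones created within the same window expire together.
func nextExpires(ttl, granularity time.Duration) time.Time {
	expires := time.Now().Add(ttl)
	remain := expires.UnixNano() % int64(granularity)
	return expires.Add(granularity - time.Duration(remain))
}

func main() {
	fmt.Println(nextExpires(15*time.Minute, 30*time.Second))
}
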
@@ -271,7 +271,7 @@ func (s *TestServer) waitForLeader() {
 		return false, err
 	}
 
-	// Ensure we have a leader and a node registeration
+	// Ensure we have a leader and a node registration
 	if leader := resp.Header.Get("X-Consul-KnownLeader"); leader != "true" {
 		fmt.Println(leader)
 		return false, fmt.Errorf("Consul leader status: %#v", leader)