diff --git a/acl/acl.go b/acl/acl.go
index f18be42b5..8492177e3 100644
--- a/acl/acl.go
+++ b/acl/acl.go
@@ -170,7 +170,7 @@ type PolicyACL struct {
 	eventRules *radix.Tree
 
 	// keyringRules contains the keyring policies. The keyring has
-	// a very simple yes/no without prefix mathing, so here we
+	// a very simple yes/no without prefix matching, so here we
 	// don't need to use a radix tree.
 	keyringRule string
 }
diff --git a/api/api.go b/api/api.go
index 8fe2ead04..b0b712834 100644
--- a/api/api.go
+++ b/api/api.go
@@ -36,7 +36,7 @@ type QueryOptions struct {
 	WaitIndex uint64
 
 	// WaitTime is used to bound the duration of a wait.
-	// Defaults to that of the Config, but can be overriden.
+	// Defaults to that of the Config, but can be overridden.
 	WaitTime time.Duration
 
 	// Token is used to provide a per-request ACL token
diff --git a/api/kv.go b/api/kv.go
index c1a8923be..688b3a09d 100644
--- a/api/kv.go
+++ b/api/kv.go
@@ -143,7 +143,7 @@ func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
 	return k.put(p.Key, params, p.Value, q)
 }
 
-// Acquire is used for a lock acquisiiton operation. The Key,
+// Acquire is used for a lock acquisition operation. The Key,
 // Flags, Value and Session are respected. Returns true
 // on success or false on failures.
 func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
diff --git a/api/lock_test.go b/api/lock_test.go
index 0a8fa5172..ceab5cdf9 100644
--- a/api/lock_test.go
+++ b/api/lock_test.go
@@ -237,7 +237,7 @@ func TestLock_Destroy(t *testing.T) {
 		t.Fatalf("err: %v", err)
 	}
 
-	// Should relese
+	// Should release
 	err = l2.Unlock()
 	if err != nil {
 		t.Fatalf("err: %v", err)
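The Acquire comment corrected above documents the KV-based lock-acquisition operation: Key, Flags, Value and Session are respected, and the call reports success as a boolean. A minimal sketch of driving it from the api package, assuming a Consul agent on the default local address; the key name and session handling here are illustrative, not prescribed by the patch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// A lock acquisition needs a session to tie the lock's
	// lifetime to; create one with default settings.
	session, _, err := client.Session().Create(nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Session().Destroy(session, nil)

	// Attempt the acquisition; false means another session holds the key.
	pair := &api.KVPair{Key: "service/demo/leader", Value: []byte("hi"), Session: session}
	acquired, _, err := client.KV().Acquire(pair, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acquired:", acquired)

	// Release mirrors Acquire, clearing the session from the key.
	if acquired {
		client.KV().Release(pair, nil)
	}
}
```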
diff --git a/api/semaphore.go b/api/semaphore.go
index ff4c2058c..4e70be2e7 100644
--- a/api/semaphore.go
+++ b/api/semaphore.go
@@ -66,7 +66,7 @@ type SemaphoreOptions struct {
 	Prefix      string // Must be set and have write permissions
 	Limit       int    // Must be set, and be positive
 	Value       []byte // Optional, value to associate with the contender entry
-	Session     string // OPtional, created if not specified
+	Session     string // Optional, created if not specified
 	SessionName string // Optional, defaults to DefaultLockSessionName
 	SessionTTL  string // Optional, defaults to DefaultLockSessionTTL
 }
@@ -123,7 +123,7 @@ func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
 }
 
 // Acquire attempts to reserve a slot in the semaphore, blocking until
-// success, interrupted via the stopCh or an error is encounted.
+// success, interrupted via the stopCh or an error is encountered.
 // Providing a non-nil stopCh can be used to abort the attempt.
 // On success, a channel is returned that represents our slot.
 // This channel could be closed at any time due to session invalidation,
diff --git a/api/session.go b/api/session.go
index a99da511d..574738127 100644
--- a/api/session.go
+++ b/api/session.go
@@ -102,7 +102,7 @@ func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta,
 	return out.ID, wm, nil
 }
 
-// Destroy invalides a given session
+// Destroy invalidates a given session
 func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
 	wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
 	if err != nil {
diff --git a/command/agent/agent.go b/command/agent/agent.go
index 6e11a535c..4509dce89 100644
--- a/command/agent/agent.go
+++ b/command/agent/agent.go
@@ -718,7 +718,7 @@ func (a *Agent) RemoveService(serviceID string, persist bool) error {
 		return fmt.Errorf("ServiceID missing")
 	}
 
-	// Remove service immeidately
+	// Remove service immediately
 	a.state.RemoveService(serviceID)
 
 	// Remove the service from the data dir
diff --git a/command/agent/config.go b/command/agent/config.go
index f0aba4247..7941b2997 100644
--- a/command/agent/config.go
+++ b/command/agent/config.go
@@ -26,7 +26,7 @@ type PortConfig struct {
 	HTTPS   int // HTTPS API
 	RPC     int // CLI RPC
 	SerfLan int `mapstructure:"serf_lan"` // LAN gossip (Client + Server)
-	SerfWan int `mapstructure:"serf_wan"` // WAN gossip (Server onlyg)
+	SerfWan int `mapstructure:"serf_wan"` // WAN gossip (Server only)
 	Server  int // Server internal RPC
 }
 
@@ -99,7 +99,7 @@ type Config struct {
 	Bootstrap bool `mapstructure:"bootstrap"`
 
 	// BootstrapExpect tries to automatically bootstrap the Consul cluster,
-	// by witholding peers until enough servers join.
+	// by withholding peers until enough servers join.
 	BootstrapExpect int `mapstructure:"bootstrap_expect"`
 
 	// Server controls if this agent acts like a Consul server,
@@ -221,7 +221,7 @@ type Config struct {
 	KeyFile string `mapstructure:"key_file"`
 
 	// ServerName is used with the TLS certificates to ensure the name we
-	// provid ematches the certificate
+	// provide matches the certificate
 	ServerName string `mapstructure:"server_name"`
 
 	// StartJoin is a list of addresses to attempt to join when the
diff --git a/command/agent/dns.go b/command/agent/dns.go
index 35830648e..b1857c34a 100644
--- a/command/agent/dns.go
+++ b/command/agent/dns.go
@@ -223,7 +223,7 @@ func (d *DNSServer) handlePtr(resp dns.ResponseWriter, req *dns.Msg) {
 	}
 }
 
-// handleQUery is used to handle DNS queries in the configured domain
+// handleQuery is used to handle DNS queries in the configured domain
 func (d *DNSServer) handleQuery(resp dns.ResponseWriter, req *dns.Msg) {
 	q := req.Question[0]
 	defer func(s time.Time) {
diff --git a/command/agent/remote_exec.go b/command/agent/remote_exec.go
index a529bbecf..20c4bb0b9 100644
--- a/command/agent/remote_exec.go
+++ b/command/agent/remote_exec.go
@@ -117,7 +117,7 @@ func (r *rexecWriter) Flush() {
 // handleRemoteExec is invoked when a new remote exec request is received
 func (a *Agent) handleRemoteExec(msg *UserEvent) {
 	a.logger.Printf("[DEBUG] agent: received remote exec event (ID: %s)", msg.ID)
-	// Decode the event paylaod
+	// Decode the event payload
 	var event remoteExecEvent
 	if err := json.Unmarshal(msg.Payload, &event); err != nil {
 		a.logger.Printf("[ERR] agent: failed to decode remote exec event: %v", err)
diff --git a/command/agent/rpc_log_stream.go b/command/agent/rpc_log_stream.go
index a561e082f..580663e75 100644
--- a/command/agent/rpc_log_stream.go
+++ b/command/agent/rpc_log_stream.go
@@ -41,7 +41,7 @@ func (ls *logStream) HandleLog(l string) {
 	select {
 	case ls.logCh <- l:
 	default:
-		// We can't log syncronously, since we are already being invoked
+		// We can't log synchronously, since we are already being invoked
 		// from the logWriter, and a log will need to invoke Write() which
 		// already holds the lock. We must therefor do the log async, so
 		// as to not deadlock
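The SemaphoreOptions fields and the Acquire contract touched above (block until success, a stop channel to abort, a returned channel representing the slot) suggest the following usage shape. A minimal sketch, assuming an agent on the default address; the prefix and limit are chosen purely for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Up to Limit contenders may hold a slot under the prefix at once.
	sema, err := client.SemaphoreOpts(&api.SemaphoreOptions{
		Prefix: "service/demo/semaphore", // must be writable by our token
		Limit:  2,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Acquire blocks until a slot is reserved; closing stopCh aborts the wait.
	stopCh := make(chan struct{})
	slotHeld, err := sema.Acquire(stopCh)
	if err != nil {
		log.Fatal(err)
	}
	defer sema.Release()

	select {
	case <-slotHeld:
		// Channel closed: the session was invalidated and the slot lost.
		fmt.Println("lost the slot")
	default:
		fmt.Println("holding a slot")
	}
}
```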
diff --git a/command/agent/scada.go b/command/agent/scada.go
index 7cd69e023..53766bcd5 100644
--- a/command/agent/scada.go
+++ b/command/agent/scada.go
@@ -118,7 +118,7 @@ func (s *scadaListener) PushRWC(conn io.ReadWriteCloser) error {
 	return s.Push(wrapped)
 }
 
-// Push is used to add a connection to the queu
+// Push is used to add a connection to the queue
 func (s *scadaListener) Push(conn net.Conn) error {
 	select {
 	case s.pending <- conn:
diff --git a/command/agent/session_endpoint.go b/command/agent/session_endpoint.go
index 0750de7d2..f3d8db22b 100644
--- a/command/agent/session_endpoint.go
+++ b/command/agent/session_endpoint.go
@@ -16,7 +16,7 @@ const (
 	// threshold. Users often send a value like 5, which they assume
 	// is seconds, but because Go uses nanosecond granularity, ends
 	// up being very small. If we see a value below this threshold,
-	// we multply by time.Second
+	// we multiply by time.Second
 	lockDelayMinThreshold = 1000
 )
 
diff --git a/command/agent/syslog.go b/command/agent/syslog.go
index b7aed636d..d2522a38a 100644
--- a/command/agent/syslog.go
+++ b/command/agent/syslog.go
@@ -17,7 +17,7 @@ var levelPriority = map[string]gsyslog.Priority{
 	"CRIT": gsyslog.LOG_CRIT,
 }
 
-// SyslogWrapper is used to cleaup log messages before
+// SyslogWrapper is used to cleanup log messages before
 // writing them to a Syslogger. Implements the io.Writer
 // interface.
 type SyslogWrapper struct {
diff --git a/consul/catalog_endpoint_test.go b/consul/catalog_endpoint_test.go
index 7f927a4e9..7fc4c8db5 100644
--- a/consul/catalog_endpoint_test.go
+++ b/consul/catalog_endpoint_test.go
@@ -168,7 +168,7 @@ func TestCatalogRegister_ForwardDC(t *testing.T) {
 	testutil.WaitForLeader(t, client.Call, "dc2")
 
 	arg := structs.RegisterRequest{
-		Datacenter: "dc2", // SHould forward through s1
+		Datacenter: "dc2", // Should forward through s1
 		Node:       "foo",
 		Address:    "127.0.0.1",
 		Service: &structs.NodeService{
diff --git a/consul/client.go b/consul/client.go
index 35b77445e..abd27cd2d 100644
--- a/consul/client.go
+++ b/consul/client.go
@@ -20,7 +20,7 @@ const (
 	// open to a server
 	clientRPCCache = 30 * time.Second
 
-	// clientMaxStreams controsl how many idle streams we keep
+	// clientMaxStreams controls how many idle streams we keep
 	// open to a server
 	clientMaxStreams = 32
 )
diff --git a/consul/config.go b/consul/config.go
index 541c2bce0..95ea0e604 100644
--- a/consul/config.go
+++ b/consul/config.go
@@ -180,7 +180,7 @@ type Config struct {
 	// is also monotonic. This prevents deletes from reducing the disk space
 	// used.
 	// In theory, neither of these are intrinsic limitations, however for the
-	// purposes of building a practical system, they are reaonable trade offs.
+	// purposes of building a practical system, they are reasonable trade offs.
 	//
 	// It is also possible to set this to an incredibly long time, thereby
 	// simulating infinite retention. This is not recommended however.
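The lockDelayMinThreshold comment above describes a unit-guessing heuristic: a session's lock-delay arrives as a bare number, and a value like 5 almost certainly means seconds rather than nanoseconds, so anything below the threshold gets scaled up. A standalone sketch of that normalization; the helper name is hypothetical, not the agent's actual code:

```go
package main

import (
	"fmt"
	"time"
)

// lockDelayMinThreshold mirrors the constant above: raw values below
// it are assumed to be seconds rather than nanoseconds.
const lockDelayMinThreshold = 1000

// normalizeLockDelay is a hypothetical helper showing the heuristic:
// small raw values are promoted from "seconds" to a real duration.
func normalizeLockDelay(raw uint64) time.Duration {
	if raw < lockDelayMinThreshold {
		return time.Duration(raw) * time.Second
	}
	return time.Duration(raw) // already nanoseconds
}

func main() {
	fmt.Println(normalizeLockDelay(5))    // 5s, not 5ns
	fmt.Println(normalizeLockDelay(15e9)) // 15s, passed through as nanoseconds
}
```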
diff --git a/consul/filter.go b/consul/filter.go
index 5577aa47a..946508e31 100644
--- a/consul/filter.go
+++ b/consul/filter.go
@@ -50,7 +50,7 @@ func FilterKeys(acl acl.ACL, keys []string) []string {
 	return keys[:FilterEntries(&kf)]
 }
 
-// Filter interfae is used with FilterEntries to do an
+// Filter interface is used with FilterEntries to do an
 // in-place filter of a slice.
 type Filter interface {
 	Len() int
diff --git a/consul/leader.go b/consul/leader.go
index 2dbb9c546..67be5bb59 100644
--- a/consul/leader.go
+++ b/consul/leader.go
@@ -48,7 +48,7 @@ func (s *Server) monitorLeadership() {
 }
 
 // leaderLoop runs as long as we are the leader to run various
-// maintence activities
+// maintenance activities
 func (s *Server) leaderLoop(stopCh chan struct{}) {
 	// Ensure we revoke leadership on stepdown
 	defer s.revokeLeadership()
@@ -256,7 +256,7 @@ func (s *Server) reconcile() (err error) {
 
 // reconcileReaped is used to reconcile nodes that have failed and been reaped
 // from Serf but remain in the catalog. This is done by looking for SerfCheckID
-// in a crticial state that does not correspond to a known Serf member. We generate
+// in a critical state that does not correspond to a known Serf member. We generate
 // a "reap" event to cause the node to be cleaned up.
 func (s *Server) reconcileReaped(known map[string]struct{}) error {
 	state := s.fsm.State()
diff --git a/consul/pool.go b/consul/pool.go
index 0cd0a99df..b7711aad6 100644
--- a/consul/pool.go
+++ b/consul/pool.go
@@ -329,7 +329,7 @@ func (p *ConnPool) getNewConn(dc string, addr net.Addr, version int) (*Conn, err
 	return c, nil
 }
 
-// clearConn is used to clear any cached connection, potentially in response to an erro
+// clearConn is used to clear any cached connection, potentially in response to an error
 func (p *ConnPool) clearConn(conn *Conn) {
 	// Ensure returned streams are closed
 	atomic.StoreInt32(&conn.shouldClose, 1)
diff --git a/consul/serf.go b/consul/serf.go
index ed80a74b0..f4a90e566 100644
--- a/consul/serf.go
+++ b/consul/serf.go
@@ -231,11 +231,11 @@ func (s *Server) maybeBootstrap() {
 		s.logger.Printf("[ERR] consul: failed to bootstrap peers: %v", err)
 	}
 
-	// Bootstrapping comlete, don't enter this again
+	// Bootstrapping complete, don't enter this again
 	s.config.BootstrapExpect = 0
 }
 
-// nodeFailed is used to handle fail events on both the serf clustes
+// nodeFailed is used to handle fail events on both the serf clusters
 func (s *Server) nodeFailed(me serf.MemberEvent, wan bool) {
 	for _, m := range me.Members {
 		ok, parts := isConsulServer(m)
diff --git a/consul/server.go b/consul/server.go
index 0925e144a..34ec75224 100644
--- a/consul/server.go
+++ b/consul/server.go
@@ -40,7 +40,7 @@ const (
 	// open to a server
 	serverRPCCache = 2 * time.Minute
 
-	// serverMaxStreams controsl how many idle streams we keep
+	// serverMaxStreams controls how many idle streams we keep
 	// open to a server
 	serverMaxStreams = 64
 
@@ -566,7 +566,7 @@ func (s *Server) Leave() error {
 }
 
 // numOtherPeers is used to check on the number of known peers
-// excluding the local ndoe
+// excluding the local node
 func (s *Server) numOtherPeers() (int, error) {
 	peers, err := s.raftPeers.Peers()
 	if err != nil {
diff --git a/consul/session_ttl.go b/consul/session_ttl.go
index e93357841..172ef945e 100644
--- a/consul/session_ttl.go
+++ b/consul/session_ttl.go
@@ -8,7 +8,7 @@ import (
 	"github.com/hashicorp/consul/consul/structs"
 )
 
-// initializeSessionTimers is used when a leader is newly electd to create
+// initializeSessionTimers is used when a leader is newly elected to create
 // a new map to track session expiration and to reset all the timers from
 // the previously known set of timers.
 func (s *Server) initializeSessionTimers() error {
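The Filter interface above drives FilterEntries, an in-place filter over a slice; only Len() is visible in this hunk, so the sketch below does not reproduce Consul's actual interface. It just illustrates the in-place compaction idea behind re-slicing with keys[:FilterEntries(&kf)], using a hypothetical helper:

```go
package main

import (
	"fmt"
	"strings"
)

// filterInPlace keeps the elements for which keep returns true,
// compacting survivors to the front of the slice, and returns the
// new length so the caller can re-slice without allocating.
func filterInPlace(keys []string, keep func(string) bool) int {
	n := 0
	for _, k := range keys {
		if keep(k) {
			keys[n] = k // move the survivor into the next free slot
			n++
		}
	}
	return n
}

func main() {
	keys := []string{"foo/a", "secret/b", "foo/c"}
	// Keep only keys an ACL might permit, e.g. those under "foo/".
	n := filterInPlace(keys, func(k string) bool { return strings.HasPrefix(k, "foo/") })
	fmt.Println(keys[:n]) // [foo/a foo/c]
}
```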
diff --git a/consul/state_store.go b/consul/state_store.go
index 4535d52ad..038ae212b 100644
--- a/consul/state_store.go
+++ b/consul/state_store.go
@@ -75,7 +75,7 @@ type StateStore struct {
 	// lockDelay is used to mark certain locks as unacquirable.
 	// When a lock is forcefully released (failing health
 	// check, destroyed session, etc), it is subject to the LockDelay
-	// impossed by the session. This prevents another session from
+	// imposed by the session. This prevents another session from
 	// acquiring the lock for some period of time as a protection against
 	// split-brains. This is inspired by the lock-delay in Chubby.
 	// Because this relies on wall-time, we cannot assume all peers
@@ -979,7 +979,7 @@ func (s *StateStore) ChecksInState(state string) (uint64, structs.HealthChecks)
 	return s.parseHealthChecks(idx, res, err)
 }
 
-// parseHealthChecks is used to handle the resutls of a Get against
+// parseHealthChecks is used to handle the results of a Get against
 // the checkTable
 func (s *StateStore) parseHealthChecks(idx uint64, res []interface{}, err error) (uint64, structs.HealthChecks) {
 	results := make([]*structs.HealthCheck, len(res))
@@ -1054,7 +1054,7 @@ func (s *StateStore) parseCheckServiceNodes(tx *MDBTxn, res []interface{}, err e
 		res, err := s.checkTable.GetTxn(tx, "node", srv.Node, srv.ServiceID)
 		_, checks := s.parseHealthChecks(0, res, err)
 
-		// Get any checks of the node, not assciated with any service
+		// Get any checks of the node, not associated with any service
 		res, err = s.checkTable.GetTxn(tx, "node", srv.Node, "")
 		_, nodeChecks := s.parseHealthChecks(0, res, err)
 		checks = append(checks, nodeChecks...)
@@ -1093,7 +1093,7 @@ func (s *StateStore) NodeInfo(node string) (uint64, structs.NodeDump) {
 }
 
 // NodeDump is used to generate the NodeInfo for all nodes. This is very expensive,
-// and should generally be avoided for programatic access.
+// and should generally be avoided for programmatic access.
 func (s *StateStore) NodeDump() (uint64, structs.NodeDump) {
 	tables := s.queryTables["NodeDump"]
 	tx, err := tables.StartTxn(true)
@@ -1269,7 +1269,7 @@ func (s *StateStore) KVSListKeys(prefix, seperator string) (uint64, []string, er
 		ent := raw.(*structs.DirEntry)
 		after := ent.Key[prefixLen:]
 
-		// Update the hightest index we've seen
+		// Update the highest index we've seen
 		if ent.ModifyIndex > maxIndex {
 			maxIndex = ent.ModifyIndex
 		}
@@ -1571,7 +1571,7 @@ func (s *StateStore) ReapTombstones(index uint64) error {
 	defer tx.Abort()
 
 	// Scan the tombstone table for all the entries that are
-	// eligble for GC. This could be improved by indexing on
+	// eligible for GC. This could be improved by indexing on
 	// ModifyTime and doing a less-than-equals scan, however
 	// we don't currently support numeric indexes internally.
 	// Luckily, this is a low frequency operation.
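KVSListKeys above takes a prefix and a separator and tracks the highest ModifyIndex it sees, which is what lets key listings participate in blocking queries. On the client side this surfaces as KV().Keys in the api package; a minimal sketch, assuming a local agent and illustrative key names:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	// With "/" as the separator, keys are truncated after the first "/"
	// past the prefix, so nested entries collapse into "directories".
	keys, meta, err := kv.Keys("service/", "/", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(keys)

	// meta.LastIndex is the highest ModifyIndex seen; passing it back
	// as WaitIndex turns the next call into a blocking query that
	// returns once something under the prefix changes.
	_, _, err = kv.Keys("service/", "/", &api.QueryOptions{WaitIndex: meta.LastIndex})
	if err != nil {
		log.Fatal(err)
	}
}
```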
@@ -1779,7 +1779,7 @@ func (s *StateStore) SessionDestroy(index uint64, id string) error {
 	return tx.Commit()
 }
 
-// invalideNode is used to invalide all sessions belonging to a node
+// invalidateNode is used to invalidate all sessions belonging to a node
 // All tables should be locked in the tx.
 func (s *StateStore) invalidateNode(index uint64, tx *MDBTxn, node string) error {
 	sessions, err := s.sessionTable.GetTxn(tx, "node", node)
@@ -1797,7 +1797,7 @@ func (s *StateStore) invalidateNode(index uint64, tx *MDBTxn, node string) error
 	return nil
 }
 
-// invalidateCheck is used to invalide all sessions belonging to a check
+// invalidateCheck is used to invalidate all sessions belonging to a check
 // All tables should be locked in the tx.
 func (s *StateStore) invalidateCheck(index uint64, tx *MDBTxn, node, check string) error {
 	sessionChecks, err := s.sessionCheckTable.GetTxn(tx, "id", node, check)
@@ -1815,7 +1815,7 @@ func (s *StateStore) invalidateCheck(index uint64, tx *MDBTxn, node, check strin
 	return nil
 }
 
-// invalidateSession is used to invalide a session within a given txn
+// invalidateSession is used to invalidate a session within a given txn
 // All tables should be locked in the tx.
 func (s *StateStore) invalidateSession(index uint64, tx *MDBTxn, id string) error {
 	// Get the session
diff --git a/consul/tombstone_gc.go b/consul/tombstone_gc.go
index 8a238409c..8dd2e1a5a 100644
--- a/consul/tombstone_gc.go
+++ b/consul/tombstone_gc.go
@@ -129,7 +129,7 @@ func (t *TombstoneGC) PendingExpiration() bool {
 	return len(t.expires) > 0
 }
 
-// nextExpires is used to calculate the next experation time
+// nextExpires is used to calculate the next expiration time
func (t *TombstoneGC) nextExpires() time.Time {
 	expires := time.Now().Add(t.ttl)
 	remain := expires.UnixNano() % int64(t.granularity)
diff --git a/testutil/server.go b/testutil/server.go
index 66b4f9509..d4ab7afe3 100644
--- a/testutil/server.go
+++ b/testutil/server.go
@@ -271,7 +271,7 @@ func (s *TestServer) waitForLeader() {
 		return false, err
 	}
 
-	// Ensure we have a leader and a node registeration
+	// Ensure we have a leader and a node registration
 	if leader := resp.Header.Get("X-Consul-KnownLeader"); leader != "true" {
 		fmt.Println(leader)
 		return false, fmt.Errorf("Consul leader status: %#v", leader)
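The nextExpires hunk above shows the start of a granularity-binning computation: an expiration time is rounded up to the next granularity boundary so that tombstones expiring close together can share one timer. A standalone sketch of that rounding, with illustrative ttl and granularity values; the completion after the two lines shown in the hunk is an assumption, not necessarily the shipped code:

```go
package main

import (
	"fmt"
	"time"
)

// nextExpires rounds now+ttl up to the next granularity boundary.
// Binning expirations this way lets many tombstones share one timer
// instead of each arming its own.
func nextExpires(ttl, granularity time.Duration) time.Time {
	expires := time.Now().Add(ttl)
	remain := expires.UnixNano() % int64(granularity)
	// Push the deadline forward to the end of its bin.
	return expires.Add(granularity - time.Duration(remain))
}

func main() {
	a := nextExpires(15*time.Minute, 30*time.Second)
	b := nextExpires(15*time.Minute, 30*time.Second)
	// Two calls made within the same 30s window land on the same boundary.
	fmt.Println(a.Equal(b))
}
```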