Merge pull request #3661 from hashicorp/typos

Fix some typos
Commit 52d988ad6f by Preetha, 2017-12-14 14:16:35 -06:00, committed by GitHub
8 changed files with 15 additions and 15 deletions


@@ -303,7 +303,7 @@ func getSignalConstraint(signals []string) *structs.Constraint {
}
}
-// Summary retreives the summary of a job
+// Summary retrieves the summary of a job
func (j *Job) Summary(args *structs.JobSummaryRequest,
reply *structs.JobSummaryResponse) error {

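The Summary handler above follows the args/reply convention used by Go's net/rpc-style endpoints: the caller supplies an args struct, the method fills in a reply struct and returns only an error. A minimal, self-contained sketch of that pattern follows; the lower-cased types and their fields are illustrative stand-ins, not the real structs.JobSummaryRequest/JobSummaryResponse.

```go
// Sketch of the args/reply RPC-handler pattern; all types are hypothetical.
package main

import (
	"errors"
	"fmt"
)

// jobSummaryRequest and jobSummaryResponse stand in for
// structs.JobSummaryRequest / structs.JobSummaryResponse.
type jobSummaryRequest struct {
	JobID string
}

type jobSummaryResponse struct {
	Summary string
}

// jobEndpoint mimics an RPC endpoint: it fills in the reply and returns an
// error instead of returning the result directly.
type jobEndpoint struct {
	summaries map[string]string // hypothetical in-memory store
}

// Summary retrieves the summary of a job, writing it into reply.
func (j *jobEndpoint) Summary(args *jobSummaryRequest, reply *jobSummaryResponse) error {
	s, ok := j.summaries[args.JobID]
	if !ok {
		return errors.New("job not found")
	}
	reply.Summary = s
	return nil
}

func main() {
	j := &jobEndpoint{summaries: map[string]string{"example": "1 running, 0 failed"}}
	var reply jobSummaryResponse
	if err := j.Summary(&jobSummaryRequest{JobID: "example"}, &reply); err != nil {
		panic(err)
	}
	fmt.Println(reply.Summary)
}
```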

@@ -290,14 +290,14 @@ type NodeUpdateStatusRequest struct {
WriteRequest
}
-// NodeUpdateDrainRequest is used for updatin the drain status
+// NodeUpdateDrainRequest is used for updating the drain status
type NodeUpdateDrainRequest struct {
NodeID string
Drain bool
WriteRequest
}
-// NodeEvaluateRequest is used to re-evaluate the ndoe
+// NodeEvaluateRequest is used to re-evaluate the node
type NodeEvaluateRequest struct {
NodeID string
WriteRequest
@@ -5312,7 +5312,7 @@ const (
// potentially taking action (allocation of work) or doing nothing if the state
// of the world does not require it.
type Evaluation struct {
-// ID is a randonly generated UUID used for this evaluation. This
+// ID is a randomly generated UUID used for this evaluation. This
// is assigned upon the creation of the evaluation.
ID string

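The structs touched here carry node drain updates, node re-evaluation requests, and evaluations keyed by a randomly generated UUID assigned at creation. A small sketch of building such values, limited to the fields visible in the hunks; the lower-cased types and the newUUID helper are hypothetical, not Nomad's own implementation.

```go
// Sketch of the request shapes above and of assigning a random UUID to an
// evaluation at creation time; types and helper are illustrative only.
package main

import (
	"crypto/rand"
	"fmt"
)

// nodeUpdateDrainRequest mirrors the NodeUpdateDrainRequest fields shown in
// the hunk (NodeID, Drain); the embedded WriteRequest is omitted here.
type nodeUpdateDrainRequest struct {
	NodeID string
	Drain  bool
}

// evaluation mirrors only the Evaluation.ID field shown in the hunk.
type evaluation struct {
	ID string
}

// newUUID returns a random RFC 4122 version-4 UUID string.
func newUUID() string {
	buf := make([]byte, 16)
	if _, err := rand.Read(buf); err != nil {
		panic(err)
	}
	buf[6] = (buf[6] & 0x0f) | 0x40 // version 4
	buf[8] = (buf[8] & 0x3f) | 0x80 // variant 10
	return fmt.Sprintf("%x-%x-%x-%x-%x", buf[0:4], buf[4:6], buf[6:8], buf[8:10], buf[10:16])
}

func main() {
	// Ask the server to mark a node as draining.
	req := nodeUpdateDrainRequest{NodeID: newUUID(), Drain: true}

	// ID is assigned once, when the evaluation is created.
	eval := evaluation{ID: newUUID()}

	fmt.Printf("drain request: %+v\n", req)
	fmt.Printf("evaluation id: %s\n", eval.ID)
}
```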

@@ -55,7 +55,7 @@ func (s *SetStatusError) Error() string {
}
// GenericScheduler is used for 'service' and 'batch' type jobs. This scheduler is
-// designed for long-lived services, and as such spends more time attemping
+// designed for long-lived services, and as such spends more time attempting
// to make a high quality placement. This is the primary scheduler for
// most workloads. It also supports a 'batch' mode to optimize for fast decision
// making at the cost of quality.
@@ -443,7 +443,7 @@ func (s *GenericScheduler) computePlacements(destructive, place []placementResul
deploymentID = s.deployment.ID
}
-// Update the set of placement ndoes
+// Update the set of placement nodes
s.stack.SetNodes(nodes)
// Have to handle destructive changes first as we need to discount their

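The computePlacements hunk above primes the scheduler's selection stack with the current set of placement nodes via SetNodes before asking it for an option per task group (the same stack's Select call appears in the system_sched.go hunk further down). A simplified, self-contained sketch of that set-then-select flow, using stand-in types rather than the real scheduler stack:

```go
// Sketch of the "set candidate nodes, then select a placement" flow; all
// types here are simplified stand-ins.
package main

import "fmt"

type node struct {
	ID string
}

type taskGroup struct {
	Name string
}

// selectionStack is a hypothetical stand-in for the scheduler's stack: it is
// primed with candidate nodes and then asked for a placement per group.
type selectionStack struct {
	nodes []node
}

// SetNodes updates the set of placement nodes considered by later selections.
func (s *selectionStack) SetNodes(nodes []node) {
	s.nodes = nodes
}

// Select returns a candidate node for the task group, or nil if none fit.
// A real scheduler would rank candidates by feasibility and score for tg;
// this sketch just picks the first node.
func (s *selectionStack) Select(tg *taskGroup) *node {
	if len(s.nodes) == 0 {
		return nil
	}
	return &s.nodes[0]
}

func main() {
	stack := &selectionStack{}
	stack.SetNodes([]node{{ID: "node-1"}, {ID: "node-2"}})

	if option := stack.Select(&taskGroup{Name: "web"}); option != nil {
		fmt.Println("place web on", option.ID)
	} else {
		fmt.Println("no feasible node for web")
	}
}
```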

@@ -43,7 +43,7 @@ Update stanza Tests:
Stopped job cancels any active deployment
Stopped job doesn't cancel terminal deployment
JobIndex change cancels any active deployment
-JobIndex change doens't cancels any terminal deployment
+JobIndex change doesn't cancels any terminal deployment
Destructive changes create deployment and get rolled out via max_parallelism
Don't create a deployment if there are no changes
Deployment created by all inplace updates


@@ -8,7 +8,7 @@ import (
"github.com/hashicorp/nomad/nomad/structs"
)
-// placementResult is an allocation that must be placed. It potentionally has a
+// placementResult is an allocation that must be placed. It potentially has a
// previous allocation attached to it that should be stopped only if the
// paired placement is complete. This gives an atomic place/stop behavior to
// prevent an impossible resource ask as part of a rolling update to wipe the
@@ -181,7 +181,7 @@ func (a allocSet) fromKeys(keys ...[]string) allocSet {
return from
}
-// fitlerByTainted takes a set of tainted nodes and filters the allocation set
+// filterByTainted takes a set of tainted nodes and filters the allocation set
// into three groups:
// 1. Those that exist on untainted nodes
// 2. Those exist on nodes that are draining
@@ -296,7 +296,7 @@ func bitmapFrom(input allocSet, minSize uint) structs.Bitmap {
return bitmap
}
-// RemoveHighest removes and returns the hightest n used names. The returned set
+// RemoveHighest removes and returns the highest n used names. The returned set
// can be less than n if there aren't n names set in the index
func (a *allocNameIndex) Highest(n uint) map[string]struct{} {
h := make(map[string]struct{}, n)

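filterByTainted, per the comment above, partitions the allocation set by the state of the node each allocation runs on. A minimal sketch of that three-way split follows; the types are stand-ins rather than the real allocSet API, and the third "lost" bucket is an assumption, since the excerpt only lists the first two groups.

```go
// Sketch of splitting an allocation set by the state of its node, in the
// spirit of filterByTainted; types and the "lost" bucket are assumptions.
package main

import "fmt"

type nodeState int

const (
	nodeReady nodeState = iota
	nodeDraining
	nodeDown
)

type allocation struct {
	ID     string
	NodeID string
}

// filterByNodeState partitions allocs into untainted, draining, and lost
// groups based on a map of node states ("tainted" nodes are those that are
// draining or down).
func filterByNodeState(allocs []allocation, nodes map[string]nodeState) (untainted, draining, lost []allocation) {
	for _, a := range allocs {
		switch nodes[a.NodeID] {
		case nodeDraining:
			draining = append(draining, a)
		case nodeDown:
			lost = append(lost, a)
		default:
			untainted = append(untainted, a)
		}
	}
	return untainted, draining, lost
}

func main() {
	nodes := map[string]nodeState{"n1": nodeReady, "n2": nodeDraining, "n3": nodeDown}
	allocs := []allocation{{"a1", "n1"}, {"a2", "n2"}, {"a3", "n3"}}

	u, d, l := filterByNodeState(allocs, nodes)
	fmt.Println(len(u), "untainted,", len(d), "draining,", len(l), "lost")
}
```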

@@ -9,7 +9,7 @@ import (
// Test that we properly create the bitmap even when the alloc set includes an
// allocation with a higher count than the current min count and it is byte
// aligned.
-// Ensure no regerssion from: https://github.com/hashicorp/nomad/issues/3008
+// Ensure no regression from: https://github.com/hashicorp/nomad/issues/3008
func TestBitmapFrom(t *testing.T) {
input := map[string]*structs.Allocation{
"8": {

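The test above guards the sizing of the allocation-name bitmap when an allocation index lands exactly on a byte boundary. A sketch of the sizing arithmetic such a bitmap needs, where the exact rounding rule is an assumption for illustration rather than a copy of bitmapFrom:

```go
// Sketch of byte-aligned bitmap sizing; the rule is illustrative, not the
// real bitmapFrom logic.
package main

import "fmt"

// bitmapSize returns a byte-aligned size that covers minSize bits and the
// highest used allocation index.
func bitmapSize(highestIndex, minSize uint) uint {
	n := highestIndex + 1
	if n < minSize {
		n = minSize
	}
	// Round up to the next multiple of 8 so the backing byte slice is full.
	if rem := n % 8; rem != 0 {
		n += 8 - rem
	}
	return n
}

func main() {
	// An allocation indexed "8" forces at least 9 bits, which stays within 16.
	fmt.Println(bitmapSize(8, 16))  // 16
	fmt.Println(bitmapSize(20, 16)) // 24
}
```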

@@ -212,7 +212,7 @@ func (s *SystemScheduler) computeJobAllocs() error {
s.plan.AppendUpdate(e.Alloc, structs.AllocDesiredStatusStop, allocNotNeeded, "")
}
-// Lost allocations should be transistioned to desired status stop and client
+// Lost allocations should be transitioned to desired status stop and client
// status lost.
for _, e := range diff.lost {
s.plan.AppendUpdate(e.Alloc, structs.AllocDesiredStatusStop, allocLost, structs.AllocClientStatusLost)
@@ -278,7 +278,7 @@ func (s *SystemScheduler) computePlacements(place []allocTuple) error {
option, _ := s.stack.Select(missing.TaskGroup)
if option == nil {
-// If nodes were filtered because of constain mismatches and we
+// If nodes were filtered because of constraint mismatches and we
// couldn't create an allocation then decrementing queued for that
// task group
if s.ctx.metrics.NodesFiltered > 0 {

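The computeJobAllocs hunk above shows lost allocations being appended to the plan with desired status stop and client status lost. A self-contained sketch of that bookkeeping; the plan type and status constants are simplified stand-ins for the real structs.

```go
// Sketch of recording "stop + lost" updates for lost allocations; the plan
// and constants are stand-ins, not Nomad's real types.
package main

import "fmt"

const (
	allocDesiredStatusStop = "stop"
	allocClientStatusLost  = "lost"
)

type allocation struct {
	ID string
}

type allocUpdate struct {
	Alloc         allocation
	DesiredStatus string
	Description   string
	ClientStatus  string
}

// plan collects the updates the scheduler wants to apply.
type plan struct {
	updates []allocUpdate
}

// AppendUpdate records a desired/client status change for an allocation,
// mirroring the call shape shown in the hunk above.
func (p *plan) AppendUpdate(a allocation, desired, desc, client string) {
	p.updates = append(p.updates, allocUpdate{Alloc: a, DesiredStatus: desired, Description: desc, ClientStatus: client})
}

func main() {
	p := &plan{}
	lost := []allocation{{ID: "alloc-1"}, {ID: "alloc-2"}}

	// Lost allocations are transitioned to desired status stop and client
	// status lost, mirroring the loop in the diff.
	for _, a := range lost {
		p.AppendUpdate(a, allocDesiredStatusStop, "alloc lost", allocClientStatusLost)
	}
	fmt.Printf("%d updates queued\n", len(p.updates))
}
```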

@@ -1349,7 +1349,7 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) {
node2.ComputeClass()
noErr(t, h.State.UpsertNode(h.NextIndex(), node2))
-// Create a Job with two task groups, each constrianed on node class
+// Create a Job with two task groups, each constrained on node class
job := mock.SystemJob()
tg1 := job.TaskGroups[0]
tg1.Constraints = append(tg1.Constraints,
@@ -1436,7 +1436,7 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) {
node2.ComputeClass()
noErr(t, h.State.UpsertNode(h.NextIndex(), node2))
-// Create a Job with two task groups, each constrianed on node class
+// Create a Job with two task groups, each constrained on node class
job := mock.SystemJob()
tg1 := job.TaskGroups[0]
tg1.Constraints = append(tg1.Constraints,
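The truncated append above is attaching a node-class constraint to each task group so the system scheduler only considers matching nodes. A sketch of that setup, with a simplified constraint type standing in for the real structs.Constraint:

```go
// Sketch of constraining two task groups on node class; the constraint and
// task-group types are simplified stand-ins.
package main

import "fmt"

type constraint struct {
	LTarget string // attribute to match, e.g. the node's class
	Operand string
	RTarget string // required value
}

type taskGroup struct {
	Name        string
	Constraints []constraint
}

func main() {
	tg1 := &taskGroup{Name: "group-1"}
	tg2 := &taskGroup{Name: "group-2"}

	// Each task group is constrained to a different node class, as in the
	// TestSystemSched_* setups above.
	tg1.Constraints = append(tg1.Constraints,
		constraint{LTarget: "${node.class}", Operand: "=", RTarget: "class-1"})
	tg2.Constraints = append(tg2.Constraints,
		constraint{LTarget: "${node.class}", Operand: "=", RTarget: "class-2"})

	fmt.Printf("%s -> %+v\n", tg1.Name, tg1.Constraints)
	fmt.Printf("%s -> %+v\n", tg2.Name, tg2.Constraints)
}
```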