diff --git a/client/fingerprint/memory.go b/client/fingerprint/memory.go
index a8c30c31e..5af097848 100644
--- a/client/fingerprint/memory.go
+++ b/client/fingerprint/memory.go
@@ -25,7 +25,7 @@ func NewMemoryFingerprint(logger *log.Logger) Fingerprint {
 func (f *MemoryFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
 	memInfo, err := mem.VirtualMemory()
 	if err != nil {
-		f.logger.Println("[WARN] Error reading memory information: %s", err)
+		f.logger.Printf("[WARN] Error reading memory information: %s", err)
 		return false, err
 	}
 
diff --git a/command/agent_info_test.go b/command/agent_info_test.go
index d16f04704..fd34e206e 100644
--- a/command/agent_info_test.go
+++ b/command/agent_info_test.go
@@ -20,7 +20,7 @@ func TestAgentInfoCommand_Run(t *testing.T) {
 
 	code := cmd.Run([]string{"-address=" + url})
 	if code != 0 {
-		t.Fatalf("expected exit 0, got: %d %s", code)
+		t.Fatalf("expected exit 0, got: %d", code)
 	}
 }
diff --git a/helper/discover/discover.go b/helper/discover/discover.go
index b683e95a7..d90ddb4cc 100644
--- a/helper/discover/discover.go
+++ b/helper/discover/discover.go
@@ -34,7 +34,7 @@ func NomadExecutable() (string, error) {
 	// Check the CWD.
 	pwd, err := os.Getwd()
 	if err != nil {
-		return "", fmt.Errorf("Could not find Nomad executable (%v): %v", err)
+		return "", fmt.Errorf("Could not find Nomad executable (%v): %v", nomadExe, err)
 	}
 
 	bin = filepath.Join(pwd, nomadExe)
diff --git a/nomad/plan_apply.go b/nomad/plan_apply.go
index 18d7353fb..4b760cdaf 100644
--- a/nomad/plan_apply.go
+++ b/nomad/plan_apply.go
@@ -159,7 +159,7 @@ func evaluateNodePlan(snap *state.StateSnapshot, plan *structs.Plan, nodeID stri
 	// Get the node itself
 	node, err := snap.NodeByID(nodeID)
 	if err != nil {
-		return false, fmt.Errorf("failed to get node '%s': %v", node, err)
+		return false, fmt.Errorf("failed to get node '%s': %v", nodeID, err)
 	}
 
 	// If the node does not exist or is not ready for schduling it is not fit
@@ -172,7 +172,7 @@ func evaluateNodePlan(snap *state.StateSnapshot, plan *structs.Plan, nodeID stri
 	// Get the existing allocations
 	existingAlloc, err := snap.AllocsByNode(nodeID)
 	if err != nil {
-		return false, fmt.Errorf("failed to get existing allocations for '%s': %v", node, err)
+		return false, fmt.Errorf("failed to get existing allocations for '%s': %v", nodeID, err)
 	}
 
 	// Filter on alloc state
diff --git a/scheduler/generic_sched.go b/scheduler/generic_sched.go
index 030f10bbc..bd5bb81e4 100644
--- a/scheduler/generic_sched.go
+++ b/scheduler/generic_sched.go
@@ -164,7 +164,7 @@ func (s *GenericScheduler) process() (bool, error) {
 	if s.limitReached && s.nextEval == nil {
 		s.nextEval = s.eval.NextRollingEval(s.job.Update.Stagger)
 		if err := s.planner.CreateEval(s.nextEval); err != nil {
-			s.logger.Printf("[ERR] sched: %#v failed to make next eval for rolling update: %v", err)
+			s.logger.Printf("[ERR] sched: %#v failed to make next eval for rolling update: %v", s.eval, err)
 			return false, err
 		}
 		s.logger.Printf("[DEBUG] sched: %#v: rolling update limit reached, next eval '%s' created", s.eval, s.nextEval.ID)
@@ -289,7 +289,7 @@ func (s *GenericScheduler) inplaceUpdate(updates []allocTuple) []allocTuple {
 		node, err := s.state.NodeByID(update.Alloc.NodeID)
 		if err != nil {
 			s.logger.Printf("[ERR] sched: %#v failed to get node '%s': %v",
-				update.Alloc.NodeID, err)
+				s.eval, update.Alloc.NodeID, err)
 			continue
 		}
 		if node == nil {