Merge pull request #229 from iverberk/fix_vet_warnings

Fix vet warnings
This commit is contained in:
Alex Dadgar 2015-10-07 10:14:56 -07:00
commit 8dd1bbfe63
5 changed files with 7 additions and 7 deletions

View File

@@ -25,7 +25,7 @@ func NewMemoryFingerprint(logger *log.Logger) Fingerprint {
func (f *MemoryFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { func (f *MemoryFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
memInfo, err := mem.VirtualMemory() memInfo, err := mem.VirtualMemory()
if err != nil { if err != nil {
f.logger.Println("[WARN] Error reading memory information: %s", err) f.logger.Printf("[WARN] Error reading memory information: %s", err)
return false, err return false, err
} }

View File

@@ -20,7 +20,7 @@ func TestAgentInfoCommand_Run(t *testing.T) {
code := cmd.Run([]string{"-address=" + url}) code := cmd.Run([]string{"-address=" + url})
if code != 0 { if code != 0 {
t.Fatalf("expected exit 0, got: %d %s", code) t.Fatalf("expected exit 0, got: %d", code)
} }
} }

View File

@@ -34,7 +34,7 @@ func NomadExecutable() (string, error) {
// Check the CWD. // Check the CWD.
pwd, err := os.Getwd() pwd, err := os.Getwd()
if err != nil { if err != nil {
return "", fmt.Errorf("Could not find Nomad executable (%v): %v", err) return "", fmt.Errorf("Could not find Nomad executable (%v): %v", nomadExe, err)
} }
bin = filepath.Join(pwd, nomadExe) bin = filepath.Join(pwd, nomadExe)

View File

@@ -159,7 +159,7 @@ func evaluateNodePlan(snap *state.StateSnapshot, plan *structs.Plan, nodeID stri
// Get the node itself // Get the node itself
node, err := snap.NodeByID(nodeID) node, err := snap.NodeByID(nodeID)
if err != nil { if err != nil {
return false, fmt.Errorf("failed to get node '%s': %v", node, err) return false, fmt.Errorf("failed to get node '%s': %v", nodeID, err)
} }
// If the node does not exist or is not ready for scheduling it is not fit // If the node does not exist or is not ready for scheduling it is not fit
@@ -172,7 +172,7 @@ func evaluateNodePlan(snap *state.StateSnapshot, plan *structs.Plan, nodeID stri
// Get the existing allocations // Get the existing allocations
existingAlloc, err := snap.AllocsByNode(nodeID) existingAlloc, err := snap.AllocsByNode(nodeID)
if err != nil { if err != nil {
return false, fmt.Errorf("failed to get existing allocations for '%s': %v", node, err) return false, fmt.Errorf("failed to get existing allocations for '%s': %v", nodeID, err)
} }
// Filter on alloc state // Filter on alloc state

View File

@@ -164,7 +164,7 @@ func (s *GenericScheduler) process() (bool, error) {
if s.limitReached && s.nextEval == nil { if s.limitReached && s.nextEval == nil {
s.nextEval = s.eval.NextRollingEval(s.job.Update.Stagger) s.nextEval = s.eval.NextRollingEval(s.job.Update.Stagger)
if err := s.planner.CreateEval(s.nextEval); err != nil { if err := s.planner.CreateEval(s.nextEval); err != nil {
s.logger.Printf("[ERR] sched: %#v failed to make next eval for rolling update: %v", err) s.logger.Printf("[ERR] sched: %#v failed to make next eval for rolling update: %v", s.eval, err)
return false, err return false, err
} }
s.logger.Printf("[DEBUG] sched: %#v: rolling update limit reached, next eval '%s' created", s.eval, s.nextEval.ID) s.logger.Printf("[DEBUG] sched: %#v: rolling update limit reached, next eval '%s' created", s.eval, s.nextEval.ID)
@@ -289,7 +289,7 @@ func (s *GenericScheduler) inplaceUpdate(updates []allocTuple) []allocTuple {
node, err := s.state.NodeByID(update.Alloc.NodeID) node, err := s.state.NodeByID(update.Alloc.NodeID)
if err != nil { if err != nil {
s.logger.Printf("[ERR] sched: %#v failed to get node '%s': %v", s.logger.Printf("[ERR] sched: %#v failed to get node '%s': %v",
update.Alloc.NodeID, err) s.eval, update.Alloc.NodeID, err)
continue continue
} }
if node == nil { if node == nil {