Merge pull request #2372 from hashicorp/f-vet
Fix vet script and fix vet problems
This commit is contained in: 80239475b2
GNUmakefile

@@ -1,6 +1,4 @@
 PACKAGES = $(shell go list ./... | grep -v '/vendor/')
-VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods \
-	-nilfunc -printf -rangeloops -shift -structtags -unsafeptr
 EXTERNAL_TOOLS=\
 	github.com/kardianos/govendor \
 	github.com/mitchellh/gox \
@@ -9,8 +7,6 @@ EXTERNAL_TOOLS=\
 	gopkg.in/matm/v1/gocov-html \
 	github.com/ugorji/go/codec/codecgen
 
-GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*")
-
 all: test
 
 dev: format generate
@@ -48,11 +44,8 @@ generate:
 	@sed -i.old -e 's|github.com/hashicorp/nomad/vendor/github.com/ugorji/go/codec|github.com/ugorji/go/codec|' nomad/structs/structs.generated.go
 
 vet:
-	@go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \
-		go get golang.org/x/tools/cmd/vet; \
-	fi
-	@echo "--> Running go tool vet $(VETARGS) ${GOFILES_NOVENDOR}"
-	@go tool vet $(VETARGS) ${GOFILES_NOVENDOR} ; if [ $$? -eq 1 ]; then \
+	@echo "--> Running go vet $(VETARGS) ${PACKAGES}"
+	@go vet $(VETARGS) ${PACKAGES} ; if [ $$? -eq 1 ]; then \
 		echo ""; \
 		echo "[LINT] Vet found suspicious constructs. Please check the reported constructs"; \
 		echo "and fix them if necessary before submitting the code for review."; \
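The vet target now runs `go vet` against package patterns ($(PACKAGES)) instead of feeding individual files to `go tool vet`, so vet analyzes each package with full type information; that package-based run is what surfaced the problems fixed in the rest of this diff. As a hedged illustration (not part of the commit; the file and package names are hypothetical), the same invocation can be wired into a test so `go test` fails whenever vet would:

// ci_vet_test.go: hypothetical helper, not from the Nomad tree.
package ci

import (
	"os/exec"
	"testing"
)

func TestGoVet(t *testing.T) {
	// Same package-based invocation the Makefile's vet target uses.
	out, err := exec.Command("go", "vet", "./...").CombinedOutput()
	if err != nil {
		t.Fatalf("go vet reported problems:\n%s", out)
	}
}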
@@ -4,8 +4,6 @@ import (
 	"reflect"
 	"sort"
 	"testing"
-
-	"github.com/hashicorp/nomad/helper"
 )
 
 func TestAllocations_List(t *testing.T) {
@@ -29,29 +27,29 @@ func TestAllocations_List(t *testing.T) {
 	// so we can query for them.
 	return
 
-	job := &Job{
-		ID:   helper.StringToPtr("job1"),
-		Name: helper.StringToPtr("Job #1"),
-		Type: helper.StringToPtr(JobTypeService),
-	}
-	eval, _, err := c.Jobs().Register(job, nil)
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
+	//job := &Job{
+	//ID:   helper.StringToPtr("job1"),
+	//Name: helper.StringToPtr("Job #1"),
+	//Type: helper.StringToPtr(JobTypeService),
+	//}
+	//eval, _, err := c.Jobs().Register(job, nil)
+	//if err != nil {
+	//t.Fatalf("err: %s", err)
+	//}
 
-	// List the allocations again
-	allocs, qm, err = a.List(nil)
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
-	if qm.LastIndex == 0 {
-		t.Fatalf("bad index: %d", qm.LastIndex)
-	}
+	//// List the allocations again
+	//allocs, qm, err = a.List(nil)
+	//if err != nil {
+	//t.Fatalf("err: %s", err)
+	//}
+	//if qm.LastIndex == 0 {
+	//t.Fatalf("bad index: %d", qm.LastIndex)
+	//}
 
-	// Check that we got the allocation back
-	if len(allocs) == 0 || allocs[0].EvalID != eval {
-		t.Fatalf("bad: %#v", allocs)
-	}
+	//// Check that we got the allocation back
+	//if len(allocs) == 0 || allocs[0].EvalID != eval {
+	//t.Fatalf("bad: %#v", allocs)
+	//}
 }
 
 func TestAllocations_PrefixList(t *testing.T) {
@@ -75,30 +73,30 @@ func TestAllocations_PrefixList(t *testing.T) {
 	// so we can query for them.
 	return
 
-	job := &Job{
-		ID:   helper.StringToPtr("job1"),
-		Name: helper.StringToPtr("Job #1"),
-		Type: helper.StringToPtr(JobTypeService),
-	}
+	//job := &Job{
+	//ID:   helper.StringToPtr("job1"),
+	//Name: helper.StringToPtr("Job #1"),
+	//Type: helper.StringToPtr(JobTypeService),
+	//}
 
-	eval, _, err := c.Jobs().Register(job, nil)
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
+	//eval, _, err := c.Jobs().Register(job, nil)
+	//if err != nil {
+	//t.Fatalf("err: %s", err)
+	//}
 
-	// List the allocations by prefix
-	allocs, qm, err = a.PrefixList("foobar")
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
-	if qm.LastIndex == 0 {
-		t.Fatalf("bad index: %d", qm.LastIndex)
-	}
+	//// List the allocations by prefix
+	//allocs, qm, err = a.PrefixList("foobar")
+	//if err != nil {
+	//t.Fatalf("err: %s", err)
+	//}
+	//if qm.LastIndex == 0 {
+	//t.Fatalf("bad index: %d", qm.LastIndex)
+	//}
 
-	// Check that we got the allocation back
-	if len(allocs) == 0 || allocs[0].EvalID != eval {
-		t.Fatalf("bad: %#v", allocs)
-	}
+	//// Check that we got the allocation back
+	//if len(allocs) == 0 || allocs[0].EvalID != eval {
+	//t.Fatalf("bad: %#v", allocs)
+	//}
 }
 
 func TestAllocations_CreateIndexSort(t *testing.T) {
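The blocks above sit after an unconditional return, so go vet reports them as unreachable code; the commit silences the report by commenting out the dead statements (and dropping the now-unused helper import) rather than deleting them. A hedged sketch of the diagnostic, with hypothetical names:

package demo

import "fmt"

func checkAllocations() {
	// Bail out early, as the test above does.
	return

	fmt.Println("never executed") // go vet: unreachable code
}

func main() {
	checkAllocations()
}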
@@ -317,7 +317,7 @@ func (j *Job) Canonicalize() {
 		j.ID = helper.StringToPtr("")
 	}
 	if j.Name == nil {
-		j.Name = j.ID
+		j.Name = helper.StringToPtr(*j.ID)
 	}
 	if j.ParentID == nil {
 		j.ParentID = helper.StringToPtr("")
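With pointer fields, j.Name = j.ID would leave Name and ID sharing a single *string, so a later write through either pointer changes both fields; copying the value with the helper breaks that aliasing. A hedged sketch of the difference (the job type and helper below are stand-ins, not the real API types):

package demo

import "fmt"

// stringToPtr mirrors helper.StringToPtr from the diff: it returns a
// pointer to a copy of its argument.
func stringToPtr(s string) *string { return &s }

type job struct {
	ID   *string
	Name *string
}

func main() {
	j := &job{ID: stringToPtr("job1")}

	// Aliasing: Name and ID point at the same string variable.
	j.Name = j.ID
	*j.ID = "renamed"
	fmt.Println(*j.Name) // prints "renamed"; Name changed too

	// Copying: Name gets its own backing string.
	j.ID = stringToPtr("job1")
	j.Name = stringToPtr(*j.ID)
	*j.ID = "renamed"
	fmt.Println(*j.Name) // still "job1"
}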
@@ -875,10 +875,10 @@ func TestJobs_JobSummary(t *testing.T) {
 
 	// Check that the result is what we expect
 	if *job.ID != result.JobID {
-		t.Fatalf("err: expected job id of %s saw %s", job.ID, result.JobID)
+		t.Fatalf("err: expected job id of %s saw %s", *job.ID, result.JobID)
 	}
 	if _, ok := result.Summary[*taskName]; !ok {
-		t.Fatalf("err: unable to find %s key in job summary", taskName)
+		t.Fatalf("err: unable to find %s key in job summary", *taskName)
 	}
 }
 
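job.ID and taskName are *string values here, so %s formats the pointer representation rather than the text it points to; go vet's printf check reports the mismatched argument type, and the fix dereferences first. A hedged sketch, not from the repository:

package demo

import "fmt"

func main() {
	id := "job1"
	jobID := &id // *string, like the api.Job fields

	fmt.Printf("job id: %s\n", jobID)  // go vet (printf): wrong type for %s; the text is not printed
	fmt.Printf("job id: %s\n", *jobID) // prints "job id: job1"
}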
@@ -472,7 +472,7 @@ func (r *AllocRunner) Run() {
 			r.logger.Printf("[ERROR] client: failed to move alloc dir into alloc %q: %v", r.alloc.ID, err)
 		}
 		if err := r.otherAllocDir.Destroy(); err != nil {
-			r.logger.Printf("[ERROR] client: error destroying allocdir %v", r.otherAllocDir.AllocDir, err)
+			r.logger.Printf("[ERROR] client: error destroying allocdir %v: %v", r.otherAllocDir.AllocDir, err)
 		}
 	}
 }
@@ -589,7 +589,7 @@ func TestAllocRunner_RestoreOldState(t *testing.T) {
 		t.Fatalf("expected exactly 1 error from RestoreState but found: %d: %v", len(merr.Errors), err)
 	}
 	if expected := "task runner snapshot includes nil Task"; merr.Errors[0].Error() != expected {
-		t.Fatalf("expected %q but got: %q", merr.Errors[0].Error())
+		t.Fatalf("expected %q but got: %q", expected, merr.Errors[0].Error())
 	}
 
 	if err := ar.SaveState(); err != nil {
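Both fixes above are verb/argument count mismatches: the logging call passes two values for a single %v, and the test assertion passes one value for two %q verbs. vet's printf check counts formatting directives against arguments. A hedged sketch with made-up values:

package demo

import "log"

func main() {
	dir := "/var/nomad/alloc/example"
	err := "permission denied"

	// Too many arguments: one %v but two values.
	log.Printf("error destroying allocdir %v", dir, err)

	// Too few arguments: two %q verbs but only one value.
	log.Printf("expected %q but got: %q", "nil Task")

	// Verbs and arguments line up.
	log.Printf("error destroying allocdir %v: %v", dir, err)
}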
@@ -1708,7 +1708,7 @@ func (c *Client) unarchiveAllocDir(resp io.ReadCloser, allocID string, pathToAll
 	stopMigrating, ok := c.migratingAllocs[allocID]
 	if !ok {
 		os.RemoveAll(pathToAllocDir)
-		return fmt.Errorf("Allocation %q is not marked for remote migration: %v", allocID)
+		return fmt.Errorf("Allocation %q is not marked for remote migration", allocID)
 	}
 	for {
 		// See if the alloc still needs migration
@@ -1789,8 +1789,6 @@ func (c *Client) unarchiveAllocDir(resp io.ReadCloser, allocID string, pathToAll
 
 		}
 	}
-
-	return nil
 }
 
 // getNode gets the node from the server with the given Node ID
@@ -670,8 +670,7 @@ func TestClient_SaveRestoreState(t *testing.T) {
 		ar := c2.allocs[alloc1.ID]
 		c2.allocLock.RUnlock()
 		status := ar.Alloc().ClientStatus
-		alive := status != structs.AllocClientStatusRunning ||
-			status != structs.AllocClientStatusPending
+		alive := status == structs.AllocClientStatusRunning || status == structs.AllocClientStatusPending
 		if !alive {
 			return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
 		}
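The old condition status != Running || status != Pending is true for every status, since no value can equal both constants at once, so the check could never fail; vet's bool check flags suspicious combinations of != with ||. A hedged sketch:

package demo

import "fmt"

func main() {
	status := "running"

	// Always true: any status differs from at least one of the two values.
	if status != "running" || status != "pending" {
		fmt.Println("this branch runs for every possible status")
	}

	// Intended check: is the status one of the accepted values?
	if status == "running" || status == "pending" {
		fmt.Println("status is acceptable")
	}
}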
@@ -899,6 +899,6 @@ func TestTaskTemplateManager_Signal_Error(t *testing.T) {
 	}
 
 	if !strings.Contains(harness.mockHooks.KillReason, "Sending signals") {
-		t.Fatalf("Unexpected error", harness.mockHooks.KillReason)
+		t.Fatalf("Unexpected error: %v", harness.mockHooks.KillReason)
 	}
 }
@@ -1415,7 +1415,7 @@ func calculatePercent(newSample, oldSample, newTotal, oldTotal uint64, cores int
 func authOptionFrom(file, repo string) (*docker.AuthConfiguration, error) {
 	name, err := reference.ParseNamed(repo)
 	if err != nil {
-		return nil, fmt.Errorf("Failed to parse named repo %q: %v", err)
+		return nil, fmt.Errorf("Failed to parse named repo %q: %v", repo, err)
 	}
 
 	repoInfo, err := registry.ParseRepositoryInfo(name)
@@ -69,8 +69,8 @@ func dockerTask() (*structs.Task, int, int) {
 		Networks: []*structs.NetworkResource{
 			&structs.NetworkResource{
 				IP:            "127.0.0.1",
-				ReservedPorts: []structs.Port{{"main", docker_reserved}},
-				DynamicPorts:  []structs.Port{{"REDIS", docker_dynamic}},
+				ReservedPorts: []structs.Port{{Label: "main", Value: docker_reserved}},
+				DynamicPorts:  []structs.Port{{Label: "REDIS", Value: docker_dynamic}},
 			},
 		},
 	},
@@ -783,7 +783,7 @@ func TestDockerWorkDir(t *testing.T) {
 	}
 
 	if want, got := "/some/path", container.Config.WorkingDir; want != got {
-		t.Errorf("Wrong working directory for docker job. Expect: %d, got: %d", want, got)
+		t.Errorf("Wrong working directory for docker job. Expect: %s, got: %s", want, got)
 	}
 }
 
@@ -1092,7 +1092,7 @@ while true; do
 done
 `)
 	if err := ioutil.WriteFile(testFile, testData, 0777); err != nil {
-		fmt.Errorf("Failed to write data")
+		t.Fatalf("Failed to write data: %v", err)
 	}
 
 	_, err := d.Prestart(ctx.ExecCtx, task)
@@ -25,8 +25,8 @@ var basicResources = &structs.Resources{
 	Networks: []*structs.NetworkResource{
 		&structs.NetworkResource{
 			IP:            "0.0.0.0",
-			ReservedPorts: []structs.Port{{"main", 12345}},
-			DynamicPorts:  []structs.Port{{"HTTP", 43330}},
+			ReservedPorts: []structs.Port{{Label: "main", Value: 12345}},
+			DynamicPorts:  []structs.Port{{Label: "HTTP", Value: 43330}},
 		},
 	},
 }
@@ -144,8 +144,8 @@ func setupTaskEnv(t *testing.T, driver string) (*allocdir.TaskDir, map[string]st
 	Networks: []*structs.NetworkResource{
 		&structs.NetworkResource{
 			IP:            "1.2.3.4",
-			ReservedPorts: []structs.Port{{"one", 80}, {"two", 443}},
-			DynamicPorts:  []structs.Port{{"admin", 8081}, {"web", 8086}},
+			ReservedPorts: []structs.Port{{Label: "one", Value: 80}, {Label: "two", Value: 443}},
+			DynamicPorts:  []structs.Port{{Label: "admin", Value: 8081}, {Label: "web", Value: 8086}},
 		},
 	},
 },
@@ -33,8 +33,8 @@ var (
 	networks = []*structs.NetworkResource{
 		&structs.NetworkResource{
 			IP:            "127.0.0.1",
-			ReservedPorts: []structs.Port{{"http", 80}},
-			DynamicPorts:  []structs.Port{{"https", 8080}},
+			ReservedPorts: []structs.Port{{Label: "http", Value: 80}},
+			DynamicPorts:  []structs.Port{{Label: "https", Value: 8080}},
 		},
 	}
 	portMap = map[string]int{
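The fixture changes above all replace positional structs.Port literals such as {{"main", 80}} with keyed fields. Unkeyed composite literals depend on field order, and go vet's composites check asks for keyed fields when the struct type comes from another package, which is exactly the situation in these driver tests. A hedged sketch with a stand-in Port type:

package demo

import "fmt"

// Port stands in for structs.Port from the diff.
type Port struct {
	Label string
	Value int
}

func main() {
	// Unkeyed: relies on field order; vet's composites check flags this
	// form for struct types imported from another package.
	reserved := []Port{{"main", 8080}}

	// Keyed: robust to field reordering and self-documenting.
	dynamic := []Port{{Label: "REDIS", Value: 6379}}

	fmt.Println(reserved, dynamic)
}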
@@ -354,7 +354,7 @@ while true; do
 done
 `)
 	if err := ioutil.WriteFile(testFile, testData, 0777); err != nil {
-		fmt.Errorf("Failed to write data")
+		t.Fatalf("Failed to write data: %v", err)
 	}
 
 	if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
@@ -78,7 +78,6 @@ func (d *DockerScriptCheck) Run() *cstructs.CheckResult {
 	if client, err = d.dockerClient(); err != nil {
 		return &cstructs.CheckResult{Err: err}
 	}
-	client = client
 	execOpts := docker.CreateExecOptions{
 		AttachStdin:  false,
 		AttachStdout: true,
@@ -157,33 +156,33 @@ func (e *ExecScriptCheck) Run() *cstructs.CheckResult {
 	go func() {
 		errCh <- cmd.Wait()
 	}()
-	for {
-		select {
-		case err := <-errCh:
-			endTime := time.Now()
-			if err == nil {
-				return &cstructs.CheckResult{
-					ExitCode:  0,
-					Output:    string(buf.Bytes()),
-					Timestamp: ts,
-				}
-			}
-			exitCode := 1
-			if exitErr, ok := err.(*exec.ExitError); ok {
-				if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
-					exitCode = status.ExitStatus()
-				}
-			}
-			return &cstructs.CheckResult{
-				ExitCode:  exitCode,
-				Output:    string(buf.Bytes()),
-				Timestamp: ts,
-				Duration:  endTime.Sub(ts),
-			}
-		case <-time.After(e.Timeout()):
-			errCh <- fmt.Errorf("timed out after waiting 30s")
-		}
-	}
+
+	select {
+	case err := <-errCh:
+		endTime := time.Now()
+		if err == nil {
+			return &cstructs.CheckResult{
+				ExitCode:  0,
+				Output:    string(buf.Bytes()),
+				Timestamp: ts,
+			}
+		}
+		exitCode := 1
+		if exitErr, ok := err.(*exec.ExitError); ok {
+			if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
+				exitCode = status.ExitStatus()
+			}
+		}
+		return &cstructs.CheckResult{
+			ExitCode:  exitCode,
+			Output:    string(buf.Bytes()),
+			Timestamp: ts,
+			Duration:  endTime.Sub(ts),
+		}
+	case <-time.After(e.Timeout()):
+		errCh <- fmt.Errorf("timed out after waiting 30s")
+	}
 
 	return nil
 }
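The restructured Run waits on a single select: either the command finishes and a CheckResult is built from its exit status, or the timeout fires. Under the old for wrapper the trailing return nil could never execute, which is the unreachable-code report vet emits. A hedged sketch of the same wait-or-timeout shape (names and behavior here are illustrative, not the check implementation):

package demo

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// runWithTimeout waits for the command to exit or gives up after the timeout.
func runWithTimeout(cmd *exec.Cmd, timeout time.Duration) (int, error) {
	if err := cmd.Start(); err != nil {
		return -1, err
	}
	errCh := make(chan error, 1)
	go func() { errCh <- cmd.Wait() }()

	select {
	case err := <-errCh:
		if err == nil {
			return 0, nil
		}
		exitCode := 1
		if exitErr, ok := err.(*exec.ExitError); ok {
			if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
				exitCode = status.ExitStatus()
			}
		}
		return exitCode, err
	case <-time.After(timeout):
		return -1, fmt.Errorf("timed out after %s", timeout)
	}
}

func main() {
	code, err := runWithTimeout(exec.Command("sh", "-c", "exit 3"), time.Second)
	fmt.Println(code, err)
}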
@@ -62,7 +62,7 @@ func TestDockerScriptCheck(t *testing.T) {
 	defer removeContainer(client, container.ID)
 
 	if err := client.StartContainer(container.ID, container.HostConfig); err != nil {
-		t.Fatalf("error starting container", err)
+		t.Fatalf("error starting container: %v", err)
 	}
 
 	check := &DockerScriptCheck{
@@ -873,7 +873,7 @@ func (e *UniversalExecutor) Signal(s os.Signal) error {
 	e.logger.Printf("[DEBUG] executor: sending signal %s", s)
 	err := e.cmd.Process.Signal(s)
 	if err != nil {
-		e.logger.Printf("[ERR] executor: sending signal %s failed: %v", err)
+		e.logger.Printf("[ERR] executor: sending signal %v failed: %v", s, err)
 		return err
 	}
 
@@ -255,7 +255,7 @@ func TestExecutor_MakeExecutable(t *testing.T) {
 	act := stat.Mode().Perm()
 	exp := os.FileMode(0755)
 	if act != exp {
-		t.Fatalf("expected permissions %v; got %v", err)
+		t.Fatalf("expected permissions %v; got %v", exp, act)
 	}
 }
 
@@ -66,7 +66,7 @@ func TestQemuDriver_StartOpen_Wait(t *testing.T) {
 			MemoryMB: 512,
 			Networks: []*structs.NetworkResource{
 				&structs.NetworkResource{
-					ReservedPorts: []structs.Port{{"main", 22000}, {"web", 80}},
+					ReservedPorts: []structs.Port{{Label: "main", Value: 22000}, {Label: "web", Value: 80}},
 				},
 			},
 		},
@@ -136,7 +136,7 @@ func TestQemuDriverUser(t *testing.T) {
 			MemoryMB: 512,
 			Networks: []*structs.NetworkResource{
 				&structs.NetworkResource{
-					ReservedPorts: []structs.Port{{"main", 22000}, {"web", 80}},
+					ReservedPorts: []structs.Port{{Label: "main", Value: 22000}, {Label: "web", Value: 80}},
 				},
 			},
 		},
@@ -332,7 +332,7 @@ while true; do
 done
 `)
 	if err := ioutil.WriteFile(testFile, testData, 0777); err != nil {
-		fmt.Errorf("Failed to write data")
+		t.Fatalf("Failed to write data: %v", err)
 	}
 
 	if _, err := d.Prestart(ctx.ExecCtx, task); err != nil {
@@ -452,7 +452,7 @@ func TestRktDriver_PortsMapping(t *testing.T) {
 			Networks: []*structs.NetworkResource{
 				&structs.NetworkResource{
 					IP:            "127.0.0.1",
-					ReservedPorts: []structs.Port{{"main", 8080}},
+					ReservedPorts: []structs.Port{{Label: "main", Value: 8080}},
 				},
 			},
 		},
@@ -80,7 +80,7 @@ func testTaskRunnerFromAlloc(t *testing.T, restarts bool, alloc *structs.Allocat
 	task := alloc.Job.TaskGroups[0].Tasks[0]
 	// Initialize the port listing. This should be done by the offer process but
 	// we have a mock so that doesn't happen.
-	task.Resources.Networks[0].ReservedPorts = []structs.Port{{"", 80}}
+	task.Resources.Networks[0].ReservedPorts = []structs.Port{{Label: "", Value: 80}}
 
 	allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(conf.AllocDir, alloc.ID))
 	if err := allocDir.Build(); err != nil {
@@ -1018,7 +1018,7 @@ func TestTaskRunner_Template_Block(t *testing.T) {
 func TestTaskRunner_Template_Artifact(t *testing.T) {
 	dir, err := os.Getwd()
 	if err != nil {
-		t.Fatal("bad: %v", err)
+		t.Fatalf("bad: %v", err)
 	}
 
 	ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(dir, ".."))))
@@ -85,7 +85,7 @@ func TestVaultClient_TokenRenewals(t *testing.T) {
 	}
 
 	if c.heap.Length() != 0 {
-		t.Fatal("bad: heap length: expected: 0, actual: %d", c.heap.Length())
+		t.Fatalf("bad: heap length: expected: 0, actual: %d", c.heap.Length())
 	}
 }
 
@@ -695,8 +695,6 @@ OUTER:
 			}
 		}
 	}
-
-	return nil
 }
 
 // Logs streams the content of a log blocking on EOF. The parameters are:
@@ -868,8 +866,6 @@ func (s *HTTPServer) logs(follow, plain bool, offset int64,
 			offset = int64(0)
 			nextIdx = idx + 1
 		}
-
-	return nil
 }
 
 // blockUntilNextLog returns a channel that will have data sent when the next
@@ -285,8 +285,7 @@ func (j *JobGetter) ApiJob(jpath string) (*api.Job, error) {
 	// Parse the JobFile
 	jobStruct, err := jobspec.Parse(jobfile)
 	if err != nil {
-		fmt.Errorf("Error parsing job file from %s: %v", jpath, err)
-		return nil, err
+		return nil, fmt.Errorf("Error parsing job file from %s: %v", jpath, err)
 	}
 
 	return jobStruct, nil
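A bare fmt.Errorf(...) builds an error value and immediately discards it, so the message never reaches the caller; vet's unusedresult check reports the dropped result, and the fix returns the wrapped error instead. A hedged sketch with hypothetical names:

package demo

import (
	"errors"
	"fmt"
)

var errParse = errors.New("unexpected token")

func parseJobFile(path string) error {
	// Discarded: the error is built and thrown away.
	// go vet (unusedresult): result of fmt.Errorf call not used.
	fmt.Errorf("error parsing job file from %s: %v", path, errParse)

	// Used: wrap it and return it so the caller sees the context.
	return fmt.Errorf("error parsing job file from %s: %v", path, errParse)
}

func main() {
	fmt.Println(parseJobFile("example.nomad"))
}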
@@ -29,7 +29,7 @@ func TestJobDispatchCommand_Fails(t *testing.T) {
 		t.Fatalf("expect exit 1, got: %d", code)
 	}
 	if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error reading input data") {
-		t.Fatalf("expect error reading input data", out)
+		t.Fatalf("expect error reading input data: %v", out)
 	}
 	ui.ErrorWriter.Reset()
 
@@ -517,15 +517,15 @@ func getAllocatedResources(client *api.Client, runningAllocs []*api.Allocation,
 
 	resources := make([]string, 2)
 	resources[0] = "CPU|Memory|Disk|IOPS"
-	resources[1] = fmt.Sprintf("%v/%v MHz|%v/%v|%v/%v|%v/%v",
+	resources[1] = fmt.Sprintf("%d/%d MHz|%s/%s|%s/%s|%d/%d",
 		cpu,
-		total.CPU,
+		*total.CPU,
 		humanize.IBytes(uint64(mem*bytesPerMegabyte)),
 		humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte)),
 		humanize.IBytes(uint64(disk*bytesPerMegabyte)),
 		humanize.IBytes(uint64(*total.DiskMB*bytesPerMegabyte)),
 		iops,
-		total.IOPS)
+		*total.IOPS)
 
 	return resources
 }
@@ -568,9 +568,9 @@ func getActualResources(client *api.Client, runningAllocs []*api.Allocation, nod
 
 	resources := make([]string, 2)
 	resources[0] = "CPU|Memory"
-	resources[1] = fmt.Sprintf("%v/%v MHz|%v/%v",
+	resources[1] = fmt.Sprintf("%v/%d MHz|%v/%v",
 		math.Floor(cpu),
-		total.CPU,
+		*total.CPU,
 		humanize.IBytes(mem),
 		humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte)))
 
@@ -599,9 +599,9 @@ func getHostResources(hostStats *api.HostStats, node *api.Node) ([]string, error
 	resources = make([]string, 2)
 	resources[0] = "CPU|Memory|Disk"
 	if physical {
-		resources[1] = fmt.Sprintf("%v/%v MHz|%v/%v|%v/%v",
+		resources[1] = fmt.Sprintf("%v/%d MHz|%s/%s|%s/%s",
 			math.Floor(hostStats.CPUTicksConsumed),
-			node.Resources.CPU,
+			*node.Resources.CPU,
 			humanize.IBytes(hostStats.Memory.Used),
 			humanize.IBytes(hostStats.Memory.Total),
 			humanize.IBytes(diskUsed),
@@ -610,9 +610,9 @@ func getHostResources(hostStats *api.HostStats, node *api.Node) ([]string, error
 	} else {
 		// If non-physical device are used, output device name only,
 		// since nomad doesn't collect the stats data.
-		resources[1] = fmt.Sprintf("%v/%v MHz|%v/%v|(%s)",
+		resources[1] = fmt.Sprintf("%v/%d MHz|%s/%s|(%s)",
 			math.Floor(hostStats.CPUTicksConsumed),
-			node.Resources.CPU,
+			*node.Resources.CPU,
 			humanize.IBytes(hostStats.Memory.Used),
 			humanize.IBytes(hostStats.Memory.Total),
 			storageDevice,
@@ -110,7 +110,7 @@ func (c *StopCommand) Run(args []string) int {
 
 	// Confirm the stop if the job was a prefix match.
 	if jobID != *job.ID && !autoYes {
-		question := fmt.Sprintf("Are you sure you want to stop job %q? [y/N]", job.ID)
+		question := fmt.Sprintf("Are you sure you want to stop job %q? [y/N]", *job.ID)
 		answer, err := c.Ui.Ask(question)
 		if err != nil {
 			c.Ui.Error(fmt.Sprintf("Failed to parse answer: %v", err))
|
|||
// Generate the wrapper based on hostname verification
|
||||
if c.VerifyServerHostname {
|
||||
wrapper := func(region string, conn net.Conn) (net.Conn, error) {
|
||||
conf := *tlsConfig
|
||||
conf := tlsConfig.Clone()
|
||||
conf.ServerName = "server." + region + ".nomad"
|
||||
return WrapTLSClient(conn, &conf)
|
||||
return WrapTLSClient(conn, conf)
|
||||
}
|
||||
return wrapper, nil
|
||||
} else {
|
||||
|
|
|
@ -112,7 +112,7 @@ func parseJob(result *api.Job, list *ast.ObjectList) error {
|
|||
|
||||
// Set the ID and name to the object key
|
||||
result.ID = helper.StringToPtr(obj.Keys[0].Token.Value().(string))
|
||||
result.Name = result.ID
|
||||
result.Name = helper.StringToPtr(*result.ID)
|
||||
|
||||
// Decode the rest
|
||||
if err := mapstructure.WeakDecode(m, result); err != nil {
|
||||
|
@ -124,7 +124,7 @@ func parseJob(result *api.Job, list *ast.ObjectList) error {
|
|||
if ot, ok := obj.Val.(*ast.ObjectType); ok {
|
||||
listVal = ot.List
|
||||
} else {
|
||||
return fmt.Errorf("job '%s' value: should be an object", result.ID)
|
||||
return fmt.Errorf("job '%s' value: should be an object", *result.ID)
|
||||
}
|
||||
|
||||
// Check for invalid keys
|
||||
|
|
@@ -130,8 +130,8 @@ func TestParse(t *testing.T) {
 	Networks: []*api.NetworkResource{
 		&api.NetworkResource{
 			MBits:         helper.IntToPtr(100),
-			ReservedPorts: []api.Port{{"one", 1}, {"two", 2}, {"three", 3}},
-			DynamicPorts:  []api.Port{{"http", 0}, {"https", 0}, {"admin", 0}},
+			ReservedPorts: []api.Port{{Label: "one", Value: 1}, {Label: "two", Value: 2}, {Label: "three", Value: 3}},
+			DynamicPorts:  []api.Port{{Label: "http", Value: 0}, {Label: "https", Value: 0}, {Label: "admin", Value: 0}},
 		},
 	},
 },
|
|||
}
|
||||
var resp structs.GenericResponse
|
||||
if err := msgpackrpc.CallWithCodec(codec, "Eval.Reblock", get, &resp); err == nil {
|
||||
t.Fatalf("should error since eval was not in blocked state", err)
|
||||
t.Fatalf("should error since eval was not in blocked state: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -238,7 +238,7 @@ func (s *Server) restoreRevokingAccessors() error {
|
|||
// Check the allocation
|
||||
alloc, err := state.AllocByID(ws, va.AllocID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to lookup allocation: %v", va.AllocID, err)
|
||||
return fmt.Errorf("failed to lookup allocation %q: %v", va.AllocID, err)
|
||||
}
|
||||
if alloc == nil || alloc.Terminated() {
|
||||
// No longer running and should be revoked
|
||||
|
|
@@ -126,7 +126,7 @@ func TestClientEndpoint_Register_SecretMismatch(t *testing.T) {
 	node.SecretID = structs.GenerateUUID()
 	err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp)
 	if err == nil || !strings.Contains(err.Error(), "Not registering") {
-		t.Fatalf("Expecting error regarding mismatching secret id", err)
+		t.Fatalf("Expecting error regarding mismatching secret id: %v", err)
 	}
 }
 
|
|||
|
||||
// Setup the Consul syncer
|
||||
if err := s.setupConsulSyncer(); err != nil {
|
||||
return nil, fmt.Errorf("failed to create server Consul syncer: %v")
|
||||
return nil, fmt.Errorf("failed to create server Consul syncer: %v", err)
|
||||
}
|
||||
|
||||
// Monitor leadership changes
|
||||
|
|
@@ -744,7 +744,7 @@ func (s *StateStore) nestedUpsertEval(txn *memdb.Txn, index uint64, eval *struct
 	// Get the blocked evaluation for a job if it exists
 	iter, err := txn.Get("evals", "job", eval.JobID, structs.EvalStatusBlocked)
 	if err != nil {
-		return fmt.Errorf("failed to get blocked evals for job %q", eval.JobID, err)
+		return fmt.Errorf("failed to get blocked evals for job %q: %v", eval.JobID, err)
 	}
 
 	var blocked []*structs.Evaluation
@@ -1708,7 +1708,7 @@ func (s *StateStore) updateSummaryWithAlloc(index uint64, alloc *structs.Allocat
 
 	summaryRaw, err := txn.First("job_summary", "id", alloc.JobID)
 	if err != nil {
-		return fmt.Errorf("unable to lookup job summary for job id %q: %v", err)
+		return fmt.Errorf("unable to lookup job summary for job id %q: %v", alloc.JobID, err)
 	}
 
 	if summaryRaw == nil {
@@ -1863,7 +1863,7 @@ func TestTaskGroupDiff(t *testing.T) {
 	for i, c := range cases {
 		actual, err := c.Old.Diff(c.New, c.Contextual)
 		if c.Error && err == nil {
-			t.Fatalf("case %d: expected errored")
+			t.Fatalf("case %d: expected errored", i+1)
 		} else if err != nil {
 			if !c.Error {
 				t.Fatalf("case %d: errored %#v", i+1, err)
@@ -2137,7 +2137,7 @@ func (sc *ServiceCheck) validate() error {
 	case api.HealthWarning:
 	case api.HealthCritical:
 	default:
-		return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q, %q or empty`, sc.InitialStatus, api.HealthPassing, api.HealthWarning, api.HealthCritical)
+		return fmt.Errorf(`invalid initial check state (%s), must be one of %q, %q, %q or empty`, sc.InitialStatus, api.HealthPassing, api.HealthWarning, api.HealthCritical)
 
 	}
 
@@ -599,7 +599,7 @@ func TestTask_Validate_Service_Check(t *testing.T) {
 
 	err := check1.validate()
 	if err != nil {
-		t.Fatal("err: %v", err)
+		t.Fatalf("err: %v", err)
 	}
 
 	check1.InitialStatus = "foo"
@@ -1221,7 +1221,7 @@ func TestPeriodicConfig_EnabledInvalid(t *testing.T) {
 	// Create a config that is enabled, with a bad time zone.
 	p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"}
 	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") {
-		t.Fatal("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err)
+		t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err)
 	}
 }
 
@@ -1267,7 +1267,7 @@ func TestPeriodicConfig_ValidTimeZone(t *testing.T) {
 		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone}
 		p.Canonicalize()
 		if err := p.Validate(); err != nil {
-			t.Fatal("Valid tz errored: %v", err)
+			t.Fatalf("Valid tz errored: %v", err)
 		}
 	}
 }
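t.Fatal takes plain arguments, so a %v inside its message is printed literally instead of being substituted; vet's printf check reports a possible formatting directive in a non-formatting call, and the fix is to use t.Fatalf. A hedged sketch:

package demo

import "testing"

func TestFatalVsFatalf(t *testing.T) {
	err := error(nil)
	if err != nil {
		// Prints the literal text "err: %v <nil>"; go vet reports a
		// possible formatting directive in the call to Fatal.
		t.Fatal("err: %v", err)

		// Substitutes the value as intended.
		t.Fatalf("err: %v", err)
	}
}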
@@ -637,7 +637,6 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) {
 			}
 
 			// Cancel the context
-			cancel()
 			close(unblock)
 		}()
 	}
@@ -646,6 +645,7 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) {
 	case <-time.After(5 * time.Second):
 		t.Fatalf("timeout")
 	case <-unblock:
+		cancel()
 	}
 
 	desired := numRequests - 1
@ -485,7 +485,11 @@ func TestTasksUpdated(t *testing.T) {
|
|||
}
|
||||
|
||||
j6 := mock.Job()
|
||||
j6.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts = []structs.Port{{"http", 0}, {"https", 0}, {"admin", 0}}
|
||||
j6.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts = []structs.Port{
|
||||
{Label: "http", Value: 0},
|
||||
{Label: "https", Value: 0},
|
||||
{Label: "admin", Value: 0},
|
||||
}
|
||||
if !tasksUpdated(j1, j6, name) {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
|