Merge pull request #3284 from hashicorp/f-lint

Enable more linters
Authored by Alex Dadgar on 2017-09-26 15:46:42 -07:00; committed by GitHub
commit 76e4230833
120 changed files with 745 additions and 973 deletions


@ -7,9 +7,6 @@ GIT_DIRTY := $(if $(shell git status --porcelain),+CHANGES)
GO_LDFLAGS := "-X main.GitCommit=$(GIT_COMMIT)$(GIT_DIRTY)"
GO_TAGS =
# Enable additional linters as the codebase evolves to pass them
CHECKS ?= --enable goimports
default: help
ifeq (,$(findstring $(THIS_OS),Darwin Linux FreeBSD))
@ -160,11 +157,23 @@ check: ## Lint the source code
@gometalinter \
--deadline 10m \
--vendor \
--exclude '(.*\.generated\.go:\d+:|bindata_assetfs)' \
--exclude='.*\.generated\.go' \
--exclude='.*bindata_assetfs\.go' \
--skip="ui/" \
--sort="path" \
--aggregate \
--enable-gc \
--disable-all \
--sort severity \
$(CHECKS) \
--enable goimports \
--enable misspell \
--enable vet \
--enable deadcode \
--enable varcheck \
--enable ineffassign \
--enable structcheck \
--enable unconvert \
--enable gas \
--enable gofmt \
./...
.PHONY: checkscripts
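Most of the churn in the files below is mechanical fallout from the linters this `check` target now enables, with `gofmt -s` composite-literal simplification accounting for the bulk of it. A standalone sketch of that rewrite, using a stand-in `Constraint` type shaped like the one in the diff rather than Nomad's real package:

```go
package example

type Constraint struct {
	LTarget, RTarget, Operand string
}

// verbose is the pre-lint form: the element type is spelled out for every
// entry even though the slice type already names it.
var verbose = []*Constraint{
	&Constraint{LTarget: "kernel.name", RTarget: "linux", Operand: "="},
}

// simplified is what gofmt -s rewrites it to: the &Constraint is elided
// because the slice's element type (*Constraint) already implies it.
var simplified = []*Constraint{
	{LTarget: "kernel.name", RTarget: "linux", Operand: "="},
}
```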


@ -78,7 +78,7 @@ func isNamespaceCapabilityValid(cap string) bool {
case NamespaceCapabilityDeny, NamespaceCapabilityListJobs, NamespaceCapabilityReadJob,
NamespaceCapabilitySubmitJob, NamespaceCapabilityReadLogs, NamespaceCapabilityReadFS:
return true
// Seperate the enterprise-only capabilities
// Separate the enterprise-only capabilities
case NamespaceCapabilitySentinelOverride:
return true
default:


@ -24,7 +24,7 @@ func TestParse(t *testing.T) {
"",
&Policy{
Namespaces: []*NamespacePolicy{
&NamespacePolicy{
{
Name: "default",
Policy: PolicyRead,
Capabilities: []string{
@ -59,7 +59,7 @@ func TestParse(t *testing.T) {
"",
&Policy{
Namespaces: []*NamespacePolicy{
&NamespacePolicy{
{
Name: "default",
Policy: PolicyRead,
Capabilities: []string{
@ -67,7 +67,7 @@ func TestParse(t *testing.T) {
NamespaceCapabilityReadJob,
},
},
&NamespacePolicy{
{
Name: "other",
Policy: PolicyWrite,
Capabilities: []string{
@ -78,7 +78,7 @@ func TestParse(t *testing.T) {
NamespaceCapabilityReadFS,
},
},
&NamespacePolicy{
{
Name: "secret",
Capabilities: []string{
NamespaceCapabilityDeny,
@ -160,7 +160,7 @@ func TestParse(t *testing.T) {
"",
&Policy{
Namespaces: []*NamespacePolicy{
&NamespacePolicy{
{
Name: "default",
Policy: "",
Capabilities: []string{


@ -142,98 +142,98 @@ func TestAgents_Sort(t *testing.T) {
}{
{
[]*AgentMember{
&AgentMember{Name: "nomad-2.vac.us-east",
{Name: "nomad-2.vac.us-east",
Tags: map[string]string{"region": "us-east", "dc": "us-east-1c"}},
&AgentMember{Name: "nomad-1.global",
{Name: "nomad-1.global",
Tags: map[string]string{"region": "global", "dc": "dc1"}},
&AgentMember{Name: "nomad-1.vac.us-east",
{Name: "nomad-1.vac.us-east",
Tags: map[string]string{"region": "us-east", "dc": "us-east-1c"}},
},
[]*AgentMember{
&AgentMember{Name: "nomad-1.global",
{Name: "nomad-1.global",
Tags: map[string]string{"region": "global", "dc": "dc1"}},
&AgentMember{Name: "nomad-1.vac.us-east",
{Name: "nomad-1.vac.us-east",
Tags: map[string]string{"region": "us-east", "dc": "us-east-1c"}},
&AgentMember{Name: "nomad-2.vac.us-east",
{Name: "nomad-2.vac.us-east",
Tags: map[string]string{"region": "us-east", "dc": "us-east-1c"}},
},
},
{
[]*AgentMember{
&AgentMember{Name: "nomad-02.tam.us-east",
{Name: "nomad-02.tam.us-east",
Tags: map[string]string{"region": "us-east", "dc": "tampa"}},
&AgentMember{Name: "nomad-02.pal.us-west",
{Name: "nomad-02.pal.us-west",
Tags: map[string]string{"region": "us-west", "dc": "palo_alto"}},
&AgentMember{Name: "nomad-01.pal.us-west",
{Name: "nomad-01.pal.us-west",
Tags: map[string]string{"region": "us-west", "dc": "palo_alto"}},
&AgentMember{Name: "nomad-01.tam.us-east",
{Name: "nomad-01.tam.us-east",
Tags: map[string]string{"region": "us-east", "dc": "tampa"}},
},
[]*AgentMember{
&AgentMember{Name: "nomad-01.tam.us-east",
{Name: "nomad-01.tam.us-east",
Tags: map[string]string{"region": "us-east", "dc": "tampa"}},
&AgentMember{Name: "nomad-02.tam.us-east",
{Name: "nomad-02.tam.us-east",
Tags: map[string]string{"region": "us-east", "dc": "tampa"}},
&AgentMember{Name: "nomad-01.pal.us-west",
{Name: "nomad-01.pal.us-west",
Tags: map[string]string{"region": "us-west", "dc": "palo_alto"}},
&AgentMember{Name: "nomad-02.pal.us-west",
{Name: "nomad-02.pal.us-west",
Tags: map[string]string{"region": "us-west", "dc": "palo_alto"}},
},
},
{
[]*AgentMember{
&AgentMember{Name: "nomad-02.tam.us-east",
{Name: "nomad-02.tam.us-east",
Tags: map[string]string{"region": "us-east", "dc": "tampa"}},
&AgentMember{Name: "nomad-02.ams.europe",
{Name: "nomad-02.ams.europe",
Tags: map[string]string{"region": "europe", "dc": "amsterdam"}},
&AgentMember{Name: "nomad-01.tam.us-east",
{Name: "nomad-01.tam.us-east",
Tags: map[string]string{"region": "us-east", "dc": "tampa"}},
&AgentMember{Name: "nomad-01.ams.europe",
{Name: "nomad-01.ams.europe",
Tags: map[string]string{"region": "europe", "dc": "amsterdam"}},
},
[]*AgentMember{
&AgentMember{Name: "nomad-01.ams.europe",
{Name: "nomad-01.ams.europe",
Tags: map[string]string{"region": "europe", "dc": "amsterdam"}},
&AgentMember{Name: "nomad-02.ams.europe",
{Name: "nomad-02.ams.europe",
Tags: map[string]string{"region": "europe", "dc": "amsterdam"}},
&AgentMember{Name: "nomad-01.tam.us-east",
{Name: "nomad-01.tam.us-east",
Tags: map[string]string{"region": "us-east", "dc": "tampa"}},
&AgentMember{Name: "nomad-02.tam.us-east",
{Name: "nomad-02.tam.us-east",
Tags: map[string]string{"region": "us-east", "dc": "tampa"}},
},
},
{
[]*AgentMember{
&AgentMember{Name: "nomad-02.ber.europe",
{Name: "nomad-02.ber.europe",
Tags: map[string]string{"region": "europe", "dc": "berlin"}},
&AgentMember{Name: "nomad-02.ams.europe",
{Name: "nomad-02.ams.europe",
Tags: map[string]string{"region": "europe", "dc": "amsterdam"}},
&AgentMember{Name: "nomad-01.ams.europe",
{Name: "nomad-01.ams.europe",
Tags: map[string]string{"region": "europe", "dc": "amsterdam"}},
&AgentMember{Name: "nomad-01.ber.europe",
{Name: "nomad-01.ber.europe",
Tags: map[string]string{"region": "europe", "dc": "berlin"}},
},
[]*AgentMember{
&AgentMember{Name: "nomad-01.ams.europe",
{Name: "nomad-01.ams.europe",
Tags: map[string]string{"region": "europe", "dc": "amsterdam"}},
&AgentMember{Name: "nomad-02.ams.europe",
{Name: "nomad-02.ams.europe",
Tags: map[string]string{"region": "europe", "dc": "amsterdam"}},
&AgentMember{Name: "nomad-01.ber.europe",
{Name: "nomad-01.ber.europe",
Tags: map[string]string{"region": "europe", "dc": "berlin"}},
&AgentMember{Name: "nomad-02.ber.europe",
{Name: "nomad-02.ber.europe",
Tags: map[string]string{"region": "europe", "dc": "berlin"}},
},
},
{
[]*AgentMember{
&AgentMember{Name: "nomad-1.global"},
&AgentMember{Name: "nomad-3.global"},
&AgentMember{Name: "nomad-2.global"},
{Name: "nomad-1.global"},
{Name: "nomad-3.global"},
{Name: "nomad-2.global"},
},
[]*AgentMember{
&AgentMember{Name: "nomad-1.global"},
&AgentMember{Name: "nomad-2.global"},
&AgentMember{Name: "nomad-3.global"},
{Name: "nomad-1.global"},
{Name: "nomad-2.global"},
{Name: "nomad-3.global"},
},
},
}


@ -104,16 +104,16 @@ func TestAllocations_PrefixList(t *testing.T) {
func TestAllocations_CreateIndexSort(t *testing.T) {
t.Parallel()
allocs := []*AllocationListStub{
&AllocationListStub{CreateIndex: 2},
&AllocationListStub{CreateIndex: 1},
&AllocationListStub{CreateIndex: 5},
{CreateIndex: 2},
{CreateIndex: 1},
{CreateIndex: 5},
}
sort.Sort(AllocIndexSort(allocs))
expect := []*AllocationListStub{
&AllocationListStub{CreateIndex: 5},
&AllocationListStub{CreateIndex: 2},
&AllocationListStub{CreateIndex: 1},
{CreateIndex: 5},
{CreateIndex: 2},
{CreateIndex: 1},
}
if !reflect.DeepEqual(allocs, expect) {
t.Fatalf("\n\n%#v\n\n%#v", allocs, expect)


@ -20,7 +20,7 @@ func TestCompose(t *testing.T) {
DiskMB: helper.IntToPtr(2048),
IOPS: helper.IntToPtr(500),
Networks: []*NetworkResource{
&NetworkResource{
{
CIDR: "0.0.0.0/0",
MBits: helper.IntToPtr(100),
ReservedPorts: []Port{{"", 80}, {"", 443}},
@ -55,25 +55,25 @@ func TestCompose(t *testing.T) {
"foo": "bar",
},
Constraints: []*Constraint{
&Constraint{
{
LTarget: "kernel.name",
RTarget: "linux",
Operand: "=",
},
},
TaskGroups: []*TaskGroup{
&TaskGroup{
{
Name: helper.StringToPtr("grp1"),
Count: helper.IntToPtr(2),
Constraints: []*Constraint{
&Constraint{
{
LTarget: "kernel.name",
RTarget: "linux",
Operand: "=",
},
},
Tasks: []*Task{
&Task{
{
Name: "task1",
Driver: "exec",
Resources: &Resources{
@ -82,7 +82,7 @@ func TestCompose(t *testing.T) {
DiskMB: helper.IntToPtr(2048),
IOPS: helper.IntToPtr(500),
Networks: []*NetworkResource{
&NetworkResource{
{
CIDR: "0.0.0.0/0",
MBits: helper.IntToPtr(100),
ReservedPorts: []Port{
@ -93,7 +93,7 @@ func TestCompose(t *testing.T) {
},
},
Constraints: []*Constraint{
&Constraint{
{
LTarget: "kernel.name",
RTarget: "linux",
Operand: "=",


@ -145,16 +145,16 @@ func TestEvaluations_Allocations(t *testing.T) {
func TestEvaluations_Sort(t *testing.T) {
t.Parallel()
evals := []*Evaluation{
&Evaluation{CreateIndex: 2},
&Evaluation{CreateIndex: 1},
&Evaluation{CreateIndex: 5},
{CreateIndex: 2},
{CreateIndex: 1},
{CreateIndex: 5},
}
sort.Sort(EvalIndexSort(evals))
expect := []*Evaluation{
&Evaluation{CreateIndex: 5},
&Evaluation{CreateIndex: 2},
&Evaluation{CreateIndex: 1},
{CreateIndex: 5},
{CreateIndex: 2},
{CreateIndex: 1},
}
if !reflect.DeepEqual(evals, expect) {
t.Fatalf("\n\n%#v\n\n%#v", evals, expect)


@ -622,13 +622,13 @@ func TestJobs_EnforceRegister(t *testing.T) {
// Create a job and attempt to register it with an incorrect index.
job := testJob()
resp2, wm, err := jobs.EnforceRegister(job, 10, nil)
resp2, _, err := jobs.EnforceRegister(job, 10, nil)
if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) {
t.Fatalf("expected enforcement error: %v", err)
}
// Register
resp2, wm, err = jobs.EnforceRegister(job, 0, nil)
resp2, wm, err := jobs.EnforceRegister(job, 0, nil)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -655,7 +655,7 @@ func TestJobs_EnforceRegister(t *testing.T) {
curIndex := resp[0].JobModifyIndex
// Fail at incorrect index
resp2, wm, err = jobs.EnforceRegister(job, 123456, nil)
resp2, _, err = jobs.EnforceRegister(job, 123456, nil)
if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) {
t.Fatalf("expected enforcement error: %v", err)
}
@ -699,7 +699,7 @@ func TestJobs_Revert(t *testing.T) {
assertWriteMeta(t, wm)
// Fail revert at incorrect enforce
_, wm, err = jobs.Revert(*job.ID, 0, helper.Uint64ToPtr(10), nil)
_, _, err = jobs.Revert(*job.ID, 0, helper.Uint64ToPtr(10), nil)
if err == nil || !strings.Contains(err.Error(), "enforcing version") {
t.Fatalf("expected enforcement error: %v", err)
}
@ -1127,6 +1127,7 @@ func TestJobs_Plan(t *testing.T) {
if len(planResp.CreatedEvals) == 0 {
t.Fatalf("got no CreatedEvals: %#v", planResp)
}
assertWriteMeta(t, wm)
// Make a plan request w/o the diff
planResp, wm, err = jobs.Plan(job, false, nil)
@ -1263,12 +1264,12 @@ func TestJobs_Constrain(t *testing.T) {
// Adding another constraint preserves the original
job.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
expect := []*Constraint{
&Constraint{
{
LTarget: "kernel.name",
RTarget: "darwin",
Operand: "=",
},
&Constraint{
{
LTarget: "memory.totalbytes",
RTarget: "128000000",
Operand: ">=",
@ -1282,16 +1283,16 @@ func TestJobs_Constrain(t *testing.T) {
func TestJobs_Sort(t *testing.T) {
t.Parallel()
jobs := []*JobListStub{
&JobListStub{ID: "job2"},
&JobListStub{ID: "job0"},
&JobListStub{ID: "job1"},
{ID: "job2"},
{ID: "job0"},
{ID: "job1"},
}
sort.Sort(JobIDSort(jobs))
expect := []*JobListStub{
&JobListStub{ID: "job0"},
&JobListStub{ID: "job1"},
&JobListStub{ID: "job2"},
{ID: "job0"},
{ID: "job1"},
{ID: "job2"},
}
if !reflect.DeepEqual(jobs, expect) {
t.Fatalf("\n\n%#v\n\n%#v", jobs, expect)
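The `_, _, err` rewrites in this file are what `ineffassign` asks for: return values such as `resp2` and `wm` from `jobs.EnforceRegister` were assigned but never read. A minimal, self-contained sketch of the pattern; the `register` helper below is invented for illustration only:

```go
package example

import "errors"

// register stands in for a call like jobs.EnforceRegister above: it returns
// a response, write metadata, and an error.
func register() (resp string, meta string, err error) {
	return "", "", errors.New("enforcement error")
}

func caller() error {
	// Before: resp2, wm, err := register(), with resp2 and wm never read,
	// which ineffassign reports. After: unused results get the blank identifier.
	_, _, err := register()
	return err
}
```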


@ -17,14 +17,14 @@ func MockJob() *Job {
AllAtOnce: helper.BoolToPtr(false),
Datacenters: []string{"dc1"},
Constraints: []*Constraint{
&Constraint{
{
LTarget: "${attr.kernel.name}",
RTarget: "linux",
Operand: "=",
},
},
TaskGroups: []*TaskGroup{
&TaskGroup{
{
Name: helper.StringToPtr("web"),
Count: helper.IntToPtr(10),
EphemeralDisk: &EphemeralDisk{
@ -37,7 +37,7 @@ func MockJob() *Job {
Mode: helper.StringToPtr("delay"),
},
Tasks: []*Task{
&Task{
{
Name: "web",
Driver: "exec",
Config: map[string]interface{}{
@ -72,7 +72,7 @@ func MockJob() *Job {
CPU: helper.IntToPtr(500),
MemoryMB: helper.IntToPtr(256),
Networks: []*NetworkResource{
&NetworkResource{
{
MBits: helper.IntToPtr(50),
DynamicPorts: []Port{{Label: "http"}, {Label: "admin"}},
},


@ -22,7 +22,7 @@ func (n *Nodes) List(q *QueryOptions) ([]*NodeListStub, *QueryMeta, error) {
if err != nil {
return nil, nil, err
}
sort.Sort(NodeIndexSort(resp))
sort.Sort(resp)
return resp, qm, nil
}


@ -260,16 +260,16 @@ func TestNodes_ForceEvaluate(t *testing.T) {
func TestNodes_Sort(t *testing.T) {
t.Parallel()
nodes := []*NodeListStub{
&NodeListStub{CreateIndex: 2},
&NodeListStub{CreateIndex: 1},
&NodeListStub{CreateIndex: 5},
{CreateIndex: 2},
{CreateIndex: 1},
{CreateIndex: 5},
}
sort.Sort(NodeIndexSort(nodes))
expect := []*NodeListStub{
&NodeListStub{CreateIndex: 5},
&NodeListStub{CreateIndex: 2},
&NodeListStub{CreateIndex: 1},
{CreateIndex: 5},
{CreateIndex: 2},
{CreateIndex: 1},
}
if !reflect.DeepEqual(nodes, expect) {
t.Fatalf("\n\n%#v\n\n%#v", nodes, expect)


@ -75,7 +75,7 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err
// TODO (alexdadgar) Currently we made address a query parameter. Once
// IDs are in place this will be DELETE /v1/operator/raft/peer/<id>.
r.params.Set("address", string(address))
r.params.Set("address", address)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {
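`string(address)` is dropped here because `address` is already a string; `unconvert` flags identity and otherwise redundant conversions like this (the same check drives the later `time.Sleep(time.Duration(backoff))` → `time.Sleep(backoff)` change). A tiny sketch with an invented setter, not Nomad's request type:

```go
package example

import "net/url"

// setAddress mimics the call above: params.Set takes a string and address is
// already a string, so wrapping it in string(...) is a no-op conversion that
// unconvert reports.
func setAddress(params url.Values, address string) {
	params.Set("address", address) // was: params.Set("address", string(address))
}
```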


@ -38,12 +38,12 @@ func TestTaskGroup_Constrain(t *testing.T) {
// Add a second constraint
grp.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
expect := []*Constraint{
&Constraint{
{
LTarget: "kernel.name",
RTarget: "darwin",
Operand: "=",
},
&Constraint{
{
LTarget: "memory.totalbytes",
RTarget: "128000000",
Operand: ">=",
@ -95,11 +95,11 @@ func TestTaskGroup_AddTask(t *testing.T) {
// Add a second task
grp.AddTask(NewTask("task2", "exec"))
expect := []*Task{
&Task{
{
Name: "task1",
Driver: "java",
},
&Task{
{
Name: "task2",
Driver: "exec",
},
@ -178,7 +178,7 @@ func TestTask_Require(t *testing.T) {
DiskMB: helper.IntToPtr(2048),
IOPS: helper.IntToPtr(500),
Networks: []*NetworkResource{
&NetworkResource{
{
CIDR: "0.0.0.0/0",
MBits: helper.IntToPtr(100),
ReservedPorts: []Port{{"", 80}, {"", 443}},
@ -214,12 +214,12 @@ func TestTask_Constrain(t *testing.T) {
// Add a second constraint
task.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
expect := []*Constraint{
&Constraint{
{
LTarget: "kernel.name",
RTarget: "darwin",
Operand: "=",
},
&Constraint{
{
LTarget: "memory.totalbytes",
RTarget: "128000000",
Operand: ">=",


@ -20,16 +20,6 @@ import (
cstructs "github.com/hashicorp/nomad/client/structs"
)
const (
// taskReceivedSyncLimit is how long the client will wait before sending
// that a task was received to the server. The client does not immediately
// send that the task was received to the server because another transition
// to running or failed is likely to occur immediately after and a single
// update will transfer all past state information. If not other transition
// has occurred up to this limit, we will send to the server.
taskReceivedSyncLimit = 30 * time.Second
)
var (
// The following are the key paths written to the state database
allocRunnerStateAllocKey = []byte("alloc")


@ -49,6 +49,18 @@ func (m *MockAllocStateUpdater) Last() (int, *structs.Allocation) {
return n, m.Allocs[n-1].Copy()
}
// allocationBucketExists checks if the allocation bucket was created.
func allocationBucketExists(tx *bolt.Tx, allocID string) bool {
allocations := tx.Bucket(allocationsBucket)
if allocations == nil {
return false
}
// Retrieve the specific allocations bucket
alloc := allocations.Bucket([]byte(allocID))
return alloc != nil
}
func testAllocRunnerFromAlloc(alloc *structs.Allocation, restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
logger := testLogger()
conf := config.DefaultConfig()


@ -21,11 +21,6 @@ import (
)
var (
osMountSharedDirSupport = map[string]bool{
"darwin": true,
"linux": true,
}
t1 = &structs.Task{
Name: "web",
Driver: "exec",


@ -1,7 +1,6 @@
package allocdir
import (
"errors"
"os"
"path/filepath"
)
@ -25,11 +24,6 @@ func linkOrCopy(src, dst string, uid, gid int, perm os.FileMode) error {
return fileCopy(src, dst, uid, gid, perm)
}
// The windows version does nothing currently.
func mountSharedDir(dir string) error {
return errors.New("Mount on Windows not supported.")
}
// The windows version does nothing currently.
func linkDir(src, dst string) error {
return nil
@ -55,11 +49,6 @@ func dropDirPermissions(path string, desired os.FileMode) error {
return nil
}
// The windows version does nothing currently.
func unmountSharedDir(dir string) error {
return nil
}
// MountSpecialDirs mounts the dev and proc file system on the chroot of the
// task. It's a no-op on windows.
func MountSpecialDirs(taskDir string) error {


@ -62,7 +62,7 @@ const (
stateSnapshotIntv = 60 * time.Second
// initialHeartbeatStagger is used to stagger the interval between
// starting and the intial heartbeat. After the intial heartbeat,
// starting and the initial heartbeat. After the initial heartbeat,
// we switch to using the TTL specified by the servers.
initialHeartbeatStagger = 10 * time.Second
@ -120,7 +120,7 @@ type Client struct {
triggerDiscoveryCh chan struct{}
// discovered will be ticked whenever Consul discovery completes
// succesfully
// successfully
serversDiscoveredCh chan struct{}
// allocs is the current set of allocations
@ -473,7 +473,7 @@ func (c *Client) Stats() map[string]map[string]string {
c.heartbeatLock.Lock()
defer c.heartbeatLock.Unlock()
stats := map[string]map[string]string{
"client": map[string]string{
"client": {
"node_id": c.NodeID(),
"known_servers": c.servers.all().String(),
"num_allocations": strconv.Itoa(c.NumAllocs()),
@ -1650,10 +1650,9 @@ func (c *Client) deriveToken(alloc *structs.Allocation, taskNames []string, vcli
}
verifiedTasks := []string{}
found := false
// Check if the given task names actually exist in the allocation
for _, taskName := range taskNames {
found = false
found := false
for _, task := range group.Tasks {
if task.Name == taskName {
found = true
@ -1903,7 +1902,10 @@ func (c *Client) setGaugeForMemoryStats(nodeID string, hStats *stats.HostStats)
func (c *Client) setGaugeForCPUStats(nodeID string, hStats *stats.HostStats) {
for _, cpu := range hStats.CPU {
if !c.config.DisableTaggedMetrics {
labels := append(c.baseLabels, metrics.Label{"cpu", cpu.CPU})
labels := append(c.baseLabels, metrics.Label{
Name: "cpu",
Value: cpu.CPU,
})
metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "total"}, float32(cpu.Total), labels)
metrics.SetGaugeWithLabels([]string{"client", "host", "cpu", "user"}, float32(cpu.User), labels)
@ -1924,7 +1926,10 @@ func (c *Client) setGaugeForCPUStats(nodeID string, hStats *stats.HostStats) {
func (c *Client) setGaugeForDiskStats(nodeID string, hStats *stats.HostStats) {
for _, disk := range hStats.DiskStats {
if !c.config.DisableTaggedMetrics {
labels := append(c.baseLabels, metrics.Label{"disk", disk.Device})
labels := append(c.baseLabels, metrics.Label{
Name: "disk",
Value: disk.Device,
})
metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "size"}, float32(disk.Size), labels)
metrics.SetGaugeWithLabels([]string{"client", "host", "disk", "used"}, float32(disk.Used), labels)
@ -1969,7 +1974,10 @@ func (c *Client) setGaugeForAllocationStats(nodeID string) {
for _, n := range allocated.Networks {
if !c.config.DisableTaggedMetrics {
labels := append(c.baseLabels, metrics.Label{"device", n.Device})
labels := append(c.baseLabels, metrics.Label{
Name: "device",
Value: n.Device,
})
metrics.SetGaugeWithLabels([]string{"client", "allocated", "network"}, float32(n.MBits), labels)
}
@ -1999,18 +2007,19 @@ func (c *Client) setGaugeForAllocationStats(nodeID string) {
}
for _, n := range allocated.Networks {
totalMbits := 0
totalIdx := total.NetIndex(n)
if totalIdx != -1 {
totalMbits = total.Networks[totalIdx].MBits
continue
}
totalMbits := total.Networks[totalIdx].MBits
unallocatedMbits := totalMbits - n.MBits
if !c.config.DisableTaggedMetrics {
labels := append(c.baseLabels, metrics.Label{"device", n.Device})
labels := append(c.baseLabels, metrics.Label{
Name: "device",
Value: n.Device,
})
metrics.SetGaugeWithLabels([]string{"client", "unallocated", "network"}, float32(unallocatedMbits), labels)
}
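The `metrics.Label{"cpu", cpu.CPU}` literals above are rewritten with field names because `go vet`'s composites check warns about unkeyed fields in struct literals from other packages. A self-contained sketch with a local `Label` type shaped like the one used in the diff:

```go
package example

// Label mirrors the shape of the go-metrics Label type used above.
type Label struct {
	Name  string
	Value string
}

// cpuLabels appends a keyed label, the form adopted in the diff; the unkeyed
// equivalent, Label{"cpu", cpuName}, is what vet would flag for an imported type.
func cpuLabels(base []Label, cpuName string) []Label {
	return append(base, Label{
		Name:  "cpu",
		Value: cpuName,
	})
}
```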


@ -137,7 +137,7 @@ func TestClient_StartStop(t *testing.T) {
}
}
// Certain labels for metrics are dependant on client intial setup. This tests
// Certain labels for metrics are dependant on client initial setup. This tests
// that the client has properly initialized before we assign values to labels
func TestClient_BaseLabels(t *testing.T) {
t.Parallel()


@ -40,12 +40,6 @@ const (
DefaultMaxTemplateEventRate = 3 * time.Second
)
var (
// testRetryRate is used to speed up tests by setting consul-templates retry
// rate to something low
testRetryRate time.Duration = 0
)
// TaskHooks is an interface which provides hooks into the tasks life-cycle
type TaskHooks interface {
// Restart is used to restart the task


@ -349,7 +349,6 @@ type DockerHandle struct {
ImageID string
containerID string
version string
clkSpeed float64
killTimeout time.Duration
maxKillTimeout time.Duration
resourceUsageLock sync.RWMutex
@ -427,108 +426,108 @@ func (d *DockerDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
Raw: config,
Schema: map[string]*fields.FieldSchema{
"image": &fields.FieldSchema{
"image": {
Type: fields.TypeString,
Required: true,
},
"load": &fields.FieldSchema{
"load": {
Type: fields.TypeString,
},
"command": &fields.FieldSchema{
"command": {
Type: fields.TypeString,
},
"args": &fields.FieldSchema{
"args": {
Type: fields.TypeArray,
},
"ipc_mode": &fields.FieldSchema{
"ipc_mode": {
Type: fields.TypeString,
},
"network_mode": &fields.FieldSchema{
"network_mode": {
Type: fields.TypeString,
},
"network_aliases": &fields.FieldSchema{
"network_aliases": {
Type: fields.TypeArray,
},
"ipv4_address": &fields.FieldSchema{
"ipv4_address": {
Type: fields.TypeString,
},
"ipv6_address": &fields.FieldSchema{
"ipv6_address": {
Type: fields.TypeString,
},
"mac_address": &fields.FieldSchema{
"mac_address": {
Type: fields.TypeString,
},
"pid_mode": &fields.FieldSchema{
"pid_mode": {
Type: fields.TypeString,
},
"uts_mode": &fields.FieldSchema{
"uts_mode": {
Type: fields.TypeString,
},
"userns_mode": &fields.FieldSchema{
"userns_mode": {
Type: fields.TypeString,
},
"port_map": &fields.FieldSchema{
"port_map": {
Type: fields.TypeArray,
},
"privileged": &fields.FieldSchema{
"privileged": {
Type: fields.TypeBool,
},
"dns_servers": &fields.FieldSchema{
"dns_servers": {
Type: fields.TypeArray,
},
"dns_options": &fields.FieldSchema{
"dns_options": {
Type: fields.TypeArray,
},
"dns_search_domains": &fields.FieldSchema{
"dns_search_domains": {
Type: fields.TypeArray,
},
"extra_hosts": &fields.FieldSchema{
"extra_hosts": {
Type: fields.TypeArray,
},
"hostname": &fields.FieldSchema{
"hostname": {
Type: fields.TypeString,
},
"labels": &fields.FieldSchema{
"labels": {
Type: fields.TypeArray,
},
"auth": &fields.FieldSchema{
"auth": {
Type: fields.TypeArray,
},
"auth_soft_fail": &fields.FieldSchema{
"auth_soft_fail": {
Type: fields.TypeBool,
},
// COMPAT: Remove in 0.6.0. SSL is no longer needed
"ssl": &fields.FieldSchema{
"ssl": {
Type: fields.TypeBool,
},
"tty": &fields.FieldSchema{
"tty": {
Type: fields.TypeBool,
},
"interactive": &fields.FieldSchema{
"interactive": {
Type: fields.TypeBool,
},
"shm_size": &fields.FieldSchema{
"shm_size": {
Type: fields.TypeInt,
},
"work_dir": &fields.FieldSchema{
"work_dir": {
Type: fields.TypeString,
},
"logging": &fields.FieldSchema{
"logging": {
Type: fields.TypeArray,
},
"volumes": &fields.FieldSchema{
"volumes": {
Type: fields.TypeArray,
},
"volume_driver": &fields.FieldSchema{
"volume_driver": {
Type: fields.TypeString,
},
"mounts": {
Type: fields.TypeArray,
},
"force_pull": &fields.FieldSchema{
"force_pull": {
Type: fields.TypeBool,
},
"security_opt": &fields.FieldSchema{
"security_opt": {
Type: fields.TypeArray,
},
},
@ -1153,7 +1152,7 @@ func (d *DockerDriver) createContainerConfig(ctx *ExecContext, task *structs.Tas
if len(driverConfig.NetworkAliases) > 0 || driverConfig.IPv4Address != "" || driverConfig.IPv6Address != "" {
networkingConfig = &docker.NetworkingConfig{
EndpointsConfig: map[string]*docker.EndpointConfig{
hostConfig.NetworkMode: &docker.EndpointConfig{},
hostConfig.NetworkMode: {},
},
}
}
@ -1414,7 +1413,7 @@ func (d *DockerDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, er
// Look for a running container with this ID
containers, err := client.ListContainers(docker.ListContainersOptions{
Filters: map[string][]string{
"id": []string{pid.ContainerID},
"id": {pid.ContainerID},
},
})
if err != nil {
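The driver `Validate` schemas get the same `gofmt -s` treatment for map values: with a `map[string]*fields.FieldSchema`, each entry can drop the redundant `&fields.FieldSchema{...}`. A sketch with a stand-in schema type rather than the real `fields` package:

```go
package example

type fieldSchema struct {
	Type     string
	Required bool
}

// schema shows the simplified form: because the map's value type is
// *fieldSchema, each entry elides &fieldSchema{...} and keeps only the braces.
var schema = map[string]*fieldSchema{
	"image":   {Type: "string", Required: true},
	"command": {Type: "string"},
	"args":    {Type: "array"},
}
```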


@ -10,5 +10,5 @@ const (
)
func getPortBinding(ip string, port string) []docker.PortBinding {
return []docker.PortBinding{docker.PortBinding{HostIP: ip, HostPort: port}}
return []docker.PortBinding{{HostIP: ip, HostPort: port}}
}


@ -68,7 +68,7 @@ func dockerTask() (*structs.Task, int, int) {
MemoryMB: 256,
CPU: 512,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
{
IP: "127.0.0.1",
ReservedPorts: []structs.Port{{Label: "main", Value: docker_reserved}},
DynamicPorts: []structs.Port{{Label: "REDIS", Value: docker_dynamic}},
@ -772,7 +772,7 @@ func TestDockerDriver_Labels(t *testing.T) {
}
task, _, _ := dockerTask()
task.Config["labels"] = []map[string]string{
map[string]string{
{
"label1": "value1",
"label2": "value2",
},
@ -955,10 +955,10 @@ func TestDockerDriver_PortsNoMap(t *testing.T) {
// Verify that the correct ports are EXPOSED
expectedExposedPorts := map[docker.Port]struct{}{
docker.Port(fmt.Sprintf("%d/tcp", res)): struct{}{},
docker.Port(fmt.Sprintf("%d/udp", res)): struct{}{},
docker.Port(fmt.Sprintf("%d/tcp", dyn)): struct{}{},
docker.Port(fmt.Sprintf("%d/udp", dyn)): struct{}{},
docker.Port(fmt.Sprintf("%d/tcp", res)): {},
docker.Port(fmt.Sprintf("%d/udp", res)): {},
docker.Port(fmt.Sprintf("%d/tcp", dyn)): {},
docker.Port(fmt.Sprintf("%d/udp", dyn)): {},
}
if !reflect.DeepEqual(container.Config.ExposedPorts, expectedExposedPorts) {
@ -967,10 +967,10 @@ func TestDockerDriver_PortsNoMap(t *testing.T) {
// Verify that the correct ports are FORWARDED
expectedPortBindings := map[docker.Port][]docker.PortBinding{
docker.Port(fmt.Sprintf("%d/tcp", res)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}},
docker.Port(fmt.Sprintf("%d/udp", res)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}},
docker.Port(fmt.Sprintf("%d/tcp", dyn)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}},
docker.Port(fmt.Sprintf("%d/udp", dyn)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}},
docker.Port(fmt.Sprintf("%d/tcp", res)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}},
docker.Port(fmt.Sprintf("%d/udp", res)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}},
docker.Port(fmt.Sprintf("%d/tcp", dyn)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}},
docker.Port(fmt.Sprintf("%d/udp", dyn)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}},
}
if !reflect.DeepEqual(container.HostConfig.PortBindings, expectedPortBindings) {
@ -996,7 +996,7 @@ func TestDockerDriver_PortsMapping(t *testing.T) {
}
task, res, dyn := dockerTask()
task.Config["port_map"] = []map[string]string{
map[string]string{
{
"main": "8080",
"REDIS": "6379",
},
@ -1014,10 +1014,10 @@ func TestDockerDriver_PortsMapping(t *testing.T) {
// Verify that the correct ports are EXPOSED
expectedExposedPorts := map[docker.Port]struct{}{
docker.Port("8080/tcp"): struct{}{},
docker.Port("8080/udp"): struct{}{},
docker.Port("6379/tcp"): struct{}{},
docker.Port("6379/udp"): struct{}{},
docker.Port("8080/tcp"): {},
docker.Port("8080/udp"): {},
docker.Port("6379/tcp"): {},
docker.Port("6379/udp"): {},
}
if !reflect.DeepEqual(container.Config.ExposedPorts, expectedExposedPorts) {
@ -1026,10 +1026,10 @@ func TestDockerDriver_PortsMapping(t *testing.T) {
// Verify that the correct ports are FORWARDED
expectedPortBindings := map[docker.Port][]docker.PortBinding{
docker.Port("8080/tcp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}},
docker.Port("8080/udp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}},
docker.Port("6379/tcp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}},
docker.Port("6379/udp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}},
docker.Port("8080/tcp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}},
docker.Port("8080/udp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}},
docker.Port("6379/tcp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}},
docker.Port("6379/udp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}},
}
if !reflect.DeepEqual(container.HostConfig.PortBindings, expectedPortBindings) {


@ -9,5 +9,5 @@ const (
//Currently Windows containers don't support host ip in port binding.
func getPortBinding(ip string, port string) []docker.PortBinding {
return []docker.PortBinding{docker.PortBinding{HostIP: "", HostPort: port}}
return []docker.PortBinding{{HostIP: "", HostPort: port}}
}


@ -88,7 +88,7 @@ func NewCreatedResources() *CreatedResources {
// Add a new resource if it doesn't already exist.
func (r *CreatedResources) Add(k, v string) {
if r.Resources == nil {
r.Resources = map[string][]string{k: []string{v}}
r.Resources = map[string][]string{k: {v}}
return
}
existing, ok := r.Resources[k]
@ -340,16 +340,6 @@ func NewExecContext(td *allocdir.TaskDir, te *env.TaskEnv) *ExecContext {
}
}
func mapMergeStrInt(maps ...map[string]int) map[string]int {
out := map[string]int{}
for _, in := range maps {
for key, val := range in {
out[key] = val
}
}
return out
}
func mapMergeStrStr(maps ...map[string]string) map[string]string {
out := map[string]string{}
for _, in := range maps {


@ -23,7 +23,7 @@ var basicResources = &structs.Resources{
MemoryMB: 256,
DiskMB: 20,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
{
IP: "0.0.0.0",
ReservedPorts: []structs.Port{{Label: "main", Value: 12345}},
DynamicPorts: []structs.Port{{Label: "HTTP", Value: 43330}},
@ -140,7 +140,7 @@ func setupTaskEnv(t *testing.T, driver string) (*allocdir.TaskDir, map[string]st
CPU: 1000,
MemoryMB: 500,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
{
IP: "1.2.3.4",
ReservedPorts: []structs.Port{{Label: "one", Value: 80}, {Label: "two", Value: 443}},
DynamicPorts: []structs.Port{{Label: "admin", Value: 8081}, {Label: "web", Value: 8086}},
@ -304,31 +304,6 @@ func TestDriver_TaskEnv_Image(t *testing.T) {
}
}
func TestMapMergeStrInt(t *testing.T) {
t.Parallel()
a := map[string]int{
"cakes": 5,
"cookies": 3,
}
b := map[string]int{
"cakes": 3,
"pies": 2,
}
c := mapMergeStrInt(a, b)
d := map[string]int{
"cakes": 3,
"cookies": 3,
"pies": 2,
}
if !reflect.DeepEqual(c, d) {
t.Errorf("\nExpected\n%+v\nGot\n%+v\n", d, c)
}
}
func TestMapMergeStrStr(t *testing.T) {
t.Parallel()
a := map[string]string{


@ -159,7 +159,7 @@ func TestEnvironment_AsList(t *testing.T) {
"taskEnvKey": "taskEnvVal",
}
task.Resources.Networks = []*structs.NetworkResource{
&structs.NetworkResource{
{
IP: "127.0.0.1",
ReservedPorts: []structs.Port{{Label: "http", Value: 80}},
DynamicPorts: []structs.Port{{Label: "https", Value: 8080}},


@ -20,12 +20,6 @@ import (
"github.com/mitchellh/mapstructure"
)
const (
// The key populated in Node Attributes to indicate the presence of the Exec
// driver
execDriverAttr = "driver.exec"
)
// ExecDriver fork/execs tasks using as many of the underlying OS's isolation
// features.
type ExecDriver struct {
@ -66,11 +60,11 @@ func (d *ExecDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
Raw: config,
Schema: map[string]*fields.FieldSchema{
"command": &fields.FieldSchema{
"command": {
Type: fields.TypeString,
Required: true,
},
"args": &fields.FieldSchema{
"args": {
Type: fields.TypeArray,
},
},


@ -7,6 +7,12 @@ import (
"golang.org/x/sys/unix"
)
const (
// The key populated in Node Attributes to indicate the presence of the Exec
// driver
execDriverAttr = "driver.exec"
)
func (d *ExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
// Only enable if cgroups are available and we are root
if !cgroupsMounted(node) {


@ -163,8 +163,6 @@ type UniversalExecutor struct {
lro *logging.FileRotator
rotatorLock sync.Mutex
shutdownCh chan struct{}
syslogServer *logging.SyslogServer
syslogChan chan *logging.SyslogMessage


@ -14,24 +14,10 @@ import (
"github.com/hashicorp/nomad/client/driver/env"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
tu "github.com/hashicorp/nomad/testutil"
"github.com/mitchellh/go-ps"
)
var (
constraint = &structs.Resources{
CPU: 250,
MemoryMB: 256,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http"}},
},
},
}
)
func testLogger() *log.Logger {
return log.New(os.Stderr, "", log.LstdFlags)
}


@ -80,19 +80,19 @@ func (d *JavaDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
Raw: config,
Schema: map[string]*fields.FieldSchema{
"class": &fields.FieldSchema{
"class": {
Type: fields.TypeString,
},
"class_path": &fields.FieldSchema{
"class_path": {
Type: fields.TypeString,
},
"jar_path": &fields.FieldSchema{
"jar_path": {
Type: fields.TypeString,
},
"jvm_options": &fields.FieldSchema{
"jvm_options": {
Type: fields.TypeArray,
},
"args": &fields.FieldSchema{
"args": {
Type: fields.TypeArray,
},
},


@ -190,7 +190,7 @@ func (f *FileRotator) createFile() error {
// flushPeriodically flushes the buffered writer every 100ms to the underlying
// file
func (f *FileRotator) flushPeriodically() {
for _ = range f.flushTicker.C {
for range f.flushTicker.C {
f.flushBuffer()
}
}
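`for _ = range f.flushTicker.C` becomes the bare `for range` form; when neither the index nor the value is used, `gofmt -s` drops the blank assignment (the same rewrite appears later in the syslog and stream-framer tests). A runnable sketch with an invented helper:

```go
package example

import "time"

// flushEvery consumes ticks without using the tick value, so the bare
// `for range` form is the idiomatic (and gofmt -s preferred) spelling.
func flushEvery(t *time.Ticker, flush func()) {
	for range t.C {
		flush()
	}
}
```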


@ -93,7 +93,7 @@ func (d *DockerLogParser) logContentIndex(line []byte) int {
}
}
}
// then the colon is what seperates it, followed by a space
// then the colon is what separates it, followed by a space
for i := cursor; i < len(line); i++ {
if line[i] == ':' && i+1 < len(line) && line[i+1] == ' ' {
cursor = i + 1


@ -33,7 +33,7 @@ func TestSyslogServer_Start_Shutdown(t *testing.T) {
received := false
go func() {
for _ = range s.messages {
for range s.messages {
received = true
}
}()


@ -56,9 +56,7 @@ type LogCollector interface {
// SyslogCollector is a LogCollector which starts a syslog server and does
// rotation to incoming stream
type SyslogCollector struct {
addr net.Addr
logConfig *structs.LogConfig
ctx *LogCollectorContext
ctx *LogCollectorContext
lro *FileRotator
lre *FileRotator


@ -81,59 +81,59 @@ func (d *LxcDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
Raw: config,
Schema: map[string]*fields.FieldSchema{
"template": &fields.FieldSchema{
"template": {
Type: fields.TypeString,
Required: true,
},
"distro": &fields.FieldSchema{
"distro": {
Type: fields.TypeString,
Required: false,
},
"release": &fields.FieldSchema{
"release": {
Type: fields.TypeString,
Required: false,
},
"arch": &fields.FieldSchema{
"arch": {
Type: fields.TypeString,
Required: false,
},
"image_variant": &fields.FieldSchema{
"image_variant": {
Type: fields.TypeString,
Required: false,
},
"image_server": &fields.FieldSchema{
"image_server": {
Type: fields.TypeString,
Required: false,
},
"gpg_key_id": &fields.FieldSchema{
"gpg_key_id": {
Type: fields.TypeString,
Required: false,
},
"gpg_key_server": &fields.FieldSchema{
"gpg_key_server": {
Type: fields.TypeString,
Required: false,
},
"disable_gpg": &fields.FieldSchema{
"disable_gpg": {
Type: fields.TypeString,
Required: false,
},
"flush_cache": &fields.FieldSchema{
"flush_cache": {
Type: fields.TypeString,
Required: false,
},
"force_cache": &fields.FieldSchema{
"force_cache": {
Type: fields.TypeString,
Required: false,
},
"template_args": &fields.FieldSchema{
"template_args": {
Type: fields.TypeArray,
Required: false,
},
"log_level": &fields.FieldSchema{
"log_level": {
Type: fields.TypeString,
Required: false,
},
"verbosity": &fields.FieldSchema{
"verbosity": {
Type: fields.TypeString,
Required: false,
},


@ -74,17 +74,17 @@ func (d *QemuDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
Raw: config,
Schema: map[string]*fields.FieldSchema{
"image_path": &fields.FieldSchema{
"image_path": {
Type: fields.TypeString,
Required: true,
},
"accelerator": &fields.FieldSchema{
"accelerator": {
Type: fields.TypeString,
},
"port_map": &fields.FieldSchema{
"port_map": {
Type: fields.TypeArray,
},
"args": &fields.FieldSchema{
"args": {
Type: fields.TypeArray,
},
},


@ -72,7 +72,7 @@ func TestQemuDriver_StartOpen_Wait(t *testing.T) {
CPU: 500,
MemoryMB: 512,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
{
ReservedPorts: []structs.Port{{Label: "main", Value: 22000}, {Label: "web", Value: 80}},
},
},
@ -142,7 +142,7 @@ func TestQemuDriverUser(t *testing.T) {
CPU: 500,
MemoryMB: 512,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
{
ReservedPorts: []structs.Port{{Label: "main", Value: 22000}, {Label: "web", Value: 80}},
},
},


@ -64,11 +64,11 @@ func (d *RawExecDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
Raw: config,
Schema: map[string]*fields.FieldSchema{
"command": &fields.FieldSchema{
"command": {
Type: fields.TypeString,
Required: true,
},
"args": &fields.FieldSchema{
"args": {
Type: fields.TypeArray,
},
},


@ -126,41 +126,41 @@ func (d *RktDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
Raw: config,
Schema: map[string]*fields.FieldSchema{
"image": &fields.FieldSchema{
"image": {
Type: fields.TypeString,
Required: true,
},
"command": &fields.FieldSchema{
"command": {
Type: fields.TypeString,
},
"args": &fields.FieldSchema{
"args": {
Type: fields.TypeArray,
},
"trust_prefix": &fields.FieldSchema{
"trust_prefix": {
Type: fields.TypeString,
},
"dns_servers": &fields.FieldSchema{
"dns_servers": {
Type: fields.TypeArray,
},
"dns_search_domains": &fields.FieldSchema{
"dns_search_domains": {
Type: fields.TypeArray,
},
"net": &fields.FieldSchema{
"net": {
Type: fields.TypeArray,
},
"port_map": &fields.FieldSchema{
"port_map": {
Type: fields.TypeArray,
},
"debug": &fields.FieldSchema{
"debug": {
Type: fields.TypeBool,
},
"volumes": &fields.FieldSchema{
"volumes": {
Type: fields.TypeArray,
},
"no_overlay": &fields.FieldSchema{
"no_overlay": {
Type: fields.TypeBool,
},
"insecure_options": &fields.FieldSchema{
"insecure_options": {
Type: fields.TypeArray,
},
},


@ -451,7 +451,7 @@ func TestRktDriver_PortsMapping(t *testing.T) {
"image": "docker://redis:latest",
"args": []string{"--version"},
"port_map": []map[string]string{
map[string]string{
{
"main": "6379-tcp",
},
},
@ -465,7 +465,7 @@ func TestRktDriver_PortsMapping(t *testing.T) {
MemoryMB: 256,
CPU: 512,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
{
IP: "127.0.0.1",
ReservedPorts: []structs.Port{{Label: "main", Value: 8080}},
},


@ -16,11 +16,3 @@ func isolateCommand(cmd *exec.Cmd) {
}
cmd.SysProcAttr.Setsid = true
}
// setChroot on a command
func setChroot(cmd *exec.Cmd, chroot string) {
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &syscall.SysProcAttr{}
}
cmd.SysProcAttr.Chroot = chroot
}


@ -7,7 +7,3 @@ import (
// TODO Figure out if this is needed in Wondows
func isolateCommand(cmd *exec.Cmd) {
}
// setChroot is a noop on Windows
func setChroot(cmd *exec.Cmd, chroot string) {
}


@ -236,10 +236,15 @@ func (f *EnvAWSFingerprint) linkSpeed() int {
}
res, err := client.Get(metadataURL + "instance-type")
if err != nil {
f.logger.Printf("[ERR]: fingerprint.env_aws: Error reading instance-type: %v", err)
return 0
}
body, err := ioutil.ReadAll(res.Body)
res.Body.Close()
if err != nil {
f.logger.Printf("[ERR]: fingerprint.env_aws: Error reading response body for instance-type")
f.logger.Printf("[ERR]: fingerprint.env_aws: Error reading response body for instance-type: %v", err)
return 0
}
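The instance-type lookup now returns early on a request error, closes the response body right after reading it, and includes the read error in the log line. A hedged, self-contained sketch of that pattern; the function signature, return type, and wiring here are illustrative, not Nomad's actual fingerprint plumbing:

```go
package example

import (
	"io/ioutil"
	"log"
	"net/http"
)

// instanceType fetches a metadata value, bailing out with a logged error
// instead of silently using an empty body.
func instanceType(client *http.Client, metadataURL string, logger *log.Logger) string {
	res, err := client.Get(metadataURL + "instance-type")
	if err != nil {
		logger.Printf("[ERR] fingerprint.env_aws: error reading instance-type: %v", err)
		return ""
	}
	body, err := ioutil.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		logger.Printf("[ERR] fingerprint.env_aws: error reading response body for instance-type: %v", err)
		return ""
	}
	return string(body)
}
```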


@ -167,7 +167,7 @@ func (f *EnvGCEFingerprint) Fingerprint(cfg *config.Config, node *structs.Node)
if unique {
key = structs.UniqueNamespace(key)
}
node.Attributes[key] = strings.Trim(string(value), "\n")
node.Attributes[key] = strings.Trim(value, "\n")
}
// These keys need everything before the final slash removed to be usable.
@ -190,18 +190,23 @@ func (f *EnvGCEFingerprint) Fingerprint(cfg *config.Config, node *structs.Node)
// Get internal and external IPs (if they exist)
value, err := f.Get("network-interfaces/", true)
var interfaces []GCEMetadataNetworkInterface
if err := json.Unmarshal([]byte(value), &interfaces); err != nil {
f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding network interface information: %s", err.Error())
}
if err != nil {
f.logger.Printf("[WARN] fingerprint.env_gce: Error retrieving network interface information: %s", err)
} else {
for _, intf := range interfaces {
prefix := "platform.gce.network." + lastToken(intf.Network)
uniquePrefix := "unique." + prefix
node.Attributes[prefix] = "true"
node.Attributes[uniquePrefix+".ip"] = strings.Trim(intf.Ip, "\n")
for index, accessConfig := range intf.AccessConfigs {
node.Attributes[uniquePrefix+".external-ip."+strconv.Itoa(index)] = accessConfig.ExternalIp
var interfaces []GCEMetadataNetworkInterface
if err := json.Unmarshal([]byte(value), &interfaces); err != nil {
f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding network interface information: %s", err.Error())
}
for _, intf := range interfaces {
prefix := "platform.gce.network." + lastToken(intf.Network)
uniquePrefix := "unique." + prefix
node.Attributes[prefix] = "true"
node.Attributes[uniquePrefix+".ip"] = strings.Trim(intf.Ip, "\n")
for index, accessConfig := range intf.AccessConfigs {
node.Attributes[uniquePrefix+".external-ip."+strconv.Itoa(index)] = accessConfig.ExternalIp
}
}
}
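The interleaved old/new lines above boil down to one change: the network-interface metadata is decoded only when the fetch itself succeeded, and the fetch error is logged instead of being ignored. A hedged sketch of the resulting control flow; the `fetch` callback, trimmed struct, and return value are invented stand-ins:

```go
package example

import (
	"encoding/json"
	"log"
	"strings"
)

// NetworkInterface is a trimmed stand-in for GCEMetadataNetworkInterface.
type NetworkInterface struct {
	Network string
	Ip      string
}

// networkAttrs decodes interface metadata only if fetching it succeeded,
// mirroring the restructured block above.
func networkAttrs(fetch func(path string) (string, error), logger *log.Logger) map[string]string {
	attrs := map[string]string{}
	value, err := fetch("network-interfaces/")
	if err != nil {
		logger.Printf("[WARN] fingerprint.env_gce: error retrieving network interface information: %s", err)
		return attrs
	}

	var interfaces []NetworkInterface
	if err := json.Unmarshal([]byte(value), &interfaces); err != nil {
		logger.Printf("[WARN] fingerprint.env_gce: error decoding network interface information: %s", err)
		return attrs
	}
	for _, intf := range interfaces {
		attrs["platform.gce.network."+intf.Network] = strings.Trim(intf.Ip, "\n")
	}
	return attrs
}
```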


@ -63,7 +63,7 @@ func (f *VaultFingerprint) Fingerprint(config *client.Config, node *structs.Node
}
node.Attributes["vault.accessible"] = strconv.FormatBool(true)
// We strip the Vault prefix becasue < 0.6.2 the version looks like:
// We strip the Vault prefix because < 0.6.2 the version looks like:
// status.Version = "Vault v0.6.1"
node.Attributes["vault.version"] = strings.TrimPrefix(status.Version, "Vault ")
node.Attributes["vault.cluster_id"] = status.ClusterID


@ -201,9 +201,3 @@ func (r *RestartTracker) jitter() time.Duration {
j := float64(r.rand.Int63n(d)) * jitter
return time.Duration(d + int64(j))
}
// Returns a tracker that never restarts.
func noRestartsTracker() *RestartTracker {
policy := &structs.RestartPolicy{Attempts: 0, Mode: structs.RestartPolicyModeFail}
return newRestartTracker(policy, structs.JobTypeBatch)
}


@ -108,17 +108,6 @@ func getAllocationBucket(tx *bolt.Tx, allocID string) (*bolt.Bucket, error) {
return alloc, nil
}
func allocationBucketExists(tx *bolt.Tx, allocID string) bool {
allocations := tx.Bucket(allocationsBucket)
if allocations == nil {
return false
}
// Retrieve the specific allocations bucket
alloc := allocations.Bucket([]byte(allocID))
return alloc != nil
}
// getTaskBucket returns the bucket used to persist state about a
// particular task. If the root allocation bucket, the specific
// allocation or task bucket doesn't exist, they will be created as long as the


@ -11,7 +11,6 @@ import (
type CpuStats struct {
prevCpuTime float64
prevTime time.Time
clkSpeed float64
totalCpus int
}


@ -61,7 +61,6 @@ type NodeStatsCollector interface {
// HostStatsCollector collects host resource usage stats
type HostStatsCollector struct {
clkSpeed float64
numCores int
statsCalculator map[string]*HostCpuStatsCalculator
logger *log.Logger


@ -268,14 +268,33 @@ func NewTaskRunner(logger *log.Logger, config *config.Config,
signalCh: make(chan SignalEvent),
}
tc.baseLabels = []metrics.Label{{"job", tc.alloc.Job.Name}, {"task_group", tc.alloc.TaskGroup}, {"alloc_id", tc.alloc.ID}, {"task", tc.task.Name}}
tc.baseLabels = []metrics.Label{
{
Name: "job",
Value: tc.alloc.Job.Name,
},
{
Name: "task_group",
Value: tc.alloc.TaskGroup,
},
{
Name: "alloc_id",
Value: tc.alloc.ID,
},
{
Name: "task",
Value: tc.task.Name,
},
}
return tc
}
// MarkReceived marks the task as received.
func (r *TaskRunner) MarkReceived() {
r.updater(r.task.Name, structs.TaskStatePending, structs.NewTaskEvent(structs.TaskReceived), false)
// We lazy sync this since there will be a follow up message almost
// immediately.
r.updater(r.task.Name, structs.TaskStatePending, structs.NewTaskEvent(structs.TaskReceived), true)
}
// WaitCh returns a channel to wait for termination
@ -1677,7 +1696,7 @@ func (r *TaskRunner) handleDestroy(handle driver.DriverHandle) (destroyed bool,
r.logger.Printf("[ERR] client: failed to kill task '%s' for alloc %q. Retrying in %v: %v",
r.task.Name, r.alloc.ID, backoff, err)
time.Sleep(time.Duration(backoff))
time.Sleep(backoff)
} else {
// Kill was successful
return true, nil


@ -39,6 +39,12 @@ func prefixedTestLogger(prefix string) *log.Logger {
return log.New(ioutil.Discard, "", 0)
}
// Returns a tracker that never restarts.
func noRestartsTracker() *RestartTracker {
policy := &structs.RestartPolicy{Attempts: 0, Mode: structs.RestartPolicyModeFail}
return newRestartTracker(policy, structs.JobTypeBatch)
}
type MockTaskStateUpdater struct {
state string
failed bool
@ -1073,10 +1079,6 @@ func TestTaskRunner_DeriveToken_Unrecoverable(t *testing.T) {
func TestTaskRunner_Template_Block(t *testing.T) {
t.Parallel()
testRetryRate = 2 * time.Second
defer func() {
testRetryRate = 0
}()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"


@ -29,7 +29,7 @@ func TestDiffAllocs(t *testing.T) {
alloc4.ID: alloc4,
},
filtered: map[string]struct{}{
alloc1.ID: struct{}{},
alloc1.ID: {},
},
}


@ -65,9 +65,6 @@ type vaultClient struct {
// running indicates if the renewal loop is active or not
running bool
// tokenData is the data of the passed VaultClient token
token *tokenData
// client is the API client to interact with vault
client *vaultapi.Client
@ -88,17 +85,6 @@ type vaultClient struct {
logger *log.Logger
}
// tokenData holds the relevant information about the Vault token passed to the
// client.
type tokenData struct {
CreationTTL int `mapstructure:"creation_ttl"`
TTL int `mapstructure:"ttl"`
Renewable bool `mapstructure:"renewable"`
Policies []string `mapstructure:"policies"`
Role string `mapstructure:"role"`
Root bool
}
// vaultClientRenewalRequest is a request object for renewal of both tokens and
// secret's leases.
type vaultClientRenewalRequest struct {


@ -25,10 +25,8 @@ import (
)
const (
clientHttpCheckInterval = 10 * time.Second
clientHttpCheckTimeout = 3 * time.Second
serverHttpCheckInterval = 10 * time.Second
serverHttpCheckTimeout = 6 * time.Second
agentHttpCheckInterval = 10 * time.Second
agentHttpCheckTimeout = 5 * time.Second
serverRpcCheckInterval = 10 * time.Second
serverRpcCheckTimeout = 3 * time.Second
serverSerfCheckInterval = 10 * time.Second
@ -419,7 +417,7 @@ func (a *Agent) setupServer() error {
PortLabel: a.config.AdvertiseAddrs.RPC,
Tags: []string{consul.ServiceTagRPC},
Checks: []*structs.ServiceCheck{
&structs.ServiceCheck{
{
Name: "Nomad Server RPC Check",
Type: "tcp",
Interval: serverRpcCheckInterval,
@ -433,7 +431,7 @@ func (a *Agent) setupServer() error {
PortLabel: a.config.AdvertiseAddrs.Serf,
Tags: []string{consul.ServiceTagSerf},
Checks: []*structs.ServiceCheck{
&structs.ServiceCheck{
{
Name: "Nomad Server Serf Check",
Type: "tcp",
Interval: serverSerfCheckInterval,
@ -538,8 +536,8 @@ func (a *Agent) agentHTTPCheck(server bool) *structs.ServiceCheck {
Type: "http",
Path: "/v1/agent/servers",
Protocol: "http",
Interval: clientHttpCheckInterval,
Timeout: clientHttpCheckTimeout,
Interval: agentHttpCheckInterval,
Timeout: agentHttpCheckTimeout,
PortLabel: httpCheckAddr,
}
// Switch to endpoint that doesn't require a leader for servers


@ -135,17 +135,12 @@ func TestHTTP_AgentSetServers(t *testing.T) {
t.Parallel()
assert := assert.New(t)
httpTest(t, nil, func(s *TestAgent) {
// Establish a baseline number of servers
req, err := http.NewRequest("GET", "/v1/agent/servers", nil)
assert.Nil(err)
respW := httptest.NewRecorder()
// Create the request
req, err = http.NewRequest("PUT", "/v1/agent/servers", nil)
req, err := http.NewRequest("PUT", "/v1/agent/servers", nil)
assert.Nil(err)
// Send the request
respW = httptest.NewRecorder()
respW := httptest.NewRecorder()
_, err = s.Server.AgentServersRequest(respW, req)
assert.NotNil(err)
assert.Contains(err.Error(), "missing server address")


@ -179,12 +179,12 @@ func TestConfig_Parse(t *testing.T) {
},
Sentinel: &config.SentinelConfig{
Imports: []*config.SentinelImport{
&config.SentinelImport{
{
Name: "foo",
Path: "foo",
Args: []string{"a", "b", "c"},
},
&config.SentinelImport{
{
Name: "bar",
Path: "bar",
Args: []string{"x", "y", "z"},


@ -315,7 +315,7 @@ func TestConfig_Merge(t *testing.T) {
},
Sentinel: &config.SentinelConfig{
Imports: []*config.SentinelImport{
&config.SentinelImport{
{
Name: "foo",
Path: "foo",
Args: []string{"a", "b", "c"},


@ -71,10 +71,6 @@ type AgentAPI interface {
UpdateTTL(id, output, status string) error
}
// addrParser is usually the Task.FindHostAndPortFor method for turning a
// portLabel into an address and port.
type addrParser func(portLabel string) (string, int)
// operations are submitted to the main loop via commit() for synchronizing
// with Consul.
type operations struct {


@ -154,6 +154,10 @@ func TestConsul_Integration(t *testing.T) {
// Block waiting for the service to appear
catalog := consulClient.Catalog()
res, meta, err := catalog.Service("httpd2", "test", nil)
if err != nil {
t.Fatalf("bad: %v", err)
}
for i := 0; len(res) == 0 && i < 10; i++ {
//Expected initial request to fail, do a blocking query
res, meta, err = catalog.Service("httpd2", "test", &consulapi.QueryOptions{WaitIndex: meta.LastIndex + 1, WaitTime: 3 * time.Second})


@ -1004,7 +1004,7 @@ func findClosest(entries []*allocdir.AllocFileInfo, desiredIdx, desiredOffset in
}
// Binary search the indexes to get the desiredIdx
sort.Sort(indexTupleArray(indexes))
sort.Sort(indexes)
i := sort.Search(len(indexes), func(i int) bool { return indexes[i].idx >= desiredIdx })
l := len(indexes)
if i == l {


@ -334,7 +334,7 @@ func TestStreamFramer_Order(t *testing.T) {
}
expected := bytes.NewBuffer(make([]byte, 0, 100000))
for _, _ = range files {
for range files {
expected.Write(input.Bytes())
}
receivedBuf := bytes.NewBuffer(make([]byte, 0, 100000))
@ -424,7 +424,7 @@ func TestStreamFramer_Order_PlainText(t *testing.T) {
}
expected := bytes.NewBuffer(make([]byte, 0, 100000))
for _, _ = range files {
for range files {
expected.Write(input.Bytes())
}
receivedBuf := bytes.NewBuffer(make([]byte, 0, 100000))


@ -1394,13 +1394,13 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
"hello": "world",
},
Services: []*structs.Service{
&structs.Service{
{
Name: "serviceA",
Tags: []string{"1", "2"},
PortLabel: "foo",
AddressMode: "auto",
Checks: []*structs.ServiceCheck{
&structs.ServiceCheck{
{
Name: "bar",
Type: "http",
Command: "foo",


@ -76,7 +76,7 @@ func (l *logWriter) Write(p []byte) (n int, err error) {
l.logs[l.index] = string(p)
l.index = (l.index + 1) % len(l.logs)
for lh, _ := range l.handlers {
for lh := range l.handlers {
lh.HandleLog(string(p))
}
return


@ -9,7 +9,6 @@ import (
"time"
humanize "github.com/dustin/go-humanize"
"github.com/mitchellh/colorstring"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/api/contexts"
@ -19,7 +18,6 @@ import (
type AllocStatusCommand struct {
Meta
color *colorstring.Colorize
}
func (c *AllocStatusCommand) Help() string {


@ -247,7 +247,7 @@ func (c *EvalStatusCommand) Run(args []string) int {
func sortedTaskGroupFromMetrics(groups map[string]*api.AllocationMetric) []string {
tgs := make([]string, 0, len(groups))
for tg, _ := range groups {
for tg := range groups {
tgs = append(tgs, tg)
}
sort.Strings(tgs)


@ -47,7 +47,7 @@ type Meta struct {
// namespace to send API requests
namespace string
// token is used for ACLs to access privilaged information
// token is used for ACLs to access privileged information
token string
caCert string


@ -49,11 +49,6 @@ type allocState struct {
client string
clientDesc string
index uint64
// full is the allocation struct with full details. This
// must be queried for explicitly so it is only included
// if there is important error information inside.
full *api.Allocation
}
// monitor wraps an evaluation monitor and holds metadata and
@ -328,17 +323,6 @@ func (m *monitor) monitor(evalID string, allowPrefix bool) int {
return 0
}
// dumpAllocStatus is a helper to generate a more user-friendly error message
// for scheduling failures, displaying a high level status of why the job
// could not be scheduled out.
func dumpAllocStatus(ui cli.Ui, alloc *api.Allocation, length int) {
// Print filter stats
ui.Output(fmt.Sprintf("Allocation %q status %q (%d/%d nodes filtered)",
limit(alloc.ID, length), alloc.ClientStatus,
alloc.Metrics.NodesFiltered, alloc.Metrics.NodesEvaluated))
ui.Output(formatAllocMetrics(alloc.Metrics, true, " "))
}
func formatAllocMetrics(metrics *api.AllocationMetric, scores bool, prefix string) string {
// Print a helpful message if we have an eligibility problem
var out string


@ -5,7 +5,6 @@ import (
"testing"
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/mitchellh/cli"
)
@ -72,7 +71,7 @@ func TestMonitor_Update_Allocs(t *testing.T) {
// New allocations write new logs
state := &evalState{
allocs: map[string]*allocState{
"alloc1": &allocState{
"alloc1": {
id: "87654321-abcd-efab-cdef-123456789abc",
group: "group1",
node: "12345678-abcd-efab-cdef-123456789abc",
@ -110,7 +109,7 @@ func TestMonitor_Update_Allocs(t *testing.T) {
// Alloc updates cause more log lines
state = &evalState{
allocs: map[string]*allocState{
"alloc1": &allocState{
"alloc1": {
id: "87654321-abcd-efab-cdef-123456789abc",
group: "group1",
node: "12345678-abcd-efab-cdef-123456789abc",
@ -145,7 +144,7 @@ func TestMonitor_Update_AllocModification(t *testing.T) {
state := &evalState{
index: 2,
allocs: map[string]*allocState{
"alloc3": &allocState{
"alloc3": {
id: "87654321-abcd-bafe-cdef-123456789abc",
node: "12345678-abcd-efab-cdef-123456789abc",
group: "group2",
@ -286,72 +285,3 @@ func TestMonitor_MonitorWithPrefix(t *testing.T) {
}
}
func TestMonitor_DumpAllocStatus(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
// Create an allocation and dump its status to the UI
alloc := &api.Allocation{
ID: "87654321-abcd-efab-cdef-123456789abc",
TaskGroup: "group1",
ClientStatus: structs.AllocClientStatusRunning,
Metrics: &api.AllocationMetric{
NodesEvaluated: 10,
NodesFiltered: 5,
NodesExhausted: 1,
DimensionExhausted: map[string]int{
"cpu": 1,
},
ConstraintFiltered: map[string]int{
"$attr.kernel.name = linux": 1,
},
ClassExhausted: map[string]int{
"web-large": 1,
},
},
}
dumpAllocStatus(ui, alloc, fullId)
// Check the output
out := ui.OutputWriter.String()
if !strings.Contains(out, "87654321-abcd-efab-cdef-123456789abc") {
t.Fatalf("missing alloc\n\n%s", out)
}
if !strings.Contains(out, structs.AllocClientStatusRunning) {
t.Fatalf("missing status\n\n%s", out)
}
if !strings.Contains(out, "5/10") {
t.Fatalf("missing filter stats\n\n%s", out)
}
if !strings.Contains(
out, `Constraint "$attr.kernel.name = linux" filtered 1 nodes`) {
t.Fatalf("missing constraint\n\n%s", out)
}
if !strings.Contains(out, "Resources exhausted on 1 nodes") {
t.Fatalf("missing resource exhaustion\n\n%s", out)
}
if !strings.Contains(out, `Class "web-large" exhausted on 1 nodes`) {
t.Fatalf("missing class exhaustion\n\n%s", out)
}
if !strings.Contains(out, `Dimension "cpu" exhausted on 1 nodes`) {
t.Fatalf("missing dimension exhaustion\n\n%s", out)
}
ui.OutputWriter.Reset()
// Dumping alloc status with no eligible nodes adds a warning
alloc.Metrics.NodesEvaluated = 0
dumpAllocStatus(ui, alloc, shortId)
// Check the output
out = ui.OutputWriter.String()
if !strings.Contains(out, "No nodes were eligible") {
t.Fatalf("missing eligibility warning\n\n%s", out)
}
if strings.Contains(out, "87654321-abcd-efab-cdef-123456789abc") {
t.Fatalf("expected truncated id, got %s", out)
}
if !strings.Contains(out, "87654321") {
t.Fatalf("expected alloc id, got %s", out)
}
}
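
Most of the churn in this test file is gofmt -s (the gofmt linter with simplification) eliding redundant element types from composite literals: once the map or slice type names the element type, spelling out &allocState{...} for every entry adds nothing. A small illustrative sketch, not taken from the repository:

package main

import "fmt"

type allocState struct{ id, group string }

func main() {
	// Before simplification: the element type is repeated per entry.
	verbose := map[string]*allocState{
		"alloc1": &allocState{id: "a1", group: "g1"},
	}
	// After gofmt -s: the map's value type already implies *allocState,
	// so each element can be written as {id: ..., group: ...}.
	simplified := map[string]*allocState{
		"alloc1": {id: "a1", group: "g1"},
	}
	fmt.Println(verbose["alloc1"].id, simplified["alloc1"].group)
}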

View file

@ -29,7 +29,7 @@ func (c *NamespaceDeleteCommand) AutocompleteFlags() complete.Flags {
}
func (c *NamespaceDeleteCommand) AutocompleteArgs() complete.Predictor {
filter := map[string]struct{}{"default": struct{}{}}
filter := map[string]struct{}{"default": {}}
return NamespacePredictor(c.Meta.Client, filter)
}

View file

@ -105,7 +105,7 @@ func (c *NodeDrainCommand) Run(args []string) int {
}
// If -self flag is set then determine the current node.
nodeID := ""
var nodeID string
if !self {
nodeID = args[0]
} else {
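
The nodeID change here (and the matching edits in node_status.go and the small generator below) swaps an explicit empty-string initialization for a plain var declaration: when the zero value is what you want until a branch assigns something, var says so directly. A tiny sketch of the idiom with made-up argument handling:

package main

import "fmt"

func resolveNodeID(self bool, args []string) string {
	// var leaves nodeID as the zero value ("") until a branch sets it,
	// instead of writing nodeID := "" and immediately overwriting it.
	var nodeID string
	if !self {
		nodeID = args[0]
	} else {
		nodeID = "self"
	}
	return nodeID
}

func main() {
	fmt.Println(resolveNodeID(false, []string{"node-1"}))
}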

View file

@ -8,7 +8,6 @@ import (
"time"
humanize "github.com/dustin/go-humanize"
"github.com/mitchellh/colorstring"
"github.com/posener/complete"
"github.com/hashicorp/nomad/api"
@ -26,7 +25,6 @@ const (
type NodeStatusCommand struct {
Meta
color *colorstring.Colorize
length int
short bool
verbose bool
@ -221,7 +219,7 @@ func (c *NodeStatusCommand) Run(args []string) int {
}
// Query the specific node
nodeID := ""
var nodeID string
if !c.self {
nodeID = args[0]
} else {

View file

@ -8,7 +8,6 @@ import (
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/scheduler"
"github.com/mitchellh/colorstring"
"github.com/posener/complete"
)
@ -26,7 +25,6 @@ potentially invalid.`
type PlanCommand struct {
Meta
JobGetter
color *colorstring.Colorize
}
func (c *PlanCommand) Help() string {

View file

@ -1,8 +1,6 @@
package command
import (
"bytes"
"encoding/gob"
"encoding/json"
"fmt"
"os"
@ -13,7 +11,6 @@ import (
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/posener/complete"
)
@ -280,19 +277,3 @@ func parseCheckIndex(input string) (uint64, bool, error) {
u, err := strconv.ParseUint(input, 10, 64)
return u, true, err
}
// convertStructJob is used to take a *structs.Job and convert it to an *api.Job.
// This function is just a hammer and probably needs to be revisited.
func convertStructJob(in *structs.Job) (*api.Job, error) {
gob.Register([]map[string]interface{}{})
gob.Register([]interface{}{})
var apiJob *api.Job
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(in); err != nil {
return nil, err
}
if err := gob.NewDecoder(buf).Decode(&apiJob); err != nil {
return nil, err
}
return apiJob, nil
}
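
The deleted convertStructJob (and with it the bytes and encoding/gob imports above) converted an internal job struct to the API type by serializing through gob, which matches exported fields by name across distinct struct types. A self-contained sketch of that pattern with hypothetical internalJob/apiJob stand-ins; it only works while every field of interest is exported and structurally compatible, which is part of why the helper was described as a hammer:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// Hypothetical stand-ins for structs.Job and api.Job.
type internalJob struct {
	ID   string
	Tags []string
}

type apiJob struct {
	ID   string
	Tags []string
}

// roundTrip mirrors the removed helper: encode one type, decode into the
// other, letting gob match exported fields by name.
func roundTrip(in *internalJob) (*apiJob, error) {
	var out *apiJob
	buf := new(bytes.Buffer)
	if err := gob.NewEncoder(buf).Encode(in); err != nil {
		return nil, err
	}
	if err := gob.NewDecoder(buf).Decode(&out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	j, err := roundTrip(&internalJob{ID: "example", Tags: []string{"service"}})
	fmt.Println(j, err)
}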

View file

@ -19,7 +19,7 @@ func main() {
return
}
total := 0
var total int
if len(os.Args) != 2 {
fmt.Println("need 1 arg")
return

View file

@ -14,7 +14,7 @@ func TestFieldDataGet(t *testing.T) {
}{
"string type, string value": {
map[string]*FieldSchema{
"foo": &FieldSchema{Type: TypeString},
"foo": {Type: TypeString},
},
map[string]interface{}{
"foo": "bar",
@ -25,7 +25,7 @@ func TestFieldDataGet(t *testing.T) {
"string type, int value": {
map[string]*FieldSchema{
"foo": &FieldSchema{Type: TypeInt},
"foo": {Type: TypeInt},
},
map[string]interface{}{
"foo": 42,
@ -36,7 +36,7 @@ func TestFieldDataGet(t *testing.T) {
"string type, unset value": {
map[string]*FieldSchema{
"foo": &FieldSchema{Type: TypeString},
"foo": {Type: TypeString},
},
map[string]interface{}{},
"foo",
@ -45,7 +45,7 @@ func TestFieldDataGet(t *testing.T) {
"string type, unset value with default": {
map[string]*FieldSchema{
"foo": &FieldSchema{
"foo": {
Type: TypeString,
Default: "bar",
},
@ -57,7 +57,7 @@ func TestFieldDataGet(t *testing.T) {
"int type, int value": {
map[string]*FieldSchema{
"foo": &FieldSchema{Type: TypeInt},
"foo": {Type: TypeInt},
},
map[string]interface{}{
"foo": 42,
@ -68,7 +68,7 @@ func TestFieldDataGet(t *testing.T) {
"bool type, bool value": {
map[string]*FieldSchema{
"foo": &FieldSchema{Type: TypeBool},
"foo": {Type: TypeBool},
},
map[string]interface{}{
"foo": false,
@ -79,7 +79,7 @@ func TestFieldDataGet(t *testing.T) {
"map type, map value": {
map[string]*FieldSchema{
"foo": &FieldSchema{Type: TypeMap},
"foo": {Type: TypeMap},
},
map[string]interface{}{
"foo": map[string]interface{}{
@ -94,7 +94,7 @@ func TestFieldDataGet(t *testing.T) {
"array type, array value": {
map[string]*FieldSchema{
"foo": &FieldSchema{Type: TypeArray},
"foo": {Type: TypeArray},
},
map[string]interface{}{
"foo": []interface{}{},

View file

@ -113,23 +113,17 @@ func flatten(prefix string, v reflect.Value, primitiveOnly, enteredStruct bool,
// getSubPrefix takes the current prefix and the next subfield and returns an
// appropriate prefix.
func getSubPrefix(curPrefix, subField string) string {
newPrefix := ""
if curPrefix != "" {
newPrefix = fmt.Sprintf("%s.%s", curPrefix, subField)
} else {
newPrefix = fmt.Sprintf("%s", subField)
return fmt.Sprintf("%s.%s", curPrefix, subField)
}
return newPrefix
return fmt.Sprintf("%s", subField)
}
// getSubKeyPrefix takes the current prefix and the next subfield and returns an
// appropriate prefix for a map field.
func getSubKeyPrefix(curPrefix, subField string) string {
newPrefix := ""
if curPrefix != "" {
newPrefix = fmt.Sprintf("%s[%s]", curPrefix, subField)
} else {
newPrefix = fmt.Sprintf("%s", subField)
return fmt.Sprintf("%s[%s]", curPrefix, subField)
}
return newPrefix
return fmt.Sprintf("%s", subField)
}
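
The interleaved before/after lines above are hard to follow in this rendering; as I read the hunk, the two helpers now return early instead of threading a newPrefix variable, roughly:

package main

import "fmt"

// getSubPrefix takes the current prefix and the next subfield and returns an
// appropriate prefix.
func getSubPrefix(curPrefix, subField string) string {
	if curPrefix != "" {
		return fmt.Sprintf("%s.%s", curPrefix, subField)
	}
	return fmt.Sprintf("%s", subField)
}

// getSubKeyPrefix does the same for a map field, bracketing the key.
func getSubKeyPrefix(curPrefix, subField string) string {
	if curPrefix != "" {
		return fmt.Sprintf("%s[%s]", curPrefix, subField)
	}
	return fmt.Sprintf("%s", subField)
}

func main() {
	fmt.Println(getSubPrefix("task", "config"), getSubKeyPrefix("labels", "env"))
}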

View file

@ -167,10 +167,10 @@ func TestFlatMap(t *testing.T) {
Input: &containers{
myslice: []int{1, 2},
mymap: map[string]linkedList{
"foo": linkedList{
"foo": {
value: "l1",
},
"bar": linkedList{
"bar": {
value: "l2",
},
},
@ -188,10 +188,10 @@ func TestFlatMap(t *testing.T) {
Input: &containers{
myslice: []int{1, 2},
mymap: map[string]linkedList{
"foo": linkedList{
"foo": {
value: "l1",
},
"bar": linkedList{
"bar": {
value: "l2",
},
},

View file

@ -180,7 +180,7 @@ func CopyMapStringStruct(m map[string]struct{}) map[string]struct{} {
}
c := make(map[string]struct{}, l)
for k, _ := range m {
for k := range m {
c[k] = struct{}{}
}
return c
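
This hunk (and the matching ones in command/commands.go and nomad/server.go further down) is gofmt -s dropping the redundant blank value variable from key-only range loops. A throwaway example of the same rewrite:

package main

import "fmt"

func main() {
	m := map[string]struct{}{"a": {}, "b": {}}
	c := make(map[string]struct{}, len(m))
	// gofmt -s rewrites `for k, _ := range m` to `for k := range m`;
	// the blank second variable is redundant when only keys are used.
	for k := range m {
		c[k] = struct{}{}
	}
	fmt.Println(len(c))
}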

View file

@ -23,8 +23,8 @@ func TestSliceStringIsSubset(t *testing.T) {
func TestMapStringStringSliceValueSet(t *testing.T) {
m := map[string][]string{
"foo": []string{"1", "2"},
"bar": []string{"3"},
"foo": {"1", "2"},
"bar": {"3"},
"baz": nil,
}
@ -38,8 +38,8 @@ func TestMapStringStringSliceValueSet(t *testing.T) {
func TestCopyMapStringSliceString(t *testing.T) {
m := map[string][]string{
"x": []string{"a", "b", "c"},
"y": []string{"1", "2", "3"},
"x": {"a", "b", "c"},
"y": {"1", "2", "3"},
"z": nil,
}

View file

@ -39,7 +39,7 @@ func TestParse(t *testing.T) {
},
Constraints: []*api.Constraint{
&api.Constraint{
{
LTarget: "kernel.os",
RTarget: "windows",
Operand: "=",
@ -57,10 +57,10 @@ func TestParse(t *testing.T) {
},
TaskGroups: []*api.TaskGroup{
&api.TaskGroup{
{
Name: helper.StringToPtr("outside"),
Tasks: []*api.Task{
&api.Task{
{
Name: "outside",
Driver: "java",
Config: map[string]interface{}{
@ -73,11 +73,11 @@ func TestParse(t *testing.T) {
},
},
&api.TaskGroup{
{
Name: helper.StringToPtr("binsl"),
Count: helper.IntToPtr(5),
Constraints: []*api.Constraint{
&api.Constraint{
{
LTarget: "kernel.os",
RTarget: "linux",
Operand: "=",
@ -107,14 +107,14 @@ func TestParse(t *testing.T) {
Canary: helper.IntToPtr(2),
},
Tasks: []*api.Task{
&api.Task{
{
Name: "binstore",
Driver: "docker",
User: "bob",
Config: map[string]interface{}{
"image": "hashicorp/binstore",
"labels": []map[string]interface{}{
map[string]interface{}{
{
"FOO": "bar",
},
},
@ -147,7 +147,7 @@ func TestParse(t *testing.T) {
CPU: helper.IntToPtr(500),
MemoryMB: helper.IntToPtr(128),
Networks: []*api.NetworkResource{
&api.NetworkResource{
{
MBits: helper.IntToPtr(100),
ReservedPorts: []api.Port{{Label: "one", Value: 1}, {Label: "two", Value: 2}, {Label: "three", Value: 3}},
DynamicPorts: []api.Port{{Label: "http", Value: 0}, {Label: "https", Value: 0}, {Label: "admin", Value: 0}},
@ -204,7 +204,7 @@ func TestParse(t *testing.T) {
},
Leader: true,
},
&api.Task{
{
Name: "storagelocker",
Driver: "docker",
User: "",
@ -217,7 +217,7 @@ func TestParse(t *testing.T) {
IOPS: helper.IntToPtr(30),
},
Constraints: []*api.Constraint{
&api.Constraint{
{
LTarget: "kernel.arch",
RTarget: "amd64",
Operand: "=",
@ -270,7 +270,7 @@ func TestParse(t *testing.T) {
ID: helper.StringToPtr("foo"),
Name: helper.StringToPtr("foo"),
Constraints: []*api.Constraint{
&api.Constraint{
{
LTarget: "$attr.kernel.version",
RTarget: "~> 3.2",
Operand: structs.ConstraintVersion,
@ -286,7 +286,7 @@ func TestParse(t *testing.T) {
ID: helper.StringToPtr("foo"),
Name: helper.StringToPtr("foo"),
Constraints: []*api.Constraint{
&api.Constraint{
{
LTarget: "$attr.kernel.version",
RTarget: "[0-9.]+",
Operand: structs.ConstraintRegex,
@ -302,7 +302,7 @@ func TestParse(t *testing.T) {
ID: helper.StringToPtr("foo"),
Name: helper.StringToPtr("foo"),
Constraints: []*api.Constraint{
&api.Constraint{
{
LTarget: "$meta.data",
RTarget: "foo,bar,baz",
Operand: structs.ConstraintSetContains,
@ -318,7 +318,7 @@ func TestParse(t *testing.T) {
ID: helper.StringToPtr("foo"),
Name: helper.StringToPtr("foo"),
Constraints: []*api.Constraint{
&api.Constraint{
{
Operand: structs.ConstraintDistinctHosts,
},
},
@ -332,7 +332,7 @@ func TestParse(t *testing.T) {
ID: helper.StringToPtr("foo"),
Name: helper.StringToPtr("foo"),
Constraints: []*api.Constraint{
&api.Constraint{
{
Operand: structs.ConstraintDistinctProperty,
LTarget: "${meta.rack}",
},
@ -371,16 +371,16 @@ func TestParse(t *testing.T) {
ID: helper.StringToPtr("foo"),
Name: helper.StringToPtr("foo"),
TaskGroups: []*api.TaskGroup{
&api.TaskGroup{
{
Name: helper.StringToPtr("bar"),
Tasks: []*api.Task{
&api.Task{
{
Name: "bar",
Driver: "docker",
Config: map[string]interface{}{
"image": "hashicorp/image",
"port_map": []map[string]interface{}{
map[string]interface{}{
{
"db": 1234,
},
},
@ -405,10 +405,10 @@ func TestParse(t *testing.T) {
ID: helper.StringToPtr("binstore-storagelocker"),
Name: helper.StringToPtr("binstore-storagelocker"),
TaskGroups: []*api.TaskGroup{
&api.TaskGroup{
{
Name: helper.StringToPtr("binsl"),
Tasks: []*api.Task{
&api.Task{
{
Name: "binstore",
Driver: "docker",
Artifacts: []*api.TaskArtifact{
@ -442,11 +442,11 @@ func TestParse(t *testing.T) {
Name: helper.StringToPtr("check_initial_status"),
Type: helper.StringToPtr("service"),
TaskGroups: []*api.TaskGroup{
&api.TaskGroup{
{
Name: helper.StringToPtr("group"),
Count: helper.IntToPtr(1),
Tasks: []*api.Task{
&api.Task{
{
Name: "task",
Services: []*api.Service{
{
@ -492,10 +492,10 @@ func TestParse(t *testing.T) {
ID: helper.StringToPtr("example"),
Name: helper.StringToPtr("example"),
TaskGroups: []*api.TaskGroup{
&api.TaskGroup{
{
Name: helper.StringToPtr("cache"),
Tasks: []*api.Task{
&api.Task{
{
Name: "redis",
Vault: &api.Vault{
Policies: []string{"group"},
@ -503,7 +503,7 @@ func TestParse(t *testing.T) {
ChangeMode: helper.StringToPtr(structs.VaultChangeModeRestart),
},
},
&api.Task{
{
Name: "redis2",
Vault: &api.Vault{
Policies: []string{"task"},
@ -513,10 +513,10 @@ func TestParse(t *testing.T) {
},
},
},
&api.TaskGroup{
{
Name: helper.StringToPtr("cache2"),
Tasks: []*api.Task{
&api.Task{
{
Name: "redis",
Vault: &api.Vault{
Policies: []string{"job"},

View file

@ -27,7 +27,7 @@ func Run(args []string) int {
func RunCustom(args []string, commands map[string]cli.CommandFactory) int {
// Build the commands to include in the help now.
commandsInclude := make([]string, 0, len(commands))
for k, _ := range commands {
for k := range commands {
switch k {
case "deployment list", "deployment status", "deployment pause",
"deployment resume", "deployment fail", "deployment promote":

View file

@ -283,7 +283,7 @@ func (a *ACL) Bootstrap(args *structs.ACLTokenBootstrapRequest, reply *structs.A
}
defer metrics.MeasureSince([]string{"nomad", "acl", "bootstrap"}, time.Now())
// Always ignore the reset index from the arguements
// Always ignore the reset index from the arguments
args.ResetIndex = 0
// Snapshot the state

View file

@ -11,7 +11,7 @@ import (
)
// NamespacePolicy is a helper for generating the policy hcl for a given
// namepsace. Either policy or capabilites may be nil but not both.
// namepsace. Either policy or capabilities may be nil but not both.
func NamespacePolicy(namespace string, policy string, capabilities []string) string {
policyHCL := fmt.Sprintf("namespace %q {", namespace)
if policy != "" {

View file

@ -356,7 +356,7 @@ func TestFSM_RegisterJob_BadNamespace(t *testing.T) {
if !ok {
t.Fatalf("resp not of error type: %T %v", resp, resp)
}
if !strings.Contains(err.Error(), "non-existant namespace") {
if !strings.Contains(err.Error(), "non-existent namespace") {
t.Fatalf("bad error: %v", err)
}
@ -1362,11 +1362,11 @@ func TestFSM_DeploymentPromotion(t *testing.T) {
d := mock.Deployment()
d.JobID = j.ID
d.TaskGroups = map[string]*structs.DeploymentState{
"web": &structs.DeploymentState{
"web": {
DesiredTotal: 10,
DesiredCanaries: 1,
},
"foo": &structs.DeploymentState{
"foo": {
DesiredTotal: 10,
DesiredCanaries: 1,
},
@ -2156,7 +2156,7 @@ func TestFSM_SnapshotRestore_AddMissingSummary(t *testing.T) {
JobID: alloc.Job.ID,
Namespace: alloc.Job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{
"web": {
Starting: 1,
},
},
@ -2210,7 +2210,7 @@ func TestFSM_ReconcileSummaries(t *testing.T) {
JobID: job1.ID,
Namespace: job1.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{
"web": {
Queued: 10,
},
},
@ -2229,7 +2229,7 @@ func TestFSM_ReconcileSummaries(t *testing.T) {
JobID: alloc.Job.ID,
Namespace: alloc.Job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{
"web": {
Queued: 9,
Starting: 1,
},

View file

@ -160,7 +160,7 @@ func TestJobEndpoint_Register_InvalidNamespace(t *testing.T) {
// Try without a token, expect failure
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
if err == nil || !strings.Contains(err.Error(), "non-existant namespace") {
if err == nil || !strings.Contains(err.Error(), "non-existent namespace") {
t.Fatalf("expected namespace error: %v", err)
}
@ -2096,7 +2096,7 @@ func TestJobEndpoint_GetJobSummary(t *testing.T) {
JobID: job.ID,
Namespace: job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{},
"web": {},
},
Children: new(structs.JobChildrenSummary),
CreateIndex: job.CreateIndex,
@ -2158,7 +2158,7 @@ func TestJobEndpoint_Summary_ACL(t *testing.T) {
JobID: job.ID,
Namespace: job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{},
"web": {},
},
Children: new(structs.JobChildrenSummary),
CreateIndex: job.CreateIndex,
@ -2248,7 +2248,6 @@ func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) {
}
start = time.Now()
var resp1 structs.JobSummaryResponse
start = time.Now()
if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp1); err != nil {
t.Fatalf("err: %v", err)
}
@ -3063,7 +3062,7 @@ func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) {
job := mock.Job()
signal := "SIGUSR1"
job.TaskGroups[0].Tasks[0].Templates = []*structs.Template{
&structs.Template{
{
SourcePath: "foo",
DestPath: "bar",
ChangeMode: structs.TemplateChangeModeSignal,

View file

@ -25,7 +25,7 @@ func Node() *structs.Node {
DiskMB: 100 * 1024,
IOPS: 150,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
{
Device: "eth0",
CIDR: "192.168.0.100/32",
MBits: 1000,
@ -37,7 +37,7 @@ func Node() *structs.Node {
MemoryMB: 256,
DiskMB: 4 * 1024,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{{Label: "main", Value: 22}},
@ -71,14 +71,14 @@ func Job() *structs.Job {
AllAtOnce: false,
Datacenters: []string{"dc1"},
Constraints: []*structs.Constraint{
&structs.Constraint{
{
LTarget: "${attr.kernel.name}",
RTarget: "linux",
Operand: "=",
},
},
TaskGroups: []*structs.TaskGroup{
&structs.TaskGroup{
{
Name: "web",
Count: 10,
EphemeralDisk: &structs.EphemeralDisk{
@ -91,7 +91,7 @@ func Job() *structs.Job {
Mode: structs.RestartPolicyModeDelay,
},
Tasks: []*structs.Task{
&structs.Task{
{
Name: "web",
Driver: "exec",
Config: map[string]interface{}{
@ -126,7 +126,7 @@ func Job() *structs.Job {
CPU: 500,
MemoryMB: 256,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
{
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http"}, {Label: "admin"}},
},
@ -168,14 +168,14 @@ func SystemJob() *structs.Job {
AllAtOnce: false,
Datacenters: []string{"dc1"},
Constraints: []*structs.Constraint{
&structs.Constraint{
{
LTarget: "${attr.kernel.name}",
RTarget: "linux",
Operand: "=",
},
},
TaskGroups: []*structs.TaskGroup{
&structs.TaskGroup{
{
Name: "web",
Count: 1,
RestartPolicy: &structs.RestartPolicy{
@ -186,7 +186,7 @@ func SystemJob() *structs.Job {
},
EphemeralDisk: structs.DefaultEphemeralDisk(),
Tasks: []*structs.Task{
&structs.Task{
{
Name: "web",
Driver: "exec",
Config: map[string]interface{}{
@ -197,7 +197,7 @@ func SystemJob() *structs.Job {
CPU: 500,
MemoryMB: 256,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
{
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http"}},
},
@ -269,7 +269,7 @@ func Alloc() *structs.Allocation {
MemoryMB: 256,
DiskMB: 150,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{{Label: "main", Value: 5000}},
@ -279,11 +279,11 @@ func Alloc() *structs.Allocation {
},
},
TaskResources: map[string]*structs.Resources{
"web": &structs.Resources{
"web": {
CPU: 500,
MemoryMB: 256,
Networks: []*structs.NetworkResource{
&structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{{Label: "main", Value: 5000}},
@ -323,7 +323,7 @@ func Deployment() *structs.Deployment {
JobModifyIndex: 20,
JobCreateIndex: 18,
TaskGroups: map[string]*structs.DeploymentState{
"web": &structs.DeploymentState{
"web": {
DesiredTotal: 10,
},
},

View file

@ -813,7 +813,7 @@ func TestClientEndpoint_Drain_Down(t *testing.T) {
JobID: job.ID,
Namespace: job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{
"web": {
Queued: 1,
Lost: 1,
},
@ -834,7 +834,7 @@ func TestClientEndpoint_Drain_Down(t *testing.T) {
JobID: job1.ID,
Namespace: job1.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{
"web": {
Lost: 1,
},
},

View file

@ -39,7 +39,7 @@ func TestOperator_RaftGetConfiguration(t *testing.T) {
me := future.Configuration().Servers[0]
expected := structs.RaftConfigurationResponse{
Servers: []*structs.RaftServer{
&structs.RaftServer{
{
ID: me.ID,
Node: fmt.Sprintf("%v.%v", s1.config.NodeName, s1.config.Region),
Address: me.Address,

View file

@ -190,7 +190,7 @@ func (p *PeriodicDispatch) Tracked() []*structs.Job {
// Add begins tracking of a periodic job. If it is already tracked, it acts as
// an update to the jobs periodic spec. The method returns whether the job was
// added and any error that may have occured.
// added and any error that may have occurred.
func (p *PeriodicDispatch) Add(job *structs.Job) (added bool, err error) {
p.l.Lock()
defer p.l.Unlock()

View file

@ -188,7 +188,7 @@ func TestPeriodicDispatch_Add_UpdateJob(t *testing.T) {
// Update the job and add it again.
job.Periodic.Spec = "foo"
if added, err := p.Add(job); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed: %v %v", added, err)
}
tracked = p.Tracked()
@ -228,7 +228,7 @@ func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) {
p, _ := testPeriodicDispatcher()
job := mock.PeriodicJob()
if added, err := p.Add(job); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
tracked := p.Tracked()
@ -239,7 +239,7 @@ func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) {
// Update the job to be non-periodic and add it again.
job.Periodic = nil
if added, err := p.Add(job); err != nil || added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
tracked = p.Tracked()
@ -257,14 +257,14 @@ func TestPeriodicDispatch_Add_TriggersUpdate(t *testing.T) {
// Add it.
if added, err := p.Add(job); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
// Update it to be sooner and re-add.
expected := time.Now().Round(1 * time.Second).Add(1 * time.Second)
job.Periodic.Spec = fmt.Sprintf("%d", expected.Unix())
if added, err := p.Add(job); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
// Check that nothing is created.
@ -305,7 +305,7 @@ func TestPeriodicDispatch_Remove_Tracked(t *testing.T) {
job := mock.PeriodicJob()
if added, err := p.Add(job); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
tracked := p.Tracked()
@ -332,12 +332,12 @@ func TestPeriodicDispatch_Remove_TriggersUpdate(t *testing.T) {
// Add it.
if added, err := p.Add(job); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
// Remove the job.
if err := p.Remove(job.Namespace, job.ID); err != nil {
t.Fatalf("Add failed %v", err)
t.Fatalf("Remove failed %v", err)
}
time.Sleep(2 * time.Second)
@ -371,7 +371,7 @@ func TestPeriodicDispatch_ForceRun_Tracked(t *testing.T) {
// Add it.
if added, err := p.Add(job); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
// ForceRun the job
@ -403,7 +403,7 @@ func TestPeriodicDispatch_Run_DisallowOverlaps(t *testing.T) {
// Add it.
if added, err := p.Add(job); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
time.Sleep(3 * time.Second)
@ -432,7 +432,7 @@ func TestPeriodicDispatch_Run_Multiple(t *testing.T) {
// Add it.
if added, err := p.Add(job); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
time.Sleep(3 * time.Second)
@ -464,10 +464,10 @@ func TestPeriodicDispatch_Run_SameTime(t *testing.T) {
// Add them.
if added, err := p.Add(job); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
if added, err := p.Add(job2); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
if l := len(p.Tracked()); l != 2 {
@ -504,10 +504,10 @@ func TestPeriodicDispatch_Run_SameID_Different_Namespace(t *testing.T) {
// Add them.
if added, err := p.Add(job); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
if added, err := p.Add(job2); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
if l := len(p.Tracked()); l != 2 {
@ -570,11 +570,11 @@ func TestPeriodicDispatch_Complex(t *testing.T) {
// Create a map of expected eval job ids.
expected := map[string][]time.Time{
job1.ID: []time.Time{same},
job2.ID: []time.Time{same},
job1.ID: {same},
job2.ID: {same},
job3.ID: nil,
job4.ID: []time.Time{launch1, launch3},
job5.ID: []time.Time{launch2},
job4.ID: {launch1, launch3},
job5.ID: {launch2},
job6.ID: nil,
job7.ID: nil,
job8.ID: nil,
@ -588,7 +588,7 @@ func TestPeriodicDispatch_Complex(t *testing.T) {
for _, job := range jobs {
if added, err := p.Add(job); err != nil || !added {
t.Fatalf("Add failed %v", added, err)
t.Fatalf("Add failed %v %v", added, err)
}
}

View file

@ -405,7 +405,6 @@ func evaluateNodePlan(snap *state.StateSnapshot, plan *structs.Plan, nodeID stri
// Determine the proposed allocation by first removing allocations
// that are planned evictions and adding the new allocations.
proposed := existingAlloc
var remove []*structs.Allocation
if update := plan.NodeUpdate[nodeID]; len(update) > 0 {
remove = append(remove, update...)
@ -415,7 +414,7 @@ func evaluateNodePlan(snap *state.StateSnapshot, plan *structs.Plan, nodeID stri
remove = append(remove, alloc)
}
}
proposed = structs.RemoveAllocs(existingAlloc, remove)
proposed := structs.RemoveAllocs(existingAlloc, remove)
proposed = append(proposed, plan.NodeAllocation[nodeID]...)
// Check if these allocations fit
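
evaluateNodePlan no longer seeds proposed with existingAlloc only to overwrite it a few lines later; that write-then-overwrite is exactly what the ineffassign linter reports. A stripped-down sketch of the pattern, using a hypothetical pick helper:

package main

import "fmt"

func pick(existing, replacement []string) []string {
	// ineffassign: the first assignment to proposed is never read
	// before being overwritten, so it can simply be dropped.
	proposed := existing
	proposed = replacement
	return proposed
}

func main() {
	fmt.Println(pick([]string{"old"}, []string{"new"}))
}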

View file

@ -17,7 +17,7 @@ func TestEvaluatePool(t *testing.T) {
alloc := mock.Alloc()
plan := &structs.Plan{
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc},
node.ID: {alloc},
},
}

View file

@ -92,7 +92,7 @@ func TestPlanApply_applyPlan(t *testing.T) {
s1.State().UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))
planRes := &structs.PlanResult{
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc},
node.ID: {alloc},
},
Deployment: dnew,
DeploymentUpdates: updates,
@ -177,10 +177,10 @@ func TestPlanApply_applyPlan(t *testing.T) {
s1.State().UpsertJobSummary(1500, mock.JobSummary(alloc2.JobID))
planRes = &structs.PlanResult{
NodeUpdate: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{allocEvict},
node.ID: {allocEvict},
},
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc2},
node.ID: {alloc2},
},
}
@ -248,7 +248,7 @@ func TestPlanApply_EvalPlan_Simple(t *testing.T) {
alloc := mock.Alloc()
plan := &structs.Plan{
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc},
node.ID: {alloc},
},
Deployment: mock.Deployment(),
DeploymentUpdates: []*structs.DeploymentStatusUpdate{
@ -300,8 +300,8 @@ func TestPlanApply_EvalPlan_Partial(t *testing.T) {
plan := &structs.Plan{
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc},
node2.ID: []*structs.Allocation{alloc2},
node.ID: {alloc},
node2.ID: {alloc2},
},
Deployment: d,
}
@ -353,8 +353,8 @@ func TestPlanApply_EvalPlan_Partial_AllAtOnce(t *testing.T) {
plan := &structs.Plan{
AllAtOnce: true, // Require all to make progress
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc},
node2.ID: []*structs.Allocation{alloc2},
node.ID: {alloc},
node2.ID: {alloc2},
},
Deployment: mock.Deployment(),
DeploymentUpdates: []*structs.DeploymentStatusUpdate{
@ -398,7 +398,7 @@ func TestPlanApply_EvalNodePlan_Simple(t *testing.T) {
alloc := mock.Alloc()
plan := &structs.Plan{
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc},
node.ID: {alloc},
},
}
@ -425,7 +425,7 @@ func TestPlanApply_EvalNodePlan_NodeNotReady(t *testing.T) {
alloc := mock.Alloc()
plan := &structs.Plan{
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc},
node.ID: {alloc},
},
}
@ -452,7 +452,7 @@ func TestPlanApply_EvalNodePlan_NodeDrain(t *testing.T) {
alloc := mock.Alloc()
plan := &structs.Plan{
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc},
node.ID: {alloc},
},
}
@ -477,7 +477,7 @@ func TestPlanApply_EvalNodePlan_NodeNotExist(t *testing.T) {
alloc := mock.Alloc()
plan := &structs.Plan{
NodeAllocation: map[string][]*structs.Allocation{
nodeID: []*structs.Allocation{alloc},
nodeID: {alloc},
},
}
@ -512,7 +512,7 @@ func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) {
snap, _ := state.Snapshot()
plan := &structs.Plan{
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc2},
node.ID: {alloc2},
},
}
@ -542,7 +542,7 @@ func TestPlanApply_EvalNodePlan_UpdateExisting(t *testing.T) {
plan := &structs.Plan{
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc},
node.ID: {alloc},
},
}
@ -576,10 +576,10 @@ func TestPlanApply_EvalNodePlan_NodeFull_Evict(t *testing.T) {
alloc2 := mock.Alloc()
plan := &structs.Plan{
NodeUpdate: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{allocEvict},
node.ID: {allocEvict},
},
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc2},
node.ID: {alloc2},
},
}
@ -611,7 +611,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) {
alloc2 := mock.Alloc()
plan := &structs.Plan{
NodeAllocation: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{alloc2},
node.ID: {alloc2},
},
}
@ -645,7 +645,7 @@ func TestPlanApply_EvalNodePlan_NodeDown_EvictOnly(t *testing.T) {
allocEvict.DesiredStatus = structs.AllocDesiredStatusEvict
plan := &structs.Plan{
NodeUpdate: map[string][]*structs.Allocation{
node.ID: []*structs.Allocation{allocEvict},
node.ID: {allocEvict},
},
}

View file

@ -153,7 +153,7 @@ func (s *Server) maybeBootstrap() {
for attempt := uint(0); attempt < maxPeerRetries; attempt++ {
if err := s.connPool.RPC(s.config.Region, server.Addr, server.MajorVersion,
"Status.Peers", req, &peers); err != nil {
nextRetry := time.Duration((1 << attempt) * peerRetryBase)
nextRetry := (1 << attempt) * peerRetryBase
s.logger.Printf("[ERR] consul: Failed to confirm peer status for %s: %v. Retrying in "+
"%v...", server.Name, err, nextRetry.String())
time.Sleep(nextRetry)
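
Here unconvert removes a redundant time.Duration conversion: since peerRetryBase is already a Duration constant in this package, multiplying it by the shifted attempt count yields a Duration without any cast. A small sketch with an assumed retryBase constant:

package main

import (
	"fmt"
	"time"
)

const retryBase = 1 * time.Second

func main() {
	attempt := uint(3)
	// retryBase is a time.Duration, so the product is already a Duration;
	// wrapping it in time.Duration(...) is the no-op conversion unconvert flags.
	nextRetry := (1 << attempt) * retryBase
	fmt.Println(nextRetry) // 8s
}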

View file

@ -924,7 +924,7 @@ func (s *Server) setupRaft() error {
// we add support for node IDs.
configuration := raft.Configuration{
Servers: []raft.Server{
raft.Server{
{
ID: raft.ServerID(trans.LocalAddr()),
Address: trans.LocalAddr(),
},
@ -1069,7 +1069,7 @@ func (s *Server) Regions() []string {
defer s.peerLock.RUnlock()
regions := make([]string, 0, len(s.peers))
for region, _ := range s.peers {
for region := range s.peers {
regions = append(regions, region)
}
sort.Strings(regions)
@ -1131,7 +1131,7 @@ func (s *Server) Stats() map[string]map[string]string {
return strconv.FormatUint(v, 10)
}
stats := map[string]map[string]string{
"nomad": map[string]string{
"nomad": {
"server": "true",
"leader": fmt.Sprintf("%v", s.IsLeader()),
"leader_addr": string(s.raft.Leader()),

View file

@ -18,7 +18,7 @@ type NotifyGroup struct {
func (n *NotifyGroup) Notify() {
n.l.Lock()
defer n.l.Unlock()
for ch, _ := range n.notify {
for ch := range n.notify {
select {
case ch <- struct{}{}:
default:

View file

@ -69,7 +69,7 @@ func indexTableSchema() *memdb.TableSchema {
return &memdb.TableSchema{
Name: "index",
Indexes: map[string]*memdb.IndexSchema{
"id": &memdb.IndexSchema{
"id": {
Name: "id",
AllowMissing: false,
Unique: true,
@ -91,7 +91,7 @@ func nodeTableSchema() *memdb.TableSchema {
// Primary index is used for node management
// and simple direct lookup. ID is required to be
// unique.
"id": &memdb.IndexSchema{
"id": {
Name: "id",
AllowMissing: false,
Unique: true,
@ -112,7 +112,7 @@ func jobTableSchema() *memdb.TableSchema {
// Primary index is used for job management
// and simple direct lookup. ID is required to be
// unique within a namespace.
"id": &memdb.IndexSchema{
"id": {
Name: "id",
AllowMissing: false,
Unique: true,
@ -131,7 +131,7 @@ func jobTableSchema() *memdb.TableSchema {
},
},
},
"type": &memdb.IndexSchema{
"type": {
Name: "type",
AllowMissing: false,
Unique: false,
@ -140,7 +140,7 @@ func jobTableSchema() *memdb.TableSchema {
Lowercase: false,
},
},
"gc": &memdb.IndexSchema{
"gc": {
Name: "gc",
AllowMissing: false,
Unique: false,
@ -148,7 +148,7 @@ func jobTableSchema() *memdb.TableSchema {
Conditional: jobIsGCable,
},
},
"periodic": &memdb.IndexSchema{
"periodic": {
Name: "periodic",
AllowMissing: false,
Unique: false,
@ -165,7 +165,7 @@ func jobSummarySchema() *memdb.TableSchema {
return &memdb.TableSchema{
Name: "job_summary",
Indexes: map[string]*memdb.IndexSchema{
"id": &memdb.IndexSchema{
"id": {
Name: "id",
AllowMissing: false,
Unique: true,
@ -194,7 +194,7 @@ func jobVersionSchema() *memdb.TableSchema {
return &memdb.TableSchema{
Name: "job_version",
Indexes: map[string]*memdb.IndexSchema{
"id": &memdb.IndexSchema{
"id": {
Name: "id",
AllowMissing: false,
Unique: true,
@ -277,7 +277,7 @@ func deploymentSchema() *memdb.TableSchema {
return &memdb.TableSchema{
Name: "deployment",
Indexes: map[string]*memdb.IndexSchema{
"id": &memdb.IndexSchema{
"id": {
Name: "id",
AllowMissing: false,
Unique: true,
@ -286,7 +286,7 @@ func deploymentSchema() *memdb.TableSchema {
},
},
"namespace": &memdb.IndexSchema{
"namespace": {
Name: "namespace",
AllowMissing: false,
Unique: false,
@ -296,7 +296,7 @@ func deploymentSchema() *memdb.TableSchema {
},
// Job index is used to lookup deployments by job
"job": &memdb.IndexSchema{
"job": {
Name: "job",
AllowMissing: false,
Unique: false,
@ -328,7 +328,7 @@ func periodicLaunchTableSchema() *memdb.TableSchema {
// Primary index is used for job management
// and simple direct lookup. ID is required to be
// unique.
"id": &memdb.IndexSchema{
"id": {
Name: "id",
AllowMissing: false,
Unique: true,
@ -359,7 +359,7 @@ func evalTableSchema() *memdb.TableSchema {
Name: "evals",
Indexes: map[string]*memdb.IndexSchema{
// Primary index is used for direct lookup.
"id": &memdb.IndexSchema{
"id": {
Name: "id",
AllowMissing: false,
Unique: true,
@ -368,7 +368,7 @@ func evalTableSchema() *memdb.TableSchema {
},
},
"namespace": &memdb.IndexSchema{
"namespace": {
Name: "namespace",
AllowMissing: false,
Unique: false,
@ -378,7 +378,7 @@ func evalTableSchema() *memdb.TableSchema {
},
// Job index is used to lookup allocations by job
"job": &memdb.IndexSchema{
"job": {
Name: "job",
AllowMissing: false,
Unique: false,
@ -412,7 +412,7 @@ func allocTableSchema() *memdb.TableSchema {
Name: "allocs",
Indexes: map[string]*memdb.IndexSchema{
// Primary index is a UUID
"id": &memdb.IndexSchema{
"id": {
Name: "id",
AllowMissing: false,
Unique: true,
@ -421,7 +421,7 @@ func allocTableSchema() *memdb.TableSchema {
},
},
"namespace": &memdb.IndexSchema{
"namespace": {
Name: "namespace",
AllowMissing: false,
Unique: false,
@ -431,7 +431,7 @@ func allocTableSchema() *memdb.TableSchema {
},
// Node index is used to lookup allocations by node
"node": &memdb.IndexSchema{
"node": {
Name: "node",
AllowMissing: true, // Missing is allow for failed allocations
Unique: false,
@ -460,7 +460,7 @@ func allocTableSchema() *memdb.TableSchema {
},
// Job index is used to lookup allocations by job
"job": &memdb.IndexSchema{
"job": {
Name: "job",
AllowMissing: false,
Unique: false,
@ -479,7 +479,7 @@ func allocTableSchema() *memdb.TableSchema {
},
// Eval index is used to lookup allocations by eval
"eval": &memdb.IndexSchema{
"eval": {
Name: "eval",
AllowMissing: false,
Unique: false,
@ -489,7 +489,7 @@ func allocTableSchema() *memdb.TableSchema {
},
// Deployment index is used to lookup allocations by deployment
"deployment": &memdb.IndexSchema{
"deployment": {
Name: "deployment",
AllowMissing: true,
Unique: false,
@ -509,7 +509,7 @@ func vaultAccessorTableSchema() *memdb.TableSchema {
Name: "vault_accessors",
Indexes: map[string]*memdb.IndexSchema{
// The primary index is the accessor id
"id": &memdb.IndexSchema{
"id": {
Name: "id",
AllowMissing: false,
Unique: true,
@ -518,7 +518,7 @@ func vaultAccessorTableSchema() *memdb.TableSchema {
},
},
"alloc_id": &memdb.IndexSchema{
"alloc_id": {
Name: "alloc_id",
AllowMissing: false,
Unique: false,
@ -527,7 +527,7 @@ func vaultAccessorTableSchema() *memdb.TableSchema {
},
},
"node_id": &memdb.IndexSchema{
"node_id": {
Name: "node_id",
AllowMissing: false,
Unique: false,
@ -545,7 +545,7 @@ func aclPolicyTableSchema() *memdb.TableSchema {
return &memdb.TableSchema{
Name: "acl_policy",
Indexes: map[string]*memdb.IndexSchema{
"id": &memdb.IndexSchema{
"id": {
Name: "id",
AllowMissing: false,
Unique: true,
@ -563,7 +563,7 @@ func aclTokenTableSchema() *memdb.TableSchema {
return &memdb.TableSchema{
Name: "acl_token",
Indexes: map[string]*memdb.IndexSchema{
"id": &memdb.IndexSchema{
"id": {
Name: "id",
AllowMissing: false,
Unique: true,
@ -571,7 +571,7 @@ func aclTokenTableSchema() *memdb.TableSchema {
Field: "AccessorID",
},
},
"secret": &memdb.IndexSchema{
"secret": {
Name: "secret",
AllowMissing: false,
Unique: true,
@ -579,7 +579,7 @@ func aclTokenTableSchema() *memdb.TableSchema {
Field: "SecretID",
},
},
"global": &memdb.IndexSchema{
"global": {
Name: "global",
AllowMissing: false,
Unique: false,

View file

@ -673,7 +673,7 @@ func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion b
if exists, err := s.namespaceExists(txn, job.Namespace); err != nil {
return err
} else if !exists {
return fmt.Errorf("job %q is in non-existant namespace %q", job.ID, job.Namespace)
return fmt.Errorf("job %q is in non-existent namespace %q", job.ID, job.Namespace)
}
// Check if the job already exists

Some files were not shown because too many files have changed in this diff.