Fix make check errors

Alex Dadgar 2018-09-04 16:03:52 -07:00
parent 2c2a8322b5
commit c6576ddac1
9 changed files with 39 additions and 32 deletions
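Most of the hunks below appear to be whitespace and alignment fixes of the kind gofmt enforces. The last hunk also rewrites an unkeyed structs.RescheduleEvent composite literal with named fields, the style go vet's composite-literal check expects for struct types defined in another package; both checks are presumably among what make check runs. A minimal sketch of that keyed-literal change, using a hypothetical stand-in Event type and made-up values rather than the real structs.RescheduleEvent:

package main

import (
	"fmt"
	"time"
)

// Event mirrors the shape of structs.RescheduleEvent for illustration only;
// the field names and values here are assumptions, not taken from the commit.
type Event struct {
	RescheduleTime int64
	PrevAllocID    string
	PrevNodeID     string
	Delay          time.Duration
}

func main() {
	now := time.Now()

	// Unkeyed literal: compiles, but vet flags this style for struct types
	// from other packages, and it breaks or misassigns values if the field
	// order ever changes.
	unkeyed := Event{now.UnixNano(), "prev-alloc-id", "prev-node-id", 5 * time.Second}

	// Keyed literal: the form the last hunk switches the test fixture to.
	keyed := Event{
		RescheduleTime: now.UnixNano(),
		PrevAllocID:    "prev-alloc-id",
		PrevNodeID:     "prev-node-id",
		Delay:          5 * time.Second,
	}

	fmt.Println(unkeyed == keyed) // true: same values, only the literal style differs
}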

View File

@@ -59,7 +59,7 @@ func (u *AutopilotConfiguration) MarshalJSON() ([]byte, error) {
}{
LastContactThreshold: u.LastContactThreshold.String(),
ServerStabilizationTime: u.ServerStabilizationTime.String(),
Alias: (*Alias)(u),
})
}

View File

@@ -2472,10 +2472,10 @@ func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
Name: "nc-demo",
Driver: "docker",
Config: map[string]interface{}{
"image": "busybox",
"load": "busybox.tar",
"command": "/bin/nc",
"args": []string{"-l", "127.0.0.1", "-p", "0"},
"image": "busybox",
"load": "busybox.tar",
"command": "/bin/nc",
"args": []string{"-l", "127.0.0.1", "-p", "0"},
"advertise_ipv6_address": expectedAdvertise,
},
Resources: &structs.Resources{

View File

@@ -264,11 +264,11 @@ func TestConfig_Parse(t *testing.T) {
SyslogFacility: "",
DisableUpdateCheck: nil,
DisableAnonymousSignature: false,
Consul: nil,
Vault: nil,
TLSConfig: nil,
HTTPAPIResponseHeaders: nil,
Sentinel: nil,
},
false,
},

View File

@@ -310,8 +310,8 @@ func TestConfig_OutgoingTLS_PreferServerCipherSuites(t *testing.T) {
}
{
conf := &Config{
VerifyOutgoing: true,
CAFile: cacert,
PreferServerCipherSuites: true,
}
tlsConfig, err := conf.OutgoingTLSConfig()

View File

@@ -251,7 +251,7 @@ func (w *deploymentWatcher) PromoteDeployment(
// Create the request
areq := &structs.ApplyDeploymentPromoteRequest{
DeploymentPromoteRequest: *req,
Eval: w.getEval(),
}
index, err := w.upsertDeploymentPromotion(areq)

View File

@@ -1566,13 +1566,13 @@ func (n *Node) Stub() *NodeListStub {
addr, _, _ := net.SplitHostPort(n.HTTPAddr)
return &NodeListStub{
Address: addr,
ID: n.ID,
Datacenter: n.Datacenter,
Name: n.Name,
NodeClass: n.NodeClass,
Version: n.Attributes["nomad.version"],
Drain: n.Drain,
SchedulingEligibility: n.SchedulingEligibility,
Status: n.Status,
StatusDescription: n.StatusDescription,

View File

@@ -2745,7 +2745,7 @@ func TestAllocation_LastEventTime(t *testing.T) {
testCases := []testCase{
{
desc: "nil task state",
desc: "nil task state",
expectedLastEventTime: t1,
},
{
@@ -2813,7 +2813,7 @@ func TestAllocation_NextDelay(t *testing.T) {
DelayFunction: "constant",
Delay: 5 * time.Second,
},
alloc: &Allocation{},
expectedRescheduleTime: time.Time{},
expectedRescheduleEligible: false,
},
@@ -2824,7 +2824,7 @@ func TestAllocation_NextDelay(t *testing.T) {
Delay: 5 * time.Second,
Unlimited: true,
},
alloc: &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()},
expectedRescheduleTime: now.UTC().Add(5 * time.Second),
expectedRescheduleEligible: true,
},

View File

@@ -264,10 +264,10 @@ func (c *Device) startRepl() error {
for {
in, err := c.Ui.Ask("> ")
if err != nil {
if fingerprintCancel != nil {
fingerprintCancel()
}
if statsCancel != nil {
statsCancel()
}
return err
@@ -275,10 +275,10 @@ func (c *Device) startRepl() error {
switch {
case in == "exit()":
if fingerprintCancel != nil {
fingerprintCancel()
}
if statsCancel != nil {
statsCancel()
}
return nil

View File

@@ -4189,11 +4189,18 @@ func Test_updateRescheduleTracker(t *testing.T) {
testCases := []testCase{
{
desc: "No past events",
prevAllocEvents: nil,
reschedPolicy: &structs.ReschedulePolicy{Unlimited: false, Interval: 24 * time.Hour, Attempts: 2, Delay: 5 * time.Second},
reschedTime: t1,
expectedRescheduleEvents: []*structs.RescheduleEvent{{t1.UnixNano(), prevAlloc.ID, prevAlloc.NodeID, 5 * time.Second}},
desc: "No past events",
prevAllocEvents: nil,
reschedPolicy: &structs.ReschedulePolicy{Unlimited: false, Interval: 24 * time.Hour, Attempts: 2, Delay: 5 * time.Second},
reschedTime: t1,
expectedRescheduleEvents: []*structs.RescheduleEvent{
{
RescheduleTime: t1.UnixNano(),
PrevAllocID: prevAlloc.ID,
PrevNodeID: prevAlloc.NodeID,
Delay: 5 * time.Second,
},
},
},
{
desc: "one past event, linear delay",