Merge pull request #5213 from hashicorp/b-api-separate

Slimmer /api package
Mahmood Ali 2019-01-18 20:52:53 -05:00 committed by GitHub
commit 05e32fb525
19 changed files with 850 additions and 751 deletions
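
The changes below are almost entirely mechanical: every call to the exported pointer helpers in github.com/hashicorp/nomad/helper (helper.StringToPtr, helper.IntToPtr, and so on) and every constant pulled from nomad/structs is replaced with an unexported equivalent defined inside the api package itself, so api no longer drags in the rest of the repository. The new helper file is not part of this excerpt; a minimal sketch of what it presumably looks like, inferred from the call sites in the diff:

package api

import (
	"strconv"
	"strings"
	"time"
)

// Sketch of the unexported helpers the call sites below rely on; the actual
// file added by this PR is not shown in this excerpt, so treat these as
// inferred, not authoritative.
func boolToPtr(b bool) *bool                   { return &b }
func intToPtr(i int) *int                      { return &i }
func int64ToPtr(i int64) *int64                { return &i }
func uint64ToPtr(u uint64) *uint64             { return &u }
func float64ToPtr(f float64) *float64          { return &f }
func stringToPtr(s string) *string             { return &s }
func timeToPtr(d time.Duration) *time.Duration { return &d }

// formatFloat is assumed to mirror helper.FormatFloat: format f with at most
// maxPrec digits after the decimal point, without padding zeros.
func formatFloat(f float64, maxPrec int) string {
	v := strconv.FormatFloat(f, 'f', -1, 64)
	idx := strings.Index(v, ".")
	if idx == -1 {
		return v
	}
	if end := idx + maxPrec + 1; end < len(v) {
		v = v[:end]
	}
	return v
}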

View file

@ -12,6 +12,20 @@ var (
NodeDownErr = fmt.Errorf("node down")
)
const (
AllocDesiredStatusRun = "run" // Allocation should run
AllocDesiredStatusStop = "stop" // Allocation should stop
AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
)
const (
AllocClientStatusPending = "pending"
AllocClientStatusRunning = "running"
AllocClientStatusComplete = "complete"
AllocClientStatusFailed = "failed"
AllocClientStatusLost = "lost"
)
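
The desired/client status constants above are api-local copies of values previously referenced through nomad/structs. A hypothetical illustration of using them against the existing Allocations list endpoint (not part of this PR):

// countRunning is an illustrative helper, not part of this change; it assumes
// an existing *Client and uses the api-local status constants above.
func countRunning(client *Client) (int, error) {
	allocs, _, err := client.Allocations().List(nil)
	if err != nil {
		return 0, err
	}
	running := 0
	for _, a := range allocs {
		if a.ClientStatus == AllocClientStatusRunning {
			running++
		}
	}
	return running, nil
}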
// Allocations is used to query the alloc-related endpoints.
type Allocations struct {
client *Client

View file

@ -7,7 +7,6 @@ import (
"time"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/stretchr/testify/require"
)
@ -35,9 +34,9 @@ func TestAllocations_List(t *testing.T) {
return
//job := &Job{
//ID: helper.StringToPtr("job1"),
//Name: helper.StringToPtr("Job #1"),
//Type: helper.StringToPtr(JobTypeService),
//ID: stringToPtr("job1"),
//Name: stringToPtr("Job #1"),
//Type: stringToPtr(JobTypeService),
//}
//eval, _, err := c.Jobs().Register(job, nil)
//if err != nil {
@ -82,9 +81,9 @@ func TestAllocations_PrefixList(t *testing.T) {
return
//job := &Job{
//ID: helper.StringToPtr("job1"),
//Name: helper.StringToPtr("Job #1"),
//Type: helper.StringToPtr(JobTypeService),
//ID: stringToPtr("job1"),
//Name: stringToPtr("Job #1"),
//Type: stringToPtr(JobTypeService),
//}
//eval, _, err := c.Jobs().Register(job, nil)
@ -130,13 +129,13 @@ func TestAllocations_RescheduleInfo(t *testing.T) {
t.Parallel()
// Create a job, task group and alloc
job := &Job{
Name: helper.StringToPtr("foo"),
Namespace: helper.StringToPtr(DefaultNamespace),
ID: helper.StringToPtr("bar"),
ParentID: helper.StringToPtr("lol"),
Name: stringToPtr("foo"),
Namespace: stringToPtr(DefaultNamespace),
ID: stringToPtr("bar"),
ParentID: stringToPtr("lol"),
TaskGroups: []*TaskGroup{
{
Name: helper.StringToPtr("bar"),
Name: stringToPtr("bar"),
Tasks: []*Task{
{
Name: "task1",
@ -176,8 +175,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) {
{
desc: "no reschedule events",
reschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(3),
Interval: helper.TimeToPtr(15 * time.Minute),
Attempts: intToPtr(3),
Interval: timeToPtr(15 * time.Minute),
},
expAttempted: 0,
expTotal: 3,
@ -185,8 +184,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) {
{
desc: "all reschedule events within interval",
reschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(3),
Interval: helper.TimeToPtr(15 * time.Minute),
Attempts: intToPtr(3),
Interval: timeToPtr(15 * time.Minute),
},
time: time.Now(),
rescheduleTracker: &RescheduleTracker{
@ -202,8 +201,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) {
{
desc: "some reschedule events outside interval",
reschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(3),
Interval: helper.TimeToPtr(15 * time.Minute),
Attempts: intToPtr(3),
Interval: timeToPtr(15 * time.Minute),
},
time: time.Now(),
rescheduleTracker: &RescheduleTracker{
@ -242,7 +241,7 @@ func TestAllocations_RescheduleInfo(t *testing.T) {
func TestAllocations_ShouldMigrate(t *testing.T) {
t.Parallel()
require.True(t, DesiredTransition{Migrate: helper.BoolToPtr(true)}.ShouldMigrate())
require.True(t, DesiredTransition{Migrate: boolToPtr(true)}.ShouldMigrate())
require.False(t, DesiredTransition{}.ShouldMigrate())
require.False(t, DesiredTransition{Migrate: helper.BoolToPtr(false)}.ShouldMigrate())
require.False(t, DesiredTransition{Migrate: boolToPtr(false)}.ShouldMigrate())
}

View file

@ -3,8 +3,6 @@ package api
import (
"reflect"
"testing"
"github.com/hashicorp/nomad/helper"
)
func TestCompose(t *testing.T) {
@ -15,13 +13,13 @@ func TestCompose(t *testing.T) {
SetMeta("foo", "bar").
Constrain(NewConstraint("kernel.name", "=", "linux")).
Require(&Resources{
CPU: helper.IntToPtr(1250),
MemoryMB: helper.IntToPtr(1024),
DiskMB: helper.IntToPtr(2048),
CPU: intToPtr(1250),
MemoryMB: intToPtr(1024),
DiskMB: intToPtr(2048),
Networks: []*NetworkResource{
{
CIDR: "0.0.0.0/0",
MBits: helper.IntToPtr(100),
MBits: intToPtr(100),
ReservedPorts: []Port{{"", 80}, {"", 443}},
},
},
@ -47,11 +45,11 @@ func TestCompose(t *testing.T) {
// Check that the composed result looks correct
expect := &Job{
Region: helper.StringToPtr("region1"),
ID: helper.StringToPtr("job1"),
Name: helper.StringToPtr("myjob"),
Type: helper.StringToPtr(JobTypeService),
Priority: helper.IntToPtr(2),
Region: stringToPtr("region1"),
ID: stringToPtr("job1"),
Name: stringToPtr("myjob"),
Type: stringToPtr(JobTypeService),
Priority: intToPtr(2),
Datacenters: []string{
"dc1",
},
@ -67,8 +65,8 @@ func TestCompose(t *testing.T) {
},
TaskGroups: []*TaskGroup{
{
Name: helper.StringToPtr("grp1"),
Count: helper.IntToPtr(2),
Name: stringToPtr("grp1"),
Count: intToPtr(2),
Constraints: []*Constraint{
{
LTarget: "kernel.name",
@ -87,7 +85,7 @@ func TestCompose(t *testing.T) {
Spreads: []*Spread{
{
Attribute: "${node.datacenter}",
Weight: helper.IntToPtr(30),
Weight: intToPtr(30),
SpreadTarget: []*SpreadTarget{
{
Value: "dc1",
@ -105,13 +103,13 @@ func TestCompose(t *testing.T) {
Name: "task1",
Driver: "exec",
Resources: &Resources{
CPU: helper.IntToPtr(1250),
MemoryMB: helper.IntToPtr(1024),
DiskMB: helper.IntToPtr(2048),
CPU: intToPtr(1250),
MemoryMB: intToPtr(1024),
DiskMB: intToPtr(2048),
Networks: []*NetworkResource{
{
CIDR: "0.0.0.0/0",
MBits: helper.IntToPtr(100),
MBits: intToPtr(100),
ReservedPorts: []Port{
{"", 80},
{"", 443},

View file

@ -10,7 +10,6 @@ import (
"time"
units "github.com/docker/go-units"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/testutil"
"github.com/kr/pretty"
"github.com/stretchr/testify/assert"
@ -62,13 +61,13 @@ func TestFS_Logs(t *testing.T) {
}
job := &Job{
ID: helper.StringToPtr("TestFS_Logs"),
Region: helper.StringToPtr("global"),
ID: stringToPtr("TestFS_Logs"),
Region: stringToPtr("global"),
Datacenters: []string{"dc1"},
Type: helper.StringToPtr("batch"),
Type: stringToPtr("batch"),
TaskGroups: []*TaskGroup{
{
Name: helper.StringToPtr("TestFS_LogsGroup"),
Name: stringToPtr("TestFS_LogsGroup"),
Tasks: []*Task{
{
Name: "logger",

View file

@ -8,8 +8,6 @@ import (
"time"
"github.com/gorhill/cronexpr"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
@ -19,6 +17,9 @@ const (
// JobTypeBatch indicates a short-lived process
JobTypeBatch = "batch"
// JobTypeSystem indicates a system process that should run on all clients
JobTypeSystem = "system"
// PeriodicSpecCron is used for a cron spec.
PeriodicSpecCron = "cron"
@ -373,14 +374,14 @@ type UpdateStrategy struct {
// jobs with the old policy or for populating field defaults.
func DefaultUpdateStrategy() *UpdateStrategy {
return &UpdateStrategy{
Stagger: helper.TimeToPtr(30 * time.Second),
MaxParallel: helper.IntToPtr(1),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(10 * time.Second),
HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
ProgressDeadline: helper.TimeToPtr(10 * time.Minute),
AutoRevert: helper.BoolToPtr(false),
Canary: helper.IntToPtr(0),
Stagger: timeToPtr(30 * time.Second),
MaxParallel: intToPtr(1),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(10 * time.Second),
HealthyDeadline: timeToPtr(5 * time.Minute),
ProgressDeadline: timeToPtr(10 * time.Minute),
AutoRevert: boolToPtr(false),
Canary: intToPtr(0),
}
}
@ -392,35 +393,35 @@ func (u *UpdateStrategy) Copy() *UpdateStrategy {
copy := new(UpdateStrategy)
if u.Stagger != nil {
copy.Stagger = helper.TimeToPtr(*u.Stagger)
copy.Stagger = timeToPtr(*u.Stagger)
}
if u.MaxParallel != nil {
copy.MaxParallel = helper.IntToPtr(*u.MaxParallel)
copy.MaxParallel = intToPtr(*u.MaxParallel)
}
if u.HealthCheck != nil {
copy.HealthCheck = helper.StringToPtr(*u.HealthCheck)
copy.HealthCheck = stringToPtr(*u.HealthCheck)
}
if u.MinHealthyTime != nil {
copy.MinHealthyTime = helper.TimeToPtr(*u.MinHealthyTime)
copy.MinHealthyTime = timeToPtr(*u.MinHealthyTime)
}
if u.HealthyDeadline != nil {
copy.HealthyDeadline = helper.TimeToPtr(*u.HealthyDeadline)
copy.HealthyDeadline = timeToPtr(*u.HealthyDeadline)
}
if u.ProgressDeadline != nil {
copy.ProgressDeadline = helper.TimeToPtr(*u.ProgressDeadline)
copy.ProgressDeadline = timeToPtr(*u.ProgressDeadline)
}
if u.AutoRevert != nil {
copy.AutoRevert = helper.BoolToPtr(*u.AutoRevert)
copy.AutoRevert = boolToPtr(*u.AutoRevert)
}
if u.Canary != nil {
copy.Canary = helper.IntToPtr(*u.Canary)
copy.Canary = intToPtr(*u.Canary)
}
return copy
@ -432,35 +433,35 @@ func (u *UpdateStrategy) Merge(o *UpdateStrategy) {
}
if o.Stagger != nil {
u.Stagger = helper.TimeToPtr(*o.Stagger)
u.Stagger = timeToPtr(*o.Stagger)
}
if o.MaxParallel != nil {
u.MaxParallel = helper.IntToPtr(*o.MaxParallel)
u.MaxParallel = intToPtr(*o.MaxParallel)
}
if o.HealthCheck != nil {
u.HealthCheck = helper.StringToPtr(*o.HealthCheck)
u.HealthCheck = stringToPtr(*o.HealthCheck)
}
if o.MinHealthyTime != nil {
u.MinHealthyTime = helper.TimeToPtr(*o.MinHealthyTime)
u.MinHealthyTime = timeToPtr(*o.MinHealthyTime)
}
if o.HealthyDeadline != nil {
u.HealthyDeadline = helper.TimeToPtr(*o.HealthyDeadline)
u.HealthyDeadline = timeToPtr(*o.HealthyDeadline)
}
if o.ProgressDeadline != nil {
u.ProgressDeadline = helper.TimeToPtr(*o.ProgressDeadline)
u.ProgressDeadline = timeToPtr(*o.ProgressDeadline)
}
if o.AutoRevert != nil {
u.AutoRevert = helper.BoolToPtr(*o.AutoRevert)
u.AutoRevert = boolToPtr(*o.AutoRevert)
}
if o.Canary != nil {
u.Canary = helper.IntToPtr(*o.Canary)
u.Canary = intToPtr(*o.Canary)
}
}
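
Copy and Merge above are what let a task group start from the job-level update stanza and override individual fields, as exercised by the "update_merge" case in jobs_test.go further down. A hypothetical illustration:

// Illustrative only: a group-level override layered on top of the job-level
// strategy via Copy and Merge (values are made up).
func exampleGroupUpdate() *UpdateStrategy {
	groupUpdate := DefaultUpdateStrategy().Copy() // start from the job-level defaults
	groupUpdate.Merge(&UpdateStrategy{
		MaxParallel: intToPtr(2), // override a single field
	})
	return groupUpdate // MaxParallel == 2, everything else from the defaults
}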
@ -552,19 +553,19 @@ type PeriodicConfig struct {
func (p *PeriodicConfig) Canonicalize() {
if p.Enabled == nil {
p.Enabled = helper.BoolToPtr(true)
p.Enabled = boolToPtr(true)
}
if p.Spec == nil {
p.Spec = helper.StringToPtr("")
p.Spec = stringToPtr("")
}
if p.SpecType == nil {
p.SpecType = helper.StringToPtr(PeriodicSpecCron)
p.SpecType = stringToPtr(PeriodicSpecCron)
}
if p.ProhibitOverlap == nil {
p.ProhibitOverlap = helper.BoolToPtr(false)
p.ProhibitOverlap = boolToPtr(false)
}
if p.TimeZone == nil || *p.TimeZone == "" {
p.TimeZone = helper.StringToPtr("UTC")
p.TimeZone = stringToPtr("UTC")
}
}
@ -575,13 +576,27 @@ func (p *PeriodicConfig) Canonicalize() {
func (p *PeriodicConfig) Next(fromTime time.Time) (time.Time, error) {
if *p.SpecType == PeriodicSpecCron {
if e, err := cronexpr.Parse(*p.Spec); err == nil {
return structs.CronParseNext(e, fromTime, *p.Spec)
return cronParseNext(e, fromTime, *p.Spec)
}
}
return time.Time{}, nil
}
// cronParseNext is a helper that computes the next time for the given cron
// expression, capturing any panic that may occur in the underlying library.
// --- THIS FUNCTION IS REPLICATED IN nomad/structs/structs.go
// and should be kept in sync.
func cronParseNext(e *cronexpr.Expression, fromTime time.Time, spec string) (t time.Time, err error) {
defer func() {
if recover() != nil {
t = time.Time{}
err = fmt.Errorf("failed parsing cron expression: %q", spec)
}
}()
return e.Next(fromTime), nil
}
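
For reference, the periodic path above can be exercised roughly like this; the spec value is made up, and a panic in the cron library surfaces as an error from Next rather than crashing the caller, thanks to cronParseNext:

// Illustrative only; the spec value is hypothetical.
func exampleNextLaunch() (time.Time, error) {
	p := &PeriodicConfig{
		Enabled:  boolToPtr(true),
		SpecType: stringToPtr(PeriodicSpecCron),
		Spec:     stringToPtr("0 */15 * * * *"), // every 15 minutes
	}
	// Next never panics: cronParseNext converts library panics into an error.
	return p.Next(time.Now())
}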
func (p *PeriodicConfig) GetLocation() (*time.Location, error) {
if p.TimeZone == nil || *p.TimeZone == "" {
return time.UTC, nil
@ -644,58 +659,58 @@ func (j *Job) IsParameterized() bool {
func (j *Job) Canonicalize() {
if j.ID == nil {
j.ID = helper.StringToPtr("")
j.ID = stringToPtr("")
}
if j.Name == nil {
j.Name = helper.StringToPtr(*j.ID)
j.Name = stringToPtr(*j.ID)
}
if j.ParentID == nil {
j.ParentID = helper.StringToPtr("")
j.ParentID = stringToPtr("")
}
if j.Namespace == nil {
j.Namespace = helper.StringToPtr(DefaultNamespace)
j.Namespace = stringToPtr(DefaultNamespace)
}
if j.Priority == nil {
j.Priority = helper.IntToPtr(50)
j.Priority = intToPtr(50)
}
if j.Stop == nil {
j.Stop = helper.BoolToPtr(false)
j.Stop = boolToPtr(false)
}
if j.Region == nil {
j.Region = helper.StringToPtr("global")
j.Region = stringToPtr("global")
}
if j.Namespace == nil {
j.Namespace = helper.StringToPtr("default")
j.Namespace = stringToPtr("default")
}
if j.Type == nil {
j.Type = helper.StringToPtr("service")
j.Type = stringToPtr("service")
}
if j.AllAtOnce == nil {
j.AllAtOnce = helper.BoolToPtr(false)
j.AllAtOnce = boolToPtr(false)
}
if j.VaultToken == nil {
j.VaultToken = helper.StringToPtr("")
j.VaultToken = stringToPtr("")
}
if j.Status == nil {
j.Status = helper.StringToPtr("")
j.Status = stringToPtr("")
}
if j.StatusDescription == nil {
j.StatusDescription = helper.StringToPtr("")
j.StatusDescription = stringToPtr("")
}
if j.Stable == nil {
j.Stable = helper.BoolToPtr(false)
j.Stable = boolToPtr(false)
}
if j.Version == nil {
j.Version = helper.Uint64ToPtr(0)
j.Version = uint64ToPtr(0)
}
if j.CreateIndex == nil {
j.CreateIndex = helper.Uint64ToPtr(0)
j.CreateIndex = uint64ToPtr(0)
}
if j.ModifyIndex == nil {
j.ModifyIndex = helper.Uint64ToPtr(0)
j.ModifyIndex = uint64ToPtr(0)
}
if j.JobModifyIndex == nil {
j.JobModifyIndex = helper.Uint64ToPtr(0)
j.JobModifyIndex = uint64ToPtr(0)
}
if j.Periodic != nil {
j.Periodic.Canonicalize()

View file

@ -7,7 +7,6 @@ import (
"testing"
"time"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/testutil"
"github.com/kr/pretty"
@ -131,50 +130,50 @@ func TestJobs_Canonicalize(t *testing.T) {
},
},
expected: &Job{
ID: helper.StringToPtr(""),
Name: helper.StringToPtr(""),
Region: helper.StringToPtr("global"),
Namespace: helper.StringToPtr(DefaultNamespace),
Type: helper.StringToPtr("service"),
ParentID: helper.StringToPtr(""),
Priority: helper.IntToPtr(50),
AllAtOnce: helper.BoolToPtr(false),
VaultToken: helper.StringToPtr(""),
Status: helper.StringToPtr(""),
StatusDescription: helper.StringToPtr(""),
Stop: helper.BoolToPtr(false),
Stable: helper.BoolToPtr(false),
Version: helper.Uint64ToPtr(0),
CreateIndex: helper.Uint64ToPtr(0),
ModifyIndex: helper.Uint64ToPtr(0),
JobModifyIndex: helper.Uint64ToPtr(0),
ID: stringToPtr(""),
Name: stringToPtr(""),
Region: stringToPtr("global"),
Namespace: stringToPtr(DefaultNamespace),
Type: stringToPtr("service"),
ParentID: stringToPtr(""),
Priority: intToPtr(50),
AllAtOnce: boolToPtr(false),
VaultToken: stringToPtr(""),
Status: stringToPtr(""),
StatusDescription: stringToPtr(""),
Stop: boolToPtr(false),
Stable: boolToPtr(false),
Version: uint64ToPtr(0),
CreateIndex: uint64ToPtr(0),
ModifyIndex: uint64ToPtr(0),
JobModifyIndex: uint64ToPtr(0),
TaskGroups: []*TaskGroup{
{
Name: helper.StringToPtr(""),
Count: helper.IntToPtr(1),
Name: stringToPtr(""),
Count: intToPtr(1),
EphemeralDisk: &EphemeralDisk{
Sticky: helper.BoolToPtr(false),
Migrate: helper.BoolToPtr(false),
SizeMB: helper.IntToPtr(300),
Sticky: boolToPtr(false),
Migrate: boolToPtr(false),
SizeMB: intToPtr(300),
},
RestartPolicy: &RestartPolicy{
Delay: helper.TimeToPtr(15 * time.Second),
Attempts: helper.IntToPtr(2),
Interval: helper.TimeToPtr(30 * time.Minute),
Mode: helper.StringToPtr("fail"),
Delay: timeToPtr(15 * time.Second),
Attempts: intToPtr(2),
Interval: timeToPtr(30 * time.Minute),
Mode: stringToPtr("fail"),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(0),
Interval: helper.TimeToPtr(0),
DelayFunction: helper.StringToPtr("exponential"),
Delay: helper.TimeToPtr(30 * time.Second),
MaxDelay: helper.TimeToPtr(1 * time.Hour),
Unlimited: helper.BoolToPtr(true),
Attempts: intToPtr(0),
Interval: timeToPtr(0),
DelayFunction: stringToPtr("exponential"),
Delay: timeToPtr(30 * time.Second),
MaxDelay: timeToPtr(1 * time.Hour),
Unlimited: boolToPtr(true),
},
Migrate: DefaultMigrateStrategy(),
Tasks: []*Task{
{
KillTimeout: helper.TimeToPtr(5 * time.Second),
KillTimeout: timeToPtr(5 * time.Second),
LogConfig: DefaultLogConfig(),
Resources: DefaultResources(),
},
@ -186,13 +185,13 @@ func TestJobs_Canonicalize(t *testing.T) {
{
name: "partial",
input: &Job{
Name: helper.StringToPtr("foo"),
Namespace: helper.StringToPtr("bar"),
ID: helper.StringToPtr("bar"),
ParentID: helper.StringToPtr("lol"),
Name: stringToPtr("foo"),
Namespace: stringToPtr("bar"),
ID: stringToPtr("bar"),
ParentID: stringToPtr("lol"),
TaskGroups: []*TaskGroup{
{
Name: helper.StringToPtr("bar"),
Name: stringToPtr("bar"),
Tasks: []*Task{
{
Name: "task1",
@ -202,45 +201,45 @@ func TestJobs_Canonicalize(t *testing.T) {
},
},
expected: &Job{
Namespace: helper.StringToPtr("bar"),
ID: helper.StringToPtr("bar"),
Name: helper.StringToPtr("foo"),
Region: helper.StringToPtr("global"),
Type: helper.StringToPtr("service"),
ParentID: helper.StringToPtr("lol"),
Priority: helper.IntToPtr(50),
AllAtOnce: helper.BoolToPtr(false),
VaultToken: helper.StringToPtr(""),
Stop: helper.BoolToPtr(false),
Stable: helper.BoolToPtr(false),
Version: helper.Uint64ToPtr(0),
Status: helper.StringToPtr(""),
StatusDescription: helper.StringToPtr(""),
CreateIndex: helper.Uint64ToPtr(0),
ModifyIndex: helper.Uint64ToPtr(0),
JobModifyIndex: helper.Uint64ToPtr(0),
Namespace: stringToPtr("bar"),
ID: stringToPtr("bar"),
Name: stringToPtr("foo"),
Region: stringToPtr("global"),
Type: stringToPtr("service"),
ParentID: stringToPtr("lol"),
Priority: intToPtr(50),
AllAtOnce: boolToPtr(false),
VaultToken: stringToPtr(""),
Stop: boolToPtr(false),
Stable: boolToPtr(false),
Version: uint64ToPtr(0),
Status: stringToPtr(""),
StatusDescription: stringToPtr(""),
CreateIndex: uint64ToPtr(0),
ModifyIndex: uint64ToPtr(0),
JobModifyIndex: uint64ToPtr(0),
TaskGroups: []*TaskGroup{
{
Name: helper.StringToPtr("bar"),
Count: helper.IntToPtr(1),
Name: stringToPtr("bar"),
Count: intToPtr(1),
EphemeralDisk: &EphemeralDisk{
Sticky: helper.BoolToPtr(false),
Migrate: helper.BoolToPtr(false),
SizeMB: helper.IntToPtr(300),
Sticky: boolToPtr(false),
Migrate: boolToPtr(false),
SizeMB: intToPtr(300),
},
RestartPolicy: &RestartPolicy{
Delay: helper.TimeToPtr(15 * time.Second),
Attempts: helper.IntToPtr(2),
Interval: helper.TimeToPtr(30 * time.Minute),
Mode: helper.StringToPtr("fail"),
Delay: timeToPtr(15 * time.Second),
Attempts: intToPtr(2),
Interval: timeToPtr(30 * time.Minute),
Mode: stringToPtr("fail"),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(0),
Interval: helper.TimeToPtr(0),
DelayFunction: helper.StringToPtr("exponential"),
Delay: helper.TimeToPtr(30 * time.Second),
MaxDelay: helper.TimeToPtr(1 * time.Hour),
Unlimited: helper.BoolToPtr(true),
Attempts: intToPtr(0),
Interval: timeToPtr(0),
DelayFunction: stringToPtr("exponential"),
Delay: timeToPtr(30 * time.Second),
MaxDelay: timeToPtr(1 * time.Hour),
Unlimited: boolToPtr(true),
},
Migrate: DefaultMigrateStrategy(),
Tasks: []*Task{
@ -248,7 +247,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Name: "task1",
LogConfig: DefaultLogConfig(),
Resources: DefaultResources(),
KillTimeout: helper.TimeToPtr(5 * time.Second),
KillTimeout: timeToPtr(5 * time.Second),
},
},
},
@ -258,25 +257,25 @@ func TestJobs_Canonicalize(t *testing.T) {
{
name: "example_template",
input: &Job{
ID: helper.StringToPtr("example_template"),
Name: helper.StringToPtr("example_template"),
ID: stringToPtr("example_template"),
Name: stringToPtr("example_template"),
Datacenters: []string{"dc1"},
Type: helper.StringToPtr("service"),
Type: stringToPtr("service"),
Update: &UpdateStrategy{
MaxParallel: helper.IntToPtr(1),
MaxParallel: intToPtr(1),
},
TaskGroups: []*TaskGroup{
{
Name: helper.StringToPtr("cache"),
Count: helper.IntToPtr(1),
Name: stringToPtr("cache"),
Count: intToPtr(1),
RestartPolicy: &RestartPolicy{
Interval: helper.TimeToPtr(5 * time.Minute),
Attempts: helper.IntToPtr(10),
Delay: helper.TimeToPtr(25 * time.Second),
Mode: helper.StringToPtr("delay"),
Interval: timeToPtr(5 * time.Minute),
Attempts: intToPtr(10),
Delay: timeToPtr(25 * time.Second),
Mode: stringToPtr("delay"),
},
EphemeralDisk: &EphemeralDisk{
SizeMB: helper.IntToPtr(300),
SizeMB: intToPtr(300),
},
Tasks: []*Task{
{
@ -289,11 +288,11 @@ func TestJobs_Canonicalize(t *testing.T) {
}},
},
Resources: &Resources{
CPU: helper.IntToPtr(500),
MemoryMB: helper.IntToPtr(256),
CPU: intToPtr(500),
MemoryMB: intToPtr(256),
Networks: []*NetworkResource{
{
MBits: helper.IntToPtr(10),
MBits: intToPtr(10),
DynamicPorts: []Port{
{
Label: "db",
@ -320,14 +319,14 @@ func TestJobs_Canonicalize(t *testing.T) {
},
Templates: []*Template{
{
EmbeddedTmpl: helper.StringToPtr("---"),
DestPath: helper.StringToPtr("local/file.yml"),
EmbeddedTmpl: stringToPtr("---"),
DestPath: stringToPtr("local/file.yml"),
},
{
EmbeddedTmpl: helper.StringToPtr("FOO=bar\n"),
DestPath: helper.StringToPtr("local/file.env"),
Envvars: helper.BoolToPtr(true),
VaultGrace: helper.TimeToPtr(3 * time.Second),
EmbeddedTmpl: stringToPtr("FOO=bar\n"),
DestPath: stringToPtr("local/file.env"),
Envvars: boolToPtr(true),
VaultGrace: timeToPtr(3 * time.Second),
},
},
},
@ -336,67 +335,67 @@ func TestJobs_Canonicalize(t *testing.T) {
},
},
expected: &Job{
Namespace: helper.StringToPtr(DefaultNamespace),
ID: helper.StringToPtr("example_template"),
Name: helper.StringToPtr("example_template"),
ParentID: helper.StringToPtr(""),
Priority: helper.IntToPtr(50),
Region: helper.StringToPtr("global"),
Type: helper.StringToPtr("service"),
AllAtOnce: helper.BoolToPtr(false),
VaultToken: helper.StringToPtr(""),
Stop: helper.BoolToPtr(false),
Stable: helper.BoolToPtr(false),
Version: helper.Uint64ToPtr(0),
Status: helper.StringToPtr(""),
StatusDescription: helper.StringToPtr(""),
CreateIndex: helper.Uint64ToPtr(0),
ModifyIndex: helper.Uint64ToPtr(0),
JobModifyIndex: helper.Uint64ToPtr(0),
Namespace: stringToPtr(DefaultNamespace),
ID: stringToPtr("example_template"),
Name: stringToPtr("example_template"),
ParentID: stringToPtr(""),
Priority: intToPtr(50),
Region: stringToPtr("global"),
Type: stringToPtr("service"),
AllAtOnce: boolToPtr(false),
VaultToken: stringToPtr(""),
Stop: boolToPtr(false),
Stable: boolToPtr(false),
Version: uint64ToPtr(0),
Status: stringToPtr(""),
StatusDescription: stringToPtr(""),
CreateIndex: uint64ToPtr(0),
ModifyIndex: uint64ToPtr(0),
JobModifyIndex: uint64ToPtr(0),
Datacenters: []string{"dc1"},
Update: &UpdateStrategy{
Stagger: helper.TimeToPtr(30 * time.Second),
MaxParallel: helper.IntToPtr(1),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(10 * time.Second),
HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
ProgressDeadline: helper.TimeToPtr(10 * time.Minute),
AutoRevert: helper.BoolToPtr(false),
Canary: helper.IntToPtr(0),
Stagger: timeToPtr(30 * time.Second),
MaxParallel: intToPtr(1),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(10 * time.Second),
HealthyDeadline: timeToPtr(5 * time.Minute),
ProgressDeadline: timeToPtr(10 * time.Minute),
AutoRevert: boolToPtr(false),
Canary: intToPtr(0),
},
TaskGroups: []*TaskGroup{
{
Name: helper.StringToPtr("cache"),
Count: helper.IntToPtr(1),
Name: stringToPtr("cache"),
Count: intToPtr(1),
RestartPolicy: &RestartPolicy{
Interval: helper.TimeToPtr(5 * time.Minute),
Attempts: helper.IntToPtr(10),
Delay: helper.TimeToPtr(25 * time.Second),
Mode: helper.StringToPtr("delay"),
Interval: timeToPtr(5 * time.Minute),
Attempts: intToPtr(10),
Delay: timeToPtr(25 * time.Second),
Mode: stringToPtr("delay"),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(0),
Interval: helper.TimeToPtr(0),
DelayFunction: helper.StringToPtr("exponential"),
Delay: helper.TimeToPtr(30 * time.Second),
MaxDelay: helper.TimeToPtr(1 * time.Hour),
Unlimited: helper.BoolToPtr(true),
Attempts: intToPtr(0),
Interval: timeToPtr(0),
DelayFunction: stringToPtr("exponential"),
Delay: timeToPtr(30 * time.Second),
MaxDelay: timeToPtr(1 * time.Hour),
Unlimited: boolToPtr(true),
},
EphemeralDisk: &EphemeralDisk{
Sticky: helper.BoolToPtr(false),
Migrate: helper.BoolToPtr(false),
SizeMB: helper.IntToPtr(300),
Sticky: boolToPtr(false),
Migrate: boolToPtr(false),
SizeMB: intToPtr(300),
},
Update: &UpdateStrategy{
Stagger: helper.TimeToPtr(30 * time.Second),
MaxParallel: helper.IntToPtr(1),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(10 * time.Second),
HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
ProgressDeadline: helper.TimeToPtr(10 * time.Minute),
AutoRevert: helper.BoolToPtr(false),
Canary: helper.IntToPtr(0),
Stagger: timeToPtr(30 * time.Second),
MaxParallel: intToPtr(1),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(10 * time.Second),
HealthyDeadline: timeToPtr(5 * time.Minute),
ProgressDeadline: timeToPtr(10 * time.Minute),
AutoRevert: boolToPtr(false),
Canary: intToPtr(0),
},
Migrate: DefaultMigrateStrategy(),
Tasks: []*Task{
@ -410,11 +409,11 @@ func TestJobs_Canonicalize(t *testing.T) {
}},
},
Resources: &Resources{
CPU: helper.IntToPtr(500),
MemoryMB: helper.IntToPtr(256),
CPU: intToPtr(500),
MemoryMB: intToPtr(256),
Networks: []*NetworkResource{
{
MBits: helper.IntToPtr(10),
MBits: intToPtr(10),
DynamicPorts: []Port{
{
Label: "db",
@ -440,34 +439,34 @@ func TestJobs_Canonicalize(t *testing.T) {
},
},
},
KillTimeout: helper.TimeToPtr(5 * time.Second),
KillTimeout: timeToPtr(5 * time.Second),
LogConfig: DefaultLogConfig(),
Templates: []*Template{
{
SourcePath: helper.StringToPtr(""),
DestPath: helper.StringToPtr("local/file.yml"),
EmbeddedTmpl: helper.StringToPtr("---"),
ChangeMode: helper.StringToPtr("restart"),
ChangeSignal: helper.StringToPtr(""),
Splay: helper.TimeToPtr(5 * time.Second),
Perms: helper.StringToPtr("0644"),
LeftDelim: helper.StringToPtr("{{"),
RightDelim: helper.StringToPtr("}}"),
Envvars: helper.BoolToPtr(false),
VaultGrace: helper.TimeToPtr(15 * time.Second),
SourcePath: stringToPtr(""),
DestPath: stringToPtr("local/file.yml"),
EmbeddedTmpl: stringToPtr("---"),
ChangeMode: stringToPtr("restart"),
ChangeSignal: stringToPtr(""),
Splay: timeToPtr(5 * time.Second),
Perms: stringToPtr("0644"),
LeftDelim: stringToPtr("{{"),
RightDelim: stringToPtr("}}"),
Envvars: boolToPtr(false),
VaultGrace: timeToPtr(15 * time.Second),
},
{
SourcePath: helper.StringToPtr(""),
DestPath: helper.StringToPtr("local/file.env"),
EmbeddedTmpl: helper.StringToPtr("FOO=bar\n"),
ChangeMode: helper.StringToPtr("restart"),
ChangeSignal: helper.StringToPtr(""),
Splay: helper.TimeToPtr(5 * time.Second),
Perms: helper.StringToPtr("0644"),
LeftDelim: helper.StringToPtr("{{"),
RightDelim: helper.StringToPtr("}}"),
Envvars: helper.BoolToPtr(true),
VaultGrace: helper.TimeToPtr(3 * time.Second),
SourcePath: stringToPtr(""),
DestPath: stringToPtr("local/file.env"),
EmbeddedTmpl: stringToPtr("FOO=bar\n"),
ChangeMode: stringToPtr("restart"),
ChangeSignal: stringToPtr(""),
Splay: timeToPtr(5 * time.Second),
Perms: stringToPtr("0644"),
LeftDelim: stringToPtr("{{"),
RightDelim: stringToPtr("}}"),
Envvars: boolToPtr(true),
VaultGrace: timeToPtr(3 * time.Second),
},
},
},
@ -480,33 +479,33 @@ func TestJobs_Canonicalize(t *testing.T) {
{
name: "periodic",
input: &Job{
ID: helper.StringToPtr("bar"),
ID: stringToPtr("bar"),
Periodic: &PeriodicConfig{},
},
expected: &Job{
Namespace: helper.StringToPtr(DefaultNamespace),
ID: helper.StringToPtr("bar"),
ParentID: helper.StringToPtr(""),
Name: helper.StringToPtr("bar"),
Region: helper.StringToPtr("global"),
Type: helper.StringToPtr("service"),
Priority: helper.IntToPtr(50),
AllAtOnce: helper.BoolToPtr(false),
VaultToken: helper.StringToPtr(""),
Stop: helper.BoolToPtr(false),
Stable: helper.BoolToPtr(false),
Version: helper.Uint64ToPtr(0),
Status: helper.StringToPtr(""),
StatusDescription: helper.StringToPtr(""),
CreateIndex: helper.Uint64ToPtr(0),
ModifyIndex: helper.Uint64ToPtr(0),
JobModifyIndex: helper.Uint64ToPtr(0),
Namespace: stringToPtr(DefaultNamespace),
ID: stringToPtr("bar"),
ParentID: stringToPtr(""),
Name: stringToPtr("bar"),
Region: stringToPtr("global"),
Type: stringToPtr("service"),
Priority: intToPtr(50),
AllAtOnce: boolToPtr(false),
VaultToken: stringToPtr(""),
Stop: boolToPtr(false),
Stable: boolToPtr(false),
Version: uint64ToPtr(0),
Status: stringToPtr(""),
StatusDescription: stringToPtr(""),
CreateIndex: uint64ToPtr(0),
ModifyIndex: uint64ToPtr(0),
JobModifyIndex: uint64ToPtr(0),
Periodic: &PeriodicConfig{
Enabled: helper.BoolToPtr(true),
Spec: helper.StringToPtr(""),
SpecType: helper.StringToPtr(PeriodicSpecCron),
ProhibitOverlap: helper.BoolToPtr(false),
TimeZone: helper.StringToPtr("UTC"),
Enabled: boolToPtr(true),
Spec: stringToPtr(""),
SpecType: stringToPtr(PeriodicSpecCron),
ProhibitOverlap: boolToPtr(false),
TimeZone: stringToPtr("UTC"),
},
},
},
@ -514,29 +513,29 @@ func TestJobs_Canonicalize(t *testing.T) {
{
name: "update_merge",
input: &Job{
Name: helper.StringToPtr("foo"),
ID: helper.StringToPtr("bar"),
ParentID: helper.StringToPtr("lol"),
Name: stringToPtr("foo"),
ID: stringToPtr("bar"),
ParentID: stringToPtr("lol"),
Update: &UpdateStrategy{
Stagger: helper.TimeToPtr(1 * time.Second),
MaxParallel: helper.IntToPtr(1),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(10 * time.Second),
HealthyDeadline: helper.TimeToPtr(6 * time.Minute),
ProgressDeadline: helper.TimeToPtr(7 * time.Minute),
AutoRevert: helper.BoolToPtr(false),
Canary: helper.IntToPtr(0),
Stagger: timeToPtr(1 * time.Second),
MaxParallel: intToPtr(1),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(10 * time.Second),
HealthyDeadline: timeToPtr(6 * time.Minute),
ProgressDeadline: timeToPtr(7 * time.Minute),
AutoRevert: boolToPtr(false),
Canary: intToPtr(0),
},
TaskGroups: []*TaskGroup{
{
Name: helper.StringToPtr("bar"),
Name: stringToPtr("bar"),
Update: &UpdateStrategy{
Stagger: helper.TimeToPtr(2 * time.Second),
MaxParallel: helper.IntToPtr(2),
HealthCheck: helper.StringToPtr("manual"),
MinHealthyTime: helper.TimeToPtr(1 * time.Second),
AutoRevert: helper.BoolToPtr(true),
Canary: helper.IntToPtr(1),
Stagger: timeToPtr(2 * time.Second),
MaxParallel: intToPtr(2),
HealthCheck: stringToPtr("manual"),
MinHealthyTime: timeToPtr(1 * time.Second),
AutoRevert: boolToPtr(true),
Canary: intToPtr(1),
},
Tasks: []*Task{
{
@ -545,7 +544,7 @@ func TestJobs_Canonicalize(t *testing.T) {
},
},
{
Name: helper.StringToPtr("baz"),
Name: stringToPtr("baz"),
Tasks: []*Task{
{
Name: "task1",
@ -555,65 +554,65 @@ func TestJobs_Canonicalize(t *testing.T) {
},
},
expected: &Job{
Namespace: helper.StringToPtr(DefaultNamespace),
ID: helper.StringToPtr("bar"),
Name: helper.StringToPtr("foo"),
Region: helper.StringToPtr("global"),
Type: helper.StringToPtr("service"),
ParentID: helper.StringToPtr("lol"),
Priority: helper.IntToPtr(50),
AllAtOnce: helper.BoolToPtr(false),
VaultToken: helper.StringToPtr(""),
Stop: helper.BoolToPtr(false),
Stable: helper.BoolToPtr(false),
Version: helper.Uint64ToPtr(0),
Status: helper.StringToPtr(""),
StatusDescription: helper.StringToPtr(""),
CreateIndex: helper.Uint64ToPtr(0),
ModifyIndex: helper.Uint64ToPtr(0),
JobModifyIndex: helper.Uint64ToPtr(0),
Namespace: stringToPtr(DefaultNamespace),
ID: stringToPtr("bar"),
Name: stringToPtr("foo"),
Region: stringToPtr("global"),
Type: stringToPtr("service"),
ParentID: stringToPtr("lol"),
Priority: intToPtr(50),
AllAtOnce: boolToPtr(false),
VaultToken: stringToPtr(""),
Stop: boolToPtr(false),
Stable: boolToPtr(false),
Version: uint64ToPtr(0),
Status: stringToPtr(""),
StatusDescription: stringToPtr(""),
CreateIndex: uint64ToPtr(0),
ModifyIndex: uint64ToPtr(0),
JobModifyIndex: uint64ToPtr(0),
Update: &UpdateStrategy{
Stagger: helper.TimeToPtr(1 * time.Second),
MaxParallel: helper.IntToPtr(1),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(10 * time.Second),
HealthyDeadline: helper.TimeToPtr(6 * time.Minute),
ProgressDeadline: helper.TimeToPtr(7 * time.Minute),
AutoRevert: helper.BoolToPtr(false),
Canary: helper.IntToPtr(0),
Stagger: timeToPtr(1 * time.Second),
MaxParallel: intToPtr(1),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(10 * time.Second),
HealthyDeadline: timeToPtr(6 * time.Minute),
ProgressDeadline: timeToPtr(7 * time.Minute),
AutoRevert: boolToPtr(false),
Canary: intToPtr(0),
},
TaskGroups: []*TaskGroup{
{
Name: helper.StringToPtr("bar"),
Count: helper.IntToPtr(1),
Name: stringToPtr("bar"),
Count: intToPtr(1),
EphemeralDisk: &EphemeralDisk{
Sticky: helper.BoolToPtr(false),
Migrate: helper.BoolToPtr(false),
SizeMB: helper.IntToPtr(300),
Sticky: boolToPtr(false),
Migrate: boolToPtr(false),
SizeMB: intToPtr(300),
},
RestartPolicy: &RestartPolicy{
Delay: helper.TimeToPtr(15 * time.Second),
Attempts: helper.IntToPtr(2),
Interval: helper.TimeToPtr(30 * time.Minute),
Mode: helper.StringToPtr("fail"),
Delay: timeToPtr(15 * time.Second),
Attempts: intToPtr(2),
Interval: timeToPtr(30 * time.Minute),
Mode: stringToPtr("fail"),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(0),
Interval: helper.TimeToPtr(0),
DelayFunction: helper.StringToPtr("exponential"),
Delay: helper.TimeToPtr(30 * time.Second),
MaxDelay: helper.TimeToPtr(1 * time.Hour),
Unlimited: helper.BoolToPtr(true),
Attempts: intToPtr(0),
Interval: timeToPtr(0),
DelayFunction: stringToPtr("exponential"),
Delay: timeToPtr(30 * time.Second),
MaxDelay: timeToPtr(1 * time.Hour),
Unlimited: boolToPtr(true),
},
Update: &UpdateStrategy{
Stagger: helper.TimeToPtr(2 * time.Second),
MaxParallel: helper.IntToPtr(2),
HealthCheck: helper.StringToPtr("manual"),
MinHealthyTime: helper.TimeToPtr(1 * time.Second),
HealthyDeadline: helper.TimeToPtr(6 * time.Minute),
ProgressDeadline: helper.TimeToPtr(7 * time.Minute),
AutoRevert: helper.BoolToPtr(true),
Canary: helper.IntToPtr(1),
Stagger: timeToPtr(2 * time.Second),
MaxParallel: intToPtr(2),
HealthCheck: stringToPtr("manual"),
MinHealthyTime: timeToPtr(1 * time.Second),
HealthyDeadline: timeToPtr(6 * time.Minute),
ProgressDeadline: timeToPtr(7 * time.Minute),
AutoRevert: boolToPtr(true),
Canary: intToPtr(1),
},
Migrate: DefaultMigrateStrategy(),
Tasks: []*Task{
@ -621,41 +620,41 @@ func TestJobs_Canonicalize(t *testing.T) {
Name: "task1",
LogConfig: DefaultLogConfig(),
Resources: DefaultResources(),
KillTimeout: helper.TimeToPtr(5 * time.Second),
KillTimeout: timeToPtr(5 * time.Second),
},
},
},
{
Name: helper.StringToPtr("baz"),
Count: helper.IntToPtr(1),
Name: stringToPtr("baz"),
Count: intToPtr(1),
EphemeralDisk: &EphemeralDisk{
Sticky: helper.BoolToPtr(false),
Migrate: helper.BoolToPtr(false),
SizeMB: helper.IntToPtr(300),
Sticky: boolToPtr(false),
Migrate: boolToPtr(false),
SizeMB: intToPtr(300),
},
RestartPolicy: &RestartPolicy{
Delay: helper.TimeToPtr(15 * time.Second),
Attempts: helper.IntToPtr(2),
Interval: helper.TimeToPtr(30 * time.Minute),
Mode: helper.StringToPtr("fail"),
Delay: timeToPtr(15 * time.Second),
Attempts: intToPtr(2),
Interval: timeToPtr(30 * time.Minute),
Mode: stringToPtr("fail"),
},
ReschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(0),
Interval: helper.TimeToPtr(0),
DelayFunction: helper.StringToPtr("exponential"),
Delay: helper.TimeToPtr(30 * time.Second),
MaxDelay: helper.TimeToPtr(1 * time.Hour),
Unlimited: helper.BoolToPtr(true),
Attempts: intToPtr(0),
Interval: timeToPtr(0),
DelayFunction: stringToPtr("exponential"),
Delay: timeToPtr(30 * time.Second),
MaxDelay: timeToPtr(1 * time.Hour),
Unlimited: boolToPtr(true),
},
Update: &UpdateStrategy{
Stagger: helper.TimeToPtr(1 * time.Second),
MaxParallel: helper.IntToPtr(1),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(10 * time.Second),
HealthyDeadline: helper.TimeToPtr(6 * time.Minute),
ProgressDeadline: helper.TimeToPtr(7 * time.Minute),
AutoRevert: helper.BoolToPtr(false),
Canary: helper.IntToPtr(0),
Stagger: timeToPtr(1 * time.Second),
MaxParallel: intToPtr(1),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(10 * time.Second),
HealthyDeadline: timeToPtr(6 * time.Minute),
ProgressDeadline: timeToPtr(7 * time.Minute),
AutoRevert: boolToPtr(false),
Canary: intToPtr(0),
},
Migrate: DefaultMigrateStrategy(),
Tasks: []*Task{
@ -663,7 +662,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Name: "task1",
LogConfig: DefaultLogConfig(),
Resources: DefaultResources(),
KillTimeout: helper.TimeToPtr(5 * time.Second),
KillTimeout: timeToPtr(5 * time.Second),
},
},
},
@ -756,13 +755,13 @@ func TestJobs_Revert(t *testing.T) {
assertWriteMeta(t, wm)
// Fail revert at incorrect enforce
_, _, err = jobs.Revert(*job.ID, 0, helper.Uint64ToPtr(10), nil)
_, _, err = jobs.Revert(*job.ID, 0, uint64ToPtr(10), nil)
if err == nil || !strings.Contains(err.Error(), "enforcing version") {
t.Fatalf("expected enforcement error: %v", err)
}
// Works at correct index
revertResp, wm, err := jobs.Revert(*job.ID, 0, helper.Uint64ToPtr(1), nil)
revertResp, wm, err := jobs.Revert(*job.ID, 0, uint64ToPtr(1), nil)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -1248,11 +1247,11 @@ func TestJobs_NewBatchJob(t *testing.T) {
t.Parallel()
job := NewBatchJob("job1", "myjob", "region1", 5)
expect := &Job{
Region: helper.StringToPtr("region1"),
ID: helper.StringToPtr("job1"),
Name: helper.StringToPtr("myjob"),
Type: helper.StringToPtr(JobTypeBatch),
Priority: helper.IntToPtr(5),
Region: stringToPtr("region1"),
ID: stringToPtr("job1"),
Name: stringToPtr("myjob"),
Type: stringToPtr(JobTypeBatch),
Priority: intToPtr(5),
}
if !reflect.DeepEqual(job, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, job)
@ -1263,11 +1262,11 @@ func TestJobs_NewServiceJob(t *testing.T) {
t.Parallel()
job := NewServiceJob("job1", "myjob", "region1", 5)
expect := &Job{
Region: helper.StringToPtr("region1"),
ID: helper.StringToPtr("job1"),
Name: helper.StringToPtr("myjob"),
Type: helper.StringToPtr(JobTypeService),
Priority: helper.IntToPtr(5),
Region: stringToPtr("region1"),
ID: stringToPtr("job1"),
Name: stringToPtr("myjob"),
Type: stringToPtr(JobTypeService),
Priority: intToPtr(5),
}
if !reflect.DeepEqual(job, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, job)
@ -1413,7 +1412,7 @@ func TestJobs_AddSpread(t *testing.T) {
expect := []*Spread{
{
Attribute: "${meta.rack}",
Weight: helper.IntToPtr(100),
Weight: intToPtr(100),
SpreadTarget: []*SpreadTarget{
{
Value: "r1",
@ -1423,7 +1422,7 @@ func TestJobs_AddSpread(t *testing.T) {
},
{
Attribute: "${node.datacenter}",
Weight: helper.IntToPtr(100),
Weight: intToPtr(100),
SpreadTarget: []*SpreadTarget{
{
Value: "dc1",

View file

@ -6,9 +6,18 @@ import (
"sort"
"strconv"
"time"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
NodeStatusInit = "initializing"
NodeStatusReady = "ready"
NodeStatusDown = "down"
// NodeSchedulingEligible and Ineligible mark the node as eligible or not,
// respectively, for receiving allocations. This is orthogonal to the node
// status being ready.
NodeSchedulingEligible = "eligible"
NodeSchedulingIneligible = "ineligible"
)
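
These scheduling-eligibility constants replace structs.NodeSchedulingEligible and structs.NodeSchedulingIneligible, which ToggleEligibility later in this file maps a boolean onto. A hypothetical sketch of toggling a node's eligibility through that method:

// Illustrative only: mark a node ineligible for new placements, then
// re-enable it, using the api-local constants via ToggleEligibility.
func drainForMaintenance(client *Client, nodeID string) error {
	// SchedulingEligibility becomes NodeSchedulingIneligible.
	if _, err := client.Nodes().ToggleEligibility(nodeID, false, nil); err != nil {
		return err
	}
	// ... perform maintenance ...
	// Restore eligibility (back to NodeSchedulingEligible).
	_, err := client.Nodes().ToggleEligibility(nodeID, true, nil)
	return err
}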
// Nodes is used to query node-related API endpoints
@ -224,7 +233,7 @@ func (n *Nodes) monitorDrainNode(ctx context.Context, nodeID string,
return
}
if node.Status == structs.NodeStatusDown {
if node.Status == NodeStatusDown {
msg := Messagef(MonitorMsgLevelWarn, "Node %q down", nodeID)
select {
case nodeCh <- msg:
@ -295,7 +304,7 @@ func (n *Nodes) monitorDrainAllocs(ctx context.Context, nodeID string, ignoreSys
// Alloc was marked for migration
msg = "marked for migration"
case migrating && (orig.DesiredStatus != a.DesiredStatus) && a.DesiredStatus == structs.AllocDesiredStatusStop:
case migrating && (orig.DesiredStatus != a.DesiredStatus) && a.DesiredStatus == AllocDesiredStatusStop:
// Alloc has already been marked for migration and is now being stopped
msg = "draining"
}
@ -314,12 +323,12 @@ func (n *Nodes) monitorDrainAllocs(ctx context.Context, nodeID string, ignoreSys
}
// Track how many allocs are still running
if ignoreSys && a.Job.Type != nil && *a.Job.Type == structs.JobTypeSystem {
if ignoreSys && a.Job.Type != nil && *a.Job.Type == JobTypeSystem {
continue
}
switch a.ClientStatus {
case structs.AllocClientStatusPending, structs.AllocClientStatusRunning:
case AllocClientStatusPending, AllocClientStatusRunning:
runningAllocs++
}
}
@ -353,9 +362,9 @@ type NodeEligibilityUpdateResponse struct {
// ToggleEligibility is used to update the scheduling eligibility of the node
func (n *Nodes) ToggleEligibility(nodeID string, eligible bool, q *WriteOptions) (*NodeEligibilityUpdateResponse, error) {
e := structs.NodeSchedulingEligible
e := NodeSchedulingEligible
if !eligible {
e = structs.NodeSchedulingIneligible
e = NodeSchedulingIneligible
}
req := &NodeUpdateEligibilityRequest{
@ -662,9 +671,9 @@ func (v *StatValue) String() string {
case v.StringVal != nil:
return *v.StringVal
case v.FloatNumeratorVal != nil:
str := helper.FormatFloat(*v.FloatNumeratorVal, 3)
str := formatFloat(*v.FloatNumeratorVal, 3)
if v.FloatDenominatorVal != nil {
str += " / " + helper.FormatFloat(*v.FloatDenominatorVal, 3)
str += " / " + formatFloat(*v.FloatDenominatorVal, 3)
}
if v.Unit != "" {

View file

@ -9,7 +9,6 @@ import (
"testing"
"time"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
@ -185,8 +184,8 @@ func TestNodes_ToggleDrain(t *testing.T) {
// Check again
out, _, err = nodes.Info(nodeID, nil)
require.Nil(err)
if out.SchedulingEligibility != structs.NodeSchedulingIneligible {
t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, structs.NodeSchedulingIneligible)
if out.SchedulingEligibility != NodeSchedulingIneligible {
t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, NodeSchedulingIneligible)
}
// Toggle off again
@ -203,7 +202,7 @@ func TestNodes_ToggleDrain(t *testing.T) {
if out.DrainStrategy != nil {
t.Fatalf("drain strategy should be unset")
}
if out.SchedulingEligibility != structs.NodeSchedulingEligible {
if out.SchedulingEligibility != NodeSchedulingEligible {
t.Fatalf("should be eligible")
}
}
@ -237,7 +236,7 @@ func TestNodes_ToggleEligibility(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
if out.SchedulingEligibility != structs.NodeSchedulingEligible {
if out.SchedulingEligibility != NodeSchedulingEligible {
t.Fatalf("node should be eligible")
}
@ -253,8 +252,8 @@ func TestNodes_ToggleEligibility(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
if out.SchedulingEligibility != structs.NodeSchedulingIneligible {
t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, structs.NodeSchedulingIneligible)
if out.SchedulingEligibility != NodeSchedulingIneligible {
t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, NodeSchedulingIneligible)
}
// Toggle on
@ -269,8 +268,8 @@ func TestNodes_ToggleEligibility(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
if out.SchedulingEligibility != structs.NodeSchedulingEligible {
t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, structs.NodeSchedulingEligible)
if out.SchedulingEligibility != NodeSchedulingEligible {
t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, NodeSchedulingEligible)
}
if out.DrainStrategy != nil {
t.Fatalf("drain strategy should be unset")
@ -542,69 +541,69 @@ func TestNodeStatValueFormatting(t *testing.T) {
}{
{
"true",
StatValue{BoolVal: helper.BoolToPtr(true)},
StatValue{BoolVal: boolToPtr(true)},
},
{
"false",
StatValue{BoolVal: helper.BoolToPtr(false)},
StatValue{BoolVal: boolToPtr(false)},
},
{
"myvalue",
StatValue{StringVal: helper.StringToPtr("myvalue")},
StatValue{StringVal: stringToPtr("myvalue")},
},
{
"2.718",
StatValue{
FloatNumeratorVal: helper.Float64ToPtr(2.718),
FloatNumeratorVal: float64ToPtr(2.718),
},
},
{
"2.718 / 3.14",
StatValue{
FloatNumeratorVal: helper.Float64ToPtr(2.718),
FloatDenominatorVal: helper.Float64ToPtr(3.14),
FloatNumeratorVal: float64ToPtr(2.718),
FloatDenominatorVal: float64ToPtr(3.14),
},
},
{
"2.718 MHz",
StatValue{
FloatNumeratorVal: helper.Float64ToPtr(2.718),
FloatNumeratorVal: float64ToPtr(2.718),
Unit: "MHz",
},
},
{
"2.718 / 3.14 MHz",
StatValue{
FloatNumeratorVal: helper.Float64ToPtr(2.718),
FloatDenominatorVal: helper.Float64ToPtr(3.14),
FloatNumeratorVal: float64ToPtr(2.718),
FloatDenominatorVal: float64ToPtr(3.14),
Unit: "MHz",
},
},
{
"2",
StatValue{
IntNumeratorVal: helper.Int64ToPtr(2),
IntNumeratorVal: int64ToPtr(2),
},
},
{
"2 / 3",
StatValue{
IntNumeratorVal: helper.Int64ToPtr(2),
IntDenominatorVal: helper.Int64ToPtr(3),
IntNumeratorVal: int64ToPtr(2),
IntDenominatorVal: int64ToPtr(3),
},
},
{
"2 MHz",
StatValue{
IntNumeratorVal: helper.Int64ToPtr(2),
IntNumeratorVal: int64ToPtr(2),
Unit: "MHz",
},
},
{
"2 / 3 MHz",
StatValue{
IntNumeratorVal: helper.Int64ToPtr(2),
IntDenominatorVal: helper.Int64ToPtr(3),
IntNumeratorVal: int64ToPtr(2),
IntDenominatorVal: int64ToPtr(3),
Unit: "MHz",
},
},

View file

@ -2,8 +2,6 @@ package api
import (
"strconv"
"github.com/hashicorp/nomad/helper"
)
// Resources encapsulates the required resources of
@ -46,8 +44,8 @@ func (r *Resources) Canonicalize() {
// and should be kept in sync.
func DefaultResources() *Resources {
return &Resources{
CPU: helper.IntToPtr(100),
MemoryMB: helper.IntToPtr(300),
CPU: intToPtr(100),
MemoryMB: intToPtr(300),
}
}
@ -58,8 +56,8 @@ func DefaultResources() *Resources {
// IN nomad/structs/structs.go and should be kept in sync.
func MinResources() *Resources {
return &Resources{
CPU: helper.IntToPtr(20),
MemoryMB: helper.IntToPtr(10),
CPU: intToPtr(20),
MemoryMB: intToPtr(10),
}
}
@ -103,7 +101,7 @@ type NetworkResource struct {
func (n *NetworkResource) Canonicalize() {
if n.MBits == nil {
n.MBits = helper.IntToPtr(10)
n.MBits = intToPtr(10)
}
}
@ -169,7 +167,7 @@ type Attribute struct {
func (a Attribute) String() string {
switch {
case a.FloatVal != nil:
str := helper.FormatFloat(*a.FloatVal, 3)
str := formatFloat(*a.FloatVal, 3)
if a.Unit != "" {
str += " " + a.Unit
}
@ -223,6 +221,6 @@ type RequestedDevice struct {
func (d *RequestedDevice) Canonicalize() {
if d.Count == nil {
d.Count = helper.Uint64ToPtr(1)
d.Count = uint64ToPtr(1)
}
}

View file

@ -6,9 +6,16 @@ import (
"path/filepath"
"strings"
"time"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
// RestartPolicyModeDelay causes an artificial delay until the next interval is
// reached once the specified number of attempts has been exhausted within the interval.
RestartPolicyModeDelay = "delay"
// RestartPolicyModeFail causes a job to fail if the specified number of
// attempts is reached within an interval.
RestartPolicyModeFail = "fail"
)
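
The restart-policy mode constants likewise replace their structs counterparts. A hypothetical example of a task group opting into the "delay" mode instead of the default "fail":

// Illustrative only: a task group that waits out the interval instead of
// failing when its restart attempts are exhausted.
func exampleDelayGroup() *TaskGroup {
	tg := NewTaskGroup("cache", 1)
	tg.RestartPolicy = &RestartPolicy{
		Attempts: intToPtr(2),
		Interval: timeToPtr(30 * time.Minute),
		Delay:    timeToPtr(15 * time.Second),
		Mode:     stringToPtr(RestartPolicyModeDelay),
	}
	return tg
}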
// MemoryStats holds memory usage related stats
@ -171,32 +178,38 @@ func NewDefaultReschedulePolicy(jobType string) *ReschedulePolicy {
var dp *ReschedulePolicy
switch jobType {
case "service":
// This needs to be in sync with DefaultServiceJobReschedulePolicy
// in nomad/structs/structs.go
dp = &ReschedulePolicy{
Attempts: helper.IntToPtr(structs.DefaultServiceJobReschedulePolicy.Attempts),
Interval: helper.TimeToPtr(structs.DefaultServiceJobReschedulePolicy.Interval),
Delay: helper.TimeToPtr(structs.DefaultServiceJobReschedulePolicy.Delay),
DelayFunction: helper.StringToPtr(structs.DefaultServiceJobReschedulePolicy.DelayFunction),
MaxDelay: helper.TimeToPtr(structs.DefaultServiceJobReschedulePolicy.MaxDelay),
Unlimited: helper.BoolToPtr(structs.DefaultServiceJobReschedulePolicy.Unlimited),
Delay: timeToPtr(30 * time.Second),
DelayFunction: stringToPtr("exponential"),
MaxDelay: timeToPtr(1 * time.Hour),
Unlimited: boolToPtr(true),
Attempts: intToPtr(0),
Interval: timeToPtr(0),
}
case "batch":
// This needs to be in sync with DefaultBatchJobReschedulePolicy
// in nomad/structs/structs.go
dp = &ReschedulePolicy{
Attempts: helper.IntToPtr(structs.DefaultBatchJobReschedulePolicy.Attempts),
Interval: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
DelayFunction: helper.StringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
MaxDelay: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
Unlimited: helper.BoolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
Attempts: intToPtr(1),
Interval: timeToPtr(24 * time.Hour),
Delay: timeToPtr(5 * time.Second),
DelayFunction: stringToPtr("constant"),
MaxDelay: timeToPtr(0),
Unlimited: boolToPtr(false),
}
case "system":
dp = &ReschedulePolicy{
Attempts: helper.IntToPtr(0),
Interval: helper.TimeToPtr(0),
Delay: helper.TimeToPtr(0),
DelayFunction: helper.StringToPtr(""),
MaxDelay: helper.TimeToPtr(0),
Unlimited: helper.BoolToPtr(false),
Attempts: intToPtr(0),
Interval: timeToPtr(0),
Delay: timeToPtr(0),
DelayFunction: stringToPtr(""),
MaxDelay: timeToPtr(0),
Unlimited: boolToPtr(false),
}
}
return dp
@ -244,14 +257,14 @@ func NewSpreadTarget(value string, percent uint32) *SpreadTarget {
func NewSpread(attribute string, weight int, spreadTargets []*SpreadTarget) *Spread {
return &Spread{
Attribute: attribute,
Weight: helper.IntToPtr(weight),
Weight: intToPtr(weight),
SpreadTarget: spreadTargets,
}
}
func (s *Spread) Canonicalize() {
if s.Weight == nil {
s.Weight = helper.IntToPtr(50)
s.Weight = intToPtr(50)
}
}
@ -270,7 +283,7 @@ func (c *CheckRestart) Canonicalize() {
}
if c.Grace == nil {
c.Grace = helper.TimeToPtr(1 * time.Second)
c.Grace = timeToPtr(1 * time.Second)
}
}
@ -382,21 +395,21 @@ type EphemeralDisk struct {
func DefaultEphemeralDisk() *EphemeralDisk {
return &EphemeralDisk{
Sticky: helper.BoolToPtr(false),
Migrate: helper.BoolToPtr(false),
SizeMB: helper.IntToPtr(300),
Sticky: boolToPtr(false),
Migrate: boolToPtr(false),
SizeMB: intToPtr(300),
}
}
func (e *EphemeralDisk) Canonicalize() {
if e.Sticky == nil {
e.Sticky = helper.BoolToPtr(false)
e.Sticky = boolToPtr(false)
}
if e.Migrate == nil {
e.Migrate = helper.BoolToPtr(false)
e.Migrate = boolToPtr(false)
}
if e.SizeMB == nil {
e.SizeMB = helper.IntToPtr(300)
e.SizeMB = intToPtr(300)
}
}
@ -411,10 +424,10 @@ type MigrateStrategy struct {
func DefaultMigrateStrategy() *MigrateStrategy {
return &MigrateStrategy{
MaxParallel: helper.IntToPtr(1),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(10 * time.Second),
HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
MaxParallel: intToPtr(1),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(10 * time.Second),
HealthyDeadline: timeToPtr(5 * time.Minute),
}
}
@ -480,17 +493,17 @@ type TaskGroup struct {
// NewTaskGroup creates a new TaskGroup.
func NewTaskGroup(name string, count int) *TaskGroup {
return &TaskGroup{
Name: helper.StringToPtr(name),
Count: helper.IntToPtr(count),
Name: stringToPtr(name),
Count: intToPtr(count),
}
}
func (g *TaskGroup) Canonicalize(job *Job) {
if g.Name == nil {
g.Name = helper.StringToPtr("")
g.Name = stringToPtr("")
}
if g.Count == nil {
g.Count = helper.IntToPtr(1)
g.Count = intToPtr(1)
}
for _, t := range g.Tasks {
t.Canonicalize(g, job)
@ -556,18 +569,22 @@ func (g *TaskGroup) Canonicalize(job *Job) {
var defaultRestartPolicy *RestartPolicy
switch *job.Type {
case "service", "system":
// This needs to be in sync with DefaultServiceJobRestartPolicy
// in nomad/structs/structs.go
defaultRestartPolicy = &RestartPolicy{
Delay: helper.TimeToPtr(structs.DefaultServiceJobRestartPolicy.Delay),
Attempts: helper.IntToPtr(structs.DefaultServiceJobRestartPolicy.Attempts),
Interval: helper.TimeToPtr(structs.DefaultServiceJobRestartPolicy.Interval),
Mode: helper.StringToPtr(structs.DefaultServiceJobRestartPolicy.Mode),
Delay: timeToPtr(15 * time.Second),
Attempts: intToPtr(2),
Interval: timeToPtr(30 * time.Minute),
Mode: stringToPtr(RestartPolicyModeFail),
}
default:
// This needs to be in sync with DefaultBatchJobRestartPolicy
// in nomad/structs/structs.go
defaultRestartPolicy = &RestartPolicy{
Delay: helper.TimeToPtr(structs.DefaultBatchJobRestartPolicy.Delay),
Attempts: helper.IntToPtr(structs.DefaultBatchJobRestartPolicy.Attempts),
Interval: helper.TimeToPtr(structs.DefaultBatchJobRestartPolicy.Interval),
Mode: helper.StringToPtr(structs.DefaultBatchJobRestartPolicy.Mode),
Delay: timeToPtr(15 * time.Second),
Attempts: intToPtr(3),
Interval: timeToPtr(24 * time.Hour),
Mode: stringToPtr(RestartPolicyModeFail),
}
}
@ -629,17 +646,17 @@ type LogConfig struct {
func DefaultLogConfig() *LogConfig {
return &LogConfig{
MaxFiles: helper.IntToPtr(10),
MaxFileSizeMB: helper.IntToPtr(10),
MaxFiles: intToPtr(10),
MaxFileSizeMB: intToPtr(10),
}
}
func (l *LogConfig) Canonicalize() {
if l.MaxFiles == nil {
l.MaxFiles = helper.IntToPtr(10)
l.MaxFiles = intToPtr(10)
}
if l.MaxFileSizeMB == nil {
l.MaxFileSizeMB = helper.IntToPtr(10)
l.MaxFileSizeMB = intToPtr(10)
}
}
@ -677,7 +694,7 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
}
t.Resources.Canonicalize()
if t.KillTimeout == nil {
t.KillTimeout = helper.TimeToPtr(5 * time.Second)
t.KillTimeout = timeToPtr(5 * time.Second)
}
if t.LogConfig == nil {
t.LogConfig = DefaultLogConfig()
@ -708,11 +725,11 @@ type TaskArtifact struct {
func (a *TaskArtifact) Canonicalize() {
if a.GetterMode == nil {
a.GetterMode = helper.StringToPtr("any")
a.GetterMode = stringToPtr("any")
}
if a.GetterSource == nil {
// Shouldn't be possible, but we don't want to panic
a.GetterSource = helper.StringToPtr("")
a.GetterSource = stringToPtr("")
}
if a.RelativeDest == nil {
switch *a.GetterMode {
@ -724,7 +741,7 @@ func (a *TaskArtifact) Canonicalize() {
a.RelativeDest = &dest
default:
// Default to a directory
a.RelativeDest = helper.StringToPtr("local/")
a.RelativeDest = stringToPtr("local/")
}
}
}
@ -745,44 +762,44 @@ type Template struct {
func (tmpl *Template) Canonicalize() {
if tmpl.SourcePath == nil {
tmpl.SourcePath = helper.StringToPtr("")
tmpl.SourcePath = stringToPtr("")
}
if tmpl.DestPath == nil {
tmpl.DestPath = helper.StringToPtr("")
tmpl.DestPath = stringToPtr("")
}
if tmpl.EmbeddedTmpl == nil {
tmpl.EmbeddedTmpl = helper.StringToPtr("")
tmpl.EmbeddedTmpl = stringToPtr("")
}
if tmpl.ChangeMode == nil {
tmpl.ChangeMode = helper.StringToPtr("restart")
tmpl.ChangeMode = stringToPtr("restart")
}
if tmpl.ChangeSignal == nil {
if *tmpl.ChangeMode == "signal" {
tmpl.ChangeSignal = helper.StringToPtr("SIGHUP")
tmpl.ChangeSignal = stringToPtr("SIGHUP")
} else {
tmpl.ChangeSignal = helper.StringToPtr("")
tmpl.ChangeSignal = stringToPtr("")
}
} else {
sig := *tmpl.ChangeSignal
tmpl.ChangeSignal = helper.StringToPtr(strings.ToUpper(sig))
tmpl.ChangeSignal = stringToPtr(strings.ToUpper(sig))
}
if tmpl.Splay == nil {
tmpl.Splay = helper.TimeToPtr(5 * time.Second)
tmpl.Splay = timeToPtr(5 * time.Second)
}
if tmpl.Perms == nil {
tmpl.Perms = helper.StringToPtr("0644")
tmpl.Perms = stringToPtr("0644")
}
if tmpl.LeftDelim == nil {
tmpl.LeftDelim = helper.StringToPtr("{{")
tmpl.LeftDelim = stringToPtr("{{")
}
if tmpl.RightDelim == nil {
tmpl.RightDelim = helper.StringToPtr("}}")
tmpl.RightDelim = stringToPtr("}}")
}
if tmpl.Envvars == nil {
tmpl.Envvars = helper.BoolToPtr(false)
tmpl.Envvars = boolToPtr(false)
}
if tmpl.VaultGrace == nil {
tmpl.VaultGrace = helper.TimeToPtr(15 * time.Second)
tmpl.VaultGrace = timeToPtr(15 * time.Second)
}
}
@ -795,13 +812,13 @@ type Vault struct {
func (v *Vault) Canonicalize() {
if v.Env == nil {
v.Env = helper.BoolToPtr(true)
v.Env = boolToPtr(true)
}
if v.ChangeMode == nil {
v.ChangeMode = helper.StringToPtr("restart")
v.ChangeMode = stringToPtr("restart")
}
if v.ChangeSignal == nil {
v.ChangeSignal = helper.StringToPtr("SIGHUP")
v.ChangeSignal = stringToPtr("SIGHUP")
}
}
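
The canonicalization code above fills in every unset Template and Vault field; a hypothetical sketch of the resulting Template defaults:

// Illustrative only: Canonicalize fills the remaining Template fields with
// the defaults set above (restart change mode, 0644 perms, {{ }} delimiters,
// a 5s splay and a 15s Vault grace period).
func exampleTemplateDefaults() *Template {
	tmpl := &Template{
		EmbeddedTmpl: stringToPtr("FOO=bar\n"),
		DestPath:     stringToPtr("local/file.env"),
	}
	tmpl.Canonicalize()
	return tmpl // e.g. *tmpl.ChangeMode == "restart", *tmpl.Perms == "0644"
}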

View file

@ -6,7 +6,6 @@ import (
"testing"
"time"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -16,8 +15,8 @@ func TestTaskGroup_NewTaskGroup(t *testing.T) {
t.Parallel()
grp := NewTaskGroup("grp1", 2)
expect := &TaskGroup{
Name: helper.StringToPtr("grp1"),
Count: helper.IntToPtr(2),
Name: stringToPtr("grp1"),
Count: intToPtr(2),
}
if !reflect.DeepEqual(grp, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, grp)
@ -144,7 +143,7 @@ func TestTaskGroup_AddSpread(t *testing.T) {
expect := []*Spread{
{
Attribute: "${meta.rack}",
Weight: helper.IntToPtr(100),
Weight: intToPtr(100),
SpreadTarget: []*SpreadTarget{
{
Value: "r1",
@ -154,7 +153,7 @@ func TestTaskGroup_AddSpread(t *testing.T) {
},
{
Attribute: "${node.datacenter}",
Weight: helper.IntToPtr(100),
Weight: intToPtr(100),
SpreadTarget: []*SpreadTarget{
{
Value: "dc1",
@ -264,13 +263,13 @@ func TestTask_Require(t *testing.T) {
// Create some require resources
resources := &Resources{
CPU: helper.IntToPtr(1250),
MemoryMB: helper.IntToPtr(128),
DiskMB: helper.IntToPtr(2048),
CPU: intToPtr(1250),
MemoryMB: intToPtr(128),
DiskMB: intToPtr(2048),
Networks: []*NetworkResource{
{
CIDR: "0.0.0.0/0",
MBits: helper.IntToPtr(100),
MBits: intToPtr(100),
ReservedPorts: []Port{{"", 80}, {"", 443}},
},
},
@ -358,8 +357,8 @@ func TestTask_AddAffinity(t *testing.T) {
func TestTask_Artifact(t *testing.T) {
t.Parallel()
a := TaskArtifact{
GetterSource: helper.StringToPtr("http://localhost/foo.txt"),
GetterMode: helper.StringToPtr("file"),
GetterSource: stringToPtr("http://localhost/foo.txt"),
GetterMode: stringToPtr("file"),
}
a.Canonicalize()
if *a.GetterMode != "file" {
@ -373,21 +372,21 @@ func TestTask_Artifact(t *testing.T) {
// Ensures no regression on https://github.com/hashicorp/nomad/issues/3132
func TestTaskGroup_Canonicalize_Update(t *testing.T) {
job := &Job{
ID: helper.StringToPtr("test"),
ID: stringToPtr("test"),
Update: &UpdateStrategy{
AutoRevert: helper.BoolToPtr(false),
Canary: helper.IntToPtr(0),
HealthCheck: helper.StringToPtr(""),
HealthyDeadline: helper.TimeToPtr(0),
ProgressDeadline: helper.TimeToPtr(0),
MaxParallel: helper.IntToPtr(0),
MinHealthyTime: helper.TimeToPtr(0),
Stagger: helper.TimeToPtr(0),
AutoRevert: boolToPtr(false),
Canary: intToPtr(0),
HealthCheck: stringToPtr(""),
HealthyDeadline: timeToPtr(0),
ProgressDeadline: timeToPtr(0),
MaxParallel: intToPtr(0),
MinHealthyTime: timeToPtr(0),
Stagger: timeToPtr(0),
},
}
job.Canonicalize()
tg := &TaskGroup{
Name: helper.StringToPtr("foo"),
Name: stringToPtr("foo"),
}
tg.Canonicalize(job)
assert.Nil(t, tg.Update)
@ -408,130 +407,130 @@ func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) {
jobReschedulePolicy: nil,
taskReschedulePolicy: nil,
expected: &ReschedulePolicy{
Attempts: helper.IntToPtr(structs.DefaultBatchJobReschedulePolicy.Attempts),
Interval: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
DelayFunction: helper.StringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
MaxDelay: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
Unlimited: helper.BoolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
Attempts: intToPtr(structs.DefaultBatchJobReschedulePolicy.Attempts),
Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
DelayFunction: stringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
MaxDelay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
Unlimited: boolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
},
},
{
desc: "Empty job reschedule policy",
jobReschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(0),
Interval: helper.TimeToPtr(0),
Delay: helper.TimeToPtr(0),
MaxDelay: helper.TimeToPtr(0),
DelayFunction: helper.StringToPtr(""),
Unlimited: helper.BoolToPtr(false),
Attempts: intToPtr(0),
Interval: timeToPtr(0),
Delay: timeToPtr(0),
MaxDelay: timeToPtr(0),
DelayFunction: stringToPtr(""),
Unlimited: boolToPtr(false),
},
taskReschedulePolicy: nil,
expected: &ReschedulePolicy{
Attempts: helper.IntToPtr(0),
Interval: helper.TimeToPtr(0),
Delay: helper.TimeToPtr(0),
MaxDelay: helper.TimeToPtr(0),
DelayFunction: helper.StringToPtr(""),
Unlimited: helper.BoolToPtr(false),
Attempts: intToPtr(0),
Interval: timeToPtr(0),
Delay: timeToPtr(0),
MaxDelay: timeToPtr(0),
DelayFunction: stringToPtr(""),
Unlimited: boolToPtr(false),
},
},
{
desc: "Inherit from job",
jobReschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(1),
Interval: helper.TimeToPtr(20 * time.Second),
Delay: helper.TimeToPtr(20 * time.Second),
MaxDelay: helper.TimeToPtr(10 * time.Minute),
DelayFunction: helper.StringToPtr("constant"),
Unlimited: helper.BoolToPtr(false),
Attempts: intToPtr(1),
Interval: timeToPtr(20 * time.Second),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
taskReschedulePolicy: nil,
expected: &ReschedulePolicy{
Attempts: helper.IntToPtr(1),
Interval: helper.TimeToPtr(20 * time.Second),
Delay: helper.TimeToPtr(20 * time.Second),
MaxDelay: helper.TimeToPtr(10 * time.Minute),
DelayFunction: helper.StringToPtr("constant"),
Unlimited: helper.BoolToPtr(false),
Attempts: intToPtr(1),
Interval: timeToPtr(20 * time.Second),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
},
{
desc: "Set in task",
jobReschedulePolicy: nil,
taskReschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(5),
Interval: helper.TimeToPtr(2 * time.Minute),
Delay: helper.TimeToPtr(20 * time.Second),
MaxDelay: helper.TimeToPtr(10 * time.Minute),
DelayFunction: helper.StringToPtr("constant"),
Unlimited: helper.BoolToPtr(false),
Attempts: intToPtr(5),
Interval: timeToPtr(2 * time.Minute),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
expected: &ReschedulePolicy{
Attempts: helper.IntToPtr(5),
Interval: helper.TimeToPtr(2 * time.Minute),
Delay: helper.TimeToPtr(20 * time.Second),
MaxDelay: helper.TimeToPtr(10 * time.Minute),
DelayFunction: helper.StringToPtr("constant"),
Unlimited: helper.BoolToPtr(false),
Attempts: intToPtr(5),
Interval: timeToPtr(2 * time.Minute),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
},
{
desc: "Merge from job",
jobReschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(1),
Delay: helper.TimeToPtr(20 * time.Second),
MaxDelay: helper.TimeToPtr(10 * time.Minute),
Attempts: intToPtr(1),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
},
taskReschedulePolicy: &ReschedulePolicy{
Interval: helper.TimeToPtr(5 * time.Minute),
DelayFunction: helper.StringToPtr("constant"),
Unlimited: helper.BoolToPtr(false),
Interval: timeToPtr(5 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
expected: &ReschedulePolicy{
Attempts: helper.IntToPtr(1),
Interval: helper.TimeToPtr(5 * time.Minute),
Delay: helper.TimeToPtr(20 * time.Second),
MaxDelay: helper.TimeToPtr(10 * time.Minute),
DelayFunction: helper.StringToPtr("constant"),
Unlimited: helper.BoolToPtr(false),
Attempts: intToPtr(1),
Interval: timeToPtr(5 * time.Minute),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
},
{
desc: "Override from group",
jobReschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(1),
MaxDelay: helper.TimeToPtr(10 * time.Second),
Attempts: intToPtr(1),
MaxDelay: timeToPtr(10 * time.Second),
},
taskReschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(5),
Delay: helper.TimeToPtr(20 * time.Second),
MaxDelay: helper.TimeToPtr(20 * time.Minute),
DelayFunction: helper.StringToPtr("constant"),
Unlimited: helper.BoolToPtr(false),
Attempts: intToPtr(5),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(20 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
expected: &ReschedulePolicy{
Attempts: helper.IntToPtr(5),
Interval: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: helper.TimeToPtr(20 * time.Second),
MaxDelay: helper.TimeToPtr(20 * time.Minute),
DelayFunction: helper.StringToPtr("constant"),
Unlimited: helper.BoolToPtr(false),
Attempts: intToPtr(5),
Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(20 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
},
{
desc: "Attempts from job, default interval",
jobReschedulePolicy: &ReschedulePolicy{
Attempts: helper.IntToPtr(1),
Attempts: intToPtr(1),
},
taskReschedulePolicy: nil,
expected: &ReschedulePolicy{
Attempts: helper.IntToPtr(1),
Interval: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
DelayFunction: helper.StringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
MaxDelay: helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
Unlimited: helper.BoolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
Attempts: intToPtr(1),
Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
DelayFunction: stringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
MaxDelay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
Unlimited: boolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
},
},
}
@ -539,13 +538,13 @@ func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
job := &Job{
ID: helper.StringToPtr("test"),
ID: stringToPtr("test"),
Reschedule: tc.jobReschedulePolicy,
Type: helper.StringToPtr(JobTypeBatch),
Type: stringToPtr(JobTypeBatch),
}
job.Canonicalize()
tg := &TaskGroup{
Name: helper.StringToPtr("foo"),
Name: stringToPtr("foo"),
ReschedulePolicy: tc.taskReschedulePolicy,
}
tg.Canonicalize(job)
@ -578,44 +577,44 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
jobMigrate: nil,
taskMigrate: nil,
expected: &MigrateStrategy{
MaxParallel: helper.IntToPtr(1),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(10 * time.Second),
HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
MaxParallel: intToPtr(1),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(10 * time.Second),
HealthyDeadline: timeToPtr(5 * time.Minute),
},
},
{
desc: "Empty job migrate strategy",
jobType: "service",
jobMigrate: &MigrateStrategy{
MaxParallel: helper.IntToPtr(0),
HealthCheck: helper.StringToPtr(""),
MinHealthyTime: helper.TimeToPtr(0),
HealthyDeadline: helper.TimeToPtr(0),
MaxParallel: intToPtr(0),
HealthCheck: stringToPtr(""),
MinHealthyTime: timeToPtr(0),
HealthyDeadline: timeToPtr(0),
},
taskMigrate: nil,
expected: &MigrateStrategy{
MaxParallel: helper.IntToPtr(0),
HealthCheck: helper.StringToPtr(""),
MinHealthyTime: helper.TimeToPtr(0),
HealthyDeadline: helper.TimeToPtr(0),
MaxParallel: intToPtr(0),
HealthCheck: stringToPtr(""),
MinHealthyTime: timeToPtr(0),
HealthyDeadline: timeToPtr(0),
},
},
{
desc: "Inherit from job",
jobType: "service",
jobMigrate: &MigrateStrategy{
MaxParallel: helper.IntToPtr(3),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(2),
HealthyDeadline: helper.TimeToPtr(2),
MaxParallel: intToPtr(3),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(2),
HealthyDeadline: timeToPtr(2),
},
taskMigrate: nil,
expected: &MigrateStrategy{
MaxParallel: helper.IntToPtr(3),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(2),
HealthyDeadline: helper.TimeToPtr(2),
MaxParallel: intToPtr(3),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(2),
HealthyDeadline: timeToPtr(2),
},
},
{
@ -623,67 +622,67 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
jobType: "service",
jobMigrate: nil,
taskMigrate: &MigrateStrategy{
MaxParallel: helper.IntToPtr(3),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(2),
HealthyDeadline: helper.TimeToPtr(2),
MaxParallel: intToPtr(3),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(2),
HealthyDeadline: timeToPtr(2),
},
expected: &MigrateStrategy{
MaxParallel: helper.IntToPtr(3),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(2),
HealthyDeadline: helper.TimeToPtr(2),
MaxParallel: intToPtr(3),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(2),
HealthyDeadline: timeToPtr(2),
},
},
{
desc: "Merge from job",
jobType: "service",
jobMigrate: &MigrateStrategy{
MaxParallel: helper.IntToPtr(11),
MaxParallel: intToPtr(11),
},
taskMigrate: &MigrateStrategy{
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(2),
HealthyDeadline: helper.TimeToPtr(2),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(2),
HealthyDeadline: timeToPtr(2),
},
expected: &MigrateStrategy{
MaxParallel: helper.IntToPtr(11),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(2),
HealthyDeadline: helper.TimeToPtr(2),
MaxParallel: intToPtr(11),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(2),
HealthyDeadline: timeToPtr(2),
},
},
{
desc: "Override from group",
jobType: "service",
jobMigrate: &MigrateStrategy{
MaxParallel: helper.IntToPtr(11),
MaxParallel: intToPtr(11),
},
taskMigrate: &MigrateStrategy{
MaxParallel: helper.IntToPtr(5),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(2),
HealthyDeadline: helper.TimeToPtr(2),
MaxParallel: intToPtr(5),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(2),
HealthyDeadline: timeToPtr(2),
},
expected: &MigrateStrategy{
MaxParallel: helper.IntToPtr(5),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(2),
HealthyDeadline: helper.TimeToPtr(2),
MaxParallel: intToPtr(5),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(2),
HealthyDeadline: timeToPtr(2),
},
},
{
desc: "Parallel from job, defaulting",
jobType: "service",
jobMigrate: &MigrateStrategy{
MaxParallel: helper.IntToPtr(5),
MaxParallel: intToPtr(5),
},
taskMigrate: nil,
expected: &MigrateStrategy{
MaxParallel: helper.IntToPtr(5),
HealthCheck: helper.StringToPtr("checks"),
MinHealthyTime: helper.TimeToPtr(10 * time.Second),
HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
MaxParallel: intToPtr(5),
HealthCheck: stringToPtr("checks"),
MinHealthyTime: timeToPtr(10 * time.Second),
HealthyDeadline: timeToPtr(5 * time.Minute),
},
},
}
@ -691,13 +690,13 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
job := &Job{
ID: helper.StringToPtr("test"),
ID: stringToPtr("test"),
Migrate: tc.jobMigrate,
Type: helper.StringToPtr(tc.jobType),
Type: stringToPtr(tc.jobType),
}
job.Canonicalize()
tg := &TaskGroup{
Name: helper.StringToPtr("foo"),
Name: stringToPtr("foo"),
Migrate: tc.taskMigrate,
}
tg.Canonicalize(job)
@ -709,13 +708,13 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
// TestService_CheckRestart asserts Service.CheckRestart settings are properly
// inherited by Checks.
func TestService_CheckRestart(t *testing.T) {
job := &Job{Name: helper.StringToPtr("job")}
tg := &TaskGroup{Name: helper.StringToPtr("group")}
job := &Job{Name: stringToPtr("job")}
tg := &TaskGroup{Name: stringToPtr("group")}
task := &Task{Name: "task"}
service := &Service{
CheckRestart: &CheckRestart{
Limit: 11,
Grace: helper.TimeToPtr(11 * time.Second),
Grace: timeToPtr(11 * time.Second),
IgnoreWarnings: true,
},
Checks: []ServiceCheck{
@ -723,7 +722,7 @@ func TestService_CheckRestart(t *testing.T) {
Name: "all-set",
CheckRestart: &CheckRestart{
Limit: 22,
Grace: helper.TimeToPtr(22 * time.Second),
Grace: timeToPtr(22 * time.Second),
IgnoreWarnings: true,
},
},
@ -731,7 +730,7 @@ func TestService_CheckRestart(t *testing.T) {
Name: "some-set",
CheckRestart: &CheckRestart{
Limit: 33,
Grace: helper.TimeToPtr(33 * time.Second),
Grace: timeToPtr(33 * time.Second),
},
},
{
@ -757,12 +756,12 @@ func TestService_CheckRestart(t *testing.T) {
// TestSpread_Canonicalize asserts that the spread stanza is canonicalized correctly
func TestSpread_Canonicalize(t *testing.T) {
job := &Job{
ID: helper.StringToPtr("test"),
Type: helper.StringToPtr("batch"),
ID: stringToPtr("test"),
Type: stringToPtr("batch"),
}
job.Canonicalize()
tg := &TaskGroup{
Name: helper.StringToPtr("foo"),
Name: stringToPtr("foo"),
}
type testCase struct {
desc string
@ -782,7 +781,7 @@ func TestSpread_Canonicalize(t *testing.T) {
"Zero spread",
&Spread{
Attribute: "test",
Weight: helper.IntToPtr(0),
Weight: intToPtr(0),
},
0,
},
@ -790,7 +789,7 @@ func TestSpread_Canonicalize(t *testing.T) {
"Non Zero spread",
&Spread{
Attribute: "test",
Weight: helper.IntToPtr(100),
Weight: intToPtr(100),
},
100,
},

View file

@ -2,8 +2,6 @@ package api
import (
"testing"
"github.com/hashicorp/nomad/helper"
)
func assertQueryMeta(t *testing.T, qm *QueryMeta) {
@ -27,18 +25,18 @@ func testJob() *Job {
task := NewTask("task1", "exec").
SetConfig("command", "/bin/sleep").
Require(&Resources{
CPU: helper.IntToPtr(100),
MemoryMB: helper.IntToPtr(256),
CPU: intToPtr(100),
MemoryMB: intToPtr(256),
}).
SetLogConfig(&LogConfig{
MaxFiles: helper.IntToPtr(1),
MaxFileSizeMB: helper.IntToPtr(2),
MaxFiles: intToPtr(1),
MaxFileSizeMB: intToPtr(2),
})
group := NewTaskGroup("group1", 1).
AddTask(task).
RequireDisk(&EphemeralDisk{
SizeMB: helper.IntToPtr(25),
SizeMB: intToPtr(25),
})
job := NewBatchJob("job1", "redis", "region1", 1).
@ -50,9 +48,9 @@ func testJob() *Job {
func testPeriodicJob() *Job {
job := testJob().AddPeriodicConfig(&PeriodicConfig{
Enabled: helper.BoolToPtr(true),
Spec: helper.StringToPtr("*/30 * * * *"),
SpecType: helper.StringToPtr("cron"),
Enabled: boolToPtr(true),
Spec: stringToPtr("*/30 * * * *"),
SpecType: stringToPtr("cron"),
})
return job
}
@ -72,10 +70,23 @@ func testQuotaSpec() *QuotaSpec {
{
Region: "global",
RegionLimit: &Resources{
CPU: helper.IntToPtr(2000),
MemoryMB: helper.IntToPtr(2000),
CPU: intToPtr(2000),
MemoryMB: intToPtr(2000),
},
},
},
}
}
// conversion utils only used for testing
// added here to avoid a linter warning
// int64ToPtr returns the pointer to an int64
func int64ToPtr(i int64) *int64 {
return &i
}
// float64ToPtr returns the pointer to a float64
func float64ToPtr(f float64) *float64 {
return &f
}

53
api/utils.go Normal file
View file

@ -0,0 +1,53 @@
package api
import (
"strconv"
"strings"
"time"
)
// boolToPtr returns the pointer to a boolean
func boolToPtr(b bool) *bool {
return &b
}
// intToPtr returns the pointer to an int
func intToPtr(i int) *int {
return &i
}
// uint64ToPtr returns the pointer to a uint64
func uint64ToPtr(u uint64) *uint64 {
return &u
}
// stringToPtr returns the pointer to a string
func stringToPtr(str string) *string {
return &str
}
// timeToPtr returns the pointer to a time duration
func timeToPtr(t time.Duration) *time.Duration {
return &t
}
// formatFloat converts the floating-point number f to a string,
// after rounding it to the passed unit.
//
// Uses 'f' format (-ddd.dddddd, no exponent), and uses at most
// maxPrec digits after the decimal point.
func formatFloat(f float64, maxPrec int) string {
v := strconv.FormatFloat(f, 'f', -1, 64)
idx := strings.LastIndex(v, ".")
if idx == -1 {
return v
}
sublen := idx + maxPrec + 1
if sublen > len(v) {
sublen = len(v)
}
return v[:sublen]
}
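Because these helpers are unexported, code outside the api package is unaffected: external consumers keep using github.com/hashicorp/nomad/helper or define their own one-line constructors, while the api package itself no longer needs the helper import. A minimal sketch of such a consumer (strPtr and intPtr are hypothetical names, not part of the api package):

package main

import "github.com/hashicorp/nomad/api"

// strPtr and intPtr are stand-ins a consumer might define locally; they are
// not provided by the api package.
func strPtr(s string) *string { return &s }
func intPtr(i int) *int       { return &i }

func main() {
	job := &api.Job{
		ID:       strPtr("example"),
		Name:     strPtr("example"),
		Priority: intPtr(50),
	}
	job.Canonicalize()
}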

39
api/utils_test.go Normal file
View file

@ -0,0 +1,39 @@
package api
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestFormatRoundedFloat(t *testing.T) {
cases := []struct {
input float64
expected string
}{
{
1323,
"1323",
},
{
10.321,
"10.321",
},
{
100000.31324324,
"100000.313",
},
{
100000.3,
"100000.3",
},
{
0.7654321,
"0.765",
},
}
for _, c := range cases {
require.Equal(t, c.expected, formatFloat(c.input, 3))
}
}

View file

@ -130,7 +130,7 @@ func TestHTTP_JobsRegister(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := api.MockJob()
job := MockJob()
args := api.JobRegisterRequest{
Job: job,
WriteRequest: api.WriteRequest{Region: "global"},
@ -185,7 +185,7 @@ func TestHTTP_JobsRegister_ACL(t *testing.T) {
t.Parallel()
httpACLTest(t, nil, func(s *TestAgent) {
// Create the job
job := api.MockJob()
job := MockJob()
args := api.JobRegisterRequest{
Job: job,
WriteRequest: api.WriteRequest{
@ -215,7 +215,7 @@ func TestHTTP_JobsRegister_Defaulting(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := api.MockJob()
job := MockJob()
// Do not set its priority
job.Priority = nil
@ -411,7 +411,7 @@ func TestHTTP_JobUpdate(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := api.MockJob()
job := MockJob()
args := api.JobRegisterRequest{
Job: job,
WriteRequest: api.WriteRequest{
@ -985,7 +985,7 @@ func TestHTTP_JobPlan(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := api.MockJob()
job := MockJob()
args := api.JobPlanRequest{
Job: job,
Diff: true,

View file

@ -1,14 +1,15 @@
package api
package agent
import (
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/uuid"
)
func MockJob() *Job {
job := &Job{
func MockJob() *api.Job {
job := &api.Job{
Region: helper.StringToPtr("global"),
ID: helper.StringToPtr(uuid.Generate()),
Name: helper.StringToPtr("my-job"),
@ -16,27 +17,27 @@ func MockJob() *Job {
Priority: helper.IntToPtr(50),
AllAtOnce: helper.BoolToPtr(false),
Datacenters: []string{"dc1"},
Constraints: []*Constraint{
Constraints: []*api.Constraint{
{
LTarget: "${attr.kernel.name}",
RTarget: "linux",
Operand: "=",
},
},
TaskGroups: []*TaskGroup{
TaskGroups: []*api.TaskGroup{
{
Name: helper.StringToPtr("web"),
Count: helper.IntToPtr(10),
EphemeralDisk: &EphemeralDisk{
EphemeralDisk: &api.EphemeralDisk{
SizeMB: helper.IntToPtr(150),
},
RestartPolicy: &RestartPolicy{
RestartPolicy: &api.RestartPolicy{
Attempts: helper.IntToPtr(3),
Interval: helper.TimeToPtr(10 * time.Minute),
Delay: helper.TimeToPtr(1 * time.Minute),
Mode: helper.StringToPtr("delay"),
},
Tasks: []*Task{
Tasks: []*api.Task{
{
Name: "web",
Driver: "exec",
@ -46,12 +47,12 @@ func MockJob() *Job {
Env: map[string]string{
"FOO": "bar",
},
Services: []*Service{
Services: []*api.Service{
{
Name: "${TASK}-frontend",
PortLabel: "http",
Tags: []string{"pci:${meta.pci-dss}", "datacenter:${node.datacenter}"},
Checks: []ServiceCheck{
Checks: []api.ServiceCheck{
{
Name: "check-table",
Type: "script",
@ -67,14 +68,14 @@ func MockJob() *Job {
PortLabel: "admin",
},
},
LogConfig: DefaultLogConfig(),
Resources: &Resources{
LogConfig: api.DefaultLogConfig(),
Resources: &api.Resources{
CPU: helper.IntToPtr(500),
MemoryMB: helper.IntToPtr(256),
Networks: []*NetworkResource{
Networks: []*api.NetworkResource{
{
MBits: helper.IntToPtr(50),
DynamicPorts: []Port{{Label: "http"}, {Label: "admin"}},
DynamicPorts: []api.Port{{Label: "http"}, {Label: "admin"}},
},
},
},
@ -98,10 +99,10 @@ func MockJob() *Job {
return job
}
func MockPeriodicJob() *Job {
func MockPeriodicJob() *api.Job {
j := MockJob()
j.Type = helper.StringToPtr("batch")
j.Periodic = &PeriodicConfig{
j.Periodic = &api.PeriodicConfig{
Enabled: helper.BoolToPtr(true),
SpecType: helper.StringToPtr("cron"),
Spec: helper.StringToPtr("*/30 * * * *"),

View file

@ -4,8 +4,6 @@ import (
"crypto/sha512"
"fmt"
"regexp"
"strconv"
"strings"
"time"
multierror "github.com/hashicorp/go-multierror"
@ -359,24 +357,3 @@ func CheckHCLKeys(node ast.Node, valid []string) error {
return result
}
// FormatFloat converts the floating-point number f to a string,
// after rounding it to the passed unit.
//
// Uses 'f' format (-ddd.dddddd, no exponent), and uses at most
// maxPrec digits after the decimal point.
func FormatFloat(f float64, maxPrec int) string {
v := strconv.FormatFloat(f, 'f', -1, 64)
idx := strings.LastIndex(v, ".")
if idx == -1 {
return v
}
sublen := idx + maxPrec + 1
if sublen > len(v) {
sublen = len(v)
}
return v[:sublen]
}

View file

@ -4,8 +4,6 @@ import (
"reflect"
"sort"
"testing"
"github.com/stretchr/testify/require"
)
func TestSliceStringIsSubset(t *testing.T) {
@ -89,35 +87,3 @@ func BenchmarkCleanEnvVar(b *testing.B) {
CleanEnvVar(in, replacement)
}
}
func TestFormatRoundedFloat(t *testing.T) {
cases := []struct {
input float64
expected string
}{
{
1323,
"1323",
},
{
10.321,
"10.321",
},
{
100000.31324324,
"100000.313",
},
{
100000.3,
"100000.3",
},
{
0.7654321,
"0.765",
},
}
for _, c := range cases {
require.Equal(t, c.expected, FormatFloat(c.input, 3))
}
}

View file

@ -4017,6 +4017,9 @@ func (d *DispatchPayloadConfig) Validate() error {
}
var (
// These default restart policies need to be in sync with
// Canonicalize in api/tasks.go
DefaultServiceJobRestartPolicy = RestartPolicy{
Delay: 15 * time.Second,
Attempts: 2,
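The comment added above (and the matching one in the next hunk) asks that these defaults stay aligned with the api package. As an illustration only (not part of this change; the file placement and test name are assumed), a guard test along these lines could enforce the restart-policy half of that invariant by canonicalizing a bare service task group through the api package:

package structs_test

import (
	"testing"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/require"
)

// TestDefaultRestartPolicyInSync is a hypothetical sketch: it compares the
// restart policy produced by api-side canonicalization with the structs
// defaults declared above. Only fields shown in this diff are compared.
func TestDefaultRestartPolicyInSync(t *testing.T) {
	id := "example"
	jobType := "service"

	job := &api.Job{ID: &id, Type: &jobType}
	job.Canonicalize()

	tg := &api.TaskGroup{Name: &id}
	tg.Canonicalize(job)

	require.Equal(t, structs.DefaultServiceJobRestartPolicy.Attempts, *tg.RestartPolicy.Attempts)
	require.Equal(t, structs.DefaultServiceJobRestartPolicy.Delay, *tg.RestartPolicy.Delay)
}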
@ -4032,6 +4035,9 @@ var (
)
var (
// These default reschedule policies need to be in sync with
// NewDefaultReschedulePolicy in api/tasks.go
DefaultServiceJobReschedulePolicy = ReschedulePolicy{
Delay: 30 * time.Second,
DelayFunction: "exponential",