2015-06-05 22:21:17 +00:00
package structs
import (
2017-01-14 00:46:08 +00:00
"fmt"
2017-11-13 17:14:57 +00:00
"os"
2015-06-05 22:21:17 +00:00
"reflect"
2015-09-15 18:23:03 +00:00
"strings"
2015-06-05 22:21:17 +00:00
"testing"
2015-11-02 21:24:59 +00:00
"time"
2015-12-01 00:51:56 +00:00
2016-08-16 19:05:15 +00:00
"github.com/hashicorp/consul/api"
2019-03-05 21:41:41 +00:00
"github.com/hashicorp/go-multierror"
2017-09-29 16:58:48 +00:00
"github.com/hashicorp/nomad/helper/uuid"
2020-03-22 11:54:04 +00:00
2019-02-20 16:41:51 +00:00
"github.com/kr/pretty"
2017-08-12 21:36:10 +00:00
"github.com/stretchr/testify/assert"
2018-02-27 17:21:06 +00:00
"github.com/stretchr/testify/require"
2015-06-05 22:21:17 +00:00
)
2015-09-15 18:23:03 +00:00
func TestJob_Validate ( t * testing . T ) {
j := & Job { }
err := j . Validate ( )
2021-01-21 19:53:02 +00:00
requireErrors ( t , err ,
"datacenters" ,
"job ID" ,
"job name" ,
"job region" ,
"job type" ,
"namespace" ,
"priority" ,
"task groups" ,
)
2015-09-15 18:23:03 +00:00
2017-07-07 22:34:26 +00:00
j = & Job {
Type : "invalid-job-type" ,
}
err = j . Validate ( )
if expected := ` Invalid job type: "invalid-job-type" ` ; ! strings . Contains ( err . Error ( ) , expected ) {
t . Errorf ( "expected %s but found: %v" , expected , err )
}
2015-12-01 00:51:56 +00:00
j = & Job {
Type : JobTypeService ,
2015-12-01 16:40:32 +00:00
Periodic : & PeriodicConfig {
2015-12-01 00:51:56 +00:00
Enabled : true ,
} ,
}
err = j . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "Periodic" )
2015-12-01 00:51:56 +00:00
2015-09-15 18:23:03 +00:00
j = & Job {
Region : "global" ,
2017-09-29 16:58:48 +00:00
ID : uuid . Generate ( ) ,
2017-09-07 23:56:15 +00:00
Namespace : "test" ,
2015-09-15 18:23:03 +00:00
Name : "my-job" ,
Type : JobTypeService ,
Priority : 50 ,
Datacenters : [ ] string { "dc1" } ,
TaskGroups : [ ] * TaskGroup {
2017-09-26 22:26:33 +00:00
{
2015-09-15 18:23:03 +00:00
Name : "web" ,
2015-11-02 21:24:59 +00:00
RestartPolicy : & RestartPolicy {
Interval : 5 * time . Minute ,
Delay : 10 * time . Second ,
Attempts : 10 ,
} ,
2015-09-15 18:23:03 +00:00
} ,
2017-09-26 22:26:33 +00:00
{
2015-09-15 18:23:03 +00:00
Name : "web" ,
2015-11-02 21:24:59 +00:00
RestartPolicy : & RestartPolicy {
Interval : 5 * time . Minute ,
Delay : 10 * time . Second ,
Attempts : 10 ,
} ,
} ,
2017-09-26 22:26:33 +00:00
{
2015-11-02 21:24:59 +00:00
RestartPolicy : & RestartPolicy {
Interval : 5 * time . Minute ,
Delay : 10 * time . Second ,
Attempts : 10 ,
} ,
2015-09-15 18:23:03 +00:00
} ,
} ,
}
err = j . Validate ( )
2021-01-21 19:53:02 +00:00
requireErrors ( t , err ,
"2 redefines 'web' from group 1" ,
"group 3 missing name" ,
"Task group web validation failed" ,
)
2019-05-08 18:51:52 +00:00
// test for empty datacenters
j = & Job {
Datacenters : [ ] string { "" } ,
}
err = j . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "datacenter must be non-empty string" )
2015-09-15 18:23:03 +00:00
}
2020-03-22 11:54:04 +00:00
func TestJob_ValidateScaling ( t * testing . T ) {
require := require . New ( t )
p := & ScalingPolicy {
Policy : nil , // allowed to be nil
2020-09-29 21:57:46 +00:00
Type : ScalingPolicyTypeHorizontal ,
2020-03-22 11:54:04 +00:00
Min : 5 ,
Max : 5 ,
Enabled : true ,
}
job := testJob ( )
job . TaskGroups [ 0 ] . Scaling = p
job . TaskGroups [ 0 ] . Count = 5
require . NoError ( job . Validate ( ) )
// min <= max
p . Max = 0
p . Min = 10
err := job . Validate ( )
2021-01-21 19:53:02 +00:00
requireErrors ( t , err ,
"task group count must not be less than minimum count in scaling policy" ,
"task group count must not be greater than maximum count in scaling policy" ,
)
2020-03-22 11:54:04 +00:00
// count <= max
p . Max = 0
p . Min = 5
job . TaskGroups [ 0 ] . Count = 5
err = job . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( err ,
"task group count must not be greater than maximum count in scaling policy" ,
)
2020-03-22 11:54:04 +00:00
// min <= count
job . TaskGroups [ 0 ] . Count = 0
p . Min = 5
p . Max = 5
err = job . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( err ,
"task group count must not be less than minimum count in scaling policy" ,
)
2020-03-22 11:54:04 +00:00
}
2020-10-03 21:46:58 +00:00
// TestJob_ValidateNullChar verifies that embedded NUL (\000) bytes are
// rejected in job IDs, job names, task group names, and task names.
// Each field is repaired before the next one is corrupted, so every
// assertion isolates exactly one invalid field.
func TestJob_ValidateNullChar(t *testing.T) {
	assert := assert.New(t)

	// job id should not allow null characters
	job := testJob()
	job.ID = "id_with\000null_character"
	assert.Error(job.Validate(), "null character in job ID should not validate")

	// job name should not allow null characters
	job.ID = "happy_little_job_id"
	job.Name = "my job name with \000 characters"
	assert.Error(job.Validate(), "null character in job name should not validate")

	// task group name should not allow null characters
	job.Name = "my job"
	job.TaskGroups[0].Name = "oh_no_another_\000_char"
	assert.Error(job.Validate(), "null character in task group name should not validate")

	// task name should not allow null characters
	job.TaskGroups[0].Name = "so_much_better"
	job.TaskGroups[0].Tasks[0].Name = "ive_had_it_with_these_\000_chars_in_these_names"
	assert.Error(job.Validate(), "null character in task name should not validate")
}
2017-05-10 03:52:47 +00:00
// TestJob_Warnings verifies that Job.Warnings surfaces non-fatal
// advisory messages: an update stanza whose max_parallel exceeds the
// group count, mixed auto_promote settings across groups, and use of
// the deprecated Template.VaultGrace field. Warnings are matched by
// substring against the aggregated warning text.
func TestJob_Warnings(t *testing.T) {
	cases := []struct {
		Name     string
		Job      *Job
		Expected []string
	}{
		{
			Name:     "Higher counts for update stanza",
			Expected: []string{"max parallel count is greater"},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Name:  "foo",
						Count: 2,
						Update: &UpdateStrategy{
							MaxParallel: 10,
						},
					},
				},
			},
		},
		{
			Name:     "AutoPromote mixed TaskGroups",
			Expected: []string{"auto_promote must be true for all groups"},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Update: &UpdateStrategy{
							AutoPromote: true,
						},
					},
					{
						Update: &UpdateStrategy{
							AutoPromote: false,
						},
					},
				},
			},
		},
		{
			Name:     "Template.VaultGrace Deprecated",
			Expected: []string{"VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza."},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Tasks: []*Task{
							{
								Templates: []*Template{
									{
										VaultGrace: 1,
									},
								},
							},
						},
					},
				},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			warnings := c.Job.Warnings()
			if warnings == nil {
				// Producing no warnings is acceptable only when none were expected.
				if len(c.Expected) == 0 {
					return
				}
				t.Fatal("Got no warnings when they were expected")
			}

			// Every expected fragment must appear in the combined warning text.
			a := warnings.Error()
			for _, e := range c.Expected {
				if !strings.Contains(a, e) {
					t.Fatalf("Got warnings %q; didn't contain %q", a, e)
				}
			}
		})
	}
}
2017-05-23 00:02:20 +00:00
// TestJob_SpecChanged checks that SpecChanged ignores mutable,
// server-managed state (status, modify index) while still detecting a
// genuine specification change.
func TestJob_SpecChanged(t *testing.T) {
	// Shared base job that the variants are derived from.
	base := testJob()

	// Variant that only touches mutable state; the spec itself is the same.
	stateOnly := base.Copy()
	stateOnly.Status = "foo"
	stateOnly.ModifyIndex = base.ModifyIndex + 100

	// Variant with an actual spec difference.
	specChange := base.Copy()
	specChange.Priority = 99

	testCases := []struct {
		Name     string
		Original *Job
		New      *Job
		Changed  bool
	}{
		{
			Name:     "Same job except mutable indexes",
			Changed:  false,
			Original: base,
			New:      stateOnly,
		},
		{
			Name:     "Different",
			Changed:  true,
			Original: base,
			New:      specChange,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			actual := tc.Original.SpecChanged(tc.New)
			if actual != tc.Changed {
				t.Fatalf("SpecChanged() returned %v; want %v", actual, tc.Changed)
			}
		})
	}
}
2016-02-21 01:43:17 +00:00
// testJob builds a canonical, valid service job fixture used throughout
// these tests: one "web" task group with restart/reschedule policies, a
// dynamic "http" port, a service, and a single exec task. A fresh job
// (with a unique ID) is returned on each call, so callers may freely
// mutate the result.
func testJob() *Job {
	return &Job{
		Region:      "global",
		ID:          uuid.Generate(),
		Namespace:   "test",
		Name:        "my-job",
		Type:        JobTypeService,
		Priority:    50,
		AllAtOnce:   false,
		Datacenters: []string{"dc1"},
		Constraints: []*Constraint{
			{
				LTarget: "$attr.kernel.name",
				RTarget: "linux",
				Operand: "=",
			},
		},
		Periodic: &PeriodicConfig{
			Enabled: false,
		},
		TaskGroups: []*TaskGroup{
			{
				Name:          "web",
				Count:         10,
				EphemeralDisk: DefaultEphemeralDisk(),
				RestartPolicy: &RestartPolicy{
					Mode:     RestartPolicyModeFail,
					Attempts: 3,
					Interval: 10 * time.Minute,
					Delay:    1 * time.Minute,
				},
				ReschedulePolicy: &ReschedulePolicy{
					Interval:      5 * time.Minute,
					Attempts:      10,
					Delay:         5 * time.Second,
					DelayFunction: "constant",
				},
				Networks: []*NetworkResource{
					{
						DynamicPorts: []Port{
							{Label: "http"},
						},
					},
				},
				Services: []*Service{
					{
						Name:      "${TASK}-frontend",
						PortLabel: "http",
					},
				},
				Tasks: []*Task{
					{
						Name:   "web",
						Driver: "exec",
						Config: map[string]interface{}{
							"command": "/bin/date",
						},
						Env: map[string]string{
							"FOO": "bar",
						},
						Artifacts: []*TaskArtifact{
							{
								GetterSource: "http://foo.com",
							},
						},
						Resources: &Resources{
							CPU:      500,
							MemoryMB: 256,
						},
						LogConfig: &LogConfig{
							MaxFiles:      10,
							MaxFileSizeMB: 1,
						},
					},
				},
				Meta: map[string]string{
					"elb_check_type":     "http",
					"elb_check_interval": "30s",
					"elb_check_min":      "3",
				},
			},
		},
		Meta: map[string]string{
			"owner": "armon",
		},
	}
}
2016-02-21 01:36:39 +00:00
2016-02-21 01:43:17 +00:00
func TestJob_Copy ( t * testing . T ) {
j := testJob ( )
2015-12-18 20:26:28 +00:00
c := j . Copy ( )
if ! reflect . DeepEqual ( j , c ) {
2016-02-11 17:08:20 +00:00
t . Fatalf ( "Copy() returned an unequal Job; got %#v; want %#v" , c , j )
2015-12-18 20:26:28 +00:00
}
}
2015-12-01 16:40:32 +00:00
// TestJob_IsPeriodic checks IsPeriodic for a job with an enabled
// periodic configuration and for one with no periodic configuration.
func TestJob_IsPeriodic(t *testing.T) {
	periodic := &Job{
		Type: JobTypeService,
		Periodic: &PeriodicConfig{
			Enabled: true,
		},
	}
	if !periodic.IsPeriodic() {
		t.Fatalf("IsPeriodic() returned false on periodic job")
	}

	plain := &Job{
		Type: JobTypeService,
	}
	if plain.IsPeriodic() {
		t.Fatalf("IsPeriodic() returned true on non-periodic job")
	}
}
2017-12-11 21:55:17 +00:00
// TestJob_IsPeriodicActive checks that a job counts as an active
// periodic job only when the periodic config is enabled and the job is
// neither stopped nor parameterized.
func TestJob_IsPeriodicActive(t *testing.T) {
	testCases := []struct {
		job    *Job
		active bool
	}{
		{
			// Enabled periodic config: active.
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: true,
				},
			},
			active: true,
		},
		{
			// Disabled periodic config: inactive.
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: false,
				},
			},
			active: false,
		},
		{
			// Stopped jobs are never active.
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: true,
				},
				Stop: true,
			},
			active: false,
		},
		{
			// Parameterized jobs are never active.
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: false,
				},
				ParameterizedJob: &ParameterizedJobConfig{},
			},
			active: false,
		},
	}

	for idx, tc := range testCases {
		got := tc.job.IsPeriodicActive()
		if got != tc.active {
			t.Fatalf("case %d failed: got %v; want %v", idx, got, tc.active)
		}
	}
}
2016-07-13 19:50:08 +00:00
// TestJob_SystemJob_Validate checks validation rules specific to system
// jobs: a group count greater than 1 is rejected (0 and 1 are allowed),
// and affinity and spread stanzas are forbidden at every level.
func TestJob_SystemJob_Validate(t *testing.T) {
	j := testJob()
	j.Type = JobTypeSystem
	// System jobs may not carry a reschedule policy, so strip the fixture's.
	j.TaskGroups[0].ReschedulePolicy = nil
	j.Canonicalize()

	// The fixture's count of 10 exceeds the system-job limit.
	err := j.Validate()
	if err == nil || !strings.Contains(err.Error(), "exceed") {
		t.Fatalf("expect error due to count")
	}

	// Counts of 0 and 1 are both acceptable.
	j.TaskGroups[0].Count = 0
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	j.TaskGroups[0].Count = 1
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	// Add affinities at job, task group and task level, that should fail validation
	j.Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${node.datacenter}",
		RTarget: "dc1",
	}}
	j.TaskGroups[0].Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${meta.rack}",
		RTarget: "r1",
	}}
	j.TaskGroups[0].Tasks[0].Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${meta.rack}",
		RTarget: "r1",
	}}
	err = j.Validate()
	require.NotNil(t, err)
	require.Contains(t, err.Error(), "System jobs may not have an affinity stanza")

	// Add spread at job and task group level, that should fail validation
	j.Spreads = []*Spread{{
		Attribute: "${node.datacenter}",
		Weight:    100,
	}}
	j.TaskGroups[0].Spreads = []*Spread{{
		Attribute: "${node.datacenter}",
		Weight:    100,
	}}
	err = j.Validate()
	require.NotNil(t, err)
	require.Contains(t, err.Error(), "System jobs may not have a spread stanza")
}
2016-08-17 00:50:14 +00:00
// TestJob_VaultPolicies verifies that Job.VaultPolicies builds a
// group-name -> task-name -> *Vault map covering only the tasks that
// declare a Vault block.
func TestJob_VaultPolicies(t *testing.T) {
	// A job with no task groups yields an empty (non-nil) map.
	j0 := &Job{}
	e0 := make(map[string]map[string]*Vault, 0)

	vj1 := &Vault{
		Policies: []string{
			"p1",
			"p2",
		},
	}
	vj2 := &Vault{
		Policies: []string{
			"p3",
			"p4",
		},
	}
	vj3 := &Vault{
		Policies: []string{
			"p5",
		},
	}
	j1 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						// No Vault block: must not appear in the result.
						Name: "t1",
					},
					{
						Name:  "t2",
						Vault: vj1,
					},
				},
			},
			{
				Name: "bar",
				Tasks: []*Task{
					{
						Name:  "t3",
						Vault: vj2,
					},
					{
						Name:  "t4",
						Vault: vj3,
					},
				},
			},
		},
	}
	e1 := map[string]map[string]*Vault{
		"foo": {
			"t2": vj1,
		},
		"bar": {
			"t3": vj2,
			"t4": vj3,
		},
	}

	cases := []struct {
		Job      *Job
		Expected map[string]map[string]*Vault
	}{
		{
			Job:      j0,
			Expected: e0,
		},
		{
			Job:      j1,
			Expected: e1,
		},
	}

	for i, c := range cases {
		got := c.Job.VaultPolicies()
		if !reflect.DeepEqual(got, c.Expected) {
			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
		}
	}
}
2019-12-06 20:46:46 +00:00
// TestJob_ConnectTasks verifies that Job.ConnectTasks returns the task
// kinds of every Connect task across all groups — sidecar proxies,
// ingress/terminating gateways, and Connect-native tasks — in group
// then task order, skipping plain (non-Connect) tasks.
func TestJob_ConnectTasks(t *testing.T) {
	t.Parallel()
	r := require.New(t)

	j0 := &Job{
		TaskGroups: []*TaskGroup{{
			Name: "tg1",
			Tasks: []*Task{{
				Name: "connect-proxy-task1",
				Kind: "connect-proxy:task1",
			}, {
				Name: "task2",
				Kind: "task2",
			}, {
				Name: "connect-proxy-task3",
				Kind: "connect-proxy:task3",
			}},
		}, {
			Name: "tg2",
			Tasks: []*Task{{
				Name: "task1",
				Kind: "task1",
			}, {
				Name: "connect-proxy-task2",
				Kind: "connect-proxy:task2",
			}},
		}, {
			Name: "tg3",
			Tasks: []*Task{{
				Name: "ingress",
				Kind: "connect-ingress:ingress",
			}},
		}, {
			Name: "tg4",
			Tasks: []*Task{{
				Name: "frontend",
				Kind: "connect-native:uuid-fe",
			}, {
				Name: "generator",
				Kind: "connect-native:uuid-api",
			}},
		}, {
			Name: "tg5",
			Tasks: []*Task{{
				Name: "t1000",
				Kind: "connect-terminating:t1000",
			}},
		}},
	}

	connectTasks := j0.ConnectTasks()

	// Expected order follows group order, then task order within a group.
	exp := []TaskKind{
		NewTaskKind(ConnectProxyPrefix, "task1"),
		NewTaskKind(ConnectProxyPrefix, "task3"),
		NewTaskKind(ConnectProxyPrefix, "task2"),
		NewTaskKind(ConnectIngressPrefix, "ingress"),
		NewTaskKind(ConnectNativePrefix, "uuid-fe"),
		NewTaskKind(ConnectNativePrefix, "uuid-api"),
		NewTaskKind(ConnectTerminatingPrefix, "t1000"),
	}

	r.Equal(exp, connectTasks)
}
2016-10-20 20:55:35 +00:00
// TestJob_RequiredSignals verifies that Job.RequiredSignals collects,
// per group and task, the signals a task may receive: Vault
// change_signal values, template change_signal values, and the task's
// kill_signal. Tasks with no signal requirements are omitted entirely.
func TestJob_RequiredSignals(t *testing.T) {
	// Empty job: empty (non-nil) result map.
	j0 := &Job{}
	e0 := make(map[string]map[string][]string, 0)

	// Vault blocks: only signal change mode contributes a signal.
	vj1 := &Vault{
		Policies:   []string{"p1"},
		ChangeMode: VaultChangeModeNoop,
	}
	vj2 := &Vault{
		Policies:     []string{"p1"},
		ChangeMode:   VaultChangeModeSignal,
		ChangeSignal: "SIGUSR1",
	}
	// Templates: likewise, only signal change mode contributes.
	tj1 := &Template{
		SourcePath: "foo",
		DestPath:   "bar",
		ChangeMode: TemplateChangeModeNoop,
	}
	tj2 := &Template{
		SourcePath:   "foo",
		DestPath:     "bar",
		ChangeMode:   TemplateChangeModeSignal,
		ChangeSignal: "SIGUSR2",
	}
	j1 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
					},
					{
						Name:      "t2",
						Vault:     vj2,
						Templates: []*Template{tj2},
					},
				},
			},
			{
				Name: "bar",
				Tasks: []*Task{
					{
						// Noop change modes: no signals expected for t3.
						Name:      "t3",
						Vault:     vj1,
						Templates: []*Template{tj1},
					},
					{
						Name:  "t4",
						Vault: vj2,
					},
				},
			},
		},
	}
	e1 := map[string]map[string][]string{
		"foo": {
			"t2": {"SIGUSR1", "SIGUSR2"},
		},
		"bar": {
			"t4": {"SIGUSR1"},
		},
	}

	// KillSignal is also part of a task's required signals.
	j2 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name:       "t1",
						KillSignal: "SIGQUIT",
					},
				},
			},
		},
	}
	e2 := map[string]map[string][]string{
		"foo": {
			"t1": {"SIGQUIT"},
		},
	}

	cases := []struct {
		Job      *Job
		Expected map[string]map[string][]string
	}{
		{
			Job:      j0,
			Expected: e0,
		},
		{
			Job:      j1,
			Expected: e1,
		},
		{
			Job:      j2,
			Expected: e2,
		},
	}

	for i, c := range cases {
		got := c.Job.RequiredSignals()
		if !reflect.DeepEqual(got, c.Expected) {
			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
		}
	}
}
2019-04-19 19:49:24 +00:00
// TestJob_PartEqual tests the Equals comparisons for job sub-components
// (Networks, Constraints, Affinities): equality must hold regardless of
// element order.
func TestJob_PartEqual(t *testing.T) {
	// Empty network sets are equal.
	ns := &Networks{}
	require.True(t, ns.Equals(&Networks{}))

	ns = &Networks{
		&NetworkResource{Device: "eth0"},
	}
	require.True(t, ns.Equals(&Networks{
		&NetworkResource{Device: "eth0"},
	}))

	// Element order must not affect network equality.
	ns = &Networks{
		&NetworkResource{Device: "eth0"},
		&NetworkResource{Device: "eth1"},
		&NetworkResource{Device: "eth2"},
	}
	require.True(t, ns.Equals(&Networks{
		&NetworkResource{Device: "eth2"},
		&NetworkResource{Device: "eth0"},
		&NetworkResource{Device: "eth1"},
	}))

	// Constraints compare as sets (positional Constraint literals).
	cs := &Constraints{
		&Constraint{"left0", "right0", "=", ""},
		&Constraint{"left1", "right1", "=", ""},
		&Constraint{"left2", "right2", "=", ""},
	}
	require.True(t, cs.Equals(&Constraints{
		&Constraint{"left0", "right0", "=", ""},
		&Constraint{"left2", "right2", "=", ""},
		&Constraint{"left1", "right1", "=", ""},
	}))

	// Affinities compare as sets (positional Affinity literals).
	as := &Affinities{
		&Affinity{"left0", "right0", "=", 0, ""},
		&Affinity{"left1", "right1", "=", 0, ""},
		&Affinity{"left2", "right2", "=", 0, ""},
	}
	require.True(t, as.Equals(&Affinities{
		&Affinity{"left0", "right0", "=", 0, ""},
		&Affinity{"left2", "right2", "=", 0, ""},
		&Affinity{"left1", "right1", "=", 0, ""},
	}))
}
2019-12-06 20:46:46 +00:00
func TestTask_UsesConnect ( t * testing . T ) {
t . Parallel ( )
t . Run ( "normal task" , func ( t * testing . T ) {
task := testJob ( ) . TaskGroups [ 0 ] . Tasks [ 0 ]
usesConnect := task . UsesConnect ( )
require . False ( t , usesConnect )
} )
t . Run ( "sidecar proxy" , func ( t * testing . T ) {
task := & Task {
Name : "connect-proxy-task1" ,
2020-05-13 20:15:55 +00:00
Kind : NewTaskKind ( ConnectProxyPrefix , "task1" ) ,
2019-12-06 20:46:46 +00:00
}
usesConnect := task . UsesConnect ( )
require . True ( t , usesConnect )
} )
2020-05-13 20:15:55 +00:00
t . Run ( "native task" , func ( t * testing . T ) {
task := & Task {
Name : "task1" ,
Kind : NewTaskKind ( ConnectNativePrefix , "task1" ) ,
}
usesConnect := task . UsesConnect ( )
require . True ( t , usesConnect )
} )
2020-07-28 20:12:08 +00:00
t . Run ( "ingress gateway" , func ( t * testing . T ) {
task := & Task {
Name : "task1" ,
Kind : NewTaskKind ( ConnectIngressPrefix , "task1" ) ,
}
usesConnect := task . UsesConnect ( )
require . True ( t , usesConnect )
} )
2020-12-15 20:38:33 +00:00
t . Run ( "terminating gateway" , func ( t * testing . T ) {
task := & Task {
Name : "task1" ,
Kind : NewTaskKind ( ConnectTerminatingPrefix , "task1" ) ,
}
usesConnect := task . UsesConnect ( )
require . True ( t , usesConnect )
} )
2019-12-06 20:46:46 +00:00
}
2020-01-30 16:49:07 +00:00
func TestTaskGroup_UsesConnect ( t * testing . T ) {
t . Parallel ( )
try := func ( t * testing . T , tg * TaskGroup , exp bool ) {
result := tg . UsesConnect ( )
require . Equal ( t , exp , result )
}
t . Run ( "tg uses native" , func ( t * testing . T ) {
try ( t , & TaskGroup {
Services : [ ] * Service {
{ Connect : nil } ,
2020-06-22 17:55:59 +00:00
{ Connect : & ConsulConnect { Native : true } } ,
2020-01-30 16:49:07 +00:00
} ,
} , true )
} )
t . Run ( "tg uses sidecar" , func ( t * testing . T ) {
try ( t , & TaskGroup {
Services : [ ] * Service { {
Connect : & ConsulConnect {
SidecarService : & ConsulSidecarService {
Port : "9090" ,
} ,
} ,
} } ,
} , true )
} )
2020-07-28 20:12:08 +00:00
t . Run ( "tg uses gateway" , func ( t * testing . T ) {
try ( t , & TaskGroup {
Services : [ ] * Service { {
Connect : & ConsulConnect {
Gateway : consulIngressGateway1 ,
} ,
} } ,
} , true )
} )
2020-01-30 16:49:07 +00:00
t . Run ( "tg does not use connect" , func ( t * testing . T ) {
try ( t , & TaskGroup {
Services : [ ] * Service {
{ Connect : nil } ,
} ,
} , false )
} )
}
2015-09-15 18:23:03 +00:00
// TestTaskGroup_Validate exercises TaskGroup.Validate against a range
// of invalid groups: negative counts, duplicate/unnamed tasks,
// conflicting static ports, update blocks on batch jobs, reschedule
// policies on system jobs, duplicate port labels, invalid volume
// requests and volume mounts, and group service checks that refer to
// missing tasks. Each expected problem is matched by substring against
// the returned multierror.
func TestTaskGroup_Validate(t *testing.T) {
	j := testJob()
	// Unnamed group with a negative count and no tasks.
	tg := &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay:    5 * time.Second,
		},
	}
	err := tg.Validate(j)
	requireErrors(t, err,
		"group name",
		"count can't be negative",
		"Missing tasks",
	)

	// Two tasks reserving the same static port must conflict.
	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
			{
				Name: "task-b",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected := `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	// The same static port reserved twice within one task also conflicts.
	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{
								{Label: "foo", Value: 123},
								{Label: "bar", Value: 123},
							},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected = `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	// Duplicate task names, a missing task name, two leaders, and a
	// missing ephemeral disk are all reported together.
	tg = &TaskGroup{
		Name:  "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
			{Name: "web", Leader: true},
			{},
		},
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval:      5 * time.Minute,
			Attempts:      10,
			Delay:         5 * time.Second,
			DelayFunction: "constant",
		},
	}

	err = tg.Validate(j)
	requireErrors(t, err,
		"should have an ephemeral disk object",
		"2 redefines 'web' from task 1",
		"Task 3 missing name",
		"Only one task may be marked as leader",
		"Task web validation failed",
	)

	// Batch jobs may not carry an update block.
	tg = &TaskGroup{
		Name:  "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
		},
		Update: DefaultUpdateStrategy.Copy(),
	}
	j.Type = JobTypeBatch
	err = tg.Validate(j)
	// NOTE(review): the string here is only require.Error's failure
	// message, not an asserted substring of the error.
	require.Error(t, err, "does not allow update block")

	// System jobs may not carry a reschedule policy.
	tg = &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay:    5 * time.Second,
		},
	}
	j.Type = JobTypeSystem
	err = tg.Validate(j)
	if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") {
		t.Fatalf("err: %s", err)
	}

	// A port label used at both the group and task network level conflicts.
	// (Port literals are positional: Label, Value, To, HostNetwork.)
	tg = &TaskGroup{
		Networks: []*NetworkResource{
			{
				DynamicPorts: []Port{{"http", 0, 80, ""}},
			},
		},
		Tasks: []*Task{
			{
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							DynamicPorts: []Port{{"http", 0, 80, ""}},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(j)
	require.Contains(t, err.Error(), "Port label http already in use")

	// Volume requests must use a recognised type.
	tg = &TaskGroup{
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type:   "nothost",
				Source: "foo",
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
			},
		},
	}
	err = tg.Validate(&Job{})
	require.Contains(t, err.Error(), `Volume foo has unrecognised type nothost`)

	// Host volume requests must name a source.
	tg = &TaskGroup{
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type: "host",
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
			},
		},
	}
	err = tg.Validate(&Job{})
	require.Contains(t, err.Error(), `Volume foo has an empty source`)

	// per_alloc CSI volumes are incompatible with canary deployments,
	// and the empty source is still reported alongside.
	tg = &TaskGroup{
		Name: "group-a",
		Update: &UpdateStrategy{
			Canary: 1,
		},
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type:     "csi",
				PerAlloc: true,
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
			},
		},
	}
	err = tg.Validate(&Job{})
	require.Contains(t, err.Error(), `Volume foo has an empty source`)
	require.Contains(t, err.Error(), `Volume foo cannot be per_alloc when canaries are in use`)

	// Volume mounts must reference a non-empty, defined volume name.
	tg = &TaskGroup{
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type: "host",
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
				VolumeMounts: []*VolumeMount{
					{
						Volume: "",
					},
				},
			},
			{
				Name:      "task-b",
				Resources: &Resources{},
				VolumeMounts: []*VolumeMount{
					{
						Volume: "foob",
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected = `Task task-a has a volume mount (0) referencing an empty volume`
	require.Contains(t, err.Error(), expected)

	expected = `Task task-b has a volume mount (0) referencing undefined volume foob`
	require.Contains(t, err.Error(), expected)

	// Group service checks may only target tasks that exist, and only
	// script/gRPC checks may target a task at all.
	taskA := &Task{Name: "task-a"}
	tg = &TaskGroup{
		Name: "group-a",
		Services: []*Service{
			{
				Name: "service-a",
				Checks: []*ServiceCheck{
					{
						Name:      "check-a",
						Type:      "tcp",
						TaskName:  "task-b",
						PortLabel: "http",
						Interval:  time.Duration(1 * time.Second),
						Timeout:   time.Duration(1 * time.Second),
					},
				},
			},
		},
		Tasks: []*Task{taskA},
	}
	err = tg.Validate(&Job{})
	expected = `Check check-a invalid: refers to non-existent task task-b`
	require.Contains(t, err.Error(), expected)

	expected = `Check check-a invalid: only script and gRPC checks should have tasks`
	require.Contains(t, err.Error(), expected)
}
2020-06-19 21:39:28 +00:00
func TestTaskGroupNetwork_Validate ( t * testing . T ) {
cases := [ ] struct {
TG * TaskGroup
ErrContains string
} {
{
TG : & TaskGroup {
Name : "group-static-value-ok" ,
Networks : Networks {
& NetworkResource {
ReservedPorts : [ ] Port {
{
Label : "ok" ,
Value : 65535 ,
} ,
} ,
} ,
} ,
} ,
} ,
{
TG : & TaskGroup {
Name : "group-dynamic-value-ok" ,
Networks : Networks {
& NetworkResource {
DynamicPorts : [ ] Port {
{
Label : "ok" ,
Value : 65535 ,
} ,
} ,
} ,
} ,
} ,
} ,
{
TG : & TaskGroup {
Name : "group-static-to-ok" ,
Networks : Networks {
& NetworkResource {
ReservedPorts : [ ] Port {
{
Label : "ok" ,
To : 65535 ,
} ,
} ,
} ,
} ,
} ,
} ,
{
TG : & TaskGroup {
Name : "group-dynamic-to-ok" ,
Networks : Networks {
& NetworkResource {
DynamicPorts : [ ] Port {
{
Label : "ok" ,
To : 65535 ,
} ,
} ,
} ,
} ,
} ,
} ,
{
TG : & TaskGroup {
Name : "group-static-value-too-high" ,
Networks : Networks {
& NetworkResource {
ReservedPorts : [ ] Port {
{
Label : "too-high" ,
Value : 65536 ,
} ,
} ,
} ,
} ,
} ,
ErrContains : "greater than" ,
} ,
{
TG : & TaskGroup {
Name : "group-dynamic-value-too-high" ,
Networks : Networks {
& NetworkResource {
DynamicPorts : [ ] Port {
{
Label : "too-high" ,
Value : 65536 ,
} ,
} ,
} ,
} ,
} ,
ErrContains : "greater than" ,
} ,
{
TG : & TaskGroup {
Name : "group-static-to-too-high" ,
Networks : Networks {
& NetworkResource {
ReservedPorts : [ ] Port {
{
Label : "too-high" ,
To : 65536 ,
} ,
} ,
} ,
} ,
} ,
ErrContains : "greater than" ,
} ,
{
TG : & TaskGroup {
Name : "group-dynamic-to-too-high" ,
Networks : Networks {
& NetworkResource {
DynamicPorts : [ ] Port {
{
Label : "too-high" ,
To : 65536 ,
} ,
} ,
} ,
} ,
} ,
ErrContains : "greater than" ,
} ,
2021-02-02 19:56:52 +00:00
{
TG : & TaskGroup {
Name : "group-same-static-port-different-host_network" ,
Networks : Networks {
& NetworkResource {
ReservedPorts : [ ] Port {
{
Label : "net1_http" ,
Value : 80 ,
HostNetwork : "net1" ,
} ,
{
Label : "net2_http" ,
Value : 80 ,
HostNetwork : "net2" ,
} ,
} ,
} ,
} ,
} ,
} ,
{
TG : & TaskGroup {
Name : "mixing-group-task-ports" ,
Networks : Networks {
& NetworkResource {
ReservedPorts : [ ] Port {
{
Label : "group_http" ,
Value : 80 ,
} ,
} ,
} ,
} ,
Tasks : [ ] * Task {
& Task {
Name : "task1" ,
Resources : & Resources {
Networks : Networks {
& NetworkResource {
ReservedPorts : [ ] Port {
{
Label : "task_http" ,
Value : 80 ,
} ,
} ,
} ,
} ,
} ,
} ,
} ,
} ,
ErrContains : "already reserved by" ,
} ,
{
TG : & TaskGroup {
Name : "mixing-group-task-ports-with-host_network" ,
Networks : Networks {
& NetworkResource {
ReservedPorts : [ ] Port {
{
Label : "group_http" ,
Value : 80 ,
HostNetwork : "net1" ,
} ,
} ,
} ,
} ,
Tasks : [ ] * Task {
& Task {
Name : "task1" ,
Resources : & Resources {
Networks : Networks {
& NetworkResource {
ReservedPorts : [ ] Port {
{
Label : "task_http" ,
Value : 80 ,
} ,
} ,
} ,
} ,
} ,
} ,
} ,
} ,
} ,
2020-06-19 21:39:28 +00:00
}
for i := range cases {
tc := cases [ i ]
t . Run ( tc . TG . Name , func ( t * testing . T ) {
err := tc . TG . validateNetworks ( )
t . Logf ( "%s -> %v" , tc . TG . Name , err )
if tc . ErrContains == "" {
require . NoError ( t , err )
return
}
require . Error ( t , err )
require . Contains ( t , err . Error ( ) , tc . ErrContains )
} )
}
}
2015-09-15 18:23:03 +00:00
func TestTask_Validate ( t * testing . T ) {
task := & Task { }
2016-09-14 22:43:42 +00:00
ephemeralDisk := DefaultEphemeralDisk ( )
2020-08-28 16:38:30 +00:00
err := task . Validate ( ephemeralDisk , JobTypeBatch , nil , nil )
2021-01-21 19:53:02 +00:00
requireErrors ( t , err ,
"task name" ,
"task driver" ,
"task resources" ,
)
2015-09-15 18:23:03 +00:00
2016-05-28 00:17:10 +00:00
task = & Task { Name : "web/foo" }
2020-08-28 16:38:30 +00:00
err = task . Validate ( ephemeralDisk , JobTypeBatch , nil , nil )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "slashes" )
2016-05-28 00:17:10 +00:00
2015-09-15 18:23:03 +00:00
task = & Task {
2016-02-02 21:50:30 +00:00
Name : "web" ,
Driver : "docker" ,
Resources : & Resources {
CPU : 100 ,
MemoryMB : 100 ,
} ,
2016-02-11 18:42:56 +00:00
LogConfig : DefaultLogConfig ( ) ,
2015-09-15 18:23:03 +00:00
}
2016-09-14 22:43:42 +00:00
ephemeralDisk . SizeMB = 200
2020-08-28 16:38:30 +00:00
err = task . Validate ( ephemeralDisk , JobTypeBatch , nil , nil )
2015-09-15 18:23:03 +00:00
if err != nil {
t . Fatalf ( "err: %s" , err )
}
2017-03-12 00:23:24 +00:00
task . Constraints = append ( task . Constraints ,
& Constraint {
Operand : ConstraintDistinctHosts ,
} ,
& Constraint {
Operand : ConstraintDistinctProperty ,
LTarget : "${meta.rack}" ,
} )
2020-08-28 16:38:30 +00:00
err = task . Validate ( ephemeralDisk , JobTypeBatch , nil , nil )
2021-01-21 19:53:02 +00:00
requireErrors ( t , err ,
"task level: distinct_hosts" ,
"task level: distinct_property" ,
)
2015-09-15 18:23:03 +00:00
}
2020-09-30 19:09:41 +00:00
// TestTask_Validate_Resources asserts that the minimum, default, and a fully
// populated Resources struct all pass Resources.Validate.
func TestTask_Validate_Resources(t *testing.T) {
	cases := []struct {
		name string
		res  *Resources
	}{
		{
			name: "Minimum",
			res:  MinResources(),
		},
		{
			name: "Default",
			res:  DefaultResources(),
		},
		{
			// Every field populated, including network DNS and both kinds of
			// port mappings.
			name: "Full",
			res: &Resources{
				CPU:      1000,
				MemoryMB: 1000,
				IOPS:     1000,
				Networks: []*NetworkResource{
					{
						Mode:   "host",
						Device: "localhost",
						CIDR:   "127.0.0.0/8",
						IP:     "127.0.0.1",
						MBits:  1000,
						DNS: &DNSConfig{
							Servers:  []string{"localhost"},
							Searches: []string{"localdomain"},
							Options:  []string{"ndots:5"},
						},
						ReservedPorts: []Port{
							{
								Label:       "reserved",
								Value:       1234,
								To:          1234,
								HostNetwork: "loopback",
							},
						},
						DynamicPorts: []Port{
							{
								Label:       "dynamic",
								Value:       5678,
								To:          5678,
								HostNetwork: "loopback",
							},
						},
					},
				},
			},
		},
	}

	for i := range cases {
		tc := cases[i]
		t.Run(tc.name, func(t *testing.T) {
			require.NoError(t, tc.res.Validate())
		})
	}
}
2016-04-19 02:38:47 +00:00
func TestTask_Validate_Services ( t * testing . T ) {
2016-06-12 23:36:49 +00:00
s1 := & Service {
2016-04-19 02:38:47 +00:00
Name : "service-name" ,
PortLabel : "bar" ,
Checks : [ ] * ServiceCheck {
{
2016-07-09 05:33:04 +00:00
Name : "check-name" ,
Type : ServiceCheckTCP ,
Interval : 0 * time . Second ,
2016-04-19 02:38:47 +00:00
} ,
2016-05-03 20:16:02 +00:00
{
2016-07-09 05:33:04 +00:00
Name : "check-name" ,
Type : ServiceCheckTCP ,
Timeout : 2 * time . Second ,
2016-05-03 20:16:02 +00:00
} ,
2016-11-01 23:02:16 +00:00
{
Name : "check-name" ,
Type : ServiceCheckTCP ,
Interval : 1 * time . Second ,
} ,
2016-04-19 02:38:47 +00:00
} ,
}
2016-06-12 23:36:49 +00:00
s2 := & Service {
2017-01-17 20:26:07 +00:00
Name : "service-name" ,
PortLabel : "bar" ,
}
s3 := & Service {
Name : "service-A" ,
PortLabel : "a" ,
}
s4 := & Service {
Name : "service-A" ,
PortLabel : "b" ,
2016-05-02 20:40:49 +00:00
}
2016-09-14 22:43:42 +00:00
ephemeralDisk := DefaultEphemeralDisk ( )
2017-01-17 20:26:07 +00:00
ephemeralDisk . SizeMB = 200
2016-04-19 02:38:47 +00:00
task := & Task {
Name : "web" ,
Driver : "docker" ,
Resources : & Resources {
CPU : 100 ,
MemoryMB : 100 ,
} ,
2016-06-12 23:36:49 +00:00
Services : [ ] * Service { s1 , s2 } ,
2016-04-19 02:38:47 +00:00
}
2017-01-17 20:26:07 +00:00
task1 := & Task {
Name : "web" ,
Driver : "docker" ,
Resources : DefaultResources ( ) ,
Services : [ ] * Service { s3 , s4 } ,
LogConfig : DefaultLogConfig ( ) ,
}
2020-08-28 16:38:30 +00:00
tgNetworks := [ ] * NetworkResource {
2017-09-26 22:26:33 +00:00
{
2017-01-17 20:26:07 +00:00
MBits : 10 ,
DynamicPorts : [ ] Port {
2017-09-26 22:26:33 +00:00
{
2017-01-17 20:26:07 +00:00
Label : "a" ,
Value : 1000 ,
} ,
2017-09-26 22:26:33 +00:00
{
2017-01-17 20:26:07 +00:00
Label : "b" ,
Value : 2000 ,
} ,
} ,
} ,
}
2016-08-25 18:53:09 +00:00
2020-08-28 16:38:30 +00:00
err := task . Validate ( ephemeralDisk , JobTypeService , nil , tgNetworks )
2016-04-19 02:38:47 +00:00
if err == nil {
t . Fatal ( "expected an error" )
}
2016-05-02 20:40:49 +00:00
if ! strings . Contains ( err . Error ( ) , "service \"service-name\" is duplicate" ) {
2016-05-03 20:16:02 +00:00
t . Fatalf ( "err: %v" , err )
}
if ! strings . Contains ( err . Error ( ) , "check \"check-name\" is duplicate" ) {
2016-05-02 20:40:49 +00:00
t . Fatalf ( "err: %v" , err )
}
2016-07-09 05:33:04 +00:00
2016-11-01 23:02:16 +00:00
if ! strings . Contains ( err . Error ( ) , "missing required value interval" ) {
t . Fatalf ( "err: %v" , err )
}
2016-11-01 23:05:34 +00:00
if ! strings . Contains ( err . Error ( ) , "cannot be less than" ) {
2016-08-12 19:09:44 +00:00
t . Fatalf ( "err: %v" , err )
2016-08-16 19:05:15 +00:00
}
2017-01-17 20:26:07 +00:00
2020-08-28 16:38:30 +00:00
if err = task1 . Validate ( ephemeralDisk , JobTypeService , nil , tgNetworks ) ; err != nil {
2017-01-17 20:26:07 +00:00
t . Fatalf ( "err : %v" , err )
}
2016-08-16 19:05:15 +00:00
}
2018-01-12 23:32:51 +00:00
func TestTask_Validate_Service_AddressMode_Ok ( t * testing . T ) {
ephemeralDisk := DefaultEphemeralDisk ( )
getTask := func ( s * Service ) * Task {
task := & Task {
Name : "web" ,
Driver : "docker" ,
Resources : DefaultResources ( ) ,
Services : [ ] * Service { s } ,
LogConfig : DefaultLogConfig ( ) ,
}
2020-08-28 16:38:30 +00:00
return task
}
tgNetworks := [ ] * NetworkResource {
{
DynamicPorts : [ ] Port {
{
Label : "http" ,
Value : 80 ,
2018-01-12 23:32:51 +00:00
} ,
} ,
2020-08-28 16:38:30 +00:00
} ,
2018-01-12 23:32:51 +00:00
}
cases := [ ] * Service {
{
// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
Name : "DriverModeWithLabel" ,
PortLabel : "http" ,
AddressMode : AddressModeDriver ,
} ,
{
Name : "DriverModeWithPort" ,
PortLabel : "80" ,
AddressMode : AddressModeDriver ,
} ,
{
Name : "HostModeWithLabel" ,
PortLabel : "http" ,
AddressMode : AddressModeHost ,
} ,
{
Name : "HostModeWithoutLabel" ,
AddressMode : AddressModeHost ,
} ,
{
Name : "DriverModeWithoutLabel" ,
AddressMode : AddressModeDriver ,
} ,
}
for _ , service := range cases {
task := getTask ( service )
t . Run ( service . Name , func ( t * testing . T ) {
2020-08-28 16:38:30 +00:00
if err := task . Validate ( ephemeralDisk , JobTypeService , nil , tgNetworks ) ; err != nil {
2018-01-12 23:32:51 +00:00
t . Fatalf ( "unexpected err: %v" , err )
}
} )
}
}
func TestTask_Validate_Service_AddressMode_Bad ( t * testing . T ) {
ephemeralDisk := DefaultEphemeralDisk ( )
getTask := func ( s * Service ) * Task {
2020-08-28 16:38:30 +00:00
return & Task {
2018-01-12 23:32:51 +00:00
Name : "web" ,
Driver : "docker" ,
Resources : DefaultResources ( ) ,
Services : [ ] * Service { s } ,
LogConfig : DefaultLogConfig ( ) ,
}
2020-08-28 16:38:30 +00:00
}
tgNetworks := [ ] * NetworkResource {
{
DynamicPorts : [ ] Port {
{
Label : "http" ,
Value : 80 ,
2018-01-12 23:32:51 +00:00
} ,
} ,
2020-08-28 16:38:30 +00:00
} ,
2018-01-12 23:32:51 +00:00
}
cases := [ ] * Service {
{
// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
Name : "DriverModeWithLabel" ,
PortLabel : "asdf" ,
AddressMode : AddressModeDriver ,
} ,
{
Name : "HostModeWithLabel" ,
PortLabel : "asdf" ,
AddressMode : AddressModeHost ,
} ,
{
Name : "HostModeWithPort" ,
PortLabel : "80" ,
AddressMode : AddressModeHost ,
} ,
}
for _ , service := range cases {
task := getTask ( service )
t . Run ( service . Name , func ( t * testing . T ) {
2020-08-28 16:38:30 +00:00
err := task . Validate ( ephemeralDisk , JobTypeService , nil , tgNetworks )
2018-01-12 23:32:51 +00:00
if err == nil {
t . Fatalf ( "expected an error" )
}
//t.Logf("err: %v", err)
} )
}
}
2016-08-16 19:05:15 +00:00
func TestTask_Validate_Service_Check ( t * testing . T ) {
2017-08-14 17:23:52 +00:00
invalidCheck := ServiceCheck {
Name : "check-name" ,
Command : "/bin/true" ,
Type : ServiceCheckScript ,
Interval : 10 * time . Second ,
}
err := invalidCheck . validate ( )
if err == nil || ! strings . Contains ( err . Error ( ) , "Timeout cannot be less" ) {
t . Fatalf ( "expected a timeout validation error but received: %q" , err )
}
2016-08-16 19:05:15 +00:00
check1 := ServiceCheck {
Name : "check-name" ,
Type : ServiceCheckTCP ,
Interval : 10 * time . Second ,
Timeout : 2 * time . Second ,
}
2017-08-14 17:23:52 +00:00
if err := check1 . validate ( ) ; err != nil {
2017-02-28 00:00:19 +00:00
t . Fatalf ( "err: %v" , err )
2016-08-16 19:05:15 +00:00
}
check1 . InitialStatus = "foo"
err = check1 . validate ( )
if err == nil {
t . Fatal ( "Expected an error" )
}
if ! strings . Contains ( err . Error ( ) , "invalid initial check state (foo)" ) {
t . Fatalf ( "err: %v" , err )
}
check1 . InitialStatus = api . HealthCritical
err = check1 . validate ( )
if err != nil {
t . Fatalf ( "err: %v" , err )
}
check1 . InitialStatus = api . HealthPassing
err = check1 . validate ( )
if err != nil {
t . Fatalf ( "err: %v" , err )
}
check1 . InitialStatus = ""
err = check1 . validate ( )
if err != nil {
t . Fatalf ( "err: %v" , err )
2016-07-09 05:33:04 +00:00
}
2017-12-21 09:32:12 +00:00
check2 := ServiceCheck {
Name : "check-name-2" ,
Type : ServiceCheckHTTP ,
Interval : 10 * time . Second ,
Timeout : 2 * time . Second ,
Path : "/foo/bar" ,
}
err = check2 . validate ( )
if err != nil {
t . Fatalf ( "err: %v" , err )
}
check2 . Path = ""
err = check2 . validate ( )
if err == nil {
t . Fatal ( "Expected an error" )
}
if ! strings . Contains ( err . Error ( ) , "valid http path" ) {
t . Fatalf ( "err: %v" , err )
}
check2 . Path = "http://www.example.com"
err = check2 . validate ( )
if err == nil {
t . Fatal ( "Expected an error" )
}
if ! strings . Contains ( err . Error ( ) , "relative http path" ) {
t . Fatalf ( "err: %v" , err )
}
connect: enable automatic expose paths for individual group service checks
Part of #6120
Building on the support for enabling connect proxy paths in #7323, this change
adds the ability to configure the 'service.check.expose' flag on group-level
service check definitions for services that are connect-enabled. This is a slight
deviation from the "magic" that Consul provides. With Consul, the 'expose' flag
exists on the connect.proxy stanza, which will then auto-generate expose paths
for every HTTP and gRPC service check associated with that connect-enabled
service.
A first attempt at providing similar magic for Nomad's Consul Connect integration
followed that pattern exactly, as seen in #7396. However, on reviewing the PR
we realized having the `expose` flag on the proxy stanza inseperably ties together
the automatic path generation with every HTTP/gRPC defined on the service. This
makes sense in Consul's context, because a service definition is reasonably
associated with a single "task". With Nomad's group level service definitions
however, there is a reasonable expectation that a service definition is more
abstractly representative of multiple services within the task group. In this
case, one would want to define checks of that service which concretely make HTTP
or gRPC requests to different underlying tasks. Such a model is not possible
with the course `proxy.expose` flag.
Instead, we now have the flag made available within the check definitions themselves.
By making the expose feature resolute to each check, it is possible to have
some HTTP/gRPC checks which make use of the envoy exposed paths, as well as
some HTTP/gRPC checks which make use of some orthongonal port-mapping to do
checks on some other task (or even some other bound port of the same task)
within the task group.
Given this example,
group "server-group" {
network {
mode = "bridge"
port "forchecks" {
to = -1
}
}
service {
name = "myserver"
port = 2000
connect {
sidecar_service {
}
}
check {
name = "mycheck-myserver"
type = "http"
port = "forchecks"
interval = "3s"
timeout = "2s"
method = "GET"
path = "/classic/responder/health"
expose = true
}
}
}
Nomad will automatically inject (via job endpoint mutator) the
extrapolated expose path configuration, i.e.
expose {
path {
path = "/classic/responder/health"
protocol = "http"
local_path_port = 2000
listener_port = "forchecks"
}
}
Documentation is coming in #7440 (needs updating, doing next)
Modifications to the `countdash` examples in https://github.com/hashicorp/demo-consul-101/pull/6
which will make the examples in the documentation actually runnable.
Will add some e2e tests based on the above when it becomes available.
2020-03-25 01:49:55 +00:00
t . Run ( "check expose" , func ( t * testing . T ) {
t . Run ( "type http" , func ( t * testing . T ) {
require . NoError ( t , ( & ServiceCheck {
Type : ServiceCheckHTTP ,
Interval : 1 * time . Second ,
Timeout : 1 * time . Second ,
Path : "/health" ,
Expose : true ,
} ) . validate ( ) )
} )
t . Run ( "type tcp" , func ( t * testing . T ) {
require . EqualError ( t , ( & ServiceCheck {
Type : ServiceCheckTCP ,
Interval : 1 * time . Second ,
Timeout : 1 * time . Second ,
Expose : true ,
} ) . validate ( ) , "expose may only be set on HTTP or gRPC checks" )
} )
} )
2016-04-19 02:38:47 +00:00
}
2017-12-08 21:49:57 +00:00
// TestTask_Validate_Service_Check_AddressMode asserts that checks do not
// inherit address mode but do inherit ports.
func TestTask_Validate_Service_Check_AddressMode ( t * testing . T ) {
2020-08-28 16:38:30 +00:00
getTask := func ( s * Service ) ( * Task , * TaskGroup ) {
2017-12-19 00:18:42 +00:00
return & Task {
2020-08-28 16:38:30 +00:00
Services : [ ] * Service { s } ,
} , & TaskGroup {
2017-12-19 00:18:42 +00:00
Networks : [ ] * NetworkResource {
{
DynamicPorts : [ ] Port {
{
Label : "http" ,
Value : 9999 ,
} ,
2017-12-08 21:49:57 +00:00
} ,
} ,
} ,
2020-08-28 16:38:30 +00:00
}
2017-12-19 00:18:42 +00:00
}
cases := [ ] struct {
Service * Service
ErrContains string
} {
{
Service : & Service {
2017-12-08 21:49:57 +00:00
Name : "invalid-driver" ,
PortLabel : "80" ,
AddressMode : "host" ,
} ,
2017-12-19 00:18:42 +00:00
ErrContains : ` port label "80" referenced ` ,
} ,
{
Service : & Service {
Name : "http-driver-fail-1" ,
2017-12-08 21:49:57 +00:00
PortLabel : "80" ,
AddressMode : "driver" ,
Checks : [ ] * ServiceCheck {
{
Name : "invalid-check-1" ,
Type : "tcp" ,
Interval : time . Second ,
Timeout : time . Second ,
} ,
2017-12-19 00:18:42 +00:00
} ,
} ,
ErrContains : ` check "invalid-check-1" cannot use a numeric port ` ,
} ,
{
Service : & Service {
Name : "http-driver-fail-2" ,
PortLabel : "80" ,
AddressMode : "driver" ,
Checks : [ ] * ServiceCheck {
2017-12-08 21:49:57 +00:00
{
Name : "invalid-check-2" ,
Type : "tcp" ,
PortLabel : "80" ,
Interval : time . Second ,
Timeout : time . Second ,
} ,
2017-12-19 00:18:42 +00:00
} ,
} ,
ErrContains : ` check "invalid-check-2" cannot use a numeric port ` ,
} ,
{
Service : & Service {
Name : "http-driver-fail-3" ,
PortLabel : "80" ,
AddressMode : "driver" ,
Checks : [ ] * ServiceCheck {
2017-12-08 21:49:57 +00:00
{
Name : "invalid-check-3" ,
Type : "tcp" ,
PortLabel : "missing-port-label" ,
Interval : time . Second ,
Timeout : time . Second ,
} ,
2017-12-19 00:18:42 +00:00
} ,
} ,
ErrContains : ` port label "missing-port-label" referenced ` ,
} ,
{
Service : & Service {
Name : "http-driver-passes" ,
PortLabel : "80" ,
AddressMode : "driver" ,
Checks : [ ] * ServiceCheck {
2017-12-08 21:49:57 +00:00
{
Name : "valid-script-check" ,
Type : "script" ,
Command : "ok" ,
Interval : time . Second ,
Timeout : time . Second ,
} ,
{
Name : "valid-host-check" ,
Type : "tcp" ,
PortLabel : "http" ,
Interval : time . Second ,
Timeout : time . Second ,
} ,
{
Name : "valid-driver-check" ,
Type : "tcp" ,
AddressMode : "driver" ,
Interval : time . Second ,
Timeout : time . Second ,
} ,
} ,
} ,
} ,
2017-12-19 00:18:42 +00:00
{
Service : & Service {
Name : "empty-address-3673-passes-1" ,
Checks : [ ] * ServiceCheck {
{
Name : "valid-port-label" ,
Type : "tcp" ,
PortLabel : "http" ,
Interval : time . Second ,
Timeout : time . Second ,
} ,
{
Name : "empty-is-ok" ,
Type : "script" ,
Command : "ok" ,
Interval : time . Second ,
Timeout : time . Second ,
} ,
} ,
} ,
} ,
{
Service : & Service {
Name : "empty-address-3673-passes-2" ,
} ,
} ,
{
Service : & Service {
Name : "empty-address-3673-fails" ,
Checks : [ ] * ServiceCheck {
{
Name : "empty-is-not-ok" ,
Type : "tcp" ,
Interval : time . Second ,
Timeout : time . Second ,
} ,
} ,
} ,
ErrContains : ` invalid: check requires a port but neither check nor service ` ,
} ,
2021-01-20 19:34:23 +00:00
{
Service : & Service {
Name : "conect-block-on-task-level" ,
Connect : & ConsulConnect { SidecarService : & ConsulSidecarService { } } ,
} ,
ErrContains : ` cannot have "connect" block ` ,
} ,
2017-12-08 21:49:57 +00:00
}
2017-12-19 00:18:42 +00:00
for _ , tc := range cases {
tc := tc
2020-08-28 16:38:30 +00:00
task , tg := getTask ( tc . Service )
2017-12-19 00:18:42 +00:00
t . Run ( tc . Service . Name , func ( t * testing . T ) {
2020-08-28 16:38:30 +00:00
err := validateServices ( task , tg . Networks )
2017-12-19 00:18:42 +00:00
if err == nil && tc . ErrContains == "" {
// Ok!
return
}
if err == nil {
t . Fatalf ( "no error returned. expected: %s" , tc . ErrContains )
}
if ! strings . Contains ( err . Error ( ) , tc . ErrContains ) {
t . Fatalf ( "expected %q but found: %v" , tc . ErrContains , err )
}
} )
}
2017-12-08 21:49:57 +00:00
}
2018-05-03 22:18:12 +00:00
// TestTask_Validate_Service_Check_GRPC asserts that a gRPC check requires a
// port label to be valid.
func TestTask_Validate_Service_Check_GRPC(t *testing.T) {
	t.Parallel()

	// Bad (no port)
	invalidGRPC := &ServiceCheck{
		Type:     ServiceCheckGRPC,
		Interval: time.Second,
		Timeout:  time.Second,
	}
	service := &Service{
		Name:   "test",
		Checks: []*ServiceCheck{invalidGRPC},
	}
	assert.Error(t, service.Validate())

	// Good
	service.Checks[0] = &ServiceCheck{
		Type:      ServiceCheckGRPC,
		Interval:  time.Second,
		Timeout:   time.Second,
		PortLabel: "some-port-label",
	}
	assert.NoError(t, service.Validate())
}
2017-09-15 22:12:47 +00:00
func TestTask_Validate_Service_Check_CheckRestart ( t * testing . T ) {
2018-05-03 22:18:12 +00:00
t . Parallel ( )
2017-09-15 22:12:47 +00:00
invalidCheckRestart := & CheckRestart {
Limit : - 1 ,
Grace : - 1 ,
}
err := invalidCheckRestart . Validate ( )
assert . NotNil ( t , err , "invalidateCheckRestart.Validate()" )
assert . Len ( t , err . ( * multierror . Error ) . Errors , 2 )
validCheckRestart := & CheckRestart { }
assert . Nil ( t , validCheckRestart . Validate ( ) )
validCheckRestart . Limit = 1
validCheckRestart . Grace = 1
assert . Nil ( t , validCheckRestart . Validate ( ) )
}
2019-08-15 15:22:37 +00:00
func TestTask_Validate_ConnectProxyKind ( t * testing . T ) {
2019-08-09 21:40:51 +00:00
ephemeralDisk := DefaultEphemeralDisk ( )
2019-08-12 22:41:40 +00:00
getTask := func ( kind TaskKind , leader bool ) * Task {
2019-08-09 21:40:51 +00:00
task := & Task {
Name : "web" ,
Driver : "docker" ,
Resources : DefaultResources ( ) ,
LogConfig : DefaultLogConfig ( ) ,
Kind : kind ,
Leader : leader ,
}
task . Resources . Networks = [ ] * NetworkResource {
{
MBits : 10 ,
DynamicPorts : [ ] Port {
{
Label : "http" ,
Value : 80 ,
} ,
} ,
} ,
}
return task
}
cases := [ ] struct {
Desc string
2019-08-12 22:41:40 +00:00
Kind TaskKind
2019-08-09 21:40:51 +00:00
Leader bool
Service * Service
TgService [ ] * Service
ErrContains string
} {
{
Desc : "Not connect" ,
Kind : "test" ,
} ,
{
Desc : "Invalid because of service in task definition" ,
2019-08-12 22:41:40 +00:00
Kind : "connect-proxy:redis" ,
2019-08-09 21:40:51 +00:00
Service : & Service {
Name : "redis" ,
} ,
2019-08-12 22:41:40 +00:00
ErrContains : "Connect proxy task must not have a service stanza" ,
2019-08-09 21:40:51 +00:00
} ,
{
Desc : "Leader should not be set" ,
2019-08-12 22:41:40 +00:00
Kind : "connect-proxy:redis" ,
2019-08-09 21:40:51 +00:00
Leader : true ,
Service : & Service {
Name : "redis" ,
} ,
ErrContains : "Connect proxy task must not have leader set" ,
} ,
{
Desc : "Service name invalid" ,
2019-08-12 22:41:40 +00:00
Kind : "connect-proxy:redis:test" ,
2019-08-09 21:40:51 +00:00
Service : & Service {
Name : "redis" ,
} ,
2019-12-12 23:46:14 +00:00
ErrContains : ` No Connect services in task group with Connect proxy ("redis:test") ` ,
2019-08-09 21:40:51 +00:00
} ,
{
Desc : "Service name not found in group" ,
2019-08-12 22:41:40 +00:00
Kind : "connect-proxy:redis" ,
2019-12-12 23:46:14 +00:00
ErrContains : ` No Connect services in task group with Connect proxy ("redis") ` ,
2019-08-09 21:40:51 +00:00
} ,
{
Desc : "Connect stanza not configured in group" ,
2019-08-12 22:41:40 +00:00
Kind : "connect-proxy:redis" ,
2019-08-09 21:40:51 +00:00
TgService : [ ] * Service { {
Name : "redis" ,
} } ,
2019-12-12 23:46:14 +00:00
ErrContains : ` No Connect services in task group with Connect proxy ("redis") ` ,
2019-08-09 21:40:51 +00:00
} ,
{
Desc : "Valid connect proxy kind" ,
2019-08-12 22:41:40 +00:00
Kind : "connect-proxy:redis" ,
2019-08-09 21:40:51 +00:00
TgService : [ ] * Service { {
Name : "redis" ,
Connect : & ConsulConnect {
SidecarService : & ConsulSidecarService {
Port : "db" ,
} ,
} ,
} } ,
} ,
}
for _ , tc := range cases {
tc := tc
task := getTask ( tc . Kind , tc . Leader )
if tc . Service != nil {
task . Services = [ ] * Service { tc . Service }
}
t . Run ( tc . Desc , func ( t * testing . T ) {
2020-08-28 16:38:30 +00:00
err := task . Validate ( ephemeralDisk , "service" , tc . TgService , nil )
2019-08-09 21:40:51 +00:00
if err == nil && tc . ErrContains == "" {
// Ok!
return
}
2019-12-12 23:46:14 +00:00
require . Errorf ( t , err , "no error returned. expected: %s" , tc . ErrContains )
require . Containsf ( t , err . Error ( ) , tc . ErrContains , "expected %q but found: %v" , tc . ErrContains , err )
2019-08-09 21:40:51 +00:00
} )
}
}
2016-02-11 20:30:47 +00:00
func TestTask_Validate_LogConfig ( t * testing . T ) {
task := & Task {
LogConfig : DefaultLogConfig ( ) ,
2016-08-25 18:53:09 +00:00
}
2016-09-14 22:43:42 +00:00
ephemeralDisk := & EphemeralDisk {
SizeMB : 1 ,
2016-02-11 20:30:47 +00:00
}
2020-08-28 16:38:30 +00:00
err := task . Validate ( ephemeralDisk , JobTypeService , nil , nil )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "log storage" )
2016-02-11 20:30:47 +00:00
}
2020-10-05 19:13:39 +00:00
// TestLogConfig_Equals covers LogConfig.Equals for nil receivers/arguments
// and for each field differing independently.
func TestLogConfig_Equals(t *testing.T) {
	t.Run("both nil", func(t *testing.T) {
		a := (*LogConfig)(nil)
		b := (*LogConfig)(nil)
		require.True(t, a.Equals(b))
	})

	t.Run("one nil", func(t *testing.T) {
		a := new(LogConfig)
		b := (*LogConfig)(nil)
		require.False(t, a.Equals(b))
	})

	t.Run("max files", func(t *testing.T) {
		a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
		b := &LogConfig{MaxFiles: 2, MaxFileSizeMB: 200}
		require.False(t, a.Equals(b))
	})

	t.Run("max file size", func(t *testing.T) {
		a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 100}
		b := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
		require.False(t, a.Equals(b))
	})

	t.Run("same", func(t *testing.T) {
		a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
		b := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
		require.True(t, a.Equals(b))
	})
}
2019-10-22 13:20:26 +00:00
func TestTask_Validate_CSIPluginConfig ( t * testing . T ) {
table := [ ] struct {
2021-01-21 19:53:02 +00:00
name string
pc * TaskCSIPluginConfig
expectedErr string
unexpectedErr string
2019-10-22 13:20:26 +00:00
} {
{
2021-01-21 19:53:02 +00:00
name : "no errors when not specified" ,
pc : nil ,
unexpectedErr : "CSIPluginConfig" ,
2019-10-22 13:20:26 +00:00
} ,
{
name : "requires non-empty plugin id" ,
pc : & TaskCSIPluginConfig { } ,
expectedErr : "CSIPluginConfig must have a non-empty PluginID" ,
} ,
{
name : "requires valid plugin type" ,
pc : & TaskCSIPluginConfig {
ID : "com.hashicorp.csi" ,
Type : "nonsense" ,
} ,
expectedErr : "CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"nonsense\"" ,
} ,
}
for _ , tt := range table {
t . Run ( tt . name , func ( t * testing . T ) {
2021-01-21 19:53:02 +00:00
task := testJob ( ) . TaskGroups [ 0 ] . Tasks [ 0 ]
task . CSIPluginConfig = tt . pc
2019-10-22 13:20:26 +00:00
ephemeralDisk := & EphemeralDisk {
2021-01-21 19:53:02 +00:00
SizeMB : 100 ,
2019-10-22 13:20:26 +00:00
}
2020-08-28 16:38:30 +00:00
err := task . Validate ( ephemeralDisk , JobTypeService , nil , nil )
2019-10-22 13:20:26 +00:00
if tt . expectedErr != "" {
2021-01-21 19:53:02 +00:00
require . Error ( t , err )
require . Contains ( t , err . Error ( ) , tt . expectedErr )
2019-10-22 13:20:26 +00:00
} else {
2021-01-21 19:53:02 +00:00
require . NoError ( t , err )
2019-10-22 13:20:26 +00:00
}
} )
}
}
2016-09-23 22:39:52 +00:00
func TestTask_Validate_Template ( t * testing . T ) {
bad := & Template { }
task := & Task {
Templates : [ ] * Template { bad } ,
}
ephemeralDisk := & EphemeralDisk {
SizeMB : 1 ,
}
2020-08-28 16:38:30 +00:00
err := task . Validate ( ephemeralDisk , JobTypeService , nil , nil )
2016-09-23 22:39:52 +00:00
if ! strings . Contains ( err . Error ( ) , "Template 1 validation failed" ) {
t . Fatalf ( "err: %s" , err )
}
2016-10-10 22:19:00 +00:00
// Have two templates that share the same destination
good := & Template {
SourcePath : "foo" ,
DestPath : "local/foo" ,
ChangeMode : "noop" ,
}
task . Templates = [ ] * Template { good , good }
2020-08-28 16:38:30 +00:00
err = task . Validate ( ephemeralDisk , JobTypeService , nil , nil )
2016-10-10 22:19:00 +00:00
if ! strings . Contains ( err . Error ( ) , "same destination as" ) {
t . Fatalf ( "err: %s" , err )
}
2017-07-26 03:27:18 +00:00
// Env templates can't use signals
task . Templates = [ ] * Template {
{
Envvars : true ,
ChangeMode : "signal" ,
} ,
}
2020-08-28 16:38:30 +00:00
err = task . Validate ( ephemeralDisk , JobTypeService , nil , nil )
2017-07-26 03:27:18 +00:00
if err == nil {
t . Fatalf ( "expected error from Template.Validate" )
}
if expected := "cannot use signals" ; ! strings . Contains ( err . Error ( ) , expected ) {
t . Errorf ( "expected to find %q but found %v" , expected , err )
}
2016-09-23 22:39:52 +00:00
}
func TestTemplate_Validate ( t * testing . T ) {
cases := [ ] struct {
Tmpl * Template
Fail bool
ContainsErrs [ ] string
} {
{
Tmpl : & Template { } ,
Fail : true ,
ContainsErrs : [ ] string {
"specify a source path" ,
"specify a destination" ,
TemplateChangeModeInvalidError . Error ( ) ,
} ,
} ,
{
Tmpl : & Template {
Splay : - 100 ,
} ,
Fail : true ,
ContainsErrs : [ ] string {
"positive splay" ,
} ,
} ,
{
Tmpl : & Template {
ChangeMode : "foo" ,
} ,
Fail : true ,
ContainsErrs : [ ] string {
TemplateChangeModeInvalidError . Error ( ) ,
} ,
} ,
{
Tmpl : & Template {
ChangeMode : "signal" ,
} ,
Fail : true ,
ContainsErrs : [ ] string {
"specify signal value" ,
} ,
} ,
{
Tmpl : & Template {
SourcePath : "foo" ,
DestPath : "../../root" ,
ChangeMode : "noop" ,
} ,
Fail : true ,
ContainsErrs : [ ] string {
"destination escapes" ,
} ,
} ,
{
Tmpl : & Template {
SourcePath : "foo" ,
DestPath : "local/foo" ,
ChangeMode : "noop" ,
} ,
Fail : false ,
} ,
2017-02-01 04:00:33 +00:00
{
Tmpl : & Template {
SourcePath : "foo" ,
DestPath : "local/foo" ,
ChangeMode : "noop" ,
Perms : "0444" ,
} ,
Fail : false ,
} ,
{
Tmpl : & Template {
SourcePath : "foo" ,
DestPath : "local/foo" ,
ChangeMode : "noop" ,
Perms : "zza" ,
} ,
Fail : true ,
ContainsErrs : [ ] string {
"as octal" ,
} ,
} ,
2016-09-23 22:39:52 +00:00
}
for i , c := range cases {
err := c . Tmpl . Validate ( )
if err != nil {
if ! c . Fail {
t . Fatalf ( "Case %d: shouldn't have failed: %v" , i + 1 , err )
}
e := err . Error ( )
for _ , exp := range c . ContainsErrs {
if ! strings . Contains ( e , exp ) {
t . Fatalf ( "Cased %d: should have contained error %q: %q" , i + 1 , exp , e )
}
}
} else if c . Fail {
t . Fatalf ( "Case %d: should have failed: %v" , i + 1 , err )
}
}
}
2015-10-11 19:50:16 +00:00
func TestConstraint_Validate ( t * testing . T ) {
c := & Constraint { }
err := c . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "Missing constraint operand" )
2015-10-11 19:50:16 +00:00
c = & Constraint {
LTarget : "$attr.kernel.name" ,
RTarget : "linux" ,
Operand : "=" ,
}
err = c . Validate ( )
2019-11-13 23:36:15 +00:00
require . NoError ( t , err )
2015-10-11 19:50:16 +00:00
// Perform additional regexp validation
2015-10-26 20:47:56 +00:00
c . Operand = ConstraintRegex
2015-10-11 19:50:16 +00:00
c . RTarget = "(foo"
err = c . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "missing closing" )
2015-10-11 19:50:16 +00:00
// Perform version validation
2015-10-26 20:47:56 +00:00
c . Operand = ConstraintVersion
2015-10-11 19:50:16 +00:00
c . RTarget = "~> foo"
err = c . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "Malformed constraint" )
2017-07-31 23:44:17 +00:00
2019-11-13 23:36:15 +00:00
// Perform semver validation
c . Operand = ConstraintSemver
err = c . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "Malformed constraint" )
2019-11-13 23:36:15 +00:00
c . RTarget = ">= 0.6.1"
require . NoError ( t , c . Validate ( ) )
2017-07-31 23:44:17 +00:00
// Perform distinct_property validation
c . Operand = ConstraintDistinctProperty
c . RTarget = "0"
err = c . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "count of 1 or greater" )
2017-07-31 23:44:17 +00:00
c . RTarget = "-1"
err = c . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "to uint64" )
2017-07-31 23:44:17 +00:00
// Perform distinct_hosts validation
c . Operand = ConstraintDistinctHosts
2017-08-30 17:30:01 +00:00
c . LTarget = ""
c . RTarget = ""
if err := c . Validate ( ) ; err != nil {
t . Fatalf ( "expected valid constraint: %v" , err )
2017-07-31 23:44:17 +00:00
}
2018-10-15 22:31:13 +00:00
// Perform set_contains* validation
2017-07-31 23:44:17 +00:00
c . RTarget = ""
2018-10-15 22:31:13 +00:00
for _ , o := range [ ] string { ConstraintSetContains , ConstraintSetContainsAll , ConstraintSetContainsAny } {
c . Operand = o
err = c . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "requires an RTarget" )
2017-07-31 23:44:17 +00:00
}
// Perform LTarget validation
c . Operand = ConstraintRegex
c . RTarget = "foo"
c . LTarget = ""
err = c . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "No LTarget" )
2017-07-31 23:44:17 +00:00
// Perform constraint type validation
c . Operand = "foo"
err = c . Validate ( )
2021-01-21 19:53:02 +00:00
require . Error ( t , err , "Unknown constraint type" )
2015-10-11 19:50:16 +00:00
}
2018-07-16 13:30:58 +00:00
func TestAffinity_Validate ( t * testing . T ) {
type tc struct {
affinity * Affinity
err error
name string
}
testCases := [ ] tc {
{
affinity : & Affinity { } ,
err : fmt . Errorf ( "Missing affinity operand" ) ,
} ,
{
affinity : & Affinity {
Operand : "foo" ,
LTarget : "${meta.node_class}" ,
Weight : 10 ,
} ,
err : fmt . Errorf ( "Unknown affinity operator \"foo\"" ) ,
} ,
{
affinity : & Affinity {
Operand : "=" ,
LTarget : "${meta.node_class}" ,
Weight : 10 ,
} ,
err : fmt . Errorf ( "Operator \"=\" requires an RTarget" ) ,
} ,
{
affinity : & Affinity {
Operand : "=" ,
LTarget : "${meta.node_class}" ,
RTarget : "c4" ,
Weight : 0 ,
} ,
err : fmt . Errorf ( "Affinity weight cannot be zero" ) ,
} ,
{
affinity : & Affinity {
Operand : "=" ,
LTarget : "${meta.node_class}" ,
RTarget : "c4" ,
2019-01-30 20:20:38 +00:00
Weight : 110 ,
2018-07-16 13:30:58 +00:00
} ,
err : fmt . Errorf ( "Affinity weight must be within the range [-100,100]" ) ,
} ,
{
affinity : & Affinity {
Operand : "=" ,
LTarget : "${node.class}" ,
Weight : 10 ,
} ,
err : fmt . Errorf ( "Operator \"=\" requires an RTarget" ) ,
} ,
{
affinity : & Affinity {
Operand : "version" ,
LTarget : "${meta.os}" ,
RTarget : ">>2.0" ,
2019-01-30 20:20:38 +00:00
Weight : 110 ,
2018-07-16 13:30:58 +00:00
} ,
err : fmt . Errorf ( "Version affinity is invalid" ) ,
} ,
{
affinity : & Affinity {
Operand : "regexp" ,
LTarget : "${meta.os}" ,
RTarget : "\\K2.0" ,
Weight : 100 ,
} ,
err : fmt . Errorf ( "Regular expression failed to compile" ) ,
} ,
}
for _ , tc := range testCases {
t . Run ( tc . name , func ( t * testing . T ) {
err := tc . affinity . Validate ( )
if tc . err != nil {
require . NotNil ( t , err )
require . Contains ( t , err . Error ( ) , tc . err . Error ( ) )
} else {
require . Nil ( t , err )
}
} )
}
}
2017-05-09 00:44:26 +00:00
func TestUpdateStrategy_Validate ( t * testing . T ) {
u := & UpdateStrategy {
2019-09-02 17:30:09 +00:00
MaxParallel : - 1 ,
2018-03-23 17:56:00 +00:00
HealthCheck : "foo" ,
MinHealthyTime : - 10 ,
HealthyDeadline : - 15 ,
ProgressDeadline : - 25 ,
AutoRevert : false ,
Canary : - 1 ,
2017-05-09 00:44:26 +00:00
}
err := u . Validate ( )
2021-01-21 19:53:02 +00:00
requireErrors ( t , err ,
"Invalid health check given" ,
"Max parallel can not be less than zero" ,
"Canary count can not be less than zero" ,
"Minimum healthy time may not be less than zero" ,
"Healthy deadline must be greater than zero" ,
"Progress deadline must be zero or greater" ,
"Minimum healthy time must be less than healthy deadline" ,
"Healthy deadline must be less than progress deadline" ,
)
2017-05-09 00:44:26 +00:00
}
2015-09-13 22:04:36 +00:00
func TestResource_NetIndex ( t * testing . T ) {
2015-08-05 00:23:42 +00:00
r := & Resources {
Networks : [ ] * NetworkResource {
2017-09-26 22:26:33 +00:00
{ Device : "eth0" } ,
{ Device : "lo0" } ,
{ Device : "" } ,
2015-08-05 00:23:42 +00:00
} ,
}
2015-09-13 22:04:36 +00:00
if idx := r . NetIndex ( & NetworkResource { Device : "eth0" } ) ; idx != 0 {
2015-08-05 00:23:42 +00:00
t . Fatalf ( "Bad: %d" , idx )
}
2015-09-13 22:04:36 +00:00
if idx := r . NetIndex ( & NetworkResource { Device : "lo0" } ) ; idx != 1 {
2015-08-05 00:23:42 +00:00
t . Fatalf ( "Bad: %d" , idx )
}
2015-09-13 22:04:36 +00:00
if idx := r . NetIndex ( & NetworkResource { Device : "eth1" } ) ; idx != - 1 {
2015-09-12 23:21:57 +00:00
t . Fatalf ( "Bad: %d" , idx )
}
}
2015-08-05 00:32:57 +00:00
func TestResource_Superset ( t * testing . T ) {
r1 := & Resources {
2015-09-23 18:14:32 +00:00
CPU : 2000 ,
2015-08-05 00:32:57 +00:00
MemoryMB : 2048 ,
DiskMB : 10000 ,
}
r2 := & Resources {
2015-09-23 18:14:32 +00:00
CPU : 2000 ,
2015-08-05 00:32:57 +00:00
MemoryMB : 1024 ,
DiskMB : 5000 ,
}
2015-09-14 01:38:11 +00:00
if s , _ := r1 . Superset ( r1 ) ; ! s {
2015-08-05 00:32:57 +00:00
t . Fatalf ( "bad" )
}
2015-09-14 01:38:11 +00:00
if s , _ := r1 . Superset ( r2 ) ; ! s {
2015-08-05 00:32:57 +00:00
t . Fatalf ( "bad" )
}
2015-09-14 01:38:11 +00:00
if s , _ := r2 . Superset ( r1 ) ; s {
2015-08-05 00:32:57 +00:00
t . Fatalf ( "bad" )
}
2015-09-14 01:38:11 +00:00
if s , _ := r2 . Superset ( r2 ) ; ! s {
2015-08-05 00:32:57 +00:00
t . Fatalf ( "bad" )
}
}
2015-08-05 00:41:02 +00:00
func TestResource_Add ( t * testing . T ) {
r1 := & Resources {
2015-09-23 18:14:32 +00:00
CPU : 2000 ,
2015-08-05 00:41:02 +00:00
MemoryMB : 2048 ,
DiskMB : 10000 ,
Networks : [ ] * NetworkResource {
2017-09-26 22:26:33 +00:00
{
2015-08-05 00:41:02 +00:00
CIDR : "10.0.0.0/8" ,
MBits : 100 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "ssh" , 22 , 0 , "" } } ,
2015-08-05 00:41:02 +00:00
} ,
} ,
}
r2 := & Resources {
2015-09-23 18:14:32 +00:00
CPU : 2000 ,
2015-08-05 00:41:02 +00:00
MemoryMB : 1024 ,
DiskMB : 5000 ,
Networks : [ ] * NetworkResource {
2017-09-26 22:26:33 +00:00
{
2015-09-12 23:33:41 +00:00
IP : "10.0.0.1" ,
2015-08-05 00:41:02 +00:00
MBits : 50 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2015-08-05 00:41:02 +00:00
} ,
} ,
}
2021-01-14 20:46:35 +00:00
r1 . Add ( r2 )
2015-08-05 00:41:02 +00:00
expect := & Resources {
2015-09-23 18:14:32 +00:00
CPU : 3000 ,
2015-08-05 00:41:02 +00:00
MemoryMB : 3072 ,
DiskMB : 15000 ,
Networks : [ ] * NetworkResource {
2017-09-26 22:26:33 +00:00
{
2015-08-05 00:41:02 +00:00
CIDR : "10.0.0.0/8" ,
MBits : 150 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "ssh" , 22 , 0 , "" } , { "web" , 80 , 0 , "" } } ,
2015-08-05 00:41:02 +00:00
} ,
} ,
}
if ! reflect . DeepEqual ( expect . Networks , r1 . Networks ) {
t . Fatalf ( "bad: %#v %#v" , expect , r1 )
}
}
2015-09-13 00:04:09 +00:00
func TestResource_Add_Network ( t * testing . T ) {
r1 := & Resources { }
r2 := & Resources {
Networks : [ ] * NetworkResource {
2017-09-26 22:26:33 +00:00
{
2015-09-13 00:04:09 +00:00
MBits : 50 ,
2020-06-19 17:53:31 +00:00
DynamicPorts : [ ] Port { { "http" , 0 , 80 , "" } , { "https" , 0 , 443 , "" } } ,
2015-09-13 00:04:09 +00:00
} ,
} ,
}
r3 := & Resources {
Networks : [ ] * NetworkResource {
2017-09-26 22:26:33 +00:00
{
2015-09-13 00:04:09 +00:00
MBits : 25 ,
2020-06-19 17:53:31 +00:00
DynamicPorts : [ ] Port { { "admin" , 0 , 8080 , "" } } ,
2015-09-13 00:04:09 +00:00
} ,
} ,
}
2021-01-14 20:46:35 +00:00
r1 . Add ( r2 )
r1 . Add ( r3 )
2015-09-13 00:04:09 +00:00
expect := & Resources {
Networks : [ ] * NetworkResource {
2017-09-26 22:26:33 +00:00
{
2015-09-13 00:04:09 +00:00
MBits : 75 ,
2020-06-19 17:53:31 +00:00
DynamicPorts : [ ] Port { { "http" , 0 , 80 , "" } , { "https" , 0 , 443 , "" } , { "admin" , 0 , 8080 , "" } } ,
2015-09-13 00:04:09 +00:00
} ,
} ,
}
if ! reflect . DeepEqual ( expect . Networks , r1 . Networks ) {
2015-09-14 01:38:11 +00:00
t . Fatalf ( "bad: %#v %#v" , expect . Networks [ 0 ] , r1 . Networks [ 0 ] )
2015-09-13 00:04:09 +00:00
}
}
2018-10-18 04:49:37 +00:00
func TestComparableResources_Subtract ( t * testing . T ) {
r1 := & ComparableResources {
Flattened : AllocatedTaskResources {
Cpu : AllocatedCpuResources {
CpuShares : 2000 ,
2018-09-10 17:38:36 +00:00
} ,
2018-10-18 04:49:37 +00:00
Memory : AllocatedMemoryResources {
MemoryMB : 2048 ,
} ,
Networks : [ ] * NetworkResource {
{
CIDR : "10.0.0.0/8" ,
MBits : 100 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "ssh" , 22 , 0 , "" } } ,
2018-10-18 04:49:37 +00:00
} ,
2018-09-10 17:38:36 +00:00
} ,
} ,
2018-10-18 04:49:37 +00:00
Shared : AllocatedSharedResources {
DiskMB : 10000 ,
} ,
2018-09-10 17:38:36 +00:00
}
2018-10-18 04:49:37 +00:00
r2 := & ComparableResources {
Flattened : AllocatedTaskResources {
Cpu : AllocatedCpuResources {
CpuShares : 1000 ,
} ,
Memory : AllocatedMemoryResources {
MemoryMB : 1024 ,
} ,
Networks : [ ] * NetworkResource {
{
CIDR : "10.0.0.0/8" ,
MBits : 20 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "ssh" , 22 , 0 , "" } } ,
2018-10-18 04:49:37 +00:00
} ,
} ,
} ,
Shared : AllocatedSharedResources {
DiskMB : 5000 ,
} ,
2018-09-10 17:38:36 +00:00
}
2018-10-18 04:49:37 +00:00
r1 . Subtract ( r2 )
2018-09-10 17:38:36 +00:00
2018-10-18 04:49:37 +00:00
expect := & ComparableResources {
Flattened : AllocatedTaskResources {
Cpu : AllocatedCpuResources {
CpuShares : 1000 ,
} ,
Memory : AllocatedMemoryResources {
MemoryMB : 1024 ,
} ,
Networks : [ ] * NetworkResource {
{
CIDR : "10.0.0.0/8" ,
2018-11-06 18:26:26 +00:00
MBits : 100 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "ssh" , 22 , 0 , "" } } ,
2018-10-18 04:49:37 +00:00
} ,
2018-09-10 17:38:36 +00:00
} ,
} ,
2018-10-18 04:49:37 +00:00
Shared : AllocatedSharedResources {
DiskMB : 5000 ,
} ,
2018-09-10 17:38:36 +00:00
}
2018-10-18 04:49:37 +00:00
require := require . New ( t )
require . Equal ( expect , r1 )
2018-09-10 17:38:36 +00:00
}
2015-06-05 22:21:17 +00:00
// TestEncodeDecode round-trips a struct through Encode/Decode and checks
// the decoded value matches the input exactly.
func TestEncodeDecode(t *testing.T) {
	type FooRequest struct {
		Foo string
		Bar int
		Baz bool
	}
	in := &FooRequest{
		Foo: "test",
		Bar: 42,
		Baz: true,
	}

	buf, err := Encode(1, in)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Decode skips the leading message-type byte written by Encode.
	var out FooRequest
	if err = Decode(buf[1:], &out); err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(in, &out) {
		t.Fatalf("bad: %#v %#v", in, out)
	}
}
2015-11-17 07:56:11 +00:00
2016-02-21 01:43:17 +00:00
// BenchmarkEncodeDecode measures the cost of a full Encode/Decode
// round-trip of a representative Job struct.
func BenchmarkEncodeDecode(b *testing.B) {
	job := testJob()
	for n := 0; n < b.N; n++ {
		raw, err := Encode(1, job)
		if err != nil {
			b.Fatalf("err: %v", err)
		}

		var decoded Job
		if err = Decode(raw[1:], &decoded); err != nil {
			b.Fatalf("err: %v", err)
		}
	}
}
2015-11-17 21:36:59 +00:00
func TestInvalidServiceCheck ( t * testing . T ) {
2016-06-12 23:36:49 +00:00
s := Service {
2015-11-17 21:36:59 +00:00
Name : "service-name" ,
PortLabel : "bar" ,
2015-11-26 20:40:42 +00:00
Checks : [ ] * ServiceCheck {
2015-11-17 21:36:59 +00:00
{
Name : "check-name" ,
Type : "lol" ,
} ,
} ,
}
if err := s . Validate ( ) ; err == nil {
2016-03-15 00:54:49 +00:00
t . Fatalf ( "Service should be invalid (invalid type)" )
2015-11-17 07:56:11 +00:00
}
2016-02-05 22:42:35 +00:00
2016-06-12 23:36:49 +00:00
s = Service {
2016-02-05 22:42:35 +00:00
Name : "service.name" ,
PortLabel : "bar" ,
}
2016-10-24 19:13:47 +00:00
if err := s . ValidateName ( s . Name ) ; err == nil {
2016-03-15 00:54:49 +00:00
t . Fatalf ( "Service should be invalid (contains a dot): %v" , err )
}
2016-06-12 23:36:49 +00:00
s = Service {
2016-03-15 00:54:49 +00:00
Name : "-my-service" ,
PortLabel : "bar" ,
}
if err := s . Validate ( ) ; err == nil {
t . Fatalf ( "Service should be invalid (begins with a hyphen): %v" , err )
}
2016-10-24 19:13:47 +00:00
s = Service {
Name : "my-service-${NOMAD_META_FOO}" ,
PortLabel : "bar" ,
}
if err := s . Validate ( ) ; err != nil {
t . Fatalf ( "Service should be valid: %v" , err )
}
2017-11-15 21:35:43 +00:00
s = Service {
Name : "my_service-${NOMAD_META_FOO}" ,
PortLabel : "bar" ,
}
if err := s . Validate ( ) ; err == nil {
t . Fatalf ( "Service should be invalid (contains underscore but not in a variable name): %v" , err )
}
2016-06-12 23:36:49 +00:00
s = Service {
2016-03-15 00:54:49 +00:00
Name : "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456" ,
PortLabel : "bar" ,
}
2016-10-24 19:13:47 +00:00
if err := s . ValidateName ( s . Name ) ; err == nil {
2016-03-15 00:54:49 +00:00
t . Fatalf ( "Service should be invalid (too long): %v" , err )
2016-02-05 22:42:35 +00:00
}
2016-04-15 08:50:55 +00:00
2016-06-12 23:36:49 +00:00
s = Service {
2016-04-15 08:50:55 +00:00
Name : "service-name" ,
Checks : [ ] * ServiceCheck {
{
Name : "check-tcp" ,
Type : ServiceCheckTCP ,
Interval : 5 * time . Second ,
Timeout : 2 * time . Second ,
} ,
{
Name : "check-http" ,
Type : ServiceCheckHTTP ,
Path : "/foo" ,
Interval : 5 * time . Second ,
Timeout : 2 * time . Second ,
} ,
} ,
}
if err := s . Validate ( ) ; err == nil {
t . Fatalf ( "service should be invalid (tcp/http checks with no port): %v" , err )
}
2016-06-12 23:36:49 +00:00
s = Service {
2016-04-15 08:50:55 +00:00
Name : "service-name" ,
Checks : [ ] * ServiceCheck {
{
Name : "check-script" ,
Type : ServiceCheckScript ,
Command : "/bin/date" ,
Interval : 5 * time . Second ,
Timeout : 2 * time . Second ,
} ,
} ,
}
if err := s . Validate ( ) ; err != nil {
t . Fatalf ( "un-expected error: %v" , err )
}
2019-08-21 16:42:53 +00:00
s = Service {
Name : "service-name" ,
Checks : [ ] * ServiceCheck {
{
Name : "tcp-check" ,
Type : ServiceCheckTCP ,
Interval : 5 * time . Second ,
Timeout : 2 * time . Second ,
} ,
} ,
Connect : & ConsulConnect {
SidecarService : & ConsulSidecarService { } ,
} ,
}
require . Error ( t , s . Validate ( ) )
2015-11-17 07:56:11 +00:00
}
2015-11-21 20:34:01 +00:00
2015-12-14 23:57:56 +00:00
func TestDistinctCheckID ( t * testing . T ) {
2015-11-21 20:34:01 +00:00
c1 := ServiceCheck {
Name : "web-health" ,
Type : "http" ,
Path : "/health" ,
Interval : 2 * time . Second ,
Timeout : 3 * time . Second ,
}
c2 := ServiceCheck {
Name : "web-health" ,
Type : "http" ,
Path : "/health1" ,
Interval : 2 * time . Second ,
Timeout : 3 * time . Second ,
}
c3 := ServiceCheck {
Name : "web-health" ,
Type : "http" ,
Path : "/health" ,
Interval : 4 * time . Second ,
Timeout : 3 * time . Second ,
}
2015-12-14 23:57:56 +00:00
serviceID := "123"
c1Hash := c1 . Hash ( serviceID )
c2Hash := c2 . Hash ( serviceID )
c3Hash := c3 . Hash ( serviceID )
2015-11-21 20:34:01 +00:00
2015-11-26 21:47:02 +00:00
if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash {
t . Fatalf ( "Checks need to be uniq c1: %s, c2: %s, c3: %s" , c1Hash , c2Hash , c3Hash )
2015-11-21 20:34:01 +00:00
}
}
2015-11-27 03:26:00 +00:00
2016-07-20 23:07:15 +00:00
func TestService_Canonicalize ( t * testing . T ) {
2015-11-27 03:26:00 +00:00
job := "example"
taskGroup := "cache"
task := "redis"
2016-06-12 23:36:49 +00:00
s := Service {
2015-11-27 03:26:00 +00:00
Name : "${TASK}-db" ,
}
2016-07-20 23:07:15 +00:00
s . Canonicalize ( job , taskGroup , task )
2015-11-27 03:26:00 +00:00
if s . Name != "redis-db" {
t . Fatalf ( "Expected name: %v, Actual: %v" , "redis-db" , s . Name )
}
s . Name = "db"
2016-07-20 23:07:15 +00:00
s . Canonicalize ( job , taskGroup , task )
2015-11-27 03:26:00 +00:00
if s . Name != "db" {
t . Fatalf ( "Expected name: %v, Actual: %v" , "redis-db" , s . Name )
}
s . Name = "${JOB}-${TASKGROUP}-${TASK}-db"
2016-07-20 23:07:15 +00:00
s . Canonicalize ( job , taskGroup , task )
2015-11-27 03:26:00 +00:00
if s . Name != "example-cache-redis-db" {
2018-03-11 18:05:59 +00:00
t . Fatalf ( "Expected name: %v, Actual: %v" , "example-cache-redis-db" , s . Name )
2015-11-27 03:26:00 +00:00
}
s . Name = "${BASE}-db"
2016-07-20 23:07:15 +00:00
s . Canonicalize ( job , taskGroup , task )
2015-11-27 03:26:00 +00:00
if s . Name != "example-cache-redis-db" {
2018-03-11 18:05:59 +00:00
t . Fatalf ( "Expected name: %v, Actual: %v" , "example-cache-redis-db" , s . Name )
2015-11-27 03:26:00 +00:00
}
}
2019-07-30 22:40:45 +00:00
func TestService_Validate ( t * testing . T ) {
s := Service {
Name : "testservice" ,
}
s . Canonicalize ( "testjob" , "testgroup" , "testtask" )
// Base service should be valid
require . NoError ( t , s . Validate ( ) )
2020-06-22 17:55:59 +00:00
// Native Connect requires task name on service
2019-07-30 22:40:45 +00:00
s . Connect = & ConsulConnect {
2020-06-22 17:55:59 +00:00
Native : true ,
2019-07-30 22:40:45 +00:00
}
2020-06-22 17:55:59 +00:00
require . Error ( t , s . Validate ( ) )
// Native Connect should work with task name on service set
s . TaskName = "testtask"
2019-07-30 22:40:45 +00:00
require . NoError ( t , s . Validate ( ) )
// Native Connect + Sidecar should be invalid
s . Connect . SidecarService = & ConsulSidecarService { }
require . Error ( t , s . Validate ( ) )
}
func TestService_Equals ( t * testing . T ) {
s := Service {
Name : "testservice" ,
}
s . Canonicalize ( "testjob" , "testgroup" , "testtask" )
o := s . Copy ( )
// Base service should be equal to copy of itself
require . True ( t , s . Equals ( o ) )
// create a helper to assert a diff and reset the struct
assertDiff := func ( ) {
require . False ( t , s . Equals ( o ) )
o = s . Copy ( )
require . True ( t , s . Equals ( o ) , "bug in copy" )
}
// Changing any field should cause inequality
o . Name = "diff"
assertDiff ( )
o . PortLabel = "diff"
assertDiff ( )
o . AddressMode = AddressModeDriver
assertDiff ( )
o . Tags = [ ] string { "diff" }
assertDiff ( )
o . CanaryTags = [ ] string { "diff" }
assertDiff ( )
o . Checks = [ ] * ServiceCheck { { Name : "diff" } }
assertDiff ( )
2020-06-22 17:55:59 +00:00
o . Connect = & ConsulConnect { Native : true }
2019-07-30 22:40:45 +00:00
assertDiff ( )
client: enable configuring enable_tag_override for services
Consul provides a feature of Service Definitions where the tags
associated with a service can be modified through the Catalog API,
overriding the value(s) configured in the agent's service configuration.
To enable this feature, the flag enable_tag_override must be configured
in the service definition.
Previously, Nomad did not allow configuring this flag, and thus the default
value of false was used. Now, it is configurable.
Because Nomad itself acts as a state machine around the the service definitions
of the tasks it manages, it's worth describing what happens when this feature
is enabled and why.
Consider the basic case where there is no Nomad, and your service is provided
to consul as a boring JSON file. The ultimate source of truth for the definition
of that service is the file, and is stored in the agent. Later, Consul performs
"anti-entropy" which synchronizes the Catalog (stored only the leaders). Then
with enable_tag_override=true, the tags field is available for "external"
modification through the Catalog API (rather than directly configuring the
service definition file, or using the Agent API). The important observation
is that if the service definition ever changes (i.e. the file is changed &
config reloaded OR the Agent API is used to modify the service), those
"external" tag values are thrown away, and the new service definition is
once again the source of truth.
In the Nomad case, Nomad itself is the source of truth over the Agent in
the same way the JSON file was the source of truth in the example above.
That means any time Nomad sets a new service definition, any externally
configured tags are going to be replaced. When does this happen? Only on
major lifecycle events, for example when a task is modified because of an
updated job spec from the 'nomad job run <existing>' command. Otherwise,
Nomad's periodic re-sync's with Consul will now no longer try to restore
the externally modified tag values (as long as enable_tag_override=true).
Fixes #2057
2020-02-07 21:22:19 +00:00
o . EnableTagOverride = true
assertDiff ( )
2019-07-30 22:40:45 +00:00
}
2015-11-27 03:26:00 +00:00
func TestJob_ExpandServiceNames ( t * testing . T ) {
j := & Job {
Name : "my-job" ,
TaskGroups : [ ] * TaskGroup {
2017-09-26 22:26:33 +00:00
{
2015-11-27 03:26:00 +00:00
Name : "web" ,
Tasks : [ ] * Task {
{
Name : "frontend" ,
2016-06-12 23:36:49 +00:00
Services : [ ] * Service {
2015-11-27 03:26:00 +00:00
{
Name : "${BASE}-default" ,
} ,
{
Name : "jmx" ,
} ,
} ,
} ,
} ,
} ,
2017-09-26 22:26:33 +00:00
{
2015-11-27 03:26:00 +00:00
Name : "admin" ,
Tasks : [ ] * Task {
{
Name : "admin-web" ,
} ,
} ,
} ,
} ,
}
2016-07-20 23:07:15 +00:00
j . Canonicalize ( )
2015-11-27 03:26:00 +00:00
2016-06-12 23:36:49 +00:00
service1Name := j . TaskGroups [ 0 ] . Tasks [ 0 ] . Services [ 0 ] . Name
2015-11-27 03:26:00 +00:00
if service1Name != "my-job-web-frontend-default" {
t . Fatalf ( "Expected Service Name: %s, Actual: %s" , "my-job-web-frontend-default" , service1Name )
}
2016-06-12 23:36:49 +00:00
service2Name := j . TaskGroups [ 0 ] . Tasks [ 0 ] . Services [ 1 ] . Name
2015-11-27 03:26:00 +00:00
if service2Name != "jmx" {
t . Fatalf ( "Expected Service Name: %s, Actual: %s" , "jmx" , service2Name )
}
}
2015-12-01 00:51:56 +00:00
2019-11-18 18:04:01 +00:00
// TestJob_CombinedTaskMeta verifies meta precedence: task meta overrides
// group meta, which overrides job meta; an empty group or task name makes
// resolution fall back to the next level up.
func TestJob_CombinedTaskMeta(t *testing.T) {
	job := &Job{
		Meta: map[string]string{
			"job_test":   "job",
			"group_test": "job",
			"task_test":  "job",
		},
		TaskGroups: []*TaskGroup{
			{
				Name: "group",
				Meta: map[string]string{
					"group_test": "group",
					"task_test":  "group",
				},
				Tasks: []*Task{
					{
						Name: "task",
						Meta: map[string]string{"task_test": "task"},
					},
				},
			},
		},
	}

	// Task-level meta wins when both group and task resolve.
	require.EqualValues(t, map[string]string{
		"job_test":   "job",
		"group_test": "group",
		"task_test":  "task",
	}, job.CombinedTaskMeta("group", "task"))

	// Without a task, group-level meta wins.
	require.EqualValues(t, map[string]string{
		"job_test":   "job",
		"group_test": "group",
		"task_test":  "group",
	}, job.CombinedTaskMeta("group", ""))

	// Without a matching group, only job-level meta applies.
	require.EqualValues(t, map[string]string{
		"job_test":   "job",
		"group_test": "job",
		"task_test":  "job",
	}, job.CombinedTaskMeta("", "task"))
}
2015-12-01 00:51:56 +00:00
func TestPeriodicConfig_EnabledInvalid ( t * testing . T ) {
// Create a config that is enabled but with no interval specified.
p := & PeriodicConfig { Enabled : true }
if err := p . Validate ( ) ; err == nil {
t . Fatal ( "Enabled PeriodicConfig with no spec or type shouldn't be valid" )
}
// Create a config that is enabled, with a spec but no type specified.
p = & PeriodicConfig { Enabled : true , Spec : "foo" }
if err := p . Validate ( ) ; err == nil {
t . Fatal ( "Enabled PeriodicConfig with no spec type shouldn't be valid" )
}
// Create a config that is enabled, with a spec type but no spec specified.
p = & PeriodicConfig { Enabled : true , SpecType : PeriodicSpecCron }
if err := p . Validate ( ) ; err == nil {
t . Fatal ( "Enabled PeriodicConfig with no spec shouldn't be valid" )
}
2017-02-15 22:37:06 +00:00
// Create a config that is enabled, with a bad time zone.
p = & PeriodicConfig { Enabled : true , TimeZone : "FOO" }
if err := p . Validate ( ) ; err == nil || ! strings . Contains ( err . Error ( ) , "time zone" ) {
2017-02-28 00:00:19 +00:00
t . Fatalf ( "Enabled PeriodicConfig with bad time zone shouldn't be valid: %v" , err )
2017-02-15 22:37:06 +00:00
}
2015-12-01 00:51:56 +00:00
}
func TestPeriodicConfig_InvalidCron ( t * testing . T ) {
specs := [ ] string { "foo" , "* *" , "@foo" }
for _ , spec := range specs {
p := & PeriodicConfig { Enabled : true , SpecType : PeriodicSpecCron , Spec : spec }
2017-02-15 22:37:06 +00:00
p . Canonicalize ( )
2015-12-01 00:51:56 +00:00
if err := p . Validate ( ) ; err == nil {
t . Fatal ( "Invalid cron spec" )
}
}
}
func TestPeriodicConfig_ValidCron ( t * testing . T ) {
specs := [ ] string { "0 0 29 2 *" , "@hourly" , "0 0-15 * * *" }
for _ , spec := range specs {
p := & PeriodicConfig { Enabled : true , SpecType : PeriodicSpecCron , Spec : spec }
2017-02-15 22:37:06 +00:00
p . Canonicalize ( )
2015-12-01 00:51:56 +00:00
if err := p . Validate ( ) ; err != nil {
t . Fatal ( "Passed valid cron" )
}
}
}
func TestPeriodicConfig_NextCron ( t * testing . T ) {
from := time . Date ( 2009 , time . November , 10 , 23 , 22 , 30 , 0 , time . UTC )
2020-05-07 22:33:48 +00:00
cases := [ ] struct {
spec string
nextTime time . Time
errorMsg string
} {
2018-04-26 22:36:23 +00:00
{
2020-05-07 22:33:48 +00:00
spec : "0 0 29 2 * 1980" ,
nextTime : time . Time { } ,
2018-04-26 22:36:23 +00:00
} ,
{
2020-05-07 22:33:48 +00:00
spec : "*/5 * * * *" ,
nextTime : time . Date ( 2009 , time . November , 10 , 23 , 25 , 0 , 0 , time . UTC ) ,
2018-04-26 22:36:23 +00:00
} ,
{
2020-05-07 22:33:48 +00:00
spec : "1 15-0 *" ,
nextTime : time . Time { } ,
errorMsg : "failed parsing cron expression" ,
2018-04-26 22:36:23 +00:00
} ,
}
2020-05-07 22:33:48 +00:00
for i , c := range cases {
t . Run ( fmt . Sprintf ( "case: %d: %s" , i , c . spec ) , func ( t * testing . T ) {
p := & PeriodicConfig { Enabled : true , SpecType : PeriodicSpecCron , Spec : c . spec }
p . Canonicalize ( )
n , err := p . Next ( from )
require . Equal ( t , c . nextTime , n )
if c . errorMsg == "" {
require . NoError ( t , err )
} else {
require . Error ( t , err )
require . Contains ( t , err . Error ( ) , c . errorMsg )
}
} )
2015-12-01 00:51:56 +00:00
}
}
2016-02-02 22:32:30 +00:00
2017-02-15 22:37:06 +00:00
func TestPeriodicConfig_ValidTimeZone ( t * testing . T ) {
zones := [ ] string { "Africa/Abidjan" , "America/Chicago" , "Europe/Minsk" , "UTC" }
for _ , zone := range zones {
p := & PeriodicConfig { Enabled : true , SpecType : PeriodicSpecCron , Spec : "0 0 29 2 * 1980" , TimeZone : zone }
p . Canonicalize ( )
if err := p . Validate ( ) ; err != nil {
2017-02-28 00:00:19 +00:00
t . Fatalf ( "Valid tz errored: %v" , err )
2017-02-15 22:37:06 +00:00
}
}
}
func TestPeriodicConfig_DST ( t * testing . T ) {
2018-04-26 22:36:23 +00:00
require := require . New ( t )
2017-02-15 22:37:06 +00:00
// On Sun, Mar 12, 2:00 am 2017: +1 hour UTC
p := & PeriodicConfig {
Enabled : true ,
SpecType : PeriodicSpecCron ,
2020-05-07 22:33:48 +00:00
Spec : "0 2 11-13 3 * 2017" ,
2017-02-15 22:37:06 +00:00
TimeZone : "America/Los_Angeles" ,
}
p . Canonicalize ( )
t1 := time . Date ( 2017 , time . March , 11 , 1 , 0 , 0 , 0 , p . location )
t2 := time . Date ( 2017 , time . March , 12 , 1 , 0 , 0 , 0 , p . location )
// E1 is an 8 hour adjustment, E2 is a 7 hour adjustment
e1 := time . Date ( 2017 , time . March , 11 , 10 , 0 , 0 , 0 , time . UTC )
2020-05-07 22:33:48 +00:00
e2 := time . Date ( 2017 , time . March , 13 , 9 , 0 , 0 , 0 , time . UTC )
2017-02-15 22:37:06 +00:00
2018-04-26 22:36:23 +00:00
n1 , err := p . Next ( t1 )
require . Nil ( err )
2018-04-26 22:15:43 +00:00
2018-04-26 22:36:23 +00:00
n2 , err := p . Next ( t2 )
require . Nil ( err )
2017-02-15 22:37:06 +00:00
2018-04-26 22:36:23 +00:00
require . Equal ( e1 , n1 . UTC ( ) )
require . Equal ( e2 , n2 . UTC ( ) )
2017-02-15 22:37:06 +00:00
}
2019-12-12 18:59:38 +00:00
2019-12-09 18:58:53 +00:00
func TestTaskLifecycleConfig_Validate ( t * testing . T ) {
testCases := [ ] struct {
name string
tlc * TaskLifecycleConfig
err error
} {
{
name : "prestart completed" ,
tlc : & TaskLifecycleConfig {
2020-03-02 19:12:16 +00:00
Hook : "prestart" ,
Sidecar : false ,
2019-12-09 18:58:53 +00:00
} ,
err : nil ,
} ,
{
name : "prestart running" ,
tlc : & TaskLifecycleConfig {
2020-03-02 19:12:16 +00:00
Hook : "prestart" ,
Sidecar : true ,
2019-12-09 18:58:53 +00:00
} ,
err : nil ,
} ,
{
name : "no hook" ,
tlc : & TaskLifecycleConfig {
2020-03-02 19:12:16 +00:00
Sidecar : true ,
2019-12-09 18:58:53 +00:00
} ,
err : fmt . Errorf ( "no lifecycle hook provided" ) ,
} ,
}
for _ , tc := range testCases {
t . Run ( tc . name , func ( t * testing . T ) {
err := tc . tlc . Validate ( )
if tc . err != nil {
2019-12-12 18:59:38 +00:00
require . Error ( t , err )
2019-12-09 18:58:53 +00:00
require . Contains ( t , err . Error ( ) , tc . err . Error ( ) )
} else {
require . Nil ( t , err )
}
} )
}
}
2017-02-15 22:37:06 +00:00
2016-02-02 22:32:30 +00:00
func TestRestartPolicy_Validate ( t * testing . T ) {
// Policy with acceptable restart options passes
p := & RestartPolicy {
Mode : RestartPolicyModeFail ,
Attempts : 0 ,
2017-02-13 23:27:36 +00:00
Interval : 5 * time . Second ,
2016-02-02 22:32:30 +00:00
}
if err := p . Validate ( ) ; err != nil {
t . Fatalf ( "err: %v" , err )
}
// Policy with ambiguous restart options fails
p = & RestartPolicy {
Mode : RestartPolicyModeDelay ,
Attempts : 0 ,
2017-02-13 23:27:36 +00:00
Interval : 5 * time . Second ,
2016-02-02 22:32:30 +00:00
}
if err := p . Validate ( ) ; err == nil || ! strings . Contains ( err . Error ( ) , "ambiguous" ) {
t . Fatalf ( "expect ambiguity error, got: %v" , err )
}
// Bad policy mode fails
p = & RestartPolicy {
Mode : "nope" ,
Attempts : 1 ,
2017-02-13 23:27:36 +00:00
Interval : 5 * time . Second ,
2016-02-02 22:32:30 +00:00
}
if err := p . Validate ( ) ; err == nil || ! strings . Contains ( err . Error ( ) , "mode" ) {
t . Fatalf ( "expect mode error, got: %v" , err )
}
// Fails when attempts*delay does not fit inside interval
p = & RestartPolicy {
Mode : RestartPolicyModeDelay ,
Attempts : 3 ,
Delay : 5 * time . Second ,
2017-02-13 23:27:36 +00:00
Interval : 5 * time . Second ,
2016-02-02 22:32:30 +00:00
}
if err := p . Validate ( ) ; err == nil || ! strings . Contains ( err . Error ( ) , "can't restart" ) {
t . Fatalf ( "expect restart interval error, got: %v" , err )
}
2017-02-13 23:27:36 +00:00
// Fails when interval is to small
p = & RestartPolicy {
Mode : RestartPolicyModeDelay ,
Attempts : 3 ,
Delay : 5 * time . Second ,
Interval : 2 * time . Second ,
}
if err := p . Validate ( ) ; err == nil || ! strings . Contains ( err . Error ( ) , "Interval can not be less than" ) {
t . Fatalf ( "expect interval too small error, got: %v" , err )
}
2016-02-02 22:32:30 +00:00
}
2016-03-10 02:09:51 +00:00
2018-01-14 15:03:08 +00:00
func TestReschedulePolicy_Validate ( t * testing . T ) {
type testCase struct {
2018-02-22 23:43:07 +00:00
desc string
2018-01-14 15:03:08 +00:00
ReschedulePolicy * ReschedulePolicy
2018-02-22 23:43:07 +00:00
errors [ ] error
2018-01-14 15:03:08 +00:00
}
testCases := [ ] testCase {
{
2018-02-22 23:43:07 +00:00
desc : "Nil" ,
} ,
{
desc : "Disabled" ,
2018-01-17 22:34:15 +00:00
ReschedulePolicy : & ReschedulePolicy {
Attempts : 0 ,
Interval : 0 * time . Second } ,
2018-01-14 15:03:08 +00:00
} ,
{
2018-02-22 23:43:07 +00:00
desc : "Disabled" ,
2018-01-17 22:34:15 +00:00
ReschedulePolicy : & ReschedulePolicy {
2018-02-22 23:43:07 +00:00
Attempts : - 1 ,
2018-01-17 22:34:15 +00:00
Interval : 5 * time . Minute } ,
2018-01-14 15:03:08 +00:00
} ,
{
2018-02-22 23:43:07 +00:00
desc : "Valid Linear Delay" ,
2018-01-17 22:34:15 +00:00
ReschedulePolicy : & ReschedulePolicy {
2018-02-22 23:43:07 +00:00
Attempts : 1 ,
Interval : 5 * time . Minute ,
Delay : 10 * time . Second ,
2018-03-26 19:45:09 +00:00
DelayFunction : "constant" } ,
2018-02-22 23:43:07 +00:00
} ,
{
desc : "Valid Exponential Delay" ,
ReschedulePolicy : & ReschedulePolicy {
Attempts : 5 ,
Interval : 1 * time . Hour ,
Delay : 30 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 5 * time . Minute ,
2018-02-22 23:43:07 +00:00
DelayFunction : "exponential" } ,
} ,
{
desc : "Valid Fibonacci Delay" ,
ReschedulePolicy : & ReschedulePolicy {
Attempts : 5 ,
Interval : 15 * time . Minute ,
Delay : 10 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 5 * time . Minute ,
2018-02-22 23:43:07 +00:00
DelayFunction : "fibonacci" } ,
} ,
{
desc : "Invalid delay function" ,
ReschedulePolicy : & ReschedulePolicy {
Attempts : 1 ,
Interval : 1 * time . Second ,
DelayFunction : "blah" } ,
errors : [ ] error {
fmt . Errorf ( "Interval cannot be less than %v (got %v)" , ReschedulePolicyMinInterval , time . Second ) ,
fmt . Errorf ( "Delay cannot be less than %v (got %v)" , ReschedulePolicyMinDelay , 0 * time . Second ) ,
fmt . Errorf ( "Invalid delay function %q, must be one of %q" , "blah" , RescheduleDelayFunctions ) ,
} ,
} ,
{
desc : "Invalid delay ceiling" ,
ReschedulePolicy : & ReschedulePolicy {
Attempts : 1 ,
Interval : 8 * time . Second ,
DelayFunction : "exponential" ,
Delay : 15 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 5 * time . Second } ,
2018-02-22 23:43:07 +00:00
errors : [ ] error {
2018-03-24 15:29:20 +00:00
fmt . Errorf ( "Max Delay cannot be less than Delay %v (got %v)" ,
2018-02-23 18:57:29 +00:00
15 * time . Second , 5 * time . Second ) ,
2018-02-22 23:43:07 +00:00
} ,
} ,
{
desc : "Invalid delay and interval" ,
ReschedulePolicy : & ReschedulePolicy {
Attempts : 1 ,
Interval : 1 * time . Second ,
2018-03-26 19:45:09 +00:00
DelayFunction : "constant" } ,
2018-02-22 23:43:07 +00:00
errors : [ ] error {
fmt . Errorf ( "Interval cannot be less than %v (got %v)" , ReschedulePolicyMinInterval , time . Second ) ,
fmt . Errorf ( "Delay cannot be less than %v (got %v)" , ReschedulePolicyMinDelay , 0 * time . Second ) ,
} ,
} , {
// Should suggest 2h40m as the interval
desc : "Invalid Attempts - linear delay" ,
ReschedulePolicy : & ReschedulePolicy {
Attempts : 10 ,
Interval : 1 * time . Hour ,
Delay : 20 * time . Minute ,
2018-03-26 19:45:09 +00:00
DelayFunction : "constant" ,
2018-02-22 23:43:07 +00:00
} ,
errors : [ ] error {
2018-02-23 18:57:29 +00:00
fmt . Errorf ( "Nomad can only make %v attempts in %v with initial delay %v and" +
2018-03-26 19:45:09 +00:00
" delay function %q" , 3 , time . Hour , 20 * time . Minute , "constant" ) ,
2018-02-23 18:57:29 +00:00
fmt . Errorf ( "Set the interval to at least %v to accommodate %v attempts" ,
200 * time . Minute , 10 ) ,
2018-02-22 23:43:07 +00:00
} ,
2018-01-17 22:34:15 +00:00
} ,
{
2018-02-22 23:43:07 +00:00
// Should suggest 4h40m as the interval
// Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40}
desc : "Invalid Attempts - exponential delay" ,
2018-01-17 22:34:15 +00:00
ReschedulePolicy : & ReschedulePolicy {
2018-02-22 23:43:07 +00:00
Attempts : 10 ,
Interval : 30 * time . Minute ,
Delay : 5 * time . Minute ,
2018-03-13 15:06:26 +00:00
MaxDelay : 40 * time . Minute ,
2018-02-22 23:43:07 +00:00
DelayFunction : "exponential" ,
} ,
errors : [ ] error {
fmt . Errorf ( "Nomad can only make %v attempts in %v with initial delay %v, " +
2018-02-23 18:57:29 +00:00
"delay function %q, and delay ceiling %v" , 3 , 30 * time . Minute , 5 * time . Minute ,
"exponential" , 40 * time . Minute ) ,
fmt . Errorf ( "Set the interval to at least %v to accommodate %v attempts" ,
280 * time . Minute , 10 ) ,
2018-02-22 23:43:07 +00:00
} ,
} ,
{
// Should suggest 8h as the interval
// Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80}
desc : "Invalid Attempts - fibonacci delay" ,
ReschedulePolicy : & ReschedulePolicy {
Attempts : 10 ,
Interval : 1 * time . Hour ,
Delay : 20 * time . Minute ,
2018-03-13 15:06:26 +00:00
MaxDelay : 80 * time . Minute ,
2018-02-22 23:43:07 +00:00
DelayFunction : "fibonacci" ,
} ,
errors : [ ] error {
fmt . Errorf ( "Nomad can only make %v attempts in %v with initial delay %v, " +
2018-02-23 18:57:29 +00:00
"delay function %q, and delay ceiling %v" , 4 , 1 * time . Hour , 20 * time . Minute ,
"fibonacci" , 80 * time . Minute ) ,
fmt . Errorf ( "Set the interval to at least %v to accommodate %v attempts" ,
480 * time . Minute , 10 ) ,
2018-02-22 23:43:07 +00:00
} ,
} ,
{
2018-03-26 18:30:09 +00:00
desc : "Ambiguous Unlimited config, has both attempts and unlimited set" ,
2018-02-22 23:43:07 +00:00
ReschedulePolicy : & ReschedulePolicy {
Attempts : 1 ,
Unlimited : true ,
DelayFunction : "exponential" ,
Delay : 5 * time . Minute ,
2018-03-13 15:06:26 +00:00
MaxDelay : 1 * time . Hour ,
2018-02-22 23:43:07 +00:00
} ,
2018-03-24 15:29:20 +00:00
errors : [ ] error {
fmt . Errorf ( "Interval must be a non zero value if Attempts > 0" ) ,
fmt . Errorf ( "Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous" , 1 , time . Duration ( 0 ) , true ) ,
} ,
2018-02-22 23:43:07 +00:00
} ,
{
desc : "Invalid Unlimited config" ,
ReschedulePolicy : & ReschedulePolicy {
Attempts : 1 ,
Interval : 1 * time . Second ,
Unlimited : true ,
DelayFunction : "exponential" ,
} ,
errors : [ ] error {
fmt . Errorf ( "Delay cannot be less than %v (got %v)" , ReschedulePolicyMinDelay , 0 * time . Second ) ,
2018-03-24 15:29:20 +00:00
fmt . Errorf ( "Max Delay cannot be less than %v (got %v)" , ReschedulePolicyMinDelay , 0 * time . Second ) ,
2018-02-22 23:43:07 +00:00
} ,
2018-01-14 15:03:08 +00:00
} ,
2018-03-26 18:30:09 +00:00
{
desc : "Valid Unlimited config" ,
ReschedulePolicy : & ReschedulePolicy {
Unlimited : true ,
DelayFunction : "exponential" ,
Delay : 5 * time . Second ,
MaxDelay : 1 * time . Hour ,
} ,
} ,
2018-01-14 15:03:08 +00:00
}
for _ , tc := range testCases {
2018-02-22 23:43:07 +00:00
t . Run ( tc . desc , func ( t * testing . T ) {
require := require . New ( t )
gotErr := tc . ReschedulePolicy . Validate ( )
if tc . errors != nil {
// Validate all errors
for _ , err := range tc . errors {
require . Contains ( gotErr . Error ( ) , err . Error ( ) )
}
} else {
require . Nil ( gotErr )
}
} )
2018-01-14 15:03:08 +00:00
}
}
2016-03-10 02:09:51 +00:00
func TestAllocation_Index ( t * testing . T ) {
2017-05-31 18:34:46 +00:00
a1 := Allocation {
Name : "example.cache[1]" ,
TaskGroup : "cache" ,
JobID : "example" ,
Job : & Job {
ID : "example" ,
TaskGroups : [ ] * TaskGroup { { Name : "cache" } } } ,
}
e1 := uint ( 1 )
a2 := a1 . Copy ( )
a2 . Name = "example.cache[713127]"
e2 := uint ( 713127 )
2016-03-10 02:09:51 +00:00
if a1 . Index ( ) != e1 || a2 . Index ( ) != e2 {
2017-05-31 18:34:46 +00:00
t . Fatalf ( "Got %d and %d" , a1 . Index ( ) , a2 . Index ( ) )
2016-03-10 02:09:51 +00:00
}
}
2016-03-14 22:46:06 +00:00
2016-03-15 02:55:30 +00:00
// TestTaskArtifact_Validate_Source checks that an artifact with only a
// getter source set passes validation.
func TestTaskArtifact_Validate_Source(t *testing.T) {
	artifact := &TaskArtifact{GetterSource: "google.com"}
	err := artifact.Validate()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
2016-03-18 19:01:46 +00:00
func TestTaskArtifact_Validate_Dest ( t * testing . T ) {
valid := & TaskArtifact { GetterSource : "google.com" }
if err := valid . Validate ( ) ; err != nil {
t . Fatalf ( "unexpected error: %v" , err )
}
valid . RelativeDest = "local/"
if err := valid . Validate ( ) ; err != nil {
t . Fatalf ( "unexpected error: %v" , err )
}
valid . RelativeDest = "local/.."
if err := valid . Validate ( ) ; err != nil {
t . Fatalf ( "unexpected error: %v" , err )
}
2016-12-18 23:48:30 +00:00
valid . RelativeDest = "local/../../.."
2016-03-18 19:01:46 +00:00
if err := valid . Validate ( ) ; err == nil {
t . Fatalf ( "expected error: %v" , err )
}
}
2019-02-20 16:41:51 +00:00
// TestTaskArtifact_Hash asserts an artifact's hash changes when any of the
// fields change.
func TestTaskArtifact_Hash(t *testing.T) {
	t.Parallel()

	// Each artifact differs from its neighbors by exactly one field so
	// every field is shown to participate in the hash.
	cases := []TaskArtifact{
		{},
		{
			GetterSource: "a",
		},
		{
			GetterSource: "b",
		},
		{
			GetterSource:  "b",
			GetterOptions: map[string]string{"c": "c"},
		},
		{
			GetterSource: "b",
			GetterOptions: map[string]string{
				"c": "c",
				"d": "d",
			},
		},
		{
			GetterSource: "b",
			GetterOptions: map[string]string{
				"c": "c",
				"d": "e",
			},
		},
		{
			GetterSource: "b",
			GetterOptions: map[string]string{
				"c": "c",
				"d": "e",
			},
			GetterMode: "f",
		},
		{
			GetterSource: "b",
			GetterOptions: map[string]string{
				"c": "c",
				"d": "e",
			},
			GetterMode: "g",
		},
		{
			GetterSource: "b",
			GetterOptions: map[string]string{
				"c": "c",
				"d": "e",
			},
			GetterMode:   "g",
			RelativeDest: "h",
		},
		{
			GetterSource: "b",
			GetterOptions: map[string]string{
				"c": "c",
				"d": "e",
			},
			GetterMode:   "g",
			RelativeDest: "i",
		},
	}

	// Track every hash we've produced so collisions can be reported
	// with both colliding artifacts.
	seen := make(map[string]TaskArtifact, len(cases))
	for _, artifact := range cases {
		hash := artifact.Hash()

		// Hash should be deterministic.
		require.Equal(t, hash, artifact.Hash())

		// Hash should be unique.
		if prev, ok := seen[hash]; ok {
			require.Failf(t, "hashes match", "artifact 1: %s\n\n artifact 2: %s\n",
				pretty.Sprint(artifact), pretty.Sprint(prev),
			)
		}
		seen[hash] = artifact
	}
	require.Len(t, seen, len(cases))
}
2016-10-03 16:59:57 +00:00
func TestAllocation_ShouldMigrate ( t * testing . T ) {
alloc := Allocation {
2017-10-12 01:08:37 +00:00
PreviousAllocation : "123" ,
TaskGroup : "foo" ,
2016-10-03 16:59:57 +00:00
Job : & Job {
TaskGroups : [ ] * TaskGroup {
{
Name : "foo" ,
EphemeralDisk : & EphemeralDisk {
Migrate : true ,
Sticky : true ,
} ,
} ,
} ,
} ,
}
if ! alloc . ShouldMigrate ( ) {
t . Fatalf ( "bad: %v" , alloc )
}
alloc1 := Allocation {
2017-10-12 01:08:37 +00:00
PreviousAllocation : "123" ,
TaskGroup : "foo" ,
2016-10-03 16:59:57 +00:00
Job : & Job {
TaskGroups : [ ] * TaskGroup {
{
Name : "foo" ,
EphemeralDisk : & EphemeralDisk { } ,
} ,
} ,
} ,
}
if alloc1 . ShouldMigrate ( ) {
t . Fatalf ( "bad: %v" , alloc )
}
alloc2 := Allocation {
2017-10-12 01:08:37 +00:00
PreviousAllocation : "123" ,
TaskGroup : "foo" ,
2016-10-03 16:59:57 +00:00
Job : & Job {
TaskGroups : [ ] * TaskGroup {
{
Name : "foo" ,
EphemeralDisk : & EphemeralDisk {
Sticky : false ,
Migrate : true ,
} ,
} ,
} ,
} ,
}
if alloc2 . ShouldMigrate ( ) {
t . Fatalf ( "bad: %v" , alloc )
}
2016-10-19 18:12:25 +00:00
alloc3 := Allocation {
2017-10-12 01:08:37 +00:00
PreviousAllocation : "123" ,
TaskGroup : "foo" ,
2016-10-19 18:12:25 +00:00
Job : & Job {
TaskGroups : [ ] * TaskGroup {
{
Name : "foo" ,
} ,
} ,
} ,
}
if alloc3 . ShouldMigrate ( ) {
t . Fatalf ( "bad: %v" , alloc )
}
2017-10-12 01:08:37 +00:00
// No previous
alloc4 := Allocation {
TaskGroup : "foo" ,
Job : & Job {
TaskGroups : [ ] * TaskGroup {
{
Name : "foo" ,
EphemeralDisk : & EphemeralDisk {
Migrate : true ,
Sticky : true ,
} ,
} ,
} ,
} ,
}
if alloc4 . ShouldMigrate ( ) {
t . Fatalf ( "bad: %v" , alloc4 )
}
2016-10-03 16:59:57 +00:00
}
2016-03-14 22:46:06 +00:00
func TestTaskArtifact_Validate_Checksum ( t * testing . T ) {
cases := [ ] struct {
Input * TaskArtifact
Err bool
} {
{
& TaskArtifact {
GetterSource : "foo.com" ,
GetterOptions : map [ string ] string {
"checksum" : "no-type" ,
} ,
} ,
true ,
} ,
{
& TaskArtifact {
GetterSource : "foo.com" ,
GetterOptions : map [ string ] string {
"checksum" : "md5:toosmall" ,
} ,
} ,
true ,
} ,
{
& TaskArtifact {
GetterSource : "foo.com" ,
GetterOptions : map [ string ] string {
"checksum" : "invalid:type" ,
} ,
} ,
true ,
} ,
2018-10-30 17:24:30 +00:00
{
& TaskArtifact {
GetterSource : "foo.com" ,
GetterOptions : map [ string ] string {
"checksum" : "md5:${ARTIFACT_CHECKSUM}" ,
} ,
} ,
false ,
} ,
2016-03-14 22:46:06 +00:00
}
for i , tc := range cases {
err := tc . Input . Validate ( )
if ( err != nil ) != tc . Err {
t . Fatalf ( "case %d: %v" , i , err )
continue
}
}
}
2016-08-22 16:34:24 +00:00
2019-03-08 12:48:12 +00:00
func TestPlan_NormalizeAllocations ( t * testing . T ) {
2019-03-05 21:41:41 +00:00
t . Parallel ( )
plan := & Plan {
2019-03-08 12:48:12 +00:00
NodeUpdate : make ( map [ string ] [ ] * Allocation ) ,
2019-03-05 21:41:41 +00:00
NodePreemptions : make ( map [ string ] [ ] * Allocation ) ,
}
stoppedAlloc := MockAlloc ( )
desiredDesc := "Desired desc"
2020-06-09 21:13:53 +00:00
plan . AppendStoppedAlloc ( stoppedAlloc , desiredDesc , AllocClientStatusLost , "followup-eval-id" )
2019-03-05 21:41:41 +00:00
preemptedAlloc := MockAlloc ( )
preemptingAllocID := uuid . Generate ( )
plan . AppendPreemptedAlloc ( preemptedAlloc , preemptingAllocID )
plan . NormalizeAllocations ( )
actualStoppedAlloc := plan . NodeUpdate [ stoppedAlloc . NodeID ] [ 0 ]
expectedStoppedAlloc := & Allocation {
ID : stoppedAlloc . ID ,
DesiredDescription : desiredDesc ,
ClientStatus : AllocClientStatusLost ,
2020-06-09 21:13:53 +00:00
FollowupEvalID : "followup-eval-id" ,
2019-03-05 21:41:41 +00:00
}
assert . Equal ( t , expectedStoppedAlloc , actualStoppedAlloc )
actualPreemptedAlloc := plan . NodePreemptions [ preemptedAlloc . NodeID ] [ 0 ]
expectedPreemptedAlloc := & Allocation {
ID : preemptedAlloc . ID ,
PreemptedByAllocation : preemptingAllocID ,
}
assert . Equal ( t , expectedPreemptedAlloc , actualPreemptedAlloc )
}
func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs ( t * testing . T ) {
t . Parallel ( )
plan := & Plan {
NodeUpdate : make ( map [ string ] [ ] * Allocation ) ,
}
alloc := MockAlloc ( )
desiredDesc := "Desired desc"
2020-06-09 21:13:53 +00:00
plan . AppendStoppedAlloc ( alloc , desiredDesc , AllocClientStatusLost , "" )
2019-03-05 21:41:41 +00:00
expectedAlloc := new ( Allocation )
* expectedAlloc = * alloc
expectedAlloc . DesiredDescription = desiredDesc
expectedAlloc . DesiredStatus = AllocDesiredStatusStop
expectedAlloc . ClientStatus = AllocClientStatusLost
expectedAlloc . Job = nil
2020-05-13 20:39:04 +00:00
expectedAlloc . AllocStates = [ ] * AllocState { {
Field : AllocStateFieldClientStatus ,
Value : "lost" ,
} }
// This value is set to time.Now() in AppendStoppedAlloc, so clear it
appendedAlloc := plan . NodeUpdate [ alloc . NodeID ] [ 0 ]
appendedAlloc . AllocStates [ 0 ] . Time = time . Time { }
2019-03-05 21:41:41 +00:00
assert . Equal ( t , expectedAlloc , appendedAlloc )
assert . Equal ( t , alloc . Job , plan . Job )
}
// TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs verifies that
// AppendPreemptedAlloc records a stripped-down copy of the alloc carrying
// only eviction metadata and its resource fields.
func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) {
	t.Parallel()

	plan := &Plan{
		NodePreemptions: make(map[string][]*Allocation),
	}
	alloc := MockAlloc()
	preemptingAllocID := uuid.Generate()

	plan.AppendPreemptedAlloc(alloc, preemptingAllocID)

	want := &Allocation{
		ID:                    alloc.ID,
		PreemptedByAllocation: preemptingAllocID,
		JobID:                 alloc.JobID,
		Namespace:             alloc.Namespace,
		DesiredStatus:         AllocDesiredStatusEvict,
		DesiredDescription:    fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID),
		AllocatedResources:    alloc.AllocatedResources,
		TaskResources:         alloc.TaskResources,
		SharedResources:       alloc.SharedResources,
	}
	got := plan.NodePreemptions[alloc.NodeID][0]
	assert.Equal(t, want, got)
}
// TestAllocation_MsgPackTags checks that the Allocation struct carries
// the codec tag that enables omitempty msgpack encoding.
func TestAllocation_MsgPackTags(t *testing.T) {
	t.Parallel()
	structField, _ := reflect.TypeOf(Allocation{}).FieldByName("_struct")
	assert.Equal(t, structField.Tag, reflect.StructTag(`codec:",omitempty"`))
}
// TestEvaluation_MsgPackTags checks that the Evaluation struct carries
// the codec tag that enables omitempty msgpack encoding.
func TestEvaluation_MsgPackTags(t *testing.T) {
	t.Parallel()
	structField, _ := reflect.TypeOf(Evaluation{}).FieldByName("_struct")
	assert.Equal(t, structField.Tag, reflect.StructTag(`codec:",omitempty"`))
}
2016-08-22 16:34:24 +00:00
// TestAllocation_Terminated pairs client/desired status combinations with
// whether Terminated() should report the allocation as finished.
func TestAllocation_Terminated(t *testing.T) {
	cases := []struct {
		ClientStatus  string
		DesiredStatus string
		Terminated    bool
	}{
		{AllocClientStatusPending, AllocDesiredStatusStop, false},
		{AllocClientStatusRunning, AllocDesiredStatusStop, false},
		{AllocClientStatusFailed, AllocDesiredStatusStop, true},
		{AllocClientStatusFailed, AllocDesiredStatusRun, true},
	}

	for _, tc := range cases {
		alloc := Allocation{
			ClientStatus:  tc.ClientStatus,
			DesiredStatus: tc.DesiredStatus,
		}
		if alloc.Terminated() != tc.Terminated {
			t.Fatalf("expected: %v, actual: %v", tc.Terminated, alloc.Terminated())
		}
	}
}
2016-10-11 22:25:49 +00:00
2018-01-14 15:03:08 +00:00
func TestAllocation_ShouldReschedule ( t * testing . T ) {
type testCase struct {
Desc string
2018-01-17 17:05:22 +00:00
FailTime time . Time
2018-01-14 15:03:08 +00:00
ClientStatus string
DesiredStatus string
ReschedulePolicy * ReschedulePolicy
2018-01-17 17:05:22 +00:00
RescheduleTrackers [ ] * RescheduleEvent
2018-01-14 15:03:08 +00:00
ShouldReschedule bool
}
2018-01-17 17:05:22 +00:00
fail := time . Now ( )
2018-01-14 15:03:08 +00:00
harness := [ ] testCase {
{
Desc : "Reschedule when desired state is stop" ,
ClientStatus : AllocClientStatusPending ,
DesiredStatus : AllocDesiredStatusStop ,
2018-01-17 17:05:22 +00:00
FailTime : fail ,
2018-01-14 15:03:08 +00:00
ReschedulePolicy : nil ,
ShouldReschedule : false ,
} ,
2018-01-17 22:34:15 +00:00
{
2018-03-11 18:40:32 +00:00
Desc : "Disabled rescheduling" ,
2018-01-17 22:34:15 +00:00
ClientStatus : AllocClientStatusFailed ,
DesiredStatus : AllocDesiredStatusRun ,
FailTime : fail ,
2018-02-22 23:43:07 +00:00
ReschedulePolicy : & ReschedulePolicy { Attempts : 0 , Interval : 1 * time . Minute } ,
2018-01-17 22:34:15 +00:00
ShouldReschedule : false ,
} ,
2018-01-14 15:03:08 +00:00
{
Desc : "Reschedule when client status is complete" ,
ClientStatus : AllocClientStatusComplete ,
DesiredStatus : AllocDesiredStatusRun ,
2018-01-17 17:05:22 +00:00
FailTime : fail ,
2018-01-14 15:03:08 +00:00
ReschedulePolicy : nil ,
ShouldReschedule : false ,
} ,
{
Desc : "Reschedule with nil reschedule policy" ,
ClientStatus : AllocClientStatusFailed ,
DesiredStatus : AllocDesiredStatusRun ,
2018-01-17 17:05:22 +00:00
FailTime : fail ,
2018-01-14 15:03:08 +00:00
ReschedulePolicy : nil ,
ShouldReschedule : false ,
} ,
2018-03-24 15:29:20 +00:00
{
Desc : "Reschedule with unlimited and attempts >0" ,
ClientStatus : AllocClientStatusFailed ,
DesiredStatus : AllocDesiredStatusRun ,
FailTime : fail ,
ReschedulePolicy : & ReschedulePolicy { Attempts : 1 , Unlimited : true } ,
ShouldReschedule : true ,
} ,
2018-01-14 15:03:08 +00:00
{
Desc : "Reschedule when client status is complete" ,
ClientStatus : AllocClientStatusComplete ,
DesiredStatus : AllocDesiredStatusRun ,
2018-01-17 17:05:22 +00:00
FailTime : fail ,
2018-01-14 15:03:08 +00:00
ReschedulePolicy : nil ,
ShouldReschedule : false ,
} ,
{
Desc : "Reschedule with policy when client status complete" ,
ClientStatus : AllocClientStatusComplete ,
DesiredStatus : AllocDesiredStatusRun ,
2018-01-17 17:05:22 +00:00
FailTime : fail ,
2018-02-22 23:43:07 +00:00
ReschedulePolicy : & ReschedulePolicy { Attempts : 1 , Interval : 1 * time . Minute } ,
2018-01-14 15:03:08 +00:00
ShouldReschedule : false ,
} ,
{
Desc : "Reschedule with no previous attempts" ,
ClientStatus : AllocClientStatusFailed ,
DesiredStatus : AllocDesiredStatusRun ,
2018-01-17 17:05:22 +00:00
FailTime : fail ,
2018-02-22 23:43:07 +00:00
ReschedulePolicy : & ReschedulePolicy { Attempts : 1 , Interval : 1 * time . Minute } ,
2018-01-14 15:03:08 +00:00
ShouldReschedule : true ,
} ,
{
Desc : "Reschedule with leftover attempts" ,
ClientStatus : AllocClientStatusFailed ,
DesiredStatus : AllocDesiredStatusRun ,
2018-02-22 23:43:07 +00:00
ReschedulePolicy : & ReschedulePolicy { Attempts : 2 , Interval : 5 * time . Minute } ,
2018-01-17 17:05:22 +00:00
FailTime : fail ,
RescheduleTrackers : [ ] * RescheduleEvent {
2018-01-14 15:03:08 +00:00
{
2018-01-17 17:05:22 +00:00
RescheduleTime : fail . Add ( - 1 * time . Minute ) . UTC ( ) . UnixNano ( ) ,
2018-01-14 15:03:08 +00:00
} ,
} ,
ShouldReschedule : true ,
} ,
{
Desc : "Reschedule with too old previous attempts" ,
ClientStatus : AllocClientStatusFailed ,
DesiredStatus : AllocDesiredStatusRun ,
2018-01-17 17:05:22 +00:00
FailTime : fail ,
2018-02-22 23:43:07 +00:00
ReschedulePolicy : & ReschedulePolicy { Attempts : 1 , Interval : 5 * time . Minute } ,
2018-01-17 17:05:22 +00:00
RescheduleTrackers : [ ] * RescheduleEvent {
2018-01-14 15:03:08 +00:00
{
2018-01-17 17:05:22 +00:00
RescheduleTime : fail . Add ( - 6 * time . Minute ) . UTC ( ) . UnixNano ( ) ,
2018-01-14 15:03:08 +00:00
} ,
} ,
ShouldReschedule : true ,
} ,
{
Desc : "Reschedule with no leftover attempts" ,
ClientStatus : AllocClientStatusFailed ,
DesiredStatus : AllocDesiredStatusRun ,
2018-01-17 17:05:22 +00:00
FailTime : fail ,
2018-02-22 23:43:07 +00:00
ReschedulePolicy : & ReschedulePolicy { Attempts : 2 , Interval : 5 * time . Minute } ,
2018-01-17 17:05:22 +00:00
RescheduleTrackers : [ ] * RescheduleEvent {
2018-01-14 15:03:08 +00:00
{
2018-01-17 17:05:22 +00:00
RescheduleTime : fail . Add ( - 3 * time . Minute ) . UTC ( ) . UnixNano ( ) ,
2018-01-14 15:03:08 +00:00
} ,
{
2018-01-17 17:05:22 +00:00
RescheduleTime : fail . Add ( - 4 * time . Minute ) . UTC ( ) . UnixNano ( ) ,
2018-01-14 15:03:08 +00:00
} ,
} ,
ShouldReschedule : false ,
} ,
}
for _ , state := range harness {
alloc := Allocation { }
alloc . DesiredStatus = state . DesiredStatus
alloc . ClientStatus = state . ClientStatus
2018-01-17 17:05:22 +00:00
alloc . RescheduleTracker = & RescheduleTracker { state . RescheduleTrackers }
2018-01-14 15:03:08 +00:00
t . Run ( state . Desc , func ( t * testing . T ) {
2018-01-17 17:05:22 +00:00
if got := alloc . ShouldReschedule ( state . ReschedulePolicy , state . FailTime ) ; got != state . ShouldReschedule {
2018-01-14 15:03:08 +00:00
t . Fatalf ( "expected %v but got %v" , state . ShouldReschedule , got )
}
} )
}
}
2018-03-02 00:20:09 +00:00
func TestAllocation_LastEventTime ( t * testing . T ) {
type testCase struct {
desc string
taskState map [ string ] * TaskState
expectedLastEventTime time . Time
}
2018-03-29 02:22:25 +00:00
t1 := time . Now ( ) . UTC ( )
2018-03-02 00:20:09 +00:00
testCases := [ ] testCase {
{
2018-09-04 23:03:52 +00:00
desc : "nil task state" ,
2018-03-29 02:22:25 +00:00
expectedLastEventTime : t1 ,
2018-03-02 00:20:09 +00:00
} ,
{
desc : "empty task state" ,
taskState : make ( map [ string ] * TaskState ) ,
2018-03-29 02:22:25 +00:00
expectedLastEventTime : t1 ,
2018-03-02 00:20:09 +00:00
} ,
{
desc : "Finished At not set" ,
taskState : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : t1 . Add ( - 2 * time . Hour ) } } ,
2018-03-29 02:22:25 +00:00
expectedLastEventTime : t1 ,
2018-03-02 00:20:09 +00:00
} ,
{
2018-03-29 19:05:56 +00:00
desc : "One finished " ,
2018-03-02 00:20:09 +00:00
taskState : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : t1 . Add ( - 2 * time . Hour ) ,
FinishedAt : t1 . Add ( - 1 * time . Hour ) } } ,
expectedLastEventTime : t1 . Add ( - 1 * time . Hour ) ,
} ,
{
2018-03-29 19:05:56 +00:00
desc : "Multiple task groups" ,
2018-03-02 00:20:09 +00:00
taskState : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : t1 . Add ( - 2 * time . Hour ) ,
FinishedAt : t1 . Add ( - 1 * time . Hour ) } ,
"bar" : { State : "start" ,
StartedAt : t1 . Add ( - 2 * time . Hour ) ,
FinishedAt : t1 . Add ( - 40 * time . Minute ) } } ,
expectedLastEventTime : t1 . Add ( - 40 * time . Minute ) ,
} ,
2018-03-29 19:05:56 +00:00
{
2018-04-03 20:49:18 +00:00
desc : "No finishedAt set, one task event, should use modify time" ,
2018-03-29 19:05:56 +00:00
taskState : map [ string ] * TaskState { "foo" : {
State : "run" ,
StartedAt : t1 . Add ( - 2 * time . Hour ) ,
Events : [ ] * TaskEvent {
{ Type : "start" , Time : t1 . Add ( - 20 * time . Minute ) . UnixNano ( ) } ,
} } ,
} ,
2018-04-03 20:49:18 +00:00
expectedLastEventTime : t1 ,
2018-03-29 19:05:56 +00:00
} ,
2018-03-02 00:20:09 +00:00
}
for _ , tc := range testCases {
t . Run ( tc . desc , func ( t * testing . T ) {
2018-03-29 02:22:25 +00:00
alloc := & Allocation { CreateTime : t1 . UnixNano ( ) , ModifyTime : t1 . UnixNano ( ) }
2018-03-02 00:20:09 +00:00
alloc . TaskStates = tc . taskState
require . Equal ( t , tc . expectedLastEventTime , alloc . LastEventTime ( ) )
} )
}
}
func TestAllocation_NextDelay ( t * testing . T ) {
type testCase struct {
desc string
reschedulePolicy * ReschedulePolicy
alloc * Allocation
expectedRescheduleTime time . Time
expectedRescheduleEligible bool
}
now := time . Now ( )
testCases := [ ] testCase {
{
desc : "Allocation hasn't failed yet" ,
reschedulePolicy : & ReschedulePolicy {
2018-03-26 19:45:09 +00:00
DelayFunction : "constant" ,
2018-03-02 00:20:09 +00:00
Delay : 5 * time . Second ,
} ,
2018-09-04 23:03:52 +00:00
alloc : & Allocation { } ,
2018-03-02 00:20:09 +00:00
expectedRescheduleTime : time . Time { } ,
expectedRescheduleEligible : false ,
} ,
2018-09-05 22:01:02 +00:00
{
desc : "Allocation has no reschedule policy" ,
alloc : & Allocation { } ,
expectedRescheduleTime : time . Time { } ,
expectedRescheduleEligible : false ,
} ,
2018-03-02 00:20:09 +00:00
{
desc : "Allocation lacks task state" ,
reschedulePolicy : & ReschedulePolicy {
2018-03-26 19:45:09 +00:00
DelayFunction : "constant" ,
2018-03-02 00:20:09 +00:00
Delay : 5 * time . Second ,
2018-03-29 12:59:38 +00:00
Unlimited : true ,
2018-03-02 00:20:09 +00:00
} ,
2018-09-04 23:03:52 +00:00
alloc : & Allocation { ClientStatus : AllocClientStatusFailed , ModifyTime : now . UnixNano ( ) } ,
2018-03-29 12:59:38 +00:00
expectedRescheduleTime : now . UTC ( ) . Add ( 5 * time . Second ) ,
expectedRescheduleEligible : true ,
2018-03-02 00:20:09 +00:00
} ,
{
desc : "linear delay, unlimited restarts, no reschedule tracker" ,
reschedulePolicy : & ReschedulePolicy {
2018-03-26 19:45:09 +00:00
DelayFunction : "constant" ,
2018-03-02 00:20:09 +00:00
Delay : 5 * time . Second ,
Unlimited : true ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "dead" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 2 * time . Second ) } } ,
} ,
expectedRescheduleTime : now . Add ( - 2 * time . Second ) . Add ( 5 * time . Second ) ,
expectedRescheduleEligible : true ,
} ,
{
desc : "linear delay with reschedule tracker" ,
reschedulePolicy : & ReschedulePolicy {
2018-03-26 19:45:09 +00:00
DelayFunction : "constant" ,
2018-03-02 00:20:09 +00:00
Delay : 5 * time . Second ,
Interval : 10 * time . Minute ,
Attempts : 2 ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 2 * time . Second ) } } ,
RescheduleTracker : & RescheduleTracker {
Events : [ ] * RescheduleEvent { {
RescheduleTime : now . Add ( - 2 * time . Minute ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} } ,
} } ,
expectedRescheduleTime : now . Add ( - 2 * time . Second ) . Add ( 5 * time . Second ) ,
expectedRescheduleEligible : true ,
} ,
{
desc : "linear delay with reschedule tracker, attempts exhausted" ,
reschedulePolicy : & ReschedulePolicy {
2018-03-26 19:45:09 +00:00
DelayFunction : "constant" ,
2018-03-02 00:20:09 +00:00
Delay : 5 * time . Second ,
Interval : 10 * time . Minute ,
Attempts : 2 ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 2 * time . Second ) } } ,
RescheduleTracker : & RescheduleTracker {
Events : [ ] * RescheduleEvent {
{
RescheduleTime : now . Add ( - 3 * time . Minute ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 2 * time . Minute ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
} ,
} } ,
expectedRescheduleTime : now . Add ( - 2 * time . Second ) . Add ( 5 * time . Second ) ,
expectedRescheduleEligible : false ,
} ,
{
desc : "exponential delay - no reschedule tracker" ,
reschedulePolicy : & ReschedulePolicy {
DelayFunction : "exponential" ,
Delay : 5 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 90 * time . Second ,
2018-03-02 00:20:09 +00:00
Unlimited : true ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 2 * time . Second ) } } ,
} ,
expectedRescheduleTime : now . Add ( - 2 * time . Second ) . Add ( 5 * time . Second ) ,
expectedRescheduleEligible : true ,
} ,
{
desc : "exponential delay with reschedule tracker" ,
reschedulePolicy : & ReschedulePolicy {
DelayFunction : "exponential" ,
Delay : 5 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 90 * time . Second ,
2018-03-02 00:20:09 +00:00
Unlimited : true ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 2 * time . Second ) } } ,
RescheduleTracker : & RescheduleTracker {
Events : [ ] * RescheduleEvent {
{
RescheduleTime : now . Add ( - 2 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 10 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 20 * time . Second ,
} ,
} ,
} } ,
expectedRescheduleTime : now . Add ( - 2 * time . Second ) . Add ( 40 * time . Second ) ,
expectedRescheduleEligible : true ,
} ,
{
desc : "exponential delay with delay ceiling reached" ,
reschedulePolicy : & ReschedulePolicy {
DelayFunction : "exponential" ,
Delay : 5 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 90 * time . Second ,
2018-03-02 00:20:09 +00:00
Unlimited : true ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 15 * time . Second ) } } ,
RescheduleTracker : & RescheduleTracker {
Events : [ ] * RescheduleEvent {
{
RescheduleTime : now . Add ( - 2 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 10 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 20 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 40 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 40 * time . Second ) . UTC ( ) . UnixNano ( ) ,
Delay : 80 * time . Second ,
} ,
} ,
} } ,
expectedRescheduleTime : now . Add ( - 15 * time . Second ) . Add ( 90 * time . Second ) ,
expectedRescheduleEligible : true ,
} ,
{
// Test case where most recent reschedule ran longer than delay ceiling
desc : "exponential delay, delay ceiling reset condition met" ,
reschedulePolicy : & ReschedulePolicy {
DelayFunction : "exponential" ,
Delay : 5 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 90 * time . Second ,
2018-03-02 00:20:09 +00:00
Unlimited : true ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 15 * time . Minute ) } } ,
RescheduleTracker : & RescheduleTracker {
Events : [ ] * RescheduleEvent {
{
RescheduleTime : now . Add ( - 2 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 10 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 20 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 40 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 80 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 90 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 90 * time . Second ,
} ,
} ,
} } ,
expectedRescheduleTime : now . Add ( - 15 * time . Minute ) . Add ( 5 * time . Second ) ,
expectedRescheduleEligible : true ,
} ,
{
desc : "fibonacci delay - no reschedule tracker" ,
reschedulePolicy : & ReschedulePolicy {
DelayFunction : "fibonacci" ,
Delay : 5 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 90 * time . Second ,
2018-03-02 00:20:09 +00:00
Unlimited : true ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 2 * time . Second ) } } } ,
expectedRescheduleTime : now . Add ( - 2 * time . Second ) . Add ( 5 * time . Second ) ,
expectedRescheduleEligible : true ,
} ,
{
desc : "fibonacci delay with reschedule tracker" ,
reschedulePolicy : & ReschedulePolicy {
DelayFunction : "fibonacci" ,
Delay : 5 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 90 * time . Second ,
2018-03-02 00:20:09 +00:00
Unlimited : true ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 2 * time . Second ) } } ,
RescheduleTracker : & RescheduleTracker {
Events : [ ] * RescheduleEvent {
{
RescheduleTime : now . Add ( - 2 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 5 * time . Second ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
} ,
} } ,
expectedRescheduleTime : now . Add ( - 2 * time . Second ) . Add ( 10 * time . Second ) ,
expectedRescheduleEligible : true ,
} ,
{
desc : "fibonacci delay with more events" ,
reschedulePolicy : & ReschedulePolicy {
DelayFunction : "fibonacci" ,
Delay : 5 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 90 * time . Second ,
2018-03-02 00:20:09 +00:00
Unlimited : true ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 2 * time . Second ) } } ,
RescheduleTracker : & RescheduleTracker {
Events : [ ] * RescheduleEvent {
{
RescheduleTime : now . Add ( - 2 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 10 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 15 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 25 * time . Second ,
} ,
} ,
} } ,
expectedRescheduleTime : now . Add ( - 2 * time . Second ) . Add ( 40 * time . Second ) ,
expectedRescheduleEligible : true ,
} ,
{
desc : "fibonacci delay with delay ceiling reached" ,
reschedulePolicy : & ReschedulePolicy {
DelayFunction : "fibonacci" ,
Delay : 5 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 50 * time . Second ,
2018-03-02 00:20:09 +00:00
Unlimited : true ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 15 * time . Second ) } } ,
RescheduleTracker : & RescheduleTracker {
Events : [ ] * RescheduleEvent {
{
RescheduleTime : now . Add ( - 2 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 10 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 15 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 25 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 40 * time . Second ) . UTC ( ) . UnixNano ( ) ,
Delay : 40 * time . Second ,
} ,
} ,
} } ,
expectedRescheduleTime : now . Add ( - 15 * time . Second ) . Add ( 50 * time . Second ) ,
expectedRescheduleEligible : true ,
} ,
{
desc : "fibonacci delay with delay reset condition met" ,
reschedulePolicy : & ReschedulePolicy {
DelayFunction : "fibonacci" ,
Delay : 5 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 50 * time . Second ,
2018-03-02 00:20:09 +00:00
Unlimited : true ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 5 * time . Minute ) } } ,
RescheduleTracker : & RescheduleTracker {
Events : [ ] * RescheduleEvent {
{
RescheduleTime : now . Add ( - 2 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 10 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 15 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 25 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 40 * time . Second ,
} ,
} ,
} } ,
expectedRescheduleTime : now . Add ( - 5 * time . Minute ) . Add ( 5 * time . Second ) ,
expectedRescheduleEligible : true ,
} ,
{
desc : "fibonacci delay with the most recent event that reset delay value" ,
reschedulePolicy : & ReschedulePolicy {
DelayFunction : "fibonacci" ,
Delay : 5 * time . Second ,
2018-03-13 15:06:26 +00:00
MaxDelay : 50 * time . Second ,
2018-03-02 00:20:09 +00:00
Unlimited : true ,
} ,
alloc : & Allocation {
ClientStatus : AllocClientStatusFailed ,
TaskStates : map [ string ] * TaskState { "foo" : { State : "start" ,
StartedAt : now . Add ( - 1 * time . Hour ) ,
FinishedAt : now . Add ( - 5 * time . Second ) } } ,
RescheduleTracker : & RescheduleTracker {
Events : [ ] * RescheduleEvent {
{
RescheduleTime : now . Add ( - 2 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 10 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 15 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 25 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 40 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Hour ) . UTC ( ) . UnixNano ( ) ,
Delay : 50 * time . Second ,
} ,
{
RescheduleTime : now . Add ( - 1 * time . Minute ) . UTC ( ) . UnixNano ( ) ,
Delay : 5 * time . Second ,
} ,
} ,
} } ,
expectedRescheduleTime : now . Add ( - 5 * time . Second ) . Add ( 5 * time . Second ) ,
expectedRescheduleEligible : true ,
} ,
}
for _ , tc := range testCases {
t . Run ( tc . desc , func ( t * testing . T ) {
require := require . New ( t )
2018-03-08 15:36:01 +00:00
j := testJob ( )
2018-09-05 22:01:02 +00:00
if tc . reschedulePolicy != nil {
j . TaskGroups [ 0 ] . ReschedulePolicy = tc . reschedulePolicy
}
2018-03-08 15:36:01 +00:00
tc . alloc . Job = j
tc . alloc . TaskGroup = j . TaskGroups [ 0 ] . Name
reschedTime , allowed := tc . alloc . NextRescheduleTime ( )
2018-03-02 00:20:09 +00:00
require . Equal ( tc . expectedRescheduleEligible , allowed )
require . Equal ( tc . expectedRescheduleTime , reschedTime )
} )
}
}
2020-05-13 20:39:04 +00:00
// TestAllocation_WaitClientStop exercises ShouldClientStop and WaitClientStop
// for allocations whose group configures stop_after_client_disconnect.
func TestAllocation_WaitClientStop(t *testing.T) {
	type testCase struct {
		desc                   string
		stop                   time.Duration
		status                 string
		expectedShould         bool
		expectedRescheduleTime time.Time
	}
	now := time.Now().UTC()

	testCases := []testCase{
		{
			desc:           "running",
			stop:           2 * time.Second,
			status:         AllocClientStatusRunning,
			expectedShould: true,
		},
		{
			desc:           "no stop_after_client_disconnect",
			status:         AllocClientStatusLost,
			expectedShould: false,
		},
		{
			desc:                   "stop",
			status:                 AllocClientStatusLost,
			stop:                   2 * time.Second,
			expectedShould:         true,
			expectedRescheduleTime: now.Add((2 + 5) * time.Second),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			job := testJob()
			alloc := &Allocation{
				ClientStatus: tc.status,
				Job:          job,
				TaskStates:   map[string]*TaskState{},
			}

			// Lost allocations record the status transition in their state list.
			if tc.status == AllocClientStatusLost {
				alloc.AppendState(AllocStateFieldClientStatus, AllocClientStatusLost)
			}

			job.TaskGroups[0].StopAfterClientDisconnect = &tc.stop
			alloc.TaskGroup = job.TaskGroups[0].Name

			require.Equal(t, tc.expectedShould, alloc.ShouldClientStop())
			if !tc.expectedShould || tc.status != AllocClientStatusLost {
				return
			}

			// WaitClientStop is computed relative to the current time, so only
			// require the result to be close to the expected reschedule time.
			diff := alloc.WaitClientStop().Unix() - tc.expectedRescheduleTime.Unix()
			require.Less(t, diff, int64(2))
		})
	}
}
2020-01-08 22:23:56 +00:00
// TestAllocation_Canonicalize_Old asserts that Canonicalize upgrades an
// allocation using the deprecated TaskResources/SharedResources fields into
// the AllocatedResources schema.
func TestAllocation_Canonicalize_Old(t *testing.T) {
	alloc := MockAlloc()

	// Strip the new-schema field and populate the deprecated ones.
	alloc.AllocatedResources = nil
	alloc.TaskResources = map[string]*Resources{
		"web": {
			CPU:      500,
			MemoryMB: 256,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "192.168.0.100",
					ReservedPorts: []Port{{Label: "admin", Value: 5000}},
					MBits:         50,
					DynamicPorts:  []Port{{Label: "http", Value: 9876}},
				},
			},
		},
	}
	alloc.SharedResources = &Resources{
		DiskMB: 150,
	}

	alloc.Canonicalize()

	want := &AllocatedResources{
		Tasks: map[string]*AllocatedTaskResources{
			"web": {
				Cpu: AllocatedCpuResources{
					CpuShares: 500,
				},
				Memory: AllocatedMemoryResources{
					MemoryMB: 256,
				},
				Networks: []*NetworkResource{
					{
						Device:        "eth0",
						IP:            "192.168.0.100",
						ReservedPorts: []Port{{Label: "admin", Value: 5000}},
						MBits:         50,
						DynamicPorts:  []Port{{Label: "http", Value: 9876}},
					},
				},
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: 150,
		},
	}
	require.Equal(t, want, alloc.AllocatedResources)
}
// TestAllocation_Canonicalize_New asserts that an alloc with the latest
// schema isn't modified by Canonicalize.
func TestAllocation_Canonicalize_New(t *testing.T) {
	alloc := MockAlloc()
	want := alloc.Copy()

	alloc.Canonicalize()

	require.Equal(t, want, alloc)
}
2018-01-17 17:44:06 +00:00
func TestRescheduleTracker_Copy ( t * testing . T ) {
type testCase struct {
original * RescheduleTracker
expected * RescheduleTracker
}
cases := [ ] testCase {
{ nil , nil } ,
{ & RescheduleTracker { Events : [ ] * RescheduleEvent {
2018-03-02 00:20:09 +00:00
{ RescheduleTime : 2 ,
PrevAllocID : "12" ,
PrevNodeID : "12" ,
Delay : 30 * time . Second } ,
2018-01-17 17:44:06 +00:00
} } , & RescheduleTracker { Events : [ ] * RescheduleEvent {
2018-03-02 00:20:09 +00:00
{ RescheduleTime : 2 ,
PrevAllocID : "12" ,
PrevNodeID : "12" ,
Delay : 30 * time . Second } ,
2018-01-17 17:44:06 +00:00
} } } ,
}
for _ , tc := range cases {
if got := tc . original . Copy ( ) ; ! reflect . DeepEqual ( got , tc . expected ) {
t . Fatalf ( "expected %v but got %v" , * tc . expected , * got )
}
}
}
2016-10-11 22:25:49 +00:00
func TestVault_Validate ( t * testing . T ) {
v := & Vault {
Env : true ,
ChangeMode : VaultChangeModeNoop ,
}
if err := v . Validate ( ) ; err == nil || ! strings . Contains ( err . Error ( ) , "Policy list" ) {
t . Fatalf ( "Expected policy list empty error" )
}
2017-02-13 18:51:29 +00:00
v . Policies = [ ] string { "foo" , "root" }
2016-10-11 22:25:49 +00:00
v . ChangeMode = VaultChangeModeSignal
2017-02-13 18:51:29 +00:00
err := v . Validate ( )
if err == nil {
t . Fatalf ( "Expected validation errors" )
}
if ! strings . Contains ( err . Error ( ) , "Signal must" ) {
2016-10-11 22:25:49 +00:00
t . Fatalf ( "Expected signal empty error" )
}
2017-02-13 18:51:29 +00:00
if ! strings . Contains ( err . Error ( ) , "root" ) {
t . Fatalf ( "Expected root error" )
}
2016-10-11 22:25:49 +00:00
}
2016-11-23 22:56:50 +00:00
2017-01-20 18:33:52 +00:00
func TestParameterizedJobConfig_Validate ( t * testing . T ) {
d := & ParameterizedJobConfig {
2016-12-14 22:51:28 +00:00
Payload : "foo" ,
2016-11-23 22:56:50 +00:00
}
2016-12-14 22:51:28 +00:00
if err := d . Validate ( ) ; err == nil || ! strings . Contains ( err . Error ( ) , "payload" ) {
t . Fatalf ( "Expected unknown payload requirement: %v" , err )
2016-11-23 22:56:50 +00:00
}
2016-12-14 22:51:28 +00:00
d . Payload = DispatchPayloadOptional
2016-11-23 22:56:50 +00:00
d . MetaOptional = [ ] string { "foo" , "bar" }
d . MetaRequired = [ ] string { "bar" , "baz" }
if err := d . Validate ( ) ; err == nil || ! strings . Contains ( err . Error ( ) , "disjoint" ) {
t . Fatalf ( "Expected meta not being disjoint error: %v" , err )
}
}
2017-01-20 18:33:52 +00:00
func TestParameterizedJobConfig_Validate_NonBatch ( t * testing . T ) {
2016-12-16 23:20:12 +00:00
job := testJob ( )
2017-01-20 18:33:52 +00:00
job . ParameterizedJob = & ParameterizedJobConfig {
2016-12-16 23:20:12 +00:00
Payload : DispatchPayloadOptional ,
}
job . Type = JobTypeSystem
if err := job . Validate ( ) ; err == nil || ! strings . Contains ( err . Error ( ) , "only be used with" ) {
t . Fatalf ( "Expected bad scheduler tpye: %v" , err )
}
}
2020-07-20 14:27:25 +00:00
// TestJobConfig_Validate_StopAferClientDisconnect exercises job validation of
// the stop_after_client_disconnect group field.
// NOTE(review): the "Afer" spelling in the test name is a pre-existing typo;
// it is kept because renaming would change the test's exported identifier.
func TestJobConfig_Validate_StopAferClientDisconnect(t *testing.T) {
	job := testJob()

	// A system job may not set stop_after_client_disconnect at all.
	job.Type = JobTypeSystem
	stop := 1 * time.Minute
	job.TaskGroups[0].StopAfterClientDisconnect = &stop

	err := job.Validate()
	require.Error(t, err)
	require.Contains(t, err.Error(), "stop_after_client_disconnect can only be set in batch and service jobs")

	// A batch job may set the field, but a negative duration is invalid.
	job.Type = JobTypeBatch
	invalid := -1 * time.Minute
	job.TaskGroups[0].StopAfterClientDisconnect = &invalid

	err = job.Validate()
	require.Error(t, err)
	require.Contains(t, err.Error(), "stop_after_client_disconnect must be a positive value")

	// A batch job with a positive duration validates cleanly.
	job.TaskGroups[0].StopAfterClientDisconnect = &stop

	err = job.Validate()
	require.NoError(t, err)
}
2017-01-20 18:33:52 +00:00
func TestParameterizedJobConfig_Canonicalize ( t * testing . T ) {
d := & ParameterizedJobConfig { }
2016-11-23 22:56:50 +00:00
d . Canonicalize ( )
2016-12-14 22:51:28 +00:00
if d . Payload != DispatchPayloadOptional {
2016-11-23 22:56:50 +00:00
t . Fatalf ( "Canonicalize failed" )
}
}
2016-12-18 23:48:30 +00:00
2017-01-26 05:06:16 +00:00
func TestDispatchPayloadConfig_Validate ( t * testing . T ) {
d := & DispatchPayloadConfig {
2016-12-18 23:48:30 +00:00
File : "foo" ,
}
// task/local/haha
if err := d . Validate ( ) ; err != nil {
t . Fatalf ( "bad: %v" , err )
}
// task/haha
d . File = "../haha"
if err := d . Validate ( ) ; err != nil {
t . Fatalf ( "bad: %v" , err )
}
// ../haha
d . File = "../../../haha"
if err := d . Validate ( ) ; err == nil {
t . Fatalf ( "bad: %v" , err )
}
}
2017-01-14 00:46:08 +00:00
2020-09-29 21:57:46 +00:00
// TestScalingPolicy_Canonicalize asserts that Canonicalize defaults the
// policy type to horizontal and leaves an explicit type untouched.
func TestScalingPolicy_Canonicalize(t *testing.T) {
	cases := []struct {
		name     string
		input    *ScalingPolicy
		expected *ScalingPolicy
	}{
		{
			name:     "empty policy",
			input:    &ScalingPolicy{},
			expected: &ScalingPolicy{Type: ScalingPolicyTypeHorizontal},
		},
		{
			name:     "policy with type",
			input:    &ScalingPolicy{Type: "other-type"},
			expected: &ScalingPolicy{Type: "other-type"},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			tc.input.Canonicalize()
			require.Equal(t, tc.expected, tc.input)
		})
	}
}
func TestScalingPolicy_Validate ( t * testing . T ) {
type testCase struct {
name string
input * ScalingPolicy
expectedErr string
}
cases := [ ] testCase {
{
name : "full horizontal policy" ,
input : & ScalingPolicy {
Policy : map [ string ] interface { } {
"key" : "value" ,
} ,
Type : ScalingPolicyTypeHorizontal ,
Min : 5 ,
Max : 5 ,
Enabled : true ,
Target : map [ string ] string {
ScalingTargetNamespace : "my-namespace" ,
ScalingTargetJob : "my-job" ,
ScalingTargetGroup : "my-task-group" ,
} ,
} ,
} ,
{
name : "missing type" ,
input : & ScalingPolicy { } ,
expectedErr : "missing scaling policy type" ,
} ,
{
name : "invalid type" ,
input : & ScalingPolicy {
Type : "not valid" ,
} ,
expectedErr : ` scaling policy type "not valid" is not valid ` ,
} ,
{
name : "min < 0" ,
input : & ScalingPolicy {
Type : ScalingPolicyTypeHorizontal ,
Min : - 1 ,
Max : 5 ,
} ,
expectedErr : "minimum count must be specified and non-negative" ,
} ,
{
name : "max < 0" ,
input : & ScalingPolicy {
Type : ScalingPolicyTypeHorizontal ,
Min : 5 ,
Max : - 1 ,
} ,
expectedErr : "maximum count must be specified and non-negative" ,
} ,
{
name : "min > max" ,
input : & ScalingPolicy {
Type : ScalingPolicyTypeHorizontal ,
Min : 10 ,
Max : 0 ,
} ,
expectedErr : "maximum count must not be less than minimum count" ,
} ,
{
name : "min == max" ,
input : & ScalingPolicy {
Type : ScalingPolicyTypeHorizontal ,
Min : 10 ,
Max : 10 ,
} ,
} ,
{
name : "min == 0" ,
input : & ScalingPolicy {
Type : ScalingPolicyTypeHorizontal ,
Min : 0 ,
Max : 10 ,
} ,
} ,
{
name : "max == 0" ,
input : & ScalingPolicy {
Type : ScalingPolicyTypeHorizontal ,
Min : 0 ,
Max : 0 ,
} ,
} ,
{
name : "horizontal missing namespace" ,
input : & ScalingPolicy {
Type : ScalingPolicyTypeHorizontal ,
Target : map [ string ] string {
ScalingTargetJob : "my-job" ,
ScalingTargetGroup : "my-group" ,
} ,
} ,
expectedErr : "missing target namespace" ,
} ,
{
name : "horizontal missing job" ,
input : & ScalingPolicy {
Type : ScalingPolicyTypeHorizontal ,
Target : map [ string ] string {
ScalingTargetNamespace : "my-namespace" ,
ScalingTargetGroup : "my-group" ,
} ,
} ,
expectedErr : "missing target job" ,
} ,
{
name : "horizontal missing group" ,
input : & ScalingPolicy {
Type : ScalingPolicyTypeHorizontal ,
Target : map [ string ] string {
ScalingTargetNamespace : "my-namespace" ,
ScalingTargetJob : "my-job" ,
} ,
} ,
expectedErr : "missing target group" ,
} ,
}
for _ , c := range cases {
t . Run ( c . name , func ( t * testing . T ) {
require := require . New ( t )
err := c . input . Validate ( )
if len ( c . expectedErr ) > 0 {
2021-01-21 19:53:02 +00:00
require . Error ( err , c . expectedErr )
2020-09-29 21:57:46 +00:00
} else {
require . NoError ( err )
}
} )
}
}
2017-01-14 00:46:08 +00:00
func TestIsRecoverable ( t * testing . T ) {
if IsRecoverable ( nil ) {
t . Errorf ( "nil should not be recoverable" )
}
if IsRecoverable ( NewRecoverableError ( nil , true ) ) {
t . Errorf ( "NewRecoverableError(nil, true) should not be recoverable" )
}
if IsRecoverable ( fmt . Errorf ( "i promise im recoverable" ) ) {
t . Errorf ( "Custom errors should not be recoverable" )
}
if IsRecoverable ( NewRecoverableError ( fmt . Errorf ( "" ) , false ) ) {
t . Errorf ( "Explicitly unrecoverable errors should not be recoverable" )
}
if ! IsRecoverable ( NewRecoverableError ( fmt . Errorf ( "" ) , true ) ) {
t . Errorf ( "Explicitly recoverable errors *should* be recoverable" )
}
}
2017-08-12 21:36:10 +00:00
func TestACLTokenValidate ( t * testing . T ) {
tk := & ACLToken { }
2018-03-11 18:27:02 +00:00
// Missing a type
2017-08-12 21:36:10 +00:00
err := tk . Validate ( )
assert . NotNil ( t , err )
if ! strings . Contains ( err . Error ( ) , "client or management" ) {
t . Fatalf ( "bad: %v" , err )
}
// Missing policies
tk . Type = ACLClientToken
err = tk . Validate ( )
assert . NotNil ( t , err )
if ! strings . Contains ( err . Error ( ) , "missing policies" ) {
t . Fatalf ( "bad: %v" , err )
}
2018-03-11 18:37:37 +00:00
// Invalid policies
2017-08-12 21:36:10 +00:00
tk . Type = ACLManagementToken
tk . Policies = [ ] string { "foo" }
err = tk . Validate ( )
assert . NotNil ( t , err )
if ! strings . Contains ( err . Error ( ) , "associated with policies" ) {
t . Fatalf ( "bad: %v" , err )
}
2018-03-11 18:37:37 +00:00
// Name too long policies
2018-03-26 21:28:33 +00:00
tk . Name = ""
for i := 0 ; i < 8 ; i ++ {
tk . Name += uuid . Generate ( )
}
2017-08-12 21:36:10 +00:00
tk . Policies = nil
err = tk . Validate ( )
assert . NotNil ( t , err )
if ! strings . Contains ( err . Error ( ) , "too long" ) {
t . Fatalf ( "bad: %v" , err )
}
// Make it valid
tk . Name = "foo"
err = tk . Validate ( )
assert . Nil ( t , err )
}
2017-08-22 00:45:11 +00:00
// TestACLTokenPolicySubset asserts PolicySubset behavior: client tokens only
// cover their assigned policies, while management tokens cover any set.
func TestACLTokenPolicySubset(t *testing.T) {
	tk := &ACLToken{
		Type:     ACLClientToken,
		Policies: []string{"foo", "bar", "baz"},
	}

	// Subsets of the assigned policies are allowed, including the empty set.
	for _, subset := range [][]string{
		{"foo", "bar", "baz"},
		{"foo", "bar"},
		{"foo"},
		{},
	} {
		assert.Equal(t, true, tk.PolicySubset(subset))
	}

	// Any unknown policy makes the set a non-subset.
	assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"}))
	assert.Equal(t, false, tk.PolicySubset([]string{"new"}))

	// Management tokens implicitly cover every policy set.
	tk = &ACLToken{
		Type: ACLManagementToken,
	}
	for _, subset := range [][]string{
		{"foo", "bar", "baz"},
		{"foo", "bar"},
		{"foo"},
		{},
		{"foo", "bar", "new"},
		{"new"},
	} {
		assert.Equal(t, true, tk.PolicySubset(subset))
	}
}
2017-08-30 16:58:42 +00:00
// TestACLTokenSetHash asserts that SetHash computes a non-nil hash, stores it
// on the token, and produces a different hash when the token changes.
func TestACLTokenSetHash(t *testing.T) {
	tk := &ACLToken{
		Name:     "foo",
		Type:     ACLClientToken,
		Policies: []string{"foo", "bar"},
		Global:   false,
	}

	first := tk.SetHash()
	assert.NotNil(t, first)
	assert.NotNil(t, tk.Hash)
	assert.Equal(t, first, tk.Hash)

	// Mutating the token must yield a different hash.
	tk.Policies = []string{"foo"}
	second := tk.SetHash()
	assert.NotNil(t, second)
	assert.NotNil(t, tk.Hash)
	assert.Equal(t, second, tk.Hash)
	assert.NotEqual(t, first, second)
}
// TestACLPolicySetHash asserts that SetHash computes a non-nil hash, stores
// it on the policy, and produces a different hash when the rules change.
func TestACLPolicySetHash(t *testing.T) {
	ap := &ACLPolicy{
		Name:        "foo",
		Description: "great policy",
		Rules:       "node { policy = \"read\" }",
	}

	first := ap.SetHash()
	assert.NotNil(t, first)
	assert.NotNil(t, ap.Hash)
	assert.Equal(t, first, ap.Hash)

	// Changing the rules must yield a different hash.
	ap.Rules = "node { policy = \"write\" }"
	second := ap.SetHash()
	assert.NotNil(t, second)
	assert.NotNil(t, ap.Hash)
	assert.Equal(t, second, ap.Hash)
	assert.NotEqual(t, first, second)
}
2017-10-31 20:00:23 +00:00
func TestTaskEventPopulate ( t * testing . T ) {
2017-11-13 17:14:57 +00:00
prepopulatedEvent := NewTaskEvent ( TaskSetup )
prepopulatedEvent . DisplayMessage = "Hola"
2017-10-31 20:00:23 +00:00
testcases := [ ] struct {
event * TaskEvent
expectedMsg string
} {
{ nil , "" } ,
2017-11-13 17:14:57 +00:00
{ prepopulatedEvent , "Hola" } ,
{ NewTaskEvent ( TaskSetup ) . SetMessage ( "Setup" ) , "Setup" } ,
2017-10-31 20:00:23 +00:00
{ NewTaskEvent ( TaskStarted ) , "Task started by client" } ,
{ NewTaskEvent ( TaskReceived ) , "Task received by client" } ,
2017-11-13 17:14:57 +00:00
{ NewTaskEvent ( TaskFailedValidation ) , "Validation of task failed" } ,
2017-10-31 20:00:23 +00:00
{ NewTaskEvent ( TaskFailedValidation ) . SetValidationError ( fmt . Errorf ( "task failed validation" ) ) , "task failed validation" } ,
2017-11-13 17:14:57 +00:00
{ NewTaskEvent ( TaskSetupFailure ) , "Task setup failed" } ,
2017-10-31 20:00:23 +00:00
{ NewTaskEvent ( TaskSetupFailure ) . SetSetupError ( fmt . Errorf ( "task failed setup" ) ) , "task failed setup" } ,
{ NewTaskEvent ( TaskDriverFailure ) , "Failed to start task" } ,
{ NewTaskEvent ( TaskDownloadingArtifacts ) , "Client is downloading artifacts" } ,
{ NewTaskEvent ( TaskArtifactDownloadFailed ) , "Failed to download artifacts" } ,
{ NewTaskEvent ( TaskArtifactDownloadFailed ) . SetDownloadError ( fmt . Errorf ( "connection reset by peer" ) ) , "connection reset by peer" } ,
2017-11-13 17:14:57 +00:00
{ NewTaskEvent ( TaskRestarting ) . SetRestartDelay ( 2 * time . Second ) . SetRestartReason ( ReasonWithinPolicy ) , "Task restarting in 2s" } ,
{ NewTaskEvent ( TaskRestarting ) . SetRestartReason ( "Chaos Monkey did it" ) , "Chaos Monkey did it - Task restarting in 0s" } ,
{ NewTaskEvent ( TaskKilling ) , "Sent interrupt" } ,
2018-03-13 22:09:03 +00:00
{ NewTaskEvent ( TaskKilling ) . SetKillReason ( "Its time for you to die" ) , "Its time for you to die" } ,
2017-10-31 20:00:23 +00:00
{ NewTaskEvent ( TaskKilling ) . SetKillTimeout ( 1 * time . Second ) , "Sent interrupt. Waiting 1s before force killing" } ,
{ NewTaskEvent ( TaskTerminated ) . SetExitCode ( - 1 ) . SetSignal ( 3 ) , "Exit Code: -1, Signal: 3" } ,
2017-11-13 17:14:57 +00:00
{ NewTaskEvent ( TaskTerminated ) . SetMessage ( "Goodbye" ) , "Exit Code: 0, Exit Message: \"Goodbye\"" } ,
{ NewTaskEvent ( TaskKilled ) , "Task successfully killed" } ,
{ NewTaskEvent ( TaskKilled ) . SetKillError ( fmt . Errorf ( "undead creatures can't be killed" ) ) , "undead creatures can't be killed" } ,
{ NewTaskEvent ( TaskNotRestarting ) . SetRestartReason ( "Chaos Monkey did it" ) , "Chaos Monkey did it" } ,
{ NewTaskEvent ( TaskNotRestarting ) , "Task exceeded restart policy" } ,
2017-10-31 20:00:23 +00:00
{ NewTaskEvent ( TaskLeaderDead ) , "Leader Task in Group dead" } ,
2017-11-13 17:14:57 +00:00
{ NewTaskEvent ( TaskSiblingFailed ) , "Task's sibling failed" } ,
2017-10-31 20:00:23 +00:00
{ NewTaskEvent ( TaskSiblingFailed ) . SetFailedSibling ( "patient zero" ) , "Task's sibling \"patient zero\" failed" } ,
2017-11-13 17:14:57 +00:00
{ NewTaskEvent ( TaskSignaling ) , "Task being sent a signal" } ,
{ NewTaskEvent ( TaskSignaling ) . SetTaskSignal ( os . Interrupt ) , "Task being sent signal interrupt" } ,
{ NewTaskEvent ( TaskSignaling ) . SetTaskSignal ( os . Interrupt ) . SetTaskSignalReason ( "process interrupted" ) , "Task being sent signal interrupt: process interrupted" } ,
{ NewTaskEvent ( TaskRestartSignal ) , "Task signaled to restart" } ,
{ NewTaskEvent ( TaskRestartSignal ) . SetRestartReason ( "Chaos Monkey restarted it" ) , "Chaos Monkey restarted it" } ,
{ NewTaskEvent ( TaskDriverMessage ) . SetDriverMessage ( "YOLO" ) , "YOLO" } ,
{ NewTaskEvent ( "Unknown Type, No message" ) , "" } ,
{ NewTaskEvent ( "Unknown Type" ) . SetMessage ( "Hello world" ) , "Hello world" } ,
2017-10-31 20:00:23 +00:00
}
for _ , tc := range testcases {
tc . event . PopulateEventDisplayMessage ( )
if tc . event != nil && tc . event . DisplayMessage != tc . expectedMsg {
t . Fatalf ( "Expected %v but got %v" , tc . expectedMsg , tc . event . DisplayMessage )
}
}
}
2018-02-27 17:21:06 +00:00
func TestNetworkResourcesEquals ( t * testing . T ) {
require := require . New ( t )
var networkResourcesTest = [ ] struct {
input [ ] * NetworkResource
expected bool
errorMsg string
} {
{
[ ] * NetworkResource {
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
} ,
true ,
"Equal network resources should return true" ,
} ,
{
[ ] * NetworkResource {
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.0" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
} ,
false ,
"Different IP addresses should return false" ,
} ,
{
[ ] * NetworkResource {
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 40 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
} ,
false ,
"Different MBits values should return false" ,
} ,
{
[ ] * NetworkResource {
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "web" , 80 , 0 , "" } , { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
} ,
false ,
"Different ReservedPorts lengths should return false" ,
} ,
{
[ ] * NetworkResource {
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
ReservedPorts : [ ] Port { } ,
} ,
} ,
false ,
"Empty and non empty ReservedPorts values should return false" ,
} ,
{
[ ] * NetworkResource {
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
ReservedPorts : [ ] Port { { "notweb" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
} ,
false ,
"Different valued ReservedPorts values should return false" ,
} ,
{
[ ] * NetworkResource {
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
DynamicPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
DynamicPorts : [ ] Port { { "web" , 80 , 0 , "" } , { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
} ,
false ,
"Different DynamicPorts lengths should return false" ,
} ,
{
[ ] * NetworkResource {
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
DynamicPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
DynamicPorts : [ ] Port { } ,
} ,
} ,
false ,
"Empty and non empty DynamicPorts values should return false" ,
} ,
{
[ ] * NetworkResource {
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
DynamicPorts : [ ] Port { { "web" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
2018-02-28 17:58:44 +00:00
{
2018-02-27 17:21:06 +00:00
IP : "10.0.0.1" ,
MBits : 50 ,
2020-06-19 17:53:31 +00:00
DynamicPorts : [ ] Port { { "notweb" , 80 , 0 , "" } } ,
2018-02-27 17:21:06 +00:00
} ,
} ,
false ,
"Different valued DynamicPorts values should return false" ,
} ,
}
for _ , testCase := range networkResourcesTest {
first := testCase . input [ 0 ]
second := testCase . input [ 1 ]
require . Equal ( testCase . expected , first . Equals ( second ) , testCase . errorMsg )
}
}
2018-03-06 22:37:37 +00:00
2018-03-29 23:33:11 +00:00
func TestNode_Canonicalize ( t * testing . T ) {
t . Parallel ( )
require := require . New ( t )
// Make sure the eligiblity is set properly
node := & Node { }
node . Canonicalize ( )
require . Equal ( NodeSchedulingEligible , node . SchedulingEligibility )
node = & Node {
Drain : true ,
}
node . Canonicalize ( )
require . Equal ( NodeSchedulingIneligible , node . SchedulingEligibility )
}
2018-04-17 15:53:18 +00:00
func TestNode_Copy ( t * testing . T ) {
t . Parallel ( )
require := require . New ( t )
node := & Node {
ID : uuid . Generate ( ) ,
SecretID : uuid . Generate ( ) ,
Datacenter : "dc1" ,
Name : "foobar" ,
Attributes : map [ string ] string {
"kernel.name" : "linux" ,
"arch" : "x86" ,
"nomad.version" : "0.5.0" ,
"driver.exec" : "1" ,
"driver.mock_driver" : "1" ,
} ,
Resources : & Resources {
CPU : 4000 ,
MemoryMB : 8192 ,
DiskMB : 100 * 1024 ,
Networks : [ ] * NetworkResource {
{
Device : "eth0" ,
CIDR : "192.168.0.100/32" ,
MBits : 1000 ,
} ,
} ,
} ,
Reserved : & Resources {
CPU : 100 ,
MemoryMB : 256 ,
DiskMB : 4 * 1024 ,
Networks : [ ] * NetworkResource {
{
Device : "eth0" ,
IP : "192.168.0.100" ,
ReservedPorts : [ ] Port { { Label : "ssh" , Value : 22 } } ,
MBits : 1 ,
} ,
} ,
} ,
2018-10-03 16:47:18 +00:00
NodeResources : & NodeResources {
Cpu : NodeCpuResources {
2018-10-04 21:33:09 +00:00
CpuShares : 4000 ,
2018-10-03 16:47:18 +00:00
} ,
Memory : NodeMemoryResources {
MemoryMB : 8192 ,
} ,
Disk : NodeDiskResources {
DiskMB : 100 * 1024 ,
} ,
Networks : [ ] * NetworkResource {
{
Device : "eth0" ,
CIDR : "192.168.0.100/32" ,
MBits : 1000 ,
} ,
} ,
} ,
ReservedResources : & NodeReservedResources {
Cpu : NodeReservedCpuResources {
2018-10-04 21:33:09 +00:00
CpuShares : 100 ,
2018-10-03 16:47:18 +00:00
} ,
Memory : NodeReservedMemoryResources {
MemoryMB : 256 ,
} ,
Disk : NodeReservedDiskResources {
DiskMB : 4 * 1024 ,
} ,
Networks : NodeReservedNetworkResources {
ReservedHostPorts : "22" ,
} ,
} ,
2018-04-17 15:53:18 +00:00
Links : map [ string ] string {
"consul" : "foobar.dc1" ,
} ,
Meta : map [ string ] string {
"pci-dss" : "true" ,
"database" : "mysql" ,
"version" : "5.6" ,
} ,
NodeClass : "linux-medium-pci" ,
Status : NodeStatusReady ,
SchedulingEligibility : NodeSchedulingEligible ,
Drivers : map [ string ] * DriverInfo {
2018-04-17 20:39:32 +00:00
"mock_driver" : {
2018-04-17 15:53:18 +00:00
Attributes : map [ string ] string { "running" : "1" } ,
Detected : true ,
Healthy : true ,
HealthDescription : "Currently active" ,
UpdateTime : time . Now ( ) ,
} ,
} ,
}
node . ComputeClass ( )
node2 := node . Copy ( )
require . Equal ( node . Attributes , node2 . Attributes )
require . Equal ( node . Resources , node2 . Resources )
require . Equal ( node . Reserved , node2 . Reserved )
require . Equal ( node . Links , node2 . Links )
require . Equal ( node . Meta , node2 . Meta )
require . Equal ( node . Events , node2 . Events )
require . Equal ( node . DrainStrategy , node2 . DrainStrategy )
require . Equal ( node . Drivers , node2 . Drivers )
}
2018-07-17 22:21:00 +00:00
func TestSpread_Validate ( t * testing . T ) {
type tc struct {
spread * Spread
err error
name string
}
testCases := [ ] tc {
{
spread : & Spread { } ,
err : fmt . Errorf ( "Missing spread attribute" ) ,
name : "empty spread" ,
} ,
{
spread : & Spread {
Attribute : "${node.datacenter}" ,
Weight : - 1 ,
} ,
err : fmt . Errorf ( "Spread stanza must have a positive weight from 0 to 100" ) ,
name : "Invalid weight" ,
} ,
{
spread : & Spread {
Attribute : "${node.datacenter}" ,
2019-01-30 20:20:38 +00:00
Weight : 110 ,
2018-07-17 22:21:00 +00:00
} ,
err : fmt . Errorf ( "Spread stanza must have a positive weight from 0 to 100" ) ,
name : "Invalid weight" ,
} ,
{
spread : & Spread {
Attribute : "${node.datacenter}" ,
Weight : 50 ,
2018-07-26 00:08:25 +00:00
SpreadTarget : [ ] * SpreadTarget {
{
Value : "dc1" ,
Percent : 25 ,
} ,
{
Value : "dc2" ,
Percent : 150 ,
} ,
} ,
2018-07-17 22:21:00 +00:00
} ,
2018-07-26 00:08:25 +00:00
err : fmt . Errorf ( "Spread target percentage for value \"dc2\" must be between 0 and 100" ) ,
name : "Invalid percentages" ,
} ,
{
spread : & Spread {
Attribute : "${node.datacenter}" ,
Weight : 50 ,
SpreadTarget : [ ] * SpreadTarget {
{
Value : "dc1" ,
Percent : 75 ,
} ,
{
Value : "dc2" ,
Percent : 75 ,
} ,
} ,
} ,
2018-07-31 02:59:35 +00:00
err : fmt . Errorf ( "Sum of spread target percentages must not be greater than 100%%; got %d%%" , 150 ) ,
2018-07-26 00:08:25 +00:00
name : "Invalid percentages" ,
2018-07-17 22:21:00 +00:00
} ,
{
spread : & Spread {
Attribute : "${node.datacenter}" ,
Weight : 50 ,
SpreadTarget : [ ] * SpreadTarget {
{
Value : "dc1" ,
Percent : 25 ,
} ,
{
Value : "dc1" ,
Percent : 50 ,
} ,
} ,
} ,
err : fmt . Errorf ( "Spread target value \"dc1\" already defined" ) ,
name : "No spread targets" ,
} ,
{
spread : & Spread {
Attribute : "${node.datacenter}" ,
Weight : 50 ,
SpreadTarget : [ ] * SpreadTarget {
{
Value : "dc1" ,
Percent : 25 ,
} ,
{
Value : "dc2" ,
Percent : 50 ,
} ,
} ,
} ,
err : nil ,
name : "Valid spread" ,
} ,
}
for _ , tc := range testCases {
t . Run ( tc . name , func ( t * testing . T ) {
err := tc . spread . Validate ( )
if tc . err != nil {
require . NotNil ( t , err )
require . Contains ( t , err . Error ( ) , tc . err . Error ( ) )
} else {
require . Nil ( t , err )
}
} )
}
}
2018-10-02 20:36:04 +00:00
// TestNodeReservedNetworkResources_ParseReserved verifies that
// ParseReservedHostPorts parses comma-separated ports and ranges,
// deduplicates overlaps into a sorted set, and rejects inverted ranges.
func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) {
	require := require.New(t)
	cases := []struct {
		Input  string
		Parsed []uint64
		Err    bool
	}{
		// Simple comma-separated list.
		{
			"1,2,3",
			[]uint64{1, 2, 3},
			false,
		},
		// Duplicates and an overlapping range collapse into a sorted set.
		{
			"3,1,2,1,2,3,1-3",
			[]uint64{1, 2, 3},
			false,
		},
		// An inverted range is rejected.
		{
			"3-1",
			nil,
			true,
		},
		// Overlapping ranges merge.
		{
			"1-3,2-4",
			[]uint64{1, 2, 3, 4},
			false,
		},
		// Mixed singles, ranges, and a single-element range.
		{
			"1-3,4,5-5,6,7,8-10",
			[]uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
			false,
		},
	}
	for i, tc := range cases {
		r := &NodeReservedNetworkResources{ReservedHostPorts: tc.Input}
		out, err := r.ParseReservedHostPorts()
		// t.Fatalf stops the test immediately, so the `continue` that used
		// to follow it here was unreachable dead code.
		if (err != nil) != tc.Err {
			t.Fatalf("test case %d: %v", i, err)
		}
		// require.Equal takes (expected, actual); the arguments were
		// previously swapped, producing misleading failure output.
		require.Equal(tc.Parsed, out)
	}
}
2020-06-09 17:56:58 +00:00
func TestMultiregion_CopyCanonicalize ( t * testing . T ) {
require := require . New ( t )
emptyOld := & Multiregion { }
expected := & Multiregion {
Strategy : & MultiregionStrategy { } ,
Regions : [ ] * MultiregionRegion { } ,
}
old := emptyOld . Copy ( )
old . Canonicalize ( )
require . Equal ( old , expected )
require . False ( old . Diff ( expected ) )
nonEmptyOld := & Multiregion {
Strategy : & MultiregionStrategy {
MaxParallel : 2 ,
2020-06-16 19:17:53 +00:00
OnFailure : "fail_all" ,
2020-06-09 17:56:58 +00:00
} ,
Regions : [ ] * MultiregionRegion {
{
Name : "west" ,
Count : 2 ,
Datacenters : [ ] string { "west-1" , "west-2" } ,
Meta : map [ string ] string { } ,
} ,
{
Name : "east" ,
Count : 1 ,
Datacenters : [ ] string { "east-1" } ,
Meta : map [ string ] string { } ,
} ,
} ,
}
old = nonEmptyOld . Copy ( )
old . Canonicalize ( )
require . Equal ( old , nonEmptyOld )
require . False ( old . Diff ( nonEmptyOld ) )
}
2020-05-15 15:09:01 +00:00
// TestNodeResources_Merge verifies that Merge overlays non-zero fields from
// its argument onto the receiver and appends the argument's networks.
func TestNodeResources_Merge(t *testing.T) {
	base := &NodeResources{
		Cpu:    NodeCpuResources{CpuShares: int64(32000)},
		Memory: NodeMemoryResources{MemoryMB: int64(64000)},
		Networks: Networks{
			{Device: "foo"},
		},
	}

	overlay := &NodeResources{
		Memory: NodeMemoryResources{MemoryMB: int64(100000)},
		Networks: Networks{
			{Mode: "foo/bar"},
		},
	}
	base.Merge(overlay)

	// CPU is untouched, memory is overridden, and the network list grows.
	require.Exactly(t, &NodeResources{
		Cpu:    NodeCpuResources{CpuShares: int64(32000)},
		Memory: NodeMemoryResources{MemoryMB: int64(100000)},
		Networks: Networks{
			{Device: "foo"},
			{Mode: "foo/bar"},
		},
	}, base)
}
2020-08-20 15:07:13 +00:00
2020-11-11 21:21:47 +00:00
// TestAllocatedResources_Canonicalize verifies that Canonicalize lifts task
// network ports into the shared port list, preserving any ports already
// present. Cases now run as named subtests (t.Run) so a failure identifies
// its case directly instead of relying on a message argument.
func TestAllocatedResources_Canonicalize(t *testing.T) {
	cases := map[string]struct {
		input    *AllocatedResources
		expected *AllocatedResources
	}{
		"base": {
			input: &AllocatedResources{
				Tasks: map[string]*AllocatedTaskResources{
					"task": {
						Networks: Networks{
							{
								IP:           "127.0.0.1",
								DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
							},
						},
					},
				},
			},
			expected: &AllocatedResources{
				Tasks: map[string]*AllocatedTaskResources{
					"task": {
						Networks: Networks{
							{
								IP:           "127.0.0.1",
								DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
							},
						},
					},
				},
				Shared: AllocatedSharedResources{
					Ports: AllocatedPorts{
						{
							Label:  "admin",
							Value:  8080,
							To:     0,
							HostIP: "127.0.0.1",
						},
					},
				},
			},
		},
		"base with existing": {
			input: &AllocatedResources{
				Tasks: map[string]*AllocatedTaskResources{
					"task": {
						Networks: Networks{
							{
								IP:           "127.0.0.1",
								DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
							},
						},
					},
				},
				Shared: AllocatedSharedResources{
					Ports: AllocatedPorts{
						{
							Label:  "http",
							Value:  80,
							To:     8080,
							HostIP: "127.0.0.1",
						},
					},
				},
			},
			expected: &AllocatedResources{
				Tasks: map[string]*AllocatedTaskResources{
					"task": {
						Networks: Networks{
							{
								IP:           "127.0.0.1",
								DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
							},
						},
					},
				},
				Shared: AllocatedSharedResources{
					Ports: AllocatedPorts{
						{
							Label:  "http",
							Value:  80,
							To:     8080,
							HostIP: "127.0.0.1",
						},
						{
							Label:  "admin",
							Value:  8080,
							To:     0,
							HostIP: "127.0.0.1",
						},
					},
				},
			},
		},
	}

	for name, tc := range cases {
		tc := tc // shadow for the closure (pre-Go 1.22 loop semantics)
		t.Run(name, func(t *testing.T) {
			tc.input.Canonicalize()
			require.Exactly(t, tc.expected, tc.input)
		})
	}
}
2020-08-20 15:07:13 +00:00
// TestAllocatedSharedResources_Canonicalize verifies that Canonicalize
// flattens each network's dynamic and reserved ports into the shared Ports
// list, stamping every entry with the network's IP as the host IP.
func TestAllocatedSharedResources_Canonicalize(t *testing.T) {
	shared := &AllocatedSharedResources{
		Networks: []*NetworkResource{
			{
				IP: "127.0.0.1",
				DynamicPorts: []Port{
					{Label: "http", Value: 22222, To: 8080},
				},
				ReservedPorts: []Port{
					{Label: "redis", Value: 6783, To: 6783},
				},
			},
		},
	}

	shared.Canonicalize()

	expected := AllocatedPorts{
		{
			Label:  "http",
			Value:  22222,
			To:     8080,
			HostIP: "127.0.0.1",
		},
		{
			Label:  "redis",
			Value:  6783,
			To:     6783,
			HostIP: "127.0.0.1",
		},
	}
	require.Exactly(t, expected, shared.Ports)
}
2020-09-28 15:48:28 +00:00
// TestTaskGroup_validateScriptChecksInGroupServices verifies that group
// services with script checks must name a task, either on the service or on
// the individual check, and that naming it on the service satisfies all of
// its checks.
func TestTaskGroup_validateScriptChecksInGroupServices(t *testing.T) {
	t.Run("service task not set", func(t *testing.T) {
		tg := &TaskGroup{
			Name: "group1",
			Services: []*Service{{
				Name:     "service1",
				TaskName: "", // unset
				Checks: []*ServiceCheck{{
					Name:     "check1",
					Type:     "script",
					TaskName: "", // unset
				}, {
					Name: "check2",
					Type: "ttl", // not script
				}, {
					Name:     "check3",
					Type:     "script",
					TaskName: "", // unset
				}},
			}, {
				Name: "service2",
				Checks: []*ServiceCheck{{
					Type:     "script",
					TaskName: "task1", // set
				}},
			}, {
				Name:     "service3",
				TaskName: "", // unset
				Checks: []*ServiceCheck{{
					Name:     "check1",
					Type:     "script",
					TaskName: "", // unset
				}},
			}},
		}

		// Every script check with no task on either the check or its
		// service must be reported; check2 (ttl) and service2 (task set
		// on the check) must not be.
		errStr := tg.validateScriptChecksInGroupServices().Error()
		for _, want := range []string{
			"Service [group1]->service1 or Check check1 must specify task parameter",
			"Service [group1]->service1 or Check check3 must specify task parameter",
			"Service [group1]->service3 or Check check1 must specify task parameter",
		} {
			require.Contains(t, errStr, want)
		}
	})

	t.Run("service task set", func(t *testing.T) {
		// With TaskName set on the service, script checks need no task of
		// their own and validation passes.
		okGroup := &TaskGroup{
			Name: "group1",
			Services: []*Service{{
				Name:     "service1",
				TaskName: "task1",
				Checks: []*ServiceCheck{{
					Name: "check1",
					Type: "script",
				}, {
					Name: "check2",
					Type: "ttl",
				}, {
					Name: "check3",
					Type: "script",
				}},
			}},
		}
		require.Nil(t, okGroup.validateScriptChecksInGroupServices())
	})
}
2021-01-21 19:53:02 +00:00
// requireErrors asserts that err is a *multierror.Error and that, for each
// entry in expected, at least one of the wrapped errors contains that
// substring. The final Equal reports exactly which expected messages were
// missing.
func requireErrors(t *testing.T, err error, expected ...string) {
	t.Helper()
	require.Error(t, err)
	mErr, ok := err.(*multierror.Error)
	require.True(t, ok)

	// contains reports whether any wrapped error mentions the substring.
	contains := func(sub string) bool {
		for _, e := range mErr.Errors {
			if strings.Contains(e.Error(), sub) {
				return true
			}
		}
		return false
	}

	var found []string
	for _, want := range expected {
		if contains(want) {
			found = append(found, want)
		}
	}
	require.Equal(t, expected, found)
}