Merge pull request #6097 from hashicorp/f-kind-validate
Add validation for the kind field when the task is a Consul Connect proxy
commit 8c6312d973
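For context, the convention this change enforces: a task whose Kind is "connect-proxy:<service>" is treated as the Connect proxy for <service>, the proxied service must be declared at the task-group level with a Connect sidecar stanza, and the proxy task itself must not carry its own service stanza or the leader flag. The snippet below is an illustrative sketch only, not part of this diff; it assumes Nomad's usual github.com/hashicorp/nomad/nomad/structs import path and the TaskKind helper added further down.

package main

import (
    "fmt"

    "github.com/hashicorp/nomad/nomad/structs"
)

func main() {
    // "connect-proxy:<service>" marks a task as the Connect proxy for <service>.
    fmt.Println(structs.TaskKind("connect-proxy:redis").IsConnect()) // true

    // Any other kind, including a bare prefix with no service name, is not Connect.
    fmt.Println(structs.TaskKind("batch-worker").IsConnect())   // false
    fmt.Println(structs.TaskKind("connect-proxy:").IsConnect()) // false
}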
@@ -805,7 +805,7 @@ func ApiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) {
     structsTask.KillTimeout = *apiTask.KillTimeout
     structsTask.ShutdownDelay = apiTask.ShutdownDelay
     structsTask.KillSignal = apiTask.KillSignal
-    structsTask.Kind = apiTask.Kind
+    structsTask.Kind = structs.TaskKind(apiTask.Kind)
     structsTask.Constraints = ApiConstraintsToStructs(apiTask.Constraints)
     structsTask.Affinities = ApiAffinitiesToStructs(apiTask.Affinities)
@@ -4923,7 +4923,7 @@ func (tg *TaskGroup) Validate(j *Job) error {
            }
        }

-       if err := task.Validate(tg.EphemeralDisk, j.Type); err != nil {
+       if err := task.Validate(tg.EphemeralDisk, j.Type, tg.Services); err != nil {
            outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err)
            mErr.Errors = append(mErr.Errors, outer)
        }
@@ -5202,9 +5202,9 @@ type Task struct {
    // specification and defaults to SIGINT
    KillSignal string

-   // Used internally to manage tasks according to their Kind. Initial use case
+   // Used internally to manage tasks according to their TaskKind. Initial use case
    // is for Consul Connect
-   Kind string
+   Kind TaskKind
 }

 func (t *Task) Copy() *Task {
@@ -5301,7 +5301,7 @@ func (t *Task) GoString() string {
 }

 // Validate is used to sanity check a task
-func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string) error {
+func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string, tgServices []*Service) error {
    var mErr multierror.Error
    if t.Name == "" {
        mErr.Errors = append(mErr.Errors, errors.New("Missing task name"))
@@ -5410,6 +5410,21 @@ func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string) error {
        }
    }

+   // Validation for TaskKind field which is used for Consul Connect integration
+   taskKind := t.Kind
+   if taskKind.IsConnect() {
+       // This task is a Connect proxy so it should not have service stanzas
+       if len(t.Services) > 0 {
+           mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have a service stanza"))
+       }
+       if t.Leader {
+           mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy task must not have leader set"))
+       }
+       serviceErr := taskKind.validateProxyService(tgServices)
+       if serviceErr != nil {
+           mErr.Errors = append(mErr.Errors, serviceErr)
+       }
+   }
    return mErr.ErrorOrNil()
 }

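To see the new checks from a caller's point of view, here is a rough sketch (not part of this diff) that builds a deliberately misconfigured proxy task and runs it through the updated Validate signature; it assumes the github.com/hashicorp/nomad/nomad/structs import path and the same defaults used by the tests below.

package main

import (
    "fmt"
    "strings"

    "github.com/hashicorp/nomad/nomad/structs"
)

func main() {
    // Group-level service the proxy task fronts; Connect sidecar is configured here.
    groupServices := []*structs.Service{{
        Name: "redis",
        Connect: &structs.ConsulConnect{
            SidecarService: &structs.ConsulSidecarService{},
        },
    }}

    // A Connect proxy task that incorrectly sets Leader.
    task := &structs.Task{
        Name:      "connect-proxy-redis",
        Driver:    "docker",
        Resources: structs.DefaultResources(),
        LogConfig: structs.DefaultLogConfig(),
        Kind:      structs.TaskKind("connect-proxy:redis"),
        Leader:    true,
    }

    // Group services are now threaded through Validate so the proxy's
    // target service can be checked against the task group.
    err := task.Validate(structs.DefaultEphemeralDisk(), structs.JobTypeService, groupServices)
    fmt.Println(strings.Contains(err.Error(), "must not have leader set")) // true
}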
@@ -5553,6 +5568,40 @@ func (t *Task) Warnings() error {
    return mErr.ErrorOrNil()
 }

+type TaskKind string
+
+const connectPrefix = "connect-proxy:"
+
+func (k TaskKind) IsConnect() bool {
+   return strings.HasPrefix(string(k), connectPrefix) && len(k) > len(connectPrefix)
+}
+
+// validateProxyService checks that the service that is being
+// proxied by this task exists in the task group and contains
+// valid Connect config.
+func (k TaskKind) validateProxyService(tgServices []*Service) error {
+   var mErr multierror.Error
+   parts := strings.Split(string(k), ":")
+   serviceName := strings.Join(parts[1:], "")
+   if len(parts) > 2 {
+       mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy service kind %q must not contain `:`", k))
+   }
+
+   found := false
+   for _, svc := range tgServices {
+       if svc.Name == serviceName && svc.Connect != nil && svc.Connect.SidecarService != nil {
+           found = true
+           break
+       }
+   }
+
+   if !found {
+       mErr.Errors = append(mErr.Errors, fmt.Errorf("Connect proxy service name not found in services from task group"))
+   }
+
+   return mErr.ErrorOrNil()
+}
+
 const (
    // TemplateChangeModeNoop marks that no action should be taken if the
    // template is re-rendered
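Since validateProxyService is unexported, it cannot be exercised from outside the package; as a rough illustration of its parsing rules only, the hypothetical proxiedService helper below mirrors how the target service name is extracted from the kind and why a second ":" is rejected. The extracted name must then match a task-group service whose Connect.SidecarService is set.

package main

import (
    "errors"
    "fmt"
    "strings"
)

// proxiedService is a hypothetical standalone mirror (not part of this change)
// of the parsing done by TaskKind.validateProxyService: everything after the
// first ":" names the proxied service, and extra colons are an error.
func proxiedService(kind string) (string, error) {
    parts := strings.Split(kind, ":")
    if len(parts) > 2 {
        return "", errors.New("kind must not contain `:` in the service name")
    }
    return strings.Join(parts[1:], ""), nil
}

func main() {
    name, err := proxiedService("connect-proxy:redis")
    fmt.Println(name, err) // redis <nil>

    _, err = proxiedService("connect-proxy:redis:test")
    fmt.Println(err) // kind must not contain `:` in the service name
}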
@@ -942,7 +942,7 @@ func TestTaskGroup_Validate(t *testing.T) {
 func TestTask_Validate(t *testing.T) {
    task := &Task{}
    ephemeralDisk := DefaultEphemeralDisk()
-   err := task.Validate(ephemeralDisk, JobTypeBatch)
+   err := task.Validate(ephemeralDisk, JobTypeBatch, nil)
    mErr := err.(*multierror.Error)
    if !strings.Contains(mErr.Errors[0].Error(), "task name") {
        t.Fatalf("err: %s", err)
@@ -955,7 +955,7 @@ func TestTask_Validate(t *testing.T) {
    }

    task = &Task{Name: "web/foo"}
-   err = task.Validate(ephemeralDisk, JobTypeBatch)
+   err = task.Validate(ephemeralDisk, JobTypeBatch, nil)
    mErr = err.(*multierror.Error)
    if !strings.Contains(mErr.Errors[0].Error(), "slashes") {
        t.Fatalf("err: %s", err)
@@ -971,7 +971,7 @@ func TestTask_Validate(t *testing.T) {
        LogConfig: DefaultLogConfig(),
    }
    ephemeralDisk.SizeMB = 200
-   err = task.Validate(ephemeralDisk, JobTypeBatch)
+   err = task.Validate(ephemeralDisk, JobTypeBatch, nil)
    if err != nil {
        t.Fatalf("err: %s", err)
    }
@@ -985,7 +985,7 @@ func TestTask_Validate(t *testing.T) {
        LTarget: "${meta.rack}",
    })

-   err = task.Validate(ephemeralDisk, JobTypeBatch)
+   err = task.Validate(ephemeralDisk, JobTypeBatch, nil)
    mErr = err.(*multierror.Error)
    if !strings.Contains(mErr.Errors[0].Error(), "task level: distinct_hosts") {
        t.Fatalf("err: %s", err)
@@ -1067,7 +1067,7 @@ func TestTask_Validate_Services(t *testing.T) {
        },
    }

-   err := task.Validate(ephemeralDisk, JobTypeService)
+   err := task.Validate(ephemeralDisk, JobTypeService, nil)
    if err == nil {
        t.Fatal("expected an error")
    }
@@ -1088,7 +1088,7 @@ func TestTask_Validate_Services(t *testing.T) {
        t.Fatalf("err: %v", err)
    }

-   if err = task1.Validate(ephemeralDisk, JobTypeService); err != nil {
+   if err = task1.Validate(ephemeralDisk, JobTypeService, nil); err != nil {
        t.Fatalf("err : %v", err)
    }
 }
@@ -1147,7 +1147,7 @@ func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) {
    for _, service := range cases {
        task := getTask(service)
        t.Run(service.Name, func(t *testing.T) {
-           if err := task.Validate(ephemeralDisk, JobTypeService); err != nil {
+           if err := task.Validate(ephemeralDisk, JobTypeService, nil); err != nil {
                t.Fatalf("unexpected err: %v", err)
            }
        })
@@ -1200,7 +1200,7 @@ func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) {
    for _, service := range cases {
        task := getTask(service)
        t.Run(service.Name, func(t *testing.T) {
-           err := task.Validate(ephemeralDisk, JobTypeService)
+           err := task.Validate(ephemeralDisk, JobTypeService, nil)
            if err == nil {
                t.Fatalf("expected an error")
            }
@@ -1513,6 +1513,117 @@ func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) {
    assert.Nil(t, validCheckRestart.Validate())
 }

+func TestTask_Validate_ConnectKind(t *testing.T) {
+   ephemeralDisk := DefaultEphemeralDisk()
+   getTask := func(kind TaskKind, leader bool) *Task {
+       task := &Task{
+           Name:      "web",
+           Driver:    "docker",
+           Resources: DefaultResources(),
+           LogConfig: DefaultLogConfig(),
+           Kind:      kind,
+           Leader:    leader,
+       }
+       task.Resources.Networks = []*NetworkResource{
+           {
+               MBits: 10,
+               DynamicPorts: []Port{
+                   {
+                       Label: "http",
+                       Value: 80,
+                   },
+               },
+           },
+       }
+       return task
+   }
+
+   cases := []struct {
+       Desc        string
+       Kind        TaskKind
+       Leader      bool
+       Service     *Service
+       TgService   []*Service
+       ErrContains string
+   }{
+       {
+           Desc: "Not connect",
+           Kind: "test",
+       },
+       {
+           Desc: "Invalid because of service in task definition",
+           Kind: "connect-proxy:redis",
+           Service: &Service{
+               Name: "redis",
+           },
+           ErrContains: "Connect proxy task must not have a service stanza",
+       },
+       {
+           Desc:   "Leader should not be set",
+           Kind:   "connect-proxy:redis",
+           Leader: true,
+           Service: &Service{
+               Name: "redis",
+           },
+           ErrContains: "Connect proxy task must not have leader set",
+       },
+       {
+           Desc: "Service name invalid",
+           Kind: "connect-proxy:redis:test",
+           Service: &Service{
+               Name: "redis",
+           },
+           ErrContains: "Connect proxy service kind \"connect-proxy:redis:test\" must not contain `:`",
+       },
+       {
+           Desc:        "Service name not found in group",
+           Kind:        "connect-proxy:redis",
+           ErrContains: "Connect proxy service name not found in services from task group",
+       },
+       {
+           Desc: "Connect stanza not configured in group",
+           Kind: "connect-proxy:redis",
+           TgService: []*Service{{
+               Name: "redis",
+           }},
+           ErrContains: "Connect proxy service name not found in services from task group",
+       },
+       {
+           Desc: "Valid connect proxy kind",
+           Kind: "connect-proxy:redis",
+           TgService: []*Service{{
+               Name: "redis",
+               Connect: &ConsulConnect{
+                   SidecarService: &ConsulSidecarService{
+                       Port: "db",
+                   },
+               },
+           }},
+       },
+   }
+
+   for _, tc := range cases {
+       tc := tc
+       task := getTask(tc.Kind, tc.Leader)
+       if tc.Service != nil {
+           task.Services = []*Service{tc.Service}
+       }
+       t.Run(tc.Desc, func(t *testing.T) {
+           err := task.Validate(ephemeralDisk, "service", tc.TgService)
+           if err == nil && tc.ErrContains == "" {
+               // Ok!
+               return
+           }
+           if err == nil {
+               t.Fatalf("no error returned. expected: %s", tc.ErrContains)
+           }
+           if !strings.Contains(err.Error(), tc.ErrContains) {
+               t.Fatalf("expected %q but found: %v", tc.ErrContains, err)
+           }
+       })
+   }
+
+}
 func TestTask_Validate_LogConfig(t *testing.T) {
    task := &Task{
        LogConfig: DefaultLogConfig(),
@@ -1521,7 +1632,7 @@ func TestTask_Validate_LogConfig(t *testing.T) {
        SizeMB: 1,
    }

-   err := task.Validate(ephemeralDisk, JobTypeService)
+   err := task.Validate(ephemeralDisk, JobTypeService, nil)
    mErr := err.(*multierror.Error)
    if !strings.Contains(mErr.Errors[3].Error(), "log storage") {
        t.Fatalf("err: %s", err)
@@ -1538,7 +1649,7 @@ func TestTask_Validate_Template(t *testing.T) {
        SizeMB: 1,
    }

-   err := task.Validate(ephemeralDisk, JobTypeService)
+   err := task.Validate(ephemeralDisk, JobTypeService, nil)
    if !strings.Contains(err.Error(), "Template 1 validation failed") {
        t.Fatalf("err: %s", err)
    }
@@ -1551,7 +1662,7 @@ func TestTask_Validate_Template(t *testing.T) {
    }

    task.Templates = []*Template{good, good}
-   err = task.Validate(ephemeralDisk, JobTypeService)
+   err = task.Validate(ephemeralDisk, JobTypeService, nil)
    if !strings.Contains(err.Error(), "same destination as") {
        t.Fatalf("err: %s", err)
    }
@@ -1564,7 +1675,7 @@ func TestTask_Validate_Template(t *testing.T) {
        },
    }

-   err = task.Validate(ephemeralDisk, JobTypeService)
+   err = task.Validate(ephemeralDisk, JobTypeService, nil)
    if err == nil {
        t.Fatalf("expected error from Template.Validate")
    }