package jobspec

import (
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/hcl"
	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/nomad/api"
	"github.com/mitchellh/mapstructure"
)
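// parseGroups parses each "group" block from the job's HCL AST into an
// api.TaskGroup and appends the results to the job's task groups.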
func parseGroups(result *api.Job, list *ast.ObjectList) error {
	list = list.Children()
	if len(list.Items) == 0 {
		return nil
	}

	// Go through each object and turn it into an actual result.
	collection := make([]*api.TaskGroup, 0, len(list.Items))
	seen := make(map[string]struct{})
	for _, item := range list.Items {
		n := item.Keys[0].Token.Value().(string)

		// Make sure we haven't already found this
		if _, ok := seen[n]; ok {
			return fmt.Errorf("group '%s' defined more than once", n)
		}
		seen[n] = struct{}{}

		// We need this later
		var listVal *ast.ObjectList
		if ot, ok := item.Val.(*ast.ObjectType); ok {
			listVal = ot.List
		} else {
			return fmt.Errorf("group '%s': should be an object", n)
		}

		// Check for invalid keys
		valid := []string{
			"count",
			"constraint",
			"affinity",
			"restart",
			"meta",
			"task",
			"ephemeral_disk",
			"update",
			"reschedule",
			"vault",
			"migrate",
			"spread",
			"shutdown_delay",
			"network",
			"service",
			"volume",
			"scaling",
			"stop_after_client_disconnect",
		}
		if err := checkHCLKeys(listVal, valid); err != nil {
			return multierror.Prefix(err, fmt.Sprintf("'%s' ->", n))
		}

		var m map[string]interface{}
		if err := hcl.DecodeObject(&m, item.Val); err != nil {
			return err
		}

		delete(m, "constraint")
		delete(m, "affinity")
		delete(m, "meta")
		delete(m, "task")
		delete(m, "restart")
		delete(m, "ephemeral_disk")
		delete(m, "update")
		delete(m, "vault")
		delete(m, "migrate")
		delete(m, "spread")
		delete(m, "network")
		delete(m, "service")
		delete(m, "volume")
		delete(m, "scaling")

		// Build the group with the basic decode
		var g api.TaskGroup
		g.Name = stringToPtr(n)
		dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
			DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
			WeaklyTypedInput: true,
			Result:           &g,
		})

		if err != nil {
			return err
		}
		if err := dec.Decode(m); err != nil {
			return err
		}

		// Parse constraints
		if o := listVal.Filter("constraint"); len(o.Items) > 0 {
			if err := parseConstraints(&g.Constraints, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', constraint ->", n))
			}
		}

		// Parse affinities
		if o := listVal.Filter("affinity"); len(o.Items) > 0 {
			if err := parseAffinities(&g.Affinities, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', affinity ->", n))
			}
		}

		// Parse restart policy
		if o := listVal.Filter("restart"); len(o.Items) > 0 {
			if err := parseRestartPolicy(&g.RestartPolicy, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', restart ->", n))
			}
		}

		// Parse spread
		if o := listVal.Filter("spread"); len(o.Items) > 0 {
			if err := parseSpread(&g.Spreads, o); err != nil {
				return multierror.Prefix(err, "spread ->")
			}
		}

		// Parse network
		if o := listVal.Filter("network"); len(o.Items) > 0 {
			networks, err := ParseNetwork(o)
			if err != nil {
				return err
			}
			g.Networks = []*api.NetworkResource{networks}
		}

		// Parse reschedule policy
		if o := listVal.Filter("reschedule"); len(o.Items) > 0 {
			if err := parseReschedulePolicy(&g.ReschedulePolicy, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', reschedule ->", n))
			}
		}

		// Parse ephemeral disk
		if o := listVal.Filter("ephemeral_disk"); len(o.Items) > 0 {
			g.EphemeralDisk = &api.EphemeralDisk{}
			if err := parseEphemeralDisk(&g.EphemeralDisk, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', ephemeral_disk ->", n))
			}
		}

		// If we have an update strategy, then parse that
		if o := listVal.Filter("update"); len(o.Items) > 0 {
			if err := parseUpdate(&g.Update, o); err != nil {
				return multierror.Prefix(err, "update ->")
			}
		}

		// If we have a migration strategy, then parse that
		if o := listVal.Filter("migrate"); len(o.Items) > 0 {
			if err := parseMigrate(&g.Migrate, o); err != nil {
				return multierror.Prefix(err, "migrate ->")
			}
		}

		// Parse out meta fields. These are in HCL as a list so we need
		// to iterate over them and merge them.
		if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 {
			for _, o := range metaO.Elem().Items {
				var m map[string]interface{}
				if err := hcl.DecodeObject(&m, o.Val); err != nil {
					return err
				}
				if err := mapstructure.WeakDecode(m, &g.Meta); err != nil {
					return err
				}
			}
		}

		// Parse any volume declarations
		if o := listVal.Filter("volume"); len(o.Items) > 0 {
			if err := parseVolumes(&g.Volumes, o); err != nil {
				return multierror.Prefix(err, "volume ->")
			}
		}

		// Parse scaling policy
		if o := listVal.Filter("scaling"); len(o.Items) > 0 {
			if err := parseScalingPolicy(&g.Scaling, o); err != nil {
				return multierror.Prefix(err, "scaling ->")
			}
		}

		// Parse tasks
		if o := listVal.Filter("task"); len(o.Items) > 0 {
			if err := parseTasks(&g.Tasks, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', task:", n))
			}
		}

		// If we have a vault block, then parse that
		if o := listVal.Filter("vault"); len(o.Items) > 0 {
			tgVault := &api.Vault{
				Env:        boolToPtr(true),
				ChangeMode: stringToPtr("restart"),
			}

			if err := parseVault(tgVault, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s', vault ->", n))
			}

			// Go through the tasks and if they don't have a Vault block, set it
			for _, task := range g.Tasks {
				if task.Vault == nil {
					task.Vault = tgVault
				}
			}
		}

		if o := listVal.Filter("service"); len(o.Items) > 0 {
			if err := parseGroupServices(&g, o); err != nil {
				return multierror.Prefix(err, fmt.Sprintf("'%s',", n))
			}
		}

		collection = append(collection, &g)
	}

	result.TaskGroups = append(result.TaskGroups, collection...)
	return nil
}
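// parseEphemeralDisk decodes a single "ephemeral_disk" block into an
// api.EphemeralDisk and stores it in result.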
func parseEphemeralDisk(result **api.EphemeralDisk, list *ast.ObjectList) error {
	list = list.Elem()
	if len(list.Items) > 1 {
		return fmt.Errorf("only one 'ephemeral_disk' block allowed")
	}

	// Get our ephemeral_disk object
	obj := list.Items[0]

	// Check for invalid keys
	valid := []string{
		"sticky",
		"size",
		"migrate",
	}
	if err := checkHCLKeys(obj.Val, valid); err != nil {
		return err
	}

	var m map[string]interface{}
	if err := hcl.DecodeObject(&m, obj.Val); err != nil {
		return err
	}

	var ephemeralDisk api.EphemeralDisk
	if err := mapstructure.WeakDecode(m, &ephemeralDisk); err != nil {
		return err
	}
	*result = &ephemeralDisk

	return nil
}
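// parseRestartPolicy decodes a single "restart" block into an
// api.RestartPolicy, converting duration strings along the way.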
func parseRestartPolicy(final **api.RestartPolicy, list *ast.ObjectList) error {
	list = list.Elem()
	if len(list.Items) > 1 {
		return fmt.Errorf("only one 'restart' block allowed")
	}

	// Get our restart object
	obj := list.Items[0]

	// Check for invalid keys
	valid := []string{
		"attempts",
		"interval",
		"delay",
		"mode",
	}
	if err := checkHCLKeys(obj.Val, valid); err != nil {
		return err
	}

	var m map[string]interface{}
	if err := hcl.DecodeObject(&m, obj.Val); err != nil {
		return err
	}

	var result api.RestartPolicy
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
		WeaklyTypedInput: true,
		Result:           &result,
	})
	if err != nil {
		return err
	}
	if err := dec.Decode(m); err != nil {
		return err
	}

	*final = &result
	return nil
}
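// parseVolumes decodes the "volume" blocks of a group into a map of
// api.VolumeRequest values keyed (and named) by the block label.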
func parseVolumes(out *map[string]*api.VolumeRequest, list *ast.ObjectList) error {
	if err := hcl.DecodeObject(out, list); err != nil {
		return err
	}

	for k, v := range *out {
		if err := unusedKeys(v); err != nil {
			return err
		}
		// This is supported by `hcl:",key"`, but that only works if we start at the
		// parent ast.ObjectItem
		v.Name = k
	}

	return nil
}
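// parseScalingPolicy decodes a single "scaling" block, including its nested
// "policy" block, into an api.ScalingPolicy.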
func parseScalingPolicy(out **api.ScalingPolicy, list *ast.ObjectList) error {
	list = list.Elem()
	if len(list.Items) > 1 {
		return fmt.Errorf("only one 'scaling' block allowed")
	}

	// Get our scaling object
	o := list.Items[0]

	// We need this later
	var listVal *ast.ObjectList
	if ot, ok := o.Val.(*ast.ObjectType); ok {
		listVal = ot.List
	} else {
		return fmt.Errorf("should be an object")
	}

	valid := []string{
		"min",
		"max",
		"policy",
		"enabled",
	}
	if err := checkHCLKeys(o.Val, valid); err != nil {
		return err
	}

	var m map[string]interface{}
	if err := hcl.DecodeObject(&m, o.Val); err != nil {
		return err
	}
	delete(m, "policy")

	var result api.ScalingPolicy
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		WeaklyTypedInput: true,
		Result:           &result,
	})
	if err != nil {
		return err
	}
	if err := dec.Decode(m); err != nil {
		return err
	}
	if result.Max == nil {
		return fmt.Errorf("missing 'max'")
	}

	// If we have policy, then parse that
	if o := listVal.Filter("policy"); len(o.Items) > 0 {
		if len(o.Elem().Items) > 1 {
			return fmt.Errorf("only one 'policy' block allowed per 'scaling' block")
		}
		p := o.Elem().Items[0]
		var m map[string]interface{}
		if err := hcl.DecodeObject(&m, p.Val); err != nil {
			return err
		}
		if err := mapstructure.WeakDecode(m, &result.Policy); err != nil {
			return err
		}
	}

	*out = &result
	return nil
}