2015-09-09 20:02:39 +00:00
|
|
|
package api
|
|
|
|
|
2015-10-30 23:32:05 +00:00
|
|
|
import (
|
2017-02-22 20:30:05 +00:00
|
|
|
"fmt"
|
2017-07-06 03:44:49 +00:00
|
|
|
"path"
|
|
|
|
"path/filepath"
|
2017-02-13 23:18:17 +00:00
|
|
|
"strings"
|
2015-10-30 23:32:05 +00:00
|
|
|
"time"
|
|
|
|
)
|
|
|
|
|
2019-01-18 23:36:16 +00:00
|
|
|
// Restart policy modes: what the client does once a task has hit the
// configured number of restart attempts within the interval.
const (
	// RestartPolicyModeDelay causes an artificial delay till the next interval is
	// reached when the specified attempts have been reached in the interval.
	RestartPolicyModeDelay = "delay"

	// RestartPolicyModeFail causes a job to fail if the specified number of
	// attempts are reached within an interval.
	RestartPolicyModeFail = "fail"
)
|
|
|
|
|
2016-05-27 22:24:22 +00:00
|
|
|
// MemoryStats holds memory usage related stats
type MemoryStats struct {
	RSS            uint64   // resident set size
	Cache          uint64   // page cache usage
	Swap           uint64   // swap usage
	Usage          uint64   // current total memory usage
	MaxUsage       uint64   // peak memory usage observed
	KernelUsage    uint64   // kernel memory usage
	KernelMaxUsage uint64   // peak kernel memory usage
	Measured       []string // names of the stats above that were actually measured
}
|
|
|
|
|
2016-05-27 22:24:22 +00:00
|
|
|
// CpuStats holds cpu usage related stats
type CpuStats struct {
	SystemMode       float64  // CPU spent in system (kernel) mode
	UserMode         float64  // CPU spent in user mode
	TotalTicks       float64  // total CPU ticks consumed
	ThrottledPeriods uint64   // number of periods the task was throttled
	ThrottledTime    uint64   // total time the task was throttled
	Percent          float64  // overall CPU usage percentage
	Measured         []string // names of the stats above that were actually measured
}
|
|
|
|
|
2016-05-27 22:24:22 +00:00
|
|
|
// ResourceUsage holds information related to cpu and memory stats
type ResourceUsage struct {
	MemoryStats *MemoryStats
	CpuStats    *CpuStats
	// DeviceStats holds per-device-group statistics (e.g. attached devices).
	DeviceStats []*DeviceGroupStats
}
|
|
|
|
|
2016-05-27 22:24:22 +00:00
|
|
|
// TaskResourceUsage holds aggregated resource usage of all processes in a Task
// and the resource usage of the individual pids
type TaskResourceUsage struct {
	// ResourceUsage is the aggregate usage across all of the task's processes.
	ResourceUsage *ResourceUsage
	// Timestamp records when the sample was taken.
	Timestamp int64
	// Pids maps individual process IDs to their own usage sample.
	Pids map[string]*ResourceUsage
}
|
|
|
|
|
2016-06-12 03:15:50 +00:00
|
|
|
// AllocResourceUsage holds the aggregated task resource usage of the
// allocation.
type AllocResourceUsage struct {
	// ResourceUsage is the aggregate usage across all tasks in the allocation.
	ResourceUsage *ResourceUsage
	// Tasks maps task names to their individual resource usage.
	Tasks map[string]*TaskResourceUsage
	// Timestamp records when the sample was taken.
	Timestamp int64
}
|
|
|
|
|
2022-08-05 13:30:17 +00:00
|
|
|
// AllocCheckStatus contains the current status of a nomad service discovery check.
type AllocCheckStatus struct {
	ID         string // identifier of the check
	Check      string // name of the check
	Group      string // task group the check belongs to
	Mode       string // check mode
	Output     string // output produced by the most recent execution
	Service    string // service the check is attached to
	Task       string // task the check is attached to (empty for group-level checks — TODO confirm)
	Status     string // current status of the check
	StatusCode int    // status code from the most recent execution
	Timestamp  int64  // when the status was last updated
}
|
|
|
|
|
|
|
|
// AllocCheckStatuses holds the set of nomad service discovery checks within
// the allocation (including group and task level service checks), keyed by
// check identifier.
type AllocCheckStatuses map[string]AllocCheckStatus
|
|
|
|
|
2015-11-02 21:24:59 +00:00
|
|
|
// RestartPolicy defines how the Nomad client restarts
// tasks in a taskgroup when they fail
type RestartPolicy struct {
	Interval *time.Duration `hcl:"interval,optional"` // window in which Attempts applies
	Attempts *int           `hcl:"attempts,optional"` // max restarts within Interval
	Delay    *time.Duration `hcl:"delay,optional"`    // wait before a restart
	Mode     *string        `hcl:"mode,optional"`     // RestartPolicyModeDelay or RestartPolicyModeFail
}
|
|
|
|
|
|
|
|
func (r *RestartPolicy) Merge(rp *RestartPolicy) {
|
|
|
|
if rp.Interval != nil {
|
|
|
|
r.Interval = rp.Interval
|
|
|
|
}
|
|
|
|
if rp.Attempts != nil {
|
|
|
|
r.Attempts = rp.Attempts
|
|
|
|
}
|
|
|
|
if rp.Delay != nil {
|
|
|
|
r.Delay = rp.Delay
|
|
|
|
}
|
|
|
|
if rp.Mode != nil {
|
|
|
|
r.Mode = rp.Mode
|
|
|
|
}
|
2015-10-31 04:28:56 +00:00
|
|
|
}
|
|
|
|
|
2018-01-18 20:49:01 +00:00
|
|
|
// Reschedule configures how Tasks are rescheduled when they crash or fail.
type ReschedulePolicy struct {
	// Attempts limits the number of rescheduling attempts that can occur in an interval.
	Attempts *int `mapstructure:"attempts" hcl:"attempts,optional"`

	// Interval is a duration in which we can limit the number of reschedule attempts.
	Interval *time.Duration `mapstructure:"interval" hcl:"interval,optional"`

	// Delay is a minimum duration to wait between reschedule attempts.
	// The delay function determines how much subsequent reschedule attempts are delayed by.
	Delay *time.Duration `mapstructure:"delay" hcl:"delay,optional"`

	// DelayFunction determines how the delay progressively changes on subsequent reschedule
	// attempts. Valid values are "exponential", "constant", and "fibonacci".
	DelayFunction *string `mapstructure:"delay_function" hcl:"delay_function,optional"`

	// MaxDelay is an upper bound on the delay.
	MaxDelay *time.Duration `mapstructure:"max_delay" hcl:"max_delay,optional"`

	// Unlimited allows rescheduling attempts until they succeed
	Unlimited *bool `mapstructure:"unlimited" hcl:"unlimited,optional"`
}
|
|
|
|
|
|
|
|
func (r *ReschedulePolicy) Merge(rp *ReschedulePolicy) {
|
2018-04-11 20:26:01 +00:00
|
|
|
if rp == nil {
|
|
|
|
return
|
|
|
|
}
|
2018-01-18 20:49:01 +00:00
|
|
|
if rp.Interval != nil {
|
|
|
|
r.Interval = rp.Interval
|
|
|
|
}
|
|
|
|
if rp.Attempts != nil {
|
|
|
|
r.Attempts = rp.Attempts
|
|
|
|
}
|
2018-02-23 16:23:32 +00:00
|
|
|
if rp.Delay != nil {
|
|
|
|
r.Delay = rp.Delay
|
|
|
|
}
|
|
|
|
if rp.DelayFunction != nil {
|
|
|
|
r.DelayFunction = rp.DelayFunction
|
|
|
|
}
|
2018-03-13 15:06:26 +00:00
|
|
|
if rp.MaxDelay != nil {
|
|
|
|
r.MaxDelay = rp.MaxDelay
|
2018-02-23 16:23:32 +00:00
|
|
|
}
|
|
|
|
if rp.Unlimited != nil {
|
|
|
|
r.Unlimited = rp.Unlimited
|
|
|
|
}
|
2018-01-18 20:49:01 +00:00
|
|
|
}
|
|
|
|
|
2018-04-11 23:47:27 +00:00
|
|
|
func (r *ReschedulePolicy) Canonicalize(jobType string) {
|
|
|
|
dp := NewDefaultReschedulePolicy(jobType)
|
|
|
|
if r.Interval == nil {
|
|
|
|
r.Interval = dp.Interval
|
|
|
|
}
|
|
|
|
if r.Attempts == nil {
|
|
|
|
r.Attempts = dp.Attempts
|
|
|
|
}
|
|
|
|
if r.Delay == nil {
|
|
|
|
r.Delay = dp.Delay
|
|
|
|
}
|
|
|
|
if r.DelayFunction == nil {
|
|
|
|
r.DelayFunction = dp.DelayFunction
|
|
|
|
}
|
|
|
|
if r.MaxDelay == nil {
|
|
|
|
r.MaxDelay = dp.MaxDelay
|
|
|
|
}
|
|
|
|
if r.Unlimited == nil {
|
|
|
|
r.Unlimited = dp.Unlimited
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-16 13:30:58 +00:00
|
|
|
// Affinity is used to serialize task group affinities
type Affinity struct {
	LTarget string `hcl:"attribute,optional"` // Left-hand target
	RTarget string `hcl:"value,optional"`     // Right-hand target
	Operand string `hcl:"operator,optional"`  // Constraint operand (<=, <, =, !=, >, >=), set_contains_all, set_contains_any
	Weight  *int8  `hcl:"weight,optional"`    // Weight applied to nodes that match the affinity. Can be negative
}
|
|
|
|
|
2021-04-03 07:50:23 +00:00
|
|
|
func NewAffinity(lTarget string, operand string, rTarget string, weight int8) *Affinity {
|
2018-07-16 13:30:58 +00:00
|
|
|
return &Affinity{
|
2021-04-03 07:50:23 +00:00
|
|
|
LTarget: lTarget,
|
|
|
|
RTarget: rTarget,
|
|
|
|
Operand: operand,
|
2022-08-17 16:26:34 +00:00
|
|
|
Weight: pointerOf(int8(weight)),
|
2019-01-30 20:20:38 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (a *Affinity) Canonicalize() {
|
|
|
|
if a.Weight == nil {
|
2022-08-17 16:26:34 +00:00
|
|
|
a.Weight = pointerOf(int8(50))
|
2018-07-16 13:30:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-11 23:47:27 +00:00
|
|
|
func NewDefaultReschedulePolicy(jobType string) *ReschedulePolicy {
|
|
|
|
var dp *ReschedulePolicy
|
|
|
|
switch jobType {
|
|
|
|
case "service":
|
2019-01-18 23:36:16 +00:00
|
|
|
// This needs to be in sync with DefaultServiceJobReschedulePolicy
|
|
|
|
// in nomad/structs/structs.go
|
2018-04-11 23:47:27 +00:00
|
|
|
dp = &ReschedulePolicy{
|
2022-08-17 16:26:34 +00:00
|
|
|
Delay: pointerOf(30 * time.Second),
|
|
|
|
DelayFunction: pointerOf("exponential"),
|
|
|
|
MaxDelay: pointerOf(1 * time.Hour),
|
|
|
|
Unlimited: pointerOf(true),
|
2019-01-18 23:36:16 +00:00
|
|
|
|
2022-08-17 16:26:34 +00:00
|
|
|
Attempts: pointerOf(0),
|
|
|
|
Interval: pointerOf(time.Duration(0)),
|
2018-04-11 23:47:27 +00:00
|
|
|
}
|
|
|
|
case "batch":
|
2019-01-18 23:36:16 +00:00
|
|
|
// This needs to be in sync with DefaultBatchJobReschedulePolicy
|
|
|
|
// in nomad/structs/structs.go
|
2018-04-11 23:47:27 +00:00
|
|
|
dp = &ReschedulePolicy{
|
2022-08-17 16:26:34 +00:00
|
|
|
Attempts: pointerOf(1),
|
|
|
|
Interval: pointerOf(24 * time.Hour),
|
|
|
|
Delay: pointerOf(5 * time.Second),
|
|
|
|
DelayFunction: pointerOf("constant"),
|
2019-01-18 23:36:16 +00:00
|
|
|
|
2022-08-17 16:26:34 +00:00
|
|
|
MaxDelay: pointerOf(time.Duration(0)),
|
|
|
|
Unlimited: pointerOf(false),
|
2018-04-11 23:47:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
case "system":
|
|
|
|
dp = &ReschedulePolicy{
|
2022-08-17 16:26:34 +00:00
|
|
|
Attempts: pointerOf(0),
|
|
|
|
Interval: pointerOf(time.Duration(0)),
|
|
|
|
Delay: pointerOf(time.Duration(0)),
|
|
|
|
DelayFunction: pointerOf(""),
|
|
|
|
MaxDelay: pointerOf(time.Duration(0)),
|
|
|
|
Unlimited: pointerOf(false),
|
2018-04-11 23:47:27 +00:00
|
|
|
}
|
2020-02-21 08:14:36 +00:00
|
|
|
|
|
|
|
default:
|
|
|
|
// GH-7203: it is possible an unknown job type is passed to this
|
|
|
|
// function and we need to ensure a non-nil object is returned so that
|
|
|
|
// the canonicalization runs without panicking.
|
|
|
|
dp = &ReschedulePolicy{
|
2022-08-17 16:26:34 +00:00
|
|
|
Attempts: pointerOf(0),
|
|
|
|
Interval: pointerOf(time.Duration(0)),
|
|
|
|
Delay: pointerOf(time.Duration(0)),
|
|
|
|
DelayFunction: pointerOf(""),
|
|
|
|
MaxDelay: pointerOf(time.Duration(0)),
|
|
|
|
Unlimited: pointerOf(false),
|
2020-02-21 08:14:36 +00:00
|
|
|
}
|
2018-04-11 23:47:27 +00:00
|
|
|
}
|
|
|
|
return dp
|
|
|
|
}
|
|
|
|
|
2018-01-18 20:49:01 +00:00
|
|
|
func (r *ReschedulePolicy) Copy() *ReschedulePolicy {
|
|
|
|
if r == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
nrp := new(ReschedulePolicy)
|
|
|
|
*nrp = *r
|
|
|
|
return nrp
|
|
|
|
}
|
|
|
|
|
2018-03-20 18:34:29 +00:00
|
|
|
// String returns a human-readable summary of the policy. A nil receiver
// yields "". NOTE(review): all fields are dereferenced unconditionally, so
// the policy must have been populated (e.g. via Canonicalize or Merge)
// before calling this — a nil field panics here.
func (p *ReschedulePolicy) String() string {
	if p == nil {
		return ""
	}
	if *p.Unlimited {
		return fmt.Sprintf("unlimited with %v delay, max_delay = %v", *p.DelayFunction, *p.MaxDelay)
	}
	return fmt.Sprintf("%v in %v with %v delay, max_delay = %v", *p.Attempts, *p.Interval, *p.DelayFunction, *p.MaxDelay)
}
|
|
|
|
|
2018-07-18 15:53:03 +00:00
|
|
|
// Spread is used to serialize task group allocation spread preferences
type Spread struct {
	// Attribute is the node attribute to spread allocations over.
	Attribute string `hcl:"attribute,optional"`
	// Weight is the relative importance of this spread; defaults to 50 (see Canonicalize).
	Weight *int8 `hcl:"weight,optional"`
	// SpreadTarget lists the desired percentages per attribute value.
	SpreadTarget []*SpreadTarget `hcl:"target,block"`
}
|
|
|
|
|
|
|
|
// SpreadTarget is used to serialize target allocation spread percentages
type SpreadTarget struct {
	// Value is the attribute value this target applies to (the HCL block label).
	Value string `hcl:",label"`
	// Percent is the desired percentage of allocations for this value.
	Percent uint8 `hcl:"percent,optional"`
}
|
|
|
|
|
2019-01-30 20:20:38 +00:00
|
|
|
func NewSpreadTarget(value string, percent uint8) *SpreadTarget {
|
2018-07-18 15:53:03 +00:00
|
|
|
return &SpreadTarget{
|
|
|
|
Value: value,
|
|
|
|
Percent: percent,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-30 20:20:38 +00:00
|
|
|
func NewSpread(attribute string, weight int8, spreadTargets []*SpreadTarget) *Spread {
|
2018-07-18 15:53:03 +00:00
|
|
|
return &Spread{
|
|
|
|
Attribute: attribute,
|
2022-08-17 16:26:34 +00:00
|
|
|
Weight: pointerOf(int8(weight)),
|
2018-07-18 15:53:03 +00:00
|
|
|
SpreadTarget: spreadTargets,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-11 15:48:12 +00:00
|
|
|
func (s *Spread) Canonicalize() {
|
|
|
|
if s.Weight == nil {
|
2022-08-17 16:26:34 +00:00
|
|
|
s.Weight = pointerOf(int8(50))
|
2019-01-11 15:48:12 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-09-14 22:43:42 +00:00
|
|
|
// EphemeralDisk is an ephemeral disk object
type EphemeralDisk struct {
	// Sticky indicates the scheduler should try to place updated allocations
	// on the same node — TODO confirm against scheduler docs.
	Sticky *bool `hcl:"sticky,optional"`
	// Migrate indicates ephemeral disk data should be migrated when the
	// allocation moves — TODO confirm.
	Migrate *bool `hcl:"migrate,optional"`
	// SizeMB is the disk size in megabytes; defaults to 300 (see Canonicalize).
	SizeMB *int `mapstructure:"size" hcl:"size,optional"`
}
|
|
|
|
|
|
|
|
func DefaultEphemeralDisk() *EphemeralDisk {
|
|
|
|
return &EphemeralDisk{
|
2022-08-17 16:26:34 +00:00
|
|
|
Sticky: pointerOf(false),
|
|
|
|
Migrate: pointerOf(false),
|
|
|
|
SizeMB: pointerOf(300),
|
2017-02-06 19:48:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (e *EphemeralDisk) Canonicalize() {
|
|
|
|
if e.Sticky == nil {
|
2022-08-17 16:26:34 +00:00
|
|
|
e.Sticky = pointerOf(false)
|
2017-02-06 19:48:28 +00:00
|
|
|
}
|
|
|
|
if e.Migrate == nil {
|
2022-08-17 16:26:34 +00:00
|
|
|
e.Migrate = pointerOf(false)
|
2017-02-06 19:48:28 +00:00
|
|
|
}
|
|
|
|
if e.SizeMB == nil {
|
2022-08-17 16:26:34 +00:00
|
|
|
e.SizeMB = pointerOf(300)
|
2017-02-06 19:48:28 +00:00
|
|
|
}
|
2016-08-24 18:51:15 +00:00
|
|
|
}
|
|
|
|
|
2018-01-24 00:47:00 +00:00
|
|
|
// MigrateStrategy describes how allocations for a task group should be
// migrated between nodes (eg when draining).
type MigrateStrategy struct {
	// MaxParallel bounds how many allocations are migrated at once.
	MaxParallel *int `mapstructure:"max_parallel" hcl:"max_parallel,optional"`
	// HealthCheck selects how replacement health is determined ("checks" by default).
	HealthCheck *string `mapstructure:"health_check" hcl:"health_check,optional"`
	// MinHealthyTime is how long a replacement must be healthy before continuing.
	MinHealthyTime *time.Duration `mapstructure:"min_healthy_time" hcl:"min_healthy_time,optional"`
	// HealthyDeadline is the deadline for a replacement to become healthy.
	HealthyDeadline *time.Duration `mapstructure:"healthy_deadline" hcl:"healthy_deadline,optional"`
}
|
|
|
|
|
|
|
|
func DefaultMigrateStrategy() *MigrateStrategy {
|
|
|
|
return &MigrateStrategy{
|
2022-08-17 16:26:34 +00:00
|
|
|
MaxParallel: pointerOf(1),
|
|
|
|
HealthCheck: pointerOf("checks"),
|
|
|
|
MinHealthyTime: pointerOf(10 * time.Second),
|
|
|
|
HealthyDeadline: pointerOf(5 * time.Minute),
|
2018-01-24 00:47:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *MigrateStrategy) Canonicalize() {
|
|
|
|
if m == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
defaults := DefaultMigrateStrategy()
|
|
|
|
if m.MaxParallel == nil {
|
|
|
|
m.MaxParallel = defaults.MaxParallel
|
|
|
|
}
|
|
|
|
if m.HealthCheck == nil {
|
|
|
|
m.HealthCheck = defaults.HealthCheck
|
|
|
|
}
|
|
|
|
if m.MinHealthyTime == nil {
|
|
|
|
m.MinHealthyTime = defaults.MinHealthyTime
|
|
|
|
}
|
|
|
|
if m.HealthyDeadline == nil {
|
|
|
|
m.HealthyDeadline = defaults.HealthyDeadline
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-01 19:21:32 +00:00
|
|
|
func (m *MigrateStrategy) Merge(o *MigrateStrategy) {
|
|
|
|
if o.MaxParallel != nil {
|
|
|
|
m.MaxParallel = o.MaxParallel
|
|
|
|
}
|
|
|
|
if o.HealthCheck != nil {
|
|
|
|
m.HealthCheck = o.HealthCheck
|
|
|
|
}
|
|
|
|
if o.MinHealthyTime != nil {
|
|
|
|
m.MinHealthyTime = o.MinHealthyTime
|
|
|
|
}
|
|
|
|
if o.HealthyDeadline != nil {
|
|
|
|
m.HealthyDeadline = o.HealthyDeadline
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *MigrateStrategy) Copy() *MigrateStrategy {
|
|
|
|
if m == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
nm := new(MigrateStrategy)
|
|
|
|
*nm = *m
|
|
|
|
return nm
|
|
|
|
}
|
|
|
|
|
2019-08-12 14:22:27 +00:00
|
|
|
// VolumeRequest is a representation of a storage volume that a TaskGroup wishes to use.
type VolumeRequest struct {
	Name           string           `hcl:"name,label"`
	Type           string           `hcl:"type,optional"`
	Source         string           `hcl:"source,optional"`
	ReadOnly       bool             `hcl:"read_only,optional"`
	AccessMode     string           `hcl:"access_mode,optional"`
	AttachmentMode string           `hcl:"attachment_mode,optional"`
	MountOptions   *CSIMountOptions `hcl:"mount_options,block"`
	// PerAlloc requests a distinct volume per allocation — TODO confirm.
	PerAlloc bool `hcl:"per_alloc,optional"`
	// ExtraKeysHCL captures unknown HCL1 keys for validation; never serialized to JSON.
	ExtraKeysHCL []string `hcl1:",unusedKeys,optional" json:"-"`
}
|
|
|
|
|
volumes: Add support for mount propagation
This commit introduces support for configuring mount propagation when
mounting volumes with the `volume_mount` stanza on Linux targets.
Similar to Kubernetes, we expose 3 options for configuring mount
propagation:
- private, which is equivalent to `rprivate` on Linux, which does not allow the
container to see any new nested mounts after the chroot was created.
- host-to-task, which is equivalent to `rslave` on Linux, which allows new mounts
that have been created _outside of the container_ to be visible
inside the container after the chroot is created.
- bidirectional, which is equivalent to `rshared` on Linux, which allows both
the container to see new mounts created on the host, but
importantly _allows the container to create mounts that are
visible in other containers an don the host_
private and host-to-task are safe, but bidirectional mounts can be
dangerous, as if the code inside a container creates a mount, and does
not clean it up before tearing down the container, it can cause bad
things to happen inside the kernel.
To add a layer of safety here, we require that the user has ReadWrite
permissions on the volume before allowing bidirectional mounts, as a
defense in depth / validation case, although creating mounts should also require
a priviliged execution environment inside the container.
2019-09-13 21:13:20 +00:00
|
|
|
// Mount propagation modes for volume_mount on Linux targets.
const (
	// VolumeMountPropagationPrivate (rprivate on Linux): the task does not
	// see mounts created on the host after the chroot, and vice versa.
	VolumeMountPropagationPrivate = "private"
	// VolumeMountPropagationHostToTask (rslave on Linux): mounts created on
	// the host after the chroot become visible inside the task.
	VolumeMountPropagationHostToTask = "host-to-task"
	// VolumeMountPropagationBidirectional (rshared on Linux): mounts
	// propagate both ways; requires ReadWrite access to the volume since
	// leaked mounts from the task can affect the host.
	VolumeMountPropagationBidirectional = "bidirectional"
)
|
|
|
|
|
2019-08-12 14:22:27 +00:00
|
|
|
// VolumeMount represents the relationship between a destination path in a task
// and the task group volume that should be mounted there.
type VolumeMount struct {
	// Volume names the group-level VolumeRequest being mounted.
	Volume *string `hcl:"volume,optional"`
	// Destination is the path inside the task where the volume is mounted.
	Destination *string `hcl:"destination,optional"`
	// ReadOnly defaults to false (see Canonicalize).
	ReadOnly *bool `mapstructure:"read_only" hcl:"read_only,optional"`
	// PropagationMode is one of the VolumeMountPropagation* constants;
	// defaults to "private" (see Canonicalize).
	PropagationMode *string `mapstructure:"propagation_mode" hcl:"propagation_mode,optional"`
}
|
|
|
|
|
|
|
|
func (vm *VolumeMount) Canonicalize() {
|
|
|
|
if vm.PropagationMode == nil {
|
2022-08-17 16:26:34 +00:00
|
|
|
vm.PropagationMode = pointerOf(VolumeMountPropagationPrivate)
|
volumes: Add support for mount propagation
This commit introduces support for configuring mount propagation when
mounting volumes with the `volume_mount` stanza on Linux targets.
Similar to Kubernetes, we expose 3 options for configuring mount
propagation:
- private, which is equivalent to `rprivate` on Linux, which does not allow the
container to see any new nested mounts after the chroot was created.
- host-to-task, which is equivalent to `rslave` on Linux, which allows new mounts
that have been created _outside of the container_ to be visible
inside the container after the chroot is created.
- bidirectional, which is equivalent to `rshared` on Linux, which allows both
the container to see new mounts created on the host, but
importantly _allows the container to create mounts that are
visible in other containers an don the host_
private and host-to-task are safe, but bidirectional mounts can be
dangerous, as if the code inside a container creates a mount, and does
not clean it up before tearing down the container, it can cause bad
things to happen inside the kernel.
To add a layer of safety here, we require that the user has ReadWrite
permissions on the volume before allowing bidirectional mounts, as a
defense in depth / validation case, although creating mounts should also require
a priviliged execution environment inside the container.
2019-09-13 21:13:20 +00:00
|
|
|
}
|
|
|
|
if vm.ReadOnly == nil {
|
2022-08-17 16:26:34 +00:00
|
|
|
vm.ReadOnly = pointerOf(false)
|
volumes: Add support for mount propagation
This commit introduces support for configuring mount propagation when
mounting volumes with the `volume_mount` stanza on Linux targets.
Similar to Kubernetes, we expose 3 options for configuring mount
propagation:
- private, which is equivalent to `rprivate` on Linux, which does not allow the
container to see any new nested mounts after the chroot was created.
- host-to-task, which is equivalent to `rslave` on Linux, which allows new mounts
that have been created _outside of the container_ to be visible
inside the container after the chroot is created.
- bidirectional, which is equivalent to `rshared` on Linux, which allows both
the container to see new mounts created on the host, but
importantly _allows the container to create mounts that are
visible in other containers an don the host_
private and host-to-task are safe, but bidirectional mounts can be
dangerous, as if the code inside a container creates a mount, and does
not clean it up before tearing down the container, it can cause bad
things to happen inside the kernel.
To add a layer of safety here, we require that the user has ReadWrite
permissions on the volume before allowing bidirectional mounts, as a
defense in depth / validation case, although creating mounts should also require
a priviliged execution environment inside the container.
2019-09-13 21:13:20 +00:00
|
|
|
}
|
2019-07-25 14:44:08 +00:00
|
|
|
}
|
|
|
|
|
2015-09-09 20:02:39 +00:00
|
|
|
// TaskGroup is the unit of scheduling.
type TaskGroup struct {
	Name        *string       `hcl:"name,label"`
	Count       *int          `hcl:"count,optional"` // number of instances; defaults via Canonicalize
	Constraints []*Constraint `hcl:"constraint,block"`
	Affinities  []*Affinity   `hcl:"affinity,block"`
	Tasks       []*Task       `hcl:"task,block"`
	Spreads     []*Spread     `hcl:"spread,block"`
	// Volumes maps volume names to the group's storage requests.
	Volumes map[string]*VolumeRequest `hcl:"volume,block"`
	// RestartPolicy/ReschedulePolicy/Update/Migrate may be inherited from
	// the job during Canonicalize.
	RestartPolicy             *RestartPolicy     `hcl:"restart,block"`
	ReschedulePolicy          *ReschedulePolicy  `hcl:"reschedule,block"`
	EphemeralDisk             *EphemeralDisk     `hcl:"ephemeral_disk,block"`
	Update                    *UpdateStrategy    `hcl:"update,block"`
	Migrate                   *MigrateStrategy   `hcl:"migrate,block"`
	Networks                  []*NetworkResource `hcl:"network,block"`
	Meta                      map[string]string  `hcl:"meta,block"`
	Services                  []*Service         `hcl:"service,block"`
	ShutdownDelay             *time.Duration     `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
	StopAfterClientDisconnect *time.Duration     `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"`
	MaxClientDisconnect       *time.Duration     `mapstructure:"max_client_disconnect" hcl:"max_client_disconnect,optional"`
	Scaling                   *ScalingPolicy     `hcl:"scaling,block"`
	Consul                    *Consul            `hcl:"consul,block"`
}
|
|
|
|
|
|
|
|
// NewTaskGroup creates a new TaskGroup.
|
2015-09-10 00:59:18 +00:00
|
|
|
func NewTaskGroup(name string, count int) *TaskGroup {
|
2015-09-09 20:02:39 +00:00
|
|
|
return &TaskGroup{
|
2022-08-17 16:26:34 +00:00
|
|
|
Name: pointerOf(name),
|
|
|
|
Count: pointerOf(count),
|
2017-02-06 19:48:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-21 19:37:19 +00:00
|
|
|
// Canonicalize sets defaults and merges settings that should be inherited from the job
func (g *TaskGroup) Canonicalize(job *Job) {
	if g.Name == nil {
		g.Name = pointerOf("")
	}

	// Count defaults to the scaling policy's minimum if one is set,
	// otherwise 1. Scaling canonicalization needs the resolved count.
	if g.Count == nil {
		if g.Scaling != nil && g.Scaling.Min != nil {
			g.Count = pointerOf(int(*g.Scaling.Min))
		} else {
			g.Count = pointerOf(1)
		}
	}
	if g.Scaling != nil {
		g.Scaling.Canonicalize(*g.Count)
	}
	if g.EphemeralDisk == nil {
		g.EphemeralDisk = DefaultEphemeralDisk()
	} else {
		g.EphemeralDisk.Canonicalize()
	}

	// Merge job.consul onto group.consul
	if g.Consul == nil {
		g.Consul = new(Consul)
	}
	g.Consul.MergeNamespace(job.ConsulNamespace)
	g.Consul.Canonicalize()

	// Merge the update policy from the job
	if ju, tu := job.Update != nil, g.Update != nil; ju && tu {
		// Merge the jobs and task groups definition of the update strategy
		jc := job.Update.Copy()
		jc.Merge(g.Update)
		g.Update = jc
	} else if ju && !job.Update.Empty() {
		// Inherit the jobs as long as it is non-empty.
		jc := job.Update.Copy()
		g.Update = jc
	}

	if g.Update != nil {
		g.Update.Canonicalize()
	}

	// Merge the reschedule policy from the job
	if jr, tr := job.Reschedule != nil, g.ReschedulePolicy != nil; jr && tr {
		jobReschedule := job.Reschedule.Copy()
		jobReschedule.Merge(g.ReschedulePolicy)
		g.ReschedulePolicy = jobReschedule
	} else if jr {
		jobReschedule := job.Reschedule.Copy()
		g.ReschedulePolicy = jobReschedule
	}
	// Only use default reschedule policy for non system jobs
	if g.ReschedulePolicy == nil && *job.Type != "system" {
		g.ReschedulePolicy = NewDefaultReschedulePolicy(*job.Type)
	}
	if g.ReschedulePolicy != nil {
		g.ReschedulePolicy.Canonicalize(*job.Type)
	}
	// Merge the migrate strategy from the job
	if jm, tm := job.Migrate != nil, g.Migrate != nil; jm && tm {
		jobMigrate := job.Migrate.Copy()
		jobMigrate.Merge(g.Migrate)
		g.Migrate = jobMigrate
	} else if jm {
		jobMigrate := job.Migrate.Copy()
		g.Migrate = jobMigrate
	}

	// Service jobs get an empty migrate strategy so the defaults apply
	// during canonicalization below.
	if g.Migrate == nil && *job.Type == "service" {
		g.Migrate = &MigrateStrategy{}
	}
	if g.Migrate != nil {
		g.Migrate.Canonicalize()
	}

	// The group's restart policy is layered on top of the job-type default.
	var defaultRestartPolicy *RestartPolicy
	switch *job.Type {
	case "service", "system":
		defaultRestartPolicy = defaultServiceJobRestartPolicy()
	default:
		defaultRestartPolicy = defaultBatchJobRestartPolicy()
	}

	if g.RestartPolicy != nil {
		defaultRestartPolicy.Merge(g.RestartPolicy)
	}
	g.RestartPolicy = defaultRestartPolicy

	// Canonicalize nested objects last so they see the resolved group/job.
	for _, t := range g.Tasks {
		t.Canonicalize(g, job)
	}

	for _, spread := range g.Spreads {
		spread.Canonicalize()
	}
	for _, a := range g.Affinities {
		a.Canonicalize()
	}
	for _, n := range g.Networks {
		n.Canonicalize()
	}
	for _, s := range g.Services {
		s.Canonicalize(nil, g, job)
	}
}
|
|
|
|
|
2020-03-07 02:52:58 +00:00
|
|
|
// These needs to be in sync with DefaultServiceJobRestartPolicy in
|
|
|
|
// in nomad/structs/structs.go
|
|
|
|
func defaultServiceJobRestartPolicy() *RestartPolicy {
|
|
|
|
return &RestartPolicy{
|
2022-08-17 16:26:34 +00:00
|
|
|
Delay: pointerOf(15 * time.Second),
|
|
|
|
Attempts: pointerOf(2),
|
|
|
|
Interval: pointerOf(30 * time.Minute),
|
|
|
|
Mode: pointerOf(RestartPolicyModeFail),
|
2020-03-07 02:52:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// These needs to be in sync with DefaultBatchJobRestartPolicy in
|
|
|
|
// in nomad/structs/structs.go
|
|
|
|
func defaultBatchJobRestartPolicy() *RestartPolicy {
|
|
|
|
return &RestartPolicy{
|
2022-08-17 16:26:34 +00:00
|
|
|
Delay: pointerOf(15 * time.Second),
|
|
|
|
Attempts: pointerOf(3),
|
|
|
|
Interval: pointerOf(24 * time.Hour),
|
|
|
|
Mode: pointerOf(RestartPolicyModeFail),
|
2020-03-07 02:52:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-09 20:02:39 +00:00
|
|
|
// Constrain is used to add a constraint to a task group. It returns the
// group to allow call chaining.
func (g *TaskGroup) Constrain(c *Constraint) *TaskGroup {
	g.Constraints = append(g.Constraints, c)
	return g
}
|
|
|
|
|
|
|
|
// AddMeta is used to add a meta k/v pair to a task group
|
|
|
|
func (g *TaskGroup) SetMeta(key, val string) *TaskGroup {
|
|
|
|
if g.Meta == nil {
|
|
|
|
g.Meta = make(map[string]string)
|
|
|
|
}
|
|
|
|
g.Meta[key] = val
|
|
|
|
return g
|
|
|
|
}
|
|
|
|
|
|
|
|
// AddTask is used to add a new task to a task group.
|
|
|
|
func (g *TaskGroup) AddTask(t *Task) *TaskGroup {
|
|
|
|
g.Tasks = append(g.Tasks, t)
|
|
|
|
return g
|
|
|
|
}
|
|
|
|
|
2018-07-16 13:30:58 +00:00
|
|
|
// AddAffinity is used to add a new affinity to a task group.
|
|
|
|
func (g *TaskGroup) AddAffinity(a *Affinity) *TaskGroup {
|
|
|
|
g.Affinities = append(g.Affinities, a)
|
|
|
|
return g
|
|
|
|
}
|
|
|
|
|
2016-09-14 22:43:42 +00:00
|
|
|
// RequireDisk adds an ephemeral disk to the task group, replacing any
// previously set disk. It returns the group to allow call chaining.
func (g *TaskGroup) RequireDisk(disk *EphemeralDisk) *TaskGroup {
	g.EphemeralDisk = disk
	return g
}
|
|
|
|
|
2018-07-18 15:53:03 +00:00
|
|
|
// AddSpread is used to add a new spread preference to a task group.
|
|
|
|
func (g *TaskGroup) AddSpread(s *Spread) *TaskGroup {
|
|
|
|
g.Spreads = append(g.Spreads, s)
|
|
|
|
return g
|
|
|
|
}
|
|
|
|
|
2016-02-05 07:54:15 +00:00
|
|
|
// LogConfig provides configuration for log rotation.
type LogConfig struct {
	// MaxFiles is the number of rotated log files to retain (default 10).
	MaxFiles *int `mapstructure:"max_files" hcl:"max_files,optional"`
	// MaxFileSizeMB is the size, in MB, at which a log file is rotated
	// (default 10).
	MaxFileSizeMB *int `mapstructure:"max_file_size" hcl:"max_file_size,optional"`
}
|
|
|
|
|
|
|
|
func DefaultLogConfig() *LogConfig {
|
|
|
|
return &LogConfig{
|
2022-08-17 16:26:34 +00:00
|
|
|
MaxFiles: pointerOf(10),
|
|
|
|
MaxFileSizeMB: pointerOf(10),
|
2017-02-06 19:48:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Canonicalize fills in default values for any unset log config fields,
// matching the values produced by DefaultLogConfig.
func (l *LogConfig) Canonicalize() {
	if l.MaxFiles == nil {
		l.MaxFiles = pointerOf(10)
	}
	if l.MaxFileSizeMB == nil {
		l.MaxFileSizeMB = pointerOf(10)
	}
}
|
|
|
|
|
2017-01-26 05:06:16 +00:00
|
|
|
// DispatchPayloadConfig configures how a task gets its input from a job dispatch.
type DispatchPayloadConfig struct {
	// File is the path the dispatch payload is written to — presumably
	// relative to the task directory; confirm against structs.go.
	File string `hcl:"file,optional"`
}
|
|
|
|
|
2020-01-09 19:40:26 +00:00
|
|
|
// Valid values for TaskLifecycle.Hook, naming the stage of the
// allocation lifecycle at which a lifecycle task runs.
const (
	TaskLifecycleHookPrestart  = "prestart"
	TaskLifecycleHookPoststart = "poststart"
	TaskLifecycleHookPoststop  = "poststop"
)
|
|
|
|
|
2019-10-11 17:10:45 +00:00
|
|
|
// TaskLifecycle configures when a task runs within an allocation's
// lifecycle. An empty Hook marks the lifecycle as unset (see Empty).
type TaskLifecycle struct {
	// Hook is the lifecycle stage; see the TaskLifecycleHook* constants.
	Hook string `mapstructure:"hook" hcl:"hook,optional"`
	// Sidecar marks the lifecycle task as a long-running sidecar —
	// exact runtime semantics are defined in nomad/structs; confirm there.
	Sidecar bool `mapstructure:"sidecar" hcl:"sidecar,optional"`
}
|
|
|
|
|
|
|
|
// Determine if lifecycle has user-input values
|
|
|
|
func (l *TaskLifecycle) Empty() bool {
|
2020-03-02 19:12:16 +00:00
|
|
|
return l == nil || (l.Hook == "")
|
2019-12-12 18:59:38 +00:00
|
|
|
}
|
|
|
|
|
2015-09-09 20:02:39 +00:00
|
|
|
// Task is a single process in a task group.
type Task struct {
	// Name labels the task within its group.
	Name string `hcl:"name,label"`
	// Driver names the task driver used to run the task.
	Driver string `hcl:"driver,optional"`
	// User is the user the task runs as.
	User string `hcl:"user,optional"`
	// Lifecycle controls when the task runs relative to the group's main
	// tasks; nil after canonicalization when unset.
	Lifecycle *TaskLifecycle `hcl:"lifecycle,block"`
	// Config holds driver-specific configuration.
	Config map[string]interface{} `hcl:"config,block"`
	// Constraints and Affinities influence task placement.
	Constraints []*Constraint `hcl:"constraint,block"`
	Affinities  []*Affinity   `hcl:"affinity,block"`
	// Env is the set of environment variables injected into the task.
	Env map[string]string `hcl:"env,block"`
	// Services are the service registrations tied to this task.
	Services []*Service `hcl:"service,block"`
	// Resources are the resources required by the task; canonicalized to
	// a non-nil value with defaults.
	Resources *Resources `hcl:"resources,block"`
	// RestartPolicy, when set, is merged over the group's policy during
	// canonicalization; otherwise the group's policy is inherited.
	RestartPolicy *RestartPolicy `hcl:"restart,block"`
	// Meta is arbitrary task metadata.
	Meta map[string]string `hcl:"meta,block"`
	// KillTimeout is how long the task is given to exit gracefully
	// (default 5s after canonicalization).
	KillTimeout *time.Duration `mapstructure:"kill_timeout" hcl:"kill_timeout,optional"`
	// LogConfig controls log rotation; defaults via DefaultLogConfig.
	LogConfig *LogConfig `mapstructure:"logs" hcl:"logs,block"`
	// Artifacts are downloaded before the task starts.
	Artifacts []*TaskArtifact `hcl:"artifact,block"`
	// Vault configures the task's Vault integration.
	Vault *Vault `hcl:"vault,block"`
	// Templates are rendered into the task directory.
	Templates []*Template `hcl:"template,block"`
	// DispatchPayload configures input from a job dispatch.
	DispatchPayload *DispatchPayloadConfig `hcl:"dispatch_payload,block"`
	// VolumeMounts mount group volumes into the task.
	VolumeMounts []*VolumeMount `hcl:"volume_mount,block"`
	// CSIPluginConfig marks this task as a CSI plugin.
	CSIPluginConfig *TaskCSIPluginConfig `mapstructure:"csi_plugin" json:",omitempty" hcl:"csi_plugin,block"`
	// Leader, when true, designates this task as the group's leader —
	// confirm leader-death semantics in structs.go.
	Leader bool `hcl:"leader,optional"`
	// ShutdownDelay is the wait before the task is stopped.
	ShutdownDelay time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"`
	// KillSignal is the signal used to stop the task.
	KillSignal string `mapstructure:"kill_signal" hcl:"kill_signal,optional"`
	// Kind tags special task kinds (e.g. Connect proxies) — confirm
	// valid values in structs.go.
	Kind string `hcl:"kind,optional"`
	// ScalingPolicies are task-level scaling policies.
	ScalingPolicies []*ScalingPolicy `hcl:"scaling,block"`
}
|
|
|
|
|
2017-02-22 20:30:05 +00:00
|
|
|
// Canonicalize sets defaults on the task and recursively canonicalizes
// its nested blocks. tg is the group containing the task and job is the
// enclosing job; both are passed through to service canonicalization and
// used to inherit the restart policy.
func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
	// Resources are always materialized so defaults can be applied.
	if t.Resources == nil {
		t.Resources = &Resources{}
	}
	t.Resources.Canonicalize()

	if t.KillTimeout == nil {
		t.KillTimeout = pointerOf(5 * time.Second)
	}
	if t.LogConfig == nil {
		t.LogConfig = DefaultLogConfig()
	} else {
		t.LogConfig.Canonicalize()
	}
	for _, artifact := range t.Artifacts {
		artifact.Canonicalize()
	}
	if t.Vault != nil {
		t.Vault.Canonicalize()
	}
	for _, tmpl := range t.Templates {
		tmpl.Canonicalize()
	}
	for _, s := range t.Services {
		s.Canonicalize(t, tg, job)
	}
	for _, a := range t.Affinities {
		a.Canonicalize()
	}
	for _, vm := range t.VolumeMounts {
		vm.Canonicalize()
	}
	// Drop lifecycle blocks with no user-supplied values so they are
	// omitted rather than carried around empty.
	if t.Lifecycle.Empty() {
		t.Lifecycle = nil
	}
	if t.CSIPluginConfig != nil {
		t.CSIPluginConfig.Canonicalize()
	}
	// Inherit the group restart policy when the task has none; otherwise
	// merge the task's values over a copy of the group's so the group
	// policy itself is never mutated.
	if t.RestartPolicy == nil {
		t.RestartPolicy = tg.RestartPolicy
	} else {
		tgrp := &RestartPolicy{}
		*tgrp = *tg.RestartPolicy
		tgrp.Merge(t.RestartPolicy)
		t.RestartPolicy = tgrp
	}
}
|
|
|
|
|
2016-03-16 03:21:52 +00:00
|
|
|
// TaskArtifact is used to download artifacts before running a task.
type TaskArtifact struct {
	// GetterSource is the URL or path the artifact is fetched from.
	GetterSource *string `mapstructure:"source" hcl:"source,optional"`
	// GetterOptions are options passed to the getter; nil when empty.
	GetterOptions map[string]string `mapstructure:"options" hcl:"options,block"`
	// GetterHeaders are HTTP headers sent with the request; nil when empty.
	GetterHeaders map[string]string `mapstructure:"headers" hcl:"headers,block"`
	// GetterMode controls how the artifact is fetched (default "any";
	// "file" changes the default destination).
	GetterMode *string `mapstructure:"mode" hcl:"mode,optional"`
	// RelativeDest is the destination within the task directory; defaults
	// depend on GetterMode (see Canonicalize).
	RelativeDest *string `mapstructure:"destination" hcl:"destination,optional"`
}
|
|
|
|
|
|
|
|
// Canonicalize sets defaults on the artifact. The default destination
// depends on the getter mode: "file" artifacts land at
// local/<basename of source>, everything else defaults to the "local/"
// directory.
func (a *TaskArtifact) Canonicalize() {
	if a.GetterMode == nil {
		a.GetterMode = pointerOf("any")
	}
	if a.GetterSource == nil {
		// Shouldn't be possible, but we don't want to panic
		a.GetterSource = pointerOf("")
	}
	// Normalize empty maps to nil so they compare and serialize
	// consistently.
	if len(a.GetterOptions) == 0 {
		a.GetterOptions = nil
	}
	if len(a.GetterHeaders) == 0 {
		a.GetterHeaders = nil
	}
	if a.RelativeDest == nil {
		switch *a.GetterMode {
		case "file":
			// File mode should default to local/filename
			dest := *a.GetterSource
			dest = path.Base(dest)
			dest = filepath.Join("local", dest)
			a.RelativeDest = &dest
		default:
			// Default to a directory
			a.RelativeDest = pointerOf("local/")
		}
	}
}
|
|
|
|
|
2022-01-10 15:19:07 +00:00
|
|
|
// WaitConfig is the Min/Max duration to wait for the Consul cluster to reach a
// consistent state before attempting to render Templates.
type WaitConfig struct {
	// Min is the minimum time to wait before rendering.
	Min *time.Duration `mapstructure:"min" hcl:"min"`
	// Max is the maximum time to wait before rendering.
	Max *time.Duration `mapstructure:"max" hcl:"max"`
}
|
|
|
|
|
|
|
|
func (wc *WaitConfig) Copy() *WaitConfig {
|
|
|
|
if wc == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
nwc := new(WaitConfig)
|
|
|
|
*nwc = *wc
|
|
|
|
|
|
|
|
return nwc
|
|
|
|
}
|
|
|
|
|
2022-08-24 15:43:01 +00:00
|
|
|
// ChangeScript configures the script executed when a template's change
// mode invokes a script.
type ChangeScript struct {
	// Command is the path of the script to run (empty string by default).
	Command *string `mapstructure:"command" hcl:"command"`
	// Args are the arguments passed to Command.
	Args []string `mapstructure:"args" hcl:"args,optional"`
	// Timeout bounds the script's run time (default 5s).
	Timeout *time.Duration `mapstructure:"timeout" hcl:"timeout,optional"`
	// FailOnError controls whether a script failure is fatal — confirm
	// exact failure semantics against structs.go (default false).
	FailOnError *bool `mapstructure:"fail_on_error" hcl:"fail_on_error"`
}
|
|
|
|
|
|
|
|
// Canonicalize sets defaults for the change script: empty command, empty
// args, a 5 second timeout, and fail_on_error disabled.
func (ch *ChangeScript) Canonicalize() {
	if ch.Command == nil {
		ch.Command = pointerOf("")
	}
	if ch.Args == nil {
		ch.Args = []string{}
	}
	if ch.Timeout == nil {
		ch.Timeout = pointerOf(5 * time.Second)
	}
	if ch.FailOnError == nil {
		ch.FailOnError = pointerOf(false)
	}
}
|
|
|
|
|
2016-09-23 22:39:52 +00:00
|
|
|
// Template describes a file rendered into the task directory and what
// happens when the rendered content changes.
type Template struct {
	// SourcePath is an on-disk template file; alternative to EmbeddedTmpl.
	SourcePath *string `mapstructure:"source" hcl:"source,optional"`
	// DestPath is where the rendered output is written.
	DestPath *string `mapstructure:"destination" hcl:"destination,optional"`
	// EmbeddedTmpl is inline template content.
	EmbeddedTmpl *string `mapstructure:"data" hcl:"data,optional"`
	// ChangeMode is the action taken on re-render (default "restart";
	// "signal" uses ChangeSignal).
	ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"`
	// ChangeScript configures the script run on change, when applicable.
	ChangeScript *ChangeScript `mapstructure:"change_script" hcl:"change_script,block"`
	// ChangeSignal is the signal for the "signal" change mode; always
	// canonicalized to upper case (default "SIGHUP" in signal mode).
	ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"`
	// Splay is jitter applied before acting on a change (default 5s).
	Splay *time.Duration `mapstructure:"splay" hcl:"splay,optional"`
	// Perms is the rendered file's mode (default "0644").
	Perms *string `mapstructure:"perms" hcl:"perms,optional"`
	// Uid and Gid optionally set ownership of the rendered file.
	Uid *int `mapstructure:"uid" hcl:"uid,optional"`
	Gid *int `mapstructure:"gid" hcl:"gid,optional"`
	// LeftDelim and RightDelim override the template delimiters
	// (defaults "{{" and "}}").
	LeftDelim  *string `mapstructure:"left_delimiter" hcl:"left_delimiter,optional"`
	RightDelim *string `mapstructure:"right_delimiter" hcl:"right_delimiter,optional"`
	// Envvars indicates the rendered output is treated as environment
	// variables — confirm exact semantics in structs.go (default false).
	Envvars *bool `mapstructure:"env" hcl:"env,optional"`
	// VaultGrace is deprecated and unused as of Vault 0.5.
	VaultGrace *time.Duration `mapstructure:"vault_grace" hcl:"vault_grace,optional"`
	// Wait bounds how long to wait for a consistent view before rendering.
	Wait *WaitConfig `mapstructure:"wait" hcl:"wait,block"`
	// ErrMissingKey makes rendering error on missing map keys
	// (default false).
	ErrMissingKey *bool `mapstructure:"error_on_missing_key" hcl:"error_on_missing_key,optional"`
}
|
|
|
|
|
|
|
|
// Canonicalize sets defaults on the template. Note the ChangeSignal
// handling: it defaults to "SIGHUP" only when ChangeMode is "signal",
// and a user-provided signal is normalized to upper case.
func (tmpl *Template) Canonicalize() {
	if tmpl.SourcePath == nil {
		tmpl.SourcePath = pointerOf("")
	}
	if tmpl.DestPath == nil {
		tmpl.DestPath = pointerOf("")
	}
	if tmpl.EmbeddedTmpl == nil {
		tmpl.EmbeddedTmpl = pointerOf("")
	}
	if tmpl.ChangeMode == nil {
		tmpl.ChangeMode = pointerOf("restart")
	}
	if tmpl.ChangeSignal == nil {
		// Only signal mode needs a real default signal.
		if *tmpl.ChangeMode == "signal" {
			tmpl.ChangeSignal = pointerOf("SIGHUP")
		} else {
			tmpl.ChangeSignal = pointerOf("")
		}
	} else {
		// Normalize user-provided signal names to upper case.
		sig := *tmpl.ChangeSignal
		tmpl.ChangeSignal = pointerOf(strings.ToUpper(sig))
	}
	if tmpl.ChangeScript != nil {
		tmpl.ChangeScript.Canonicalize()
	}
	if tmpl.Splay == nil {
		tmpl.Splay = pointerOf(5 * time.Second)
	}
	if tmpl.Perms == nil {
		tmpl.Perms = pointerOf("0644")
	}
	if tmpl.LeftDelim == nil {
		tmpl.LeftDelim = pointerOf("{{")
	}
	if tmpl.RightDelim == nil {
		tmpl.RightDelim = pointerOf("}}")
	}
	if tmpl.Envvars == nil {
		tmpl.Envvars = pointerOf(false)
	}
	if tmpl.ErrMissingKey == nil {
		tmpl.ErrMissingKey = pointerOf(false)
	}
	//COMPAT(0.12) VaultGrace is deprecated and unused as of Vault 0.5
	if tmpl.VaultGrace == nil {
		tmpl.VaultGrace = pointerOf(time.Duration(0))
	}
}
|
|
|
|
|
2016-08-17 04:32:25 +00:00
|
|
|
// Vault configures a task's integration with HashiCorp Vault.
type Vault struct {
	// Policies are the Vault policies requested for the task's token.
	Policies []string `hcl:"policies,optional"`
	// Namespace is the Vault namespace (empty string by default).
	Namespace *string `mapstructure:"namespace" hcl:"namespace,optional"`
	// Env controls whether the Vault token is exposed to the task's
	// environment (default true).
	Env *bool `hcl:"env,optional"`
	// ChangeMode is the action taken when the token changes
	// (default "restart").
	ChangeMode *string `mapstructure:"change_mode" hcl:"change_mode,optional"`
	// ChangeSignal is the signal used when ChangeMode is "signal"
	// (default "SIGHUP").
	ChangeSignal *string `mapstructure:"change_signal" hcl:"change_signal,optional"`
}
|
|
|
|
|
|
|
|
// Canonicalize sets defaults for the vault block: env injection enabled,
// empty namespace, "restart" change mode, and "SIGHUP" change signal.
// Unlike Template.Canonicalize, the signal default is set regardless of
// the change mode.
func (v *Vault) Canonicalize() {
	if v.Env == nil {
		v.Env = pointerOf(true)
	}
	if v.Namespace == nil {
		v.Namespace = pointerOf("")
	}
	if v.ChangeMode == nil {
		v.ChangeMode = pointerOf("restart")
	}
	if v.ChangeSignal == nil {
		v.ChangeSignal = pointerOf("SIGHUP")
	}
}
|
|
|
|
|
2015-09-09 20:02:39 +00:00
|
|
|
// NewTask creates and initializes a new Task.
|
|
|
|
func NewTask(name, driver string) *Task {
|
|
|
|
return &Task{
|
|
|
|
Name: name,
|
|
|
|
Driver: driver,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetConfig is used to configure a single k/v pair on the task's
// driver configuration. It returns the task to allow call chaining.
func (t *Task) SetConfig(key string, val interface{}) *Task {
	// Lazily initialize the map so a zero-value Task works.
	if t.Config == nil {
		t.Config = make(map[string]interface{})
	}
	t.Config[key] = val
	return t
}
|
|
|
|
|
|
|
|
// SetMeta is used to add metadata k/v pairs to the task.
|
|
|
|
func (t *Task) SetMeta(key, val string) *Task {
|
|
|
|
if t.Meta == nil {
|
|
|
|
t.Meta = make(map[string]string)
|
|
|
|
}
|
|
|
|
t.Meta[key] = val
|
|
|
|
return t
|
|
|
|
}
|
|
|
|
|
|
|
|
// Require is used to add resource requirements to a task.
|
|
|
|
func (t *Task) Require(r *Resources) *Task {
|
|
|
|
t.Resources = r
|
|
|
|
return t
|
|
|
|
}
|
2015-09-10 00:29:43 +00:00
|
|
|
|
|
|
|
// Constrain adds a new constraint to a single task. It returns the task
// to allow call chaining.
func (t *Task) Constrain(c *Constraint) *Task {
	t.Constraints = append(t.Constraints, c)
	return t
}
|
2015-11-12 23:28:22 +00:00
|
|
|
|
2018-07-16 13:30:58 +00:00
|
|
|
// AddAffinity adds a new affinity to a single task.
|
|
|
|
func (t *Task) AddAffinity(a *Affinity) *Task {
|
|
|
|
t.Affinities = append(t.Affinities, a)
|
|
|
|
return t
|
|
|
|
}
|
|
|
|
|
2016-02-10 21:36:47 +00:00
|
|
|
// SetLogConfig sets a log config to a task
|
|
|
|
func (t *Task) SetLogConfig(l *LogConfig) *Task {
|
|
|
|
t.LogConfig = l
|
|
|
|
return t
|
|
|
|
}
|
|
|
|
|
2015-11-12 23:28:22 +00:00
|
|
|
// TaskState tracks the current state of a task and events that caused state
// transitions.
type TaskState struct {
	// State is the task's current state name.
	State string
	// Failed indicates the task failed.
	Failed bool
	// Restarts counts the task's restarts; LastRestart is when the most
	// recent one occurred.
	Restarts    uint64
	LastRestart time.Time
	// StartedAt and FinishedAt bracket the task's execution.
	StartedAt  time.Time
	FinishedAt time.Time
	// Events is the ordered list of events that drove state transitions.
	Events []*TaskEvent

	// Experimental - TaskHandle is based on drivers.TaskHandle and used
	// by remote task drivers to migrate task handles between allocations.
	TaskHandle *TaskHandle
}
|
|
|
|
|
2021-04-29 22:02:12 +00:00
|
|
|
// Experimental - TaskHandle is based on drivers.TaskHandle and used by remote
// task drivers to migrate task handles between allocations.
type TaskHandle struct {
	// Version of the handle — presumably a schema version for
	// DriverState; confirm against drivers.TaskHandle.
	Version int
	// DriverState is the opaque, driver-specific state payload.
	DriverState []byte
}
|
|
|
|
|
|
|
|
// Values used as TaskEvent.Type, describing events in a task's lifecycle.
const (
	TaskSetup                  = "Task Setup"
	TaskSetupFailure           = "Setup Failure"
	TaskDriverFailure          = "Driver Failure"
	TaskDriverMessage          = "Driver"
	TaskReceived               = "Received"
	TaskFailedValidation       = "Failed Validation"
	TaskStarted                = "Started"
	TaskTerminated             = "Terminated"
	TaskKilling                = "Killing"
	TaskKilled                 = "Killed"
	TaskRestarting             = "Restarting"
	TaskNotRestarting          = "Not Restarting"
	TaskDownloadingArtifacts   = "Downloading Artifacts"
	TaskArtifactDownloadFailed = "Failed Artifact Download"
	TaskSiblingFailed          = "Sibling Task Failed"
	TaskSignaling              = "Signaling"
	TaskRestartSignal          = "Restart Signaled"
	TaskLeaderDead             = "Leader Task Dead"
	TaskBuildingTaskDir        = "Building Task Directory"
	TaskClientReconnected      = "Reconnected"
)
|
|
|
|
|
|
|
|
// TaskEvent is an event that effects the state of a task and contains meta-data
// appropriate to the events type.
type TaskEvent struct {
	// Type is one of the Task* event constants above.
	Type string
	// Time is the event timestamp — presumably unix nanoseconds; confirm
	// against structs.go.
	Time int64
	// DisplayMessage is a human-friendly rendering of the event.
	DisplayMessage string
	// Details holds structured event metadata.
	Details map[string]string
	Message string
	// DEPRECATION NOTICE: The following fields are all deprecated. see TaskEvent struct in structs.go for details.
	FailsTask        bool
	RestartReason    string
	SetupError       string
	DriverError      string
	DriverMessage    string
	ExitCode         int
	Signal           int
	KillReason       string
	KillTimeout      time.Duration
	KillError        string
	StartDelay       int64
	DownloadError    string
	ValidationError  string
	DiskLimit        int64
	DiskSize         int64
	FailedSibling    string
	VaultError       string
	TaskSignalReason string
	TaskSignal       string
	GenericSource    string
}
|
2019-10-22 13:20:26 +00:00
|
|
|
|
|
|
|
// CSIPluginType is an enum string that encapsulates the valid options for a
// CSIPlugin stanza's Type. These modes will allow the plugin to be used in
// different ways by the client.
type CSIPluginType string
|
|
|
|
|
|
|
|
// Valid CSIPluginType values.
const (
	// CSIPluginTypeNode indicates that Nomad should only use the plugin for
	// performing Node RPCs against the provided plugin.
	CSIPluginTypeNode CSIPluginType = "node"

	// CSIPluginTypeController indicates that Nomad should only use the plugin for
	// performing Controller RPCs against the provided plugin.
	CSIPluginTypeController CSIPluginType = "controller"

	// CSIPluginTypeMonolith indicates that Nomad can use the provided plugin for
	// both controller and node rpcs.
	CSIPluginTypeMonolith CSIPluginType = "monolith"
)
|
|
|
|
|
|
|
|
// TaskCSIPluginConfig contains the data that is required to setup a task as a
// CSI plugin. This will be used by the csi_plugin_supervisor_hook to configure
// mounts for the plugin and initiate the connection to the plugin catalog.
type TaskCSIPluginConfig struct {
	// ID is the identifier of the plugin.
	// Ideally this should be the FQDN of the plugin.
	ID string `mapstructure:"id" hcl:"id,optional"`

	// CSIPluginType instructs Nomad on how to handle processing a plugin
	// (node, controller, or monolith).
	Type CSIPluginType `mapstructure:"type" hcl:"type,optional"`

	// MountDir is the directory (within its container) in which the plugin creates a
	// socket (called CSISocketName) for communication with Nomad. Default is /csi.
	MountDir string `mapstructure:"mount_dir" hcl:"mount_dir,optional"`

	// StagePublishBaseDir is the base directory (within its container) in which the plugin
	// mounts volumes being staged and bind mounts volumes being published.
	// e.g. staging_target_path = {StagePublishBaseDir}/staging/{volume-id}/{usage-mode}
	// e.g. target_path = {StagePublishBaseDir}/per-alloc/{alloc-id}/{volume-id}/{usage-mode}
	// Default is /local/csi.
	StagePublishBaseDir string `mapstructure:"stage_publish_base_dir" hcl:"stage_publish_base_dir,optional"`

	// HealthTimeout is the time after which the CSI plugin tasks will be killed
	// if the CSI Plugin is not healthy. Default is 30s.
	HealthTimeout time.Duration `mapstructure:"health_timeout" hcl:"health_timeout,optional"`
}
|
|
|
|
|
|
|
|
// Canonicalize sets defaults for the CSI plugin config: mount dir "/csi",
// stage/publish base dir "/local/csi", and a 30 second health timeout.
func (t *TaskCSIPluginConfig) Canonicalize() {
	if t.MountDir == "" {
		t.MountDir = "/csi"
	}

	if t.StagePublishBaseDir == "" {
		t.StagePublishBaseDir = filepath.Join("/local", "csi")
	}

	if t.HealthTimeout == 0 {
		t.HealthTimeout = 30 * time.Second
	}
}
|