Merge branch 'master' into f-driver-upgradepath-test
* master: (91 commits)
  increase log level
  CHANGELOG: added 0.9 ui changes
  test: port TestTaskRunner_CheckWatcher_Restart
  test: port RestartTask from 0.8
  test: port SignalFailure test from 0.8
  Rename TaskKillRequest/Response to TaskPreKillRequest/Response
  Fix log comments
  Rename TaskKillHook to TaskPreKillHook to more closely match usage
  Fix comment
  Rename TaskKillHook to TaskPreKillHook to more closely match usage
  Update CHANGELOG.md
  tests: deflake TestClientAllocations_GarbageCollect_Remote
  tests: deflake client TestFS_Logs_TaskPending test
  Update CHANGELOG.md
  test: fix flaky garbage collect test
  drivers: regen proto
  api: avoid codegen for syncing
  code review comments
  revert unintended change
  ar: return error from hooks if occured
  ...
commit 753e03f9b8

CHANGELOG.md (15 changes)
@@ -31,16 +31,31 @@ IMPROVEMENTS:

 * telemetry: All client metrics include a new `node_class` tag [[GH-3882](https://github.com/hashicorp/nomad/issues/3882)]
 * telemetry: Added new tags with value of child job id and parent job id for
   parameterized and periodic jobs [[GH-4392](https://github.com/hashicorp/nomad/issues/4392)]
+* ui: CPU and Memory metrics are plotted over time during a session in line charts on node detail, allocation detail, and task detail pages [[GH-4661](https://github.com/hashicorp/nomad/issues/4661)], [[GH-4718](https://github.com/hashicorp/nomad/issues/4718)], [[GH-4727](https://github.com/hashicorp/nomad/issues/4727)]
+* ui: Switching namespaces in the UI will now always "reset" back to the jobs list page [[GH-4533](https://github.com/hashicorp/nomad/issues/4533)]
+* ui: Refactored breadcrumbs and adjusted the breadcrumb paths on each page [[GH-4458](https://github.com/hashicorp/nomad/issues/4458)]
+* ui: Jobs can be authored, planned, submitted, and edited from the UI [[GH-4600](https://github.com/hashicorp/nomad/issues/4600)]
+* ui: Added links to Jobs and Clients from the error page template [[GH-4850](https://github.com/hashicorp/nomad/issues/4850)]
+* ui: Gracefully handle errors from the stats end points [[GH-4833](https://github.com/hashicorp/nomad/issues/4833)]
+* ui: Stopped jobs can be restarted from the UI [[GH-4615](https://github.com/hashicorp/nomad/issues/4615)]
+* ui: Canaries can now be promoted from the UI [[GH-4616](https://github.com/hashicorp/nomad/issues/4616)]
+* ui: Filled out the styleguide [[GH-4468](https://github.com/hashicorp/nomad/issues/4468)]
 * vendor: Removed library obsoleted by go 1.8 [[GH-4469](https://github.com/hashicorp/nomad/issues/4469)]
+* acls: Allow support for using globs in namespace definitions [[GH-4982](https://github.com/hashicorp/nomad/pull/4982)]

 BUG FIXES:

 * core: Fix an issue where artifact checksums containing interpolated variables failed validation [[GH-4810](https://github.com/hashicorp/nomad/pull/4819)]
 * core: Fix an issue where job summaries for parent dispatch/periodic jobs were not being computed correctly [[GH-5205](https://github.com/hashicorp/nomad/pull/5205)]
 * client: Fix an issue reloading the client config [[GH-4730](https://github.com/hashicorp/nomad/issues/4730)]
 * client: Fix an issue where driver attributes are not updated in node API responses if they change after startup [[GH-4984](https://github.com/hashicorp/nomad/pull/4984)]
 * driver/docker: Fix a path traversal issue where mounting paths outside alloc dir might be possible despite `docker.volumes.enabled` set to false [[GH-4983](https://github.com/hashicorp/nomad/pull/4983)]
 * driver/raw_exec: Fix an issue where tasks that used an interpolated command in driver configuration would not start [[GH-4813](https://github.com/hashicorp/nomad/pull/4813)]
 * server/vault: Fixed bug in Vault token renewal that could panic on a malformed Vault response [[GH-4904](https://github.com/hashicorp/nomad/issues/4904)], [[GH-4937](https://github.com/hashicorp/nomad/pull/4937)]
+* ui: Fixed an issue where distribution bar corners weren't rounded when there was only one or two slices in the chart [[GH-4507](https://github.com/hashicorp/nomad/issues/4507)]
+* ui: Fixed an issue where dispatched jobs would get the wrong template type which could cause runtime errors [[GH-4852](https://github.com/hashicorp/nomad/issues/4852)]
+* ui: Added an empty state for the tasks list on the allocation detail page, for when an alloc has no tasks [[GH-4860](https://github.com/hashicorp/nomad/issues/4860)]
+* ui: Fixed an issue where the task group breadcrumb didn't always include the namespace query param [[GH-4801](https://github.com/hashicorp/nomad/issues/4801)]
+* ui: Correctly labeled certain classes of unknown errors as 404 errors [[GH-4841](https://github.com/hashicorp/nomad/issues/4841)]

 ## 0.8.7 (January 14, 2019)
GNUmakefile

@@ -12,7 +12,7 @@ GO_TEST_CMD = $(if $(shell which gotestsum),gotestsum --,go test)

 default: help

-ifeq (,$(findstring $(THIS_OS),Darwin Linux FreeBSD))
+ifeq (,$(findstring $(THIS_OS),Darwin Linux FreeBSD Windows))
 $(error Building Nomad is currently only supported on Darwin and Linux.)
 endif
api/allocations.go

@@ -12,6 +12,20 @@ var (
    NodeDownErr = fmt.Errorf("node down")
 )

+const (
+    AllocDesiredStatusRun   = "run"   // Allocation should run
+    AllocDesiredStatusStop  = "stop"  // Allocation should stop
+    AllocDesiredStatusEvict = "evict" // Allocation should stop, and was evicted
+)
+
+const (
+    AllocClientStatusPending  = "pending"
+    AllocClientStatusRunning  = "running"
+    AllocClientStatusComplete = "complete"
+    AllocClientStatusFailed   = "failed"
+    AllocClientStatusLost     = "lost"
+)
+
 // Allocations is used to query the alloc-related endpoints.
 type Allocations struct {
    client *Client
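The constants mirrored here from nomad/structs let API consumers match allocation states without importing the server-side structs package. A minimal sketch of client-side filtering under that assumption (agent address and setup are hypothetical):

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/nomad/api"
)

func main() {
    // Connect to a local Nomad agent; configuration is hypothetical.
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }

    allocs, _, err := client.Allocations().List(nil)
    if err != nil {
        log.Fatal(err)
    }

    // Count live allocations using the api package's own constants,
    // with no dependency on nomad/structs.
    running := 0
    for _, a := range allocs {
        switch a.ClientStatus {
        case api.AllocClientStatusPending, api.AllocClientStatusRunning:
            running++
        }
    }
    fmt.Printf("%d of %d allocations are pending or running\n", running, len(allocs))
}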
api/allocations_test.go

@@ -7,7 +7,6 @@ import (
    "time"

-   "github.com/hashicorp/nomad/helper"
    "github.com/hashicorp/nomad/helper/uuid"
    "github.com/stretchr/testify/require"
 )

@@ -35,9 +34,9 @@ func TestAllocations_List(t *testing.T) {
    return

    //job := &Job{
-   //  ID:   helper.StringToPtr("job1"),
-   //  Name: helper.StringToPtr("Job #1"),
-   //  Type: helper.StringToPtr(JobTypeService),
+   //  ID:   stringToPtr("job1"),
+   //  Name: stringToPtr("Job #1"),
+   //  Type: stringToPtr(JobTypeService),
    //}
    //eval, _, err := c.Jobs().Register(job, nil)
    //if err != nil {
@@ -82,9 +81,9 @@ func TestAllocations_PrefixList(t *testing.T) {
    return

    //job := &Job{
-   //  ID:   helper.StringToPtr("job1"),
-   //  Name: helper.StringToPtr("Job #1"),
-   //  Type: helper.StringToPtr(JobTypeService),
+   //  ID:   stringToPtr("job1"),
+   //  Name: stringToPtr("Job #1"),
+   //  Type: stringToPtr(JobTypeService),
    //}

    //eval, _, err := c.Jobs().Register(job, nil)
@@ -130,13 +129,13 @@ func TestAllocations_RescheduleInfo(t *testing.T) {
    t.Parallel()
    // Create a job, task group and alloc
    job := &Job{
-       Name: helper.StringToPtr("foo"),
-       Namespace: helper.StringToPtr(DefaultNamespace),
-       ID: helper.StringToPtr("bar"),
-       ParentID: helper.StringToPtr("lol"),
+       Name: stringToPtr("foo"),
+       Namespace: stringToPtr(DefaultNamespace),
+       ID: stringToPtr("bar"),
+       ParentID: stringToPtr("lol"),
        TaskGroups: []*TaskGroup{
            {
-               Name: helper.StringToPtr("bar"),
+               Name: stringToPtr("bar"),
                Tasks: []*Task{
                    {
                        Name: "task1",
@@ -176,8 +175,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) {
        {
            desc: "no reschedule events",
            reschedulePolicy: &ReschedulePolicy{
-               Attempts: helper.IntToPtr(3),
-               Interval: helper.TimeToPtr(15 * time.Minute),
+               Attempts: intToPtr(3),
+               Interval: timeToPtr(15 * time.Minute),
            },
            expAttempted: 0,
            expTotal: 3,
@@ -185,8 +184,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) {
        {
            desc: "all reschedule events within interval",
            reschedulePolicy: &ReschedulePolicy{
-               Attempts: helper.IntToPtr(3),
-               Interval: helper.TimeToPtr(15 * time.Minute),
+               Attempts: intToPtr(3),
+               Interval: timeToPtr(15 * time.Minute),
            },
            time: time.Now(),
            rescheduleTracker: &RescheduleTracker{
@@ -202,8 +201,8 @@ func TestAllocations_RescheduleInfo(t *testing.T) {
        {
            desc: "some reschedule events outside interval",
            reschedulePolicy: &ReschedulePolicy{
-               Attempts: helper.IntToPtr(3),
-               Interval: helper.TimeToPtr(15 * time.Minute),
+               Attempts: intToPtr(3),
+               Interval: timeToPtr(15 * time.Minute),
            },
            time: time.Now(),
            rescheduleTracker: &RescheduleTracker{
@@ -242,7 +241,7 @@ func TestAllocations_RescheduleInfo(t *testing.T) {

 func TestAllocations_ShouldMigrate(t *testing.T) {
    t.Parallel()
-   require.True(t, DesiredTransition{Migrate: helper.BoolToPtr(true)}.ShouldMigrate())
+   require.True(t, DesiredTransition{Migrate: boolToPtr(true)}.ShouldMigrate())
    require.False(t, DesiredTransition{}.ShouldMigrate())
-   require.False(t, DesiredTransition{Migrate: helper.BoolToPtr(false)}.ShouldMigrate())
+   require.False(t, DesiredTransition{Migrate: boolToPtr(false)}.ShouldMigrate())
 }
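Throughout this diff, helper.StringToPtr, helper.IntToPtr, and friends give way to unexported equivalents inside the api package itself, part of the "api: avoid codegen for syncing" work that cuts the package's dependency on nomad/helper. The new helpers are not shown in this excerpt; they presumably look like this minimal sketch:

package api

import "time"

// boolToPtr returns the pointer to a boolean.
func boolToPtr(b bool) *bool { return &b }

// intToPtr returns the pointer to an int.
func intToPtr(i int) *int { return &i }

// int64ToPtr returns the pointer to an int64.
func int64ToPtr(i int64) *int64 { return &i }

// uint64ToPtr returns the pointer to a uint64.
func uint64ToPtr(u uint64) *uint64 { return &u }

// float64ToPtr returns the pointer to a float64.
func float64ToPtr(f float64) *float64 { return &f }

// stringToPtr returns the pointer to a string.
func stringToPtr(s string) *string { return &s }

// timeToPtr returns the pointer to a time.Duration.
func timeToPtr(d time.Duration) *time.Duration { return &d }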
api/compose_test.go

@@ -3,8 +3,6 @@ package api

 import (
    "reflect"
    "testing"
-
-   "github.com/hashicorp/nomad/helper"
 )

 func TestCompose(t *testing.T) {
@@ -15,13 +13,13 @@ func TestCompose(t *testing.T) {
        SetMeta("foo", "bar").
        Constrain(NewConstraint("kernel.name", "=", "linux")).
        Require(&Resources{
-           CPU: helper.IntToPtr(1250),
-           MemoryMB: helper.IntToPtr(1024),
-           DiskMB: helper.IntToPtr(2048),
+           CPU: intToPtr(1250),
+           MemoryMB: intToPtr(1024),
+           DiskMB: intToPtr(2048),
            Networks: []*NetworkResource{
                {
                    CIDR: "0.0.0.0/0",
-                   MBits: helper.IntToPtr(100),
+                   MBits: intToPtr(100),
                    ReservedPorts: []Port{{"", 80}, {"", 443}},
                },
            },
@@ -47,11 +45,11 @@ func TestCompose(t *testing.T) {

    // Check that the composed result looks correct
    expect := &Job{
-       Region: helper.StringToPtr("region1"),
-       ID: helper.StringToPtr("job1"),
-       Name: helper.StringToPtr("myjob"),
-       Type: helper.StringToPtr(JobTypeService),
-       Priority: helper.IntToPtr(2),
+       Region: stringToPtr("region1"),
+       ID: stringToPtr("job1"),
+       Name: stringToPtr("myjob"),
+       Type: stringToPtr(JobTypeService),
+       Priority: intToPtr(2),
        Datacenters: []string{
            "dc1",
        },
@@ -67,8 +65,8 @@ func TestCompose(t *testing.T) {
        },
        TaskGroups: []*TaskGroup{
            {
-               Name: helper.StringToPtr("grp1"),
-               Count: helper.IntToPtr(2),
+               Name: stringToPtr("grp1"),
+               Count: intToPtr(2),
                Constraints: []*Constraint{
                    {
                        LTarget: "kernel.name",
@@ -87,7 +85,7 @@ func TestCompose(t *testing.T) {
                Spreads: []*Spread{
                    {
                        Attribute: "${node.datacenter}",
-                       Weight: helper.IntToPtr(30),
+                       Weight: intToPtr(30),
                        SpreadTarget: []*SpreadTarget{
                            {
                                Value: "dc1",
@@ -105,13 +103,13 @@ func TestCompose(t *testing.T) {
                        Name: "task1",
                        Driver: "exec",
                        Resources: &Resources{
-                           CPU: helper.IntToPtr(1250),
-                           MemoryMB: helper.IntToPtr(1024),
-                           DiskMB: helper.IntToPtr(2048),
+                           CPU: intToPtr(1250),
+                           MemoryMB: intToPtr(1024),
+                           DiskMB: intToPtr(2048),
                            Networks: []*NetworkResource{
                                {
                                    CIDR: "0.0.0.0/0",
-                                   MBits: helper.IntToPtr(100),
+                                   MBits: intToPtr(100),
                                    ReservedPorts: []Port{
                                        {"", 80},
                                        {"", 443},
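TestCompose exercises the api package's fluent job-composition helpers. An outline of that flow, for orientation (the config keys and values here are illustrative, not from the commit):

package main

import (
    "fmt"

    "github.com/hashicorp/nomad/api"
)

func main() {
    // Compose a job from a task and a group using the builder helpers
    // that TestCompose exercises.
    task := api.NewTask("task1", "exec").
        SetConfig("command", "/bin/sleep").
        Constrain(api.NewConstraint("kernel.name", "=", "linux"))

    grp := api.NewTaskGroup("grp1", 2).AddTask(task)

    job := api.NewServiceJob("job1", "myjob", "region1", 2).
        AddDatacenter("dc1").
        SetMeta("foo", "bar").
        AddTaskGroup(grp)

    fmt.Println(*job.Name, len(job.TaskGroups)) // myjob 1
}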
api/fs_test.go

@@ -10,7 +10,6 @@ import (
    "time"

    units "github.com/docker/go-units"
-   "github.com/hashicorp/nomad/helper"
    "github.com/hashicorp/nomad/testutil"
    "github.com/kr/pretty"
    "github.com/stretchr/testify/assert"
@@ -62,13 +61,13 @@ func TestFS_Logs(t *testing.T) {
    }

    job := &Job{
-       ID: helper.StringToPtr("TestFS_Logs"),
-       Region: helper.StringToPtr("global"),
+       ID: stringToPtr("TestFS_Logs"),
+       Region: stringToPtr("global"),
        Datacenters: []string{"dc1"},
-       Type: helper.StringToPtr("batch"),
+       Type: stringToPtr("batch"),
        TaskGroups: []*TaskGroup{
            {
-               Name: helper.StringToPtr("TestFS_LogsGroup"),
+               Name: stringToPtr("TestFS_LogsGroup"),
                Tasks: []*Task{
                    {
                        Name: "logger",
api/jobs.go (115 changes)
@@ -8,8 +8,6 @@ import (
    "time"

    "github.com/gorhill/cronexpr"
-   "github.com/hashicorp/nomad/helper"
-   "github.com/hashicorp/nomad/nomad/structs"
 )

 const (
@@ -19,6 +17,9 @@ const (
    // JobTypeBatch indicates a short-lived process
    JobTypeBatch = "batch"

+   // JobTypeSystem indicates a system process that should run on all clients
+   JobTypeSystem = "system"
+
    // PeriodicSpecCron is used for a cron spec.
    PeriodicSpecCron = "cron"

@@ -373,14 +374,14 @@ type UpdateStrategy struct {
 // jobs with the old policy or for populating field defaults.
 func DefaultUpdateStrategy() *UpdateStrategy {
    return &UpdateStrategy{
-       Stagger: helper.TimeToPtr(30 * time.Second),
-       MaxParallel: helper.IntToPtr(1),
-       HealthCheck: helper.StringToPtr("checks"),
-       MinHealthyTime: helper.TimeToPtr(10 * time.Second),
-       HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
-       ProgressDeadline: helper.TimeToPtr(10 * time.Minute),
-       AutoRevert: helper.BoolToPtr(false),
-       Canary: helper.IntToPtr(0),
+       Stagger: timeToPtr(30 * time.Second),
+       MaxParallel: intToPtr(1),
+       HealthCheck: stringToPtr("checks"),
+       MinHealthyTime: timeToPtr(10 * time.Second),
+       HealthyDeadline: timeToPtr(5 * time.Minute),
+       ProgressDeadline: timeToPtr(10 * time.Minute),
+       AutoRevert: boolToPtr(false),
+       Canary: intToPtr(0),
    }
 }

@@ -392,35 +393,35 @@ func (u *UpdateStrategy) Copy() *UpdateStrategy {
    copy := new(UpdateStrategy)

    if u.Stagger != nil {
-       copy.Stagger = helper.TimeToPtr(*u.Stagger)
+       copy.Stagger = timeToPtr(*u.Stagger)
    }

    if u.MaxParallel != nil {
-       copy.MaxParallel = helper.IntToPtr(*u.MaxParallel)
+       copy.MaxParallel = intToPtr(*u.MaxParallel)
    }

    if u.HealthCheck != nil {
-       copy.HealthCheck = helper.StringToPtr(*u.HealthCheck)
+       copy.HealthCheck = stringToPtr(*u.HealthCheck)
    }

    if u.MinHealthyTime != nil {
-       copy.MinHealthyTime = helper.TimeToPtr(*u.MinHealthyTime)
+       copy.MinHealthyTime = timeToPtr(*u.MinHealthyTime)
    }

    if u.HealthyDeadline != nil {
-       copy.HealthyDeadline = helper.TimeToPtr(*u.HealthyDeadline)
+       copy.HealthyDeadline = timeToPtr(*u.HealthyDeadline)
    }

    if u.ProgressDeadline != nil {
-       copy.ProgressDeadline = helper.TimeToPtr(*u.ProgressDeadline)
+       copy.ProgressDeadline = timeToPtr(*u.ProgressDeadline)
    }

    if u.AutoRevert != nil {
-       copy.AutoRevert = helper.BoolToPtr(*u.AutoRevert)
+       copy.AutoRevert = boolToPtr(*u.AutoRevert)
    }

    if u.Canary != nil {
-       copy.Canary = helper.IntToPtr(*u.Canary)
+       copy.Canary = intToPtr(*u.Canary)
    }

    return copy
@@ -432,35 +433,35 @@ func (u *UpdateStrategy) Merge(o *UpdateStrategy) {
    }

    if o.Stagger != nil {
-       u.Stagger = helper.TimeToPtr(*o.Stagger)
+       u.Stagger = timeToPtr(*o.Stagger)
    }

    if o.MaxParallel != nil {
-       u.MaxParallel = helper.IntToPtr(*o.MaxParallel)
+       u.MaxParallel = intToPtr(*o.MaxParallel)
    }

    if o.HealthCheck != nil {
-       u.HealthCheck = helper.StringToPtr(*o.HealthCheck)
+       u.HealthCheck = stringToPtr(*o.HealthCheck)
    }

    if o.MinHealthyTime != nil {
-       u.MinHealthyTime = helper.TimeToPtr(*o.MinHealthyTime)
+       u.MinHealthyTime = timeToPtr(*o.MinHealthyTime)
    }

    if o.HealthyDeadline != nil {
-       u.HealthyDeadline = helper.TimeToPtr(*o.HealthyDeadline)
+       u.HealthyDeadline = timeToPtr(*o.HealthyDeadline)
    }

    if o.ProgressDeadline != nil {
-       u.ProgressDeadline = helper.TimeToPtr(*o.ProgressDeadline)
+       u.ProgressDeadline = timeToPtr(*o.ProgressDeadline)
    }

    if o.AutoRevert != nil {
-       u.AutoRevert = helper.BoolToPtr(*o.AutoRevert)
+       u.AutoRevert = boolToPtr(*o.AutoRevert)
    }

    if o.Canary != nil {
-       u.Canary = helper.IntToPtr(*o.Canary)
+       u.Canary = intToPtr(*o.Canary)
    }
 }

@@ -552,19 +553,19 @@ type PeriodicConfig struct {

 func (p *PeriodicConfig) Canonicalize() {
    if p.Enabled == nil {
-       p.Enabled = helper.BoolToPtr(true)
+       p.Enabled = boolToPtr(true)
    }
    if p.Spec == nil {
-       p.Spec = helper.StringToPtr("")
+       p.Spec = stringToPtr("")
    }
    if p.SpecType == nil {
-       p.SpecType = helper.StringToPtr(PeriodicSpecCron)
+       p.SpecType = stringToPtr(PeriodicSpecCron)
    }
    if p.ProhibitOverlap == nil {
-       p.ProhibitOverlap = helper.BoolToPtr(false)
+       p.ProhibitOverlap = boolToPtr(false)
    }
    if p.TimeZone == nil || *p.TimeZone == "" {
-       p.TimeZone = helper.StringToPtr("UTC")
+       p.TimeZone = stringToPtr("UTC")
    }
 }

@@ -575,13 +576,27 @@ func (p *PeriodicConfig) Canonicalize() {
 func (p *PeriodicConfig) Next(fromTime time.Time) (time.Time, error) {
    if *p.SpecType == PeriodicSpecCron {
        if e, err := cronexpr.Parse(*p.Spec); err == nil {
-           return structs.CronParseNext(e, fromTime, *p.Spec)
+           return cronParseNext(e, fromTime, *p.Spec)
        }
    }

    return time.Time{}, nil
 }

+// cronParseNext is a helper that parses the next time for the given expression
+// but captures any panic that may occur in the underlying library.
+// --- THIS FUNCTION IS REPLICATED IN nomad/structs/structs.go
+// and should be kept in sync.
+func cronParseNext(e *cronexpr.Expression, fromTime time.Time, spec string) (t time.Time, err error) {
+   defer func() {
+       if recover() != nil {
+           t = time.Time{}
+           err = fmt.Errorf("failed parsing cron expression: %q", spec)
+       }
+   }()
+
+   return e.Next(fromTime), nil
+}
 func (p *PeriodicConfig) GetLocation() (*time.Location, error) {
    if p.TimeZone == nil || *p.TimeZone == "" {
        return time.UTC, nil
@@ -644,58 +659,58 @@ func (j *Job) IsParameterized() bool {

 func (j *Job) Canonicalize() {
    if j.ID == nil {
-       j.ID = helper.StringToPtr("")
+       j.ID = stringToPtr("")
    }
    if j.Name == nil {
-       j.Name = helper.StringToPtr(*j.ID)
+       j.Name = stringToPtr(*j.ID)
    }
    if j.ParentID == nil {
-       j.ParentID = helper.StringToPtr("")
+       j.ParentID = stringToPtr("")
    }
    if j.Namespace == nil {
-       j.Namespace = helper.StringToPtr(DefaultNamespace)
+       j.Namespace = stringToPtr(DefaultNamespace)
    }
    if j.Priority == nil {
-       j.Priority = helper.IntToPtr(50)
+       j.Priority = intToPtr(50)
    }
    if j.Stop == nil {
-       j.Stop = helper.BoolToPtr(false)
+       j.Stop = boolToPtr(false)
    }
    if j.Region == nil {
-       j.Region = helper.StringToPtr("global")
+       j.Region = stringToPtr("global")
    }
    if j.Namespace == nil {
-       j.Namespace = helper.StringToPtr("default")
+       j.Namespace = stringToPtr("default")
    }
    if j.Type == nil {
-       j.Type = helper.StringToPtr("service")
+       j.Type = stringToPtr("service")
    }
    if j.AllAtOnce == nil {
-       j.AllAtOnce = helper.BoolToPtr(false)
+       j.AllAtOnce = boolToPtr(false)
    }
    if j.VaultToken == nil {
-       j.VaultToken = helper.StringToPtr("")
+       j.VaultToken = stringToPtr("")
    }
    if j.Status == nil {
-       j.Status = helper.StringToPtr("")
+       j.Status = stringToPtr("")
    }
    if j.StatusDescription == nil {
-       j.StatusDescription = helper.StringToPtr("")
+       j.StatusDescription = stringToPtr("")
    }
    if j.Stable == nil {
-       j.Stable = helper.BoolToPtr(false)
+       j.Stable = boolToPtr(false)
    }
    if j.Version == nil {
-       j.Version = helper.Uint64ToPtr(0)
+       j.Version = uint64ToPtr(0)
    }
    if j.CreateIndex == nil {
-       j.CreateIndex = helper.Uint64ToPtr(0)
+       j.CreateIndex = uint64ToPtr(0)
    }
    if j.ModifyIndex == nil {
-       j.ModifyIndex = helper.Uint64ToPtr(0)
+       j.ModifyIndex = uint64ToPtr(0)
    }
    if j.JobModifyIndex == nil {
-       j.JobModifyIndex = helper.Uint64ToPtr(0)
+       j.JobModifyIndex = uint64ToPtr(0)
    }
    if j.Periodic != nil {
        j.Periodic.Canonicalize()
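cronParseNext converts a library panic into an ordinary error by having a deferred closure overwrite the named return values. A standalone sketch of the same idiom, independent of cronexpr:

package main

import "fmt"

// safeNext demonstrates the recover idiom used by cronParseNext: a deferred
// closure overwrites the named return values when the wrapped call panics,
// so callers see a plain error instead of a crash.
func safeNext(f func() int) (n int, err error) {
    defer func() {
        if r := recover(); r != nil {
            n = 0
            err = fmt.Errorf("wrapped call panicked: %v", r)
        }
    }()
    return f(), nil
}

func main() {
    n, err := safeNext(func() int { panic("boom") })
    fmt.Println(n, err) // 0 wrapped call panicked: boom
}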
api/jobs_test.go (589 changes)
@@ -7,7 +7,6 @@ import (
    "testing"
    "time"

-   "github.com/hashicorp/nomad/helper"
    "github.com/hashicorp/nomad/nomad/mock"
    "github.com/hashicorp/nomad/testutil"
    "github.com/kr/pretty"
@@ -131,50 +130,50 @@ func TestJobs_Canonicalize(t *testing.T) {
            },
        },
        expected: &Job{
-           ID: helper.StringToPtr(""),
-           Name: helper.StringToPtr(""),
-           Region: helper.StringToPtr("global"),
-           Namespace: helper.StringToPtr(DefaultNamespace),
-           Type: helper.StringToPtr("service"),
-           ParentID: helper.StringToPtr(""),
-           Priority: helper.IntToPtr(50),
-           AllAtOnce: helper.BoolToPtr(false),
-           VaultToken: helper.StringToPtr(""),
-           Status: helper.StringToPtr(""),
-           StatusDescription: helper.StringToPtr(""),
-           Stop: helper.BoolToPtr(false),
-           Stable: helper.BoolToPtr(false),
-           Version: helper.Uint64ToPtr(0),
-           CreateIndex: helper.Uint64ToPtr(0),
-           ModifyIndex: helper.Uint64ToPtr(0),
-           JobModifyIndex: helper.Uint64ToPtr(0),
+           ID: stringToPtr(""),
+           Name: stringToPtr(""),
+           Region: stringToPtr("global"),
+           Namespace: stringToPtr(DefaultNamespace),
+           Type: stringToPtr("service"),
+           ParentID: stringToPtr(""),
+           Priority: intToPtr(50),
+           AllAtOnce: boolToPtr(false),
+           VaultToken: stringToPtr(""),
+           Status: stringToPtr(""),
+           StatusDescription: stringToPtr(""),
+           Stop: boolToPtr(false),
+           Stable: boolToPtr(false),
+           Version: uint64ToPtr(0),
+           CreateIndex: uint64ToPtr(0),
+           ModifyIndex: uint64ToPtr(0),
+           JobModifyIndex: uint64ToPtr(0),
            TaskGroups: []*TaskGroup{
                {
-                   Name: helper.StringToPtr(""),
-                   Count: helper.IntToPtr(1),
+                   Name: stringToPtr(""),
+                   Count: intToPtr(1),
                    EphemeralDisk: &EphemeralDisk{
-                       Sticky: helper.BoolToPtr(false),
-                       Migrate: helper.BoolToPtr(false),
-                       SizeMB: helper.IntToPtr(300),
+                       Sticky: boolToPtr(false),
+                       Migrate: boolToPtr(false),
+                       SizeMB: intToPtr(300),
                    },
                    RestartPolicy: &RestartPolicy{
-                       Delay: helper.TimeToPtr(15 * time.Second),
-                       Attempts: helper.IntToPtr(2),
-                       Interval: helper.TimeToPtr(30 * time.Minute),
-                       Mode: helper.StringToPtr("fail"),
+                       Delay: timeToPtr(15 * time.Second),
+                       Attempts: intToPtr(2),
+                       Interval: timeToPtr(30 * time.Minute),
+                       Mode: stringToPtr("fail"),
                    },
                    ReschedulePolicy: &ReschedulePolicy{
-                       Attempts: helper.IntToPtr(0),
-                       Interval: helper.TimeToPtr(0),
-                       DelayFunction: helper.StringToPtr("exponential"),
-                       Delay: helper.TimeToPtr(30 * time.Second),
-                       MaxDelay: helper.TimeToPtr(1 * time.Hour),
-                       Unlimited: helper.BoolToPtr(true),
+                       Attempts: intToPtr(0),
+                       Interval: timeToPtr(0),
+                       DelayFunction: stringToPtr("exponential"),
+                       Delay: timeToPtr(30 * time.Second),
+                       MaxDelay: timeToPtr(1 * time.Hour),
+                       Unlimited: boolToPtr(true),
                    },
                    Migrate: DefaultMigrateStrategy(),
                    Tasks: []*Task{
                        {
-                           KillTimeout: helper.TimeToPtr(5 * time.Second),
+                           KillTimeout: timeToPtr(5 * time.Second),
                            LogConfig: DefaultLogConfig(),
                            Resources: DefaultResources(),
                        },
@@ -186,13 +185,13 @@ func TestJobs_Canonicalize(t *testing.T) {
        {
            name: "partial",
            input: &Job{
-               Name: helper.StringToPtr("foo"),
-               Namespace: helper.StringToPtr("bar"),
-               ID: helper.StringToPtr("bar"),
-               ParentID: helper.StringToPtr("lol"),
+               Name: stringToPtr("foo"),
+               Namespace: stringToPtr("bar"),
+               ID: stringToPtr("bar"),
+               ParentID: stringToPtr("lol"),
                TaskGroups: []*TaskGroup{
                    {
-                       Name: helper.StringToPtr("bar"),
+                       Name: stringToPtr("bar"),
                        Tasks: []*Task{
                            {
                                Name: "task1",
@@ -202,45 +201,45 @@ func TestJobs_Canonicalize(t *testing.T) {
                },
            },
            expected: &Job{
-               Namespace: helper.StringToPtr("bar"),
-               ID: helper.StringToPtr("bar"),
-               Name: helper.StringToPtr("foo"),
-               Region: helper.StringToPtr("global"),
-               Type: helper.StringToPtr("service"),
-               ParentID: helper.StringToPtr("lol"),
-               Priority: helper.IntToPtr(50),
-               AllAtOnce: helper.BoolToPtr(false),
-               VaultToken: helper.StringToPtr(""),
-               Stop: helper.BoolToPtr(false),
-               Stable: helper.BoolToPtr(false),
-               Version: helper.Uint64ToPtr(0),
-               Status: helper.StringToPtr(""),
-               StatusDescription: helper.StringToPtr(""),
-               CreateIndex: helper.Uint64ToPtr(0),
-               ModifyIndex: helper.Uint64ToPtr(0),
-               JobModifyIndex: helper.Uint64ToPtr(0),
+               Namespace: stringToPtr("bar"),
+               ID: stringToPtr("bar"),
+               Name: stringToPtr("foo"),
+               Region: stringToPtr("global"),
+               Type: stringToPtr("service"),
+               ParentID: stringToPtr("lol"),
+               Priority: intToPtr(50),
+               AllAtOnce: boolToPtr(false),
+               VaultToken: stringToPtr(""),
+               Stop: boolToPtr(false),
+               Stable: boolToPtr(false),
+               Version: uint64ToPtr(0),
+               Status: stringToPtr(""),
+               StatusDescription: stringToPtr(""),
+               CreateIndex: uint64ToPtr(0),
+               ModifyIndex: uint64ToPtr(0),
+               JobModifyIndex: uint64ToPtr(0),
                TaskGroups: []*TaskGroup{
                    {
-                       Name: helper.StringToPtr("bar"),
-                       Count: helper.IntToPtr(1),
+                       Name: stringToPtr("bar"),
+                       Count: intToPtr(1),
                        EphemeralDisk: &EphemeralDisk{
-                           Sticky: helper.BoolToPtr(false),
-                           Migrate: helper.BoolToPtr(false),
-                           SizeMB: helper.IntToPtr(300),
+                           Sticky: boolToPtr(false),
+                           Migrate: boolToPtr(false),
+                           SizeMB: intToPtr(300),
                        },
                        RestartPolicy: &RestartPolicy{
-                           Delay: helper.TimeToPtr(15 * time.Second),
-                           Attempts: helper.IntToPtr(2),
-                           Interval: helper.TimeToPtr(30 * time.Minute),
-                           Mode: helper.StringToPtr("fail"),
+                           Delay: timeToPtr(15 * time.Second),
+                           Attempts: intToPtr(2),
+                           Interval: timeToPtr(30 * time.Minute),
+                           Mode: stringToPtr("fail"),
                        },
                        ReschedulePolicy: &ReschedulePolicy{
-                           Attempts: helper.IntToPtr(0),
-                           Interval: helper.TimeToPtr(0),
-                           DelayFunction: helper.StringToPtr("exponential"),
-                           Delay: helper.TimeToPtr(30 * time.Second),
-                           MaxDelay: helper.TimeToPtr(1 * time.Hour),
-                           Unlimited: helper.BoolToPtr(true),
+                           Attempts: intToPtr(0),
+                           Interval: timeToPtr(0),
+                           DelayFunction: stringToPtr("exponential"),
+                           Delay: timeToPtr(30 * time.Second),
+                           MaxDelay: timeToPtr(1 * time.Hour),
+                           Unlimited: boolToPtr(true),
                        },
                        Migrate: DefaultMigrateStrategy(),
                        Tasks: []*Task{
@@ -248,7 +247,7 @@ func TestJobs_Canonicalize(t *testing.T) {
                            Name: "task1",
                            LogConfig: DefaultLogConfig(),
                            Resources: DefaultResources(),
-                           KillTimeout: helper.TimeToPtr(5 * time.Second),
+                           KillTimeout: timeToPtr(5 * time.Second),
                        },
                    },
                },
@@ -258,25 +257,25 @@ func TestJobs_Canonicalize(t *testing.T) {
        {
            name: "example_template",
            input: &Job{
-               ID: helper.StringToPtr("example_template"),
-               Name: helper.StringToPtr("example_template"),
+               ID: stringToPtr("example_template"),
+               Name: stringToPtr("example_template"),
                Datacenters: []string{"dc1"},
-               Type: helper.StringToPtr("service"),
+               Type: stringToPtr("service"),
                Update: &UpdateStrategy{
-                   MaxParallel: helper.IntToPtr(1),
+                   MaxParallel: intToPtr(1),
                },
                TaskGroups: []*TaskGroup{
                    {
-                       Name: helper.StringToPtr("cache"),
-                       Count: helper.IntToPtr(1),
+                       Name: stringToPtr("cache"),
+                       Count: intToPtr(1),
                        RestartPolicy: &RestartPolicy{
-                           Interval: helper.TimeToPtr(5 * time.Minute),
-                           Attempts: helper.IntToPtr(10),
-                           Delay: helper.TimeToPtr(25 * time.Second),
-                           Mode: helper.StringToPtr("delay"),
+                           Interval: timeToPtr(5 * time.Minute),
+                           Attempts: intToPtr(10),
+                           Delay: timeToPtr(25 * time.Second),
+                           Mode: stringToPtr("delay"),
                        },
                        EphemeralDisk: &EphemeralDisk{
-                           SizeMB: helper.IntToPtr(300),
+                           SizeMB: intToPtr(300),
                        },
                        Tasks: []*Task{
                            {
@@ -289,11 +288,11 @@ func TestJobs_Canonicalize(t *testing.T) {
                                }},
                            },
                            Resources: &Resources{
-                               CPU: helper.IntToPtr(500),
-                               MemoryMB: helper.IntToPtr(256),
+                               CPU: intToPtr(500),
+                               MemoryMB: intToPtr(256),
                                Networks: []*NetworkResource{
                                    {
-                                       MBits: helper.IntToPtr(10),
+                                       MBits: intToPtr(10),
                                        DynamicPorts: []Port{
                                            {
                                                Label: "db",
@@ -320,14 +319,14 @@ func TestJobs_Canonicalize(t *testing.T) {
                            },
                            Templates: []*Template{
                                {
-                                   EmbeddedTmpl: helper.StringToPtr("---"),
-                                   DestPath: helper.StringToPtr("local/file.yml"),
+                                   EmbeddedTmpl: stringToPtr("---"),
+                                   DestPath: stringToPtr("local/file.yml"),
                                },
                                {
-                                   EmbeddedTmpl: helper.StringToPtr("FOO=bar\n"),
-                                   DestPath: helper.StringToPtr("local/file.env"),
-                                   Envvars: helper.BoolToPtr(true),
-                                   VaultGrace: helper.TimeToPtr(3 * time.Second),
+                                   EmbeddedTmpl: stringToPtr("FOO=bar\n"),
+                                   DestPath: stringToPtr("local/file.env"),
+                                   Envvars: boolToPtr(true),
+                                   VaultGrace: timeToPtr(3 * time.Second),
                                },
                            },
                        },
@@ -336,67 +335,67 @@ func TestJobs_Canonicalize(t *testing.T) {
                },
            },
            expected: &Job{
-               Namespace: helper.StringToPtr(DefaultNamespace),
-               ID: helper.StringToPtr("example_template"),
-               Name: helper.StringToPtr("example_template"),
-               ParentID: helper.StringToPtr(""),
-               Priority: helper.IntToPtr(50),
-               Region: helper.StringToPtr("global"),
-               Type: helper.StringToPtr("service"),
-               AllAtOnce: helper.BoolToPtr(false),
-               VaultToken: helper.StringToPtr(""),
-               Stop: helper.BoolToPtr(false),
-               Stable: helper.BoolToPtr(false),
-               Version: helper.Uint64ToPtr(0),
-               Status: helper.StringToPtr(""),
-               StatusDescription: helper.StringToPtr(""),
-               CreateIndex: helper.Uint64ToPtr(0),
-               ModifyIndex: helper.Uint64ToPtr(0),
-               JobModifyIndex: helper.Uint64ToPtr(0),
+               Namespace: stringToPtr(DefaultNamespace),
+               ID: stringToPtr("example_template"),
+               Name: stringToPtr("example_template"),
+               ParentID: stringToPtr(""),
+               Priority: intToPtr(50),
+               Region: stringToPtr("global"),
+               Type: stringToPtr("service"),
+               AllAtOnce: boolToPtr(false),
+               VaultToken: stringToPtr(""),
+               Stop: boolToPtr(false),
+               Stable: boolToPtr(false),
+               Version: uint64ToPtr(0),
+               Status: stringToPtr(""),
+               StatusDescription: stringToPtr(""),
+               CreateIndex: uint64ToPtr(0),
+               ModifyIndex: uint64ToPtr(0),
+               JobModifyIndex: uint64ToPtr(0),
                Datacenters: []string{"dc1"},
                Update: &UpdateStrategy{
-                   Stagger: helper.TimeToPtr(30 * time.Second),
-                   MaxParallel: helper.IntToPtr(1),
-                   HealthCheck: helper.StringToPtr("checks"),
-                   MinHealthyTime: helper.TimeToPtr(10 * time.Second),
-                   HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
-                   ProgressDeadline: helper.TimeToPtr(10 * time.Minute),
-                   AutoRevert: helper.BoolToPtr(false),
-                   Canary: helper.IntToPtr(0),
+                   Stagger: timeToPtr(30 * time.Second),
+                   MaxParallel: intToPtr(1),
+                   HealthCheck: stringToPtr("checks"),
+                   MinHealthyTime: timeToPtr(10 * time.Second),
+                   HealthyDeadline: timeToPtr(5 * time.Minute),
+                   ProgressDeadline: timeToPtr(10 * time.Minute),
+                   AutoRevert: boolToPtr(false),
+                   Canary: intToPtr(0),
                },
                TaskGroups: []*TaskGroup{
                    {
-                       Name: helper.StringToPtr("cache"),
-                       Count: helper.IntToPtr(1),
+                       Name: stringToPtr("cache"),
+                       Count: intToPtr(1),
                        RestartPolicy: &RestartPolicy{
-                           Interval: helper.TimeToPtr(5 * time.Minute),
-                           Attempts: helper.IntToPtr(10),
-                           Delay: helper.TimeToPtr(25 * time.Second),
-                           Mode: helper.StringToPtr("delay"),
+                           Interval: timeToPtr(5 * time.Minute),
+                           Attempts: intToPtr(10),
+                           Delay: timeToPtr(25 * time.Second),
+                           Mode: stringToPtr("delay"),
                        },
                        ReschedulePolicy: &ReschedulePolicy{
-                           Attempts: helper.IntToPtr(0),
-                           Interval: helper.TimeToPtr(0),
-                           DelayFunction: helper.StringToPtr("exponential"),
-                           Delay: helper.TimeToPtr(30 * time.Second),
-                           MaxDelay: helper.TimeToPtr(1 * time.Hour),
-                           Unlimited: helper.BoolToPtr(true),
+                           Attempts: intToPtr(0),
+                           Interval: timeToPtr(0),
+                           DelayFunction: stringToPtr("exponential"),
+                           Delay: timeToPtr(30 * time.Second),
+                           MaxDelay: timeToPtr(1 * time.Hour),
+                           Unlimited: boolToPtr(true),
                        },
                        EphemeralDisk: &EphemeralDisk{
-                           Sticky: helper.BoolToPtr(false),
-                           Migrate: helper.BoolToPtr(false),
-                           SizeMB: helper.IntToPtr(300),
+                           Sticky: boolToPtr(false),
+                           Migrate: boolToPtr(false),
+                           SizeMB: intToPtr(300),
                        },

                        Update: &UpdateStrategy{
-                           Stagger: helper.TimeToPtr(30 * time.Second),
-                           MaxParallel: helper.IntToPtr(1),
-                           HealthCheck: helper.StringToPtr("checks"),
-                           MinHealthyTime: helper.TimeToPtr(10 * time.Second),
-                           HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
-                           ProgressDeadline: helper.TimeToPtr(10 * time.Minute),
-                           AutoRevert: helper.BoolToPtr(false),
-                           Canary: helper.IntToPtr(0),
+                           Stagger: timeToPtr(30 * time.Second),
+                           MaxParallel: intToPtr(1),
+                           HealthCheck: stringToPtr("checks"),
+                           MinHealthyTime: timeToPtr(10 * time.Second),
+                           HealthyDeadline: timeToPtr(5 * time.Minute),
+                           ProgressDeadline: timeToPtr(10 * time.Minute),
+                           AutoRevert: boolToPtr(false),
+                           Canary: intToPtr(0),
                        },
                        Migrate: DefaultMigrateStrategy(),
                        Tasks: []*Task{
@@ -410,11 +409,11 @@ func TestJobs_Canonicalize(t *testing.T) {
                                }},
                            },
                            Resources: &Resources{
-                               CPU: helper.IntToPtr(500),
-                               MemoryMB: helper.IntToPtr(256),
+                               CPU: intToPtr(500),
+                               MemoryMB: intToPtr(256),
                                Networks: []*NetworkResource{
                                    {
-                                       MBits: helper.IntToPtr(10),
+                                       MBits: intToPtr(10),
                                        DynamicPorts: []Port{
                                            {
                                                Label: "db",
@@ -440,34 +439,34 @@ func TestJobs_Canonicalize(t *testing.T) {
                                },
                            },
                        },
-                       KillTimeout: helper.TimeToPtr(5 * time.Second),
+                       KillTimeout: timeToPtr(5 * time.Second),
                        LogConfig: DefaultLogConfig(),
                        Templates: []*Template{
                            {
-                               SourcePath: helper.StringToPtr(""),
-                               DestPath: helper.StringToPtr("local/file.yml"),
-                               EmbeddedTmpl: helper.StringToPtr("---"),
-                               ChangeMode: helper.StringToPtr("restart"),
-                               ChangeSignal: helper.StringToPtr(""),
-                               Splay: helper.TimeToPtr(5 * time.Second),
-                               Perms: helper.StringToPtr("0644"),
-                               LeftDelim: helper.StringToPtr("{{"),
-                               RightDelim: helper.StringToPtr("}}"),
-                               Envvars: helper.BoolToPtr(false),
-                               VaultGrace: helper.TimeToPtr(15 * time.Second),
+                               SourcePath: stringToPtr(""),
+                               DestPath: stringToPtr("local/file.yml"),
+                               EmbeddedTmpl: stringToPtr("---"),
+                               ChangeMode: stringToPtr("restart"),
+                               ChangeSignal: stringToPtr(""),
+                               Splay: timeToPtr(5 * time.Second),
+                               Perms: stringToPtr("0644"),
+                               LeftDelim: stringToPtr("{{"),
+                               RightDelim: stringToPtr("}}"),
+                               Envvars: boolToPtr(false),
+                               VaultGrace: timeToPtr(15 * time.Second),
                            },
                            {
-                               SourcePath: helper.StringToPtr(""),
-                               DestPath: helper.StringToPtr("local/file.env"),
-                               EmbeddedTmpl: helper.StringToPtr("FOO=bar\n"),
-                               ChangeMode: helper.StringToPtr("restart"),
-                               ChangeSignal: helper.StringToPtr(""),
-                               Splay: helper.TimeToPtr(5 * time.Second),
-                               Perms: helper.StringToPtr("0644"),
-                               LeftDelim: helper.StringToPtr("{{"),
-                               RightDelim: helper.StringToPtr("}}"),
-                               Envvars: helper.BoolToPtr(true),
-                               VaultGrace: helper.TimeToPtr(3 * time.Second),
+                               SourcePath: stringToPtr(""),
+                               DestPath: stringToPtr("local/file.env"),
+                               EmbeddedTmpl: stringToPtr("FOO=bar\n"),
+                               ChangeMode: stringToPtr("restart"),
+                               ChangeSignal: stringToPtr(""),
+                               Splay: timeToPtr(5 * time.Second),
+                               Perms: stringToPtr("0644"),
+                               LeftDelim: stringToPtr("{{"),
+                               RightDelim: stringToPtr("}}"),
+                               Envvars: boolToPtr(true),
+                               VaultGrace: timeToPtr(3 * time.Second),
                            },
                        },
                    },
@@ -480,33 +479,33 @@ func TestJobs_Canonicalize(t *testing.T) {
        {
            name: "periodic",
            input: &Job{
-               ID: helper.StringToPtr("bar"),
+               ID: stringToPtr("bar"),
                Periodic: &PeriodicConfig{},
            },
            expected: &Job{
-               Namespace: helper.StringToPtr(DefaultNamespace),
-               ID: helper.StringToPtr("bar"),
-               ParentID: helper.StringToPtr(""),
-               Name: helper.StringToPtr("bar"),
-               Region: helper.StringToPtr("global"),
-               Type: helper.StringToPtr("service"),
-               Priority: helper.IntToPtr(50),
-               AllAtOnce: helper.BoolToPtr(false),
-               VaultToken: helper.StringToPtr(""),
-               Stop: helper.BoolToPtr(false),
-               Stable: helper.BoolToPtr(false),
-               Version: helper.Uint64ToPtr(0),
-               Status: helper.StringToPtr(""),
-               StatusDescription: helper.StringToPtr(""),
-               CreateIndex: helper.Uint64ToPtr(0),
-               ModifyIndex: helper.Uint64ToPtr(0),
-               JobModifyIndex: helper.Uint64ToPtr(0),
+               Namespace: stringToPtr(DefaultNamespace),
+               ID: stringToPtr("bar"),
+               ParentID: stringToPtr(""),
+               Name: stringToPtr("bar"),
+               Region: stringToPtr("global"),
+               Type: stringToPtr("service"),
+               Priority: intToPtr(50),
+               AllAtOnce: boolToPtr(false),
+               VaultToken: stringToPtr(""),
+               Stop: boolToPtr(false),
+               Stable: boolToPtr(false),
+               Version: uint64ToPtr(0),
+               Status: stringToPtr(""),
+               StatusDescription: stringToPtr(""),
+               CreateIndex: uint64ToPtr(0),
+               ModifyIndex: uint64ToPtr(0),
+               JobModifyIndex: uint64ToPtr(0),
                Periodic: &PeriodicConfig{
-                   Enabled: helper.BoolToPtr(true),
-                   Spec: helper.StringToPtr(""),
-                   SpecType: helper.StringToPtr(PeriodicSpecCron),
-                   ProhibitOverlap: helper.BoolToPtr(false),
-                   TimeZone: helper.StringToPtr("UTC"),
+                   Enabled: boolToPtr(true),
+                   Spec: stringToPtr(""),
+                   SpecType: stringToPtr(PeriodicSpecCron),
+                   ProhibitOverlap: boolToPtr(false),
+                   TimeZone: stringToPtr("UTC"),
                },
            },
        },
@@ -514,29 +513,29 @@ func TestJobs_Canonicalize(t *testing.T) {
        {
            name: "update_merge",
            input: &Job{
-               Name: helper.StringToPtr("foo"),
-               ID: helper.StringToPtr("bar"),
-               ParentID: helper.StringToPtr("lol"),
+               Name: stringToPtr("foo"),
+               ID: stringToPtr("bar"),
+               ParentID: stringToPtr("lol"),
                Update: &UpdateStrategy{
-                   Stagger: helper.TimeToPtr(1 * time.Second),
-                   MaxParallel: helper.IntToPtr(1),
-                   HealthCheck: helper.StringToPtr("checks"),
-                   MinHealthyTime: helper.TimeToPtr(10 * time.Second),
-                   HealthyDeadline: helper.TimeToPtr(6 * time.Minute),
-                   ProgressDeadline: helper.TimeToPtr(7 * time.Minute),
-                   AutoRevert: helper.BoolToPtr(false),
-                   Canary: helper.IntToPtr(0),
+                   Stagger: timeToPtr(1 * time.Second),
+                   MaxParallel: intToPtr(1),
+                   HealthCheck: stringToPtr("checks"),
+                   MinHealthyTime: timeToPtr(10 * time.Second),
+                   HealthyDeadline: timeToPtr(6 * time.Minute),
+                   ProgressDeadline: timeToPtr(7 * time.Minute),
+                   AutoRevert: boolToPtr(false),
+                   Canary: intToPtr(0),
                },
                TaskGroups: []*TaskGroup{
                    {
-                       Name: helper.StringToPtr("bar"),
+                       Name: stringToPtr("bar"),
                        Update: &UpdateStrategy{
-                           Stagger: helper.TimeToPtr(2 * time.Second),
-                           MaxParallel: helper.IntToPtr(2),
-                           HealthCheck: helper.StringToPtr("manual"),
-                           MinHealthyTime: helper.TimeToPtr(1 * time.Second),
-                           AutoRevert: helper.BoolToPtr(true),
-                           Canary: helper.IntToPtr(1),
+                           Stagger: timeToPtr(2 * time.Second),
+                           MaxParallel: intToPtr(2),
+                           HealthCheck: stringToPtr("manual"),
+                           MinHealthyTime: timeToPtr(1 * time.Second),
+                           AutoRevert: boolToPtr(true),
+                           Canary: intToPtr(1),
                        },
                        Tasks: []*Task{
                            {
@@ -545,7 +544,7 @@ func TestJobs_Canonicalize(t *testing.T) {
                        },
                    },
                    {
-                       Name: helper.StringToPtr("baz"),
+                       Name: stringToPtr("baz"),
                        Tasks: []*Task{
                            {
                                Name: "task1",
@@ -555,65 +554,65 @@ func TestJobs_Canonicalize(t *testing.T) {
                },
            },
            expected: &Job{
-               Namespace: helper.StringToPtr(DefaultNamespace),
-               ID: helper.StringToPtr("bar"),
-               Name: helper.StringToPtr("foo"),
-               Region: helper.StringToPtr("global"),
-               Type: helper.StringToPtr("service"),
-               ParentID: helper.StringToPtr("lol"),
-               Priority: helper.IntToPtr(50),
-               AllAtOnce: helper.BoolToPtr(false),
-               VaultToken: helper.StringToPtr(""),
-               Stop: helper.BoolToPtr(false),
-               Stable: helper.BoolToPtr(false),
-               Version: helper.Uint64ToPtr(0),
-               Status: helper.StringToPtr(""),
-               StatusDescription: helper.StringToPtr(""),
-               CreateIndex: helper.Uint64ToPtr(0),
-               ModifyIndex: helper.Uint64ToPtr(0),
-               JobModifyIndex: helper.Uint64ToPtr(0),
+               Namespace: stringToPtr(DefaultNamespace),
+               ID: stringToPtr("bar"),
+               Name: stringToPtr("foo"),
+               Region: stringToPtr("global"),
+               Type: stringToPtr("service"),
+               ParentID: stringToPtr("lol"),
+               Priority: intToPtr(50),
+               AllAtOnce: boolToPtr(false),
+               VaultToken: stringToPtr(""),
+               Stop: boolToPtr(false),
+               Stable: boolToPtr(false),
+               Version: uint64ToPtr(0),
+               Status: stringToPtr(""),
+               StatusDescription: stringToPtr(""),
+               CreateIndex: uint64ToPtr(0),
+               ModifyIndex: uint64ToPtr(0),
+               JobModifyIndex: uint64ToPtr(0),
                Update: &UpdateStrategy{
-                   Stagger: helper.TimeToPtr(1 * time.Second),
-                   MaxParallel: helper.IntToPtr(1),
-                   HealthCheck: helper.StringToPtr("checks"),
-                   MinHealthyTime: helper.TimeToPtr(10 * time.Second),
-                   HealthyDeadline: helper.TimeToPtr(6 * time.Minute),
-                   ProgressDeadline: helper.TimeToPtr(7 * time.Minute),
-                   AutoRevert: helper.BoolToPtr(false),
-                   Canary: helper.IntToPtr(0),
+                   Stagger: timeToPtr(1 * time.Second),
+                   MaxParallel: intToPtr(1),
+                   HealthCheck: stringToPtr("checks"),
+                   MinHealthyTime: timeToPtr(10 * time.Second),
+                   HealthyDeadline: timeToPtr(6 * time.Minute),
+                   ProgressDeadline: timeToPtr(7 * time.Minute),
+                   AutoRevert: boolToPtr(false),
+                   Canary: intToPtr(0),
                },
                TaskGroups: []*TaskGroup{
                    {
-                       Name: helper.StringToPtr("bar"),
-                       Count: helper.IntToPtr(1),
+                       Name: stringToPtr("bar"),
+                       Count: intToPtr(1),
                        EphemeralDisk: &EphemeralDisk{
-                           Sticky: helper.BoolToPtr(false),
-                           Migrate: helper.BoolToPtr(false),
-                           SizeMB: helper.IntToPtr(300),
+                           Sticky: boolToPtr(false),
+                           Migrate: boolToPtr(false),
+                           SizeMB: intToPtr(300),
                        },
                        RestartPolicy: &RestartPolicy{
-                           Delay: helper.TimeToPtr(15 * time.Second),
-                           Attempts: helper.IntToPtr(2),
-                           Interval: helper.TimeToPtr(30 * time.Minute),
-                           Mode: helper.StringToPtr("fail"),
+                           Delay: timeToPtr(15 * time.Second),
+                           Attempts: intToPtr(2),
+                           Interval: timeToPtr(30 * time.Minute),
+                           Mode: stringToPtr("fail"),
                        },
                        ReschedulePolicy: &ReschedulePolicy{
-                           Attempts: helper.IntToPtr(0),
-                           Interval: helper.TimeToPtr(0),
-                           DelayFunction: helper.StringToPtr("exponential"),
-                           Delay: helper.TimeToPtr(30 * time.Second),
-                           MaxDelay: helper.TimeToPtr(1 * time.Hour),
-                           Unlimited: helper.BoolToPtr(true),
+                           Attempts: intToPtr(0),
+                           Interval: timeToPtr(0),
+                           DelayFunction: stringToPtr("exponential"),
+                           Delay: timeToPtr(30 * time.Second),
+                           MaxDelay: timeToPtr(1 * time.Hour),
+                           Unlimited: boolToPtr(true),
                        },
                        Update: &UpdateStrategy{
-                           Stagger: helper.TimeToPtr(2 * time.Second),
-                           MaxParallel: helper.IntToPtr(2),
-                           HealthCheck: helper.StringToPtr("manual"),
-                           MinHealthyTime: helper.TimeToPtr(1 * time.Second),
-                           HealthyDeadline: helper.TimeToPtr(6 * time.Minute),
-                           ProgressDeadline: helper.TimeToPtr(7 * time.Minute),
-                           AutoRevert: helper.BoolToPtr(true),
-                           Canary: helper.IntToPtr(1),
+                           Stagger: timeToPtr(2 * time.Second),
+                           MaxParallel: intToPtr(2),
+                           HealthCheck: stringToPtr("manual"),
+                           MinHealthyTime: timeToPtr(1 * time.Second),
+                           HealthyDeadline: timeToPtr(6 * time.Minute),
+                           ProgressDeadline: timeToPtr(7 * time.Minute),
+                           AutoRevert: boolToPtr(true),
+                           Canary: intToPtr(1),
                        },
                        Migrate: DefaultMigrateStrategy(),
                        Tasks: []*Task{
@@ -621,41 +620,41 @@ func TestJobs_Canonicalize(t *testing.T) {
                            Name: "task1",
                            LogConfig: DefaultLogConfig(),
                            Resources: DefaultResources(),
-                           KillTimeout: helper.TimeToPtr(5 * time.Second),
+                           KillTimeout: timeToPtr(5 * time.Second),
                        },
                    },
                },
                {
-                   Name: helper.StringToPtr("baz"),
-                   Count: helper.IntToPtr(1),
+                   Name: stringToPtr("baz"),
+                   Count: intToPtr(1),
                    EphemeralDisk: &EphemeralDisk{
-                       Sticky: helper.BoolToPtr(false),
-                       Migrate: helper.BoolToPtr(false),
-                       SizeMB: helper.IntToPtr(300),
+                       Sticky: boolToPtr(false),
+                       Migrate: boolToPtr(false),
+                       SizeMB: intToPtr(300),
                    },
                    RestartPolicy: &RestartPolicy{
-                       Delay: helper.TimeToPtr(15 * time.Second),
-                       Attempts: helper.IntToPtr(2),
-                       Interval: helper.TimeToPtr(30 * time.Minute),
-                       Mode: helper.StringToPtr("fail"),
+                       Delay: timeToPtr(15 * time.Second),
+                       Attempts: intToPtr(2),
+                       Interval: timeToPtr(30 * time.Minute),
+                       Mode: stringToPtr("fail"),
                    },
                    ReschedulePolicy: &ReschedulePolicy{
-                       Attempts: helper.IntToPtr(0),
-                       Interval: helper.TimeToPtr(0),
-                       DelayFunction: helper.StringToPtr("exponential"),
-                       Delay: helper.TimeToPtr(30 * time.Second),
-                       MaxDelay: helper.TimeToPtr(1 * time.Hour),
-                       Unlimited: helper.BoolToPtr(true),
+                       Attempts: intToPtr(0),
+                       Interval: timeToPtr(0),
+                       DelayFunction: stringToPtr("exponential"),
+                       Delay: timeToPtr(30 * time.Second),
+                       MaxDelay: timeToPtr(1 * time.Hour),
+                       Unlimited: boolToPtr(true),
                    },
                    Update: &UpdateStrategy{
-                       Stagger: helper.TimeToPtr(1 * time.Second),
-                       MaxParallel: helper.IntToPtr(1),
-                       HealthCheck: helper.StringToPtr("checks"),
-                       MinHealthyTime: helper.TimeToPtr(10 * time.Second),
-                       HealthyDeadline: helper.TimeToPtr(6 * time.Minute),
-                       ProgressDeadline: helper.TimeToPtr(7 * time.Minute),
-                       AutoRevert: helper.BoolToPtr(false),
-                       Canary: helper.IntToPtr(0),
+                       Stagger: timeToPtr(1 * time.Second),
+                       MaxParallel: intToPtr(1),
+                       HealthCheck: stringToPtr("checks"),
+                       MinHealthyTime: timeToPtr(10 * time.Second),
+                       HealthyDeadline: timeToPtr(6 * time.Minute),
+                       ProgressDeadline: timeToPtr(7 * time.Minute),
+                       AutoRevert: boolToPtr(false),
+                       Canary: intToPtr(0),
                    },
                    Migrate: DefaultMigrateStrategy(),
                    Tasks: []*Task{
@@ -663,7 +662,7 @@ func TestJobs_Canonicalize(t *testing.T) {
                        Name: "task1",
                        LogConfig: DefaultLogConfig(),
                        Resources: DefaultResources(),
-                       KillTimeout: helper.TimeToPtr(5 * time.Second),
+                       KillTimeout: timeToPtr(5 * time.Second),
                    },
                },
            },
@@ -756,13 +755,13 @@ func TestJobs_Revert(t *testing.T) {
    assertWriteMeta(t, wm)

    // Fail revert at incorrect enforce
-   _, _, err = jobs.Revert(*job.ID, 0, helper.Uint64ToPtr(10), nil)
+   _, _, err = jobs.Revert(*job.ID, 0, uint64ToPtr(10), nil)
    if err == nil || !strings.Contains(err.Error(), "enforcing version") {
        t.Fatalf("expected enforcement error: %v", err)
    }

    // Works at correct index
-   revertResp, wm, err := jobs.Revert(*job.ID, 0, helper.Uint64ToPtr(1), nil)
+   revertResp, wm, err := jobs.Revert(*job.ID, 0, uint64ToPtr(1), nil)
    if err != nil {
        t.Fatalf("err: %s", err)
    }
@@ -1248,11 +1247,11 @@ func TestJobs_NewBatchJob(t *testing.T) {
    t.Parallel()
    job := NewBatchJob("job1", "myjob", "region1", 5)
    expect := &Job{
-       Region: helper.StringToPtr("region1"),
-       ID: helper.StringToPtr("job1"),
-       Name: helper.StringToPtr("myjob"),
-       Type: helper.StringToPtr(JobTypeBatch),
-       Priority: helper.IntToPtr(5),
+       Region: stringToPtr("region1"),
+       ID: stringToPtr("job1"),
+       Name: stringToPtr("myjob"),
+       Type: stringToPtr(JobTypeBatch),
+       Priority: intToPtr(5),
    }
    if !reflect.DeepEqual(job, expect) {
        t.Fatalf("expect: %#v, got: %#v", expect, job)
@@ -1263,11 +1262,11 @@ func TestJobs_NewServiceJob(t *testing.T) {
    t.Parallel()
    job := NewServiceJob("job1", "myjob", "region1", 5)
    expect := &Job{
-       Region: helper.StringToPtr("region1"),
-       ID: helper.StringToPtr("job1"),
-       Name: helper.StringToPtr("myjob"),
-       Type: helper.StringToPtr(JobTypeService),
-       Priority: helper.IntToPtr(5),
+       Region: stringToPtr("region1"),
+       ID: stringToPtr("job1"),
+       Name: stringToPtr("myjob"),
+       Type: stringToPtr(JobTypeService),
+       Priority: intToPtr(5),
    }
    if !reflect.DeepEqual(job, expect) {
        t.Fatalf("expect: %#v, got: %#v", expect, job)
@@ -1413,7 +1412,7 @@ func TestJobs_AddSpread(t *testing.T) {
    expect := []*Spread{
        {
            Attribute: "${meta.rack}",
-           Weight: helper.IntToPtr(100),
+           Weight: intToPtr(100),
            SpreadTarget: []*SpreadTarget{
                {
                    Value: "r1",
@@ -1423,7 +1422,7 @@ func TestJobs_AddSpread(t *testing.T) {
        },
        {
            Attribute: "${node.datacenter}",
-           Weight: helper.IntToPtr(100),
+           Weight: intToPtr(100),
            SpreadTarget: []*SpreadTarget{
                {
                    Value: "dc1",
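The canonicalization behavior these cases pin down is easy to see directly. A minimal sketch, with the default values taken from the expectations asserted above:

package main

import (
    "fmt"

    "github.com/hashicorp/nomad/api"
)

func main() {
    id := "example"
    // A job with only an ID set; Canonicalize fills the remaining
    // pointer fields with the documented defaults.
    job := &api.Job{ID: &id}
    job.Canonicalize()

    fmt.Println(*job.Name)     // "example" (defaults to the ID)
    fmt.Println(*job.Region)   // "global"
    fmt.Println(*job.Type)     // "service"
    fmt.Println(*job.Priority) // 50
}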
api/nodes.go (29 changes)
@@ -6,9 +6,18 @@ import (
    "sort"
    "strconv"
    "time"
-
-   "github.com/hashicorp/nomad/helper"
-   "github.com/hashicorp/nomad/nomad/structs"
 )

+const (
+   NodeStatusInit = "initializing"
+   NodeStatusReady = "ready"
+   NodeStatusDown = "down"
+
+   // NodeSchedulingEligible and Ineligible mark the node as eligible or not,
+   // respectively, for receiving allocations. This is orthogonal to the node
+   // status being ready.
+   NodeSchedulingEligible = "eligible"
+   NodeSchedulingIneligible = "ineligible"
+)
+
 // Nodes is used to query node-related API endpoints
@@ -224,7 +233,7 @@ func (n *Nodes) monitorDrainNode(ctx context.Context, nodeID string,
        return
    }

-   if node.Status == structs.NodeStatusDown {
+   if node.Status == NodeStatusDown {
        msg := Messagef(MonitorMsgLevelWarn, "Node %q down", nodeID)
        select {
        case nodeCh <- msg:
@@ -295,7 +304,7 @@ func (n *Nodes) monitorDrainAllocs(ctx context.Context, nodeID string, ignoreSys
            // Alloc was marked for migration
            msg = "marked for migration"

-       case migrating && (orig.DesiredStatus != a.DesiredStatus) && a.DesiredStatus == structs.AllocDesiredStatusStop:
+       case migrating && (orig.DesiredStatus != a.DesiredStatus) && a.DesiredStatus == AllocDesiredStatusStop:
            // Alloc has already been marked for migration and is now being stopped
            msg = "draining"
        }
@@ -314,12 +323,12 @@ func (n *Nodes) monitorDrainAllocs(ctx context.Context, nodeID string, ignoreSys
        }

        // Track how many allocs are still running
-       if ignoreSys && a.Job.Type != nil && *a.Job.Type == structs.JobTypeSystem {
+       if ignoreSys && a.Job.Type != nil && *a.Job.Type == JobTypeSystem {
            continue
        }

        switch a.ClientStatus {
-       case structs.AllocClientStatusPending, structs.AllocClientStatusRunning:
+       case AllocClientStatusPending, AllocClientStatusRunning:
            runningAllocs++
        }
    }
@@ -353,9 +362,9 @@ type NodeEligibilityUpdateResponse struct {

 // ToggleEligibility is used to update the scheduling eligibility of the node
 func (n *Nodes) ToggleEligibility(nodeID string, eligible bool, q *WriteOptions) (*NodeEligibilityUpdateResponse, error) {
-   e := structs.NodeSchedulingEligible
+   e := NodeSchedulingEligible
    if !eligible {
-       e = structs.NodeSchedulingIneligible
+       e = NodeSchedulingIneligible
    }

    req := &NodeUpdateEligibilityRequest{
@@ -662,9 +671,9 @@ func (v *StatValue) String() string {
    case v.StringVal != nil:
        return *v.StringVal
    case v.FloatNumeratorVal != nil:
-       str := helper.FormatFloat(*v.FloatNumeratorVal, 3)
+       str := formatFloat(*v.FloatNumeratorVal, 3)
        if v.FloatDenominatorVal != nil {
-           str += " / " + helper.FormatFloat(*v.FloatDenominatorVal, 3)
+           str += " / " + formatFloat(*v.FloatDenominatorVal, 3)
        }

        if v.Unit != "" {
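helper.FormatFloat likewise gives way to a package-local formatFloat that this excerpt doesn't show. A plausible sketch matching the call sites above, formatting a float with at most maxPrec digits after the decimal point (implementation assumed, not taken from the commit):

import (
    "strconv"
    "strings"
)

// formatFloat truncates the fraction of f to at most maxPrec digits;
// whole numbers print without a decimal point.
func formatFloat(f float64, maxPrec int) string {
    v := strconv.FormatFloat(f, 'f', -1, 64)

    idx := strings.LastIndex(v, ".")
    if idx == -1 {
        return v
    }

    sublen := idx + maxPrec + 1
    if sublen > len(v) {
        sublen = len(v)
    }
    return v[:sublen]
}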
@@ -9,7 +9,6 @@ import (
 	"testing"
 	"time"

-	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/helper/uuid"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/testutil"
@@ -185,8 +184,8 @@ func TestNodes_ToggleDrain(t *testing.T) {
 	// Check again
 	out, _, err = nodes.Info(nodeID, nil)
 	require.Nil(err)
-	if out.SchedulingEligibility != structs.NodeSchedulingIneligible {
-		t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, structs.NodeSchedulingIneligible)
+	if out.SchedulingEligibility != NodeSchedulingIneligible {
+		t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, NodeSchedulingIneligible)
 	}

 	// Toggle off again
@@ -203,7 +202,7 @@ func TestNodes_ToggleDrain(t *testing.T) {
 	if out.DrainStrategy != nil {
 		t.Fatalf("drain strategy should be unset")
 	}
-	if out.SchedulingEligibility != structs.NodeSchedulingEligible {
+	if out.SchedulingEligibility != NodeSchedulingEligible {
 		t.Fatalf("should be eligible")
 	}
 }
@@ -237,7 +236,7 @@ func TestNodes_ToggleEligibility(t *testing.T) {
 	if err != nil {
 		t.Fatalf("err: %s", err)
 	}
-	if out.SchedulingEligibility != structs.NodeSchedulingEligible {
+	if out.SchedulingEligibility != NodeSchedulingEligible {
 		t.Fatalf("node should be eligible")
 	}

@@ -253,8 +252,8 @@ func TestNodes_ToggleEligibility(t *testing.T) {
 	if err != nil {
 		t.Fatalf("err: %s", err)
 	}
-	if out.SchedulingEligibility != structs.NodeSchedulingIneligible {
-		t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, structs.NodeSchedulingIneligible)
+	if out.SchedulingEligibility != NodeSchedulingIneligible {
+		t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, NodeSchedulingIneligible)
 	}

 	// Toggle on
@@ -269,8 +268,8 @@ func TestNodes_ToggleEligibility(t *testing.T) {
 	if err != nil {
 		t.Fatalf("err: %s", err)
 	}
-	if out.SchedulingEligibility != structs.NodeSchedulingEligible {
-		t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, structs.NodeSchedulingEligible)
+	if out.SchedulingEligibility != NodeSchedulingEligible {
+		t.Fatalf("bad eligibility: %v vs %v", out.SchedulingEligibility, NodeSchedulingEligible)
 	}
 	if out.DrainStrategy != nil {
 		t.Fatalf("drain strategy should be unset")
@@ -542,69 +541,69 @@ func TestNodeStatValueFormatting(t *testing.T) {
 	}{
 		{
 			"true",
-			StatValue{BoolVal: helper.BoolToPtr(true)},
+			StatValue{BoolVal: boolToPtr(true)},
 		},
 		{
 			"false",
-			StatValue{BoolVal: helper.BoolToPtr(false)},
+			StatValue{BoolVal: boolToPtr(false)},
 		},
 		{
 			"myvalue",
-			StatValue{StringVal: helper.StringToPtr("myvalue")},
+			StatValue{StringVal: stringToPtr("myvalue")},
 		},
 		{
 			"2.718",
 			StatValue{
-				FloatNumeratorVal: helper.Float64ToPtr(2.718),
+				FloatNumeratorVal: float64ToPtr(2.718),
 			},
 		},
 		{
 			"2.718 / 3.14",
 			StatValue{
-				FloatNumeratorVal:   helper.Float64ToPtr(2.718),
-				FloatDenominatorVal: helper.Float64ToPtr(3.14),
+				FloatNumeratorVal:   float64ToPtr(2.718),
+				FloatDenominatorVal: float64ToPtr(3.14),
 			},
 		},
 		{
 			"2.718 MHz",
 			StatValue{
-				FloatNumeratorVal: helper.Float64ToPtr(2.718),
+				FloatNumeratorVal: float64ToPtr(2.718),
 				Unit:              "MHz",
 			},
 		},
 		{
 			"2.718 / 3.14 MHz",
 			StatValue{
-				FloatNumeratorVal:   helper.Float64ToPtr(2.718),
-				FloatDenominatorVal: helper.Float64ToPtr(3.14),
+				FloatNumeratorVal:   float64ToPtr(2.718),
+				FloatDenominatorVal: float64ToPtr(3.14),
 				Unit:                "MHz",
 			},
 		},
 		{
 			"2",
 			StatValue{
-				IntNumeratorVal: helper.Int64ToPtr(2),
+				IntNumeratorVal: int64ToPtr(2),
 			},
 		},
 		{
 			"2 / 3",
 			StatValue{
-				IntNumeratorVal:   helper.Int64ToPtr(2),
-				IntDenominatorVal: helper.Int64ToPtr(3),
+				IntNumeratorVal:   int64ToPtr(2),
+				IntDenominatorVal: int64ToPtr(3),
 			},
 		},
 		{
 			"2 MHz",
 			StatValue{
-				IntNumeratorVal: helper.Int64ToPtr(2),
+				IntNumeratorVal: int64ToPtr(2),
 				Unit:            "MHz",
 			},
 		},
 		{
 			"2 / 3 MHz",
 			StatValue{
-				IntNumeratorVal:   helper.Int64ToPtr(2),
-				IntDenominatorVal: helper.Int64ToPtr(3),
+				IntNumeratorVal:   int64ToPtr(2),
+				IntDenominatorVal: int64ToPtr(3),
 				Unit:              "MHz",
 			},
 		},
@@ -2,8 +2,6 @@ package api

 import (
 	"strconv"
-
-	"github.com/hashicorp/nomad/helper"
 )

 // Resources encapsulates the required resources of
@@ -46,8 +44,8 @@ func (r *Resources) Canonicalize() {
 // and should be kept in sync.
 func DefaultResources() *Resources {
 	return &Resources{
-		CPU:      helper.IntToPtr(100),
-		MemoryMB: helper.IntToPtr(300),
+		CPU:      intToPtr(100),
+		MemoryMB: intToPtr(300),
 	}
 }

@@ -58,8 +56,8 @@ func DefaultResources() *Resources {
 // IN nomad/structs/structs.go and should be kept in sync.
 func MinResources() *Resources {
 	return &Resources{
-		CPU:      helper.IntToPtr(20),
-		MemoryMB: helper.IntToPtr(10),
+		CPU:      intToPtr(20),
+		MemoryMB: intToPtr(10),
 	}
 }

@@ -103,7 +101,7 @@ type NetworkResource struct {

 func (n *NetworkResource) Canonicalize() {
 	if n.MBits == nil {
-		n.MBits = helper.IntToPtr(10)
+		n.MBits = intToPtr(10)
 	}
 }

@@ -169,7 +167,7 @@ type Attribute struct {
 func (a Attribute) String() string {
 	switch {
 	case a.FloatVal != nil:
-		str := helper.FormatFloat(*a.FloatVal, 3)
+		str := formatFloat(*a.FloatVal, 3)
 		if a.Unit != "" {
 			str += " " + a.Unit
 		}

@@ -223,6 +221,6 @@ type RequestedDevice struct {

 func (d *RequestedDevice) Canonicalize() {
 	if d.Count == nil {
-		d.Count = helper.Uint64ToPtr(1)
+		d.Count = uint64ToPtr(1)
 	}
 }

api/tasks.go (156 lines changed)

@@ -6,9 +6,16 @@ import (
 	"path/filepath"
 	"strings"
 	"time"
-
-	"github.com/hashicorp/nomad/helper"
-	"github.com/hashicorp/nomad/nomad/structs"
 )

+const (
+	// RestartPolicyModeDelay causes an artificial delay till the next interval is
+	// reached when the specified attempts have been reached in the interval.
+	RestartPolicyModeDelay = "delay"
+
+	// RestartPolicyModeFail causes a job to fail if the specified number of
+	// attempts are reached within an interval.
+	RestartPolicyModeFail = "fail"
+)
+
 // MemoryStats holds memory usage related stats
@@ -16,6 +23,7 @@ type MemoryStats struct {
 	RSS            uint64
 	Cache          uint64
+	Swap           uint64
 	Usage          uint64
 	MaxUsage       uint64
 	KernelUsage    uint64
 	KernelMaxUsage uint64
@@ -170,32 +178,38 @@ func NewDefaultReschedulePolicy(jobType string) *ReschedulePolicy {
 	var dp *ReschedulePolicy
 	switch jobType {
 	case "service":
 		// This needs to be in sync with DefaultServiceJobReschedulePolicy
 		// in nomad/structs/structs.go
 		dp = &ReschedulePolicy{
-			Attempts:      helper.IntToPtr(structs.DefaultServiceJobReschedulePolicy.Attempts),
-			Interval:      helper.TimeToPtr(structs.DefaultServiceJobReschedulePolicy.Interval),
-			Delay:         helper.TimeToPtr(structs.DefaultServiceJobReschedulePolicy.Delay),
-			DelayFunction: helper.StringToPtr(structs.DefaultServiceJobReschedulePolicy.DelayFunction),
-			MaxDelay:      helper.TimeToPtr(structs.DefaultServiceJobReschedulePolicy.MaxDelay),
-			Unlimited:     helper.BoolToPtr(structs.DefaultServiceJobReschedulePolicy.Unlimited),
+			Delay:         timeToPtr(30 * time.Second),
+			DelayFunction: stringToPtr("exponential"),
+			MaxDelay:      timeToPtr(1 * time.Hour),
+			Unlimited:     boolToPtr(true),
+
+			Attempts: intToPtr(0),
+			Interval: timeToPtr(0),
 		}
 	case "batch":
 		// This needs to be in sync with DefaultBatchJobReschedulePolicy
 		// in nomad/structs/structs.go
 		dp = &ReschedulePolicy{
-			Attempts:      helper.IntToPtr(structs.DefaultBatchJobReschedulePolicy.Attempts),
-			Interval:      helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
-			Delay:         helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
-			DelayFunction: helper.StringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
-			MaxDelay:      helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
-			Unlimited:     helper.BoolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
+			Attempts:      intToPtr(1),
+			Interval:      timeToPtr(24 * time.Hour),
+			Delay:         timeToPtr(5 * time.Second),
+			DelayFunction: stringToPtr("constant"),
+
+			MaxDelay:  timeToPtr(0),
+			Unlimited: boolToPtr(false),
 		}
+
 	case "system":
 		dp = &ReschedulePolicy{
-			Attempts:      helper.IntToPtr(0),
-			Interval:      helper.TimeToPtr(0),
-			Delay:         helper.TimeToPtr(0),
-			DelayFunction: helper.StringToPtr(""),
-			MaxDelay:      helper.TimeToPtr(0),
-			Unlimited:     helper.BoolToPtr(false),
+			Attempts:      intToPtr(0),
+			Interval:      timeToPtr(0),
+			Delay:         timeToPtr(0),
+			DelayFunction: stringToPtr(""),
+			MaxDelay:      timeToPtr(0),
+			Unlimited:     boolToPtr(false),
 		}
 	}
 	return dp
@@ -243,14 +257,14 @@ func NewSpreadTarget(value string, percent uint32) *SpreadTarget {
 func NewSpread(attribute string, weight int, spreadTargets []*SpreadTarget) *Spread {
 	return &Spread{
 		Attribute:    attribute,
-		Weight:       helper.IntToPtr(weight),
+		Weight:       intToPtr(weight),
 		SpreadTarget: spreadTargets,
 	}
 }

 func (s *Spread) Canonicalize() {
 	if s.Weight == nil {
-		s.Weight = helper.IntToPtr(50)
+		s.Weight = intToPtr(50)
 	}
 }

@@ -269,7 +283,7 @@ func (c *CheckRestart) Canonicalize() {
 	}

 	if c.Grace == nil {
-		c.Grace = helper.TimeToPtr(1 * time.Second)
+		c.Grace = timeToPtr(1 * time.Second)
 	}
 }

@@ -381,21 +395,21 @@ type EphemeralDisk struct {

 func DefaultEphemeralDisk() *EphemeralDisk {
 	return &EphemeralDisk{
-		Sticky:  helper.BoolToPtr(false),
-		Migrate: helper.BoolToPtr(false),
-		SizeMB:  helper.IntToPtr(300),
+		Sticky:  boolToPtr(false),
+		Migrate: boolToPtr(false),
+		SizeMB:  intToPtr(300),
 	}
 }

 func (e *EphemeralDisk) Canonicalize() {
 	if e.Sticky == nil {
-		e.Sticky = helper.BoolToPtr(false)
+		e.Sticky = boolToPtr(false)
 	}
 	if e.Migrate == nil {
-		e.Migrate = helper.BoolToPtr(false)
+		e.Migrate = boolToPtr(false)
 	}
 	if e.SizeMB == nil {
-		e.SizeMB = helper.IntToPtr(300)
+		e.SizeMB = intToPtr(300)
 	}
 }

@@ -410,10 +424,10 @@ type MigrateStrategy struct {

 func DefaultMigrateStrategy() *MigrateStrategy {
 	return &MigrateStrategy{
-		MaxParallel:     helper.IntToPtr(1),
-		HealthCheck:     helper.StringToPtr("checks"),
-		MinHealthyTime:  helper.TimeToPtr(10 * time.Second),
-		HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
+		MaxParallel:     intToPtr(1),
+		HealthCheck:     stringToPtr("checks"),
+		MinHealthyTime:  timeToPtr(10 * time.Second),
+		HealthyDeadline: timeToPtr(5 * time.Minute),
 	}
 }

@@ -479,17 +493,17 @@ type TaskGroup struct {
 // NewTaskGroup creates a new TaskGroup.
 func NewTaskGroup(name string, count int) *TaskGroup {
 	return &TaskGroup{
-		Name:  helper.StringToPtr(name),
-		Count: helper.IntToPtr(count),
+		Name:  stringToPtr(name),
+		Count: intToPtr(count),
 	}
 }

 func (g *TaskGroup) Canonicalize(job *Job) {
 	if g.Name == nil {
-		g.Name = helper.StringToPtr("")
+		g.Name = stringToPtr("")
 	}
 	if g.Count == nil {
-		g.Count = helper.IntToPtr(1)
+		g.Count = intToPtr(1)
 	}
 	for _, t := range g.Tasks {
 		t.Canonicalize(g, job)
@@ -555,18 +569,22 @@ func (g *TaskGroup) Canonicalize(job *Job) {
 	var defaultRestartPolicy *RestartPolicy
 	switch *job.Type {
 	case "service", "system":
+		// This needs to be in sync with DefaultServiceJobRestartPolicy
+		// in nomad/structs/structs.go
 		defaultRestartPolicy = &RestartPolicy{
-			Delay:    helper.TimeToPtr(structs.DefaultServiceJobRestartPolicy.Delay),
-			Attempts: helper.IntToPtr(structs.DefaultServiceJobRestartPolicy.Attempts),
-			Interval: helper.TimeToPtr(structs.DefaultServiceJobRestartPolicy.Interval),
-			Mode:     helper.StringToPtr(structs.DefaultServiceJobRestartPolicy.Mode),
+			Delay:    timeToPtr(15 * time.Second),
+			Attempts: intToPtr(2),
+			Interval: timeToPtr(30 * time.Minute),
+			Mode:     stringToPtr(RestartPolicyModeFail),
 		}
 	default:
+		// This needs to be in sync with DefaultBatchJobRestartPolicy
+		// in nomad/structs/structs.go
 		defaultRestartPolicy = &RestartPolicy{
-			Delay:    helper.TimeToPtr(structs.DefaultBatchJobRestartPolicy.Delay),
-			Attempts: helper.IntToPtr(structs.DefaultBatchJobRestartPolicy.Attempts),
-			Interval: helper.TimeToPtr(structs.DefaultBatchJobRestartPolicy.Interval),
-			Mode:     helper.StringToPtr(structs.DefaultBatchJobRestartPolicy.Mode),
+			Delay:    timeToPtr(15 * time.Second),
+			Attempts: intToPtr(3),
+			Interval: timeToPtr(24 * time.Hour),
+			Mode:     stringToPtr(RestartPolicyModeFail),
 		}
 	}

@@ -628,17 +646,17 @@ type LogConfig struct {

 func DefaultLogConfig() *LogConfig {
 	return &LogConfig{
-		MaxFiles:      helper.IntToPtr(10),
-		MaxFileSizeMB: helper.IntToPtr(10),
+		MaxFiles:      intToPtr(10),
+		MaxFileSizeMB: intToPtr(10),
 	}
 }

 func (l *LogConfig) Canonicalize() {
 	if l.MaxFiles == nil {
-		l.MaxFiles = helper.IntToPtr(10)
+		l.MaxFiles = intToPtr(10)
 	}
 	if l.MaxFileSizeMB == nil {
-		l.MaxFileSizeMB = helper.IntToPtr(10)
+		l.MaxFileSizeMB = intToPtr(10)
 	}
 }

@@ -676,7 +694,7 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) {
 	}
 	t.Resources.Canonicalize()
 	if t.KillTimeout == nil {
-		t.KillTimeout = helper.TimeToPtr(5 * time.Second)
+		t.KillTimeout = timeToPtr(5 * time.Second)
 	}
 	if t.LogConfig == nil {
 		t.LogConfig = DefaultLogConfig()
@@ -707,11 +725,11 @@ type TaskArtifact struct {

 func (a *TaskArtifact) Canonicalize() {
 	if a.GetterMode == nil {
-		a.GetterMode = helper.StringToPtr("any")
+		a.GetterMode = stringToPtr("any")
 	}
 	if a.GetterSource == nil {
 		// Shouldn't be possible, but we don't want to panic
-		a.GetterSource = helper.StringToPtr("")
+		a.GetterSource = stringToPtr("")
 	}
 	if a.RelativeDest == nil {
 		switch *a.GetterMode {
@@ -723,7 +741,7 @@ func (a *TaskArtifact) Canonicalize() {
 			a.RelativeDest = &dest
 		default:
 			// Default to a directory
-			a.RelativeDest = helper.StringToPtr("local/")
+			a.RelativeDest = stringToPtr("local/")
 		}
 	}
 }
@@ -744,44 +762,44 @@ type Template struct {

 func (tmpl *Template) Canonicalize() {
 	if tmpl.SourcePath == nil {
-		tmpl.SourcePath = helper.StringToPtr("")
+		tmpl.SourcePath = stringToPtr("")
 	}
 	if tmpl.DestPath == nil {
-		tmpl.DestPath = helper.StringToPtr("")
+		tmpl.DestPath = stringToPtr("")
 	}
 	if tmpl.EmbeddedTmpl == nil {
-		tmpl.EmbeddedTmpl = helper.StringToPtr("")
+		tmpl.EmbeddedTmpl = stringToPtr("")
 	}
 	if tmpl.ChangeMode == nil {
-		tmpl.ChangeMode = helper.StringToPtr("restart")
+		tmpl.ChangeMode = stringToPtr("restart")
 	}
 	if tmpl.ChangeSignal == nil {
 		if *tmpl.ChangeMode == "signal" {
-			tmpl.ChangeSignal = helper.StringToPtr("SIGHUP")
+			tmpl.ChangeSignal = stringToPtr("SIGHUP")
 		} else {
-			tmpl.ChangeSignal = helper.StringToPtr("")
+			tmpl.ChangeSignal = stringToPtr("")
 		}
 	} else {
 		sig := *tmpl.ChangeSignal
-		tmpl.ChangeSignal = helper.StringToPtr(strings.ToUpper(sig))
+		tmpl.ChangeSignal = stringToPtr(strings.ToUpper(sig))
 	}
 	if tmpl.Splay == nil {
-		tmpl.Splay = helper.TimeToPtr(5 * time.Second)
+		tmpl.Splay = timeToPtr(5 * time.Second)
 	}
 	if tmpl.Perms == nil {
-		tmpl.Perms = helper.StringToPtr("0644")
+		tmpl.Perms = stringToPtr("0644")
 	}
 	if tmpl.LeftDelim == nil {
-		tmpl.LeftDelim = helper.StringToPtr("{{")
+		tmpl.LeftDelim = stringToPtr("{{")
 	}
 	if tmpl.RightDelim == nil {
-		tmpl.RightDelim = helper.StringToPtr("}}")
+		tmpl.RightDelim = stringToPtr("}}")
 	}
 	if tmpl.Envvars == nil {
-		tmpl.Envvars = helper.BoolToPtr(false)
+		tmpl.Envvars = boolToPtr(false)
 	}
 	if tmpl.VaultGrace == nil {
-		tmpl.VaultGrace = helper.TimeToPtr(15 * time.Second)
+		tmpl.VaultGrace = timeToPtr(15 * time.Second)
 	}
 }

@@ -794,13 +812,13 @@ type Vault struct {

 func (v *Vault) Canonicalize() {
 	if v.Env == nil {
-		v.Env = helper.BoolToPtr(true)
+		v.Env = boolToPtr(true)
 	}
 	if v.ChangeMode == nil {
-		v.ChangeMode = helper.StringToPtr("restart")
+		v.ChangeMode = stringToPtr("restart")
 	}
 	if v.ChangeSignal == nil {
-		v.ChangeSignal = helper.StringToPtr("SIGHUP")
+		v.ChangeSignal = stringToPtr("SIGHUP")
 	}
 }
@@ -1,11 +1,11 @@
 package api

 import (
+	"path/filepath"
 	"reflect"
 	"testing"
 	"time"

-	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -15,8 +15,8 @@ func TestTaskGroup_NewTaskGroup(t *testing.T) {
 	t.Parallel()
 	grp := NewTaskGroup("grp1", 2)
 	expect := &TaskGroup{
-		Name:  helper.StringToPtr("grp1"),
-		Count: helper.IntToPtr(2),
+		Name:  stringToPtr("grp1"),
+		Count: intToPtr(2),
 	}
 	if !reflect.DeepEqual(grp, expect) {
 		t.Fatalf("expect: %#v, got: %#v", expect, grp)
@@ -143,7 +143,7 @@ func TestTaskGroup_AddSpread(t *testing.T) {
 	expect := []*Spread{
 		{
 			Attribute: "${meta.rack}",
-			Weight:    helper.IntToPtr(100),
+			Weight:    intToPtr(100),
 			SpreadTarget: []*SpreadTarget{
 				{
 					Value: "r1",
@@ -153,7 +153,7 @@ func TestTaskGroup_AddSpread(t *testing.T) {
 		},
 		{
 			Attribute: "${node.datacenter}",
-			Weight:    helper.IntToPtr(100),
+			Weight:    intToPtr(100),
 			SpreadTarget: []*SpreadTarget{
 				{
 					Value: "dc1",
@@ -263,13 +263,13 @@ func TestTask_Require(t *testing.T) {

 	// Create some require resources
 	resources := &Resources{
-		CPU:      helper.IntToPtr(1250),
-		MemoryMB: helper.IntToPtr(128),
-		DiskMB:   helper.IntToPtr(2048),
+		CPU:      intToPtr(1250),
+		MemoryMB: intToPtr(128),
+		DiskMB:   intToPtr(2048),
 		Networks: []*NetworkResource{
 			{
 				CIDR:          "0.0.0.0/0",
-				MBits:         helper.IntToPtr(100),
+				MBits:         intToPtr(100),
 				ReservedPorts: []Port{{"", 80}, {"", 443}},
 			},
 		},
@@ -357,14 +357,14 @@ func TestTask_AddAffinity(t *testing.T) {
 func TestTask_Artifact(t *testing.T) {
 	t.Parallel()
 	a := TaskArtifact{
-		GetterSource: helper.StringToPtr("http://localhost/foo.txt"),
-		GetterMode:   helper.StringToPtr("file"),
+		GetterSource: stringToPtr("http://localhost/foo.txt"),
+		GetterMode:   stringToPtr("file"),
 	}
 	a.Canonicalize()
 	if *a.GetterMode != "file" {
 		t.Errorf("expected file but found %q", *a.GetterMode)
 	}
-	if *a.RelativeDest != "local/foo.txt" {
+	if filepath.ToSlash(*a.RelativeDest) != "local/foo.txt" {
 		t.Errorf("expected local/foo.txt but found %q", *a.RelativeDest)
 	}
 }
@@ -372,21 +372,21 @@ func TestTask_Artifact(t *testing.T) {
 // Ensures no regression on https://github.com/hashicorp/nomad/issues/3132
 func TestTaskGroup_Canonicalize_Update(t *testing.T) {
 	job := &Job{
-		ID: helper.StringToPtr("test"),
+		ID: stringToPtr("test"),
 		Update: &UpdateStrategy{
-			AutoRevert:       helper.BoolToPtr(false),
-			Canary:           helper.IntToPtr(0),
-			HealthCheck:      helper.StringToPtr(""),
-			HealthyDeadline:  helper.TimeToPtr(0),
-			ProgressDeadline: helper.TimeToPtr(0),
-			MaxParallel:      helper.IntToPtr(0),
-			MinHealthyTime:   helper.TimeToPtr(0),
-			Stagger:          helper.TimeToPtr(0),
+			AutoRevert:       boolToPtr(false),
+			Canary:           intToPtr(0),
+			HealthCheck:      stringToPtr(""),
+			HealthyDeadline:  timeToPtr(0),
+			ProgressDeadline: timeToPtr(0),
+			MaxParallel:      intToPtr(0),
+			MinHealthyTime:   timeToPtr(0),
+			Stagger:          timeToPtr(0),
 		},
 	}
 	job.Canonicalize()
 	tg := &TaskGroup{
-		Name: helper.StringToPtr("foo"),
+		Name: stringToPtr("foo"),
 	}
 	tg.Canonicalize(job)
 	assert.Nil(t, tg.Update)
@@ -407,130 +407,130 @@ func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) {
 			jobReschedulePolicy:  nil,
 			taskReschedulePolicy: nil,
 			expected: &ReschedulePolicy{
-				Attempts:      helper.IntToPtr(structs.DefaultBatchJobReschedulePolicy.Attempts),
-				Interval:      helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
-				Delay:         helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
-				DelayFunction: helper.StringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
-				MaxDelay:      helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
-				Unlimited:     helper.BoolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
+				Attempts:      intToPtr(structs.DefaultBatchJobReschedulePolicy.Attempts),
+				Interval:      timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
+				Delay:         timeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
+				DelayFunction: stringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
+				MaxDelay:      timeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
+				Unlimited:     boolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
 			},
 		},
 		{
 			desc: "Empty job reschedule policy",
 			jobReschedulePolicy: &ReschedulePolicy{
-				Attempts:      helper.IntToPtr(0),
-				Interval:      helper.TimeToPtr(0),
-				Delay:         helper.TimeToPtr(0),
-				MaxDelay:      helper.TimeToPtr(0),
-				DelayFunction: helper.StringToPtr(""),
-				Unlimited:     helper.BoolToPtr(false),
+				Attempts:      intToPtr(0),
+				Interval:      timeToPtr(0),
+				Delay:         timeToPtr(0),
+				MaxDelay:      timeToPtr(0),
+				DelayFunction: stringToPtr(""),
+				Unlimited:     boolToPtr(false),
 			},
 			taskReschedulePolicy: nil,
 			expected: &ReschedulePolicy{
-				Attempts:      helper.IntToPtr(0),
-				Interval:      helper.TimeToPtr(0),
-				Delay:         helper.TimeToPtr(0),
-				MaxDelay:      helper.TimeToPtr(0),
-				DelayFunction: helper.StringToPtr(""),
-				Unlimited:     helper.BoolToPtr(false),
+				Attempts:      intToPtr(0),
+				Interval:      timeToPtr(0),
+				Delay:         timeToPtr(0),
+				MaxDelay:      timeToPtr(0),
+				DelayFunction: stringToPtr(""),
+				Unlimited:     boolToPtr(false),
 			},
 		},
 		{
 			desc: "Inherit from job",
 			jobReschedulePolicy: &ReschedulePolicy{
-				Attempts:      helper.IntToPtr(1),
-				Interval:      helper.TimeToPtr(20 * time.Second),
-				Delay:         helper.TimeToPtr(20 * time.Second),
-				MaxDelay:      helper.TimeToPtr(10 * time.Minute),
-				DelayFunction: helper.StringToPtr("constant"),
-				Unlimited:     helper.BoolToPtr(false),
+				Attempts:      intToPtr(1),
+				Interval:      timeToPtr(20 * time.Second),
+				Delay:         timeToPtr(20 * time.Second),
+				MaxDelay:      timeToPtr(10 * time.Minute),
+				DelayFunction: stringToPtr("constant"),
+				Unlimited:     boolToPtr(false),
 			},
 			taskReschedulePolicy: nil,
 			expected: &ReschedulePolicy{
-				Attempts:      helper.IntToPtr(1),
-				Interval:      helper.TimeToPtr(20 * time.Second),
-				Delay:         helper.TimeToPtr(20 * time.Second),
-				MaxDelay:      helper.TimeToPtr(10 * time.Minute),
-				DelayFunction: helper.StringToPtr("constant"),
-				Unlimited:     helper.BoolToPtr(false),
+				Attempts:      intToPtr(1),
+				Interval:      timeToPtr(20 * time.Second),
+				Delay:         timeToPtr(20 * time.Second),
+				MaxDelay:      timeToPtr(10 * time.Minute),
+				DelayFunction: stringToPtr("constant"),
+				Unlimited:     boolToPtr(false),
 			},
 		},
 		{
 			desc:                "Set in task",
 			jobReschedulePolicy: nil,
 			taskReschedulePolicy: &ReschedulePolicy{
-				Attempts:      helper.IntToPtr(5),
-				Interval:      helper.TimeToPtr(2 * time.Minute),
-				Delay:         helper.TimeToPtr(20 * time.Second),
-				MaxDelay:      helper.TimeToPtr(10 * time.Minute),
-				DelayFunction: helper.StringToPtr("constant"),
-				Unlimited:     helper.BoolToPtr(false),
+				Attempts:      intToPtr(5),
+				Interval:      timeToPtr(2 * time.Minute),
+				Delay:         timeToPtr(20 * time.Second),
+				MaxDelay:      timeToPtr(10 * time.Minute),
+				DelayFunction: stringToPtr("constant"),
+				Unlimited:     boolToPtr(false),
 			},
 			expected: &ReschedulePolicy{
-				Attempts:      helper.IntToPtr(5),
-				Interval:      helper.TimeToPtr(2 * time.Minute),
-				Delay:         helper.TimeToPtr(20 * time.Second),
-				MaxDelay:      helper.TimeToPtr(10 * time.Minute),
-				DelayFunction: helper.StringToPtr("constant"),
-				Unlimited:     helper.BoolToPtr(false),
+				Attempts:      intToPtr(5),
+				Interval:      timeToPtr(2 * time.Minute),
+				Delay:         timeToPtr(20 * time.Second),
+				MaxDelay:      timeToPtr(10 * time.Minute),
+				DelayFunction: stringToPtr("constant"),
+				Unlimited:     boolToPtr(false),
 			},
 		},
 		{
 			desc: "Merge from job",
 			jobReschedulePolicy: &ReschedulePolicy{
-				Attempts: helper.IntToPtr(1),
-				Delay:    helper.TimeToPtr(20 * time.Second),
-				MaxDelay: helper.TimeToPtr(10 * time.Minute),
+				Attempts: intToPtr(1),
+				Delay:    timeToPtr(20 * time.Second),
+				MaxDelay: timeToPtr(10 * time.Minute),
 			},
 			taskReschedulePolicy: &ReschedulePolicy{
-				Interval:      helper.TimeToPtr(5 * time.Minute),
-				DelayFunction: helper.StringToPtr("constant"),
-				Unlimited:     helper.BoolToPtr(false),
+				Interval:      timeToPtr(5 * time.Minute),
+				DelayFunction: stringToPtr("constant"),
+				Unlimited:     boolToPtr(false),
 			},
 			expected: &ReschedulePolicy{
-				Attempts:      helper.IntToPtr(1),
-				Interval:      helper.TimeToPtr(5 * time.Minute),
-				Delay:         helper.TimeToPtr(20 * time.Second),
-				MaxDelay:      helper.TimeToPtr(10 * time.Minute),
-				DelayFunction: helper.StringToPtr("constant"),
-				Unlimited:     helper.BoolToPtr(false),
+				Attempts:      intToPtr(1),
+				Interval:      timeToPtr(5 * time.Minute),
+				Delay:         timeToPtr(20 * time.Second),
+				MaxDelay:      timeToPtr(10 * time.Minute),
+				DelayFunction: stringToPtr("constant"),
+				Unlimited:     boolToPtr(false),
 			},
 		},
 		{
 			desc: "Override from group",
 			jobReschedulePolicy: &ReschedulePolicy{
-				Attempts: helper.IntToPtr(1),
-				MaxDelay: helper.TimeToPtr(10 * time.Second),
+				Attempts: intToPtr(1),
+				MaxDelay: timeToPtr(10 * time.Second),
 			},
 			taskReschedulePolicy: &ReschedulePolicy{
-				Attempts:      helper.IntToPtr(5),
-				Delay:         helper.TimeToPtr(20 * time.Second),
-				MaxDelay:      helper.TimeToPtr(20 * time.Minute),
-				DelayFunction: helper.StringToPtr("constant"),
-				Unlimited:     helper.BoolToPtr(false),
+				Attempts:      intToPtr(5),
+				Delay:         timeToPtr(20 * time.Second),
+				MaxDelay:      timeToPtr(20 * time.Minute),
+				DelayFunction: stringToPtr("constant"),
+				Unlimited:     boolToPtr(false),
 			},
 			expected: &ReschedulePolicy{
-				Attempts:      helper.IntToPtr(5),
-				Interval:      helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
-				Delay:         helper.TimeToPtr(20 * time.Second),
-				MaxDelay:      helper.TimeToPtr(20 * time.Minute),
-				DelayFunction: helper.StringToPtr("constant"),
-				Unlimited:     helper.BoolToPtr(false),
+				Attempts:      intToPtr(5),
+				Interval:      timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
+				Delay:         timeToPtr(20 * time.Second),
+				MaxDelay:      timeToPtr(20 * time.Minute),
+				DelayFunction: stringToPtr("constant"),
+				Unlimited:     boolToPtr(false),
 			},
 		},
 		{
 			desc: "Attempts from job, default interval",
 			jobReschedulePolicy: &ReschedulePolicy{
-				Attempts: helper.IntToPtr(1),
+				Attempts: intToPtr(1),
 			},
 			taskReschedulePolicy: nil,
 			expected: &ReschedulePolicy{
-				Attempts:      helper.IntToPtr(1),
-				Interval:      helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
-				Delay:         helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
-				DelayFunction: helper.StringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
-				MaxDelay:      helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
-				Unlimited:     helper.BoolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
+				Attempts:      intToPtr(1),
+				Interval:      timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
+				Delay:         timeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
+				DelayFunction: stringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
+				MaxDelay:      timeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
+				Unlimited:     boolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
 			},
 		},
 	}
@@ -538,13 +538,13 @@ func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.desc, func(t *testing.T) {
 			job := &Job{
-				ID:         helper.StringToPtr("test"),
+				ID:         stringToPtr("test"),
 				Reschedule: tc.jobReschedulePolicy,
-				Type:       helper.StringToPtr(JobTypeBatch),
+				Type:       stringToPtr(JobTypeBatch),
 			}
 			job.Canonicalize()
 			tg := &TaskGroup{
-				Name:             helper.StringToPtr("foo"),
+				Name:             stringToPtr("foo"),
 				ReschedulePolicy: tc.taskReschedulePolicy,
 			}
 			tg.Canonicalize(job)
@@ -577,44 +577,44 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
 			jobMigrate:  nil,
 			taskMigrate: nil,
 			expected: &MigrateStrategy{
-				MaxParallel:     helper.IntToPtr(1),
-				HealthCheck:     helper.StringToPtr("checks"),
-				MinHealthyTime:  helper.TimeToPtr(10 * time.Second),
-				HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
+				MaxParallel:     intToPtr(1),
+				HealthCheck:     stringToPtr("checks"),
+				MinHealthyTime:  timeToPtr(10 * time.Second),
+				HealthyDeadline: timeToPtr(5 * time.Minute),
 			},
 		},
 		{
 			desc:    "Empty job migrate strategy",
 			jobType: "service",
 			jobMigrate: &MigrateStrategy{
-				MaxParallel:     helper.IntToPtr(0),
-				HealthCheck:     helper.StringToPtr(""),
-				MinHealthyTime:  helper.TimeToPtr(0),
-				HealthyDeadline: helper.TimeToPtr(0),
+				MaxParallel:     intToPtr(0),
+				HealthCheck:     stringToPtr(""),
+				MinHealthyTime:  timeToPtr(0),
+				HealthyDeadline: timeToPtr(0),
 			},
 			taskMigrate: nil,
 			expected: &MigrateStrategy{
-				MaxParallel:     helper.IntToPtr(0),
-				HealthCheck:     helper.StringToPtr(""),
-				MinHealthyTime:  helper.TimeToPtr(0),
-				HealthyDeadline: helper.TimeToPtr(0),
+				MaxParallel:     intToPtr(0),
+				HealthCheck:     stringToPtr(""),
+				MinHealthyTime:  timeToPtr(0),
+				HealthyDeadline: timeToPtr(0),
 			},
 		},
 		{
 			desc:    "Inherit from job",
 			jobType: "service",
 			jobMigrate: &MigrateStrategy{
-				MaxParallel:     helper.IntToPtr(3),
-				HealthCheck:     helper.StringToPtr("checks"),
-				MinHealthyTime:  helper.TimeToPtr(2),
-				HealthyDeadline: helper.TimeToPtr(2),
+				MaxParallel:     intToPtr(3),
+				HealthCheck:     stringToPtr("checks"),
+				MinHealthyTime:  timeToPtr(2),
+				HealthyDeadline: timeToPtr(2),
 			},
 			taskMigrate: nil,
 			expected: &MigrateStrategy{
-				MaxParallel:     helper.IntToPtr(3),
-				HealthCheck:     helper.StringToPtr("checks"),
-				MinHealthyTime:  helper.TimeToPtr(2),
-				HealthyDeadline: helper.TimeToPtr(2),
+				MaxParallel:     intToPtr(3),
+				HealthCheck:     stringToPtr("checks"),
+				MinHealthyTime:  timeToPtr(2),
+				HealthyDeadline: timeToPtr(2),
 			},
 		},
 		{
@@ -622,67 +622,67 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
 			jobType:    "service",
 			jobMigrate: nil,
 			taskMigrate: &MigrateStrategy{
-				MaxParallel:     helper.IntToPtr(3),
-				HealthCheck:     helper.StringToPtr("checks"),
-				MinHealthyTime:  helper.TimeToPtr(2),
-				HealthyDeadline: helper.TimeToPtr(2),
+				MaxParallel:     intToPtr(3),
+				HealthCheck:     stringToPtr("checks"),
+				MinHealthyTime:  timeToPtr(2),
+				HealthyDeadline: timeToPtr(2),
 			},
 			expected: &MigrateStrategy{
-				MaxParallel:     helper.IntToPtr(3),
-				HealthCheck:     helper.StringToPtr("checks"),
-				MinHealthyTime:  helper.TimeToPtr(2),
-				HealthyDeadline: helper.TimeToPtr(2),
+				MaxParallel:     intToPtr(3),
+				HealthCheck:     stringToPtr("checks"),
+				MinHealthyTime:  timeToPtr(2),
+				HealthyDeadline: timeToPtr(2),
 			},
 		},
 		{
 			desc:    "Merge from job",
 			jobType: "service",
 			jobMigrate: &MigrateStrategy{
-				MaxParallel: helper.IntToPtr(11),
+				MaxParallel: intToPtr(11),
 			},
 			taskMigrate: &MigrateStrategy{
-				HealthCheck:     helper.StringToPtr("checks"),
-				MinHealthyTime:  helper.TimeToPtr(2),
-				HealthyDeadline: helper.TimeToPtr(2),
+				HealthCheck:     stringToPtr("checks"),
+				MinHealthyTime:  timeToPtr(2),
+				HealthyDeadline: timeToPtr(2),
 			},
 			expected: &MigrateStrategy{
-				MaxParallel:     helper.IntToPtr(11),
-				HealthCheck:     helper.StringToPtr("checks"),
-				MinHealthyTime:  helper.TimeToPtr(2),
-				HealthyDeadline: helper.TimeToPtr(2),
+				MaxParallel:     intToPtr(11),
+				HealthCheck:     stringToPtr("checks"),
+				MinHealthyTime:  timeToPtr(2),
+				HealthyDeadline: timeToPtr(2),
 			},
 		},
 		{
 			desc:    "Override from group",
 			jobType: "service",
 			jobMigrate: &MigrateStrategy{
-				MaxParallel: helper.IntToPtr(11),
+				MaxParallel: intToPtr(11),
 			},
 			taskMigrate: &MigrateStrategy{
-				MaxParallel:     helper.IntToPtr(5),
-				HealthCheck:     helper.StringToPtr("checks"),
-				MinHealthyTime:  helper.TimeToPtr(2),
-				HealthyDeadline: helper.TimeToPtr(2),
+				MaxParallel:     intToPtr(5),
+				HealthCheck:     stringToPtr("checks"),
+				MinHealthyTime:  timeToPtr(2),
+				HealthyDeadline: timeToPtr(2),
 			},
 			expected: &MigrateStrategy{
-				MaxParallel:     helper.IntToPtr(5),
-				HealthCheck:     helper.StringToPtr("checks"),
-				MinHealthyTime:  helper.TimeToPtr(2),
-				HealthyDeadline: helper.TimeToPtr(2),
+				MaxParallel:     intToPtr(5),
+				HealthCheck:     stringToPtr("checks"),
+				MinHealthyTime:  timeToPtr(2),
+				HealthyDeadline: timeToPtr(2),
 			},
 		},
 		{
 			desc:    "Parallel from job, defaulting",
 			jobType: "service",
 			jobMigrate: &MigrateStrategy{
-				MaxParallel: helper.IntToPtr(5),
+				MaxParallel: intToPtr(5),
 			},
 			taskMigrate: nil,
 			expected: &MigrateStrategy{
-				MaxParallel:     helper.IntToPtr(5),
-				HealthCheck:     helper.StringToPtr("checks"),
-				MinHealthyTime:  helper.TimeToPtr(10 * time.Second),
-				HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
+				MaxParallel:     intToPtr(5),
+				HealthCheck:     stringToPtr("checks"),
+				MinHealthyTime:  timeToPtr(10 * time.Second),
+				HealthyDeadline: timeToPtr(5 * time.Minute),
 			},
 		},
 	}
@@ -690,13 +690,13 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.desc, func(t *testing.T) {
 			job := &Job{
-				ID:      helper.StringToPtr("test"),
+				ID:      stringToPtr("test"),
 				Migrate: tc.jobMigrate,
-				Type:    helper.StringToPtr(tc.jobType),
+				Type:    stringToPtr(tc.jobType),
 			}
 			job.Canonicalize()
 			tg := &TaskGroup{
-				Name:    helper.StringToPtr("foo"),
+				Name:    stringToPtr("foo"),
 				Migrate: tc.taskMigrate,
 			}
 			tg.Canonicalize(job)
@@ -708,13 +708,13 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
 // TestService_CheckRestart asserts Service.CheckRestart settings are properly
 // inherited by Checks.
 func TestService_CheckRestart(t *testing.T) {
-	job := &Job{Name: helper.StringToPtr("job")}
-	tg := &TaskGroup{Name: helper.StringToPtr("group")}
+	job := &Job{Name: stringToPtr("job")}
+	tg := &TaskGroup{Name: stringToPtr("group")}
 	task := &Task{Name: "task"}
 	service := &Service{
 		CheckRestart: &CheckRestart{
 			Limit:          11,
-			Grace:          helper.TimeToPtr(11 * time.Second),
+			Grace:          timeToPtr(11 * time.Second),
 			IgnoreWarnings: true,
 		},
 		Checks: []ServiceCheck{
@@ -722,7 +722,7 @@ func TestService_CheckRestart(t *testing.T) {
 				Name: "all-set",
 				CheckRestart: &CheckRestart{
 					Limit:          22,
-					Grace:          helper.TimeToPtr(22 * time.Second),
+					Grace:          timeToPtr(22 * time.Second),
 					IgnoreWarnings: true,
 				},
 			},
@@ -730,7 +730,7 @@ func TestService_CheckRestart(t *testing.T) {
 				Name: "some-set",
 				CheckRestart: &CheckRestart{
 					Limit: 33,
-					Grace: helper.TimeToPtr(33 * time.Second),
+					Grace: timeToPtr(33 * time.Second),
 				},
 			},
 			{
@@ -756,12 +756,12 @@ func TestService_CheckRestart(t *testing.T) {
 // TestSpread_Canonicalize asserts that the spread stanza is canonicalized correctly
 func TestSpread_Canonicalize(t *testing.T) {
 	job := &Job{
-		ID:   helper.StringToPtr("test"),
-		Type: helper.StringToPtr("batch"),
+		ID:   stringToPtr("test"),
+		Type: stringToPtr("batch"),
 	}
 	job.Canonicalize()
 	tg := &TaskGroup{
-		Name: helper.StringToPtr("foo"),
+		Name: stringToPtr("foo"),
 	}
 	type testCase struct {
 		desc string
@@ -781,7 +781,7 @@ func TestSpread_Canonicalize(t *testing.T) {
 			"Zero spread",
 			&Spread{
 				Attribute: "test",
-				Weight:    helper.IntToPtr(0),
+				Weight:    intToPtr(0),
 			},
 			0,
 		},
@@ -789,7 +789,7 @@ func TestSpread_Canonicalize(t *testing.T) {
 			"Non Zero spread",
 			&Spread{
 				Attribute: "test",
-				Weight:    helper.IntToPtr(100),
+				Weight:    intToPtr(100),
 			},
 			100,
 		},
@@ -2,8 +2,6 @@ package api

 import (
 	"testing"
-
-	"github.com/hashicorp/nomad/helper"
 )

 func assertQueryMeta(t *testing.T, qm *QueryMeta) {
@@ -27,18 +25,18 @@ func testJob() *Job {
 	task := NewTask("task1", "exec").
 		SetConfig("command", "/bin/sleep").
 		Require(&Resources{
-			CPU:      helper.IntToPtr(100),
-			MemoryMB: helper.IntToPtr(256),
+			CPU:      intToPtr(100),
+			MemoryMB: intToPtr(256),
 		}).
 		SetLogConfig(&LogConfig{
-			MaxFiles:      helper.IntToPtr(1),
-			MaxFileSizeMB: helper.IntToPtr(2),
+			MaxFiles:      intToPtr(1),
+			MaxFileSizeMB: intToPtr(2),
 		})

 	group := NewTaskGroup("group1", 1).
 		AddTask(task).
 		RequireDisk(&EphemeralDisk{
-			SizeMB: helper.IntToPtr(25),
+			SizeMB: intToPtr(25),
 		})

 	job := NewBatchJob("job1", "redis", "region1", 1).
@@ -50,9 +48,9 @@ func testJob() *Job {

 func testPeriodicJob() *Job {
 	job := testJob().AddPeriodicConfig(&PeriodicConfig{
-		Enabled:  helper.BoolToPtr(true),
-		Spec:     helper.StringToPtr("*/30 * * * *"),
-		SpecType: helper.StringToPtr("cron"),
+		Enabled:  boolToPtr(true),
+		Spec:     stringToPtr("*/30 * * * *"),
+		SpecType: stringToPtr("cron"),
 	})
 	return job
 }
@@ -72,10 +70,23 @@ func testQuotaSpec() *QuotaSpec {
 			{
 				Region: "global",
 				RegionLimit: &Resources{
-					CPU:      helper.IntToPtr(2000),
-					MemoryMB: helper.IntToPtr(2000),
+					CPU:      intToPtr(2000),
+					MemoryMB: intToPtr(2000),
 				},
 			},
 		},
 	}
 }

+// conversions utils only used for testing
+// added here to avoid linter warning
+
+// int64ToPtr returns the pointer to an int
+func int64ToPtr(i int64) *int64 {
+	return &i
+}
+
+// float64ToPtr returns the pointer to an float64
+func float64ToPtr(f float64) *float64 {
+	return &f
+}

api/utils.go (new file, 53 lines)

@@ -0,0 +1,53 @@
+package api
+
+import (
+	"strconv"
+	"strings"
+	"time"
+)
+
+// boolToPtr returns the pointer to a boolean
+func boolToPtr(b bool) *bool {
+	return &b
+}
+
+// intToPtr returns the pointer to an int
+func intToPtr(i int) *int {
+	return &i
+}
+
+// uint64ToPtr returns the pointer to an uint64
+func uint64ToPtr(u uint64) *uint64 {
+	return &u
+}
+
+// stringToPtr returns the pointer to a string
+func stringToPtr(str string) *string {
+	return &str
+}
+
+// timeToPtr returns the pointer to a time stamp
+func timeToPtr(t time.Duration) *time.Duration {
+	return &t
+}
+
+// formatFloat converts the floating-point number f to a string,
+// after rounding it to the passed unit.
+//
+// Uses 'f' format (-ddd.dddddd, no exponent), and uses at most
+// maxPrec digits after the decimal point.
+func formatFloat(f float64, maxPrec int) string {
+	v := strconv.FormatFloat(f, 'f', -1, 64)
+
+	idx := strings.LastIndex(v, ".")
+	if idx == -1 {
+		return v
+	}
+
+	sublen := idx + maxPrec + 1
+	if sublen > len(v) {
+		sublen = len(v)
+	}
+
+	return v[:sublen]
+}

api/utils_test.go (new file, 39 lines)

@@ -0,0 +1,39 @@
+package api
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestFormatRoundedFloat(t *testing.T) {
+	cases := []struct {
+		input    float64
+		expected string
+	}{
+		{
+			1323,
+			"1323",
+		},
+		{
+			10.321,
+			"10.321",
+		},
+		{
+			100000.31324324,
+			"100000.313",
+		},
+		{
+			100000.3,
+			"100000.3",
+		},
+		{
+			0.7654321,
+			"0.765",
+		},
+	}
+
+	for _, c := range cases {
+		require.Equal(t, c.expected, formatFloat(c.input, 3))
+	}
+}

appveyor.yml (37 lines changed)

@@ -1,15 +1,12 @@
 version: "build-{branch}-{build}"
 image: Visual Studio 2017

 clone_folder: c:\gopath\src\github.com\hashicorp\nomad

 environment:
   GOPATH: c:\gopath
   GOBIN: c:\gopath\bin
-  matrix:
-    - RUN_UI_TESTS: 1
-      SKIP_NOMAD_TESTS: 1
-    - {}
+  GOMAXPROCS: 1

 install:
   - cmd: set PATH=%GOBIN%;c:\go\bin;%PATH%
@@ -17,11 +14,29 @@ install:
   - cmd: go version
   - cmd: go env
   - ps: mkdir C:\gopath\bin
-  - ps: appveyor DownloadFile "https://releases.hashicorp.com/vault/0.7.0/vault_0.7.0_windows_amd64.zip" -FileName "C:\\gopath\\bin\\vault.zip"
+  - ps: appveyor DownloadFile "https://releases.hashicorp.com/vault/0.10.2/vault_0.10.2_windows_amd64.zip" -FileName "C:\\gopath\\bin\\vault.zip"
   - ps: Expand-Archive C:\gopath\bin\vault.zip -DestinationPath C:\gopath\bin
-  - ps: appveyor DownloadFile "https://releases.hashicorp.com/consul/0.7.0/consul_0.7.0_windows_amd64.zip" -FileName "C:\\gopath\\bin\\consul.zip"
-  - ps: Expand-Archive C:\gopath\bin\consul.zip -DestinationPath C:\gopath\bin
-  #- cmd: go install
+  # - ps: appveyor DownloadFile "https://releases.hashicorp.com/consul/1.0.0/consul_1.0.0_windows_amd64.zip" -FileName "C:\\gopath\\bin\\consul.zip"
+  # - ps: Expand-Archive C:\gopath\bin\consul.zip -DestinationPath C:\gopath\bin
+  - ps: choco install make
+  - ps: |
+      go get -u github.com/kardianos/govendor
+      go get -u github.com/ugorji/go/codec/codecgen
+      go get -u github.com/hashicorp/go-bindata/go-bindata
+      go get -u github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs
+      go get -u github.com/a8m/tree/cmd/tree
+      go get -u github.com/magiconair/vendorfmt/cmd/vendorfmt
+      go get -u github.com/golang/protobuf/protoc-gen-go
+      go get -u gotest.tools/gotestsum
 build_script:
-  #- cmd: go test ./...
   - cmd: go install
+  - cmd: |
+      set PATH=%GOPATH%/bin;%PATH%
+      mkdir -p $GOPATH\bin
+      go build -o $GOPATH\bin\nomad
+# test_script:
+#   - cmd: gotestsum -f short-verbose --junitfile results.xml
+# on_finish:
+#   - ps: |
+#       Push-AppveyorArtifact (Resolve-Path .\results.xml)
+#       $wc = New-Object 'System.Net.WebClient'
+#       $wc.UploadFile("https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)", (Resolve-Path .\results.xml))
@@ -249,7 +249,7 @@ func (ar *allocRunner) destroy() error {
 		}
 	}

-	return nil
+	return merr.ErrorOrNil()
 }

 // shutdownHooks calls graceful shutdown hooks for when the agent is exiting.
@@ -114,14 +114,14 @@ type TaskPoststartHook interface {
 	Poststart(context.Context, *TaskPoststartRequest, *TaskPoststartResponse) error
 }

-type TaskKillRequest struct{}
-type TaskKillResponse struct{}
+type TaskPreKillRequest struct{}
+type TaskPreKillResponse struct{}

-type TaskKillHook interface {
+type TaskPreKillHook interface {
 	TaskHook

-	// Killing is called when a task is going to be Killed or Restarted.
-	Killing(context.Context, *TaskKillRequest, *TaskKillResponse) error
+	// PreKilling is called right before a task is going to be killed or restarted.
+	PreKilling(context.Context, *TaskPreKillRequest, *TaskPreKillResponse) error
 }

 type TaskExitedRequest struct{}
@@ -22,8 +22,8 @@ func (tr *TaskRunner) Restart(ctx context.Context, event *structs.TaskEvent, fai
 	// Emit the event since it may take a long time to kill
 	tr.EmitEvent(event)

-	// Run the hooks prior to restarting the task
-	tr.killing()
+	// Run the pre-kill hooks prior to restarting the task
+	tr.preKill()

 	// Tell the restart tracker that a restart triggered the exit
 	tr.restartTracker.SetRestartTriggered(failure)
@@ -2,6 +2,7 @@ package taskrunner

 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"path/filepath"
 	"runtime"
@@ -11,6 +12,7 @@ import (
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/logmon"
 	"github.com/hashicorp/nomad/helper/uuid"
+	"github.com/hashicorp/nomad/plugins/shared"
 )

 // logmonHook launches logmon and manages task logging
@@ -58,8 +60,8 @@ func (*logmonHook) Name() string {
 	return "logmon"
 }

-func (h *logmonHook) launchLogMon() error {
-	l, c, err := logmon.LaunchLogMon(h.logger)
+func (h *logmonHook) launchLogMon(reattachConfig *plugin.ReattachConfig) error {
+	l, c, err := logmon.LaunchLogMon(h.logger, reattachConfig)
 	if err != nil {
 		return err
 	}
@@ -69,15 +71,37 @@ func (h *logmonHook) launchLogMon() error {
 	return nil
 }

+func reattachConfigFromHookData(data map[string]string) (*plugin.ReattachConfig, error) {
+	if data == nil || data["reattach_config"] == "" {
+		return nil, nil
+	}
+
+	// Unmarshal into an allocated struct; json.Unmarshal requires a
+	// non-nil pointer target.
+	cfg := &shared.ReattachConfig{}
+	err := json.Unmarshal([]byte(data["reattach_config"]), cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	return shared.ReattachConfigToGoPlugin(cfg)
+}
+
 func (h *logmonHook) Prestart(ctx context.Context,
 	req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error {

-	// Launch logmon instance for the task.
-	if err := h.launchLogMon(); err != nil {
+	reattachConfig, err := reattachConfigFromHookData(req.HookData)
+	if err != nil {
+		h.logger.Error("failed to load reattach config", "error", err)
+		return err
+	}
+
+	// Launch or reattach logmon instance for the task.
+	if err := h.launchLogMon(reattachConfig); err != nil {
 		h.logger.Error("failed to launch logmon process", "error", err)
 		return err
 	}

+	// Only tell logmon to start when we are not reattaching to a running instance
+	if reattachConfig == nil {
 		err := h.logmon.Start(&logmon.LogConfig{
 			LogDir:        h.config.logDir,
 			StdoutLogFile: fmt.Sprintf("%s.stdout", req.Task.Name),
@@ -91,6 +115,14 @@ func (h *logmonHook) Prestart(ctx context.Context,
 			h.logger.Error("failed to start logmon", "error", err)
 			return err
 		}
+	}
+
+	rCfg := shared.ReattachConfigFromGoPlugin(h.logmonPluginClient.ReattachConfig())
+	jsonCfg, err := json.Marshal(rCfg)
+	if err != nil {
+		return err
+	}
+	req.HookData = map[string]string{"reattach_config": string(jsonCfg)}

 	resp.Done = true
 	return nil
@@ -140,7 +140,7 @@ func (h *serviceHook) Update(ctx context.Context, req *interfaces.TaskUpdateRequ
 	return h.consul.UpdateTask(oldTaskServices, newTaskServices)
 }

-func (h *serviceHook) Killing(ctx context.Context, req *interfaces.TaskKillRequest, resp *interfaces.TaskKillResponse) error {
+func (h *serviceHook) Killing(ctx context.Context, req *interfaces.TaskPreKillRequest, resp *interfaces.TaskPreKillResponse) error {
 	h.mu.Lock()
 	defer h.mu.Unlock()

@@ -200,7 +200,7 @@ func (h *serviceHook) getTaskServices() *agentconsul.TaskServices {
 // values from the task's environment.
 func interpolateServices(taskEnv *taskenv.TaskEnv, services []*structs.Service) []*structs.Service {
 	// Guard against not having a valid taskEnv. This can be the case if the
-	// Killing or Exited hook is run before post-run.
+	// PreKilling or Exited hook is run before Poststart.
 	if taskEnv == nil || len(services) == 0 {
 		return nil
 	}
@@ -35,6 +35,10 @@ import (
 )

 const (
+	// defaultMaxEvents is the default max capacity for task events on the
+	// task state. Overrideable for testing.
+	defaultMaxEvents = 10
+
 	// killBackoffBaseline is the baseline time for exponential backoff while
 	// killing a task.
 	killBackoffBaseline = 5 * time.Second
@@ -191,6 +195,10 @@ type TaskRunner struct {
 	// be accessed via helpers
 	runLaunched     bool
 	runLaunchedLock sync.Mutex
+
+	// maxEvents is the capacity of the TaskEvents on the TaskState.
+	// Defaults to defaultMaxEvents but overrideable for testing.
+	maxEvents int
 }

 type Config struct {
@@ -267,6 +275,7 @@ func NewTaskRunner(config *Config) (*TaskRunner, error) {
 		waitCh:        make(chan struct{}),
 		devicemanager: config.DeviceManager,
 		driverManager: config.DriverManager,
+		maxEvents:     defaultMaxEvents,
 	}

 	// Create the logger based on the allocation ID
@@ -440,7 +449,7 @@ MAIN:
 		case <-tr.killCtx.Done():
 			// We can go through the normal should restart check since
 			// the restart tracker knows it is killed
-			tr.handleKill()
+			result = tr.handleKill()
 		case <-tr.shutdownCtx.Done():
 			// TaskRunner was told to exit immediately
 			return
@@ -703,11 +712,12 @@ func (tr *TaskRunner) initDriver() error {
 	return nil
 }

-// handleKill is used to handle the a request to kill a task. It will store any
-// error in the task runner killErr value.
-func (tr *TaskRunner) handleKill() {
-	// Run the hooks prior to killing the task
-	tr.killing()
+// handleKill is used to handle a request to kill a task. It will return
+// the handle exit result if one is available and store any error in the task
+// runner killErr value.
+func (tr *TaskRunner) handleKill() *drivers.ExitResult {
+	// Run the pre killing hooks
+	tr.preKill()

 	// Tell the restart tracker that the task has been killed so it doesn't
 	// attempt to restart it.
@@ -716,7 +726,7 @@ func (tr *TaskRunner) handleKill() {
 	// Check it is running
 	handle := tr.getDriverHandle()
 	if handle == nil {
-		return
+		return nil
 	}

 	// Kill the task using an exponential backoff in-case of failures.
@@ -734,16 +744,18 @@ func (tr *TaskRunner) handleKill() {
 	// failure in the driver or transport layer occurred
 	if err != nil {
 		if err == drivers.ErrTaskNotFound {
-			return
+			return nil
 		}
 		tr.logger.Error("failed to wait on task. Resources may have been leaked", "error", err)
 		tr.setKillErr(killErr)
-		return
+		return nil
 	}

 	select {
-	case <-waitCh:
+	case result := <-waitCh:
+		return result
 	case <-tr.shutdownCtx.Done():
+		return nil
 	}
 }
@ -797,6 +809,7 @@ func (tr *TaskRunner) buildTaskConfig() *drivers.TaskConfig {
|
|||
ID: fmt.Sprintf("%s/%s/%s", alloc.ID, task.Name, invocationid),
|
||||
Name: task.Name,
|
||||
JobName: alloc.Job.Name,
|
||||
TaskGroupName: alloc.TaskGroup,
|
||||
Resources: &drivers.Resources{
|
||||
NomadResources: taskResources,
|
||||
LinuxResources: &drivers.LinuxResources{
|
||||
|
@ -1019,7 +1032,7 @@ func (tr *TaskRunner) appendEvent(event *structs.TaskEvent) error {
|
|||
}
|
||||
|
||||
// Append event to slice
|
||||
appendTaskEvent(tr.state, event)
|
||||
appendTaskEvent(tr.state, event, tr.maxEvents)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@@ -1114,14 +1127,14 @@ func (tr *TaskRunner) UpdateStats(ru *cstructs.TaskResourceUsage) {
 //TODO Remove Backwardscompat or use tr.Alloc()?
 func (tr *TaskRunner) setGaugeForMemory(ru *cstructs.TaskResourceUsage) {
 	if !tr.clientConfig.DisableTaggedMetrics {
 		metrics.SetGaugeWithLabels([]string{"client", "allocs", "memory", "rss"},
 			float32(ru.ResourceUsage.MemoryStats.RSS), tr.baseLabels)
 		metrics.SetGaugeWithLabels([]string{"client", "allocs", "memory", "cache"},
 			float32(ru.ResourceUsage.MemoryStats.Cache), tr.baseLabels)
 		metrics.SetGaugeWithLabels([]string{"client", "allocs", "memory", "swap"},
 			float32(ru.ResourceUsage.MemoryStats.Swap), tr.baseLabels)
+		metrics.SetGaugeWithLabels([]string{"client", "allocs", "memory", "usage"},
+			float32(ru.ResourceUsage.MemoryStats.Usage), tr.baseLabels)
 		metrics.SetGaugeWithLabels([]string{"client", "allocs", "memory", "max_usage"},
 			float32(ru.ResourceUsage.MemoryStats.MaxUsage), tr.baseLabels)
 		metrics.SetGaugeWithLabels([]string{"client", "allocs", "memory", "kernel_usage"},

@@ -1134,6 +1147,7 @@ func (tr *TaskRunner) setGaugeForMemory(ru *cstructs.TaskResourceUsage) {
 	metrics.SetGauge([]string{"client", "allocs", tr.alloc.Job.Name, tr.alloc.TaskGroup, tr.allocID, tr.taskName, "memory", "rss"}, float32(ru.ResourceUsage.MemoryStats.RSS))
 	metrics.SetGauge([]string{"client", "allocs", tr.alloc.Job.Name, tr.alloc.TaskGroup, tr.allocID, tr.taskName, "memory", "cache"}, float32(ru.ResourceUsage.MemoryStats.Cache))
 	metrics.SetGauge([]string{"client", "allocs", tr.alloc.Job.Name, tr.alloc.TaskGroup, tr.allocID, tr.taskName, "memory", "swap"}, float32(ru.ResourceUsage.MemoryStats.Swap))
+	metrics.SetGauge([]string{"client", "allocs", tr.alloc.Job.Name, tr.alloc.TaskGroup, tr.allocID, tr.taskName, "memory", "usage"}, float32(ru.ResourceUsage.MemoryStats.Usage))
 	metrics.SetGauge([]string{"client", "allocs", tr.alloc.Job.Name, tr.alloc.TaskGroup, tr.allocID, tr.taskName, "memory", "max_usage"}, float32(ru.ResourceUsage.MemoryStats.MaxUsage))
 	metrics.SetGauge([]string{"client", "allocs", tr.alloc.Job.Name, tr.alloc.TaskGroup, tr.allocID, tr.taskName, "memory", "kernel_usage"}, float32(ru.ResourceUsage.MemoryStats.KernelUsage))
 	metrics.SetGauge([]string{"client", "allocs", tr.alloc.Job.Name, tr.alloc.TaskGroup, tr.allocID, tr.taskName, "memory", "kernel_max_usage"}, float32(ru.ResourceUsage.MemoryStats.KernelMaxUsage))
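The gauges above use armon/go-metrics. A small standalone sketch of emitting one tagged gauge; the label values are made up for the example (Nomad passes tr.baseLabels instead):

    import metrics "github.com/armon/go-metrics"

    func emitMemoryUsage(usage uint64) {
    	labels := []metrics.Label{{Name: "node_class", Value: "compute"}} // illustrative labels
    	metrics.SetGaugeWithLabels(
    		[]string{"client", "allocs", "memory", "usage"},
    		float32(usage), labels)
    }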
@@ -1184,8 +1198,7 @@ func (tr *TaskRunner) emitStats(ru *cstructs.TaskResourceUsage) {
 }

 // appendTaskEvent updates the task status by appending the new event.
-func appendTaskEvent(state *structs.TaskState, event *structs.TaskEvent) {
-	const capacity = 10
+func appendTaskEvent(state *structs.TaskState, event *structs.TaskEvent, capacity int) {
 	if state.Events == nil {
 		state.Events = make([]*structs.TaskEvent, 1, capacity)
 		state.Events[0] = event
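appendTaskEvent now takes the capacity from the runner (tr.maxEvents) instead of a hard-coded constant. A sketch of the drop-oldest bounded append this enables; the eviction policy shown is an assumption, not confirmed by this hunk:

    // appendBounded keeps at most capacity events, dropping the oldest
    // when full. Assumes structs.TaskEvent from nomad/structs is imported.
    func appendBounded(events []*structs.TaskEvent, event *structs.TaskEvent, capacity int) []*structs.TaskEvent {
    	if len(events) < capacity {
    		return append(events, event)
    	}
    	// At capacity: shift everything left to drop the oldest event,
    	// then place the new event in the freed last slot.
    	copy(events, events[1:])
    	events[len(events)-1] = event
    	return events
    }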
@@ -437,36 +437,38 @@ func (tr *TaskRunner) updateHooks() {
 	}
 }

-// killing is used to run the runners kill hooks.
-func (tr *TaskRunner) killing() {
+// preKill is used to run the runners preKill hooks
+// preKill hooks contain logic that must be executed before
+// a task is killed or restarted
+func (tr *TaskRunner) preKill() {
 	if tr.logger.IsTrace() {
 		start := time.Now()
-		tr.logger.Trace("running kill hooks", "start", start)
+		tr.logger.Trace("running pre kill hooks", "start", start)
 		defer func() {
 			end := time.Now()
-			tr.logger.Trace("finished kill hooks", "end", end, "duration", end.Sub(start))
+			tr.logger.Trace("finished pre kill hooks", "end", end, "duration", end.Sub(start))
 		}()
 	}

 	for _, hook := range tr.runnerHooks {
-		killHook, ok := hook.(interfaces.TaskKillHook)
+		killHook, ok := hook.(interfaces.TaskPreKillHook)
 		if !ok {
 			continue
 		}

 		name := killHook.Name()

-		// Time the update hook
+		// Time the pre kill hook
 		var start time.Time
 		if tr.logger.IsTrace() {
 			start = time.Now()
 			tr.logger.Trace("running kill hook", "name", name, "start", start)
 		}

-		// Run the kill hook
-		req := interfaces.TaskKillRequest{}
-		var resp interfaces.TaskKillResponse
-		if err := killHook.Killing(context.Background(), &req, &resp); err != nil {
+		// Run the pre kill hook
+		req := interfaces.TaskPreKillRequest{}
+		var resp interfaces.TaskPreKillResponse
+		if err := killHook.PreKilling(context.Background(), &req, &resp); err != nil {
 			tr.emitHookError(err, name)
 			tr.logger.Error("kill hook failed", "name", name, "error", err)
 		}
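The TaskPreKillHook interface itself is not shown in this diff; from the call site above it needs Name() plus PreKilling(ctx, *TaskPreKillRequest, *TaskPreKillResponse). A hypothetical hook satisfying that inferred contract:

    // deregisterHook is a made-up example; Nomad's real pre-kill hooks
    // (e.g. Consul service deregistration) live elsewhere in the tree.
    type deregisterHook struct{}

    func (*deregisterHook) Name() string { return "deregister" }

    func (*deregisterHook) PreKilling(ctx context.Context, req *interfaces.TaskPreKillRequest, resp *interfaces.TaskPreKillResponse) error {
    	// Run cleanup that must happen before the task is killed or restarted.
    	return nil
    }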
@@ -3,10 +3,12 @@ package taskrunner
 import (
 	"context"
 	"fmt"
+	"io/ioutil"
+	"path/filepath"
 	"testing"
 	"time"

+	"github.com/golang/snappy"
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/config"

@@ -16,6 +18,7 @@ import (
 	"github.com/hashicorp/nomad/client/pluginmanager/drivermanager"
 	cstate "github.com/hashicorp/nomad/client/state"
 	"github.com/hashicorp/nomad/client/vaultclient"
+	agentconsul "github.com/hashicorp/nomad/command/agent/consul"
 	mockdriver "github.com/hashicorp/nomad/drivers/mock"
 	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/mock"

@@ -124,12 +127,7 @@ func TestTaskRunner_Restore_Running(t *testing.T) {
 	defer origTR.Kill(context.Background(), structs.NewTaskEvent("cleanup"))

 	// Wait for it to be running
-	testutil.WaitForResult(func() (bool, error) {
-		ts := origTR.TaskState()
-		return ts.State == structs.TaskStateRunning, fmt.Errorf("%v", ts.State)
-	}, func(err error) {
-		t.Fatalf("expected running; got: %v", err)
-	})
+	testWaitForTaskToStart(t, origTR)

 	// Cause TR to exit without shutting down task
 	origTR.Shutdown()
@@ -207,6 +205,48 @@ func TestTaskRunner_TaskEnv(t *testing.T) {
 	assert.Equal(t, "global bar somebody", mockCfg.StdoutString)
 }

+func TestTaskRunner_TaskConfig(t *testing.T) {
+	t.Parallel()
+	require := require.New(t)
+
+	alloc := mock.BatchAlloc()
+	task := alloc.Job.TaskGroups[0].Tasks[0]
+	task.Driver = "mock_driver"
+
+	//// Use interpolation from both node attributes and meta vars
+	//task.Config = map[string]interface{}{
+	//	"run_for": "1ms",
+	//}
+
+	conf, cleanup := testTaskRunnerConfig(t, alloc, task.Name)
+	defer cleanup()
+
+	// Run the first TaskRunner
+	tr, err := NewTaskRunner(conf)
+	require.NoError(err)
+	go tr.Run()
+	defer tr.Kill(context.Background(), structs.NewTaskEvent("cleanup"))
+
+	// Wait for task to complete
+	select {
+	case <-tr.WaitCh():
+	case <-time.After(3 * time.Second):
+	}
+
+	// Get the mock driver plugin
+	driverPlugin, err := conf.DriverManager.Dispense(mockdriver.PluginID.Name)
+	require.NoError(err)
+	mockDriver := driverPlugin.(*mockdriver.Driver)
+
+	// Assert its config has been properly interpolated
+	driverCfg, mockCfg := mockDriver.GetTaskConfig()
+	require.NotNil(driverCfg)
+	require.NotNil(mockCfg)
+	assert.Equal(t, alloc.Job.Name, driverCfg.JobName)
+	assert.Equal(t, alloc.TaskGroup, driverCfg.TaskGroupName)
+	assert.Equal(t, alloc.Job.TaskGroups[0].Tasks[0].Name, driverCfg.Name)
+}
+
 // Test that devices get sent to the driver
 func TestTaskRunner_DevicePropogation(t *testing.T) {
 	t.Parallel()
@@ -502,16 +542,237 @@ WAIT:
 	}
 }

-// testWaitForTaskToStart waits for the task to or fails the test
-func testWaitForTaskToStart(t *testing.T, tr *TaskRunner) {
-	// Wait for the task to start
-	testutil.WaitForResult(func() (bool, error) {
-		tr.stateLock.RLock()
-		started := !tr.state.StartedAt.IsZero()
-		tr.stateLock.RUnlock()
-
-		return started, nil
-	}, func(err error) {
-		t.Fatalf("not started")
-	})
-}
+// TestTaskRunner_Dispatch_Payload asserts that a dispatch job runs and the
+// payload was written to disk.
+func TestTaskRunner_Dispatch_Payload(t *testing.T) {
+	t.Parallel()
+
+	alloc := mock.BatchAlloc()
+	task := alloc.Job.TaskGroups[0].Tasks[0]
+	task.Driver = "mock_driver"
+	task.Config = map[string]interface{}{
+		"run_for": "1s",
+	}
+
+	fileName := "test"
+	task.DispatchPayload = &structs.DispatchPayloadConfig{
+		File: fileName,
+	}
+	alloc.Job.ParameterizedJob = &structs.ParameterizedJobConfig{}
+
+	// Add a payload (they're snappy encoded bytes)
+	expected := []byte("hello world")
+	compressed := snappy.Encode(nil, expected)
+	alloc.Job.Payload = compressed
+
+	conf, cleanup := testTaskRunnerConfig(t, alloc, task.Name)
+	defer cleanup()
+
+	tr, err := NewTaskRunner(conf)
+	require.NoError(t, err)
+	go tr.Run()
+	defer tr.Kill(context.Background(), structs.NewTaskEvent("cleanup"))
+
+	// Wait for it to finish
+	testutil.WaitForResult(func() (bool, error) {
+		ts := tr.TaskState()
+		return ts.State == structs.TaskStateDead, fmt.Errorf("%v", ts.State)
+	}, func(err error) {
+		require.NoError(t, err)
+	})
+
+	// Should have exited successfully
+	ts := tr.TaskState()
+	require.False(t, ts.Failed)
+	require.Zero(t, ts.Restarts)
+
+	// Check that the file was written to disk properly
+	payloadPath := filepath.Join(tr.taskDir.LocalDir, fileName)
+	data, err := ioutil.ReadFile(payloadPath)
+	require.NoError(t, err)
+	require.Equal(t, expected, data)
+}
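The payload round-trip in the test relies on github.com/golang/snappy block encoding; a self-contained sketch of the encode/decode pair:

    package main

    import (
    	"fmt"

    	"github.com/golang/snappy"
    )

    func main() {
    	compressed := snappy.Encode(nil, []byte("hello world"))
    	decoded, err := snappy.Decode(nil, compressed)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(decoded)) // prints "hello world"
    }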
+// TestTaskRunner_SignalFailure asserts that signal errors are properly
+// propagated from the driver to TaskRunner.
+func TestTaskRunner_SignalFailure(t *testing.T) {
+	t.Parallel()
+
+	alloc := mock.Alloc()
+	task := alloc.Job.TaskGroups[0].Tasks[0]
+	task.Driver = "mock_driver"
+	errMsg := "test forcing failure"
+	task.Config = map[string]interface{}{
+		"run_for":      "10m",
+		"signal_error": errMsg,
+	}
+
+	conf, cleanup := testTaskRunnerConfig(t, alloc, task.Name)
+	defer cleanup()
+
+	tr, err := NewTaskRunner(conf)
+	require.NoError(t, err)
+	go tr.Run()
+	defer tr.Kill(context.Background(), structs.NewTaskEvent("cleanup"))
+
+	testWaitForTaskToStart(t, tr)
+
+	require.EqualError(t, tr.Signal(&structs.TaskEvent{}, "SIGINT"), errMsg)
+}
+
+// TestTaskRunner_RestartTask asserts that restarting a task works and emits a
+// Restarting event.
+func TestTaskRunner_RestartTask(t *testing.T) {
+	t.Parallel()
+
+	alloc := mock.Alloc()
+	task := alloc.Job.TaskGroups[0].Tasks[0]
+	task.Driver = "mock_driver"
+	task.Config = map[string]interface{}{
+		"run_for": "10m",
+	}
+
+	conf, cleanup := testTaskRunnerConfig(t, alloc, task.Name)
+	defer cleanup()
+
+	tr, err := NewTaskRunner(conf)
+	require.NoError(t, err)
+	go tr.Run()
+	defer tr.Kill(context.Background(), structs.NewTaskEvent("cleanup"))
+
+	testWaitForTaskToStart(t, tr)
+
+	// Restart task. Send a RestartSignal event like check watcher. Restart
+	// handler emits the Restarting event.
+	event := structs.NewTaskEvent(structs.TaskRestartSignal).SetRestartReason("test")
+	const fail = false
+	tr.Restart(context.Background(), event.Copy(), fail)
+
+	// Wait for it to restart and be running again
+	testutil.WaitForResult(func() (bool, error) {
+		ts := tr.TaskState()
+		if ts.Restarts != 1 {
+			return false, fmt.Errorf("expected 1 restart but found %d\nevents: %s",
+				ts.Restarts, pretty.Sprint(ts.Events))
+		}
+		if ts.State != structs.TaskStateRunning {
+			return false, fmt.Errorf("expected running but received %s", ts.State)
+		}
+		return true, nil
+	}, func(err error) {
+		require.NoError(t, err)
+	})
+
+	// Assert the expected Restarting event was emitted
+	found := false
+	events := tr.TaskState().Events
+	for _, e := range events {
+		if e.Type == structs.TaskRestartSignal {
+			found = true
+			require.Equal(t, event.Time, e.Time)
+			require.Equal(t, event.RestartReason, e.RestartReason)
+			require.Contains(t, e.DisplayMessage, event.RestartReason)
+		}
+	}
+	require.True(t, found, "restarting task event not found", pretty.Sprint(events))
+}
+
+// TestTaskRunner_CheckWatcher_Restart asserts that when enabled an unhealthy
+// Consul check will cause a task to restart following restart policy rules.
+func TestTaskRunner_CheckWatcher_Restart(t *testing.T) {
+	t.Parallel()
+
+	alloc := mock.Alloc()
+
+	// Make the restart policy fail within this test
+	tg := alloc.Job.TaskGroups[0]
+	tg.RestartPolicy.Attempts = 2
+	tg.RestartPolicy.Interval = 1 * time.Minute
+	tg.RestartPolicy.Delay = 10 * time.Millisecond
+	tg.RestartPolicy.Mode = structs.RestartPolicyModeFail
+
+	task := tg.Tasks[0]
+	task.Driver = "mock_driver"
+	task.Config = map[string]interface{}{
+		"run_for": "10m",
+	}
+
+	// Make the task register a check that fails
+	task.Services[0].Checks[0] = &structs.ServiceCheck{
+		Name:     "test-restarts",
+		Type:     structs.ServiceCheckTCP,
+		Interval: 50 * time.Millisecond,
+		CheckRestart: &structs.CheckRestart{
+			Limit: 2,
+			Grace: 100 * time.Millisecond,
+		},
+	}
+
+	conf, cleanup := testTaskRunnerConfig(t, alloc, task.Name)
+	defer cleanup()
+
+	// Replace mock Consul ServiceClient, with the real ServiceClient
+	// backed by a mock consul whose checks are always unhealthy.
+	consulAgent := agentconsul.NewMockAgent()
+	consulAgent.SetStatus("critical")
+	consulClient := agentconsul.NewServiceClient(consulAgent, conf.Logger, true)
+	go consulClient.Run()
+	defer consulClient.Shutdown()
+
+	conf.Consul = consulClient
+
+	tr, err := NewTaskRunner(conf)
+	require.NoError(t, err)
+
+	expectedEvents := []string{
+		"Received",
+		"Task Setup",
+		"Started",
+		"Restart Signaled",
+		"Terminated",
+		"Restarting",
+		"Started",
+		"Restart Signaled",
+		"Terminated",
+		"Restarting",
+		"Started",
+		"Restart Signaled",
+		"Terminated",
+		"Not Restarting",
+	}
+
+	// Bump maxEvents so task events aren't dropped
+	tr.maxEvents = 100
+
+	go tr.Run()
+	defer tr.Kill(context.Background(), structs.NewTaskEvent("cleanup"))
+
+	// Wait until the task exits. Don't simply wait for it to run as it may
+	// get restarted and terminated before the test is able to observe it
+	// running.
+	select {
+	case <-tr.WaitCh():
+	case <-time.After(time.Duration(testutil.TestMultiplier()*15) * time.Second):
+		require.Fail(t, "timeout")
+	}
+
+	state := tr.TaskState()
+	actualEvents := make([]string, len(state.Events))
+	for i, e := range state.Events {
+		actualEvents[i] = string(e.Type)
+	}
+	require.Equal(t, actualEvents, expectedEvents)
+
+	require.Equal(t, structs.TaskStateDead, state.State)
+	require.True(t, state.Failed, pretty.Sprint(state))
+}
+
+// testWaitForTaskToStart waits for the task to be running or fails the test
+func testWaitForTaskToStart(t *testing.T, tr *TaskRunner) {
+	testutil.WaitForResult(func() (bool, error) {
+		ts := tr.TaskState()
+		return ts.State == structs.TaskStateRunning, fmt.Errorf("%v", ts.State)
+	}, func(err error) {
+		require.NoError(t, err)
+	})
+}
@@ -5,19 +5,16 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
-	"syscall"
 	"testing"
 	"time"

 	hclog "github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/nomad/client/allocdir"
 	cstructs "github.com/hashicorp/nomad/client/structs"
-	ctestutil "github.com/hashicorp/nomad/client/testutil"
 	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
@@ -201,148 +198,6 @@ func TestPrevAlloc_LocalPrevAlloc_Terminated(t *testing.T) {
 	require.NoError(t, waiter.Wait(ctx))
 }

-// TestPrevAlloc_StreamAllocDir_Ok asserts that streaming a tar to an alloc dir
-// works.
-func TestPrevAlloc_StreamAllocDir_Ok(t *testing.T) {
-	ctestutil.RequireRoot(t)
-	t.Parallel()
-	dir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	defer os.RemoveAll(dir)
-
-	// Create foo/
-	fooDir := filepath.Join(dir, "foo")
-	if err := os.Mkdir(fooDir, 0777); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-
-	// Change ownership of foo/ to test #3702 (any non-root user is fine)
-	const uid, gid = 1, 1
-	if err := os.Chown(fooDir, uid, gid); err != nil {
-		t.Fatalf("err : %v", err)
-	}
-
-	dirInfo, err := os.Stat(fooDir)
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-
-	// Create foo/bar
-	f, err := os.Create(filepath.Join(fooDir, "bar"))
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	if _, err := f.WriteString("123"); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	if err := f.Chmod(0644); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	fInfo, err := f.Stat()
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	f.Close()
-
-	// Create foo/baz -> bar symlink
-	if err := os.Symlink("bar", filepath.Join(dir, "foo", "baz")); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	linkInfo, err := os.Lstat(filepath.Join(dir, "foo", "baz"))
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-
-	buf := new(bytes.Buffer)
-	tw := tar.NewWriter(buf)
-
-	walkFn := func(path string, fileInfo os.FileInfo, err error) error {
-		// Include the path of the file name relative to the alloc dir
-		// so that we can put the files in the right directories
-		link := ""
-		if fileInfo.Mode()&os.ModeSymlink != 0 {
-			target, err := os.Readlink(path)
-			if err != nil {
-				return fmt.Errorf("error reading symlink: %v", err)
-			}
-			link = target
-		}
-		hdr, err := tar.FileInfoHeader(fileInfo, link)
-		if err != nil {
-			return fmt.Errorf("error creating file header: %v", err)
-		}
-		hdr.Name = fileInfo.Name()
-		tw.WriteHeader(hdr)
-
-		// If it's a directory or symlink we just write the header into the tar
-		if fileInfo.IsDir() || (fileInfo.Mode()&os.ModeSymlink != 0) {
-			return nil
-		}
-
-		// Write the file into the archive
-		file, err := os.Open(path)
-		if err != nil {
-			return err
-		}
-		defer file.Close()
-
-		if _, err := io.Copy(tw, file); err != nil {
-			return err
-		}
-
-		return nil
-	}
-
-	if err := filepath.Walk(dir, walkFn); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	tw.Close()
-
-	dir1, err := ioutil.TempDir("", "nomadtest-")
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	defer os.RemoveAll(dir1)
-
-	rc := ioutil.NopCloser(buf)
-	prevAlloc := &remotePrevAlloc{logger: testlog.HCLogger(t)}
-	if err := prevAlloc.streamAllocDir(context.Background(), rc, dir1); err != nil {
-		t.Fatalf("err: %v", err)
-	}
-
-	// Ensure foo is present
-	fi, err := os.Stat(filepath.Join(dir1, "foo"))
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	if fi.Mode() != dirInfo.Mode() {
-		t.Fatalf("mode: %v", fi.Mode())
-	}
-	stat := fi.Sys().(*syscall.Stat_t)
-	if stat.Uid != uid || stat.Gid != gid {
-		t.Fatalf("foo/ has incorrect ownership: expected %d:%d found %d:%d",
-			uid, gid, stat.Uid, stat.Gid)
-	}
-
-	fi1, err := os.Stat(filepath.Join(dir1, "bar"))
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	if fi1.Mode() != fInfo.Mode() {
-		t.Fatalf("mode: %v", fi1.Mode())
-	}
-
-	fi2, err := os.Lstat(filepath.Join(dir1, "baz"))
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	if fi2.Mode() != linkInfo.Mode() {
-		t.Fatalf("mode: %v", fi2.Mode())
-	}
-}
-
 // TestPrevAlloc_StreamAllocDir_Error asserts that errors encountered while
 // streaming a tar cause the migration to be cancelled and no files are written
 // (migrations are atomic).

161 client/allocwatcher/alloc_watcher_unix_test.go (new file)
@@ -0,0 +1,161 @@
+// +build !windows
+
+package allocwatcher
+
+import (
+	"archive/tar"
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"syscall"
+	"testing"
+
+	ctestutil "github.com/hashicorp/nomad/client/testutil"
+	"github.com/hashicorp/nomad/helper/testlog"
+)
+
+// TestPrevAlloc_StreamAllocDir_Ok asserts that streaming a tar to an alloc dir
+// works.
+func TestPrevAlloc_StreamAllocDir_Ok(t *testing.T) {
+	ctestutil.RequireRoot(t)
+	t.Parallel()
+	dir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer os.RemoveAll(dir)
+
+	// Create foo/
+	fooDir := filepath.Join(dir, "foo")
+	if err := os.Mkdir(fooDir, 0777); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Change ownership of foo/ to test #3702 (any non-root user is fine)
+	const uid, gid = 1, 1
+	if err := os.Chown(fooDir, uid, gid); err != nil {
+		t.Fatalf("err : %v", err)
+	}
+
+	dirInfo, err := os.Stat(fooDir)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Create foo/bar
+	f, err := os.Create(filepath.Join(fooDir, "bar"))
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if _, err := f.WriteString("123"); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if err := f.Chmod(0644); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	fInfo, err := f.Stat()
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	f.Close()
+
+	// Create foo/baz -> bar symlink
+	if err := os.Symlink("bar", filepath.Join(dir, "foo", "baz")); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	linkInfo, err := os.Lstat(filepath.Join(dir, "foo", "baz"))
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+
+	walkFn := func(path string, fileInfo os.FileInfo, err error) error {
+		// Include the path of the file name relative to the alloc dir
+		// so that we can put the files in the right directories
+		link := ""
+		if fileInfo.Mode()&os.ModeSymlink != 0 {
+			target, err := os.Readlink(path)
+			if err != nil {
+				return fmt.Errorf("error reading symlink: %v", err)
+			}
+			link = target
+		}
+		hdr, err := tar.FileInfoHeader(fileInfo, link)
+		if err != nil {
+			return fmt.Errorf("error creating file header: %v", err)
+		}
+		hdr.Name = fileInfo.Name()
+		tw.WriteHeader(hdr)
+
+		// If it's a directory or symlink we just write the header into the tar
+		if fileInfo.IsDir() || (fileInfo.Mode()&os.ModeSymlink != 0) {
+			return nil
+		}
+
+		// Write the file into the archive
+		file, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+		defer file.Close()
+
+		if _, err := io.Copy(tw, file); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	if err := filepath.Walk(dir, walkFn); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	tw.Close()
+
+	dir1, err := ioutil.TempDir("", "nomadtest-")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	defer os.RemoveAll(dir1)
+
+	rc := ioutil.NopCloser(buf)
+	prevAlloc := &remotePrevAlloc{logger: testlog.HCLogger(t)}
+	if err := prevAlloc.streamAllocDir(context.Background(), rc, dir1); err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Ensure foo is present
+	fi, err := os.Stat(filepath.Join(dir1, "foo"))
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if fi.Mode() != dirInfo.Mode() {
+		t.Fatalf("mode: %v", fi.Mode())
+	}
+	stat := fi.Sys().(*syscall.Stat_t)
+	if stat.Uid != uid || stat.Gid != gid {
+		t.Fatalf("foo/ has incorrect ownership: expected %d:%d found %d:%d",
+			uid, gid, stat.Uid, stat.Gid)
+	}
+
+	fi1, err := os.Stat(filepath.Join(dir1, "bar"))
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if fi1.Mode() != fInfo.Mode() {
+		t.Fatalf("mode: %v", fi1.Mode())
+	}
+
+	fi2, err := os.Lstat(filepath.Join(dir1, "baz"))
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if fi2.Mode() != linkInfo.Mode() {
+		t.Fatalf("mode: %v", fi2.Mode())
+	}
+}
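streamAllocDir consumes a tar stream like the one the test builds above. A small sketch of walking such a stream with archive/tar, which is roughly the read side of the test's walkFn (assumes archive/tar, fmt, and io are imported; it is an illustration, not the streamAllocDir implementation):

    // listTar prints each entry in a tar stream.
    func listTar(r io.Reader) error {
    	tr := tar.NewReader(r)
    	for {
    		hdr, err := tr.Next()
    		if err == io.EOF {
    			return nil // end of archive
    		}
    		if err != nil {
    			return err
    		}
    		fmt.Printf("%s (mode %v)\n", hdr.Name, hdr.FileInfo().Mode())
    	}
    }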
@@ -7,7 +7,7 @@ import (
 )

 func TestNetworkFingerPrint_linkspeed_parse(t *testing.T) {
-	f := &NetworkFingerprint{logger: testlog.Logger(t), interfaceDetector: &DefaultNetworkInterfaceDetector{}}
+	f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &DefaultNetworkInterfaceDetector{}}

 	var outputTests = []struct {
 		in string
@@ -10,6 +10,7 @@ import (
 	"os"
 	"path/filepath"
 	"reflect"
+	"runtime"
 	"strings"
 	"testing"
 	"time"
@@ -890,7 +891,7 @@ func TestFS_Logs_TaskPending(t *testing.T) {
 	job := mock.BatchJob()
 	job.TaskGroups[0].Count = 1
 	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
-		"start_block_for": "4s",
+		"start_block_for": "10s",
 	}

 	// Register job
@@ -915,6 +916,12 @@ func TestFS_Logs_TaskPending(t *testing.T) {
 		}

 		allocID = resp.Allocations[0].ID
+
+		// wait for alloc runner to be created; otherwise, we get no alloc found error
+		if _, err := c.getAllocRunner(allocID); err != nil {
+			return false, fmt.Errorf("alloc runner was not created yet for %v", allocID)
+		}
+
 		return true, nil
 	}, func(err error) {
 		t.Fatalf("error getting alloc id: %v", err)
@@ -1523,7 +1530,11 @@ func TestFS_streamFile_NoFile(t *testing.T) {
 	err := c.endpoints.FileSystem.streamFile(
 		context.Background(), 0, "foo", 0, ad, framer, nil)
 	require.NotNil(err)
+	if runtime.GOOS == "windows" {
+		require.Contains(err.Error(), "cannot find the file")
+	} else {
 		require.Contains(err.Error(), "no such file")
+	}
 }

 func TestFS_streamFile_Modify(t *testing.T) {
@@ -1701,6 +1712,9 @@ func TestFS_streamFile_Truncate(t *testing.T) {
 }

 func TestFS_streamImpl_Delete(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("Windows does not allow us to delete a file while it is open")
+	}
 	t.Parallel()

 	c, cleanup := TestClient(t, nil)
@@ -1725,7 +1739,11 @@ func TestFS_streamImpl_Delete(t *testing.T) {
 	frames := make(chan *sframer.StreamFrame, 4)
 	go func() {
 		for {
-			frame := <-frames
+			frame, ok := <-frames
+			if !ok {
+				return
+			}
+
 			if frame.IsHeartbeat() {
 				continue
 			}
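The comma-ok receive is what lets the goroutine notice the frames channel being closed by the sender instead of spinning on zero-valued frames. A tiny standalone illustration of the idiom:

    package main

    import "fmt"

    func main() {
    	ch := make(chan int, 1)
    	ch <- 42
    	close(ch)

    	for {
    		v, ok := <-ch
    		if !ok {
    			fmt.Println("channel closed") // a plain receive would return zero values forever
    			return
    		}
    		fmt.Println("got", v)
    	}
    }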
@@ -14,7 +14,7 @@ import (

 // LaunchLogMon an instance of logmon
 // TODO: Integrate with base plugin loader
-func LaunchLogMon(logger hclog.Logger) (LogMon, *plugin.Client, error) {
+func LaunchLogMon(logger hclog.Logger, reattachConfig *plugin.ReattachConfig) (LogMon, *plugin.Client, error) {
 	logger = logger.Named("logmon")
 	bin, err := discover.NomadExecutable()
 	if err != nil {

@@ -23,6 +23,7 @@ func LaunchLogMon(logger hclog.Logger) (LogMon, *plugin.Client, error) {

 	client := plugin.NewClient(&plugin.ClientConfig{
 		HandshakeConfig: base.Handshake,
+		Reattach:        reattachConfig,
 		Plugins: map[string]plugin.Plugin{
 			"logmon": &Plugin{},
 		},

@@ -45,7 +46,6 @@ func LaunchLogMon(logger hclog.Logger) (LogMon, *plugin.Client, error) {

 	l := raw.(LogMon)
 	return l, client, nil
-
 }

 type Plugin struct {
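A sketch of constructing the new reattach argument with go-plugin; in practice the address and PID would come from persisted client state rather than being built by hand, and the exact fields Nomad populates are not shown in this diff. Passing nil should keep the old behavior of launching a fresh logmon process:

    // buildReattach assembles a ReattachConfig for an already-running plugin.
    // addr and pid are assumed to come from saved state.
    func buildReattach(addr net.Addr, pid int) *plugin.ReattachConfig {
    	return &plugin.ReattachConfig{
    		Protocol: plugin.ProtocolGRPC,
    		Addr:     addr,
    		Pid:      pid,
    	}
    }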
@@ -40,7 +40,7 @@ func (m *StartRequest) Reset() { *m = StartRequest{} }
 func (m *StartRequest) String() string { return proto.CompactTextString(m) }
 func (*StartRequest) ProtoMessage()    {}
 func (*StartRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_logmon_6dbff459851a9ae9, []int{0}
+	return fileDescriptor_logmon_c8f5fe5f286cd193, []int{0}
 }
 func (m *StartRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StartRequest.Unmarshal(m, b)

@@ -119,7 +119,7 @@ func (m *StartResponse) Reset() { *m = StartResponse{} }
 func (m *StartResponse) String() string { return proto.CompactTextString(m) }
 func (*StartResponse) ProtoMessage()    {}
 func (*StartResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_logmon_6dbff459851a9ae9, []int{1}
+	return fileDescriptor_logmon_c8f5fe5f286cd193, []int{1}
 }
 func (m *StartResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StartResponse.Unmarshal(m, b)

@@ -149,7 +149,7 @@ func (m *StopRequest) Reset() { *m = StopRequest{} }
 func (m *StopRequest) String() string { return proto.CompactTextString(m) }
 func (*StopRequest) ProtoMessage()    {}
 func (*StopRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_logmon_6dbff459851a9ae9, []int{2}
+	return fileDescriptor_logmon_c8f5fe5f286cd193, []int{2}
 }
 func (m *StopRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StopRequest.Unmarshal(m, b)

@@ -179,7 +179,7 @@ func (m *StopResponse) Reset() { *m = StopResponse{} }
 func (m *StopResponse) String() string { return proto.CompactTextString(m) }
 func (*StopResponse) ProtoMessage()    {}
 func (*StopResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_logmon_6dbff459851a9ae9, []int{3}
+	return fileDescriptor_logmon_c8f5fe5f286cd193, []int{3}
 }
 func (m *StopResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StopResponse.Unmarshal(m, b)

@@ -312,10 +312,10 @@ var _LogMon_serviceDesc = grpc.ServiceDesc{
 }

 func init() {
-	proto.RegisterFile("client/logmon/proto/logmon.proto", fileDescriptor_logmon_6dbff459851a9ae9)
+	proto.RegisterFile("client/logmon/proto/logmon.proto", fileDescriptor_logmon_c8f5fe5f286cd193)
 }

-var fileDescriptor_logmon_6dbff459851a9ae9 = []byte{
+var fileDescriptor_logmon_c8f5fe5f286cd193 = []byte{
 	// 320 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x31, 0x6f, 0xc2, 0x30,
 	0x10, 0x85, 0x1b, 0x0a, 0xa1, 0x1c, 0x0d, 0x45, 0x5e, 0x1a, 0xd1, 0xa1, 0x28, 0x1d, 0xca, 0x14,
@@ -32,7 +32,7 @@ type allocRunnerMutableState08 struct {
 // 0.8.
 //
 // https://github.com/hashicorp/nomad/blob/v0.8.6/client/task_runner.go#L188-L197
-//
+// COMPAT(0.10): Allows upgrading from 0.8.X to 0.9.0.
 type taskRunnerState08 struct {
 	Version  string
 	HandleID string
@@ -168,6 +168,7 @@ type MemoryStats struct {
 	RSS            uint64
 	Cache          uint64
 	Swap           uint64
+	Usage          uint64
 	MaxUsage       uint64
 	KernelUsage    uint64
 	KernelMaxUsage uint64

@@ -184,6 +185,7 @@ func (ms *MemoryStats) Add(other *MemoryStats) {
 	ms.RSS += other.RSS
 	ms.Cache += other.Cache
 	ms.Swap += other.Swap
+	ms.Usage += other.Usage
 	ms.MaxUsage += other.MaxUsage
 	ms.KernelUsage += other.KernelUsage
 	ms.KernelMaxUsage += other.KernelMaxUsage
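A short usage note for the extended Add: aggregating two snapshots now carries the new Usage counter along with the rest. A fragment assuming the package's MemoryStats type:

    a := &MemoryStats{RSS: 100, Usage: 150}
    b := &MemoryStats{RSS: 50, Usage: 75}
    a.Add(b) // a.RSS == 150, a.Usage == 225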
@@ -9,10 +9,12 @@ import (
 	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/testutil"
 	vaultapi "github.com/hashicorp/vault/api"
+	"github.com/stretchr/testify/require"
 )

 func TestVaultClient_TokenRenewals(t *testing.T) {
 	t.Parallel()
+	require := require.New(t)
 	v := testutil.NewTestVault(t)
 	defer v.Stop()

@@ -67,9 +69,7 @@ func TestVaultClient_TokenRenewals(t *testing.T) {
 		for {
 			select {
 			case err := <-errCh:
-				if err != nil {
-					t.Fatalf("error while renewing the token: %v", err)
-				}
+				require.NoError(err, "unexpected error while renewing vault token")
 			}
 		}
 	}(errCh)

@@ -83,7 +83,7 @@ func TestVaultClient_TokenRenewals(t *testing.T) {

 	for i := 0; i < num; i++ {
 		if err := c.StopRenewToken(tokens[i]); err != nil {
-			t.Fatal(err)
+			require.NoError(err)
 		}
 	}

@@ -275,7 +275,8 @@ func TestVaultClient_RenewNonexistentLease(t *testing.T) {
 	_, err = c.RenewToken(c.client.Token(), 10)
 	if err == nil {
 		t.Fatalf("expected error, got nil")
-	} else if !strings.Contains(err.Error(), "lease not found") {
-		t.Fatalf("expected \"%s\" in error message, got \"%v\"", "lease not found", err)
+		// The Vault error message changed between 0.10.2 and 1.0.1
+	} else if !strings.Contains(err.Error(), "lease not found") && !strings.Contains(err.Error(), "lease is not renewable") {
+		t.Fatalf("expected \"%s\" or \"%s\" in error message, got \"%v\"", "lease not found", "lease is not renewable", err.Error())
 	}
 }
@@ -130,7 +130,7 @@ func TestHTTP_JobsRegister(t *testing.T) {
 	t.Parallel()
 	httpTest(t, nil, func(s *TestAgent) {
 		// Create the job
-		job := api.MockJob()
+		job := MockJob()
 		args := api.JobRegisterRequest{
 			Job:          job,
 			WriteRequest: api.WriteRequest{Region: "global"},

@@ -185,7 +185,7 @@ func TestHTTP_JobsRegister_ACL(t *testing.T) {
 	t.Parallel()
 	httpACLTest(t, nil, func(s *TestAgent) {
 		// Create the job
-		job := api.MockJob()
+		job := MockJob()
 		args := api.JobRegisterRequest{
 			Job: job,
 			WriteRequest: api.WriteRequest{

@@ -215,7 +215,7 @@ func TestHTTP_JobsRegister_Defaulting(t *testing.T) {
 	t.Parallel()
 	httpTest(t, nil, func(s *TestAgent) {
 		// Create the job
-		job := api.MockJob()
+		job := MockJob()

 		// Do not set its priority
 		job.Priority = nil

@@ -411,7 +411,7 @@ func TestHTTP_JobUpdate(t *testing.T) {
 	t.Parallel()
 	httpTest(t, nil, func(s *TestAgent) {
 		// Create the job
-		job := api.MockJob()
+		job := MockJob()
 		args := api.JobRegisterRequest{
 			Job: job,
 			WriteRequest: api.WriteRequest{

@@ -985,7 +985,7 @@ func TestHTTP_JobPlan(t *testing.T) {
 	t.Parallel()
 	httpTest(t, nil, func(s *TestAgent) {
 		// Create the job
-		job := api.MockJob()
+		job := MockJob()
 		args := api.JobPlanRequest{
 			Job:  job,
 			Diff: true,
@@ -1,14 +1,15 @@
-package api
+package agent

 import (
 	"time"

+	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/helper/uuid"
 )

-func MockJob() *Job {
-	job := &Job{
+func MockJob() *api.Job {
+	job := &api.Job{
 		Region: helper.StringToPtr("global"),
 		ID:     helper.StringToPtr(uuid.Generate()),
 		Name:   helper.StringToPtr("my-job"),

@@ -16,27 +17,27 @@ func MockJob() *Job {
 		Priority:    helper.IntToPtr(50),
 		AllAtOnce:   helper.BoolToPtr(false),
 		Datacenters: []string{"dc1"},
-		Constraints: []*Constraint{
+		Constraints: []*api.Constraint{
 			{
 				LTarget: "${attr.kernel.name}",
 				RTarget: "linux",
 				Operand: "=",
 			},
 		},
-		TaskGroups: []*TaskGroup{
+		TaskGroups: []*api.TaskGroup{
 			{
 				Name:  helper.StringToPtr("web"),
 				Count: helper.IntToPtr(10),
-				EphemeralDisk: &EphemeralDisk{
+				EphemeralDisk: &api.EphemeralDisk{
 					SizeMB: helper.IntToPtr(150),
 				},
-				RestartPolicy: &RestartPolicy{
+				RestartPolicy: &api.RestartPolicy{
 					Attempts: helper.IntToPtr(3),
 					Interval: helper.TimeToPtr(10 * time.Minute),
 					Delay:    helper.TimeToPtr(1 * time.Minute),
 					Mode:     helper.StringToPtr("delay"),
 				},
-				Tasks: []*Task{
+				Tasks: []*api.Task{
 					{
 						Name:   "web",
 						Driver: "exec",

@@ -46,12 +47,12 @@ func MockJob() *Job {
 						Env: map[string]string{
 							"FOO": "bar",
 						},
-						Services: []*Service{
+						Services: []*api.Service{
 							{
 								Name:      "${TASK}-frontend",
 								PortLabel: "http",
 								Tags:      []string{"pci:${meta.pci-dss}", "datacenter:${node.datacenter}"},
-								Checks: []ServiceCheck{
+								Checks: []api.ServiceCheck{
 									{
 										Name: "check-table",
 										Type: "script",

@@ -67,14 +68,14 @@ func MockJob() *Job {
 								PortLabel: "admin",
 							},
 						},
-						LogConfig: DefaultLogConfig(),
-						Resources: &Resources{
+						LogConfig: api.DefaultLogConfig(),
+						Resources: &api.Resources{
 							CPU:      helper.IntToPtr(500),
 							MemoryMB: helper.IntToPtr(256),
-							Networks: []*NetworkResource{
+							Networks: []*api.NetworkResource{
 								{
 									MBits:        helper.IntToPtr(50),
-									DynamicPorts: []Port{{Label: "http"}, {Label: "admin"}},
+									DynamicPorts: []api.Port{{Label: "http"}, {Label: "admin"}},
 								},
 							},
 						},

@@ -98,10 +99,10 @@ func MockJob() *Job {
 	return job
 }

-func MockPeriodicJob() *Job {
+func MockPeriodicJob() *api.Job {
 	j := MockJob()
 	j.Type = helper.StringToPtr("batch")
-	j.Periodic = &PeriodicConfig{
+	j.Periodic = &api.PeriodicConfig{
 		Enabled:  helper.BoolToPtr(true),
 		SpecType: helper.StringToPtr("cron"),
 		Spec:     helper.StringToPtr("*/30 * * * *"),
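Typical use inside an agent HTTP test, mirroring the call sites changed earlier from api.MockJob() to the package-local MockJob() (a fragment; it assumes the surrounding test harness):

    job := MockJob()
    args := api.JobRegisterRequest{
    	Job:          job,
    	WriteRequest: api.WriteRequest{Region: "global"},
    }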
@@ -557,6 +557,8 @@ func (c *AllocStatusCommand) outputVerboseResourceUsage(task string, resourceUsa
 			measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Cache))
 		case "Swap":
 			measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Swap))
+		case "Usage":
+			measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Usage))
 		case "Max Usage":
 			measuredStats = append(measuredStats, humanize.IBytes(memoryStats.MaxUsage))
 		case "Kernel Usage":
@@ -10,7 +10,6 @@ name = "client1"
 # Enable the client
 client {
   enabled = true
-
   server_join {
     retry_join = ["127.0.0.1:4647", "127.0.0.1:5647", "127.0.0.1:6647"]
   }
@@ -31,7 +31,7 @@ const (

 const (
 	// Nvidia-container-runtime environment variable names
-	nvidiaVisibleDevices = "NVIDIA_VISIBLE_DEVICES"
+	NvidiaVisibleDevices = "NVIDIA_VISIBLE_DEVICES"
 )

 var (

@@ -181,7 +181,7 @@ func (d *NvidiaDevice) Reserve(deviceIDs []string) (*device.ContainerReservation

 	return &device.ContainerReservation{
 		Envs: map[string]string{
-			nvidiaVisibleDevices: strings.Join(deviceIDs, ","),
+			NvidiaVisibleDevices: strings.Join(deviceIDs, ","),
 		},
 	}, nil
 }

@@ -73,7 +73,7 @@ func TestReserve(t *testing.T) {
 			Name: "All RequestedIDs are managed by Device",
 			ExpectedReservation: &device.ContainerReservation{
 				Envs: map[string]string{
-					nvidiaVisibleDevices: "UUID1,UUID2,UUID3",
+					NvidiaVisibleDevices: "UUID1,UUID2,UUID3",
 				},
 			},
 			ExpectedError: nil,
@@ -101,6 +101,12 @@ func PluginLoader(opts map[string]string) (map[string]interface{}, error) {
 	if v, err := strconv.ParseBool(opts["docker.privileged.enabled"]); err == nil {
 		conf["allow_privileged"] = v
 	}
+
+	// nvidia_runtime
+	if v, ok := opts["docker.nvidia_runtime"]; ok {
+		conf["nvidia_runtime"] = v
+	}
+
 	return conf, nil
 }

@@ -153,6 +159,7 @@ var (
 	//	}
 	//	allow_privileged = false
 	//	allow_caps = ["CHOWN", "NET_RAW" ... ]
+	//	nvidia_runtime = "nvidia"
 	// }
 	// }
 	configSpec = hclspec.NewObject(map[string]*hclspec.Spec{

@@ -204,6 +211,10 @@ var (
 			hclspec.NewAttr("allow_caps", "list(string)", false),
 			hclspec.NewLiteral(`["CHOWN","DAC_OVERRIDE","FSETID","FOWNER","MKNOD","NET_RAW","SETGID","SETUID","SETFCAP","SETPCAP","NET_BIND_SERVICE","SYS_CHROOT","KILL","AUDIT_WRITE"]`),
 		),
+		"nvidia_runtime": hclspec.NewDefault(
+			hclspec.NewAttr("nvidia_runtime", "string", false),
+			hclspec.NewLiteral(`"nvidia"`),
+		),
 	})

 	// taskConfigSpec is the hcl specification for the driver config section of

@@ -470,6 +481,7 @@ type DriverConfig struct {
 	Volumes         VolumeConfig `codec:"volumes"`
 	AllowPrivileged bool         `codec:"allow_privileged"`
 	AllowCaps       []string     `codec:"allow_caps"`
+	GPURuntimeName  string       `codec:"nvidia_runtime"`
 }

 type AuthConfig struct {
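How the new option flows through PluginLoader, as a fragment grounded in the signature shown above; the opts key mirrors the docker.* client-option convention:

    opts := map[string]string{"docker.nvidia_runtime": "nvidia"}
    conf, err := PluginLoader(opts)
    if err == nil {
    	fmt.Println(conf["nvidia_runtime"]) // "nvidia"
    }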
@@ -3,6 +3,7 @@ package docklog
 import (
 	"bytes"
 	"fmt"
+	"runtime"
 	"testing"

 	docker "github.com/fsouza/go-dockerclient"

@@ -13,22 +14,34 @@ import (
 	"golang.org/x/net/context"
 )

+func testContainerDetails() (image string, imageName string, imageTag string) {
+	if runtime.GOOS == "windows" {
+		return "dantoml/busybox-windows:08012019",
+			"dantoml/busybox-windows",
+			"08012019"
+	}
+
+	return "busybox:1", "busybox", "1"
+}
+
 func TestDockerLogger(t *testing.T) {
 	ctu.DockerCompatible(t)

 	t.Parallel()
 	require := require.New(t)

+	containerImage, containerImageName, containerImageTag := testContainerDetails()
+
 	client, err := docker.NewClientFromEnv()
 	if err != nil {
 		t.Skip("docker unavailable:", err)
 	}

-	if img, err := client.InspectImage("busybox:1"); err != nil || img == nil {
+	if img, err := client.InspectImage(containerImage); err != nil || img == nil {
 		t.Log("image not found locally, downloading...")
 		err = client.PullImage(docker.PullImageOptions{
-			Repository: "busybox",
-			Tag:        "1",
+			Repository: containerImageName,
+			Tag:        containerImageTag,
 		}, docker.AuthConfiguration{})
 		if err != nil {
 			t.Fatalf("failed to pull image: %v", err)

@@ -38,9 +51,9 @@ func TestDockerLogger(t *testing.T) {
 	containerConf := docker.CreateContainerOptions{
 		Config: &docker.Config{
 			Cmd: []string{
-				"/bin/sh", "-c", "touch /tmp/docklog; tail -f /tmp/docklog",
+				"sh", "-c", "touch ~/docklog; tail -f ~/docklog",
 			},
-			Image: "busybox:1",
+			Image: containerImage,
 		},
 		Context: context.Background(),
 	}

@@ -98,8 +111,8 @@ func echoToContainer(t *testing.T, client *docker.Client, id string, line string
 	op := docker.CreateExecOptions{
 		Container: id,
 		Cmd: []string{
-			"/bin/ash", "-c",
-			fmt.Sprintf("echo %s >>/tmp/docklog", line),
+			"ash", "-c",
+			fmt.Sprintf("echo %s >>~/docklog", line),
 		},
 	}
@@ -40,7 +40,7 @@ func (m *StartRequest) Reset() { *m = StartRequest{} }
 func (m *StartRequest) String() string { return proto.CompactTextString(m) }
 func (*StartRequest) ProtoMessage()    {}
 func (*StartRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_docker_logger_0aa5a411831bd10e, []int{0}
+	return fileDescriptor_docker_logger_550e35425edc00c0, []int{0}
 }
 func (m *StartRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StartRequest.Unmarshal(m, b)

@@ -119,7 +119,7 @@ func (m *StartResponse) Reset() { *m = StartResponse{} }
 func (m *StartResponse) String() string { return proto.CompactTextString(m) }
 func (*StartResponse) ProtoMessage()    {}
 func (*StartResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_docker_logger_0aa5a411831bd10e, []int{1}
+	return fileDescriptor_docker_logger_550e35425edc00c0, []int{1}
 }
 func (m *StartResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StartResponse.Unmarshal(m, b)

@@ -149,7 +149,7 @@ func (m *StopRequest) Reset() { *m = StopRequest{} }
 func (m *StopRequest) String() string { return proto.CompactTextString(m) }
 func (*StopRequest) ProtoMessage()    {}
 func (*StopRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_docker_logger_0aa5a411831bd10e, []int{2}
+	return fileDescriptor_docker_logger_550e35425edc00c0, []int{2}
 }
 func (m *StopRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StopRequest.Unmarshal(m, b)

@@ -179,7 +179,7 @@ func (m *StopResponse) Reset() { *m = StopResponse{} }
 func (m *StopResponse) String() string { return proto.CompactTextString(m) }
 func (*StopResponse) ProtoMessage()    {}
 func (*StopResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_docker_logger_0aa5a411831bd10e, []int{3}
+	return fileDescriptor_docker_logger_550e35425edc00c0, []int{3}
 }
 func (m *StopResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StopResponse.Unmarshal(m, b)

@@ -312,10 +312,10 @@ var _DockerLogger_serviceDesc = grpc.ServiceDesc{
 }

 func init() {
-	proto.RegisterFile("drivers/docker/docklog/proto/docker_logger.proto", fileDescriptor_docker_logger_0aa5a411831bd10e)
+	proto.RegisterFile("drivers/docker/docklog/proto/docker_logger.proto", fileDescriptor_docker_logger_550e35425edc00c0)
 }

-var fileDescriptor_docker_logger_0aa5a411831bd10e = []byte{
+var fileDescriptor_docker_logger_550e35425edc00c0 = []byte{
 	// 328 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x50, 0xb1, 0x4e, 0xeb, 0x40,
 	0x10, 0x7c, 0xce, 0x8b, 0x9d, 0x70, 0x49, 0x40, 0x3a, 0x09, 0x61, 0xd2, 0x00, 0xae, 0x28, 0x90,
@@ -17,6 +17,7 @@ import (
 	hclog "github.com/hashicorp/go-hclog"
 	multierror "github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/nomad/client/taskenv"
+	"github.com/hashicorp/nomad/devices/gpu/nvidia"
 	"github.com/hashicorp/nomad/drivers/docker/docklog"
 	"github.com/hashicorp/nomad/drivers/shared/eventer"
 	nstructs "github.com/hashicorp/nomad/nomad/structs"

@@ -40,7 +41,7 @@ var (
 	waitClient *docker.Client

 	// The statistics the Docker driver exposes
-	DockerMeasuredMemStats = []string{"RSS", "Cache", "Swap", "Max Usage"}
+	DockerMeasuredMemStats = []string{"RSS", "Cache", "Swap", "Usage", "Max Usage"}
 	DockerMeasuredCpuStats = []string{"Throttled Periods", "Throttled Time", "Percent"}

 	// recoverableErrTimeouts returns a recoverable error if the error was due

@@ -88,6 +89,14 @@ type Driver struct {

 	// logger will log to the Nomad agent
 	logger hclog.Logger
+
+	// gpuRuntime indicates nvidia-docker runtime availability
+	gpuRuntime bool
+
+	// A tri-state boolean to know if the fingerprinting has happened and
+	// whether it has been successful
+	fingerprintSuccess *bool
+	fingerprintLock    sync.RWMutex
 }

 // NewDockerDriver returns a docker implementation of a driver plugin

@@ -629,6 +638,13 @@ func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *T
 		PidsLimit:  driverConfig.PidsLimit,
 	}

+	if _, ok := task.DeviceEnv[nvidia.NvidiaVisibleDevices]; ok {
+		if !d.gpuRuntime {
+			return c, fmt.Errorf("requested docker-runtime %q was not found", d.config.GPURuntimeName)
+		}
+		hostConfig.Runtime = d.config.GPURuntimeName
+	}
+
 	// Calculate CPU Quota
 	// cfs_quota_us is the time per core, so we must
 	// multiply the time by the number of cores available

(File diff suppressed because it is too large)
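How gpuRuntime gets set is outside the hunks shown here; a plausible sketch (an assumption, not the commit's actual fingerprint code) would check the configured runtime name against the runtimes the Docker daemon reports:

    // hasRuntime reports whether the daemon lists the wanted runtime,
    // e.g. "nvidia"; runtimes would come from a docker info call.
    func hasRuntime(runtimes []string, want string) bool {
    	for _, r := range runtimes {
    		if r == want {
    			return true
    		}
    	}
    	return false
    }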
@ -1,98 +1,735 @@
|
|||
// +build !windows
|
||||
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
docker "github.com/fsouza/go-dockerclient"
|
||||
"github.com/hashicorp/nomad/client/allocdir"
|
||||
"github.com/hashicorp/nomad/client/testutil"
|
||||
"github.com/hashicorp/nomad/helper/uuid"
|
||||
"github.com/hashicorp/nomad/plugins/drivers"
|
||||
tu "github.com/hashicorp/nomad/testutil"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestDockerDriver_Signal(t *testing.T) {
|
||||
func TestDockerDriver_User(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
if !testutil.DockerIsConnected(t) {
|
||||
t.Skip("Docker not connected")
|
||||
}
|
||||
|
||||
testutil.DockerCompatible(t)
|
||||
task, cfg, _ := dockerTask(t)
|
||||
cfg.Command = "/bin/sh"
|
||||
cfg.Args = []string{"local/test.sh"}
|
||||
task.User = "alice"
|
||||
cfg.Command = "/bin/sleep"
|
||||
cfg.Args = []string{"10000"}
|
||||
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
||||
|
||||
driver := dockerDriverHarness(t, nil)
|
||||
cleanup := driver.MkAllocDir(task, true)
|
||||
d := dockerDriverHarness(t, nil)
|
||||
cleanup := d.MkAllocDir(task, true)
|
||||
defer cleanup()
|
||||
|
||||
// Copy the image into the task's directory
|
||||
copyImage(t, task.TaskDir(), "busybox.tar")
|
||||
|
||||
testFile := filepath.Join(task.TaskDir().LocalDir, "test.sh")
|
||||
testData := []byte(`
|
||||
at_term() {
|
||||
echo 'Terminated.' > $NOMAD_TASK_DIR/output
|
||||
exit 3
|
||||
_, _, err := d.StartTask(task)
|
||||
if err == nil {
|
||||
d.DestroyTask(task.ID, true)
|
||||
t.Fatalf("Should've failed")
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), "alice") {
|
||||
t.Fatalf("Expected failure string not found, found %q instead", err.Error())
|
||||
}
|
||||
}
|
||||
trap at_term INT
|
||||
while true; do
|
||||
echo 'sleeping'
|
||||
sleep 0.2
|
||||
done
|
||||
`)
|
||||
require.NoError(t, ioutil.WriteFile(testFile, testData, 0777))
|
||||
_, _, err := driver.StartTask(task)
|
||||
|
||||
func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
testutil.DockerCompatible(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Because go-dockerclient doesn't provide api for query network aliases, just check that
|
||||
// a container can be created with a 'network_aliases' property
|
||||
|
||||
// Create network, network-scoped alias is supported only for containers in user defined networks
|
||||
client := newTestDockerClient(t)
|
||||
networkOpts := docker.CreateNetworkOptions{Name: "foobar", Driver: "bridge"}
|
||||
network, err := client.CreateNetwork(networkOpts)
|
||||
require.NoError(err)
|
||||
defer client.RemoveNetwork(network.ID)
|
||||
|
||||
expected := []string{"foobar"}
|
||||
taskCfg := newTaskConfig("", busyboxLongRunningCmd)
|
||||
taskCfg.NetworkMode = network.Name
|
||||
taskCfg.NetworkAliases = expected
|
||||
task := &drivers.TaskConfig{
|
||||
ID: uuid.Generate(),
|
||||
Name: "busybox",
|
||||
Resources: basicResources,
|
||||
}
|
||||
require.NoError(task.EncodeConcreteDriverConfig(&taskCfg))
|
||||
|
||||
d := dockerDriverHarness(t, nil)
|
||||
cleanup := d.MkAllocDir(task, true)
|
||||
defer cleanup()
|
||||
copyImage(t, task.TaskDir(), "busybox.tar")
|
||||
|
||||
_, _, err = d.StartTask(task)
|
||||
require.NoError(err)
|
||||
require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second))
|
||||
|
||||
defer d.DestroyTask(task.ID, true)
|
||||
|
||||
dockerDriver, ok := d.Impl().(*Driver)
|
||||
require.True(ok)
|
||||
|
||||
handle, ok := dockerDriver.tasks.Get(task.ID)
|
||||
require.True(ok)
|
||||
|
||||
_, err = client.InspectContainer(handle.containerID)
|
||||
require.NoError(err)
|
||||
}
|
||||
|
||||
func TestDockerDriver_NetworkMode_Host(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
testutil.DockerCompatible(t)
|
||||
expected := "host"
|
||||
|
||||
taskCfg := newTaskConfig("", busyboxLongRunningCmd)
|
||||
taskCfg.NetworkMode = expected
|
||||
|
||||
task := &drivers.TaskConfig{
|
||||
ID: uuid.Generate(),
|
||||
Name: "busybox-demo",
|
||||
Resources: basicResources,
|
||||
}
|
||||
require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
|
||||
|
||||
d := dockerDriverHarness(t, nil)
|
||||
cleanup := d.MkAllocDir(task, true)
|
||||
defer cleanup()
|
||||
copyImage(t, task.TaskDir(), "busybox.tar")
|
||||
|
||||
_, _, err := d.StartTask(task)
|
||||
require.NoError(t, err)
|
||||
defer driver.DestroyTask(task.ID, true)
|
||||
require.NoError(t, driver.WaitUntilStarted(task.ID, time.Duration(tu.TestMultiplier()*5)*time.Second))
|
||||
handle, ok := driver.Impl().(*Driver).tasks.Get(task.ID)
|
||||
|
||||
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
||||
|
||||
defer d.DestroyTask(task.ID, true)
|
||||
|
||||
dockerDriver, ok := d.Impl().(*Driver)
|
||||
require.True(t, ok)
|
||||
|
||||
waitForExist(t, newTestDockerClient(t), handle.containerID)
|
||||
require.NoError(t, handle.Kill(time.Duration(tu.TestMultiplier()*5)*time.Second, os.Interrupt))
|
||||
handle, ok := dockerDriver.tasks.Get(task.ID)
|
||||
require.True(t, ok)
|
||||
|
||||
waitCh, err := driver.WaitTask(context.Background(), task.ID)
|
||||
require.NoError(t, err)
|
||||
select {
|
||||
case res := <-waitCh:
|
||||
if res.Successful() {
|
||||
require.Fail(t, "should err: %v", res)
|
||||
}
|
||||
case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
|
||||
require.Fail(t, "timeout")
|
||||
}
|
||||
|
||||
// Check the log file to see it exited because of the signal
|
||||
outputFile := filepath.Join(task.TaskDir().LocalDir, "output")
|
||||
act, err := ioutil.ReadFile(outputFile)
|
||||
container, err := client.InspectContainer(handle.containerID)
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't read expected output: %v", err)
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
exp := "Terminated."
|
||||
if strings.TrimSpace(string(act)) != exp {
|
||||
t.Fatalf("Command outputted %v; want %v", act, exp)
|
||||
}
|
||||
actual := container.HostConfig.NetworkMode
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestDockerDriver_containerBinds(t *testing.T) {
|
||||
func TestDockerDriver_CPUCFSPeriod(t *testing.T) {
|
||||
if !tu.IsTravis() {
|
||||
t.Parallel()
|
||||
}
|
||||
testutil.DockerCompatible(t)
|
||||
|
||||
task, cfg, _ := dockerTask(t)
|
||||
driver := dockerDriverHarness(t, nil)
|
||||
cleanup := driver.MkAllocDir(task, false)
|
||||
cfg.CPUHardLimit = true
|
||||
cfg.CPUCFSPeriod = 1000000
|
||||
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
||||
|
||||
client, _, handle, cleanup := dockerSetup(t, task)
|
||||
defer cleanup()
|
||||
|
||||
binds, err := driver.Impl().(*Driver).containerBinds(task, cfg)
|
||||
waitForExist(t, client, handle.containerID)
|
||||
|
||||
container, err := client.InspectContainer(handle.containerID)
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, binds, fmt.Sprintf("%s:/alloc", task.TaskDir().SharedAllocDir))
|
||||
require.Contains(t, binds, fmt.Sprintf("%s:/local", task.TaskDir().LocalDir))
|
||||
require.Contains(t, binds, fmt.Sprintf("%s:/secrets", task.TaskDir().SecretsDir))
|
||||
|
||||
require.Equal(t, cfg.CPUCFSPeriod, container.HostConfig.CPUPeriod)
|
||||
}

func TestDockerDriver_Sysctl_Ulimit(t *testing.T) {
	testutil.DockerCompatible(t)
	task, cfg, _ := dockerTask(t)
	expectedUlimits := map[string]string{
		"nproc":  "4242",
		"nofile": "2048:4096",
	}
	cfg.Sysctl = map[string]string{
		"net.core.somaxconn": "16384",
	}
	cfg.Ulimit = expectedUlimits
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	assert.Nil(t, err, "unexpected error: %v", err)

	want := "16384"
	got := container.HostConfig.Sysctls["net.core.somaxconn"]
	assert.Equal(t, want, got, "Wrong net.core.somaxconn config for docker job. Expect: %s, got: %s", want, got)

	expectedUlimitLen := 2
	actualUlimitLen := len(container.HostConfig.Ulimits)
	assert.Equal(t, expectedUlimitLen, actualUlimitLen, "Wrong number of ulimit configs for docker job. Expect: %d, got: %d", expectedUlimitLen, actualUlimitLen)

	for _, got := range container.HostConfig.Ulimits {
		if expectedStr, ok := expectedUlimits[got.Name]; !ok {
			t.Errorf("%s config unexpected for docker job.", got.Name)
		} else {
			if !strings.Contains(expectedStr, ":") {
				expectedStr = expectedStr + ":" + expectedStr
			}

			splitted := strings.SplitN(expectedStr, ":", 2)
			soft, _ := strconv.Atoi(splitted[0])
			hard, _ := strconv.Atoi(splitted[1])
			assert.Equal(t, int64(soft), got.Soft, "Wrong soft %s ulimit for docker job. Expect: %d, got: %d", got.Name, soft, got.Soft)
			assert.Equal(t, int64(hard), got.Hard, "Wrong hard %s ulimit for docker job. Expect: %d, got: %d", got.Name, hard, got.Hard)
		}
	}
}

func TestDockerDriver_Sysctl_Ulimit_Errors(t *testing.T) {
	testutil.DockerCompatible(t)
	brokenConfigs := []map[string]string{
		{
			"nofile": "",
		},
		{
			"nofile": "abc:1234",
		},
		{
			"nofile": "1234:abc",
		},
	}

	testCases := []struct {
		ulimitConfig map[string]string
		err          error
	}{
		{brokenConfigs[0], fmt.Errorf("Malformed ulimit specification nofile: \"\", cannot be empty")},
		{brokenConfigs[1], fmt.Errorf("Malformed soft ulimit nofile: abc:1234")},
		{brokenConfigs[2], fmt.Errorf("Malformed hard ulimit nofile: 1234:abc")},
	}

	for _, tc := range testCases {
		task, cfg, _ := dockerTask(t)
		cfg.Ulimit = tc.ulimitConfig
		require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

		d := dockerDriverHarness(t, nil)
		cleanup := d.MkAllocDir(task, true)
		defer cleanup()
		copyImage(t, task.TaskDir(), "busybox.tar")

		_, _, err := d.StartTask(task)
		require.NotNil(t, err, "Expected non nil error")
		require.Contains(t, err.Error(), tc.err.Error())
	}
}
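
// For reference, the Sysctl and Ulimit options exercised above map from a
// jobspec config stanza shaped roughly like the following (a sketch; consult
// the docker driver documentation for the authoritative syntax):
//
//	config {
//	  sysctl {
//	    "net.core.somaxconn" = "16384"
//	  }
//	  ulimit {
//	    nproc  = "4242"
//	    nofile = "2048:4096" # soft:hard; a single value sets both
//	  }
//	}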

// This test does not run on Windows due to stricter path validation in the
// negative case for non existent mount paths. We should write a similar test
// for windows.
func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) {
	t.Parallel()

	testutil.DockerCompatible(t)

	allocDir := "/tmp/nomad/alloc-dir"

	cases := []struct {
		name            string
		requiresVolumes bool

		volumeDriver string
		volumes      []string

		expectedVolumes []string
	}{
		{
			name:            "basic plugin",
			requiresVolumes: true,
			volumeDriver:    "nfs",
			volumes:         []string{"test-path:/tmp/taskpath"},
			expectedVolumes: []string{"test-path:/tmp/taskpath"},
		},
		{
			name:            "absolute default driver",
			requiresVolumes: true,
			volumeDriver:    "",
			volumes:         []string{"/abs/test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/abs/test-path:/tmp/taskpath"},
		},
		{
			name:            "absolute local driver",
			requiresVolumes: true,
			volumeDriver:    "local",
			volumes:         []string{"/abs/test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/abs/test-path:/tmp/taskpath"},
		},
		{
			name:            "relative default driver",
			requiresVolumes: false,
			volumeDriver:    "",
			volumes:         []string{"test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/tmp/nomad/alloc-dir/demo/test-path:/tmp/taskpath"},
		},
		{
			name:            "relative local driver",
			requiresVolumes: false,
			volumeDriver:    "local",
			volumes:         []string{"test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/tmp/nomad/alloc-dir/demo/test-path:/tmp/taskpath"},
		},
		{
			name:            "relative outside task-dir default driver",
			requiresVolumes: false,
			volumeDriver:    "",
			volumes:         []string{"../test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/tmp/nomad/alloc-dir/test-path:/tmp/taskpath"},
		},
		{
			name:            "relative outside task-dir local driver",
			requiresVolumes: false,
			volumeDriver:    "local",
			volumes:         []string{"../test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/tmp/nomad/alloc-dir/test-path:/tmp/taskpath"},
		},
		{
			name:            "relative outside alloc-dir default driver",
			requiresVolumes: true,
			volumeDriver:    "",
			volumes:         []string{"../../test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/tmp/nomad/test-path:/tmp/taskpath"},
		},
		{
			name:            "relative outside alloc-dir local driver",
			requiresVolumes: true,
			volumeDriver:    "local",
			volumes:         []string{"../../test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/tmp/nomad/test-path:/tmp/taskpath"},
		},
	}
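
	// requiresVolumes marks sources that resolve outside the allocation
	// directory; when volumes are disabled the driver must refuse to build
	// the container config for them (see the "with volumes disabled" subtests).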

	t.Run("with volumes enabled", func(t *testing.T) {
		dh := dockerDriverHarness(t, nil)
		driver := dh.Impl().(*Driver)
		driver.config.Volumes.Enabled = true

		for _, c := range cases {
			t.Run(c.name, func(t *testing.T) {
				task, cfg, _ := dockerTask(t)
				cfg.VolumeDriver = c.volumeDriver
				cfg.Volumes = c.volumes

				task.AllocDir = allocDir
				task.Name = "demo"

				require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

				cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
				require.NoError(t, err)

				for _, v := range c.expectedVolumes {
					require.Contains(t, cc.HostConfig.Binds, v)
				}
			})
		}
	})

	t.Run("with volumes disabled", func(t *testing.T) {
		dh := dockerDriverHarness(t, nil)
		driver := dh.Impl().(*Driver)
		driver.config.Volumes.Enabled = false

		for _, c := range cases {
			t.Run(c.name, func(t *testing.T) {
				task, cfg, _ := dockerTask(t)
				cfg.VolumeDriver = c.volumeDriver
				cfg.Volumes = c.volumes

				task.AllocDir = allocDir
				task.Name = "demo"

				require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

				cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
				if c.requiresVolumes {
					require.Error(t, err, "volumes are not enabled")
				} else {
					require.NoError(t, err)

					for _, v := range c.expectedVolumes {
						require.Contains(t, cc.HostConfig.Binds, v)
					}
				}
			})
		}
	})
}

// This test does not run on windows due to differences in the definition of
// an absolute path, changing path expansion behaviour. A similar test should
// be written for windows.
func TestDockerDriver_MountsSerialization(t *testing.T) {
	t.Parallel()
	testutil.DockerCompatible(t)

	allocDir := "/tmp/nomad/alloc-dir"

	cases := []struct {
		name            string
		requiresVolumes bool
		passedMounts    []DockerMount
		expectedMounts  []docker.HostMount
	}{
		{
			name: "basic volume",
			passedMounts: []DockerMount{
				{
					Target:   "/nomad",
					ReadOnly: true,
					Source:   "test",
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:          "volume",
					Target:        "/nomad",
					Source:        "test",
					ReadOnly:      true,
					VolumeOptions: &docker.VolumeOptions{},
				},
			},
		},
		{
			name: "basic bind",
			passedMounts: []DockerMount{
				{
					Type:   "bind",
					Target: "/nomad",
					Source: "test",
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:        "bind",
					Target:      "/nomad",
					Source:      "/tmp/nomad/alloc-dir/demo/test",
					BindOptions: &docker.BindOptions{},
				},
			},
		},
		{
			name:            "basic absolute bind",
			requiresVolumes: true,
			passedMounts: []DockerMount{
				{
					Type:   "bind",
					Target: "/nomad",
					Source: "/tmp/test",
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:        "bind",
					Target:      "/nomad",
					Source:      "/tmp/test",
					BindOptions: &docker.BindOptions{},
				},
			},
		},
		{
			name:            "bind relative outside",
			requiresVolumes: true,
			passedMounts: []DockerMount{
				{
					Type:   "bind",
					Target: "/nomad",
					Source: "../../test",
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:        "bind",
					Target:      "/nomad",
					Source:      "/tmp/nomad/test",
					BindOptions: &docker.BindOptions{},
				},
			},
		},
		{
			name:            "basic tmpfs",
			requiresVolumes: false,
			passedMounts: []DockerMount{
				{
					Type:   "tmpfs",
					Target: "/nomad",
					TmpfsOptions: DockerTmpfsOptions{
						SizeBytes: 321,
						Mode:      0666,
					},
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:   "tmpfs",
					Target: "/nomad",
					TempfsOptions: &docker.TempfsOptions{
						SizeBytes: 321,
						Mode:      0666,
					},
				},
			},
		},
	}
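
	// Note: a DockerMount with no explicit Type serializes as a "volume"
	// mount (the "basic volume" case above), while relative bind sources are
	// expanded against the task directory before being handed to Docker.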

	t.Run("with volumes enabled", func(t *testing.T) {
		dh := dockerDriverHarness(t, nil)
		driver := dh.Impl().(*Driver)
		driver.config.Volumes.Enabled = true

		for _, c := range cases {
			t.Run(c.name, func(t *testing.T) {
				task, cfg, _ := dockerTask(t)
				cfg.Mounts = c.passedMounts

				task.AllocDir = allocDir
				task.Name = "demo"

				require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

				cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
				require.NoError(t, err)
				require.EqualValues(t, c.expectedMounts, cc.HostConfig.Mounts)
			})
		}
	})

	t.Run("with volumes disabled", func(t *testing.T) {
		dh := dockerDriverHarness(t, nil)
		driver := dh.Impl().(*Driver)
		driver.config.Volumes.Enabled = false

		for _, c := range cases {
			t.Run(c.name, func(t *testing.T) {
				task, cfg, _ := dockerTask(t)
				cfg.Mounts = c.passedMounts

				task.AllocDir = allocDir
				task.Name = "demo"

				require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

				cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
				if c.requiresVolumes {
					require.Error(t, err, "volumes are not enabled")
				} else {
					require.NoError(t, err)
					require.EqualValues(t, c.expectedMounts, cc.HostConfig.Mounts)
				}
			})
		}
	})
}

// TestDockerDriver_CreateContainerConfig_MountsCombined asserts that
// devices and mounts set by device managers/plugins are honored,
// present in docker.CreateContainerOptions, and appended to any
// devices/mounts a user sets in the task config.
func TestDockerDriver_CreateContainerConfig_MountsCombined(t *testing.T) {
	t.Parallel()
	testutil.DockerCompatible(t)

	task, cfg, _ := dockerTask(t)

	task.Devices = []*drivers.DeviceConfig{
		{
			HostPath:    "/dev/fuse",
			TaskPath:    "/container/dev/task-fuse",
			Permissions: "rw",
		},
	}
	task.Mounts = []*drivers.MountConfig{
		{
			HostPath: "/tmp/task-mount",
			TaskPath: "/container/tmp/task-mount",
			Readonly: true,
		},
	}

	cfg.Devices = []DockerDevice{
		{
			HostPath:          "/dev/stdout",
			ContainerPath:     "/container/dev/cfg-stdout",
			CgroupPermissions: "rwm",
		},
	}
	cfg.Mounts = []DockerMount{
		{
			Type:     "bind",
			Source:   "/tmp/cfg-mount",
			Target:   "/container/tmp/cfg-mount",
			ReadOnly: false,
		},
	}

	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)

	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.NoError(t, err)

	expectedMounts := []docker.HostMount{
		{
			Type:        "bind",
			Source:      "/tmp/cfg-mount",
			Target:      "/container/tmp/cfg-mount",
			ReadOnly:    false,
			BindOptions: &docker.BindOptions{},
		},
		{
			Type:     "bind",
			Source:   "/tmp/task-mount",
			Target:   "/container/tmp/task-mount",
			ReadOnly: true,
		},
	}
	foundMounts := c.HostConfig.Mounts
	sort.Slice(foundMounts, func(i, j int) bool {
		return foundMounts[i].Target < foundMounts[j].Target
	})
	require.EqualValues(t, expectedMounts, foundMounts)

	expectedDevices := []docker.Device{
		{
			PathOnHost:        "/dev/stdout",
			PathInContainer:   "/container/dev/cfg-stdout",
			CgroupPermissions: "rwm",
		},
		{
			PathOnHost:        "/dev/fuse",
			PathInContainer:   "/container/dev/task-fuse",
			CgroupPermissions: "rw",
		},
	}

	foundDevices := c.HostConfig.Devices
	sort.Slice(foundDevices, func(i, j int) bool {
		return foundDevices[i].PathInContainer < foundDevices[j].PathInContainer
	})
	require.EqualValues(t, expectedDevices, foundDevices)
}
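
// The sorts above keep the comparison deterministic: user-configured and
// task/plugin-provided mounts and devices are appended into one slice, and
// the test should not depend on the append order.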

// TestDockerDriver_Cleanup ensures Cleanup removes only downloaded images.
// Doesn't run on windows because it requires an image variant
func TestDockerDriver_Cleanup(t *testing.T) {
	testutil.DockerCompatible(t)

	// using a small image and a specific point release to avoid accidental conflicts with other tasks
	cfg := newTaskConfig("", []string{"sleep", "100"})
	cfg.Image = "busybox:1.29.2"
	cfg.LoadImage = ""
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "cleanup_test",
		Resources: basicResources,
	}

	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, driver, handle, cleanup := dockerSetup(t, task)
	defer cleanup()

	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
	// Cleanup
	require.NoError(t, driver.DestroyTask(task.ID, true))

	// Ensure image was removed
	tu.WaitForResult(func() (bool, error) {
		if _, err := client.InspectImage(cfg.Image); err == nil {
			return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
		}

		return true, nil
	}, func(err error) {
		require.NoError(t, err)
	})

	// The image doesn't exist, which shouldn't be an error when calling
	// Cleanup, so call it again to make sure.
	require.NoError(t, driver.Impl().(*Driver).cleanupImage(handle))
}
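
// tu.WaitForResult retries the first function until it returns true or the
// test timeout elapses, then hands the final error to the second function;
// here that turns a still-present image into a test failure.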

func newTaskConfig(variant string, command []string) TaskConfig {
	// busyboxImageID is the ID stored in busybox.tar
	busyboxImageID := "busybox:1.29.3"

	image := busyboxImageID
	loadImage := "busybox.tar"
	if variant != "" {
		image = fmt.Sprintf("%s-%s", busyboxImageID, variant)
		loadImage = fmt.Sprintf("busybox_%s.tar", variant)
	}

	return TaskConfig{
		Image:     image,
		LoadImage: loadImage,
		Command:   command[0],
		Args:      command[1:],
	}
}

func copyImage(t *testing.T, taskDir *allocdir.TaskDir, image string) {
	dst := filepath.Join(taskDir.LocalDir, image)
	copyFile(filepath.Join("./test-resources/docker", image), dst, t)
}

// copyFile copies an existing file to the destination
func copyFile(src, dst string, t *testing.T) {
	in, err := os.Open(src)
	if err != nil {
		t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
	}
	defer func() {
		if err := out.Close(); err != nil {
			t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
		}
	}()
	if _, err = io.Copy(out, in); err != nil {
		t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
	}
	if err := out.Sync(); err != nil {
		t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
	}
}

26 drivers/docker/driver_windows_test.go Normal file
@@ -0,0 +1,26 @@
// +build windows

package docker

import (
	"testing"

	"github.com/hashicorp/nomad/client/allocdir"
)

func newTaskConfig(variant string, command []string) TaskConfig {
	// busyboxImageID is an id of an image containing nanoserver windows and
	// a busybox exe.
	// See https://github.com/dantoml/windows/blob/81cff1ed77729d1fa36721abd6cb6efebff2f8ef/docker/busybox/Dockerfile
	busyboxImageID := "dantoml/busybox-windows:08012019"

	return TaskConfig{
		Image:   busyboxImageID,
		Command: command[0],
		Args:    command[1:],
	}
}

// No-op on windows because we don't load images.
func copyImage(t *testing.T, taskDir *allocdir.TaskDir, image string) {
}

@@ -2,8 +2,11 @@ package docker

import (
	"context"
	"sort"
	"strings"
	"time"

	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/plugins/drivers"
	pstructs "github.com/hashicorp/nomad/plugins/shared/structs"
)

@@ -14,6 +17,28 @@ func (d *Driver) Fingerprint(ctx context.Context) (<-chan *drivers.Fingerprint,
	return ch, nil
}

// setFingerprintSuccess marks the driver as having fingerprinted successfully
func (d *Driver) setFingerprintSuccess() {
	d.fingerprintLock.Lock()
	d.fingerprintSuccess = helper.BoolToPtr(true)
	d.fingerprintLock.Unlock()
}

// setFingerprintFailure marks the driver as having failed fingerprinting
func (d *Driver) setFingerprintFailure() {
	d.fingerprintLock.Lock()
	d.fingerprintSuccess = helper.BoolToPtr(false)
	d.fingerprintLock.Unlock()
}

// fingerprintSuccessful returns true if the driver has
// never fingerprinted or has successfully fingerprinted
func (d *Driver) fingerprintSuccessful() bool {
	d.fingerprintLock.Lock()
	defer d.fingerprintLock.Unlock()
	return d.fingerprintSuccess == nil || *d.fingerprintSuccess
}
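
// fingerprintSuccess is a tri-state *bool: nil means fingerprinting has not
// run yet, and true/false record the last outcome. Callers use it to log
// connectivity problems only on a state change instead of on every
// fingerprint interval, e.g. (sketch of the pattern used below):
//
//	if err != nil {
//		if d.fingerprintSuccessful() { // first failure after a success
//			d.logger.Info("failed to initialize client", "error", err)
//		}
//		d.setFingerprintFailure()
//	}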

func (d *Driver) handleFingerprint(ctx context.Context, ch chan *drivers.Fingerprint) {
	defer close(ch)
	ticker := time.NewTimer(0)

@@ -38,7 +63,10 @@ func (d *Driver) buildFingerprint() *drivers.Fingerprint {
	}
	client, _, err := d.dockerClients()
	if err != nil {
		if d.fingerprintSuccessful() {
			d.logger.Info("failed to initialize client", "error", err)
		}
		d.setFingerprintFailure()
		return &drivers.Fingerprint{
			Health:            drivers.HealthStateUndetected,
			HealthDescription: "Failed to initialize docker client",

@@ -47,7 +75,10 @@ func (d *Driver) buildFingerprint() *drivers.Fingerprint {

	env, err := client.Version()
	if err != nil {
		if d.fingerprintSuccessful() {
			d.logger.Debug("could not connect to docker daemon", "endpoint", client.Endpoint(), "error", err)
		}
		d.setFingerprintFailure()
		return &drivers.Fingerprint{
			Health:            drivers.HealthStateUnhealthy,
			HealthDescription: "Failed to connect to docker daemon",

@@ -82,11 +113,33 @@ func (d *Driver) buildFingerprint() *drivers.Fingerprint {
			} else {
				// Docker 17.09.0-ce dropped the Gateway IP from the bridge network
				// See https://github.com/moby/moby/issues/32648
				if d.fingerprintSuccessful() {
					d.logger.Debug("bridge_ip could not be discovered")
				}
			}
			break
		}
	}

	if dockerInfo, err := client.Info(); err != nil {
		d.logger.Warn("failed to get Docker system info", "error", err)
	} else {
		runtimeNames := make([]string, 0, len(dockerInfo.Runtimes))
		for name := range dockerInfo.Runtimes {
			if d.config.GPURuntimeName == name {
				// Nvidia runtime is detected by Docker.
				// It makes it possible to run GPU workloads using the Docker driver on this host.
				d.gpuRuntime = true
			}
			runtimeNames = append(runtimeNames, name)
		}
		sort.Strings(runtimeNames)

		fp.Attributes["runtimes"] = pstructs.NewStringAttribute(
			strings.Join(runtimeNames, ","))
	}

	d.setFingerprintSuccess()

	return fp
}

@@ -134,6 +134,7 @@ func dockerStatsToTaskResourceUsage(s *docker.Stats) *cstructs.TaskResourceUsage
	RSS:      s.MemoryStats.Stats.Rss,
	Cache:    s.MemoryStats.Stats.Cache,
	Swap:     s.MemoryStats.Stats.Swap,
	Usage:    s.MemoryStats.Usage,
	MaxUsage: s.MemoryStats.MaxUsage,
	Measured: DockerMeasuredMemStats,
}

@@ -23,6 +23,7 @@ func TestDriver_DockerStatsCollector(t *testing.T) {
	stats.MemoryStats.Stats.Rss = 6537216
	stats.MemoryStats.Stats.Cache = 1234
	stats.MemoryStats.Stats.Swap = 0
	stats.MemoryStats.Usage = 5651904
	stats.MemoryStats.MaxUsage = 6651904

	go dockerStatsCollector(dst, src, time.Second)

@@ -38,6 +39,7 @@ func TestDriver_DockerStatsCollector(t *testing.T) {
	require.Equal(stats.MemoryStats.Stats.Rss, ru.ResourceUsage.MemoryStats.RSS)
	require.Equal(stats.MemoryStats.Stats.Cache, ru.ResourceUsage.MemoryStats.Cache)
	require.Equal(stats.MemoryStats.Stats.Swap, ru.ResourceUsage.MemoryStats.Swap)
	require.Equal(stats.MemoryStats.Usage, ru.ResourceUsage.MemoryStats.Usage)
	require.Equal(stats.MemoryStats.MaxUsage, ru.ResourceUsage.MemoryStats.MaxUsage)
	require.Equal(stats.CPUStats.ThrottlingData.ThrottledPeriods, ru.ResourceUsage.CpuStats.ThrottledPeriods)
	require.Equal(stats.CPUStats.ThrottlingData.ThrottledTime, ru.ResourceUsage.CpuStats.ThrottledTime)

@@ -6,60 +6,6 @@ import (
	"github.com/stretchr/testify/require"
)

func TestValidateCgroupPermission(t *testing.T) {
	positiveCases := []string{
		"r",
		"rw",
		"rwm",
		"mr",
		"mrw",
		"",
	}

	for _, c := range positiveCases {
		t.Run("positive case: "+c, func(t *testing.T) {
			require.True(t, validateCgroupPermission(c))
		})
	}

	negativeCases := []string{
		"q",
		"asdf",
		"rq",
	}

	for _, c := range negativeCases {
		t.Run("negative case: "+c, func(t *testing.T) {
			require.False(t, validateCgroupPermission(c))
		})
	}
}

func TestExpandPath(t *testing.T) {
	cases := []struct {
		base     string
		target   string
		expected string
	}{
		{"/tmp/alloc/task", "/home/user", "/home/user"},
		{"/tmp/alloc/task", "/home/user/..", "/home"},

		{"/tmp/alloc/task", ".", "/tmp/alloc/task"},
		{"/tmp/alloc/task", "..", "/tmp/alloc"},

		{"/tmp/alloc/task", "d1/d2", "/tmp/alloc/task/d1/d2"},
		{"/tmp/alloc/task", "../d1/d2", "/tmp/alloc/d1/d2"},
		{"/tmp/alloc/task", "../../d1/d2", "/tmp/d1/d2"},
	}

	for _, c := range cases {
		t.Run(c.expected, func(t *testing.T) {
			require.Equal(t, c.expected, expandPath(c.base, c.target))
		})
	}
}

func TestIsParentPath(t *testing.T) {
	require.True(t, isParentPath("/a/b/c", "/a/b/c"))
	require.True(t, isParentPath("/a/b/c", "/a/b/c/d"))

63 drivers/docker/utils_unix_test.go Normal file
@@ -0,0 +1,63 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package docker

import (
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestValidateCgroupPermission(t *testing.T) {
	positiveCases := []string{
		"r",
		"rw",
		"rwm",
		"mr",
		"mrw",
		"",
	}

	for _, c := range positiveCases {
		t.Run("positive case: "+c, func(t *testing.T) {
			require.True(t, validateCgroupPermission(c))
		})
	}

	negativeCases := []string{
		"q",
		"asdf",
		"rq",
	}

	for _, c := range negativeCases {
		t.Run("negative case: "+c, func(t *testing.T) {
			require.False(t, validateCgroupPermission(c))
		})
	}
}

func TestExpandPath(t *testing.T) {
	cases := []struct {
		base     string
		target   string
		expected string
	}{
		{"/tmp/alloc/task", ".", "/tmp/alloc/task"},
		{"/tmp/alloc/task", "..", "/tmp/alloc"},

		{"/tmp/alloc/task", "d1/d2", "/tmp/alloc/task/d1/d2"},
		{"/tmp/alloc/task", "../d1/d2", "/tmp/alloc/d1/d2"},
		{"/tmp/alloc/task", "../../d1/d2", "/tmp/d1/d2"},

		{"/tmp/alloc/task", "/home/user", "/home/user"},
		{"/tmp/alloc/task", "/home/user/..", "/home"},
	}

	for _, c := range cases {
		t.Run(c.expected, func(t *testing.T) {
			require.Equal(t, c.expected, filepath.ToSlash(expandPath(c.base, c.target)))
		})
	}
}
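
// filepath.ToSlash replaces OS-specific path separators with '/', so the same
// expected strings work on every platform: on Windows `\tmp\alloc` becomes
// "/tmp/alloc", while on Unix the call is a no-op.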

34 drivers/docker/utils_windows_test.go Normal file
@@ -0,0 +1,34 @@
// +build windows

package docker

import (
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestExpandPath(t *testing.T) {
	cases := []struct {
		base     string
		target   string
		expected string
	}{
		{"/tmp/alloc/task", ".", "/tmp/alloc/task"},
		{"/tmp/alloc/task", "..", "/tmp/alloc"},

		{"/tmp/alloc/task", "d1/d2", "/tmp/alloc/task/d1/d2"},
		{"/tmp/alloc/task", "../d1/d2", "/tmp/alloc/d1/d2"},
		{"/tmp/alloc/task", "../../d1/d2", "/tmp/d1/d2"},

		{"/tmp/alloc/task", "c:/home/user", "c:/home/user"},
		{"/tmp/alloc/task", "c:/home/user/..", "c:/home"},
	}

	for _, c := range cases {
		t.Run(c.expected, func(t *testing.T) {
			require.Equal(t, c.expected, filepath.ToSlash(expandPath(c.base, c.target)))
		})
	}
}

@@ -7,6 +7,7 @@ import (
	"os"
	"path/filepath"
	"runtime"
	"sync"
	"time"

	"github.com/hashicorp/consul-template/signals"

@@ -14,6 +15,7 @@ import (
	"github.com/hashicorp/nomad/client/fingerprint"
	"github.com/hashicorp/nomad/drivers/shared/eventer"
	"github.com/hashicorp/nomad/drivers/shared/executor"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/plugins/base"
	"github.com/hashicorp/nomad/plugins/drivers"
	"github.com/hashicorp/nomad/plugins/drivers/utils"

@@ -100,6 +102,11 @@ type Driver struct {

	// logger will log to the Nomad agent
	logger hclog.Logger

	// A tri-state boolean to know if the fingerprinting has happened and
	// whether it has been successful
	fingerprintSuccess *bool
	fingerprintLock    sync.Mutex
}

// TaskConfig is the driver configuration of a task within a job

@@ -131,6 +138,28 @@ func NewExecDriver(logger hclog.Logger) drivers.DriverPlugin {
	}
}

// setFingerprintSuccess marks the driver as having fingerprinted successfully
func (d *Driver) setFingerprintSuccess() {
	d.fingerprintLock.Lock()
	d.fingerprintSuccess = helper.BoolToPtr(true)
	d.fingerprintLock.Unlock()
}

// setFingerprintFailure marks the driver as having failed fingerprinting
func (d *Driver) setFingerprintFailure() {
	d.fingerprintLock.Lock()
	d.fingerprintSuccess = helper.BoolToPtr(false)
	d.fingerprintLock.Unlock()
}

// fingerprintSuccessful returns true if the driver has
// never fingerprinted or has successfully fingerprinted
func (d *Driver) fingerprintSuccessful() bool {
	d.fingerprintLock.Lock()
	defer d.fingerprintLock.Unlock()
	return d.fingerprintSuccess == nil || *d.fingerprintSuccess
}

func (d *Driver) PluginInfo() (*base.PluginInfoResponse, error) {
	return pluginInfo, nil
}

@@ -182,6 +211,7 @@ func (d *Driver) handleFingerprint(ctx context.Context, ch chan<- *drivers.Finge

func (d *Driver) buildFingerprint() *drivers.Fingerprint {
	if runtime.GOOS != "linux" {
		d.setFingerprintFailure()
		return &drivers.Fingerprint{
			Health:            drivers.HealthStateUndetected,
			HealthDescription: "exec driver unsupported on client OS",

@@ -197,6 +227,7 @@ func (d *Driver) buildFingerprint() *drivers.Fingerprint {
	if !utils.IsUnixRoot() {
		fp.Health = drivers.HealthStateUndetected
		fp.HealthDescription = drivers.DriverRequiresRootMessage
		d.setFingerprintFailure()
		return fp
	}

@@ -204,17 +235,22 @@ func (d *Driver) buildFingerprint() *drivers.Fingerprint {
	if err != nil {
		fp.Health = drivers.HealthStateUnhealthy
		fp.HealthDescription = drivers.NoCgroupMountMessage
		if d.fingerprintSuccessful() {
			d.logger.Warn(fp.HealthDescription, "error", err)
		}
		d.setFingerprintFailure()
		return fp
	}

	if mount == "" {
		fp.Health = drivers.HealthStateUnhealthy
		fp.HealthDescription = drivers.CgroupMountEmpty
		d.setFingerprintFailure()
		return fp
	}

	fp.Attributes["driver.exec"] = pstructs.NewBoolAttribute(true)
	d.setFingerprintSuccess()
	return fp
}

@@ -25,7 +25,6 @@ import (
	"github.com/hashicorp/nomad/plugins/shared/hclutils"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"
	"golang.org/x/sys/unix"
)

func TestMain(m *testing.M) {

@@ -123,66 +122,6 @@ func TestExecDriver_StartWait(t *testing.T) {
	require.NoError(harness.DestroyTask(task.ID, true))
}

func TestExecDriver_StartWaitStop(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	ctestutils.ExecCompatible(t)

	d := NewExecDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "test",
		Resources: testResources,
	}

	taskConfig := map[string]interface{}{
		"command": "/bin/sleep",
		"args":    []string{"600"},
	}
	encodeDriverHelper(require, task, taskConfig)

	cleanup := harness.MkAllocDir(task, false)
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	require.NoError(err)

	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
	require.NoError(err)

	require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second))

	go func() {
		harness.StopTask(task.ID, 2*time.Second, "SIGINT")
	}()

	select {
	case result := <-ch:
		require.Equal(int(unix.SIGINT), result.Signal)
	case <-time.After(10 * time.Second):
		require.Fail("timeout waiting for task to shutdown")
	}

	// Ensure that the task is marked as dead, but account
	// for WaitTask() closing channel before internal state is updated
	testutil.WaitForResult(func() (bool, error) {
		status, err := harness.InspectTask(task.ID)
		if err != nil {
			return false, fmt.Errorf("inspecting task failed: %v", err)
		}
		if status.State != drivers.TaskStateExited {
			return false, fmt.Errorf("task hasn't exited yet; status: %v", status.State)
		}

		return true, nil
	}, func(err error) {
		require.NoError(err)
	})

	require.NoError(harness.DestroyTask(task.ID, true))
}

func TestExecDriver_StartWaitStopKill(t *testing.T) {
	t.Parallel()
	require := require.New(t)

79 drivers/exec/driver_unix_test.go Normal file
@@ -0,0 +1,79 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package exec

import (
	"context"
	"fmt"
	"testing"
	"time"

	ctestutils "github.com/hashicorp/nomad/client/testutil"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/plugins/drivers"
	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"
	"golang.org/x/sys/unix"
)

func TestExecDriver_StartWaitStop(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	ctestutils.ExecCompatible(t)

	d := NewExecDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "test",
		Resources: testResources,
	}

	taskConfig := map[string]interface{}{
		"command": "/bin/sleep",
		"args":    []string{"600"},
	}
	encodeDriverHelper(require, task, taskConfig)

	cleanup := harness.MkAllocDir(task, false)
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	require.NoError(err)

	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
	require.NoError(err)

	require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second))

	go func() {
		harness.StopTask(task.ID, 2*time.Second, "SIGINT")
	}()

	select {
	case result := <-ch:
		require.Equal(int(unix.SIGINT), result.Signal)
	case <-time.After(10 * time.Second):
		require.Fail("timeout waiting for task to shutdown")
	}

	// Ensure that the task is marked as dead, but account
	// for WaitTask() closing channel before internal state is updated
	testutil.WaitForResult(func() (bool, error) {
		status, err := harness.InspectTask(task.ID)
		if err != nil {
			return false, fmt.Errorf("inspecting task failed: %v", err)
		}
		if status.State != drivers.TaskStateExited {
			return false, fmt.Errorf("task hasn't exited yet; status: %v", status.State)
		}

		return true, nil
	}, func(err error) {
		require.NoError(err)
	})

	require.NoError(harness.DestroyTask(task.ID, true))
}

@@ -76,7 +76,7 @@
	"exit_code":    hclspec.NewAttr("exit_code", "number", false),
	"exit_signal":  hclspec.NewAttr("exit_signal", "number", false),
	"exit_err_msg": hclspec.NewAttr("exit_err_msg", "string", false),
-	"signal_err":   hclspec.NewAttr("signal_err", "string", false),
+	"signal_error": hclspec.NewAttr("signal_error", "string", false),
	"driver_ip":        hclspec.NewAttr("driver_ip", "string", false),
	"driver_advertise": hclspec.NewAttr("driver_advertise", "bool", false),
	"driver_port_map":  hclspec.NewAttr("driver_port_map", "string", false),

@@ -88,7 +88,7 @@
// capabilities is returned by the Capabilities RPC and indicates what
// optional features this driver supports
capabilities = &drivers.Capabilities{
-	SendSignals: false,
+	SendSignals: true,
	Exec:        true,
	FSIsolation: drivers.FSIsolationNone,
}

@@ -6,6 +6,7 @@ import (
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"sync"
	"syscall"

@@ -25,7 +26,6 @@ import (
	pstructs "github.com/hashicorp/nomad/plugins/shared/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"
	"golang.org/x/sys/unix"
)

func TestMain(m *testing.M) {

@@ -164,73 +164,6 @@ func TestRawExecDriver_StartWait(t *testing.T) {
	require.NoError(harness.DestroyTask(task.ID, true))
}

func TestRawExecDriver_StartWaitStop(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	d := NewRawExecDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)
	defer harness.Kill()

	// Disable cgroups so test works without root
	config := &Config{NoCgroups: true}
	var data []byte
	require.NoError(basePlug.MsgPackEncode(&data, config))
	bconfig := &basePlug.Config{PluginConfig: data}
	require.NoError(harness.SetConfig(bconfig))

	task := &drivers.TaskConfig{
		ID:   uuid.Generate(),
		Name: "test",
	}

	taskConfig := map[string]interface{}{}
	taskConfig["command"] = testtask.Path()
	taskConfig["args"] = []string{"sleep", "100s"}

	encodeDriverHelper(require, task, taskConfig)

	cleanup := harness.MkAllocDir(task, false)
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	require.NoError(err)

	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
	require.NoError(err)

	require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second))

	go func() {
		harness.StopTask(task.ID, 2*time.Second, "SIGINT")
	}()

	select {
	case result := <-ch:
		require.Equal(int(unix.SIGINT), result.Signal)
	case <-time.After(10 * time.Second):
		require.Fail("timeout waiting for task to shutdown")
	}

	// Ensure that the task is marked as dead, but account
	// for WaitTask() closing channel before internal state is updated
	testutil.WaitForResult(func() (bool, error) {
		status, err := harness.InspectTask(task.ID)
		if err != nil {
			return false, fmt.Errorf("inspecting task failed: %v", err)
		}
		if status.State != drivers.TaskStateExited {
			return false, fmt.Errorf("task hasn't exited yet; status: %v", status.State)
		}

		return true, nil
	}, func(err error) {
		require.NoError(err)
	})

	require.NoError(harness.DestroyTask(task.ID, true))
}

func TestRawExecDriver_StartWaitRecoverWaitStop(t *testing.T) {
	t.Parallel()
	require := require.New(t)

@@ -312,7 +245,6 @@ func TestRawExecDriver_StartWaitRecoverWaitStop(t *testing.T) {
	wg.Wait()
	require.NoError(d.DestroyTask(task.ID, false))
	require.True(waitDone)
}

func TestRawExecDriver_Start_Wait_AllocDir(t *testing.T) {

@@ -483,6 +415,19 @@ func TestRawExecDriver_Exec(t *testing.T) {
	_, _, err := harness.StartTask(task)
	require.NoError(err)

	if runtime.GOOS == "windows" {
		// Exec a command that should work
		res, err := harness.ExecTask(task.ID, []string{"cmd.exe", "/c", "echo", "hello"}, 1*time.Second)
		require.NoError(err)
		require.True(res.ExitResult.Successful())
		require.Equal(string(res.Stdout), "hello\r\n")

		// Exec a command that should fail
		res, err = harness.ExecTask(task.ID, []string{"cmd.exe", "/c", "stat", "notarealfile123abc"}, 1*time.Second)
		require.NoError(err)
		require.False(res.ExitResult.Successful())
		require.Contains(string(res.Stdout), "not recognized")
	} else {
		// Exec a command that should work
		res, err := harness.ExecTask(task.ID, []string{"/usr/bin/stat", "/tmp"}, 1*time.Second)
		require.NoError(err)

@@ -494,6 +439,7 @@ func TestRawExecDriver_Exec(t *testing.T) {
		require.NoError(err)
		require.False(res.ExitResult.Successful())
		require.Contains(string(res.Stdout), "No such file or directory")
	}

	require.NoError(harness.DestroyTask(task.ID, true))
}

@@ -16,10 +16,12 @@ import (
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/testtask"
	"github.com/hashicorp/nomad/helper/uuid"
	basePlug "github.com/hashicorp/nomad/plugins/base"
	"github.com/hashicorp/nomad/plugins/drivers"
	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"
	"golang.org/x/sys/unix"
)

func TestRawExecDriver_User(t *testing.T) {

@@ -128,3 +130,70 @@ done
	return true, nil
}, func(err error) { require.NoError(err) })
}

func TestRawExecDriver_StartWaitStop(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	d := NewRawExecDriver(testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)
	defer harness.Kill()

	// Disable cgroups so test works without root
	config := &Config{NoCgroups: true}
	var data []byte
	require.NoError(basePlug.MsgPackEncode(&data, config))
	bconfig := &basePlug.Config{PluginConfig: data}
	require.NoError(harness.SetConfig(bconfig))

	task := &drivers.TaskConfig{
		ID:   uuid.Generate(),
		Name: "test",
	}

	taskConfig := map[string]interface{}{}
	taskConfig["command"] = testtask.Path()
	taskConfig["args"] = []string{"sleep", "100s"}

	encodeDriverHelper(require, task, taskConfig)

	cleanup := harness.MkAllocDir(task, false)
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	require.NoError(err)

	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
	require.NoError(err)

	require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second))

	go func() {
		harness.StopTask(task.ID, 2*time.Second, "SIGINT")
	}()

	select {
	case result := <-ch:
		require.Equal(int(unix.SIGINT), result.Signal)
	case <-time.After(10 * time.Second):
		require.Fail("timeout waiting for task to shutdown")
	}

	// Ensure that the task is marked as dead, but account
	// for WaitTask() closing channel before internal state is updated
	testutil.WaitForResult(func() (bool, error) {
		status, err := harness.InspectTask(task.ID)
		if err != nil {
			return false, fmt.Errorf("inspecting task failed: %v", err)
		}
		if status.State != drivers.TaskStateExited {
			return false, fmt.Errorf("task hasn't exited yet; status: %v", status.State)
		}

		return true, nil
	}, func(err error) {
		require.NoError(err)
	})

	require.NoError(harness.DestroyTask(task.ID, true))
}

@@ -28,6 +28,7 @@ import (
	"github.com/hashicorp/nomad/client/taskenv"
	"github.com/hashicorp/nomad/drivers/shared/eventer"
	"github.com/hashicorp/nomad/drivers/shared/executor"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/plugins/base"
	"github.com/hashicorp/nomad/plugins/drivers"
	"github.com/hashicorp/nomad/plugins/shared"

@@ -197,8 +198,9 @@ type Driver struct {
	// logger will log to the Nomad agent
	logger hclog.Logger

-	// hasFingerprinted is used to store whether we have fingerprinted before
-	hasFingerprinted bool
+	// A tri-state boolean to know if the fingerprinting has happened and
+	// whether it has been successful
+	fingerprintSuccess *bool
	fingerprintLock sync.Mutex
}

@@ -268,25 +270,29 @@ func (d *Driver) handleFingerprint(ctx context.Context, ch chan *drivers.Fingerp
	}
}

-// setFingerprinted marks the driver as having fingerprinted once before
-func (d *Driver) setFingerprinted() {
+// setFingerprintSuccess marks the driver as having fingerprinted successfully
+func (d *Driver) setFingerprintSuccess() {
	d.fingerprintLock.Lock()
-	d.hasFingerprinted = true
+	d.fingerprintSuccess = helper.BoolToPtr(true)
	d.fingerprintLock.Unlock()
}

-// fingerprinted returns whether the driver has fingerprinted before
-func (d *Driver) fingerprinted() bool {
+// setFingerprintFailure marks the driver as having failed fingerprinting
+func (d *Driver) setFingerprintFailure() {
	d.fingerprintLock.Lock()
+	d.fingerprintSuccess = helper.BoolToPtr(false)
+	d.fingerprintLock.Unlock()
+}
+
+// fingerprintSuccessful returns true if the driver has
+// never fingerprinted or has successfully fingerprinted
+func (d *Driver) fingerprintSuccessful() bool {
	d.fingerprintLock.Lock()
	defer d.fingerprintLock.Unlock()
-	return d.hasFingerprinted
+	return d.fingerprintSuccess == nil || *d.fingerprintSuccess
}

func (d *Driver) buildFingerprint() *drivers.Fingerprint {
-	defer func() {
-		d.setFingerprinted()
-	}()
-
	fingerprint := &drivers.Fingerprint{
		Attributes: map[string]*pstructs.Attribute{},
		Health:     drivers.HealthStateHealthy,

@@ -295,9 +301,10 @@ func (d *Driver) buildFingerprint() *drivers.Fingerprint {

	// Only enable if we are root
	if syscall.Geteuid() != 0 {
-		if !d.fingerprinted() {
+		if d.fingerprintSuccessful() {
			d.logger.Debug("must run as root user, disabling")
		}
+		d.setFingerprintFailure()
		fingerprint.Health = drivers.HealthStateUndetected
		fingerprint.HealthDescription = drivers.DriverRequiresRootMessage
		return fingerprint

@@ -307,6 +314,7 @@ func (d *Driver) buildFingerprint() *drivers.Fingerprint {
	if err != nil {
		fingerprint.Health = drivers.HealthStateUndetected
		fingerprint.HealthDescription = fmt.Sprintf("Failed to execute %s version: %v", rktCmd, err)
+		d.setFingerprintFailure()
		return fingerprint
	}
	out := strings.TrimSpace(string(outBytes))

@@ -316,6 +324,7 @@ func (d *Driver) buildFingerprint() *drivers.Fingerprint {
	if len(rktMatches) != 2 || len(appcMatches) != 2 {
		fingerprint.Health = drivers.HealthStateUndetected
		fingerprint.HealthDescription = "Unable to parse rkt version string"
+		d.setFingerprintFailure()
		return fingerprint
	}

@@ -325,10 +334,11 @@ func (d *Driver) buildFingerprint() *drivers.Fingerprint {
	// Do not allow ancient rkt versions
	fingerprint.Health = drivers.HealthStateUndetected
	fingerprint.HealthDescription = fmt.Sprintf("Unsupported rkt version %s", currentVersion)
-	if !d.fingerprinted() {
+	if d.fingerprintSuccessful() {
		d.logger.Warn("unsupported rkt version please upgrade to >= "+minVersion.String(),
			"rkt_version", currentVersion)
	}
+	d.setFingerprintFailure()
	return fingerprint
}

@@ -338,7 +348,7 @@ func (d *Driver) buildFingerprint() *drivers.Fingerprint {
	if d.config.VolumesEnabled {
		fingerprint.Attributes["driver.rkt.volumes.enabled"] = pstructs.NewBoolAttribute(true)
	}
+	d.setFingerprintSuccess()
	return fingerprint

}

@@ -561,7 +571,7 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
	prepareArgs = append(prepareArgs, fmt.Sprintf("--memory=%v", cfg.Resources.LinuxResources.MemoryLimitBytes))

	// Add CPU isolator
-	prepareArgs = append(prepareArgs, fmt.Sprintf("--cpu-shares=%v", cfg.Resources.LinuxResources.CPUShares))
+	prepareArgs = append(prepareArgs, fmt.Sprintf("--cpu=%v", cfg.Resources.LinuxResources.CPUShares))

	// Add DNS servers
	if len(driverConfig.DNSServers) == 1 && (driverConfig.DNSServers[0] == "host" || driverConfig.DNSServers[0] == "none") {

@@ -39,7 +39,7 @@ const (

var (
	// ExecutorCgroupMeasuredMemStats is the list of memory stats captured by the executor
-	ExecutorCgroupMeasuredMemStats = []string{"RSS", "Cache", "Swap", "Max Usage", "Kernel Usage", "Kernel Max Usage"}
+	ExecutorCgroupMeasuredMemStats = []string{"RSS", "Cache", "Swap", "Usage", "Max Usage", "Kernel Usage", "Kernel Max Usage"}

	// ExecutorCgroupMeasuredCpuStats is the list of CPU stats captured by the executor
	ExecutorCgroupMeasuredCpuStats = []string{"System Mode", "User Mode", "Throttled Periods", "Throttled Time", "Percent"}

@@ -400,6 +400,7 @@ func (l *LibcontainerExecutor) handleStats(ch chan *cstructs.TaskResourceUsage,
	RSS:            rss,
	Cache:          cache,
	Swap:           swap.Usage,
	Usage:          stats.MemoryStats.Usage.Usage,
	MaxUsage:       maxUsage,
	KernelUsage:    stats.MemoryStats.KernelUsage.Usage,
	KernelMaxUsage: stats.MemoryStats.KernelUsage.MaxUsage,

@@ -4,8 +4,6 @@ import (
	"crypto/sha512"
	"fmt"
	"regexp"
	"strconv"
	"strings"
	"time"

	multierror "github.com/hashicorp/go-multierror"

@@ -359,24 +357,3 @@ func CheckHCLKeys(node ast.Node, valid []string) error {

	return result
}

// FormatFloat converts the floating-point number f to a string,
// after rounding it to the passed unit.
//
// Uses 'f' format (-ddd.dddddd, no exponent), and uses at most
// maxPrec digits after the decimal point.
func FormatFloat(f float64, maxPrec int) string {
	v := strconv.FormatFloat(f, 'f', -1, 64)

	idx := strings.LastIndex(v, ".")
	if idx == -1 {
		return v
	}

	sublen := idx + maxPrec + 1
	if sublen > len(v) {
		sublen = len(v)
	}

	return v[:sublen]
}
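
// The removed FormatFloat truncated rather than rounded past maxPrec digits,
// e.g. FormatFloat(0.7654321, 3) == "0.765" and
// FormatFloat(100000.31324324, 3) == "100000.313", behavior that was pinned
// by the test removed below.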

@@ -4,8 +4,6 @@ import (
	"reflect"
	"sort"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestSliceStringIsSubset(t *testing.T) {

@@ -89,35 +87,3 @@ func BenchmarkCleanEnvVar(b *testing.B) {
		CleanEnvVar(in, replacement)
	}
}

func TestFormatRoundedFloat(t *testing.T) {
	cases := []struct {
		input    float64
		expected string
	}{
		{
			1323,
			"1323",
		},
		{
			10.321,
			"10.321",
		},
		{
			100000.31324324,
			"100000.313",
		},
		{
			100000.3,
			"100000.3",
		},
		{
			0.7654321,
			"0.765",
		},
	}

	for _, c := range cases {
		require.Equal(t, c.expected, FormatFloat(c.input, 3))
	}
}

@@ -7,8 +7,6 @@ import (
	"io/ioutil"
	"os"
	"os/exec"
	"strconv"
	"syscall"
	"time"

	"github.com/hashicorp/nomad/nomad/structs"

@@ -115,21 +113,11 @@ func execute() {
	ioutil.WriteFile(file, []byte(msg), 0666)

	case "pgrp":
		// pgrp <group_int> puts the pid in a new process group
-		if len(args) < 1 {
-			fmt.Fprintln(os.Stderr, "expected process group number for pgrp")
-			os.Exit(1)
-		}
-		num := popArg()
-		grp, err := strconv.Atoi(num)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "failed to convert process group number %q: %v\n", num, err)
-			os.Exit(1)
-		}
-		if err := syscall.Setpgid(0, grp); err != nil {
-			fmt.Fprintf(os.Stderr, "failed to set process group: %v\n", err)
-			os.Exit(1)
-		}
+		executeProcessGroup(popArg())

	case "fork/exec":
		// fork/exec <pid_file> <args> forks execs the helper process

23 helper/testtask/testtask_unix.go Normal file
@@ -0,0 +1,23 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package testtask

import (
	"fmt"
	"os"
	"strconv"
	"syscall"
)

func executeProcessGroup(gid string) {
	// pgrp <group_int> puts the pid in a new process group
	grp, err := strconv.Atoi(gid)
	if err != nil {
		fmt.Fprintf(os.Stderr, "failed to convert process group number %q: %v\n", gid, err)
		os.Exit(1)
	}
	if err := syscall.Setpgid(0, grp); err != nil {
		fmt.Fprintf(os.Stderr, "failed to set process group: %v\n", err)
		os.Exit(1)
	}
}
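
// Usage sketch (inferred from the command dispatcher above): the testtask
// binary handles "pgrp <group_int>" by passing the argument here, so e.g.
// `testtask pgrp 0` moves the process into a new process group via Setpgid.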

14 helper/testtask/testtask_windows.go Normal file
@@ -0,0 +1,14 @@
// +build windows

package testtask

import (
	"fmt"
	"os"
)

func executeProcessGroup(gid string) {
	fmt.Fprintf(os.Stderr, "TODO: implement process groups on windows\n")
	fmt.Fprintf(os.Stderr, "TODO: see https://github.com/hashicorp/nomad/blob/109c5ef650206fc62334d202002cda92ceb67399/drivers/shared/executor/executor_windows.go#L9-L17\n")
	os.Exit(1)
}

@@ -13,6 +13,7 @@ import (
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/kr/pretty"
	"github.com/stretchr/testify/require"
)

@@ -196,7 +197,19 @@ func TestClientAllocations_GarbageCollectAll_Remote(t *testing.T) {

	testutil.WaitForResult(func() (bool, error) {
		nodes := s2.connectedNodes()
-		return len(nodes) == 1, nil
+		if len(nodes) != 1 {
+			return false, fmt.Errorf("should have 1 client. found %d", len(nodes))
+		}
+		req := &structs.NodeSpecificRequest{
+			NodeID:       c.NodeID(),
+			QueryOptions: structs.QueryOptions{Region: "global"},
+		}
+		resp := structs.SingleNodeResponse{}
+		if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp); err != nil {
+			return false, err
+		}
+		return resp.Node != nil && resp.Node.Status == structs.NodeStatusReady, fmt.Errorf(
+			"expected ready but found %s", pretty.Sprint(resp.Node))
	}, func(err error) {
		t.Fatalf("should have a client")
	})

@@ -442,7 +455,19 @@ func TestClientAllocations_GarbageCollect_Remote(t *testing.T) {
	}
	testutil.WaitForResult(func() (bool, error) {
		nodes := s2.connectedNodes()
-		return len(nodes) == 1, nil
+		if len(nodes) != 1 {
+			return false, fmt.Errorf("should have 1 client. found %d", len(nodes))
+		}
+		req := &structs.NodeSpecificRequest{
+			NodeID:       c.NodeID(),
+			QueryOptions: structs.QueryOptions{Region: "global"},
+		}
+		resp := structs.SingleNodeResponse{}
+		if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp); err != nil {
+			return false, err
+		}
+		return resp.Node != nil && resp.Node.Status == structs.NodeStatusReady, fmt.Errorf(
+			"expected ready but found %s", pretty.Sprint(resp.Node))
	}, func(err error) {
		t.Fatalf("should have a client")
	})

@@ -1401,6 +1401,11 @@ func (n *nomadFSM) reconcileQueuedAllocations(index uint64) error {
		break
	}
	job := rawJob.(*structs.Job)

+	// Nothing to do for queued allocations if the job is a parent periodic/parameterized job
+	if job.IsParameterized() || job.IsPeriodic() {
+		continue
+	}
	planner := &scheduler.Harness{
		State: &snap.StateStore,
	}
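
// Parent periodic/parameterized jobs never place allocations themselves;
// their dispatched children do. Reconciling queued allocations for the parent
// would therefore only produce spurious summary entries, which is the bug the
// new FSM test below pins down.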

@@ -2816,6 +2816,78 @@ func TestFSM_ReconcileSummaries(t *testing.T) {
	}
}

// COMPAT: Remove in 0.11
func TestFSM_ReconcileParentJobSummary(t *testing.T) {
	// This test exercises code to handle https://github.com/hashicorp/nomad/issues/3886
	t.Parallel()

	require := require.New(t)
	// Add some state
	fsm := testFSM(t)
	state := fsm.State()

	// Add a node
	node := mock.Node()
	state.UpsertNode(800, node)

	// Make a parameterized job
	job1 := mock.BatchJob()
	job1.ID = "test"
	job1.ParameterizedJob = &structs.ParameterizedJobConfig{
		Payload: "random",
	}
	job1.TaskGroups[0].Count = 1
	state.UpsertJob(1000, job1)

	// Make a child job
	childJob := job1.Copy()
	childJob.ID = job1.ID + "dispatch-23423423"
	childJob.ParentID = job1.ID
	childJob.Dispatched = true
	childJob.Status = structs.JobStatusRunning

	// Create an alloc for child job
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	alloc.Job = childJob
	alloc.JobID = childJob.ID
	alloc.ClientStatus = structs.AllocClientStatusRunning

	state.UpsertJob(1010, childJob)
	state.UpsertAllocs(1011, []*structs.Allocation{alloc})

	// Make the summary incorrect in the state store
	summary, err := state.JobSummaryByID(nil, job1.Namespace, job1.ID)
	require.Nil(err)

	summary.Children = nil
	summary.Summary = make(map[string]structs.TaskGroupSummary)
	summary.Summary["web"] = structs.TaskGroupSummary{
		Queued: 1,
	}

	req := structs.GenericRequest{}
	buf, err := structs.Encode(structs.ReconcileJobSummariesRequestType, req)
	require.Nil(err)

	resp := fsm.Apply(makeLog(buf))
	require.Nil(resp)

	ws := memdb.NewWatchSet()
	out1, _ := state.JobSummaryByID(ws, job1.Namespace, job1.ID)
	expected := structs.JobSummary{
		JobID:       job1.ID,
		Namespace:   job1.Namespace,
		Summary:     make(map[string]structs.TaskGroupSummary),
		CreateIndex: 1000,
		ModifyIndex: out1.ModifyIndex,
		Children: &structs.JobChildrenSummary{
			Running: 1,
		},
	}
	require.Equal(&expected, out1)
}

func TestFSM_LeakedDeployments(t *testing.T) {
	t.Parallel()
	require := require.New(t)
@@ -6,6 +6,8 @@ import (
 	"sort"
 	"time"

+	"reflect"
+
 	log "github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
 	multierror "github.com/hashicorp/go-multierror"
@@ -3049,12 +3051,86 @@ func (s *StateStore) ReconcileJobSummaries(index uint64) error {
 	if err != nil {
 		return err
 	}
+	// COMPAT: Remove after 0.11
+	// Iterate over jobs to build a list of parent jobs and their children
+	parentMap := make(map[string][]*structs.Job)
+	for {
+		rawJob := iter.Next()
+		if rawJob == nil {
+			break
+		}
+		job := rawJob.(*structs.Job)
+		if job.ParentID != "" {
+			children := parentMap[job.ParentID]
+			children = append(children, job)
+			parentMap[job.ParentID] = children
+		}
+	}
+
+	// Get all the jobs again
+	iter, err = txn.Get("jobs", "id")
+	if err != nil {
+		return err
+	}
+
 	for {
 		rawJob := iter.Next()
 		if rawJob == nil {
 			break
 		}
 		job := rawJob.(*structs.Job)

+		if job.IsParameterized() || job.IsPeriodic() {
+			// COMPAT: Remove after 0.11
+
+			// The following block of code fixes incorrect child summaries due to a bug
+			// See https://github.com/hashicorp/nomad/issues/3886 for details
+			rawSummary, err := txn.First("job_summary", "id", job.Namespace, job.ID)
+			if err != nil {
+				return err
+			}
+			if rawSummary == nil {
+				continue
+			}
+
+			oldSummary := rawSummary.(*structs.JobSummary)
+
+			// Create an empty summary
+			summary := &structs.JobSummary{
+				JobID:     job.ID,
+				Namespace: job.Namespace,
+				Summary:   make(map[string]structs.TaskGroupSummary),
+				Children:  &structs.JobChildrenSummary{},
+			}
+
+			// Iterate over children of this job if any to fix summary counts
+			children := parentMap[job.ID]
+			for _, childJob := range children {
+				switch childJob.Status {
+				case structs.JobStatusPending:
+					summary.Children.Pending++
+				case structs.JobStatusDead:
+					summary.Children.Dead++
+				case structs.JobStatusRunning:
+					summary.Children.Running++
+				}
+			}
+
+			// Insert the job summary if it's different
+			if !reflect.DeepEqual(summary, oldSummary) {
+				// Set the create index of the summary same as the job's create index
+				// and the modify index to the current index
+				summary.CreateIndex = job.CreateIndex
+				summary.ModifyIndex = index
+
+				if err := txn.Insert("job_summary", summary); err != nil {
+					return fmt.Errorf("error inserting job summary: %v", err)
+				}
+			}
+
+			// Done with handling a parent job, continue to next
+			continue
+		}

 		// Create a job summary for the job
 		summary := &structs.JobSummary{
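The reconciliation added above works in two passes over the jobs table: the first pass buckets child jobs under their parent's ID, the second rebuilds each parent's children counts from that map and only writes the summary back when it differs from the stored one. The core of that idea in isolation (types simplified; this is a sketch, not the state-store code):

package state

// job is a simplified record for illustration only.
type job struct {
	ID       string
	ParentID string
	Status   string
}

// childrenSummary mirrors the counts tracked in structs.JobChildrenSummary.
type childrenSummary struct{ Pending, Running, Dead int }

// reconcileChildren recomputes every parent's child counts from scratch.
func reconcileChildren(jobs []job) map[string]childrenSummary {
	// Pass 1: bucket children by parent ID.
	byParent := make(map[string][]job)
	for _, j := range jobs {
		if j.ParentID != "" {
			byParent[j.ParentID] = append(byParent[j.ParentID], j)
		}
	}
	// Pass 2: tally child statuses per parent.
	out := make(map[string]childrenSummary)
	for parent, children := range byParent {
		var s childrenSummary
		for _, c := range children {
			switch c.Status {
			case "pending":
				s.Pending++
			case "running":
				s.Running++
			case "dead":
				s.Dead++
			}
		}
		out[parent] = s
	}
	return out
}

Recomputing from current state rather than patching the stored summary is what lets this double as a repair for summaries corrupted by the earlier bug.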
@@ -4354,6 +4354,95 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) {
 	}
 }

+func TestStateStore_ReconcileParentJobSummary(t *testing.T) {
+	t.Parallel()
+	require := require.New(t)
+	state := testStateStore(t)
+
+	// Add a node
+	node := mock.Node()
+	state.UpsertNode(80, node)
+
+	// Make a parameterized job
+	job1 := mock.BatchJob()
+	job1.ID = "test"
+	job1.ParameterizedJob = &structs.ParameterizedJobConfig{
+		Payload: "random",
+	}
+	job1.TaskGroups[0].Count = 1
+	state.UpsertJob(100, job1)
+
+	// Make a child job
+	childJob := job1.Copy()
+	childJob.ID = job1.ID + "dispatch-23423423"
+	childJob.ParentID = job1.ID
+	childJob.Dispatched = true
+	childJob.Status = structs.JobStatusRunning
+
+	// Make some allocs for child job
+	alloc := mock.Alloc()
+	alloc.NodeID = node.ID
+	alloc.Job = childJob
+	alloc.JobID = childJob.ID
+	alloc.ClientStatus = structs.AllocClientStatusRunning
+
+	alloc2 := mock.Alloc()
+	alloc2.NodeID = node.ID
+	alloc2.Job = childJob
+	alloc2.JobID = childJob.ID
+	alloc2.ClientStatus = structs.AllocClientStatusFailed
+
+	require.Nil(state.UpsertJob(110, childJob))
+	require.Nil(state.UpsertAllocs(111, []*structs.Allocation{alloc, alloc2}))
+
+	// Make the summary incorrect in the state store
+	summary, err := state.JobSummaryByID(nil, job1.Namespace, job1.ID)
+	require.Nil(err)
+
+	summary.Children = nil
+	summary.Summary = make(map[string]structs.TaskGroupSummary)
+	summary.Summary["web"] = structs.TaskGroupSummary{
+		Queued: 1,
+	}
+
+	// Delete the child job summary
+	state.DeleteJobSummary(125, childJob.Namespace, childJob.ID)
+
+	state.ReconcileJobSummaries(120)
+
+	ws := memdb.NewWatchSet()
+
+	// Verify parent summary is corrected
+	summary, _ = state.JobSummaryByID(ws, alloc.Namespace, job1.ID)
+	expectedSummary := structs.JobSummary{
+		JobID:     job1.ID,
+		Namespace: job1.Namespace,
+		Summary:   make(map[string]structs.TaskGroupSummary),
+		Children: &structs.JobChildrenSummary{
+			Running: 1,
+		},
+		CreateIndex: 100,
+		ModifyIndex: 120,
+	}
+	require.Equal(&expectedSummary, summary)
+
+	// Verify child job summary is also correct
+	childSummary, _ := state.JobSummaryByID(ws, childJob.Namespace, childJob.ID)
+	expectedChildSummary := structs.JobSummary{
+		JobID:     childJob.ID,
+		Namespace: childJob.Namespace,
+		Summary: map[string]structs.TaskGroupSummary{
+			"web": {
+				Running: 1,
+				Failed:  1,
+			},
+		},
+		CreateIndex: 110,
+		ModifyIndex: 120,
+	}
+	require.Equal(&expectedChildSummary, childSummary)
+}
+
 func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) {
 	state := testStateStore(t)

@@ -4017,6 +4017,9 @@ func (d *DispatchPayloadConfig) Validate() error {
 }

 var (
+	// These default restart policies need to be in sync with
+	// Canonicalize in api/tasks.go
+
 	DefaultServiceJobRestartPolicy = RestartPolicy{
 		Delay:    15 * time.Second,
 		Attempts: 2,

@@ -4032,6 +4035,9 @@ var (
 )

 var (
+	// These default reschedule policies need to be in sync with
+	// NewDefaultReschedulePolicy in api/tasks.go
+
 	DefaultServiceJobReschedulePolicy = ReschedulePolicy{
 		Delay:         30 * time.Second,
 		DelayFunction: "exponential",
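The comments added above document an invariant the compiler cannot check: the defaults here must mirror the ones applied by Canonicalize and NewDefaultReschedulePolicy in api/tasks.go. One common way to pin such an invariant down is a small guard test; a hypothetical sketch (the mirrored constant is an assumption, shown only to illustrate the pattern):

package structs

import (
	"testing"
	"time"
)

// TestDefaultRestartPolicyInSync is a hypothetical guard test: if someone
// changes one copy of the default without the other, this fails loudly.
func TestDefaultRestartPolicyInSync(t *testing.T) {
	// apiDefaultDelay mirrors the value Canonicalize in api/tasks.go applies.
	apiDefaultDelay := 15 * time.Second
	if DefaultServiceJobRestartPolicy.Delay != apiDefaultDelay {
		t.Fatalf("structs default delay %v differs from api default %v",
			DefaultServiceJobRestartPolicy.Delay, apiDefaultDelay)
	}
}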
plugins/base/proto/base.pb.go

@@ -48,7 +48,7 @@ func (x PluginType) String() string {
 	return proto.EnumName(PluginType_name, int32(x))
 }
 func (PluginType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_base_f2480776612a8fbd, []int{0}
+	return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{0}
 }

 // PluginInfoRequest is used to request the plugins basic information.

@@ -62,7 +62,7 @@ func (m *PluginInfoRequest) Reset() { *m = PluginInfoRequest{} }
 func (m *PluginInfoRequest) String() string { return proto.CompactTextString(m) }
 func (*PluginInfoRequest) ProtoMessage() {}
 func (*PluginInfoRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_base_f2480776612a8fbd, []int{0}
+	return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{0}
 }
 func (m *PluginInfoRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_PluginInfoRequest.Unmarshal(m, b)

@@ -104,7 +104,7 @@ func (m *PluginInfoResponse) Reset() { *m = PluginInfoResponse{} }
 func (m *PluginInfoResponse) String() string { return proto.CompactTextString(m) }
 func (*PluginInfoResponse) ProtoMessage() {}
 func (*PluginInfoResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_base_f2480776612a8fbd, []int{1}
+	return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{1}
 }
 func (m *PluginInfoResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_PluginInfoResponse.Unmarshal(m, b)

@@ -163,7 +163,7 @@ func (m *ConfigSchemaRequest) Reset() { *m = ConfigSchemaRequest{} }
 func (m *ConfigSchemaRequest) String() string { return proto.CompactTextString(m) }
 func (*ConfigSchemaRequest) ProtoMessage() {}
 func (*ConfigSchemaRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_base_f2480776612a8fbd, []int{2}
+	return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{2}
 }
 func (m *ConfigSchemaRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ConfigSchemaRequest.Unmarshal(m, b)

@@ -196,7 +196,7 @@ func (m *ConfigSchemaResponse) Reset() { *m = ConfigSchemaResponse{} }
 func (m *ConfigSchemaResponse) String() string { return proto.CompactTextString(m) }
 func (*ConfigSchemaResponse) ProtoMessage() {}
 func (*ConfigSchemaResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_base_f2480776612a8fbd, []int{3}
+	return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{3}
 }
 func (m *ConfigSchemaResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ConfigSchemaResponse.Unmarshal(m, b)

@@ -240,7 +240,7 @@ func (m *SetConfigRequest) Reset() { *m = SetConfigRequest{} }
 func (m *SetConfigRequest) String() string { return proto.CompactTextString(m) }
 func (*SetConfigRequest) ProtoMessage() {}
 func (*SetConfigRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_base_f2480776612a8fbd, []int{4}
+	return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{4}
 }
 func (m *SetConfigRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_SetConfigRequest.Unmarshal(m, b)

@@ -294,7 +294,7 @@ func (m *NomadConfig) Reset() { *m = NomadConfig{} }
 func (m *NomadConfig) String() string { return proto.CompactTextString(m) }
 func (*NomadConfig) ProtoMessage() {}
 func (*NomadConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_base_f2480776612a8fbd, []int{5}
+	return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{5}
 }
 func (m *NomadConfig) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_NomadConfig.Unmarshal(m, b)

@@ -339,7 +339,7 @@ func (m *NomadDriverConfig) Reset() { *m = NomadDriverConfig{} }
 func (m *NomadDriverConfig) String() string { return proto.CompactTextString(m) }
 func (*NomadDriverConfig) ProtoMessage() {}
 func (*NomadDriverConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_base_f2480776612a8fbd, []int{6}
+	return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{6}
 }
 func (m *NomadDriverConfig) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_NomadDriverConfig.Unmarshal(m, b)

@@ -384,7 +384,7 @@ func (m *SetConfigResponse) Reset() { *m = SetConfigResponse{} }
 func (m *SetConfigResponse) String() string { return proto.CompactTextString(m) }
 func (*SetConfigResponse) ProtoMessage() {}
 func (*SetConfigResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_base_f2480776612a8fbd, []int{7}
+	return fileDescriptor_base_6a1a5ff99a0b9e5d, []int{7}
 }
 func (m *SetConfigResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_SetConfigResponse.Unmarshal(m, b)

@@ -560,9 +560,9 @@ var _BasePlugin_serviceDesc = grpc.ServiceDesc{
 	Metadata: "plugins/base/proto/base.proto",
 }

-func init() { proto.RegisterFile("plugins/base/proto/base.proto", fileDescriptor_base_f2480776612a8fbd) }
+func init() { proto.RegisterFile("plugins/base/proto/base.proto", fileDescriptor_base_6a1a5ff99a0b9e5d) }

-var fileDescriptor_base_f2480776612a8fbd = []byte{
+var fileDescriptor_base_6a1a5ff99a0b9e5d = []byte{
 	// 535 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x41, 0x8f, 0x12, 0x4d,
 	0x10, 0xdd, 0x01, 0x3e, 0x36, 0x14, 0xb0, 0x81, 0xe6, 0x33, 0x21, 0x24, 0x26, 0x64, 0xa2, 0x09,
plugins/device/proto/device.pb.go

@@ -37,7 +37,7 @@ func (m *FingerprintRequest) Reset() { *m = FingerprintRequest{} }
 func (m *FingerprintRequest) String() string { return proto.CompactTextString(m) }
 func (*FingerprintRequest) ProtoMessage() {}
 func (*FingerprintRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{0}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{0}
 }
 func (m *FingerprintRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_FingerprintRequest.Unmarshal(m, b)

@@ -72,7 +72,7 @@ func (m *FingerprintResponse) Reset() { *m = FingerprintResponse{} }
 func (m *FingerprintResponse) String() string { return proto.CompactTextString(m) }
 func (*FingerprintResponse) ProtoMessage() {}
 func (*FingerprintResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{1}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{1}
 }
 func (m *FingerprintResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_FingerprintResponse.Unmarshal(m, b)

@@ -121,7 +121,7 @@ func (m *DeviceGroup) Reset() { *m = DeviceGroup{} }
 func (m *DeviceGroup) String() string { return proto.CompactTextString(m) }
 func (*DeviceGroup) ProtoMessage() {}
 func (*DeviceGroup) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{2}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{2}
 }
 func (m *DeviceGroup) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DeviceGroup.Unmarshal(m, b)

@@ -198,7 +198,7 @@ func (m *DetectedDevice) Reset() { *m = DetectedDevice{} }
 func (m *DetectedDevice) String() string { return proto.CompactTextString(m) }
 func (*DetectedDevice) ProtoMessage() {}
 func (*DetectedDevice) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{3}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{3}
 }
 func (m *DetectedDevice) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DetectedDevice.Unmarshal(m, b)

@@ -260,7 +260,7 @@ func (m *DeviceLocality) Reset() { *m = DeviceLocality{} }
 func (m *DeviceLocality) String() string { return proto.CompactTextString(m) }
 func (*DeviceLocality) ProtoMessage() {}
 func (*DeviceLocality) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{4}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{4}
 }
 func (m *DeviceLocality) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DeviceLocality.Unmarshal(m, b)

@@ -301,7 +301,7 @@ func (m *ReserveRequest) Reset() { *m = ReserveRequest{} }
 func (m *ReserveRequest) String() string { return proto.CompactTextString(m) }
 func (*ReserveRequest) ProtoMessage() {}
 func (*ReserveRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{5}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{5}
 }
 func (m *ReserveRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ReserveRequest.Unmarshal(m, b)

@@ -344,7 +344,7 @@ func (m *ReserveResponse) Reset() { *m = ReserveResponse{} }
 func (m *ReserveResponse) String() string { return proto.CompactTextString(m) }
 func (*ReserveResponse) ProtoMessage() {}
 func (*ReserveResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{6}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{6}
 }
 func (m *ReserveResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ReserveResponse.Unmarshal(m, b)

@@ -389,7 +389,7 @@ func (m *ContainerReservation) Reset() { *m = ContainerReservation{} }
 func (m *ContainerReservation) String() string { return proto.CompactTextString(m) }
 func (*ContainerReservation) ProtoMessage() {}
 func (*ContainerReservation) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{7}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{7}
 }
 func (m *ContainerReservation) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ContainerReservation.Unmarshal(m, b)

@@ -448,7 +448,7 @@ func (m *Mount) Reset() { *m = Mount{} }
 func (m *Mount) String() string { return proto.CompactTextString(m) }
 func (*Mount) ProtoMessage() {}
 func (*Mount) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{8}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{8}
 }
 func (m *Mount) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Mount.Unmarshal(m, b)

@@ -509,7 +509,7 @@ func (m *DeviceSpec) Reset() { *m = DeviceSpec{} }
 func (m *DeviceSpec) String() string { return proto.CompactTextString(m) }
 func (*DeviceSpec) ProtoMessage() {}
 func (*DeviceSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{9}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{9}
 }
 func (m *DeviceSpec) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DeviceSpec.Unmarshal(m, b)

@@ -563,7 +563,7 @@ func (m *StatsRequest) Reset() { *m = StatsRequest{} }
 func (m *StatsRequest) String() string { return proto.CompactTextString(m) }
 func (*StatsRequest) ProtoMessage() {}
 func (*StatsRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{10}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{10}
 }
 func (m *StatsRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StatsRequest.Unmarshal(m, b)

@@ -603,7 +603,7 @@ func (m *StatsResponse) Reset() { *m = StatsResponse{} }
 func (m *StatsResponse) String() string { return proto.CompactTextString(m) }
 func (*StatsResponse) ProtoMessage() {}
 func (*StatsResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{11}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{11}
 }
 func (m *StatsResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StatsResponse.Unmarshal(m, b)

@@ -647,7 +647,7 @@ func (m *DeviceGroupStats) Reset() { *m = DeviceGroupStats{} }
 func (m *DeviceGroupStats) String() string { return proto.CompactTextString(m) }
 func (*DeviceGroupStats) ProtoMessage() {}
 func (*DeviceGroupStats) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{12}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{12}
 }
 func (m *DeviceGroupStats) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DeviceGroupStats.Unmarshal(m, b)

@@ -713,7 +713,7 @@ func (m *DeviceStats) Reset() { *m = DeviceStats{} }
 func (m *DeviceStats) String() string { return proto.CompactTextString(m) }
 func (*DeviceStats) ProtoMessage() {}
 func (*DeviceStats) Descriptor() ([]byte, []int) {
-	return fileDescriptor_device_c21dc006d6a19ae5, []int{13}
+	return fileDescriptor_device_a4d1cccedbd8401c, []int{13}
 }
 func (m *DeviceStats) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DeviceStats.Unmarshal(m, b)

@@ -984,10 +984,10 @@ var _DevicePlugin_serviceDesc = grpc.ServiceDesc{
 }

 func init() {
-	proto.RegisterFile("plugins/device/proto/device.proto", fileDescriptor_device_c21dc006d6a19ae5)
+	proto.RegisterFile("plugins/device/proto/device.proto", fileDescriptor_device_a4d1cccedbd8401c)
 }

-var fileDescriptor_device_c21dc006d6a19ae5 = []byte{
+var fileDescriptor_device_a4d1cccedbd8401c = []byte{
 	// 979 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xef, 0x8e, 0x1b, 0x35,
 	0x10, 0x27, 0xb9, 0xcb, 0x25, 0x99, 0xdc, 0x5d, 0x8b, 0x7b, 0x42, 0x61, 0x81, 0xf6, 0x58, 0x09,
plugins/drivers/proto/driver.pb.go

@@ -50,7 +50,7 @@ func (x TaskState) String() string {
 	return proto.EnumName(TaskState_name, int32(x))
 }
 func (TaskState) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{0}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{0}
 }

 type FingerprintResponse_HealthState int32

@@ -76,7 +76,7 @@ func (x FingerprintResponse_HealthState) String() string {
 	return proto.EnumName(FingerprintResponse_HealthState_name, int32(x))
 }
 func (FingerprintResponse_HealthState) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{5, 0}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{5, 0}
 }

 type StartTaskResponse_Result int32

@@ -102,7 +102,7 @@ func (x StartTaskResponse_Result) String() string {
 	return proto.EnumName(StartTaskResponse_Result_name, int32(x))
 }
 func (StartTaskResponse_Result) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{9, 0}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{9, 0}
 }

 type DriverCapabilities_FSIsolation int32

@@ -128,7 +128,7 @@ func (x DriverCapabilities_FSIsolation) String() string {
 	return proto.EnumName(DriverCapabilities_FSIsolation_name, int32(x))
 }
 func (DriverCapabilities_FSIsolation) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{25, 0}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{25, 0}
 }

 type CPUUsage_Fields int32

@@ -163,7 +163,7 @@ func (x CPUUsage_Fields) String() string {
 	return proto.EnumName(CPUUsage_Fields_name, int32(x))
 }
 func (CPUUsage_Fields) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{43, 0}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{43, 0}
 }

 type MemoryUsage_Fields int32
@@ -174,6 +174,7 @@ const (
 	MemoryUsage_MAX_USAGE        MemoryUsage_Fields = 2
 	MemoryUsage_KERNEL_USAGE     MemoryUsage_Fields = 3
 	MemoryUsage_KERNEL_MAX_USAGE MemoryUsage_Fields = 4
+	MemoryUsage_USAGE            MemoryUsage_Fields = 5
 )

 var MemoryUsage_Fields_name = map[int32]string{

@@ -182,6 +183,7 @@ var MemoryUsage_Fields_name = map[int32]string{
 	2: "MAX_USAGE",
 	3: "KERNEL_USAGE",
 	4: "KERNEL_MAX_USAGE",
+	5: "USAGE",
 }
 var MemoryUsage_Fields_value = map[string]int32{
 	"RSS": 0,

@@ -189,13 +191,14 @@ var MemoryUsage_Fields_value = map[string]int32{
 	"MAX_USAGE":        2,
 	"KERNEL_USAGE":     3,
 	"KERNEL_MAX_USAGE": 4,
+	"USAGE":            5,
 }

 func (x MemoryUsage_Fields) String() string {
 	return proto.EnumName(MemoryUsage_Fields_name, int32(x))
 }
 func (MemoryUsage_Fields) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{44, 0}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{44, 0}
 }

 type TaskConfigSchemaRequest struct {
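Beyond the descriptor rename that runs through this whole file, the hunks above reflect a real schema change: MemoryUsage gains a USAGE enum value for MeasuredFields and (as the struct hunk further down shows) a flat Usage field with protobuf tag 7. Assuming the generated package is imported as below, a caller that can only sample total memory usage would report it like this:

package main

import (
	dproto "github.com/hashicorp/nomad/plugins/drivers/proto"
)

// exampleMemoryUsage reports a flat usage figure (e.g. from a cgroup that
// exposes only current usage) and marks it as the one sampled field.
func exampleMemoryUsage() *dproto.MemoryUsage {
	return &dproto.MemoryUsage{
		Usage:          512 * 1024 * 1024, // bytes
		MeasuredFields: []dproto.MemoryUsage_Fields{dproto.MemoryUsage_USAGE},
	}
}

MeasuredFields exists so consumers can distinguish "zero because unsampled" from "zero because measured as zero"; the new USAGE value extends that to platforms without RSS-style breakdowns.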
@@ -208,7 +211,7 @@ func (m *TaskConfigSchemaRequest) Reset() { *m = TaskConfigSchemaRequest
 func (m *TaskConfigSchemaRequest) String() string { return proto.CompactTextString(m) }
 func (*TaskConfigSchemaRequest) ProtoMessage() {}
 func (*TaskConfigSchemaRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{0}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{0}
 }
 func (m *TaskConfigSchemaRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TaskConfigSchemaRequest.Unmarshal(m, b)

@@ -240,7 +243,7 @@ func (m *TaskConfigSchemaResponse) Reset() { *m = TaskConfigSchemaRespon
 func (m *TaskConfigSchemaResponse) String() string { return proto.CompactTextString(m) }
 func (*TaskConfigSchemaResponse) ProtoMessage() {}
 func (*TaskConfigSchemaResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{1}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{1}
 }
 func (m *TaskConfigSchemaResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TaskConfigSchemaResponse.Unmarshal(m, b)

@@ -277,7 +280,7 @@ func (m *CapabilitiesRequest) Reset() { *m = CapabilitiesRequest{} }
 func (m *CapabilitiesRequest) String() string { return proto.CompactTextString(m) }
 func (*CapabilitiesRequest) ProtoMessage() {}
 func (*CapabilitiesRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{2}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{2}
 }
 func (m *CapabilitiesRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_CapabilitiesRequest.Unmarshal(m, b)

@@ -312,7 +315,7 @@ func (m *CapabilitiesResponse) Reset() { *m = CapabilitiesResponse{} }
 func (m *CapabilitiesResponse) String() string { return proto.CompactTextString(m) }
 func (*CapabilitiesResponse) ProtoMessage() {}
 func (*CapabilitiesResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{3}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{3}
 }
 func (m *CapabilitiesResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_CapabilitiesResponse.Unmarshal(m, b)

@@ -349,7 +352,7 @@ func (m *FingerprintRequest) Reset() { *m = FingerprintRequest{} }
 func (m *FingerprintRequest) String() string { return proto.CompactTextString(m) }
 func (*FingerprintRequest) ProtoMessage() {}
 func (*FingerprintRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{4}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{4}
 }
 func (m *FingerprintRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_FingerprintRequest.Unmarshal(m, b)

@@ -392,7 +395,7 @@ func (m *FingerprintResponse) Reset() { *m = FingerprintResponse{} }
 func (m *FingerprintResponse) String() string { return proto.CompactTextString(m) }
 func (*FingerprintResponse) ProtoMessage() {}
 func (*FingerprintResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{5}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{5}
 }
 func (m *FingerprintResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_FingerprintResponse.Unmarshal(m, b)

@@ -447,7 +450,7 @@ func (m *RecoverTaskRequest) Reset() { *m = RecoverTaskRequest{} }
 func (m *RecoverTaskRequest) String() string { return proto.CompactTextString(m) }
 func (*RecoverTaskRequest) ProtoMessage() {}
 func (*RecoverTaskRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{6}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{6}
 }
 func (m *RecoverTaskRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_RecoverTaskRequest.Unmarshal(m, b)

@@ -491,7 +494,7 @@ func (m *RecoverTaskResponse) Reset() { *m = RecoverTaskResponse{} }
 func (m *RecoverTaskResponse) String() string { return proto.CompactTextString(m) }
 func (*RecoverTaskResponse) ProtoMessage() {}
 func (*RecoverTaskResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{7}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{7}
 }
 func (m *RecoverTaskResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_RecoverTaskResponse.Unmarshal(m, b)

@@ -523,7 +526,7 @@ func (m *StartTaskRequest) Reset() { *m = StartTaskRequest{} }
 func (m *StartTaskRequest) String() string { return proto.CompactTextString(m) }
 func (*StartTaskRequest) ProtoMessage() {}
 func (*StartTaskRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{8}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{8}
 }
 func (m *StartTaskRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StartTaskRequest.Unmarshal(m, b)

@@ -577,7 +580,7 @@ func (m *StartTaskResponse) Reset() { *m = StartTaskResponse{} }
 func (m *StartTaskResponse) String() string { return proto.CompactTextString(m) }
 func (*StartTaskResponse) ProtoMessage() {}
 func (*StartTaskResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{9}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{9}
 }
 func (m *StartTaskResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StartTaskResponse.Unmarshal(m, b)

@@ -637,7 +640,7 @@ func (m *WaitTaskRequest) Reset() { *m = WaitTaskRequest{} }
 func (m *WaitTaskRequest) String() string { return proto.CompactTextString(m) }
 func (*WaitTaskRequest) ProtoMessage() {}
 func (*WaitTaskRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{10}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{10}
 }
 func (m *WaitTaskRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_WaitTaskRequest.Unmarshal(m, b)

@@ -678,7 +681,7 @@ func (m *WaitTaskResponse) Reset() { *m = WaitTaskResponse{} }
 func (m *WaitTaskResponse) String() string { return proto.CompactTextString(m) }
 func (*WaitTaskResponse) ProtoMessage() {}
 func (*WaitTaskResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{11}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{11}
 }
 func (m *WaitTaskResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_WaitTaskResponse.Unmarshal(m, b)

@@ -730,7 +733,7 @@ func (m *StopTaskRequest) Reset() { *m = StopTaskRequest{} }
 func (m *StopTaskRequest) String() string { return proto.CompactTextString(m) }
 func (*StopTaskRequest) ProtoMessage() {}
 func (*StopTaskRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{12}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{12}
 }
 func (m *StopTaskRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StopTaskRequest.Unmarshal(m, b)

@@ -781,7 +784,7 @@ func (m *StopTaskResponse) Reset() { *m = StopTaskResponse{} }
 func (m *StopTaskResponse) String() string { return proto.CompactTextString(m) }
 func (*StopTaskResponse) ProtoMessage() {}
 func (*StopTaskResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{13}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{13}
 }
 func (m *StopTaskResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_StopTaskResponse.Unmarshal(m, b)

@@ -815,7 +818,7 @@ func (m *DestroyTaskRequest) Reset() { *m = DestroyTaskRequest{} }
 func (m *DestroyTaskRequest) String() string { return proto.CompactTextString(m) }
 func (*DestroyTaskRequest) ProtoMessage() {}
 func (*DestroyTaskRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{14}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{14}
 }
 func (m *DestroyTaskRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DestroyTaskRequest.Unmarshal(m, b)

@@ -859,7 +862,7 @@ func (m *DestroyTaskResponse) Reset() { *m = DestroyTaskResponse{} }
 func (m *DestroyTaskResponse) String() string { return proto.CompactTextString(m) }
 func (*DestroyTaskResponse) ProtoMessage() {}
 func (*DestroyTaskResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{15}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{15}
 }
 func (m *DestroyTaskResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DestroyTaskResponse.Unmarshal(m, b)

@@ -891,7 +894,7 @@ func (m *InspectTaskRequest) Reset() { *m = InspectTaskRequest{} }
 func (m *InspectTaskRequest) String() string { return proto.CompactTextString(m) }
 func (*InspectTaskRequest) ProtoMessage() {}
 func (*InspectTaskRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{16}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{16}
 }
 func (m *InspectTaskRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_InspectTaskRequest.Unmarshal(m, b)

@@ -934,7 +937,7 @@ func (m *InspectTaskResponse) Reset() { *m = InspectTaskResponse{} }
 func (m *InspectTaskResponse) String() string { return proto.CompactTextString(m) }
 func (*InspectTaskResponse) ProtoMessage() {}
 func (*InspectTaskResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{17}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{17}
 }
 func (m *InspectTaskResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_InspectTaskResponse.Unmarshal(m, b)

@@ -989,7 +992,7 @@ func (m *TaskStatsRequest) Reset() { *m = TaskStatsRequest{} }
 func (m *TaskStatsRequest) String() string { return proto.CompactTextString(m) }
 func (*TaskStatsRequest) ProtoMessage() {}
 func (*TaskStatsRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{18}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{18}
 }
 func (m *TaskStatsRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TaskStatsRequest.Unmarshal(m, b)

@@ -1035,7 +1038,7 @@ func (m *TaskStatsResponse) Reset() { *m = TaskStatsResponse{} }
 func (m *TaskStatsResponse) String() string { return proto.CompactTextString(m) }
 func (*TaskStatsResponse) ProtoMessage() {}
 func (*TaskStatsResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{19}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{19}
 }
 func (m *TaskStatsResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TaskStatsResponse.Unmarshal(m, b)

@@ -1072,7 +1075,7 @@ func (m *TaskEventsRequest) Reset() { *m = TaskEventsRequest{} }
 func (m *TaskEventsRequest) String() string { return proto.CompactTextString(m) }
 func (*TaskEventsRequest) ProtoMessage() {}
 func (*TaskEventsRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{20}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{20}
 }
 func (m *TaskEventsRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TaskEventsRequest.Unmarshal(m, b)

@@ -1106,7 +1109,7 @@ func (m *SignalTaskRequest) Reset() { *m = SignalTaskRequest{} }
 func (m *SignalTaskRequest) String() string { return proto.CompactTextString(m) }
 func (*SignalTaskRequest) ProtoMessage() {}
 func (*SignalTaskRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{21}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{21}
 }
 func (m *SignalTaskRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_SignalTaskRequest.Unmarshal(m, b)

@@ -1150,7 +1153,7 @@ func (m *SignalTaskResponse) Reset() { *m = SignalTaskResponse{} }
 func (m *SignalTaskResponse) String() string { return proto.CompactTextString(m) }
 func (*SignalTaskResponse) ProtoMessage() {}
 func (*SignalTaskResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{22}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{22}
 }
 func (m *SignalTaskResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_SignalTaskResponse.Unmarshal(m, b)

@@ -1187,7 +1190,7 @@ func (m *ExecTaskRequest) Reset() { *m = ExecTaskRequest{} }
 func (m *ExecTaskRequest) String() string { return proto.CompactTextString(m) }
 func (*ExecTaskRequest) ProtoMessage() {}
 func (*ExecTaskRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{23}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{23}
 }
 func (m *ExecTaskRequest) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ExecTaskRequest.Unmarshal(m, b)

@@ -1244,7 +1247,7 @@ func (m *ExecTaskResponse) Reset() { *m = ExecTaskResponse{} }
 func (m *ExecTaskResponse) String() string { return proto.CompactTextString(m) }
 func (*ExecTaskResponse) ProtoMessage() {}
 func (*ExecTaskResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{24}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{24}
 }
 func (m *ExecTaskResponse) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ExecTaskResponse.Unmarshal(m, b)

@@ -1303,7 +1306,7 @@ func (m *DriverCapabilities) Reset() { *m = DriverCapabilities{} }
 func (m *DriverCapabilities) String() string { return proto.CompactTextString(m) }
 func (*DriverCapabilities) ProtoMessage() {}
 func (*DriverCapabilities) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{25}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{25}
 }
 func (m *DriverCapabilities) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DriverCapabilities.Unmarshal(m, b)

@@ -1389,7 +1392,7 @@ func (m *TaskConfig) Reset() { *m = TaskConfig{} }
 func (m *TaskConfig) String() string { return proto.CompactTextString(m) }
 func (*TaskConfig) ProtoMessage() {}
 func (*TaskConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{26}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{26}
 }
 func (m *TaskConfig) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TaskConfig.Unmarshal(m, b)

@@ -1528,7 +1531,7 @@ func (m *Resources) Reset() { *m = Resources{} }
 func (m *Resources) String() string { return proto.CompactTextString(m) }
 func (*Resources) ProtoMessage() {}
 func (*Resources) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{27}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{27}
 }
 func (m *Resources) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Resources.Unmarshal(m, b)

@@ -1575,7 +1578,7 @@ func (m *AllocatedTaskResources) Reset() { *m = AllocatedTaskResources{}
 func (m *AllocatedTaskResources) String() string { return proto.CompactTextString(m) }
 func (*AllocatedTaskResources) ProtoMessage() {}
 func (*AllocatedTaskResources) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{28}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{28}
 }
 func (m *AllocatedTaskResources) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_AllocatedTaskResources.Unmarshal(m, b)

@@ -1627,7 +1630,7 @@ func (m *AllocatedCpuResources) Reset() { *m = AllocatedCpuResources{} }
 func (m *AllocatedCpuResources) String() string { return proto.CompactTextString(m) }
 func (*AllocatedCpuResources) ProtoMessage() {}
 func (*AllocatedCpuResources) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{29}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{29}
 }
 func (m *AllocatedCpuResources) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_AllocatedCpuResources.Unmarshal(m, b)

@@ -1665,7 +1668,7 @@ func (m *AllocatedMemoryResources) Reset() { *m = AllocatedMemoryResourc
 func (m *AllocatedMemoryResources) String() string { return proto.CompactTextString(m) }
 func (*AllocatedMemoryResources) ProtoMessage() {}
 func (*AllocatedMemoryResources) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{30}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{30}
 }
 func (m *AllocatedMemoryResources) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_AllocatedMemoryResources.Unmarshal(m, b)

@@ -1708,7 +1711,7 @@ func (m *NetworkResource) Reset() { *m = NetworkResource{} }
 func (m *NetworkResource) String() string { return proto.CompactTextString(m) }
 func (*NetworkResource) ProtoMessage() {}
 func (*NetworkResource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{31}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{31}
 }
 func (m *NetworkResource) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_NetworkResource.Unmarshal(m, b)

@@ -1782,7 +1785,7 @@ func (m *NetworkPort) Reset() { *m = NetworkPort{} }
 func (m *NetworkPort) String() string { return proto.CompactTextString(m) }
 func (*NetworkPort) ProtoMessage() {}
 func (*NetworkPort) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{32}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{32}
 }
 func (m *NetworkPort) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_NetworkPort.Unmarshal(m, b)

@@ -1842,7 +1845,7 @@ func (m *LinuxResources) Reset() { *m = LinuxResources{} }
 func (m *LinuxResources) String() string { return proto.CompactTextString(m) }
 func (*LinuxResources) ProtoMessage() {}
 func (*LinuxResources) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{33}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{33}
 }
 func (m *LinuxResources) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_LinuxResources.Unmarshal(m, b)

@@ -1934,7 +1937,7 @@ func (m *Mount) Reset() { *m = Mount{} }
 func (m *Mount) String() string { return proto.CompactTextString(m) }
 func (*Mount) ProtoMessage() {}
 func (*Mount) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{34}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{34}
 }
 func (m *Mount) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Mount.Unmarshal(m, b)

@@ -1997,7 +2000,7 @@ func (m *Device) Reset() { *m = Device{} }
 func (m *Device) String() string { return proto.CompactTextString(m) }
 func (*Device) ProtoMessage() {}
 func (*Device) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{35}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{35}
 }
 func (m *Device) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Device.Unmarshal(m, b)
@@ -2040,7 +2043,8 @@ func (m *Device) GetCgroupPermissions() string {

 // TaskHandle is created when starting a task and is used to recover task
 type TaskHandle struct {
-	// Version is used by the driver to version the DriverState schema
+	// Version is used by the driver to version the DriverState schema.
+	// Version 0 is reserved by Nomad and should not be used.
 	Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
 	// Config is the TaskConfig for the task
 	Config *TaskConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
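The reworded comment above reserves DriverState schema version 0 for Nomad itself, so a driver that persists its own state should start numbering at 1 and bump on incompatible changes. A sketch of how a driver might enforce that on recovery (the local type, constant, and decoding are illustrative, not from this codebase):

package main

import "fmt"

// taskHandle mirrors only the generated TaskHandle fields used here.
type taskHandle struct {
	Version     int32
	DriverState []byte
}

// driverStateVersion is this driver's current schema version; version 0 is
// reserved by Nomad, so driver-defined schemas start at 1.
const driverStateVersion int32 = 1

func recoverState(h *taskHandle) error {
	if h.Version != driverStateVersion {
		// A real driver might migrate older versions here instead of failing.
		return fmt.Errorf("unsupported DriverState version %d", h.Version)
	}
	// Decode h.DriverState with the current schema here.
	return nil
}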
@@ -2057,7 +2061,7 @@ func (m *TaskHandle) Reset() { *m = TaskHandle{} }
 func (m *TaskHandle) String() string { return proto.CompactTextString(m) }
 func (*TaskHandle) ProtoMessage() {}
 func (*TaskHandle) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{36}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{36}
 }
 func (m *TaskHandle) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TaskHandle.Unmarshal(m, b)

@@ -2124,7 +2128,7 @@ func (m *NetworkOverride) Reset() { *m = NetworkOverride{} }
 func (m *NetworkOverride) String() string { return proto.CompactTextString(m) }
 func (*NetworkOverride) ProtoMessage() {}
 func (*NetworkOverride) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{37}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{37}
 }
 func (m *NetworkOverride) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_NetworkOverride.Unmarshal(m, b)

@@ -2182,7 +2186,7 @@ func (m *ExitResult) Reset() { *m = ExitResult{} }
 func (m *ExitResult) String() string { return proto.CompactTextString(m) }
 func (*ExitResult) ProtoMessage() {}
 func (*ExitResult) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{38}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{38}
 }
 func (m *ExitResult) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ExitResult.Unmarshal(m, b)

@@ -2245,7 +2249,7 @@ func (m *TaskStatus) Reset() { *m = TaskStatus{} }
 func (m *TaskStatus) String() string { return proto.CompactTextString(m) }
 func (*TaskStatus) ProtoMessage() {}
 func (*TaskStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{39}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{39}
 }
 func (m *TaskStatus) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TaskStatus.Unmarshal(m, b)

@@ -2320,7 +2324,7 @@ func (m *TaskDriverStatus) Reset() { *m = TaskDriverStatus{} }
 func (m *TaskDriverStatus) String() string { return proto.CompactTextString(m) }
 func (*TaskDriverStatus) ProtoMessage() {}
 func (*TaskDriverStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{40}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{40}
 }
 func (m *TaskDriverStatus) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TaskDriverStatus.Unmarshal(m, b)

@@ -2365,7 +2369,7 @@ func (m *TaskStats) Reset() { *m = TaskStats{} }
 func (m *TaskStats) String() string { return proto.CompactTextString(m) }
 func (*TaskStats) ProtoMessage() {}
 func (*TaskStats) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{41}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{41}
 }
 func (m *TaskStats) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TaskStats.Unmarshal(m, b)

@@ -2427,7 +2431,7 @@ func (m *TaskResourceUsage) Reset() { *m = TaskResourceUsage{} }
 func (m *TaskResourceUsage) String() string { return proto.CompactTextString(m) }
 func (*TaskResourceUsage) ProtoMessage() {}
 func (*TaskResourceUsage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{42}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{42}
 }
 func (m *TaskResourceUsage) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_TaskResourceUsage.Unmarshal(m, b)

@@ -2479,7 +2483,7 @@ func (m *CPUUsage) Reset() { *m = CPUUsage{} }
 func (m *CPUUsage) String() string { return proto.CompactTextString(m) }
 func (*CPUUsage) ProtoMessage() {}
 func (*CPUUsage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{43}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{43}
 }
 func (m *CPUUsage) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_CPUUsage.Unmarshal(m, b)
@@ -2554,6 +2558,7 @@ type MemoryUsage struct {
 	MaxUsage       uint64 `protobuf:"varint,3,opt,name=max_usage,json=maxUsage,proto3" json:"max_usage,omitempty"`
 	KernelUsage    uint64 `protobuf:"varint,4,opt,name=kernel_usage,json=kernelUsage,proto3" json:"kernel_usage,omitempty"`
 	KernelMaxUsage uint64 `protobuf:"varint,5,opt,name=kernel_max_usage,json=kernelMaxUsage,proto3" json:"kernel_max_usage,omitempty"`
+	Usage          uint64 `protobuf:"varint,7,opt,name=usage,proto3" json:"usage,omitempty"`
 	// MeasuredFields indicates which fields were actually sampled
 	MeasuredFields       []MemoryUsage_Fields `protobuf:"varint,6,rep,packed,name=measured_fields,json=measuredFields,proto3,enum=hashicorp.nomad.plugins.drivers.proto.MemoryUsage_Fields" json:"measured_fields,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`

@@ -2565,7 +2570,7 @@ func (m *MemoryUsage) Reset() { *m = MemoryUsage{} }
 func (m *MemoryUsage) String() string { return proto.CompactTextString(m) }
 func (*MemoryUsage) ProtoMessage() {}
 func (*MemoryUsage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{44}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{44}
 }
 func (m *MemoryUsage) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_MemoryUsage.Unmarshal(m, b)

@@ -2620,6 +2625,13 @@ func (m *MemoryUsage) GetKernelMaxUsage() uint64 {
 	return 0
 }

+func (m *MemoryUsage) GetUsage() uint64 {
+	if m != nil {
+		return m.Usage
+	}
+	return 0
+}
+
 func (m *MemoryUsage) GetMeasuredFields() []MemoryUsage_Fields {
 	if m != nil {
 		return m.MeasuredFields

@@ -2649,7 +2661,7 @@ func (m *DriverTaskEvent) Reset() { *m = DriverTaskEvent{} }
 func (m *DriverTaskEvent) String() string { return proto.CompactTextString(m) }
 func (*DriverTaskEvent) ProtoMessage() {}
 func (*DriverTaskEvent) Descriptor() ([]byte, []int) {
-	return fileDescriptor_driver_c5667ca6005812fb, []int{45}
+	return fileDescriptor_driver_d79b0e12b1c93702, []int{45}
 }
 func (m *DriverTaskEvent) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DriverTaskEvent.Unmarshal(m, b)
@@ -3384,195 +3396,196 @@ var _Driver_serviceDesc = grpc.ServiceDesc{
 }

 func init() {
-	proto.RegisterFile("plugins/drivers/proto/driver.proto", fileDescriptor_driver_c5667ca6005812fb)
+	proto.RegisterFile("plugins/drivers/proto/driver.proto", fileDescriptor_driver_d79b0e12b1c93702)
 }

-var fileDescriptor_driver_c5667ca6005812fb = []byte{
+var fileDescriptor_driver_d79b0e12b1c93702 = []byte{
 	// 2962 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xdd, 0x6f, 0xe3, 0xc6,
 	0x11, 0x37, 0xf5, 0x65, 0x69, 0x64, 0xcb, 0xbc, 0xbd, 0xbb, 0x44, 0x51, 0xd0, 0xe6, 0x42, 0x20,
 	0x85, 0x91, 0xe4, 0xe4, 0xc4, 0x41, 0xef, 0xab, 0xf9, 0x52, 0x24, 0x9e, 0xed, 0x9c, 0x2d, 0xbb,
 	0x2b, 0x19, 0x97, 0x6b, 0x9b, 0xb0, 0x34, 0xb9, 0x27, 0xf1, 0x2c, 0x7e, 0x84, 0x5c, 0x3a, 0x36,
 	0x8a, 0xa2, 0x45, 0x0a, 0x14, 0xed, 0x43, 0x81, 0xbe, 0x04, 0x7d, 0x6f, 0x1f, 0xfb, 0x17, 0xb4,
 	0x45, 0xfe, 0x8f, 0x02, 0xed, 0x4b, 0x0b, 0x14, 0xe8, 0x6b, 0xff, 0x83, 0x62, 0x3f, 0x48, 0x51,
 	0xb6, 0x2f, 0x47, 0xe9, 0xf2, 0x24, 0xed, 0xec, 0xce, 0x6f, 0x87, 0x33, 0xb3, 0x33, 0xb3, 0x3b,
 	0xa0, 0x05, 0x93, 0x78, 0xe4, 0x78, 0xd1, 0x86, 0x1d, 0x3a, 0x27, 0x24, 0x8c, 0x36, 0x82, 0xd0,
 	0xa7, 0xbe, 0x1c, 0xb5, 0xf9, 0x00, 0xbd, 0x36, 0x36, 0xa3, 0xb1, 0x63, 0xf9, 0x61, 0xd0, 0xf6,
 	0x7c, 0xd7, 0xb4, 0xdb, 0x92, 0xa7, 0x2d, 0x79, 0xc4, 0xb2, 0xd6, 0x77, 0x47, 0xbe, 0x3f, 0x9a,
 	0x10, 0x81, 0x70, 0x14, 0x3f, 0xde, 0xb0, 0xe3, 0xd0, 0xa4, 0x8e, 0xef, 0xc9, 0xf9, 0x57, 0xce,
 	0xcf, 0x53, 0xc7, 0x25, 0x11, 0x35, 0xdd, 0x40, 0x2e, 0xf8, 0x70, 0xe4, 0xd0, 0x71, 0x7c, 0xd4,
 	0xb6, 0x7c, 0x77, 0x23, 0xdd, 0x72, 0x83, 0x6f, 0xb9, 0x91, 0x88, 0x19, 0x8d, 0xcd, 0x90, 0xd8,
 	0x1b, 0x63, 0x6b, 0x12, 0x05, 0xc4, 0x62, 0xbf, 0x06, 0xfb, 0x23, 0x11, 0xb6, 0xf2, 0x23, 0x44,
 	0x34, 0x8c, 0x2d, 0x9a, 0x7c, 0xaf, 0x49, 0x69, 0xe8, 0x1c, 0xc5, 0x94, 0x08, 0x20, 0xed, 0x25,
 	0x78, 0x71, 0x68, 0x46, 0xc7, 0x5d, 0xdf, 0x7b, 0xec, 0x8c, 0x06, 0xd6, 0x98, 0xb8, 0x26, 0x26,
 	0x9f, 0xc7, 0x24, 0xa2, 0xda, 0x4f, 0xa0, 0x79, 0x71, 0x2a, 0x0a, 0x7c, 0x2f, 0x22, 0xe8, 0x43,
 	0x28, 0x31, 0x69, 0x9a, 0xca, 0x0d, 0x65, 0xbd, 0xbe, 0xf9, 0x66, 0xfb, 0x69, 0x8a, 0x13, 0x32,
 	0xb4, 0xe5, 0x57, 0xb4, 0x07, 0x01, 0xb1, 0x30, 0xe7, 0xd4, 0xae, 0xc3, 0xd5, 0xae, 0x19, 0x98,
 	0x47, 0xce, 0xc4, 0xa1, 0x0e, 0x89, 0x92, 0x4d, 0x63, 0xb8, 0x36, 0x4b, 0x96, 0x1b, 0x7e, 0x0a,
 	0x2b, 0x56, 0x86, 0x2e, 0x37, 0xbe, 0xdb, 0xce, 0x65, 0xb1, 0x76, 0x8f, 0x8f, 0x66, 0x80, 0x67,
 	0xe0, 0xb4, 0x6b, 0x80, 0xee, 0x3b, 0xde, 0x88, 0x84, 0x41, 0xe8, 0x78, 0x34, 0x11, 0xe6, 0xeb,
 	0x22, 0x5c, 0x9d, 0x21, 0x4b, 0x61, 0x9e, 0x00, 0xa4, 0x7a, 0x64, 0xa2, 0x14, 0xd7, 0xeb, 0x9b,
 	0x1f, 0xe7, 0x14, 0xe5, 0x12, 0xbc, 0x76, 0x27, 0x05, 0xd3, 0x3d, 0x1a, 0x9e, 0xe1, 0x0c, 0x3a,
 	0xfa, 0x0c, 0x2a, 0x63, 0x62, 0x4e, 0xe8, 0xb8, 0x59, 0xb8, 0xa1, 0xac, 0x37, 0x36, 0xef, 0x3f,
 	0xc7, 0x3e, 0xdb, 0x1c, 0x68, 0x40, 0x4d, 0x4a, 0xb0, 0x44, 0x45, 0x37, 0x01, 0x89, 0x7f, 0x86,
 	0x4d, 0x22, 0x2b, 0x74, 0x02, 0xe6, 0xc8, 0xcd, 0xe2, 0x0d, 0x65, 0xbd, 0x86, 0xaf, 0x88, 0x99,
 	0xde, 0x74, 0xa2, 0x15, 0xc0, 0xda, 0x39, 0x69, 0x91, 0x0a, 0xc5, 0x63, 0x72, 0xc6, 0x2d, 0x52,
 	0xc3, 0xec, 0x2f, 0xda, 0x82, 0xf2, 0x89, 0x39, 0x89, 0x09, 0x17, 0xb9, 0xbe, 0xf9, 0xf6, 0xb3,
 	0xdc, 0x43, 0xba, 0xe8, 0x54, 0x0f, 0x58, 0xf0, 0xdf, 0x2b, 0xdc, 0x51, 0xb4, 0xbb, 0x50, 0xcf,
 	0xc8, 0x8d, 0x1a, 0x00, 0x87, 0xfd, 0x9e, 0x3e, 0xd4, 0xbb, 0x43, 0xbd, 0xa7, 0x2e, 0xa1, 0x55,
 	0xa8, 0x1d, 0xf6, 0xb7, 0xf5, 0xce, 0xee, 0x70, 0xfb, 0x91, 0xaa, 0xa0, 0x3a, 0x2c, 0x27, 0x83,
 	0x82, 0x76, 0x0a, 0x08, 0x13, 0xcb, 0x3f, 0x21, 0x21, 0x73, 0x64, 0x69, 0x55, 0xf4, 0x22, 0x2c,
 	0x53, 0x33, 0x3a, 0x36, 0x1c, 0x5b, 0xca, 0x5c, 0x61, 0xc3, 0x1d, 0x1b, 0xed, 0x40, 0x65, 0x6c,
 	0x7a, 0xf6, 0xe4, 0xd9, 0x72, 0xcf, 0xaa, 0x9a, 0x81, 0x6f, 0x73, 0x46, 0x2c, 0x01, 0x98, 0x77,
 	0xcf, 0xec, 0x2c, 0x0c, 0xa0, 0x3d, 0x02, 0x75, 0x40, 0xcd, 0x90, 0x66, 0xc5, 0xd1, 0xa1, 0xc4,
 	0xf6, 0x97, 0x1e, 0x3d, 0xcf, 0x9e, 0xe2, 0x64, 0x62, 0xce, 0xae, 0xfd, 0xaf, 0x00, 0x57, 0x32,
 	0xd8, 0xd2, 0x53, 0x1f, 0x42, 0x25, 0x24, 0x51, 0x3c, 0xa1, 0x1c, 0xbe, 0xb1, 0xf9, 0x41, 0x4e,
 	0xf8, 0x0b, 0x48, 0x6d, 0xcc, 0x61, 0xb0, 0x84, 0x43, 0xeb, 0xa0, 0x0a, 0x0e, 0x83, 0x84, 0xa1,
 	0x1f, 0x1a, 0x6e, 0x34, 0xe2, 0x5a, 0xab, 0xe1, 0x86, 0xa0, 0xeb, 0x8c, 0xbc, 0x17, 0x8d, 0x32,
 	0x5a, 0x2d, 0x3e, 0xa7, 0x56, 0x91, 0x09, 0xaa, 0x47, 0xe8, 0x17, 0x7e, 0x78, 0x6c, 0x30, 0xd5,
 	0x86, 0x8e, 0x4d, 0x9a, 0x25, 0x0e, 0x7a, 0x2b, 0x27, 0x68, 0x5f, 0xb0, 0xef, 0x4b, 0x6e, 0xbc,
 	0xe6, 0xcd, 0x12, 0xb4, 0x37, 0xa0, 0x22, 0xbe, 0x94, 0x79, 0xd2, 0xe0, 0xb0, 0xdb, 0xd5, 0x07,
 	0x03, 0x75, 0x09, 0xd5, 0xa0, 0x8c, 0xf5, 0x21, 0x66, 0x1e, 0x56, 0x83, 0xf2, 0xfd, 0xce, 0xb0,
 	0xb3, 0xab, 0x16, 0xb4, 0xd7, 0x61, 0xed, 0xa1, 0xe9, 0xd0, 0x3c, 0xce, 0xa5, 0xf9, 0xa0, 0x4e,
 	0xd7, 0x4a, 0xeb, 0xec, 0xcc, 0x58, 0x27, 0xbf, 0x6a, 0xf4, 0x53, 0x87, 0x9e, 0xb3, 0x87, 0x0a,
 	0x45, 0x12, 0x86, 0xd2, 0x04, 0xec, 0xaf, 0xf6, 0x05, 0xac, 0x0d, 0xa8, 0x1f, 0xe4, 0xf2, 0xfc,
 	0x77, 0x60, 0x99, 0xe5, 0x28, 0x3f, 0xa6, 0xd2, 0xf5, 0x5f, 0x6a, 0x8b, 0x1c, 0xd6, 0x4e, 0x72,
 	0x58, 0xbb, 0x27, 0x73, 0x1c, 0x4e, 0x56, 0xa2, 0x17, 0xa0, 0x12, 0x39, 0x23, 0xcf, 0x9c, 0xc8,
 	0x68, 0x21, 0x47, 0x1a, 0x62, 0x4e, 0x9e, 0x6c, 0x2c, 0x1d, 0xbf, 0x0b, 0xa8, 0x47, 0x22, 0x1a,
 	0xfa, 0x67, 0xb9, 0xe4, 0xb9, 0x06, 0xe5, 0xc7, 0x7e, 0x68, 0x89, 0x83, 0x58, 0xc5, 0x62, 0xc0,
 	0x0e, 0xd5, 0x0c, 0x88, 0xc4, 0xbe, 0x09, 0x68, 0xc7, 0x63, 0x39, 0x25, 0x9f, 0x21, 0x7e, 0x5f,
 	0x80, 0xab, 0x33, 0xeb, 0xa5, 0x31, 0x16, 0x3f, 0x87, 0x2c, 0x30, 0xc5, 0x91, 0x38, 0x87, 0x68,
 	0x1f, 0x2a, 0x62, 0x85, 0xd4, 0xe4, 0xed, 0x39, 0x80, 0x44, 0x9a, 0x92, 0x70, 0x12, 0xe6, 0x52,
 	0xa7, 0x2f, 0x7e, 0xbb, 0x4e, 0xbf, 0x05, 0x6a, 0xf2, 0x1d, 0xd1, 0x33, 0x6d, 0xd3, 0x82, 0xaa,
 	0xe3, 0x51, 0x12, 0x9e, 0x98, 0x13, 0xfe, 0x89, 0x45, 0x9c, 0x8e, 0xb5, 0x1f, 0xc3, 0x95, 0x0c,
 	0x90, 0x54, 0xec, 0x7d, 0x28, 0x47, 0x8c, 0x20, 0x35, 0xfb, 0xd6, 0x9c, 0x9a, 0x8d, 0xb0, 0x60,
 	0xd7, 0xae, 0x0a, 0x70, 0xfd, 0x84, 0x78, 0xa9, 0x98, 0x5a, 0x0f, 0xae, 0x0c, 0xb8, 0xdb, 0xe5,
 	0xf2, 0xab, 0xa9, 0xcb, 0x16, 0x66, 0x5c, 0xf6, 0x1a, 0xa0, 0x2c, 0x8a, 0x74, 0xac, 0x33, 0x58,
 	0xd3, 0x4f, 0x89, 0x95, 0x0b, 0xb9, 0x09, 0xcb, 0x96, 0xef, 0xba, 0xa6, 0x67, 0x37, 0x0b, 0x37,
 	0x8a, 0xeb, 0x35, 0x9c, 0x0c, 0xb3, 0x67, 0xab, 0x98, 0xf7, 0x6c, 0x69, 0xbf, 0x53, 0x40, 0x9d,
 	0xee, 0x2d, 0x15, 0xc9, 0xa4, 0xa7, 0x36, 0x03, 0x62, 0x7b, 0xaf, 0x60, 0x39, 0x92, 0xf4, 0xe4,
 	0xf8, 0x0b, 0x3a, 0x09, 0xc3, 0x4c, 0x78, 0x29, 0x3e, 0x67, 0x78, 0xd1, 0xfe, 0xa3, 0x00, 0xba,
 	0x58, 0x44, 0xa1, 0x57, 0x61, 0x25, 0x22, 0x9e, 0x6d, 0x08, 0x35, 0x0a, 0x0b, 0x57, 0x71, 0x9d,
 	0xd1, 0x84, 0x3e, 0x23, 0x84, 0xa0, 0x44, 0x4e, 0x89, 0x25, 0x4f, 0x32, 0xff, 0x8f, 0xc6, 0xb0,
 	0xf2, 0x38, 0x32, 0x9c, 0xc8, 0x9f, 0x98, 0x69, 0xb5, 0xd1, 0xd8, 0xd4, 0x17, 0x2e, 0xe6, 0xda,
 	0xf7, 0x07, 0x3b, 0x09, 0x18, 0xae, 0x3f, 0x8e, 0xd2, 0x81, 0xd6, 0x86, 0x7a, 0x66, 0x0e, 0x55,
|
||||
0xe0, 0xb4, 0x6b, 0x80, 0xee, 0x3b, 0xde, 0x88, 0x84, 0x41, 0xe8, 0x78, 0x34, 0x11, 0xe6, 0xeb,
|
||||
0x22, 0x5c, 0x9d, 0x21, 0x4b, 0x61, 0x9e, 0x00, 0xa4, 0x7a, 0x64, 0xa2, 0x14, 0xd7, 0xeb, 0x9b,
|
||||
0x1f, 0xe7, 0x14, 0xe5, 0x12, 0xbc, 0x76, 0x27, 0x05, 0xd3, 0x3d, 0x1a, 0x9e, 0xe1, 0x0c, 0x3a,
|
||||
0xfa, 0x0c, 0x2a, 0x63, 0x62, 0x4e, 0xe8, 0xb8, 0x59, 0xb8, 0xa1, 0xac, 0x37, 0x36, 0xef, 0x3f,
|
||||
0xc7, 0x3e, 0xdb, 0x1c, 0x68, 0x40, 0x4d, 0x4a, 0xb0, 0x44, 0x45, 0x37, 0x01, 0x89, 0x7f, 0x86,
|
||||
0x4d, 0x22, 0x2b, 0x74, 0x02, 0xe6, 0xc8, 0xcd, 0xe2, 0x0d, 0x65, 0xbd, 0x86, 0xaf, 0x88, 0x99,
|
||||
0xde, 0x74, 0xa2, 0x15, 0xc0, 0xda, 0x39, 0x69, 0x91, 0x0a, 0xc5, 0x63, 0x72, 0xc6, 0x2d, 0x52,
|
||||
0xc3, 0xec, 0x2f, 0xda, 0x82, 0xf2, 0x89, 0x39, 0x89, 0x09, 0x17, 0xb9, 0xbe, 0xf9, 0xf6, 0xb3,
|
||||
0xdc, 0x43, 0xba, 0xe8, 0x54, 0x0f, 0x58, 0xf0, 0xdf, 0x2b, 0xdc, 0x51, 0xb4, 0xbb, 0x50, 0xcf,
|
||||
0xc8, 0x8d, 0x1a, 0x00, 0x87, 0xfd, 0x9e, 0x3e, 0xd4, 0xbb, 0x43, 0xbd, 0xa7, 0x2e, 0xa1, 0x55,
|
||||
0xa8, 0x1d, 0xf6, 0xb7, 0xf5, 0xce, 0xee, 0x70, 0xfb, 0x91, 0xaa, 0xa0, 0x3a, 0x2c, 0x27, 0x83,
|
||||
0x82, 0x76, 0x0a, 0x08, 0x13, 0xcb, 0x3f, 0x21, 0x21, 0x73, 0x64, 0x69, 0x55, 0xf4, 0x22, 0x2c,
|
||||
0x53, 0x33, 0x3a, 0x36, 0x1c, 0x5b, 0xca, 0x5c, 0x61, 0xc3, 0x1d, 0x1b, 0xed, 0x40, 0x65, 0x6c,
|
||||
0x7a, 0xf6, 0xe4, 0xd9, 0x72, 0xcf, 0xaa, 0x9a, 0x81, 0x6f, 0x73, 0x46, 0x2c, 0x01, 0x98, 0x77,
|
||||
0xcf, 0xec, 0x2c, 0x0c, 0xa0, 0x3d, 0x02, 0x75, 0x40, 0xcd, 0x90, 0x66, 0xc5, 0xd1, 0xa1, 0xc4,
|
||||
0xf6, 0x97, 0x1e, 0x3d, 0xcf, 0x9e, 0xe2, 0x64, 0x62, 0xce, 0xae, 0xfd, 0xaf, 0x00, 0x57, 0x32,
|
||||
0xd8, 0xd2, 0x53, 0x1f, 0x42, 0x25, 0x24, 0x51, 0x3c, 0xa1, 0x1c, 0xbe, 0xb1, 0xf9, 0x41, 0x4e,
|
||||
0xf8, 0x0b, 0x48, 0x6d, 0xcc, 0x61, 0xb0, 0x84, 0x43, 0xeb, 0xa0, 0x0a, 0x0e, 0x83, 0x84, 0xa1,
|
||||
0x1f, 0x1a, 0x6e, 0x34, 0xe2, 0x5a, 0xab, 0xe1, 0x86, 0xa0, 0xeb, 0x8c, 0xbc, 0x17, 0x8d, 0x32,
|
||||
0x5a, 0x2d, 0x3e, 0xa7, 0x56, 0x91, 0x09, 0xaa, 0x47, 0xe8, 0x17, 0x7e, 0x78, 0x6c, 0x30, 0xd5,
|
||||
0x86, 0x8e, 0x4d, 0x9a, 0x25, 0x0e, 0x7a, 0x2b, 0x27, 0x68, 0x5f, 0xb0, 0xef, 0x4b, 0x6e, 0xbc,
|
||||
0xe6, 0xcd, 0x12, 0xb4, 0x37, 0xa0, 0x22, 0xbe, 0x94, 0x79, 0xd2, 0xe0, 0xb0, 0xdb, 0xd5, 0x07,
|
||||
0x03, 0x75, 0x09, 0xd5, 0xa0, 0x8c, 0xf5, 0x21, 0x66, 0x1e, 0x56, 0x83, 0xf2, 0xfd, 0xce, 0xb0,
|
||||
0xb3, 0xab, 0x16, 0xb4, 0xd7, 0x61, 0xed, 0xa1, 0xe9, 0xd0, 0x3c, 0xce, 0xa5, 0xf9, 0xa0, 0x4e,
|
||||
0xd7, 0x4a, 0xeb, 0xec, 0xcc, 0x58, 0x27, 0xbf, 0x6a, 0xf4, 0x53, 0x87, 0x9e, 0xb3, 0x87, 0x0a,
|
||||
0x45, 0x12, 0x86, 0xd2, 0x04, 0xec, 0xaf, 0xf6, 0x05, 0xac, 0x0d, 0xa8, 0x1f, 0xe4, 0xf2, 0xfc,
|
||||
0x77, 0x60, 0x99, 0xe5, 0x28, 0x3f, 0xa6, 0xd2, 0xf5, 0x5f, 0x6a, 0x8b, 0x1c, 0xd6, 0x4e, 0x72,
|
||||
0x58, 0xbb, 0x27, 0x73, 0x1c, 0x4e, 0x56, 0xa2, 0x17, 0xa0, 0x12, 0x39, 0x23, 0xcf, 0x9c, 0xc8,
|
||||
0x68, 0x21, 0x47, 0x1a, 0x62, 0x4e, 0x9e, 0x6c, 0x2c, 0x1d, 0xbf, 0x0b, 0xa8, 0x47, 0x22, 0x1a,
|
||||
0xfa, 0x67, 0xb9, 0xe4, 0xb9, 0x06, 0xe5, 0xc7, 0x7e, 0x68, 0x89, 0x83, 0x58, 0xc5, 0x62, 0xc0,
|
||||
0x0e, 0xd5, 0x0c, 0x88, 0xc4, 0xbe, 0x09, 0x68, 0xc7, 0x63, 0x39, 0x25, 0x9f, 0x21, 0x7e, 0x5f,
|
||||
0x80, 0xab, 0x33, 0xeb, 0xa5, 0x31, 0x16, 0x3f, 0x87, 0x2c, 0x30, 0xc5, 0x91, 0x38, 0x87, 0x68,
|
||||
0x1f, 0x2a, 0x62, 0x85, 0xd4, 0xe4, 0xed, 0x39, 0x80, 0x44, 0x9a, 0x92, 0x70, 0x12, 0xe6, 0x52,
|
||||
0xa7, 0x2f, 0x7e, 0xbb, 0x4e, 0xbf, 0x05, 0x6a, 0xf2, 0x1d, 0xd1, 0x33, 0x6d, 0xd3, 0x82, 0xaa,
|
||||
0xe3, 0x51, 0x12, 0x9e, 0x98, 0x13, 0xfe, 0x89, 0x45, 0x9c, 0x8e, 0xb5, 0x1f, 0xc3, 0x95, 0x0c,
|
||||
0x90, 0x54, 0xec, 0x7d, 0x28, 0x47, 0x8c, 0x20, 0x35, 0xfb, 0xd6, 0x9c, 0x9a, 0x8d, 0xb0, 0x60,
|
||||
0xd7, 0xae, 0x0a, 0x70, 0xfd, 0x84, 0x78, 0xa9, 0x98, 0x5a, 0x0f, 0xae, 0x0c, 0xb8, 0xdb, 0xe5,
|
||||
0xf2, 0xab, 0xa9, 0xcb, 0x16, 0x66, 0x5c, 0xf6, 0x1a, 0xa0, 0x2c, 0x8a, 0x74, 0xac, 0x33, 0x58,
|
||||
0xd3, 0x4f, 0x89, 0x95, 0x0b, 0xb9, 0x09, 0xcb, 0x96, 0xef, 0xba, 0xa6, 0x67, 0x37, 0x0b, 0x37,
|
||||
0x8a, 0xeb, 0x35, 0x9c, 0x0c, 0xb3, 0x67, 0xab, 0x98, 0xf7, 0x6c, 0x69, 0xbf, 0x53, 0x40, 0x9d,
|
||||
0xee, 0x2d, 0x15, 0xc9, 0xa4, 0xa7, 0x36, 0x03, 0x62, 0x7b, 0xaf, 0x60, 0x39, 0x92, 0xf4, 0xe4,
|
||||
0xf8, 0x0b, 0x3a, 0x09, 0xc3, 0x4c, 0x78, 0x29, 0x3e, 0x67, 0x78, 0xd1, 0xfe, 0xa3, 0x00, 0xba,
|
||||
0x58, 0x44, 0xa1, 0x57, 0x61, 0x25, 0x22, 0x9e, 0x6d, 0x08, 0x35, 0x0a, 0x0b, 0x57, 0x71, 0x9d,
|
||||
0xd1, 0x84, 0x3e, 0x23, 0x84, 0xa0, 0x44, 0x4e, 0x89, 0x25, 0x4f, 0x32, 0xff, 0x8f, 0xc6, 0xb0,
|
||||
0xf2, 0x38, 0x32, 0x9c, 0xc8, 0x9f, 0x98, 0x69, 0xb5, 0xd1, 0xd8, 0xd4, 0x17, 0x2e, 0xe6, 0xda,
|
||||
0xf7, 0x07, 0x3b, 0x09, 0x18, 0xae, 0x3f, 0x8e, 0xd2, 0x81, 0xd6, 0x86, 0x7a, 0x66, 0x0e, 0x55,
|
||||
0xa1, 0xd4, 0xdf, 0xef, 0xeb, 0xea, 0x12, 0x02, 0xa8, 0x74, 0xb7, 0xf1, 0xfe, 0xfe, 0x50, 0x44,
|
||||
0xf4, 0x9d, 0xbd, 0xce, 0x96, 0xae, 0x16, 0xb4, 0xbf, 0x54, 0x00, 0xa6, 0xa9, 0x15, 0x35, 0xa0,
|
||||
0x90, 0x5a, 0xba, 0xe0, 0xd8, 0xec, 0x63, 0x3c, 0xd3, 0x25, 0xd2, 0x7b, 0xf8, 0x7f, 0xb4, 0x09,
|
||||
0xd7, 0xdd, 0x68, 0x14, 0x98, 0xd6, 0xb1, 0x21, 0x33, 0xa2, 0xc5, 0x99, 0xf9, 0x57, 0xad, 0xe0,
|
||||
0xab, 0x72, 0x52, 0x4a, 0x2d, 0x70, 0x77, 0xa1, 0x48, 0xbc, 0x93, 0x66, 0x89, 0x57, 0x8e, 0xf7,
|
||||
0xe6, 0x4e, 0xf9, 0x6d, 0xdd, 0x3b, 0x11, 0x95, 0x22, 0x83, 0x41, 0x06, 0x80, 0x4d, 0x4e, 0x1c,
|
||||
0x8b, 0x18, 0x0c, 0xb4, 0xcc, 0x41, 0x3f, 0x9c, 0x1f, 0xb4, 0xc7, 0x31, 0x52, 0xe8, 0x9a, 0x9d,
|
||||
0x8c, 0x51, 0x1f, 0x6a, 0x21, 0x89, 0xfc, 0x38, 0xb4, 0x48, 0xd4, 0xac, 0xcc, 0x75, 0x8a, 0x71,
|
||||
0xc2, 0x87, 0xa7, 0x10, 0xa8, 0x07, 0x15, 0xd7, 0x8f, 0x3d, 0x1a, 0x35, 0x97, 0xb9, 0xb0, 0x6f,
|
||||
0xe6, 0x04, 0xdb, 0x63, 0x4c, 0x58, 0xf2, 0xa2, 0x2d, 0x58, 0x16, 0x22, 0x46, 0xcd, 0x2a, 0x87,
|
||||
0xb9, 0x99, 0xd7, 0x81, 0x38, 0x17, 0x4e, 0xb8, 0x99, 0x55, 0xe3, 0x88, 0x84, 0xcd, 0x9a, 0xb0,
|
||||
0x2a, 0xfb, 0x8f, 0x5e, 0x86, 0x9a, 0x39, 0x99, 0xf8, 0x96, 0x61, 0x3b, 0x61, 0x13, 0xf8, 0x44,
|
||||
0x95, 0x13, 0x7a, 0x4e, 0x88, 0x5e, 0x81, 0xba, 0x38, 0x7a, 0x46, 0x60, 0xd2, 0x71, 0xb3, 0xce,
|
||||
0xa7, 0x41, 0x90, 0x0e, 0x4c, 0x3a, 0x96, 0x0b, 0x48, 0x18, 0x8a, 0x05, 0x2b, 0xe9, 0x02, 0x12,
|
||||
0x86, 0x7c, 0xc1, 0xf7, 0x60, 0x8d, 0xc7, 0x91, 0x51, 0xe8, 0xc7, 0x81, 0xc1, 0x7d, 0x6a, 0x95,
|
||||
0x2f, 0x5a, 0x65, 0xe4, 0x2d, 0x46, 0xed, 0x33, 0xe7, 0x7a, 0x09, 0xaa, 0x4f, 0xfc, 0x23, 0xb1,
|
||||
0xa0, 0xc1, 0x17, 0x2c, 0x3f, 0xf1, 0x8f, 0x92, 0x29, 0x21, 0xa1, 0x63, 0x37, 0xd7, 0xc4, 0x14,
|
||||
0x1f, 0xef, 0xd8, 0xad, 0x5b, 0x50, 0x4d, 0xcc, 0x78, 0x49, 0x75, 0x7e, 0x2d, 0x5b, 0x9d, 0xd7,
|
||||
0x32, 0xa5, 0x76, 0xeb, 0x5d, 0x68, 0xcc, 0x3a, 0xc1, 0x3c, 0xdc, 0xda, 0x3f, 0x14, 0xa8, 0xa5,
|
||||
0xe6, 0x46, 0x1e, 0x5c, 0xe5, 0xe2, 0x98, 0x94, 0xd8, 0xc6, 0xd4, 0x7b, 0x44, 0x0e, 0x78, 0x2f,
|
||||
0xa7, 0xa5, 0x3a, 0x09, 0x82, 0x8c, 0x83, 0xd2, 0x95, 0x50, 0x8a, 0x3c, 0xdd, 0xef, 0x33, 0x58,
|
||||
0x9b, 0x38, 0x5e, 0x7c, 0x9a, 0xd9, 0x4b, 0x24, 0xe0, 0xef, 0xe7, 0xdc, 0x6b, 0x97, 0x71, 0x4f,
|
||||
0xf7, 0x68, 0x4c, 0x66, 0xc6, 0xda, 0x57, 0x05, 0x78, 0xe1, 0x72, 0x71, 0x50, 0x1f, 0x8a, 0x56,
|
||||
0x10, 0xcb, 0x4f, 0x7b, 0x77, 0xde, 0x4f, 0xeb, 0x06, 0xf1, 0x74, 0x57, 0x06, 0xc4, 0x8a, 0x76,
|
||||
0x97, 0xb8, 0x7e, 0x78, 0x26, 0xbf, 0xe0, 0x83, 0x79, 0x21, 0xf7, 0x38, 0xf7, 0x14, 0x55, 0xc2,
|
||||
0x21, 0x0c, 0x55, 0x99, 0xfa, 0x23, 0x19, 0x26, 0xe6, 0x2c, 0x21, 0x12, 0x48, 0x9c, 0xe2, 0x68,
|
||||
0xb7, 0xe0, 0xfa, 0xa5, 0x9f, 0x82, 0xbe, 0x03, 0x60, 0x05, 0xb1, 0xc1, 0xaf, 0x78, 0xc2, 0xee,
|
||||
0x45, 0x5c, 0xb3, 0x82, 0x78, 0xc0, 0x09, 0xda, 0x6d, 0x68, 0x3e, 0x4d, 0x5e, 0x76, 0xf8, 0x84,
|
||||
0xc4, 0x86, 0x7b, 0x94, 0xd4, 0x18, 0x82, 0xb0, 0x77, 0xa4, 0xfd, 0xa1, 0x00, 0x6b, 0xe7, 0xc4,
|
||||
0x61, 0x19, 0x50, 0x1c, 0xe6, 0x24, 0x2b, 0x8b, 0x11, 0x3b, 0xd9, 0x96, 0x63, 0x27, 0x65, 0x31,
|
||||
0xff, 0xcf, 0x63, 0x7a, 0x20, 0x4b, 0xd6, 0x82, 0x13, 0x30, 0x87, 0x76, 0x8f, 0x1c, 0x1a, 0xf1,
|
||||
0x9b, 0x44, 0x19, 0x8b, 0x01, 0x7a, 0x04, 0x8d, 0x90, 0x44, 0x24, 0x3c, 0x21, 0xb6, 0x11, 0xf8,
|
||||
0x21, 0x4d, 0x14, 0xb6, 0x39, 0x9f, 0xc2, 0x0e, 0xfc, 0x90, 0xe2, 0xd5, 0x04, 0x89, 0x8d, 0x22,
|
||||
0xf4, 0x10, 0x56, 0xed, 0x33, 0xcf, 0x74, 0x1d, 0x4b, 0x22, 0x57, 0x16, 0x46, 0x5e, 0x91, 0x40,
|
||||
0x1c, 0x98, 0xdd, 0x94, 0x33, 0x93, 0xec, 0xc3, 0x26, 0xe6, 0x11, 0x99, 0x48, 0x9d, 0x88, 0xc1,
|
||||
0xec, 0xf9, 0x2d, 0xcb, 0xf3, 0xab, 0xfd, 0xa9, 0x00, 0x8d, 0xd9, 0x03, 0x90, 0xd8, 0x2f, 0x20,
|
||||
0xa1, 0xe3, 0xdb, 0x19, 0xfb, 0x1d, 0x70, 0x02, 0xb3, 0x11, 0x9b, 0xfe, 0x3c, 0xf6, 0xa9, 0x99,
|
||||
0xd8, 0xc8, 0x0a, 0xe2, 0x1f, 0xb2, 0xf1, 0x39, 0xdb, 0x17, 0xcf, 0xd9, 0x1e, 0xbd, 0x09, 0x48,
|
||||
0xda, 0x77, 0xe2, 0xb8, 0x0e, 0x35, 0x8e, 0xce, 0x28, 0x11, 0xfa, 0x2f, 0x62, 0x55, 0xcc, 0xec,
|
||||
0xb2, 0x89, 0x8f, 0x18, 0x1d, 0x69, 0xb0, 0xea, 0xfb, 0xae, 0x11, 0x59, 0x7e, 0x48, 0x0c, 0xd3,
|
||||
0x7e, 0xd2, 0x2c, 0xf3, 0x85, 0x75, 0xdf, 0x77, 0x07, 0x8c, 0xd6, 0xb1, 0x9f, 0xb0, 0x80, 0x6b,
|
||||
0x05, 0x71, 0x44, 0xa8, 0xc1, 0x7e, 0x78, 0x8e, 0xaa, 0x61, 0x10, 0xa4, 0x6e, 0x10, 0x47, 0x99,
|
||||
0x05, 0x2e, 0x71, 0x59, 0xde, 0xc9, 0x2c, 0xd8, 0x23, 0x2e, 0xdb, 0x65, 0xe5, 0x80, 0x84, 0x16,
|
||||
0xf1, 0xe8, 0xd0, 0xb1, 0x8e, 0x59, 0x4a, 0x51, 0xd6, 0x15, 0x3c, 0x43, 0xd3, 0x3e, 0x85, 0x32,
|
||||
0x4f, 0x41, 0xec, 0xe3, 0x79, 0xf8, 0xe6, 0xd1, 0x5d, 0xa8, 0xb7, 0xca, 0x08, 0x3c, 0xb6, 0xbf,
|
||||
0x0c, 0xb5, 0xb1, 0x1f, 0xc9, 0xdc, 0x20, 0x3c, 0xaf, 0xca, 0x08, 0x7c, 0xb2, 0x05, 0xd5, 0x90,
|
||||
0x98, 0xb6, 0xef, 0x4d, 0xce, 0xb8, 0x5e, 0xaa, 0x38, 0x1d, 0x6b, 0x9f, 0x43, 0x45, 0x84, 0xdf,
|
||||
0xe7, 0xc0, 0xbf, 0x09, 0xc8, 0x12, 0x49, 0x25, 0x20, 0xa1, 0xeb, 0x44, 0x91, 0xe3, 0x7b, 0x51,
|
||||
0xf2, 0x9c, 0x23, 0x66, 0x0e, 0xa6, 0x13, 0xda, 0x3f, 0x15, 0x51, 0xef, 0x88, 0x8b, 0x36, 0xab,
|
||||
0x62, 0x99, 0xa7, 0xb1, 0x9a, 0x4c, 0xe1, 0xee, 0x91, 0x0c, 0x59, 0x2d, 0x29, 0xcb, 0x9a, 0xc2,
|
||||
0xa2, 0xef, 0x14, 0x12, 0x20, 0xb9, 0x0f, 0x10, 0x59, 0xf6, 0xcd, 0x7b, 0x1f, 0x20, 0xe2, 0x3e,
|
||||
0x40, 0x58, 0xf1, 0x29, 0x0b, 0x2e, 0x01, 0x57, 0xe2, 0xf5, 0x56, 0xdd, 0x4e, 0x2f, 0x51, 0x44,
|
||||
0xfb, 0xaf, 0x92, 0xc6, 0x8a, 0xe4, 0xb2, 0x83, 0x3e, 0x83, 0x2a, 0x3b, 0x76, 0x86, 0x6b, 0x06,
|
||||
0xf2, 0xe9, 0xae, 0xbb, 0xd8, 0x3d, 0xaa, 0xcd, 0x4e, 0xd9, 0x9e, 0x19, 0x88, 0x72, 0x69, 0x39,
|
||||
0x10, 0x23, 0x16, 0x73, 0x4c, 0x7b, 0x1a, 0x73, 0xd8, 0x7f, 0xf4, 0x1a, 0x34, 0xcc, 0x98, 0xfa,
|
||||
0x86, 0x69, 0x9f, 0x90, 0x90, 0x3a, 0x11, 0x91, 0xb6, 0x5f, 0x65, 0xd4, 0x4e, 0x42, 0x6c, 0xdd,
|
||||
0x83, 0x95, 0x2c, 0xe6, 0xb3, 0xb2, 0x6f, 0x39, 0x9b, 0x7d, 0x7f, 0x0a, 0x30, 0xad, 0xdb, 0x99,
|
||||
0x8f, 0x90, 0x53, 0x87, 0x1a, 0x96, 0x6f, 0x13, 0x69, 0xca, 0x2a, 0x23, 0x74, 0x7d, 0x9b, 0x9c,
|
||||
0xbb, 0x05, 0x95, 0x93, 0x5b, 0x10, 0x3b, 0xb5, 0xec, 0xa0, 0x1d, 0x3b, 0x93, 0x09, 0xb1, 0xa5,
|
||||
0x84, 0x35, 0xdf, 0x77, 0x1f, 0x70, 0x82, 0xf6, 0x75, 0x41, 0xf8, 0x8a, 0xb8, 0x9f, 0xe6, 0xaa,
|
||||
0x8d, 0xbf, 0x2d, 0x53, 0xdf, 0x05, 0x88, 0xa8, 0x19, 0xb2, 0x52, 0xc2, 0xa4, 0xf2, 0xc9, 0xa7,
|
||||
0x75, 0xe1, 0x1a, 0x35, 0x4c, 0x9e, 0xd9, 0x71, 0x4d, 0xae, 0xee, 0x50, 0xf4, 0x1e, 0xac, 0x58,
|
||||
0xbe, 0x1b, 0x4c, 0x88, 0x64, 0x2e, 0x3f, 0x93, 0xb9, 0x9e, 0xae, 0xef, 0xd0, 0xcc, 0x1d, 0xaa,
|
||||
0xf2, 0xbc, 0x77, 0xa8, 0xbf, 0x2a, 0xe2, 0x9a, 0x9d, 0xbd, 0xe5, 0xa3, 0xd1, 0x25, 0x4f, 0xc9,
|
||||
0x5b, 0x0b, 0x3e, 0x19, 0x7c, 0xd3, 0x3b, 0x72, 0xeb, 0xbd, 0x3c, 0x0f, 0xb7, 0x4f, 0x2f, 0xee,
|
||||
0xfe, 0x56, 0x84, 0x5a, 0x7a, 0x23, 0xbf, 0x60, 0xfb, 0x3b, 0x50, 0x4b, 0x7b, 0x1c, 0x32, 0x40,
|
||||
0x7c, 0xa3, 0x79, 0xd2, 0xc5, 0xe8, 0x31, 0x20, 0x73, 0x34, 0x4a, 0x8b, 0x36, 0x23, 0x8e, 0xcc,
|
||||
0x51, 0xf2, 0xbe, 0x71, 0x67, 0x0e, 0x3d, 0x24, 0x79, 0xeb, 0x90, 0xf1, 0x63, 0xd5, 0x1c, 0x8d,
|
||||
0x66, 0x28, 0xe8, 0x67, 0x70, 0x7d, 0x76, 0x0f, 0xe3, 0xe8, 0xcc, 0x08, 0x1c, 0x5b, 0xde, 0xc1,
|
||||
0xb6, 0xe7, 0x7d, 0x94, 0x68, 0xcf, 0xc0, 0x7f, 0x74, 0x76, 0xe0, 0xd8, 0x42, 0xe7, 0x28, 0xbc,
|
||||
0x30, 0xd1, 0xfa, 0x05, 0xbc, 0xf8, 0x94, 0xe5, 0x97, 0xd8, 0xa0, 0x3f, 0xfb, 0x78, 0xbe, 0xb8,
|
||||
0x12, 0x32, 0xd6, 0xfb, 0xa3, 0x22, 0xde, 0x4e, 0x66, 0x75, 0xd2, 0xc9, 0xd6, 0xad, 0x1b, 0x39,
|
||||
0xf7, 0xe9, 0x1e, 0x1c, 0x0a, 0x78, 0x5e, 0xaa, 0x7e, 0x7c, 0xae, 0x54, 0xcd, 0x5b, 0xc4, 0x88,
|
||||
0x8a, 0x4f, 0x00, 0x49, 0x04, 0xed, 0xcf, 0x45, 0xa8, 0x26, 0xe8, 0xfc, 0x06, 0x75, 0x16, 0x51,
|
||||
0xe2, 0x1a, 0x6e, 0x12, 0xc2, 0x14, 0x0c, 0x82, 0xb4, 0xc7, 0x82, 0xd8, 0xcb, 0x50, 0x63, 0x17,
|
||||
0x35, 0x31, 0x5d, 0xe0, 0xd3, 0x55, 0x46, 0xe0, 0x93, 0xaf, 0x40, 0x9d, 0xfa, 0xd4, 0x9c, 0x18,
|
||||
0x94, 0xe7, 0xf2, 0xa2, 0xe0, 0xe6, 0x24, 0x9e, 0xc9, 0xd1, 0x1b, 0x70, 0x85, 0x8e, 0x43, 0x9f,
|
||||
0xd2, 0x09, 0xab, 0xef, 0x78, 0x45, 0x23, 0x0a, 0x90, 0x12, 0x56, 0xd3, 0x09, 0x51, 0xe9, 0x44,
|
||||
0x2c, 0x7a, 0x4f, 0x17, 0x33, 0xd7, 0xe5, 0x41, 0xa4, 0x84, 0x57, 0x53, 0x2a, 0x73, 0x6d, 0x96,
|
||||
0x3c, 0x03, 0x51, 0x2d, 0xf0, 0x58, 0xa1, 0xe0, 0x64, 0x88, 0x0c, 0x58, 0x73, 0x89, 0x19, 0xc5,
|
||||
0x21, 0xb1, 0x8d, 0xc7, 0x0e, 0x99, 0xd8, 0xe2, 0xe2, 0xdb, 0xc8, 0x5d, 0x7e, 0x27, 0x6a, 0x69,
|
||||
0xdf, 0xe7, 0xdc, 0xb8, 0x91, 0xc0, 0x89, 0x31, 0xab, 0x1c, 0xc4, 0x3f, 0xb4, 0x06, 0xf5, 0xc1,
|
||||
0xa3, 0xc1, 0x50, 0xdf, 0x33, 0xf6, 0xf6, 0x7b, 0xba, 0xec, 0x8f, 0x0c, 0x74, 0x2c, 0x86, 0x0a,
|
||||
0x9b, 0x1f, 0xee, 0x0f, 0x3b, 0xbb, 0xc6, 0x70, 0xa7, 0xfb, 0x60, 0xa0, 0x16, 0xd0, 0x75, 0xb8,
|
||||
0x32, 0xdc, 0xc6, 0xfb, 0xc3, 0xe1, 0xae, 0xde, 0x33, 0x0e, 0x74, 0xbc, 0xb3, 0xdf, 0x1b, 0xa8,
|
||||
0x45, 0x84, 0xa0, 0x31, 0x25, 0x0f, 0x77, 0xf6, 0x74, 0xb5, 0x84, 0xea, 0xb0, 0x7c, 0xa0, 0xe3,
|
||||
0xae, 0xde, 0x1f, 0xaa, 0x65, 0xed, 0xef, 0x05, 0xa8, 0x67, 0xac, 0xc8, 0x1c, 0x39, 0x8c, 0x44,
|
||||
0x9d, 0x5f, 0xc2, 0xec, 0x2f, 0x0b, 0x26, 0x96, 0x69, 0x8d, 0x85, 0x75, 0x4a, 0x58, 0x0c, 0x78,
|
||||
0x6d, 0x6f, 0x9e, 0x66, 0xce, 0x79, 0x09, 0x57, 0x5d, 0xf3, 0x54, 0x80, 0xbc, 0x0a, 0x2b, 0xc7,
|
||||
0x24, 0xf4, 0xc8, 0x44, 0xce, 0x0b, 0x8b, 0xd4, 0x05, 0x4d, 0x2c, 0x59, 0x07, 0x55, 0x2e, 0x99,
|
||||
0xc2, 0x08, 0x73, 0x34, 0x04, 0x7d, 0x2f, 0x01, 0x3b, 0xba, 0xa8, 0xf5, 0x0a, 0xd7, 0xfa, 0xdd,
|
||||
0xf9, 0x9d, 0xf4, 0x69, 0x8a, 0x1f, 0xa4, 0x8a, 0x5f, 0x86, 0x22, 0x4e, 0x5a, 0x05, 0xdd, 0x4e,
|
||||
0x77, 0x9b, 0x29, 0x7b, 0x15, 0x6a, 0x7b, 0x9d, 0x4f, 0x8c, 0xc3, 0x01, 0x7f, 0x5c, 0x42, 0x2a,
|
||||
0xac, 0x3c, 0xd0, 0x71, 0x5f, 0xdf, 0x95, 0x94, 0x22, 0xba, 0x06, 0xaa, 0xa4, 0x4c, 0xd7, 0x95,
|
||||
0xb4, 0x7f, 0x15, 0x60, 0x4d, 0xc4, 0xf5, 0xf4, 0xbd, 0xf3, 0xe9, 0x0f, 0x8f, 0xd9, 0x67, 0x80,
|
||||
0xc2, 0xcc, 0x33, 0x40, 0x5a, 0x45, 0xf2, 0xb4, 0x5c, 0x9c, 0x56, 0x91, 0xfc, 0xf9, 0x60, 0x26,
|
||||
0x64, 0x97, 0xe6, 0x09, 0xd9, 0x4d, 0x58, 0x76, 0x49, 0x94, 0x2a, 0xbe, 0x86, 0x93, 0x21, 0x72,
|
||||
0xa0, 0x6e, 0x7a, 0x9e, 0x4f, 0xf9, 0x63, 0x5b, 0x72, 0xaf, 0xd9, 0x9a, 0xeb, 0x59, 0x2f, 0xfd,
|
||||
0xe2, 0x76, 0x67, 0x8a, 0x24, 0x22, 0x6b, 0x16, 0xbb, 0xf5, 0x3e, 0xa8, 0xe7, 0x17, 0xcc, 0x93,
|
||||
0xcf, 0x5e, 0x7f, 0x7b, 0x9a, 0xce, 0x08, 0x73, 0xec, 0xc3, 0xfe, 0x83, 0xfe, 0xfe, 0xc3, 0xbe,
|
||||
0xba, 0xc4, 0x06, 0xf8, 0xb0, 0xdf, 0xdf, 0xe9, 0x6f, 0xa9, 0x0a, 0x02, 0xa8, 0xe8, 0x9f, 0xec,
|
||||
0x0c, 0xf5, 0x9e, 0x5a, 0xd8, 0xfc, 0xf7, 0x2a, 0x54, 0x84, 0x90, 0xe8, 0x2b, 0x99, 0xca, 0xb3,
|
||||
0xbd, 0x71, 0xf4, 0xfe, 0xdc, 0x25, 0xf1, 0x4c, 0xbf, 0xbd, 0xf5, 0xc1, 0xc2, 0xfc, 0xf2, 0xbd,
|
||||
0x7a, 0x09, 0xfd, 0x56, 0x81, 0x95, 0x99, 0x07, 0xda, 0xbc, 0x6f, 0x8b, 0x97, 0xb4, 0xe2, 0x5b,
|
||||
0x3f, 0x58, 0x88, 0x37, 0x95, 0xe5, 0x37, 0x0a, 0xd4, 0x33, 0x4d, 0x68, 0x74, 0x77, 0x91, 0xc6,
|
||||
0xb5, 0x90, 0xe4, 0xde, 0xe2, 0x3d, 0x6f, 0x6d, 0xe9, 0x2d, 0x05, 0xfd, 0x5a, 0x81, 0x7a, 0xa6,
|
||||
0x1d, 0x9b, 0x5b, 0x94, 0x8b, 0xcd, 0xe3, 0xdc, 0xa2, 0x5c, 0xd6, 0xfd, 0x5d, 0x42, 0xbf, 0x54,
|
||||
0xa0, 0x96, 0xb6, 0x56, 0xd1, 0xed, 0xf9, 0x9b, 0xb1, 0x42, 0x88, 0x3b, 0x8b, 0x76, 0x71, 0xb5,
|
||||
0x25, 0xf4, 0x73, 0xa8, 0x26, 0x7d, 0x48, 0x94, 0x37, 0xfd, 0x9c, 0x6b, 0x72, 0xb6, 0x6e, 0xcf,
|
||||
0xcd, 0x97, 0xdd, 0x3e, 0x69, 0x0e, 0xe6, 0xde, 0xfe, 0x5c, 0x1b, 0xb3, 0x75, 0x7b, 0x6e, 0xbe,
|
||||
0x74, 0x7b, 0xe6, 0x09, 0x99, 0x1e, 0x62, 0x6e, 0x4f, 0xb8, 0xd8, 0xbc, 0xcc, 0xed, 0x09, 0x97,
|
||||
0xb5, 0x2c, 0x85, 0x20, 0x99, 0x2e, 0x64, 0x6e, 0x41, 0x2e, 0x76, 0x3a, 0x73, 0x0b, 0x72, 0x49,
|
||||
0xd3, 0x53, 0x5b, 0x42, 0x5f, 0x2a, 0xd9, 0xc2, 0xfe, 0xf6, 0xdc, 0xcd, 0xb9, 0x39, 0x5d, 0xf2,
|
||||
0x42, 0x7b, 0x90, 0x1f, 0xd0, 0x2f, 0xe5, 0x33, 0x84, 0xe8, 0xed, 0xa1, 0x79, 0xc0, 0x66, 0xda,
|
||||
0x81, 0xad, 0x5b, 0x8b, 0x25, 0x1b, 0x2e, 0xc4, 0xaf, 0x14, 0x80, 0x69, 0x17, 0x30, 0xb7, 0x10,
|
||||
0x17, 0xda, 0x8f, 0xad, 0xbb, 0x0b, 0x70, 0x66, 0x0f, 0x48, 0xd2, 0xf8, 0xcb, 0x7d, 0x40, 0xce,
|
||||
0x75, 0x29, 0x73, 0x1f, 0x90, 0xf3, 0x1d, 0x46, 0x6d, 0xe9, 0xa3, 0xe5, 0x1f, 0x95, 0x45, 0xf6,
|
||||
0xaf, 0xf0, 0x9f, 0x77, 0xfe, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x6e, 0xa6, 0xcf, 0xe1, 0x38, 0x27,
|
||||
var fileDescriptor_driver_d79b0e12b1c93702 = []byte{
|
||||
// 2978 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xdb, 0x6f, 0xe3, 0xc6,
|
||||
0xd5, 0x37, 0x75, 0xb3, 0x74, 0x64, 0xcb, 0xdc, 0xd9, 0xdd, 0x44, 0x51, 0xf0, 0x7d, 0xd9, 0x10,
|
||||
0xc8, 0x07, 0x23, 0xc9, 0xca, 0x89, 0x83, 0x6f, 0x6f, 0xcd, 0x4d, 0x91, 0xb8, 0xb6, 0xb3, 0xb6,
|
||||
0xec, 0x8e, 0x64, 0x6c, 0xb6, 0x6d, 0x96, 0xa5, 0xc9, 0x59, 0x89, 0x6b, 0xf1, 0x12, 0x72, 0xe8,
|
||||
0xd8, 0x28, 0x8a, 0x16, 0x29, 0x50, 0xb4, 0x0f, 0x05, 0xfa, 0x12, 0xf4, 0xbd, 0x7d, 0xec, 0x5f,
|
||||
0xd0, 0x16, 0xf9, 0x4b, 0xda, 0x97, 0x16, 0x28, 0xd0, 0xd7, 0x3e, 0xf6, 0xad, 0x98, 0x0b, 0x29,
|
||||
0xca, 0xf6, 0x66, 0x29, 0x6d, 0x9e, 0xc8, 0x73, 0x66, 0xce, 0x6f, 0xce, 0xcc, 0x39, 0x33, 0xe7,
|
||||
0xcc, 0x1c, 0xd0, 0x82, 0x49, 0x3c, 0x72, 0xbc, 0x68, 0xc3, 0x0e, 0x9d, 0x13, 0x12, 0x46, 0x1b,
|
||||
0x41, 0xe8, 0x53, 0x5f, 0x52, 0x6d, 0x4e, 0xa0, 0x37, 0xc6, 0x66, 0x34, 0x76, 0x2c, 0x3f, 0x0c,
|
||||
0xda, 0x9e, 0xef, 0x9a, 0x76, 0x5b, 0xca, 0xb4, 0xa5, 0x8c, 0xe8, 0xd6, 0xfa, 0xdf, 0x91, 0xef,
|
||||
0x8f, 0x26, 0x44, 0x20, 0x1c, 0xc5, 0x4f, 0x36, 0xec, 0x38, 0x34, 0xa9, 0xe3, 0x7b, 0xb2, 0xfd,
|
||||
0xb5, 0xf3, 0xed, 0xd4, 0x71, 0x49, 0x44, 0x4d, 0x37, 0x90, 0x1d, 0x3e, 0x1e, 0x39, 0x74, 0x1c,
|
||||
0x1f, 0xb5, 0x2d, 0xdf, 0xdd, 0x48, 0x87, 0xdc, 0xe0, 0x43, 0x6e, 0x24, 0x6a, 0x46, 0x63, 0x33,
|
||||
0x24, 0xf6, 0xc6, 0xd8, 0x9a, 0x44, 0x01, 0xb1, 0xd8, 0xd7, 0x60, 0x3f, 0x12, 0x61, 0x2b, 0x3f,
|
||||
0x42, 0x44, 0xc3, 0xd8, 0xa2, 0xc9, 0x7c, 0x4d, 0x4a, 0x43, 0xe7, 0x28, 0xa6, 0x44, 0x00, 0x69,
|
||||
0xaf, 0xc0, 0xcb, 0x43, 0x33, 0x3a, 0xee, 0xfa, 0xde, 0x13, 0x67, 0x34, 0xb0, 0xc6, 0xc4, 0x35,
|
||||
0x31, 0xf9, 0x22, 0x26, 0x11, 0xd5, 0x7e, 0x04, 0xcd, 0x8b, 0x4d, 0x51, 0xe0, 0x7b, 0x11, 0x41,
|
||||
0x1f, 0x43, 0x89, 0x69, 0xd3, 0x54, 0x6e, 0x28, 0xeb, 0xf5, 0xcd, 0xb7, 0xdb, 0xcf, 0x5a, 0x38,
|
||||
0xa1, 0x43, 0x5b, 0xce, 0xa2, 0x3d, 0x08, 0x88, 0x85, 0xb9, 0xa4, 0x76, 0x1d, 0xae, 0x76, 0xcd,
|
||||
0xc0, 0x3c, 0x72, 0x26, 0x0e, 0x75, 0x48, 0x94, 0x0c, 0x1a, 0xc3, 0xb5, 0x59, 0xb6, 0x1c, 0xf0,
|
||||
0x73, 0x58, 0xb1, 0x32, 0x7c, 0x39, 0xf0, 0xdd, 0x76, 0x2e, 0x8b, 0xb5, 0x7b, 0x9c, 0x9a, 0x01,
|
||||
0x9e, 0x81, 0xd3, 0xae, 0x01, 0xba, 0xef, 0x78, 0x23, 0x12, 0x06, 0xa1, 0xe3, 0xd1, 0x44, 0x99,
|
||||
0x6f, 0x8a, 0x70, 0x75, 0x86, 0x2d, 0x95, 0x79, 0x0a, 0x90, 0xae, 0x23, 0x53, 0xa5, 0xb8, 0x5e,
|
||||
0xdf, 0xfc, 0x34, 0xa7, 0x2a, 0x97, 0xe0, 0xb5, 0x3b, 0x29, 0x98, 0xee, 0xd1, 0xf0, 0x0c, 0x67,
|
||||
0xd0, 0xd1, 0x63, 0xa8, 0x8c, 0x89, 0x39, 0xa1, 0xe3, 0x66, 0xe1, 0x86, 0xb2, 0xde, 0xd8, 0xbc,
|
||||
0xff, 0x02, 0xe3, 0x6c, 0x73, 0xa0, 0x01, 0x35, 0x29, 0xc1, 0x12, 0x15, 0xdd, 0x04, 0x24, 0xfe,
|
||||
0x0c, 0x9b, 0x44, 0x56, 0xe8, 0x04, 0xcc, 0x91, 0x9b, 0xc5, 0x1b, 0xca, 0x7a, 0x0d, 0x5f, 0x11,
|
||||
0x2d, 0xbd, 0x69, 0x43, 0x2b, 0x80, 0xb5, 0x73, 0xda, 0x22, 0x15, 0x8a, 0xc7, 0xe4, 0x8c, 0x5b,
|
||||
0xa4, 0x86, 0xd9, 0x2f, 0xda, 0x82, 0xf2, 0x89, 0x39, 0x89, 0x09, 0x57, 0xb9, 0xbe, 0xf9, 0xee,
|
||||
0xf3, 0xdc, 0x43, 0xba, 0xe8, 0x74, 0x1d, 0xb0, 0x90, 0xbf, 0x57, 0xb8, 0xa3, 0x68, 0x77, 0xa1,
|
||||
0x9e, 0xd1, 0x1b, 0x35, 0x00, 0x0e, 0xfb, 0x3d, 0x7d, 0xa8, 0x77, 0x87, 0x7a, 0x4f, 0x5d, 0x42,
|
||||
0xab, 0x50, 0x3b, 0xec, 0x6f, 0xeb, 0x9d, 0xdd, 0xe1, 0xf6, 0x23, 0x55, 0x41, 0x75, 0x58, 0x4e,
|
||||
0x88, 0x82, 0x76, 0x0a, 0x08, 0x13, 0xcb, 0x3f, 0x21, 0x21, 0x73, 0x64, 0x69, 0x55, 0xf4, 0x32,
|
||||
0x2c, 0x53, 0x33, 0x3a, 0x36, 0x1c, 0x5b, 0xea, 0x5c, 0x61, 0xe4, 0x8e, 0x8d, 0x76, 0xa0, 0x32,
|
||||
0x36, 0x3d, 0x7b, 0xf2, 0x7c, 0xbd, 0x67, 0x97, 0x9a, 0x81, 0x6f, 0x73, 0x41, 0x2c, 0x01, 0x98,
|
||||
0x77, 0xcf, 0x8c, 0x2c, 0x0c, 0xa0, 0x3d, 0x02, 0x75, 0x40, 0xcd, 0x90, 0x66, 0xd5, 0xd1, 0xa1,
|
||||
0xc4, 0xc6, 0x97, 0x1e, 0x3d, 0xcf, 0x98, 0x62, 0x67, 0x62, 0x2e, 0xae, 0xfd, 0xbb, 0x00, 0x57,
|
||||
0x32, 0xd8, 0xd2, 0x53, 0x1f, 0x42, 0x25, 0x24, 0x51, 0x3c, 0xa1, 0x1c, 0xbe, 0xb1, 0xf9, 0x51,
|
||||
0x4e, 0xf8, 0x0b, 0x48, 0x6d, 0xcc, 0x61, 0xb0, 0x84, 0x43, 0xeb, 0xa0, 0x0a, 0x09, 0x83, 0x84,
|
||||
0xa1, 0x1f, 0x1a, 0x6e, 0x34, 0xe2, 0xab, 0x56, 0xc3, 0x0d, 0xc1, 0xd7, 0x19, 0x7b, 0x2f, 0x1a,
|
||||
0x65, 0x56, 0xb5, 0xf8, 0x82, 0xab, 0x8a, 0x4c, 0x50, 0x3d, 0x42, 0xbf, 0xf4, 0xc3, 0x63, 0x83,
|
||||
0x2d, 0x6d, 0xe8, 0xd8, 0xa4, 0x59, 0xe2, 0xa0, 0xb7, 0x72, 0x82, 0xf6, 0x85, 0xf8, 0xbe, 0x94,
|
||||
0xc6, 0x6b, 0xde, 0x2c, 0x43, 0x7b, 0x0b, 0x2a, 0x62, 0xa6, 0xcc, 0x93, 0x06, 0x87, 0xdd, 0xae,
|
||||
0x3e, 0x18, 0xa8, 0x4b, 0xa8, 0x06, 0x65, 0xac, 0x0f, 0x31, 0xf3, 0xb0, 0x1a, 0x94, 0xef, 0x77,
|
||||
0x86, 0x9d, 0x5d, 0xb5, 0xa0, 0xbd, 0x09, 0x6b, 0x0f, 0x4d, 0x87, 0xe6, 0x71, 0x2e, 0xcd, 0x07,
|
||||
0x75, 0xda, 0x57, 0x5a, 0x67, 0x67, 0xc6, 0x3a, 0xf9, 0x97, 0x46, 0x3f, 0x75, 0xe8, 0x39, 0x7b,
|
||||
0xa8, 0x50, 0x24, 0x61, 0x28, 0x4d, 0xc0, 0x7e, 0xb5, 0x2f, 0x61, 0x6d, 0x40, 0xfd, 0x20, 0x97,
|
||||
0xe7, 0xbf, 0x07, 0xcb, 0x2c, 0x46, 0xf9, 0x31, 0x95, 0xae, 0xff, 0x4a, 0x5b, 0xc4, 0xb0, 0x76,
|
||||
0x12, 0xc3, 0xda, 0x3d, 0x19, 0xe3, 0x70, 0xd2, 0x13, 0xbd, 0x04, 0x95, 0xc8, 0x19, 0x79, 0xe6,
|
||||
0x44, 0x9e, 0x16, 0x92, 0xd2, 0x10, 0x73, 0xf2, 0x64, 0x60, 0xe9, 0xf8, 0x5d, 0x40, 0x3d, 0x12,
|
||||
0xd1, 0xd0, 0x3f, 0xcb, 0xa5, 0xcf, 0x35, 0x28, 0x3f, 0xf1, 0x43, 0x4b, 0x6c, 0xc4, 0x2a, 0x16,
|
||||
0x04, 0xdb, 0x54, 0x33, 0x20, 0x12, 0xfb, 0x26, 0xa0, 0x1d, 0x8f, 0xc5, 0x94, 0x7c, 0x86, 0xf8,
|
||||
0x6d, 0x01, 0xae, 0xce, 0xf4, 0x97, 0xc6, 0x58, 0x7c, 0x1f, 0xb2, 0x83, 0x29, 0x8e, 0xc4, 0x3e,
|
||||
0x44, 0xfb, 0x50, 0x11, 0x3d, 0xe4, 0x4a, 0xde, 0x9e, 0x03, 0x48, 0x84, 0x29, 0x09, 0x27, 0x61,
|
||||
0x2e, 0x75, 0xfa, 0xe2, 0x77, 0xeb, 0xf4, 0x5b, 0xa0, 0x26, 0xf3, 0x88, 0x9e, 0x6b, 0x9b, 0x16,
|
||||
0x54, 0x1d, 0x8f, 0x92, 0xf0, 0xc4, 0x9c, 0xf0, 0x29, 0x16, 0x71, 0x4a, 0x6b, 0x3f, 0x84, 0x2b,
|
||||
0x19, 0x20, 0xb9, 0xb0, 0xf7, 0xa1, 0x1c, 0x31, 0x86, 0x5c, 0xd9, 0x77, 0xe6, 0x5c, 0xd9, 0x08,
|
||||
0x0b, 0x71, 0xed, 0xaa, 0x00, 0xd7, 0x4f, 0x88, 0x97, 0xaa, 0xa9, 0xf5, 0xe0, 0xca, 0x80, 0xbb,
|
||||
0x5d, 0x2e, 0xbf, 0x9a, 0xba, 0x6c, 0x61, 0xc6, 0x65, 0xaf, 0x01, 0xca, 0xa2, 0x48, 0xc7, 0x3a,
|
||||
0x83, 0x35, 0xfd, 0x94, 0x58, 0xb9, 0x90, 0x9b, 0xb0, 0x6c, 0xf9, 0xae, 0x6b, 0x7a, 0x76, 0xb3,
|
||||
0x70, 0xa3, 0xb8, 0x5e, 0xc3, 0x09, 0x99, 0xdd, 0x5b, 0xc5, 0xbc, 0x7b, 0x4b, 0xfb, 0x8d, 0x02,
|
||||
0xea, 0x74, 0x6c, 0xb9, 0x90, 0x4c, 0x7b, 0x6a, 0x33, 0x20, 0x36, 0xf6, 0x0a, 0x96, 0x94, 0xe4,
|
||||
0x27, 0xdb, 0x5f, 0xf0, 0x49, 0x18, 0x66, 0x8e, 0x97, 0xe2, 0x0b, 0x1e, 0x2f, 0xda, 0x3f, 0x15,
|
||||
0x40, 0x17, 0x93, 0x28, 0xf4, 0x3a, 0xac, 0x44, 0xc4, 0xb3, 0x0d, 0xb1, 0x8c, 0xc2, 0xc2, 0x55,
|
||||
0x5c, 0x67, 0x3c, 0xb1, 0x9e, 0x11, 0x42, 0x50, 0x22, 0xa7, 0xc4, 0x92, 0x3b, 0x99, 0xff, 0xa3,
|
||||
0x31, 0xac, 0x3c, 0x89, 0x0c, 0x27, 0xf2, 0x27, 0x66, 0x9a, 0x6d, 0x34, 0x36, 0xf5, 0x85, 0x93,
|
||||
0xb9, 0xf6, 0xfd, 0xc1, 0x4e, 0x02, 0x86, 0xeb, 0x4f, 0xa2, 0x94, 0xd0, 0xda, 0x50, 0xcf, 0xb4,
|
||||
0xa1, 0x2a, 0x94, 0xfa, 0xfb, 0x7d, 0x5d, 0x5d, 0x42, 0x00, 0x95, 0xee, 0x36, 0xde, 0xdf, 0x1f,
|
||||
0x8a, 0x13, 0x7d, 0x67, 0xaf, 0xb3, 0xa5, 0xab, 0x05, 0xed, 0x4f, 0x15, 0x80, 0x69, 0x68, 0x45,
|
||||
0x0d, 0x28, 0xa4, 0x96, 0x2e, 0x38, 0x36, 0x9b, 0x8c, 0x67, 0xba, 0x44, 0x7a, 0x0f, 0xff, 0x47,
|
||||
0x9b, 0x70, 0xdd, 0x8d, 0x46, 0x81, 0x69, 0x1d, 0x1b, 0x32, 0x22, 0x5a, 0x5c, 0x98, 0xcf, 0x6a,
|
||||
0x05, 0x5f, 0x95, 0x8d, 0x52, 0x6b, 0x81, 0xbb, 0x0b, 0x45, 0xe2, 0x9d, 0x34, 0x4b, 0x3c, 0x73,
|
||||
0xbc, 0x37, 0x77, 0xc8, 0x6f, 0xeb, 0xde, 0x89, 0xc8, 0x14, 0x19, 0x0c, 0x32, 0x00, 0x6c, 0x72,
|
||||
0xe2, 0x58, 0xc4, 0x60, 0xa0, 0x65, 0x0e, 0xfa, 0xf1, 0xfc, 0xa0, 0x3d, 0x8e, 0x91, 0x42, 0xd7,
|
||||
0xec, 0x84, 0x46, 0x7d, 0xa8, 0x85, 0x24, 0xf2, 0xe3, 0xd0, 0x22, 0x51, 0xb3, 0x32, 0xd7, 0x2e,
|
||||
0xc6, 0x89, 0x1c, 0x9e, 0x42, 0xa0, 0x1e, 0x54, 0x5c, 0x3f, 0xf6, 0x68, 0xd4, 0x5c, 0xe6, 0xca,
|
||||
0xbe, 0x9d, 0x13, 0x6c, 0x8f, 0x09, 0x61, 0x29, 0x8b, 0xb6, 0x60, 0x59, 0xa8, 0x18, 0x35, 0xab,
|
||||
0x1c, 0xe6, 0x66, 0x5e, 0x07, 0xe2, 0x52, 0x38, 0x91, 0x66, 0x56, 0x8d, 0x23, 0x12, 0x36, 0x6b,
|
||||
0xc2, 0xaa, 0xec, 0x1f, 0xbd, 0x0a, 0x35, 0x73, 0x32, 0xf1, 0x2d, 0xc3, 0x76, 0xc2, 0x26, 0xf0,
|
||||
0x86, 0x2a, 0x67, 0xf4, 0x9c, 0x10, 0xbd, 0x06, 0x75, 0xb1, 0xf5, 0x8c, 0xc0, 0xa4, 0xe3, 0x66,
|
||||
0x9d, 0x37, 0x83, 0x60, 0x1d, 0x98, 0x74, 0x2c, 0x3b, 0x90, 0x30, 0x14, 0x1d, 0x56, 0xd2, 0x0e,
|
||||
0x24, 0x0c, 0x79, 0x87, 0xff, 0x83, 0x35, 0x7e, 0x8e, 0x8c, 0x42, 0x3f, 0x0e, 0x0c, 0xee, 0x53,
|
||||
0xab, 0xbc, 0xd3, 0x2a, 0x63, 0x6f, 0x31, 0x6e, 0x9f, 0x39, 0xd7, 0x2b, 0x50, 0x7d, 0xea, 0x1f,
|
||||
0x89, 0x0e, 0x0d, 0xde, 0x61, 0xf9, 0xa9, 0x7f, 0x94, 0x34, 0x09, 0x0d, 0x1d, 0xbb, 0xb9, 0x26,
|
||||
0x9a, 0x38, 0xbd, 0x63, 0xb7, 0x6e, 0x41, 0x35, 0x31, 0xe3, 0x25, 0xd9, 0xf9, 0xb5, 0x6c, 0x76,
|
||||
0x5e, 0xcb, 0xa4, 0xda, 0xad, 0xf7, 0xa1, 0x31, 0xeb, 0x04, 0xf3, 0x48, 0x6b, 0x7f, 0x55, 0xa0,
|
||||
0x96, 0x9a, 0x1b, 0x79, 0x70, 0x95, 0xab, 0x63, 0x52, 0x62, 0x1b, 0x53, 0xef, 0x11, 0x31, 0xe0,
|
||||
0x83, 0x9c, 0x96, 0xea, 0x24, 0x08, 0xf2, 0x1c, 0x94, 0xae, 0x84, 0x52, 0xe4, 0xe9, 0x78, 0x8f,
|
||||
0x61, 0x6d, 0xe2, 0x78, 0xf1, 0x69, 0x66, 0x2c, 0x11, 0x80, 0xff, 0x3f, 0xe7, 0x58, 0xbb, 0x4c,
|
||||
0x7a, 0x3a, 0x46, 0x63, 0x32, 0x43, 0x6b, 0x5f, 0x17, 0xe0, 0xa5, 0xcb, 0xd5, 0x41, 0x7d, 0x28,
|
||||
0x5a, 0x41, 0x2c, 0xa7, 0xf6, 0xfe, 0xbc, 0x53, 0xeb, 0x06, 0xf1, 0x74, 0x54, 0x06, 0xc4, 0x92,
|
||||
0x76, 0x97, 0xb8, 0x7e, 0x78, 0x26, 0x67, 0xf0, 0xd1, 0xbc, 0x90, 0x7b, 0x5c, 0x7a, 0x8a, 0x2a,
|
||||
0xe1, 0x10, 0x86, 0xaa, 0x0c, 0xfd, 0x91, 0x3c, 0x26, 0xe6, 0x4c, 0x21, 0x12, 0x48, 0x9c, 0xe2,
|
||||
0x68, 0xb7, 0xe0, 0xfa, 0xa5, 0x53, 0x41, 0xff, 0x03, 0x60, 0x05, 0xb1, 0xc1, 0xaf, 0x78, 0xc2,
|
||||
0xee, 0x45, 0x5c, 0xb3, 0x82, 0x78, 0xc0, 0x19, 0xda, 0x6d, 0x68, 0x3e, 0x4b, 0x5f, 0xb6, 0xf9,
|
||||
0x84, 0xc6, 0x86, 0x7b, 0x94, 0xe4, 0x18, 0x82, 0xb1, 0x77, 0xa4, 0xfd, 0xae, 0x00, 0x6b, 0xe7,
|
||||
0xd4, 0x61, 0x11, 0x50, 0x6c, 0xe6, 0x24, 0x2a, 0x0b, 0x8a, 0xed, 0x6c, 0xcb, 0xb1, 0x93, 0xb4,
|
||||
0x98, 0xff, 0xf3, 0x33, 0x3d, 0x90, 0x29, 0x6b, 0xc1, 0x09, 0x98, 0x43, 0xbb, 0x47, 0x0e, 0x8d,
|
||||
0xf8, 0x4d, 0xa2, 0x8c, 0x05, 0x81, 0x1e, 0x41, 0x23, 0x24, 0x11, 0x09, 0x4f, 0x88, 0x6d, 0x04,
|
||||
0x7e, 0x48, 0x93, 0x05, 0xdb, 0x9c, 0x6f, 0xc1, 0x0e, 0xfc, 0x90, 0xe2, 0xd5, 0x04, 0x89, 0x51,
|
||||
0x11, 0x7a, 0x08, 0xab, 0xf6, 0x99, 0x67, 0xba, 0x8e, 0x25, 0x91, 0x2b, 0x0b, 0x23, 0xaf, 0x48,
|
||||
0x20, 0x0e, 0xcc, 0x6e, 0xca, 0x99, 0x46, 0x36, 0xb1, 0x89, 0x79, 0x44, 0x26, 0x72, 0x4d, 0x04,
|
||||
0x31, 0xbb, 0x7f, 0xcb, 0x72, 0xff, 0x6a, 0x7f, 0x28, 0x40, 0x63, 0x76, 0x03, 0x24, 0xf6, 0x0b,
|
||||
0x48, 0xe8, 0xf8, 0x76, 0xc6, 0x7e, 0x07, 0x9c, 0xc1, 0x6c, 0xc4, 0x9a, 0xbf, 0x88, 0x7d, 0x6a,
|
||||
0x26, 0x36, 0xb2, 0x82, 0xf8, 0xfb, 0x8c, 0x3e, 0x67, 0xfb, 0xe2, 0x39, 0xdb, 0xa3, 0xb7, 0x01,
|
||||
0x49, 0xfb, 0x4e, 0x1c, 0xd7, 0xa1, 0xc6, 0xd1, 0x19, 0x25, 0x62, 0xfd, 0x8b, 0x58, 0x15, 0x2d,
|
||||
0xbb, 0xac, 0xe1, 0x13, 0xc6, 0x47, 0x1a, 0xac, 0xfa, 0xbe, 0x6b, 0x44, 0x96, 0x1f, 0x12, 0xc3,
|
||||
0xb4, 0x9f, 0x36, 0xcb, 0xbc, 0x63, 0xdd, 0xf7, 0xdd, 0x01, 0xe3, 0x75, 0xec, 0xa7, 0xec, 0xc0,
|
||||
0xb5, 0x82, 0x38, 0x22, 0xd4, 0x60, 0x1f, 0x1e, 0xa3, 0x6a, 0x18, 0x04, 0xab, 0x1b, 0xc4, 0x51,
|
||||
0xa6, 0x83, 0x4b, 0x5c, 0x16, 0x77, 0x32, 0x1d, 0xf6, 0x88, 0xcb, 0x46, 0x59, 0x39, 0x20, 0xa1,
|
||||
0x45, 0x3c, 0x3a, 0x74, 0xac, 0x63, 0x16, 0x52, 0x94, 0x75, 0x05, 0xcf, 0xf0, 0xb4, 0xcf, 0xa1,
|
||||
0xcc, 0x43, 0x10, 0x9b, 0x3c, 0x3f, 0xbe, 0xf9, 0xe9, 0x2e, 0x96, 0xb7, 0xca, 0x18, 0xfc, 0x6c,
|
||||
0x7f, 0x15, 0x6a, 0x63, 0x3f, 0x92, 0xb1, 0x41, 0x78, 0x5e, 0x95, 0x31, 0x78, 0x63, 0x0b, 0xaa,
|
||||
0x21, 0x31, 0x6d, 0xdf, 0x9b, 0x9c, 0xf1, 0x75, 0xa9, 0xe2, 0x94, 0xd6, 0xbe, 0x80, 0x8a, 0x38,
|
||||
0x7e, 0x5f, 0x00, 0xff, 0x26, 0x20, 0x4b, 0x04, 0x95, 0x80, 0x84, 0xae, 0x13, 0x45, 0x8e, 0xef,
|
||||
0x45, 0xc9, 0x73, 0x8e, 0x68, 0x39, 0x98, 0x36, 0x68, 0x7f, 0x53, 0x44, 0xbe, 0x23, 0x2e, 0xda,
|
||||
0x2c, 0x8b, 0x65, 0x9e, 0xc6, 0x72, 0x32, 0x85, 0xbb, 0x47, 0x42, 0xb2, 0x5c, 0x52, 0xa6, 0x35,
|
||||
0x85, 0x45, 0xdf, 0x29, 0x24, 0x40, 0x72, 0x1f, 0x20, 0x32, 0xed, 0x9b, 0xf7, 0x3e, 0x40, 0xc4,
|
||||
0x7d, 0x80, 0xb0, 0xe4, 0x53, 0x26, 0x5c, 0x02, 0xae, 0xc4, 0xf3, 0xad, 0xba, 0x9d, 0x5e, 0xa2,
|
||||
0x88, 0xf6, 0x2f, 0x25, 0x3d, 0x2b, 0x92, 0xcb, 0x0e, 0x7a, 0x0c, 0x55, 0xb6, 0xed, 0x0c, 0xd7,
|
||||
0x0c, 0xe4, 0xd3, 0x5d, 0x77, 0xb1, 0x7b, 0x54, 0x9b, 0xed, 0xb2, 0x3d, 0x33, 0x10, 0xe9, 0xd2,
|
||||
0x72, 0x20, 0x28, 0x76, 0xe6, 0x98, 0xf6, 0xf4, 0xcc, 0x61, 0xff, 0xe8, 0x0d, 0x68, 0x98, 0x31,
|
||||
0xf5, 0x0d, 0xd3, 0x3e, 0x21, 0x21, 0x75, 0x22, 0x22, 0x6d, 0xbf, 0xca, 0xb8, 0x9d, 0x84, 0xd9,
|
||||
0xba, 0x07, 0x2b, 0x59, 0xcc, 0xe7, 0x45, 0xdf, 0x72, 0x36, 0xfa, 0xfe, 0x18, 0x60, 0x9a, 0xb7,
|
||||
0x33, 0x1f, 0x21, 0xa7, 0x0e, 0x35, 0x2c, 0xdf, 0x26, 0xd2, 0x94, 0x55, 0xc6, 0xe8, 0xfa, 0x36,
|
||||
0x39, 0x77, 0x0b, 0x2a, 0x27, 0xb7, 0x20, 0xb6, 0x6b, 0xd9, 0x46, 0x3b, 0x76, 0x26, 0x13, 0x62,
|
||||
0x4b, 0x0d, 0x6b, 0xbe, 0xef, 0x3e, 0xe0, 0x0c, 0xed, 0x9b, 0x82, 0xf0, 0x15, 0x71, 0x3f, 0xcd,
|
||||
0x95, 0x1b, 0x7f, 0x57, 0xa6, 0xbe, 0x0b, 0x10, 0x51, 0x33, 0x64, 0xa9, 0x84, 0x49, 0xe5, 0x93,
|
||||
0x4f, 0xeb, 0xc2, 0x35, 0x6a, 0x98, 0x3c, 0xb3, 0xe3, 0x9a, 0xec, 0xdd, 0xa1, 0xe8, 0x03, 0x58,
|
||||
0xb1, 0x7c, 0x37, 0x98, 0x10, 0x29, 0x5c, 0x7e, 0xae, 0x70, 0x3d, 0xed, 0xdf, 0xa1, 0x99, 0x3b,
|
||||
0x54, 0xe5, 0x45, 0xef, 0x50, 0x7f, 0x56, 0xc4, 0x35, 0x3b, 0x7b, 0xcb, 0x47, 0xa3, 0x4b, 0x9e,
|
||||
0x92, 0xb7, 0x16, 0x7c, 0x32, 0xf8, 0xb6, 0x77, 0xe4, 0xd6, 0x07, 0x79, 0x1e, 0x6e, 0x9f, 0x9d,
|
||||
0xdc, 0xfd, 0xa5, 0x08, 0xb5, 0xf4, 0x46, 0x7e, 0xc1, 0xf6, 0x77, 0xa0, 0x96, 0xd6, 0x38, 0xe4,
|
||||
0x01, 0xf1, 0xad, 0xe6, 0x49, 0x3b, 0xa3, 0x27, 0x80, 0xcc, 0xd1, 0x28, 0x4d, 0xda, 0x8c, 0x38,
|
||||
0x32, 0x47, 0xc9, 0xfb, 0xc6, 0x9d, 0x39, 0xd6, 0x21, 0x89, 0x5b, 0x87, 0x4c, 0x1e, 0xab, 0xe6,
|
||||
0x68, 0x34, 0xc3, 0x41, 0x3f, 0x81, 0xeb, 0xb3, 0x63, 0x18, 0x47, 0x67, 0x46, 0xe0, 0xd8, 0xf2,
|
||||
0x0e, 0xb6, 0x3d, 0xef, 0xa3, 0x44, 0x7b, 0x06, 0xfe, 0x93, 0xb3, 0x03, 0xc7, 0x16, 0x6b, 0x8e,
|
||||
0xc2, 0x0b, 0x0d, 0xad, 0x9f, 0xc1, 0xcb, 0xcf, 0xe8, 0x7e, 0x89, 0x0d, 0xfa, 0xb3, 0x8f, 0xe7,
|
||||
0x8b, 0x2f, 0x42, 0xc6, 0x7a, 0xbf, 0x57, 0xc4, 0xdb, 0xc9, 0xec, 0x9a, 0x74, 0xb2, 0x79, 0xeb,
|
||||
0x46, 0xce, 0x71, 0xba, 0x07, 0x87, 0x02, 0x9e, 0xa7, 0xaa, 0x9f, 0x9e, 0x4b, 0x55, 0xf3, 0x26,
|
||||
0x31, 0x22, 0xe3, 0x13, 0x40, 0x12, 0x41, 0xfb, 0x63, 0x11, 0xaa, 0x09, 0x3a, 0xbf, 0x41, 0x9d,
|
||||
0x45, 0x94, 0xb8, 0x86, 0x9b, 0x1c, 0x61, 0x0a, 0x06, 0xc1, 0xda, 0x63, 0x87, 0xd8, 0xab, 0x50,
|
||||
0x63, 0x17, 0x35, 0xd1, 0x5c, 0xe0, 0xcd, 0x55, 0xc6, 0xe0, 0x8d, 0xaf, 0x41, 0x9d, 0xfa, 0xd4,
|
||||
0x9c, 0x18, 0x94, 0xc7, 0xf2, 0xa2, 0x90, 0xe6, 0x2c, 0x1e, 0xc9, 0xd1, 0x5b, 0x70, 0x85, 0x8e,
|
||||
0x43, 0x9f, 0xd2, 0x09, 0xcb, 0xef, 0x78, 0x46, 0x23, 0x12, 0x90, 0x12, 0x56, 0xd3, 0x06, 0x91,
|
||||
0xe9, 0x44, 0xec, 0xf4, 0x9e, 0x76, 0x66, 0xae, 0xcb, 0x0f, 0x91, 0x12, 0x5e, 0x4d, 0xb9, 0xcc,
|
||||
0xb5, 0x59, 0xf0, 0x0c, 0x44, 0xb6, 0xc0, 0xcf, 0x0a, 0x05, 0x27, 0x24, 0x32, 0x60, 0xcd, 0x25,
|
||||
0x66, 0x14, 0x87, 0xc4, 0x36, 0x9e, 0x38, 0x64, 0x62, 0x8b, 0x8b, 0x6f, 0x23, 0x77, 0xfa, 0x9d,
|
||||
0x2c, 0x4b, 0xfb, 0x3e, 0x97, 0xc6, 0x8d, 0x04, 0x4e, 0xd0, 0x2c, 0x73, 0x10, 0x7f, 0x68, 0x0d,
|
||||
0xea, 0x83, 0x47, 0x83, 0xa1, 0xbe, 0x67, 0xec, 0xed, 0xf7, 0x74, 0x59, 0x1f, 0x19, 0xe8, 0x58,
|
||||
0x90, 0x0a, 0x6b, 0x1f, 0xee, 0x0f, 0x3b, 0xbb, 0xc6, 0x70, 0xa7, 0xfb, 0x60, 0xa0, 0x16, 0xd0,
|
||||
0x75, 0xb8, 0x32, 0xdc, 0xc6, 0xfb, 0xc3, 0xe1, 0xae, 0xde, 0x33, 0x0e, 0x74, 0xbc, 0xb3, 0xdf,
|
||||
0x1b, 0xa8, 0x45, 0x84, 0xa0, 0x31, 0x65, 0x0f, 0x77, 0xf6, 0x74, 0xb5, 0x84, 0xea, 0xb0, 0x7c,
|
||||
0xa0, 0xe3, 0xae, 0xde, 0x1f, 0xaa, 0x65, 0xed, 0x3f, 0x05, 0xa8, 0x67, 0xac, 0xc8, 0x1c, 0x39,
|
||||
0x8c, 0x44, 0x9e, 0x5f, 0xc2, 0xec, 0x97, 0x1d, 0x26, 0x96, 0x69, 0x8d, 0x85, 0x75, 0x4a, 0x58,
|
||||
0x10, 0x3c, 0xb7, 0x37, 0x4f, 0x33, 0xfb, 0xbc, 0x84, 0xab, 0xae, 0x79, 0x2a, 0x40, 0x5e, 0x87,
|
||||
0x95, 0x63, 0x12, 0x7a, 0x64, 0x22, 0xdb, 0x85, 0x45, 0xea, 0x82, 0x27, 0xba, 0xac, 0x83, 0x2a,
|
||||
0xbb, 0x4c, 0x61, 0x84, 0x39, 0x1a, 0x82, 0xbf, 0x97, 0x80, 0x5d, 0x83, 0xb2, 0x68, 0x5e, 0x16,
|
||||
0xe3, 0x73, 0x02, 0x1d, 0x5d, 0xb4, 0x45, 0x85, 0xdb, 0xe2, 0xee, 0xfc, 0xae, 0xfb, 0x2c, 0x73,
|
||||
0x3c, 0x4e, 0xcd, 0xb1, 0x0c, 0x45, 0x9c, 0x14, 0x10, 0xba, 0x9d, 0xee, 0x36, 0x33, 0xc1, 0x2a,
|
||||
0xd4, 0xf6, 0x3a, 0x9f, 0x19, 0x87, 0x03, 0xfe, 0xe4, 0x84, 0x54, 0x58, 0x79, 0xa0, 0xe3, 0xbe,
|
||||
0xbe, 0x2b, 0x39, 0x45, 0x74, 0x0d, 0x54, 0xc9, 0x99, 0xf6, 0x2b, 0x31, 0x04, 0xf1, 0x5b, 0xd6,
|
||||
0xfe, 0x5e, 0x80, 0x35, 0x71, 0xf0, 0xa7, 0x0f, 0xa2, 0xcf, 0x7e, 0x99, 0xcc, 0xbe, 0x13, 0x14,
|
||||
0x66, 0xde, 0x09, 0xd2, 0x34, 0x93, 0xc7, 0xed, 0xe2, 0x34, 0xcd, 0xe4, 0xef, 0x0b, 0x33, 0x67,
|
||||
0x7a, 0x69, 0x9e, 0x33, 0xbd, 0x09, 0xcb, 0x2e, 0x89, 0x52, 0xcb, 0xd4, 0x70, 0x42, 0x22, 0x07,
|
||||
0xea, 0xa6, 0xe7, 0xf9, 0x94, 0xbf, 0xc6, 0x25, 0x17, 0x9f, 0xad, 0xb9, 0xde, 0xfd, 0xd2, 0x19,
|
||||
0xb7, 0x3b, 0x53, 0x24, 0x71, 0xf4, 0x66, 0xb1, 0x5b, 0x1f, 0x82, 0x7a, 0xbe, 0xc3, 0x3c, 0x01,
|
||||
0xef, 0xcd, 0x77, 0xa7, 0xf1, 0x8e, 0x30, 0xcf, 0x3f, 0xec, 0x3f, 0xe8, 0xef, 0x3f, 0xec, 0xab,
|
||||
0x4b, 0x8c, 0xc0, 0x87, 0xfd, 0xfe, 0x4e, 0x7f, 0x4b, 0x55, 0x10, 0x40, 0x45, 0xff, 0x6c, 0x67,
|
||||
0xa8, 0xf7, 0xd4, 0xc2, 0xe6, 0x3f, 0x56, 0xa1, 0x22, 0x94, 0x44, 0x5f, 0xcb, 0x58, 0x9f, 0x2d,
|
||||
0x9e, 0xa3, 0x0f, 0xe7, 0xce, 0x99, 0x67, 0x0a, 0xf2, 0xad, 0x8f, 0x16, 0x96, 0x97, 0x0f, 0xda,
|
||||
0x4b, 0xe8, 0xd7, 0x0a, 0xac, 0xcc, 0xbc, 0xe0, 0xe6, 0x7d, 0x7c, 0xbc, 0xa4, 0x56, 0xdf, 0xfa,
|
||||
0xde, 0x42, 0xb2, 0xa9, 0x2e, 0xbf, 0x52, 0xa0, 0x9e, 0xa9, 0x52, 0xa3, 0xbb, 0x8b, 0x54, 0xb6,
|
||||
0x85, 0x26, 0xf7, 0x16, 0x2f, 0x8a, 0x6b, 0x4b, 0xef, 0x28, 0xe8, 0x97, 0x0a, 0xd4, 0x33, 0xf5,
|
||||
0xda, 0xdc, 0xaa, 0x5c, 0xac, 0x2e, 0xe7, 0x56, 0xe5, 0xb2, 0xf2, 0xf0, 0x12, 0xfa, 0xb9, 0x02,
|
||||
0xb5, 0xb4, 0xf6, 0x8a, 0x6e, 0xcf, 0x5f, 0xad, 0x15, 0x4a, 0xdc, 0x59, 0xb4, 0xcc, 0xab, 0x2d,
|
||||
0xa1, 0x9f, 0x42, 0x35, 0x29, 0x54, 0xa2, 0xbc, 0xf1, 0xe9, 0x5c, 0x15, 0xb4, 0x75, 0x7b, 0x6e,
|
||||
0xb9, 0xec, 0xf0, 0x49, 0xf5, 0x30, 0xf7, 0xf0, 0xe7, 0xea, 0x9c, 0xad, 0xdb, 0x73, 0xcb, 0xa5,
|
||||
0xc3, 0x33, 0x4f, 0xc8, 0x14, 0x19, 0x73, 0x7b, 0xc2, 0xc5, 0xea, 0x66, 0x6e, 0x4f, 0xb8, 0xac,
|
||||
0xa6, 0x29, 0x14, 0xc9, 0x94, 0x29, 0x73, 0x2b, 0x72, 0xb1, 0x14, 0x9a, 0x5b, 0x91, 0x4b, 0xaa,
|
||||
0xa2, 0xda, 0x12, 0xfa, 0x4a, 0xc9, 0x66, 0xfe, 0xb7, 0xe7, 0xae, 0xde, 0xcd, 0xe9, 0x92, 0x17,
|
||||
0xea, 0x87, 0x7c, 0x83, 0x7e, 0x25, 0xdf, 0x29, 0x44, 0xf1, 0x0f, 0xcd, 0x03, 0x36, 0x53, 0x2f,
|
||||
0x6c, 0xdd, 0x5a, 0x2c, 0xd8, 0x70, 0x25, 0x7e, 0xa1, 0x00, 0x4c, 0xcb, 0x84, 0xb9, 0x95, 0xb8,
|
||||
0x50, 0x9f, 0x6c, 0xdd, 0x5d, 0x40, 0x32, 0xbb, 0x41, 0x92, 0xca, 0x60, 0xee, 0x0d, 0x72, 0xae,
|
||||
0x8c, 0x99, 0x7b, 0x83, 0x9c, 0x2f, 0x41, 0x6a, 0x4b, 0x9f, 0x2c, 0xff, 0xa0, 0x2c, 0xa2, 0x7f,
|
||||
0x85, 0x7f, 0xde, 0xfb, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe4, 0xbf, 0x79, 0x07, 0x59, 0x27,
|
||||
0x00, 0x00,
|
||||
}
|
||||
|
|
|
@@ -568,6 +568,7 @@ message MemoryUsage {
  uint64 max_usage = 3;
  uint64 kernel_usage = 4;
  uint64 kernel_max_usage = 5;
  uint64 usage = 7;

  enum Fields {
    RSS = 0;
@@ -575,6 +576,7 @@ message MemoryUsage {
    MAX_USAGE = 2;
    KERNEL_USAGE = 3;
    KERNEL_MAX_USAGE = 4;
    USAGE = 5;
  }
  // MeasuredFields indicates which fields were actually sampled
  repeated Fields measured_fields = 6;
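Note on the numbering above: the new `usage` scalar takes tag 7 rather than 6 because tag 6 is already held by `measured_fields`, and the matching `USAGE = 5` value simply extends the `Fields` enum past `KERNEL_MAX_USAGE = 4`.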
@@ -454,6 +454,9 @@ func resourceUsageToProto(ru *ResourceUsage) *proto.TaskResourceUsage {
	case "Cache":
		memory.Cache = ru.MemoryStats.Cache
		memory.MeasuredFields = append(memory.MeasuredFields, proto.MemoryUsage_CACHE)
	case "Usage":
		memory.Usage = ru.MemoryStats.Usage
		memory.MeasuredFields = append(memory.MeasuredFields, proto.MemoryUsage_USAGE)
	case "Max Usage":
		memory.MaxUsage = ru.MemoryStats.MaxUsage
		memory.MeasuredFields = append(memory.MeasuredFields, proto.MemoryUsage_MAX_USAGE)
@@ -509,6 +512,9 @@ func resourceUsageFromProto(pb *proto.TaskResourceUsage) *ResourceUsage {
	case proto.MemoryUsage_CACHE:
		memory.Cache = pb.Memory.Cache
		memory.Measured = append(memory.Measured, "Cache")
	case proto.MemoryUsage_USAGE:
		memory.Usage = pb.Memory.Usage
		memory.Measured = append(memory.Measured, "Usage")
	case proto.MemoryUsage_MAX_USAGE:
		memory.MaxUsage = pb.Memory.MaxUsage
		memory.Measured = append(memory.Measured, "Max Usage")
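The two conversion hunks above implement a measured-fields convention: a stat is only meaningful if its name (or enum value) appears in the measured list. Below is a minimal, self-contained sketch of how a consumer might honor that convention; the `MemoryStats` shape is assumed from the surrounding conversion code, not taken from the diff.

package main

import "fmt"

// MemoryStats mirrors the fields used by the conversion code above; the
// struct definition itself is an assumption for this sketch.
type MemoryStats struct {
	Cache, Usage, MaxUsage uint64
	Measured               []string
}

// reportMemory prints only the stats that were actually sampled; a zero
// value outside Measured means "not measured", not "zero bytes".
func reportMemory(m *MemoryStats) {
	for _, field := range m.Measured {
		switch field {
		case "Cache":
			fmt.Printf("cache=%d\n", m.Cache)
		case "Usage":
			fmt.Printf("usage=%d\n", m.Usage)
		case "Max Usage":
			fmt.Printf("max_usage=%d\n", m.MaxUsage)
		}
	}
}

func main() {
	reportMemory(&MemoryStats{Usage: 1 << 20, Measured: []string{"Usage"}})
}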
@@ -94,7 +94,7 @@ func (m *Spec) Reset() { *m = Spec{} }
func (m *Spec) String() string { return proto.CompactTextString(m) }
func (*Spec) ProtoMessage() {}
func (*Spec) Descriptor() ([]byte, []int) {
	return fileDescriptor_hcl_spec_45ead239ae3df7c4, []int{0}
	return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{0}
}
func (m *Spec) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Spec.Unmarshal(m, b)
@@ -522,7 +522,7 @@ func (m *Attr) Reset() { *m = Attr{} }
func (m *Attr) String() string { return proto.CompactTextString(m) }
func (*Attr) ProtoMessage() {}
func (*Attr) Descriptor() ([]byte, []int) {
	return fileDescriptor_hcl_spec_45ead239ae3df7c4, []int{1}
	return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{1}
}
func (m *Attr) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Attr.Unmarshal(m, b)
@@ -611,7 +611,7 @@ func (m *Block) Reset() { *m = Block{} }
func (m *Block) String() string { return proto.CompactTextString(m) }
func (*Block) ProtoMessage() {}
func (*Block) Descriptor() ([]byte, []int) {
	return fileDescriptor_hcl_spec_45ead239ae3df7c4, []int{2}
	return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{2}
}
func (m *Block) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Block.Unmarshal(m, b)
@@ -697,7 +697,7 @@ func (m *BlockAttrs) Reset() { *m = BlockAttrs{} }
func (m *BlockAttrs) String() string { return proto.CompactTextString(m) }
func (*BlockAttrs) ProtoMessage() {}
func (*BlockAttrs) Descriptor() ([]byte, []int) {
	return fileDescriptor_hcl_spec_45ead239ae3df7c4, []int{3}
	return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{3}
}
func (m *BlockAttrs) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BlockAttrs.Unmarshal(m, b)
@@ -792,7 +792,7 @@ func (m *BlockList) Reset() { *m = BlockList{} }
func (m *BlockList) String() string { return proto.CompactTextString(m) }
func (*BlockList) ProtoMessage() {}
func (*BlockList) Descriptor() ([]byte, []int) {
	return fileDescriptor_hcl_spec_45ead239ae3df7c4, []int{4}
	return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{4}
}
func (m *BlockList) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BlockList.Unmarshal(m, b)
@@ -875,7 +875,7 @@ func (m *BlockSet) Reset() { *m = BlockSet{} }
func (m *BlockSet) String() string { return proto.CompactTextString(m) }
func (*BlockSet) ProtoMessage() {}
func (*BlockSet) Descriptor() ([]byte, []int) {
	return fileDescriptor_hcl_spec_45ead239ae3df7c4, []int{5}
	return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{5}
}
func (m *BlockSet) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BlockSet.Unmarshal(m, b)
@@ -974,7 +974,7 @@ func (m *BlockMap) Reset() { *m = BlockMap{} }
func (m *BlockMap) String() string { return proto.CompactTextString(m) }
func (*BlockMap) ProtoMessage() {}
func (*BlockMap) Descriptor() ([]byte, []int) {
	return fileDescriptor_hcl_spec_45ead239ae3df7c4, []int{6}
	return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{6}
}
func (m *BlockMap) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BlockMap.Unmarshal(m, b)
@@ -1044,7 +1044,7 @@ func (m *Literal) Reset() { *m = Literal{} }
func (m *Literal) String() string { return proto.CompactTextString(m) }
func (*Literal) ProtoMessage() {}
func (*Literal) Descriptor() ([]byte, []int) {
	return fileDescriptor_hcl_spec_45ead239ae3df7c4, []int{7}
	return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{7}
}
func (m *Literal) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Literal.Unmarshal(m, b)
@@ -1108,7 +1108,7 @@ func (m *Default) Reset() { *m = Default{} }
func (m *Default) String() string { return proto.CompactTextString(m) }
func (*Default) ProtoMessage() {}
func (*Default) Descriptor() ([]byte, []int) {
	return fileDescriptor_hcl_spec_45ead239ae3df7c4, []int{8}
	return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{8}
}
func (m *Default) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Default.Unmarshal(m, b)
@@ -1182,7 +1182,7 @@ func (m *Object) Reset() { *m = Object{} }
func (m *Object) String() string { return proto.CompactTextString(m) }
func (*Object) ProtoMessage() {}
func (*Object) Descriptor() ([]byte, []int) {
	return fileDescriptor_hcl_spec_45ead239ae3df7c4, []int{9}
	return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{9}
}
func (m *Object) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Object.Unmarshal(m, b)
@@ -1238,7 +1238,7 @@ func (m *Array) Reset() { *m = Array{} }
func (m *Array) String() string { return proto.CompactTextString(m) }
func (*Array) ProtoMessage() {}
func (*Array) Descriptor() ([]byte, []int) {
	return fileDescriptor_hcl_spec_45ead239ae3df7c4, []int{10}
	return fileDescriptor_hcl_spec_8d078e4df12ae415, []int{10}
}
func (m *Array) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Array.Unmarshal(m, b)
@@ -1281,10 +1281,10 @@ func init() {
}

func init() {
	proto.RegisterFile("plugins/shared/hclspec/hcl_spec.proto", fileDescriptor_hcl_spec_45ead239ae3df7c4)
	proto.RegisterFile("plugins/shared/hclspec/hcl_spec.proto", fileDescriptor_hcl_spec_8d078e4df12ae415)
}

var fileDescriptor_hcl_spec_45ead239ae3df7c4 = []byte{
var fileDescriptor_hcl_spec_8d078e4df12ae415 = []byte{
	// 624 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x96, 0x4d, 0x6f, 0xd3, 0x4c,
	0x10, 0xc7, 0xe3, 0xc4, 0xaf, 0xd3, 0xc3, 0xf3, 0x68, 0x85, 0x90, 0x55, 0x0e, 0x54, 0x96, 0x40,
plugins/shared/loader/filter_unix.go (new file, 10 lines)
@@ -0,0 +1,10 @@
// +build !windows

package loader

import "os"

// executable checks to see if the file is executable by anyone.
func executable(path string, f os.FileInfo) bool {
	return f.Mode().Perm()&0111 != 0
}
plugins/shared/loader/filter_windows.go (new file, 15 lines)
@@ -0,0 +1,15 @@
// +build windows

package loader

import (
	"os"
	"path/filepath"
)

// On Windows, an executable can be a file with any extension. To avoid
// introspecting the file, we skip executability checks on Windows systems
// and simply check for the conventional `.exe` extension.
func executable(path string, s os.FileInfo) bool {
	// filepath.Ext includes the leading dot, so compare against ".exe".
	return filepath.Ext(path) == ".exe"
}
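Because of the `+build` constraints, exactly one of these two `executable` implementations is compiled into a given binary, so callers such as `scan` below need no `runtime.GOOS` branching of their own.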
@@ -226,7 +226,7 @@ func (l *PluginLoader) scan() ([]os.FileInfo, error) {
	if err != nil {
		// There are no plugins to scan
		if os.IsNotExist(err) {
			l.logger.Debug("skipping external plugins since plugin_dir doesn't exist")
			l.logger.Warn("skipping external plugins since plugin_dir doesn't exist")
			return nil, nil
		}
@@ -246,13 +246,12 @@ func (l *PluginLoader) scan() ([]os.FileInfo, error) {
		return nil, fmt.Errorf("failed to stat file %q: %v", f, err)
	}
	if s.IsDir() {
		l.logger.Debug("skipping subdir in plugin folder", "subdir", f)
		l.logger.Warn("skipping subdir in plugin folder", "subdir", f)
		continue
	}

	// Check if it is executable by anyone
	if s.Mode().Perm()&0111 == 0 {
		l.logger.Debug("skipping un-executable file in plugin folder", "file", f)
	if !executable(f, s) {
		l.logger.Warn("skipping un-executable file in plugin folder", "file", f)
		continue
	}
	plugins = append(plugins, s)
@@ -428,7 +427,7 @@ func (l *PluginLoader) mergePlugins(internal, external map[PluginID]*pluginInfo)
func (l *PluginLoader) validatePluginConfigs() error {
	var mErr multierror.Error
	for id, info := range l.plugins {
		if err := l.validePluginConfig(id, info); err != nil {
		if err := l.validatePluginConfig(id, info); err != nil {
			wrapped := multierror.Prefix(err, fmt.Sprintf("plugin %s:", id))
			multierror.Append(&mErr, wrapped)
		}
@@ -440,7 +439,7 @@ func (l *PluginLoader) validatePluginConfigs() error {
// validatePluginConfig is used to validate the plugin's configuration. If the
// plugin has a config, it is parsed with the plugin's config schema and
// SetConfig is called to ensure the config is valid.
func (l *PluginLoader) validePluginConfig(id PluginID, info *pluginInfo) error {
func (l *PluginLoader) validatePluginConfig(id PluginID, info *pluginInfo) error {
	var mErr multierror.Error

	// Check if a config is allowed
|
@ -5,6 +5,7 @@ import (
|
|||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
@@ -55,8 +56,12 @@ func newHarness(t *testing.T, plugins []string) *harness {
		t.Fatalf("failed to get self executable path: %v", err)
	}

	exeSuffix := ""
	if runtime.GOOS == "windows" {
		exeSuffix = ".exe"
	}
	for _, p := range plugins {
		dest := filepath.Join(h.tmpDir, p)
		dest := filepath.Join(h.tmpDir, p) + exeSuffix
		if err := copyFile(selfExe, dest); err != nil {
			t.Fatalf("failed to copy file: %v", err)
		}
@@ -366,7 +371,7 @@ func TestPluginLoader_External_Config_Bad(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Create two plugins
	// Create a plugin
	plugins := []string{"mock-device"}
	pluginVersions := []string{"v0.0.1"}
	h := newHarness(t, plugins)
@@ -1217,6 +1222,9 @@ func TestPluginLoader_Bad_Executable(t *testing.T) {

// Test that we skip directories, non-executables and follow symlinks
func TestPluginLoader_External_SkipBadFiles(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("Windows currently does not skip non exe files")
	}
	t.Parallel()
	require := require.New(t)
@@ -38,7 +38,7 @@ func (m *Attribute) Reset() { *m = Attribute{} }
func (m *Attribute) String() string { return proto.CompactTextString(m) }
func (*Attribute) ProtoMessage() {}
func (*Attribute) Descriptor() ([]byte, []int) {
	return fileDescriptor_attribute_47573829d12e3945, []int{0}
	return fileDescriptor_attribute_aa187fb710a98f5a, []int{0}
}
func (m *Attribute) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Attribute.Unmarshal(m, b)
@@ -230,10 +230,10 @@ func init() {
}

func init() {
	proto.RegisterFile("plugins/shared/structs/proto/attribute.proto", fileDescriptor_attribute_47573829d12e3945)
	proto.RegisterFile("plugins/shared/structs/proto/attribute.proto", fileDescriptor_attribute_aa187fb710a98f5a)
}

var fileDescriptor_attribute_47573829d12e3945 = []byte{
var fileDescriptor_attribute_aa187fb710a98f5a = []byte{
	// 218 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8f, 0xb1, 0x4e, 0xc3, 0x30,
	0x10, 0x40, 0x63, 0xda, 0x34, 0xc9, 0x8d, 0x99, 0x8a, 0x10, 0x22, 0x62, 0x40, 0x19, 0x90, 0x33,
@@ -31,7 +31,7 @@ func (m *RecoverableError) Reset() { *m = RecoverableError{} }
func (m *RecoverableError) String() string { return proto.CompactTextString(m) }
func (*RecoverableError) ProtoMessage() {}
func (*RecoverableError) Descriptor() ([]byte, []int) {
	return fileDescriptor_recoverable_error_8c5d7f86073ca00c, []int{0}
	return fileDescriptor_recoverable_error_f746254fd69675b0, []int{0}
}
func (m *RecoverableError) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RecoverableError.Unmarshal(m, b)
@@ -63,10 +63,10 @@ func init() {
}

func init() {
	proto.RegisterFile("plugins/shared/structs/proto/recoverable_error.proto", fileDescriptor_recoverable_error_8c5d7f86073ca00c)
	proto.RegisterFile("plugins/shared/structs/proto/recoverable_error.proto", fileDescriptor_recoverable_error_f746254fd69675b0)
}

var fileDescriptor_recoverable_error_8c5d7f86073ca00c = []byte{
var fileDescriptor_recoverable_error_f746254fd69675b0 = []byte{
	// 138 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x29, 0xc8, 0x29, 0x4d,
	0xcf, 0xcc, 0x2b, 0xd6, 0x2f, 0xce, 0x48, 0x2c, 0x4a, 0x4d, 0xd1, 0x2f, 0x2e, 0x29, 0x2a, 0x4d,
@@ -35,7 +35,7 @@ func (m *StatObject) Reset() { *m = StatObject{} }
func (m *StatObject) String() string { return proto.CompactTextString(m) }
func (*StatObject) ProtoMessage() {}
func (*StatObject) Descriptor() ([]byte, []int) {
	return fileDescriptor_stats_9457a07d62f0d5fa, []int{0}
	return fileDescriptor_stats_73a5e405c9cf442c, []int{0}
}
func (m *StatObject) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StatObject.Unmarshal(m, b)
@@ -100,7 +100,7 @@ func (m *StatValue) Reset() { *m = StatValue{} }
func (m *StatValue) String() string { return proto.CompactTextString(m) }
func (*StatValue) ProtoMessage() {}
func (*StatValue) Descriptor() ([]byte, []int) {
	return fileDescriptor_stats_9457a07d62f0d5fa, []int{1}
	return fileDescriptor_stats_73a5e405c9cf442c, []int{1}
}
func (m *StatValue) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StatValue.Unmarshal(m, b)
@@ -184,10 +184,10 @@ func init() {
}

func init() {
	proto.RegisterFile("plugins/shared/structs/proto/stats.proto", fileDescriptor_stats_9457a07d62f0d5fa)
	proto.RegisterFile("plugins/shared/structs/proto/stats.proto", fileDescriptor_stats_73a5e405c9cf442c)
}

var fileDescriptor_stats_9457a07d62f0d5fa = []byte{
var fileDescriptor_stats_73a5e405c9cf442c = []byte{
	// 444 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0xd2, 0xdf, 0x6a, 0x13, 0x41,
	0x14, 0x06, 0x70, 0x36, 0xdb, 0x24, 0xcd, 0xc9, 0x45, 0xed, 0x14, 0x61, 0x89, 0x22, 0xa1, 0x17,
@@ -12,7 +12,8 @@ import (
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/structs/config"
	vapi "github.com/hashicorp/vault/api"
	"github.com/mitchellh/go-testing-interface"
	testing "github.com/mitchellh/go-testing-interface"
	"github.com/stretchr/testify/require"
)

// TestVault is a test helper. It uses a fork/exec model to create a test Vault
@@ -167,13 +168,16 @@ func NewTestVaultDelayed(t testing.T) *TestVault {
// Start starts the test Vault server and waits for it to respond to its HTTP
// API
func (tv *TestVault) Start() error {
	if err := tv.cmd.Start(); err != nil {
		tv.t.Fatalf("failed to start vault: %v", err)
	}

	// Start the waiter
	tv.waitCh = make(chan error, 1)

	go func() {
		// Must call Start and Wait in the same goroutine on Windows #5174
		if err := tv.cmd.Start(); err != nil {
			tv.waitCh <- err
			return
		}

		err := tv.cmd.Wait()
		tv.waitCh <- err
	}()
@@ -198,7 +202,12 @@ func (tv *TestVault) Stop() {
		tv.t.Errorf("err: %s", err)
	}
	if tv.waitCh != nil {
		<-tv.waitCh
		select {
		case <-tv.waitCh:
			return
		case <-time.After(1 * time.Second):
			require.Fail(tv.t, "Timed out waiting for vault to terminate")
		}
	}
}
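The change above moves `cmd.Start` into the same goroutine as `cmd.Wait` and bounds the shutdown wait. Below is a rough, self-contained sketch of that pattern using plain `os/exec` (not Nomad code; the `sleep` child is a hypothetical stand-in):

package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "10") // hypothetical long-running child
	started := make(chan error, 1)
	waitCh := make(chan error, 1)

	go func() {
		// Start and Wait must run in the same goroutine (required on
		// Windows, per the comment in the diff above).
		err := cmd.Start()
		started <- err
		if err != nil {
			return
		}
		waitCh <- cmd.Wait()
	}()

	if err := <-started; err != nil {
		fmt.Println("failed to start:", err)
		return
	}

	// Stop the child, then wait for it with a timeout so a hung process
	// cannot block the caller forever.
	_ = cmd.Process.Kill()
	select {
	case err := <-waitCh:
		fmt.Println("child exited:", err)
	case <-time.After(1 * time.Second):
		fmt.Println("timed out waiting for child to terminate")
	}
}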
vendor/github.com/docker/docker/LICENSE (generated, vendored)
@@ -176,7 +176,7 @@

END OF TERMS AND CONDITIONS

Copyright 2013-2017 Docker, Inc.
Copyright 2013-2018 Docker, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
vendor/github.com/docker/docker/api/types/configs.go (generated, vendored)
@@ -55,3 +55,10 @@ type PluginEnableConfig struct {
type PluginDisableConfig struct {
	ForceDisable bool
}

// NetworkListConfig stores the options available for listing networks
type NetworkListConfig struct {
	// TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here
	Detailed bool
	Verbose  bool
}
vendor/github.com/docker/docker/api/types/container/host_config.go (generated, vendored)
@@ -329,6 +329,7 @@ type Resources struct {
	DeviceCgroupRules []string // List of rule to be added to the device cgroup
	DiskQuota         int64    // Disk limit (in bytes)
	KernelMemory      int64    // Kernel memory limit (in bytes)
	KernelMemoryTCP   int64    // Hard limit for kernel TCP buffer memory (in bytes)
	MemoryReservation int64    // Memory soft limit (in bytes)
	MemorySwap        int64    // Total memory usage (memory + swap); set `-1` to enable unlimited swap
	MemorySwappiness  *int64   // Tuning container memory swappiness behaviour
vendor/github.com/docker/docker/api/types/filters/parse.go (generated, vendored)
@@ -323,6 +323,22 @@ func (args Args) WalkValues(field string, op func(value string) error) error {
	return nil
}

// Clone returns a copy of args.
func (args Args) Clone() (newArgs Args) {
	newArgs.fields = make(map[string]map[string]bool, len(args.fields))
	for k, m := range args.fields {
		var mm map[string]bool
		if m != nil {
			mm = make(map[string]bool, len(m))
			for kk, v := range m {
				mm[kk] = v
			}
		}
		newArgs.fields[k] = mm
	}
	return newArgs
}

func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
	m := map[string]map[string]bool{}
	for k, v := range d {
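A small usage sketch of the new `Clone` (hand-written for illustration; `NewArgs`, `Add`, and `Get` are existing helpers in the same package). The deep copy of the nested `map[string]bool` values is what keeps mutations of the clone from leaking back into the original:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	orig := filters.NewArgs()
	orig.Add("label", "env=prod")

	clone := orig.Clone()
	clone.Add("label", "env=dev") // does not touch orig

	fmt.Println(orig.Get("label"))  // [env=prod]
	fmt.Println(clone.Get("label")) // env=prod and env=dev (order not guaranteed)
}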
vendor/github.com/docker/docker/api/types/mount/mount.go (generated, vendored)
@@ -80,6 +80,7 @@ const (
// BindOptions defines options specific to mounts of type "bind".
type BindOptions struct {
	Propagation  Propagation `json:",omitempty"`
	NonRecursive bool        `json:",omitempty"`
}

// VolumeOptions represents the options for a mount of type volume.
18 changes: vendor/github.com/docker/docker/api/types/network/network.go (generated, vendored)

@@ -1,4 +1,8 @@
 package network // import "github.com/docker/docker/api/types/network"
+import (
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/errdefs"
+)
 
 // Address represents an IP address
 type Address struct {
@@ -106,3 +110,17 @@ type NetworkingConfig struct {
 type ConfigReference struct {
 	Network string
 }
+
+var acceptedFilters = map[string]bool{
+	"driver": true,
+	"type":   true,
+	"name":   true,
+	"id":     true,
+	"label":  true,
+	"scope":  true,
+}
+
+// ValidateFilters validates the list of filter args with the available filters.
+func ValidateFilters(filter filters.Args) error {
+	return errdefs.InvalidParameter(filter.Validate(acceptedFilters))
+}
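A quick sketch of the new helper (filter keys illustrative): keys outside the accepted set are rejected and wrapped as an invalid-parameter error via errdefs.

	package main

	import (
		"fmt"

		"github.com/docker/docker/api/types/filters"
		"github.com/docker/docker/api/types/network"
		"github.com/docker/docker/errdefs"
	)

	func main() {
		args := filters.NewArgs()
		args.Add("driver", "bridge") // in acceptedFilters: fine
		fmt.Println(network.ValidateFilters(args)) // <nil>

		args.Add("bogus", "x") // not an accepted filter key
		err := network.ValidateFilters(args)
		fmt.Println(errdefs.IsInvalidParameter(err)) // true
	}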
4 changes: vendor/github.com/docker/docker/api/types/stats.go (generated, vendored)

@@ -120,7 +120,7 @@ type NetworkStats struct {
 	RxBytes uint64 `json:"rx_bytes"`
 	// Packets received. Windows and Linux.
 	RxPackets uint64 `json:"rx_packets"`
-	// Received errors. Not used on Windows. Note that we dont `omitempty` this
+	// Received errors. Not used on Windows. Note that we don't `omitempty` this
 	// field as it is expected in the >=v1.21 API stats structure.
 	RxErrors uint64 `json:"rx_errors"`
 	// Incoming packets dropped. Windows and Linux.
@@ -129,7 +129,7 @@ type NetworkStats struct {
 	TxBytes uint64 `json:"tx_bytes"`
 	// Packets sent. Windows and Linux.
 	TxPackets uint64 `json:"tx_packets"`
-	// Sent errors. Not used on Windows. Note that we dont `omitempty` this
+	// Sent errors. Not used on Windows. Note that we don't `omitempty` this
 	// field as it is expected in the >=v1.21 API stats structure.
 	TxErrors uint64 `json:"tx_errors"`
 	// Outgoing packets dropped. Windows and Linux.
1 change: vendor/github.com/docker/docker/api/types/swarm/container.go (generated, vendored)

@@ -71,4 +71,5 @@ type ContainerSpec struct {
 	Secrets   []*SecretReference  `json:",omitempty"`
 	Configs   []*ConfigReference  `json:",omitempty"`
 	Isolation container.Isolation `json:",omitempty"`
+	Sysctls   map[string]string   `json:",omitempty"`
 }
10 changes: vendor/github.com/docker/docker/api/types/swarm/swarm.go (generated, vendored)

@@ -1,6 +1,8 @@
 package swarm // import "github.com/docker/docker/api/types/swarm"
 
-import "time"
+import (
+	"time"
+)
 
 // ClusterInfo represents info about the cluster for outputting in "info"
 // it contains the same information as "Swarm", but without the JoinTokens
@@ -10,6 +12,9 @@ type ClusterInfo struct {
 	Spec                   Spec
 	TLSInfo                TLSInfo
 	RootRotationInProgress bool
+	DefaultAddrPool        []string
+	SubnetSize             uint32
+	DataPathPort           uint32
 }
 
 // Swarm represents a swarm.
@@ -149,10 +154,13 @@ type InitRequest struct {
 	ListenAddr       string
 	AdvertiseAddr    string
 	DataPathAddr     string
+	DataPathPort     uint32
 	ForceNewCluster  bool
 	Spec             Spec
 	AutoLockManagers bool
 	Availability     NodeAvailability
+	DefaultAddrPool  []string
+	SubnetSize       uint32
 }
 
 // JoinRequest is the request used to join a swarm.
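A hedged sketch of the new address-pool fields in use, not part of this diff; the addresses, subnet size, and port are illustrative assumptions, while SwarmInit is the standard client call for this request type.

	package main

	import (
		"context"

		"github.com/docker/docker/api/types/swarm"
		"github.com/docker/docker/client"
	)

	func main() {
		cli, err := client.NewClientWithOpts(client.FromEnv)
		if err != nil {
			panic(err)
		}
		nodeID, err := cli.SwarmInit(context.Background(), swarm.InitRequest{
			ListenAddr:      "0.0.0.0:2377",
			DefaultAddrPool: []string{"10.20.0.0/16"}, // new: pool used to allocate overlay subnets
			SubnetSize:      24,                       // new: size of each allocated subnet
			DataPathPort:    4789,                     // new: UDP port for VXLAN data traffic
		})
		_, _ = nodeID, err
	}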
20 changes: vendor/github.com/docker/docker/api/types/types.go (generated, vendored)

@@ -105,6 +105,7 @@ type Ping struct {
 	APIVersion     string
 	OSType         string
 	Experimental   bool
+	BuilderVersion BuilderVersion
 }
 
 // ComponentVersion describes the version information for a specific component.
@@ -157,6 +158,7 @@ type Info struct {
 	MemoryLimit        bool
 	SwapLimit          bool
 	KernelMemory       bool
+	KernelMemoryTCP    bool
 	CPUCfsPeriod       bool `json:"CpuCfsPeriod"`
 	CPUCfsQuota        bool `json:"CpuCfsQuota"`
 	CPUShares          bool
@@ -204,6 +206,8 @@ type Info struct {
 	RuncCommit         Commit
 	InitCommit         Commit
 	SecurityOptions    []string
+	ProductLicense     string `json:",omitempty"`
+	Warnings           []string
 }
 
 // KeyValue holds a key/value pair
@@ -540,6 +544,7 @@ type ImagesPruneReport struct {
 // BuildCachePruneReport contains the response for Engine API:
 // POST "/build/prune"
 type BuildCachePruneReport struct {
+	CachesDeleted  []string
 	SpaceReclaimed uint64
 }
 
@@ -590,13 +595,20 @@ type BuildResult struct {
 // BuildCache contains information about a build cache record
 type BuildCache struct {
 	ID          string
-	Mutable     bool
+	Parent      string
+	Type        string
+	Description string
 	InUse       bool
+	Shared      bool
 	Size        int64
+
 	CreatedAt   time.Time
 	LastUsedAt  *time.Time
 	UsageCount  int
-	Parent      string
-	Description string
 }
 
+// BuildCachePruneOptions hold parameters to prune the build cache
+type BuildCachePruneOptions struct {
+	All         bool
+	KeepStorage int64
+	Filters     filters.Args
+}
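A hedged sketch of the new prune options in use, not part of this diff; that BuildCachePrune accepts this options struct is an assumption about the client version vendored alongside these types, and the values shown are illustrative.

	package main

	import (
		"context"
		"fmt"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/api/types/filters"
		"github.com/docker/docker/client"
	)

	func main() {
		cli, err := client.NewClientWithOpts(client.FromEnv)
		if err != nil {
			panic(err)
		}
		report, err := cli.BuildCachePrune(context.Background(), types.BuildCachePruneOptions{
			All:         true,              // prune shared/in-use records too
			KeepStorage: 1 << 30,           // keep up to 1 GiB of build cache
			Filters:     filters.NewArgs(), // no additional filtering
		})
		if err == nil {
			fmt.Println(report.CachesDeleted, report.SpaceReclaimed)
		}
	}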
73 changes: vendor/github.com/docker/docker/pkg/archive/archive.go (generated, vendored)

@@ -52,7 +52,7 @@ type (
 		NoLchown         bool
 		UIDMaps          []idtools.IDMap
 		GIDMaps          []idtools.IDMap
-		ChownOpts        *idtools.IDPair
+		ChownOpts        *idtools.Identity
 		IncludeSourceDir bool
 		// WhiteoutFormat is the expected on disk format for whiteout files.
 		// This format will be converted to the standard format on pack
@@ -73,12 +73,12 @@
 // mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
 type Archiver struct {
 	Untar         func(io.Reader, string, *TarOptions) error
-	IDMappingsVar *idtools.IDMappings
+	IDMapping     *idtools.IdentityMapping
 }
 
-// NewDefaultArchiver returns a new Archiver without any IDMappings
+// NewDefaultArchiver returns a new Archiver without any IdentityMapping
 func NewDefaultArchiver() *Archiver {
-	return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}}
+	return &Archiver{Untar: Untar, IDMapping: &idtools.IdentityMapping{}}
 }
 
 // breakoutError is used to differentiate errors related to breaking out
@@ -367,11 +367,7 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro
 	hdr.AccessTime = time.Time{}
 	hdr.ChangeTime = time.Time{}
 	hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
-	name, err = canonicalTarName(name, fi.IsDir())
-	if err != nil {
-		return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err)
-	}
-	hdr.Name = name
+	hdr.Name = canonicalTarName(name, fi.IsDir())
 	if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
 		return nil, err
 	}
@@ -425,8 +421,8 @@ type tarAppender struct {
 
 	// for hardlink mapping
 	SeenFiles       map[uint64]string
-	IDMappings      *idtools.IDMappings
-	ChownOpts       *idtools.IDPair
+	IdentityMapping *idtools.IdentityMapping
+	ChownOpts       *idtools.Identity
 
 	// For packing and unpacking whiteout files in the
 	// non standard format. The whiteout files defined
@@ -435,29 +431,26 @@ type tarAppender struct {
 	WhiteoutConverter tarWhiteoutConverter
 }
 
-func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
+func newTarAppender(idMapping *idtools.IdentityMapping, writer io.Writer, chownOpts *idtools.Identity) *tarAppender {
 	return &tarAppender{
 		SeenFiles:       make(map[uint64]string),
 		TarWriter:       tar.NewWriter(writer),
 		Buffer:          pools.BufioWriter32KPool.Get(nil),
-		IDMappings:      idMapping,
+		IdentityMapping: idMapping,
 		ChownOpts:       chownOpts,
 	}
 }
 
 // canonicalTarName provides a platform-independent and consistent posix-style
 //path for files and directories to be archived regardless of the platform.
-func canonicalTarName(name string, isDir bool) (string, error) {
-	name, err := CanonicalTarNameForPath(name)
-	if err != nil {
-		return "", err
-	}
+func canonicalTarName(name string, isDir bool) string {
+	name = CanonicalTarNameForPath(name)
 
 	// suffix with '/' for directories
 	if isDir && !strings.HasSuffix(name, "/") {
 		name += "/"
 	}
-	return name, nil
+	return name
 }
 
 // addTarFile adds to the tar archive a file from `path` as `name`
@@ -509,14 +502,12 @@ func (ta *tarAppender) addTarFile(path, name string) error {
 	//handle re-mapping container ID mappings back to host ID mappings before
 	//writing tar headers/files. We skip whiteout files because they were written
 	//by the kernel and already have proper ownership relative to the host
-	if !isOverlayWhiteout &&
-		!strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) &&
-		!ta.IDMappings.Empty() {
+	if !isOverlayWhiteout && !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IdentityMapping.Empty() {
 		fileIDPair, err := getFileUIDGID(fi.Sys())
 		if err != nil {
 			return err
 		}
-		hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
+		hdr.Uid, hdr.Gid, err = ta.IdentityMapping.ToContainer(fileIDPair)
 		if err != nil {
 			return err
 		}
@@ -579,7 +570,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
 	return nil
 }
 
-func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error {
+func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.Identity, inUserns bool) error {
 	// hdr.Mode is in linux format, which we can use for sycalls,
 	// but for os.Foo() calls we need the mode converted to os.FileMode,
 	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
@@ -659,7 +650,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 	// Lchown is not supported on Windows.
 	if Lchown && runtime.GOOS != "windows" {
 		if chownOpts == nil {
-			chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
+			chownOpts = &idtools.Identity{UID: hdr.Uid, GID: hdr.Gid}
 		}
 		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
 			return err
@@ -669,11 +660,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 		var errors []string
 		for key, value := range hdr.Xattrs {
 			if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
-				if err == syscall.ENOTSUP {
+				if err == syscall.ENOTSUP || err == syscall.EPERM {
 					// We ignore errors here because not all graphdrivers support
 					// xattrs *cough* old versions of AUFS *cough*. However only
 					// ENOTSUP should be emitted in that case, otherwise we still
 					// bail.
+					// EPERM occurs if modifying xattrs is not allowed. This can
+					// happen when running in userns with restrictions (ChromeOS).
 					errors = append(errors, err.Error())
 					continue
 				}
@@ -908,8 +901,8 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
 	defer pools.BufioReader32KPool.Put(trBuf)
 
 	var dirs []*tar.Header
-	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
-	rootIDs := idMappings.RootPair()
+	idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
+	rootIDs := idMapping.RootPair()
 	whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
 
 	// Iterate through the files in the archive.
@@ -988,7 +981,7 @@ loop:
 		}
 		trBuf.Reset(tr)
 
-		if err := remapIDs(idMappings, hdr); err != nil {
+		if err := remapIDs(idMapping, hdr); err != nil {
 			return err
 		}
 
@@ -1075,8 +1068,8 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
 	}
 	defer archive.Close()
 	options := &TarOptions{
-		UIDMaps: archiver.IDMappingsVar.UIDs(),
-		GIDMaps: archiver.IDMappingsVar.GIDs(),
+		UIDMaps: archiver.IDMapping.UIDs(),
+		GIDMaps: archiver.IDMapping.GIDs(),
 	}
 	return archiver.Untar(archive, dst, options)
 }
@@ -1089,8 +1082,8 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
 	}
 	defer archive.Close()
 	options := &TarOptions{
-		UIDMaps: archiver.IDMappingsVar.UIDs(),
-		GIDMaps: archiver.IDMappingsVar.GIDs(),
+		UIDMaps: archiver.IDMapping.UIDs(),
+		GIDMaps: archiver.IDMapping.GIDs(),
 	}
 	return archiver.Untar(archive, dst, options)
 }
@@ -1111,7 +1104,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
 	// if this Archiver is set up with ID mapping we need to create
 	// the new destination directory with the remapped root UID/GID pair
 	// as owner
-	rootIDs := archiver.IDMappingsVar.RootPair()
+	rootIDs := archiver.IDMapping.RootPair()
 	// Create dst, copy src's content into it
 	logrus.Debugf("Creating dest directory: %s", dst)
 	if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
@@ -1171,7 +1164,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
 		hdr.Name = filepath.Base(dst)
 		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
 
-		if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil {
+		if err := remapIDs(archiver.IDMapping, hdr); err != nil {
 			return err
 		}
 
@@ -1199,13 +1192,13 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
 	return err
 }
 
-// IDMappings returns the IDMappings of the archiver.
-func (archiver *Archiver) IDMappings() *idtools.IDMappings {
-	return archiver.IDMappingsVar
+// IdentityMapping returns the IdentityMapping of the archiver.
+func (archiver *Archiver) IdentityMapping() *idtools.IdentityMapping {
+	return archiver.IDMapping
 }
 
-func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
-	ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
+func remapIDs(idMapping *idtools.IdentityMapping, hdr *tar.Header) error {
+	ids, err := idMapping.ToHost(idtools.Identity{UID: hdr.Uid, GID: hdr.Gid})
 	hdr.Uid, hdr.Gid = ids.UID, ids.GID
 	return err
 }
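To tie the IDPair-to-Identity rename together, a minimal sketch (mapping values illustrative, not part of this diff) of the IdentityMapping flow that remapIDs applies to every tar header:

	package main

	import (
		"fmt"

		"github.com/docker/docker/pkg/idtools"
	)

	func main() {
		// One contiguous range: container IDs 0..65535 live at host IDs 100000+.
		uidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
		gidMaps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

		mapping := idtools.NewIDMappingsFromMaps(uidMaps, gidMaps)

		// ToHost is the call remapIDs makes for each header on unpack.
		host, err := mapping.ToHost(idtools.Identity{UID: 0, GID: 0})
		fmt.Println(host.UID, host.GID, err) // 100000 100000 <nil>
	}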
10 changes: vendor/github.com/docker/docker/pkg/archive/archive_unix.go (generated, vendored)

@@ -32,8 +32,8 @@ func getWalkRoot(srcPath string, include string) string {
 // CanonicalTarNameForPath returns platform-specific filepath
 // to canonical posix-style path for tar archival. p is relative
 // path.
-func CanonicalTarNameForPath(p string) (string, error) {
-	return p, nil // already unix-style
+func CanonicalTarNameForPath(p string) string {
+	return p // already unix-style
 }
 
 // chmodTarEntry is used to adjust the file permissions used in tar header based
@@ -68,13 +68,13 @@ func getInodeFromStat(stat interface{}) (inode uint64, err error) {
 	return
 }
 
-func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
 	s, ok := stat.(*syscall.Stat_t)
 
 	if !ok {
-		return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
+		return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t")
 	}
-	return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
+	return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
 }
 
 // handleTarTypeBlockCharFifo is an OS-specific helper function used by
18 changes: vendor/github.com/docker/docker/pkg/archive/archive_windows.go (generated, vendored)

@@ -2,10 +2,8 @@ package archive // import "github.com/docker/docker/pkg/archive"
 
 import (
 	"archive/tar"
-	"fmt"
 	"os"
 	"path/filepath"
-	"strings"
 
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/longpath"
@@ -26,16 +24,8 @@ func getWalkRoot(srcPath string, include string) string {
 // CanonicalTarNameForPath returns platform-specific filepath
 // to canonical posix-style path for tar archival. p is relative
 // path.
-func CanonicalTarNameForPath(p string) (string, error) {
-	// windows: convert windows style relative path with backslashes
-	// into forward slashes. Since windows does not allow '/' or '\'
-	// in file names, it is mostly safe to replace however we must
-	// check just in case
-	if strings.Contains(p, "/") {
-		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
-	}
-	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
-
+func CanonicalTarNameForPath(p string) string {
+	return filepath.ToSlash(p)
 }
 
 // chmodTarEntry is used to adjust the file permissions used in tar header based
@@ -71,7 +61,7 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
 	return nil
 }
 
-func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
+func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
 	// no notion of file ownership mapping yet on Windows
-	return idtools.IDPair{UID: 0, GID: 0}, nil
+	return idtools.Identity{UID: 0, GID: 0}, nil
 }
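The Windows rewrite above reduces canonicalization to filepath.ToSlash from the standard library, dropping the old forward-slash error path. A quick illustration:

	package main

	import (
		"fmt"
		"path/filepath"
	)

	func main() {
		// On Windows, ToSlash replaces each `\` separator with `/`:
		fmt.Println(filepath.ToSlash(`dir\sub\file.txt`)) // dir/sub/file.txt on Windows
		// On Unix, filepath.Separator is already '/', so the input passes through unchanged.
	}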
12 changes: vendor/github.com/docker/docker/pkg/archive/changes.go (generated, vendored)

@@ -63,12 +63,16 @@ func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
 func (c changesByPath) Len() int           { return len(c) }
 func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }
 
-// Gnu tar and the go tar writer don't have sub-second mtime
-// precision, which is problematic when we apply changes via tar
-// files, we handle this by comparing for exact times, *or* same
+// Gnu tar doesn't have sub-second mtime precision. The go tar
+// writer (1.10+) does when using PAX format, but we round times to seconds
+// to ensure archives have the same hashes for backwards compatibility.
+// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
+//
+// Non-sub-second is problematic when we apply changes via tar
+// files. We handle this by comparing for exact times, *or* same
 // second count and either a or b having exactly 0 nanoseconds
 func sameFsTime(a, b time.Time) bool {
-	return a == b ||
+	return a.Equal(b) ||
 		(a.Unix() == b.Unix() &&
 			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
 }
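The comparison from the hunk above, copied into a runnable snippet with an illustrative check of its two accept paths:

	package main

	import (
		"fmt"
		"time"
	)

	// sameFsTime as rewritten above: exact equality, or same whole second
	// when either side carries zero nanoseconds.
	func sameFsTime(a, b time.Time) bool {
		return a.Equal(b) ||
			(a.Unix() == b.Unix() &&
				(a.Nanosecond() == 0 || b.Nanosecond() == 0))
	}

	func main() {
		t := time.Date(2019, 1, 1, 12, 0, 0, 500, time.UTC)
		fmt.Println(sameFsTime(t, t.Truncate(time.Second))) // true: same second, one side has 0ns
		fmt.Println(sameFsTime(t, t.Add(time.Second)))      // false: different second
	}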
Some files were not shown because too many files have changed in this diff.