Merge branch 'master' into f_gh_4381

commit 75f95ccf09
James Rasell, 2018-06-19 17:51:57 +02:00, committed by GitHub
133 changed files with 1487 additions and 1312 deletions


@@ -1,4 +1,4 @@
-## 0.8.4 (Unreleased)
+## 0.8.4 (June 11, 2018)

IMPROVEMENTS:
* core: Updated serf library to improve how leave intents are handled [[GH-4278](https://github.com/hashicorp/nomad/issues/4278)]
@@ -36,12 +36,14 @@ IMPROVEMENTS:
  an empty string rather than the field key. [[GH-3720](https://github.com/hashicorp/nomad/issues/3720)]
* ui: Show node drain, node eligibility, and node drain strategy information in the Client list and Client detail pages [[GH-4353](https://github.com/hashicorp/nomad/issues/4353)]
* ui: Show reschedule-event information for allocations that were server-side rescheduled [[GH-4254](https://github.com/hashicorp/nomad/issues/4254)]
+* ui: Show the running deployment Progress Deadlines on the Job Detail Page [[GH-4388](https://github.com/hashicorp/nomad/issues/4388)]
* ui: Show driver health status and node events on the Client Detail Page [[GH-4294](https://github.com/hashicorp/nomad/issues/4294)]
* ui: Fuzzy and tokenized search on the Jobs List Page [[GH-4201](https://github.com/hashicorp/nomad/issues/4201)]
* ui: The stop job button looks more dangerous [[GH-4339](https://github.com/hashicorp/nomad/issues/4339)]

BUG FIXES:
* core: Clean up leaked deployments on restoration [[GH-4329](https://github.com/hashicorp/nomad/issues/4329)]
+* core: Fix regression to allow for dynamic Vault configuration reload [[GH-4395](https://github.com/hashicorp/nomad/issues/4395)]
* core: Fix bug where older failed allocations of jobs that have been updated to a newer version were
  not being garbage collected [[GH-4313](https://github.com/hashicorp/nomad/issues/4313)]
* core: Fix bug when upgrading an existing server to Raft protocol 3 that
@@ -57,9 +59,10 @@ BUG FIXES:
* driver/exec: Disable exec on non-linux platforms [[GH-4366](https://github.com/hashicorp/nomad/issues/4366)]
* rpc: Fix RPC tunneling when running both client/server on one machine [[GH-4317](https://github.com/hashicorp/nomad/issues/4317)]
* ui: Track the method in XHR tracking to prevent errant ACL error dialogs when stopping a job [[GH-4319](https://github.com/hashicorp/nomad/issues/4319)]
+* ui: Make the tasks list on the Allocation Detail Page look and behave like other lists [[GH-4387](https://github.com/hashicorp/nomad/issues/4387)] [[GH-4393](https://github.com/hashicorp/nomad/issues/4393)]
+* ui: Use the Network IP, not the Node IP, for task addresses [[GH-4369](https://github.com/hashicorp/nomad/issues/4369)]
* ui: Use Polling instead of Streaming for logs in Safari [[GH-4335](https://github.com/hashicorp/nomad/issues/4335)]
* ui: Track PlaceCanaries in deployment metrics [[GH-4325](https://github.com/hashicorp/nomad/issues/4325)]
-* ui: Use the Network IP, not the Node IP, for task addresses [[GH-4369](https://github.com/hashicorp/nomad/issues/4369)]

## 0.8.3 (April 27, 2018)


@@ -288,7 +288,6 @@ test-ui: ## Run Nomad UI test suite
	@cd ui && npm rebuild node-sass
	@cd ui && yarn install
	@echo "--> Running ember tests"
-	@cd ui && phantomjs --version
	@cd ui && npm test

.PHONY: ember-dist

Vagrantfile

@@ -15,6 +15,22 @@ Vagrant.configure(2) do |config|
    vmCfg = configureLinuxProvisioners(vmCfg)

+    vmCfg.vm.synced_folder '.',
+      '/opt/gopath/src/github.com/hashicorp/nomad'
+
+    vmCfg.vm.provision "shell",
+      privileged: false,
+      path: './scripts/vagrant-linux-unpriv-bootstrap.sh'
+  end
+
+  config.vm.define "linux-ui", autostart: false, primary: false do |vmCfg|
+    vmCfg.vm.box = LINUX_BASE_BOX
+    vmCfg.vm.hostname = "linux"
+
+    vmCfg = configureProviders vmCfg,
+      cpus: suggestedCPUCores()
+
+    vmCfg = configureLinuxProvisioners(vmCfg)
    vmCfg.vm.synced_folder '.',
      '/opt/gopath/src/github.com/hashicorp/nomad'


@@ -614,6 +614,7 @@ type Job struct {
	Update           *UpdateStrategy
	Periodic         *PeriodicConfig
	ParameterizedJob *ParameterizedJobConfig
+	Dispatched       bool
	Payload          []byte
	Reschedule       *ReschedulePolicy
	Migrate          *MigrateStrategy
@@ -636,7 +637,7 @@ func (j *Job) IsPeriodic() bool {
// IsParameterized returns whether a job is parameterized job.
func (j *Job) IsParameterized() bool {
-	return j.ParameterizedJob != nil
+	return j.ParameterizedJob != nil && !j.Dispatched
}

func (j *Job) Canonicalize() {
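Taken together, the two hunks above are the crux of GH-4381: a dispatched child of a parameterized job carries its parent's ParameterizedJob block, so without the new Dispatched flag it would itself be treated as a non-runnable template. A minimal, self-contained sketch of the semantics (the stub types here are illustrative, not the real definitions):

```go
package main

import "fmt"

// Illustrative stubs; the real Job struct has many more fields.
type ParameterizedJobConfig struct{}

type Job struct {
	ParameterizedJob *ParameterizedJobConfig
	Dispatched       bool
}

// IsParameterized mirrors the patched method: a dispatched child still
// carries its parent's ParameterizedJob config but must stay schedulable.
func (j *Job) IsParameterized() bool {
	return j.ParameterizedJob != nil && !j.Dispatched
}

func main() {
	parent := &Job{ParameterizedJob: &ParameterizedJobConfig{}}
	child := &Job{ParameterizedJob: &ParameterizedJobConfig{}, Dispatched: true}

	fmt.Println(parent.IsParameterized()) // true: a template, never scheduled directly
	fmt.Println(child.IsParameterized())  // false: dispatched children run normally
}
```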


@@ -15,8 +15,9 @@ import (
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/client/testutil"
+	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/structs"
-	"github.com/kr/pretty"
+	"github.com/stretchr/testify/require"
)

var (
@@ -45,13 +46,6 @@
	}
)

-func testLogger() *log.Logger {
-	if testing.Verbose() {
-		return log.New(os.Stderr, "", log.LstdFlags)
-	}
-	return log.New(ioutil.Discard, "", log.LstdFlags)
-}

// Test that AllocDir.Build builds just the alloc directory.
func TestAllocDir_BuildAlloc(t *testing.T) {
	tmp, err := ioutil.TempDir("", "AllocDir")
@@ -60,7 +54,7 @@ func TestAllocDir_BuildAlloc(t *testing.T) {
	}
	defer os.RemoveAll(tmp)

-	d := NewAllocDir(testLogger(), tmp)
+	d := NewAllocDir(testlog.Logger(t), tmp)
	defer d.Destroy()
	d.NewTaskDir(t1.Name)
	d.NewTaskDir(t2.Name)
@@ -97,7 +91,7 @@ func TestAllocDir_MountSharedAlloc(t *testing.T) {
	}
	defer os.RemoveAll(tmp)

-	d := NewAllocDir(testLogger(), tmp)
+	d := NewAllocDir(testlog.Logger(t), tmp)
	defer d.Destroy()
	if err := d.Build(); err != nil {
		t.Fatalf("Build() failed: %v", err)
@@ -142,7 +136,7 @@ func TestAllocDir_Snapshot(t *testing.T) {
	}
	defer os.RemoveAll(tmp)

-	d := NewAllocDir(testLogger(), tmp)
+	d := NewAllocDir(testlog.Logger(t), tmp)
	defer d.Destroy()
	if err := d.Build(); err != nil {
		t.Fatalf("Build() failed: %v", err)
@@ -229,13 +223,13 @@ func TestAllocDir_Move(t *testing.T) {
	defer os.RemoveAll(tmp2)

	// Create two alloc dirs
-	d1 := NewAllocDir(testLogger(), tmp1)
+	d1 := NewAllocDir(testlog.Logger(t), tmp1)
	if err := d1.Build(); err != nil {
		t.Fatalf("Build() failed: %v", err)
	}
	defer d1.Destroy()

-	d2 := NewAllocDir(testLogger(), tmp2)
+	d2 := NewAllocDir(testlog.Logger(t), tmp2)
	if err := d2.Build(); err != nil {
		t.Fatalf("Build() failed: %v", err)
	}
@@ -290,7 +284,7 @@ func TestAllocDir_EscapeChecking(t *testing.T) {
	}
	defer os.RemoveAll(tmp)

-	d := NewAllocDir(testLogger(), tmp)
+	d := NewAllocDir(testlog.Logger(t), tmp)
	if err := d.Build(); err != nil {
		t.Fatalf("Build() failed: %v", err)
	}
@@ -331,7 +325,7 @@ func TestAllocDir_ReadAt_SecretDir(t *testing.T) {
	}
	defer os.RemoveAll(tmp)

-	d := NewAllocDir(testLogger(), tmp)
+	d := NewAllocDir(testlog.Logger(t), tmp)
	if err := d.Build(); err != nil {
		t.Fatalf("Build() failed: %v", err)
	}
@@ -416,14 +410,14 @@ func TestAllocDir_CreateDir(t *testing.T) {
// TestAllocDir_Copy asserts that AllocDir.Copy does a deep copy of itself and
// all TaskDirs.
func TestAllocDir_Copy(t *testing.T) {
-	a := NewAllocDir(testLogger(), "foo")
+	a := NewAllocDir(testlog.Logger(t), "foo")
	a.NewTaskDir("bar")
	a.NewTaskDir("baz")

	b := a.Copy()
-	if diff := pretty.Diff(a, b); len(diff) > 0 {
-		t.Errorf("differences between copies: %# v", pretty.Formatter(diff))
-	}
+
+	// Clear the logger
+	require.Equal(t, a, b)

	// Make sure TaskDirs map is copied
	a.NewTaskDir("new")
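The test hunks above (and the analogous ones in the next two files) replace each file's private testLogger() helper with the shared helper/testlog package. Below is a rough reconstruction of what that package must provide, inferred from the two call forms seen in this diff — testlog.Logger(t) and testlog.WithPrefix(t, prefix) — and from the deleted helper; the actual implementation may differ, e.g. by writing through t.Logf rather than stderr:

```go
// Package testlog: illustrative reconstruction only, not the canonical code.
package testlog

import (
	"io/ioutil"
	"log"
	"os"
	"testing"
)

// Logger returns a logger for the given test, mirroring the deleted
// per-file testLogger() helpers.
func Logger(t *testing.T) *log.Logger {
	return WithPrefix(t, "")
}

// WithPrefix matches the testlog.WithPrefix(t, "ar2: ") calls used later in
// this diff. Verbose runs log to stderr; quiet runs discard all output.
func WithPrefix(t *testing.T, prefix string) *log.Logger {
	if testing.Verbose() {
		return log.New(os.Stderr, prefix, log.LstdFlags)
	}
	return log.New(ioutil.Discard, prefix, log.LstdFlags)
}
```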


@@ -6,6 +6,7 @@ import (
	"path/filepath"
	"testing"

+	"github.com/hashicorp/nomad/helper/testlog"
	"golang.org/x/sys/unix"
)

@@ -21,7 +22,7 @@ func TestLinuxSpecialDirs(t *testing.T) {
	}
	defer os.RemoveAll(allocDir)

-	td := newTaskDir(testLogger(), allocDir, "test")
+	td := newTaskDir(testlog.Logger(t), allocDir, "test")

	// Despite the task dir not existing, unmountSpecialDirs should *not*
	// return an error


@@ -7,6 +7,7 @@ import (
	"testing"

	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
)

// Test that building a chroot will skip nonexistent directories.
@@ -17,7 +18,7 @@ func TestTaskDir_EmbedNonexistent(t *testing.T) {
	}
	defer os.RemoveAll(tmp)

-	d := NewAllocDir(testLogger(), tmp)
+	d := NewAllocDir(testlog.Logger(t), tmp)
	defer d.Destroy()
	td := d.NewTaskDir(t1.Name)
	if err := d.Build(); err != nil {
@@ -39,7 +40,7 @@ func TestTaskDir_EmbedDirs(t *testing.T) {
	}
	defer os.RemoveAll(tmp)

-	d := NewAllocDir(testLogger(), tmp)
+	d := NewAllocDir(testlog.Logger(t), tmp)
	defer d.Destroy()
	td := d.NewTaskDir(t1.Name)
	if err := d.Build(); err != nil {
@@ -96,7 +97,7 @@ func TestTaskDir_NonRoot_Image(t *testing.T) {
	}
	defer os.RemoveAll(tmp)

-	d := NewAllocDir(testLogger(), tmp)
+	d := NewAllocDir(testlog.Logger(t), tmp)
	defer d.Destroy()
	td := d.NewTaskDir(t1.Name)
	if err := d.Build(); err != nil {
@@ -119,7 +120,7 @@ func TestTaskDir_NonRoot(t *testing.T) {
	}
	defer os.RemoveAll(tmp)

-	d := NewAllocDir(testLogger(), tmp)
+	d := NewAllocDir(testlog.Logger(t), tmp)
	defer d.Destroy()
	td := d.NewTaskDir(t1.Name)
	if err := d.Build(); err != nil {


@@ -1,10 +1,9 @@
-package client
+package allocrunner

import (
	"context"
	"fmt"
	"log"
-	"os"
	"path/filepath"
	"sync"
	"time"
@@ -13,7 +12,10 @@ import (
	"github.com/boltdb/bolt"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/client/allocdir"
+	"github.com/hashicorp/nomad/client/allocrunner/taskrunner"
	"github.com/hashicorp/nomad/client/config"
+	consulApi "github.com/hashicorp/nomad/client/consul"
+	"github.com/hashicorp/nomad/client/state"
	"github.com/hashicorp/nomad/client/vaultclient"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/nomad/structs"
@@ -60,7 +62,7 @@ type AllocRunner struct {
	allocDir     *allocdir.AllocDir
	allocDirLock sync.Mutex

-	tasks      map[string]*TaskRunner
+	tasks      map[string]*taskrunner.TaskRunner
	taskStates map[string]*structs.TaskState
	restored   map[string]struct{}
	taskLock   sync.RWMutex
@@ -70,7 +72,7 @@ type AllocRunner struct {
	updateCh chan *structs.Allocation

	vaultClient  vaultclient.VaultClient
-	consulClient ConsulServiceAPI
+	consulClient consulApi.ConsulServiceAPI

	// prevAlloc allows for Waiting until a previous allocation exits and
	// the migrates it data. If sticky volumes aren't used and there's no
@@ -109,29 +111,6 @@ type AllocRunner struct {
	baseLabels []metrics.Label
}

-// COMPAT: Remove in 0.7.0
-// allocRunnerState is used to snapshot the state of the alloc runner
-type allocRunnerState struct {
-	Version                string
-	Alloc                  *structs.Allocation
-	AllocDir               *allocdir.AllocDir
-	AllocClientStatus      string
-	AllocClientDescription string
-
-	// COMPAT: Remove in 0.7.0: removing will break upgrading directly from
-	// 0.5.2, so don't remove in the 0.6 series.
-	// Context is deprecated and only used to migrate from older releases.
-	// It will be removed in the future.
-	Context *struct {
-		AllocID  string // unused; included for completeness
-		AllocDir struct {
-			AllocDir  string
-			SharedDir string // unused; included for completeness
-			TaskDirs  map[string]string
-		}
-	} `json:"Context,omitempty"`
-}

// allocRunnerAllocState is state that only has to be written when the alloc
// changes.
type allocRunnerAllocState struct {
@@ -154,7 +133,7 @@ type allocRunnerMutableState struct {
// NewAllocRunner is used to create a new allocation context
func NewAllocRunner(logger *log.Logger, config *config.Config, stateDB *bolt.DB, updater AllocStateUpdater,
-	alloc *structs.Allocation, vaultClient vaultclient.VaultClient, consulClient ConsulServiceAPI,
+	alloc *structs.Allocation, vaultClient vaultclient.VaultClient, consulClient consulApi.ConsulServiceAPI,
	prevAlloc prevAllocWatcher) *AllocRunner {

	ar := &AllocRunner{
@@ -168,7 +147,7 @@ func NewAllocRunner(logger *log.Logger, config *config.Config, stateDB *bolt.DB,
		prevAlloc:  prevAlloc,
		dirtyCh:    make(chan struct{}, 1),
		allocDir:   allocdir.NewAllocDir(logger, filepath.Join(config.AllocDir, alloc.ID)),
-		tasks:      make(map[string]*TaskRunner),
+		tasks:      make(map[string]*taskrunner.TaskRunner),
		taskStates: copyTaskStates(alloc.TaskStates),
		restored:   make(map[string]struct{}),
		updateCh:   make(chan *structs.Allocation, 64),
@@ -220,82 +199,44 @@ func (r *AllocRunner) pre060StateFilePath() string {
// RestoreState is used to restore the state of the alloc runner
func (r *AllocRunner) RestoreState() error {
-	// COMPAT: Remove in 0.7.0
-	// Check if the old snapshot is there
-	oldPath := r.pre060StateFilePath()
-	var snap allocRunnerState
-	var upgrading bool
-	if err := pre060RestoreState(oldPath, &snap); err == nil {
-		// Restore fields
-		r.logger.Printf("[INFO] client: restoring pre v0.6.0 alloc runner state for alloc %q", r.allocID)
-		r.alloc = snap.Alloc
-		r.allocDir = snap.AllocDir
-		r.allocClientStatus = snap.AllocClientStatus
-		r.allocClientDescription = snap.AllocClientDescription
-		if r.alloc != nil {
-			r.taskStates = snap.Alloc.TaskStates
-		}
-		// COMPAT: Remove in 0.7.0
-		// #2132 Upgrade path: if snap.AllocDir is nil, try to convert old
-		// Context struct to new AllocDir struct
-		if snap.AllocDir == nil && snap.Context != nil {
-			r.logger.Printf("[DEBUG] client: migrating state snapshot for alloc %q", r.allocID)
-			r.allocDir = allocdir.NewAllocDir(r.logger, snap.Context.AllocDir.AllocDir)
-			for taskName := range snap.Context.AllocDir.TaskDirs {
-				r.allocDir.NewTaskDir(taskName)
-			}
-		}
-		// Delete the old state
-		os.RemoveAll(oldPath)
-		upgrading = true
-	} else if !os.IsNotExist(err) {
-		// Something corrupt in the old state file
-		return err
-	} else {
-		// We are doing a normal restore
-		err := r.stateDB.View(func(tx *bolt.Tx) error {
-			bkt, err := getAllocationBucket(tx, r.allocID)
-			if err != nil {
-				return fmt.Errorf("failed to get allocation bucket: %v", err)
-			}
-			// Get the state objects
-			var mutable allocRunnerMutableState
-			var immutable allocRunnerImmutableState
-			var allocState allocRunnerAllocState
-			var allocDir allocdir.AllocDir
-			if err := getObject(bkt, allocRunnerStateAllocKey, &allocState); err != nil {
-				return fmt.Errorf("failed to read alloc runner alloc state: %v", err)
-			}
-			if err := getObject(bkt, allocRunnerStateImmutableKey, &immutable); err != nil {
-				return fmt.Errorf("failed to read alloc runner immutable state: %v", err)
-			}
-			if err := getObject(bkt, allocRunnerStateMutableKey, &mutable); err != nil {
-				return fmt.Errorf("failed to read alloc runner mutable state: %v", err)
-			}
-			if err := getObject(bkt, allocRunnerStateAllocDirKey, &allocDir); err != nil {
-				return fmt.Errorf("failed to read alloc runner alloc_dir state: %v", err)
-			}
-			// Populate the fields
-			r.alloc = allocState.Alloc
-			r.allocDir = &allocDir
-			r.allocClientStatus = mutable.AllocClientStatus
-			r.allocClientDescription = mutable.AllocClientDescription
-			r.taskStates = mutable.TaskStates
-			r.alloc.ClientStatus = getClientStatus(r.taskStates)
-			r.alloc.DeploymentStatus = mutable.DeploymentStatus
-			return nil
-		})
-		if err != nil {
-			return fmt.Errorf("failed to read allocation state: %v", err)
-		}
-	}
+	err := r.stateDB.View(func(tx *bolt.Tx) error {
+		bkt, err := state.GetAllocationBucket(tx, r.allocID)
+		if err != nil {
+			return fmt.Errorf("failed to get allocation bucket: %v", err)
+		}
+		// Get the state objects
+		var mutable allocRunnerMutableState
+		var immutable allocRunnerImmutableState
+		var allocState allocRunnerAllocState
+		var allocDir allocdir.AllocDir
+		if err := state.GetObject(bkt, allocRunnerStateAllocKey, &allocState); err != nil {
+			return fmt.Errorf("failed to read alloc runner alloc state: %v", err)
+		}
+		if err := state.GetObject(bkt, allocRunnerStateImmutableKey, &immutable); err != nil {
+			return fmt.Errorf("failed to read alloc runner immutable state: %v", err)
+		}
+		if err := state.GetObject(bkt, allocRunnerStateMutableKey, &mutable); err != nil {
+			return fmt.Errorf("failed to read alloc runner mutable state: %v", err)
+		}
+		if err := state.GetObject(bkt, allocRunnerStateAllocDirKey, &allocDir); err != nil {
+			return fmt.Errorf("failed to read alloc runner alloc_dir state: %v", err)
+		}
+		// Populate the fields
+		r.alloc = allocState.Alloc
+		r.allocDir = &allocDir
+		r.allocClientStatus = mutable.AllocClientStatus
+		r.allocClientDescription = mutable.AllocClientDescription
+		r.taskStates = mutable.TaskStates
+		r.alloc.ClientStatus = getClientStatus(r.taskStates)
+		r.alloc.DeploymentStatus = mutable.DeploymentStatus
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("failed to read allocation state: %v", err)
+	}

	var snapshotErrors multierror.Error
@@ -344,7 +285,7 @@ func (r *AllocRunner) RestoreState() error {
			continue
		}

-		tr := NewTaskRunner(r.logger, r.config, r.stateDB, r.setTaskState, td, r.Alloc(), task, r.vaultClient, r.consulClient)
+		tr := taskrunner.NewTaskRunner(r.logger, r.config, r.stateDB, r.setTaskState, td, r.Alloc(), task, r.vaultClient, r.consulClient)
		r.tasks[name] = tr

		if restartReason, err := tr.RestoreState(); err != nil {
@@ -354,12 +295,6 @@ func (r *AllocRunner) RestoreState() error {
			// Only start if the alloc isn't in a terminal status.
			go tr.Run()

-			if upgrading {
-				if err := tr.SaveState(); err != nil {
-					r.logger.Printf("[WARN] client: initial save state for alloc %s task %s failed: %v", r.allocID, name, err)
-				}
-			}

			// Restart task runner if RestoreState gave a reason
			if restartReason != "" {
				r.logger.Printf("[INFO] client: restarting alloc %s task %s: %v", r.allocID, name, restartReason)
@@ -367,6 +302,11 @@ func (r *AllocRunner) RestoreState() error {
				tr.Restart("upgrade", restartReason, failure)
			}
		} else {
+			// XXX This does nothing and is broken since the task runner is not
+			// running yet, and there is nothing listening to the destroy ch.
+			// XXX When a single task is dead in the allocation we should kill
+			// all the task. This currently does NOT happen. Re-enable test:
+			// TestAllocRunner_TaskLeader_StopRestoredTG
			tr.Destroy(taskDestroyEvent)
		}
	}
@@ -389,7 +329,7 @@ func (r *AllocRunner) SaveState() error {
	for _, tr := range runners {
		if err := tr.SaveState(); err != nil {
			mErr.Errors = append(mErr.Errors, fmt.Errorf("failed to save state for alloc %s task %q: %v",
-				r.allocID, tr.task.Name, err))
+				r.allocID, tr.Name(), err))
		}
	}
	return mErr.ErrorOrNil()
@@ -419,7 +359,7 @@ func (r *AllocRunner) saveAllocRunnerState() error {
	return r.stateDB.Batch(func(tx *bolt.Tx) error {
		// Grab the allocation bucket
-		allocBkt, err := getAllocationBucket(tx, r.allocID)
+		allocBkt, err := state.GetAllocationBucket(tx, r.allocID)
		if err != nil {
			return fmt.Errorf("failed to retrieve allocation bucket: %v", err)
		}
@@ -433,7 +373,7 @@ func (r *AllocRunner) saveAllocRunnerState() error {
				Alloc: alloc,
			}

-			if err := putObject(allocBkt, allocRunnerStateAllocKey, &allocState); err != nil {
+			if err := state.PutObject(allocBkt, allocRunnerStateAllocKey, &allocState); err != nil {
				return fmt.Errorf("failed to write alloc_runner alloc state: %v", err)
			}
@@ -450,7 +390,7 @@ func (r *AllocRunner) saveAllocRunnerState() error {
				Version: r.config.Version.VersionNumber(),
			}

-			if err := putObject(allocBkt, allocRunnerStateImmutableKey, &immutable); err != nil {
+			if err := state.PutObject(allocBkt, allocRunnerStateImmutableKey, &immutable); err != nil {
				return fmt.Errorf("failed to write alloc_runner immutable state: %v", err)
			}
@@ -461,7 +401,7 @@ func (r *AllocRunner) saveAllocRunnerState() error {
		// Write the alloc dir data if it hasn't been written before and it exists.
		if !r.allocDirPersisted && allocDir != nil {
-			if err := putObject(allocBkt, allocRunnerStateAllocDirKey, allocDir); err != nil {
+			if err := state.PutObject(allocBkt, allocRunnerStateAllocDirKey, allocDir); err != nil {
				return fmt.Errorf("failed to write alloc_runner allocDir state: %v", err)
			}
@@ -478,7 +418,7 @@ func (r *AllocRunner) saveAllocRunnerState() error {
			DeploymentStatus: alloc.DeploymentStatus,
		}

-		if err := putObject(allocBkt, allocRunnerStateMutableKey, &mutable); err != nil {
+		if err := state.PutObject(allocBkt, allocRunnerStateMutableKey, &mutable); err != nil {
			return fmt.Errorf("failed to write alloc_runner mutable state: %v", err)
		}
@@ -492,7 +432,7 @@ func (r *AllocRunner) DestroyState() error {
	defer r.allocStateLock.Unlock()

	return r.stateDB.Update(func(tx *bolt.Tx) error {
-		if err := deleteAllocationBucket(tx, r.allocID); err != nil {
+		if err := state.DeleteAllocationBucket(tx, r.allocID); err != nil {
			return fmt.Errorf("failed to delete allocation bucket: %v", err)
		}
		return nil
@@ -754,14 +694,14 @@ func (r *AllocRunner) setTaskState(taskName, state string, event *structs.TaskEv
	// Find all tasks that are not the one that is dead and check if the one
	// that is dead is a leader
-	var otherTaskRunners []*TaskRunner
+	var otherTaskRunners []*taskrunner.TaskRunner
	var otherTaskNames []string
	leader := false
	for task, tr := range r.tasks {
		if task != taskName {
			otherTaskRunners = append(otherTaskRunners, tr)
			otherTaskNames = append(otherTaskNames, task)
-		} else if tr.task.Leader {
+		} else if tr.IsLeader() {
			leader = true
		}
	}
@@ -784,6 +724,7 @@ func (r *AllocRunner) setTaskState(taskName, state string, event *structs.TaskEv
			metrics.IncrCounter([]string{"client", "allocs", r.alloc.Job.Name, r.alloc.TaskGroup, taskName, "complete"}, 1)
		}
	}
+	// If the task failed, we should kill all the other tasks in the task group.
	if taskState.Failed {
		for _, tr := range otherTaskRunners {
@@ -922,7 +863,7 @@ func (r *AllocRunner) Run() {
		taskdir := r.allocDir.NewTaskDir(task.Name)
		r.allocDirLock.Unlock()

-		tr := NewTaskRunner(r.logger, r.config, r.stateDB, r.setTaskState, taskdir, r.Alloc(), task.Copy(), r.vaultClient, r.consulClient)
+		tr := taskrunner.NewTaskRunner(r.logger, r.config, r.stateDB, r.setTaskState, taskdir, r.Alloc(), task.Copy(), r.vaultClient, r.consulClient)
		r.tasks[task.Name] = tr
		tr.MarkReceived()
@@ -1118,11 +1059,11 @@ func (r *AllocRunner) StatsReporter() AllocStatsReporter {
// getTaskRunners is a helper that returns a copy of the task runners list using
// the taskLock.
-func (r *AllocRunner) getTaskRunners() []*TaskRunner {
+func (r *AllocRunner) getTaskRunners() []*taskrunner.TaskRunner {
	// Get the task runners
	r.taskLock.RLock()
	defer r.taskLock.RUnlock()
-	runners := make([]*TaskRunner, 0, len(r.tasks))
+	runners := make([]*taskrunner.TaskRunner, 0, len(r.tasks))
	for _, tr := range r.tasks {
		runners = append(runners, tr)
	}
@@ -1156,7 +1097,7 @@ func (r *AllocRunner) LatestAllocStats(taskFilter string) (*cstructs.AllocResour
	for _, tr := range runners {
		l := tr.LatestResourceUsage()
		if l != nil {
-			astat.Tasks[tr.task.Name] = l
+			astat.Tasks[tr.Name()] = l
			flat = append(flat, l)
			if l.Timestamp > astat.Timestamp {
				astat.Timestamp = l.Timestamp
@@ -1181,9 +1122,9 @@ func sumTaskResourceUsage(usages []*cstructs.TaskResourceUsage) *cstructs.Resour
	return summed
}

-// shouldUpdate takes the AllocModifyIndex of an allocation sent from the server and
+// ShouldUpdate takes the AllocModifyIndex of an allocation sent from the server and
// checks if the current running allocation is behind and should be updated.
-func (r *AllocRunner) shouldUpdate(serverIndex uint64) bool {
+func (r *AllocRunner) ShouldUpdate(serverIndex uint64) bool {
	r.allocLock.Lock()
	defer r.allocLock.Unlock()
	return r.alloc.AllocModifyIndex < serverIndex
@@ -1215,3 +1156,11 @@ func (r *AllocRunner) IsDestroyed() bool {
func (r *AllocRunner) WaitCh() <-chan struct{} {
	return r.waitCh
}
+
+// AllocID returns the allocation ID of the allocation being run
+func (r *AllocRunner) AllocID() string {
+	if r == nil {
+		return ""
+	}
+	return r.allocID
+}
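Throughout this file the package-local bolt helpers (getAllocationBucket, getObject, putObject, deleteAllocationBucket) are swapped for exported equivalents in a new client/state package. The signatures below are inferred from the call sites in this diff; the bodies are only a sketch (JSON encoding and the bucket layout are assumptions):

```go
// Package state: sketch inferred from call sites; not the real implementation.
package state

import (
	"encoding/json"
	"fmt"

	"github.com/boltdb/bolt"
)

var allocationsBucket = []byte("allocations")

// GetAllocationBucket returns the nested bucket holding one allocation's
// state, creating it on writable transactions.
func GetAllocationBucket(tx *bolt.Tx, allocID string) (*bolt.Bucket, error) {
	var err error
	parent := tx.Bucket(allocationsBucket)
	if parent == nil {
		if !tx.Writable() {
			return nil, fmt.Errorf("allocations bucket doesn't exist")
		}
		if parent, err = tx.CreateBucket(allocationsBucket); err != nil {
			return nil, err
		}
	}
	bkt := parent.Bucket([]byte(allocID))
	if bkt == nil {
		if !tx.Writable() {
			return nil, fmt.Errorf("allocation %q bucket doesn't exist", allocID)
		}
		if bkt, err = parent.CreateBucket([]byte(allocID)); err != nil {
			return nil, err
		}
	}
	return bkt, nil
}

// PutObject serializes obj under key in the given bucket.
func PutObject(bkt *bolt.Bucket, key []byte, obj interface{}) error {
	data, err := json.Marshal(obj)
	if err != nil {
		return fmt.Errorf("failed to encode object: %v", err)
	}
	return bkt.Put(key, data)
}

// GetObject deserializes the value stored at key into obj.
func GetObject(bkt *bolt.Bucket, key []byte, obj interface{}) error {
	data := bkt.Get(key)
	if data == nil {
		return fmt.Errorf("no object at key %q", string(key))
	}
	return json.Unmarshal(data, obj)
}

// DeleteAllocationBucket drops an allocation's bucket entirely.
func DeleteAllocationBucket(tx *bolt.Tx, allocID string) error {
	parent := tx.Bucket(allocationsBucket)
	if parent == nil {
		return nil
	}
	return parent.DeleteBucket([]byte(allocID))
}
```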


@@ -1,4 +1,4 @@
-package client
+package allocrunner

import (
	"context"
@@ -9,6 +9,7 @@ import (
	"time"

	"github.com/hashicorp/consul/api"
+	consulApi "github.com/hashicorp/nomad/client/consul"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/helper"
@@ -160,7 +161,7 @@ type allocHealthTracker struct {
	allocUpdates *cstructs.AllocListener

	// consulClient is used to look up the state of the task's checks
-	consulClient ConsulServiceAPI
+	consulClient consulApi.ConsulServiceAPI

	// healthy is used to signal whether we have determined the allocation to be
	// healthy or unhealthy
@@ -191,7 +192,7 @@ type allocHealthTracker struct {
// alloc listener and consul API object are given so that the watcher can detect
// health changes.
func newAllocHealthTracker(parentCtx context.Context, logger *log.Logger, alloc *structs.Allocation,
-	allocUpdates *cstructs.AllocListener, consulClient ConsulServiceAPI,
+	allocUpdates *cstructs.AllocListener, consulClient consulApi.ConsulServiceAPI,
	minHealthyTime time.Duration, useChecks bool) *allocHealthTracker {

	a := &allocHealthTracker{
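Here and in the test file below, the Consul-facing interface moves behind a client/consul package (imported as consulApi), which also exports the mock that tests hook through its AllocRegistrationsFn field. A sketch of the shape those call sites imply; any method beyond AllocRegistrations is not visible in this diff and is therefore omitted:

```go
// Package consul (client/consul): sketch implied by the call sites in this diff.
package consul

import (
	"log"

	"github.com/hashicorp/nomad/command/agent/consul"
)

// ConsulServiceAPI is the interface the alloc runner and health tracker
// consume; only the method evident from this diff is listed.
type ConsulServiceAPI interface {
	AllocRegistrations(allocID string) (*consul.AllocRegistration, error)
}

// MockConsulServiceClient is the exported test double; tests override
// AllocRegistrationsFn, as in:
//
//	ar.consulClient.(*consulApi.MockConsulServiceClient).AllocRegistrationsFn = ...
type MockConsulServiceClient struct {
	logger *log.Logger

	// AllocRegistrationsFn, when non-nil, replaces AllocRegistrations.
	AllocRegistrationsFn func(allocID string) (*consul.AllocRegistration, error)
}

func (m *MockConsulServiceClient) AllocRegistrations(allocID string) (*consul.AllocRegistration, error) {
	if m.AllocRegistrationsFn != nil {
		return m.AllocRegistrationsFn(allocID)
	}
	return nil, nil
}
```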


@@ -1,4 +1,4 @@
-package client
+package allocrunner

import (
	"fmt"
@@ -6,93 +6,34 @@ import (
	"os"
	"path/filepath"
	"strings"
-	"sync"
	"testing"
-	"text/template"
	"time"

	"github.com/boltdb/bolt"
	"github.com/hashicorp/consul/api"
-	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
-	"github.com/hashicorp/nomad/version"
-	"github.com/kr/pretty"
	"github.com/stretchr/testify/assert"

-	"github.com/hashicorp/nomad/client/config"
-	"github.com/hashicorp/nomad/client/vaultclient"
+	"github.com/hashicorp/nomad/client/allocrunner/taskrunner"
+	consulApi "github.com/hashicorp/nomad/client/consul"
+	"github.com/hashicorp/nomad/client/state"
	"github.com/stretchr/testify/require"
)
-type MockAllocStateUpdater struct {
-	Allocs []*structs.Allocation
-	mu     sync.Mutex
-}
-
-// Update fulfills the TaskStateUpdater interface
-func (m *MockAllocStateUpdater) Update(alloc *structs.Allocation) {
-	m.mu.Lock()
-	m.Allocs = append(m.Allocs, alloc)
-	m.mu.Unlock()
-}
-
-// Last returns a copy of the last alloc (or nil) sync'd
-func (m *MockAllocStateUpdater) Last() *structs.Allocation {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	n := len(m.Allocs)
-	if n == 0 {
-		return nil
-	}
-	return m.Allocs[n-1].Copy()
-}

// allocationBucketExists checks if the allocation bucket was created.
func allocationBucketExists(tx *bolt.Tx, allocID string) bool {
-	allocations := tx.Bucket(allocationsBucket)
-	if allocations == nil {
-		return false
-	}
-
-	// Retrieve the specific allocations bucket
-	alloc := allocations.Bucket([]byte(allocID))
-	return alloc != nil
-}
-
-func testAllocRunnerFromAlloc(t *testing.T, alloc *structs.Allocation, restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
-	conf := config.DefaultConfig()
-	conf.Node = mock.Node()
-	conf.StateDir = os.TempDir()
-	conf.AllocDir = os.TempDir()
-	tmp, _ := ioutil.TempFile("", "state-db")
-	db, _ := bolt.Open(tmp.Name(), 0600, nil)
-	upd := &MockAllocStateUpdater{}
-	if !restarts {
-		*alloc.Job.LookupTaskGroup(alloc.TaskGroup).RestartPolicy = structs.RestartPolicy{Attempts: 0}
-		alloc.Job.Type = structs.JobTypeBatch
-	}
-	vclient := vaultclient.NewMockVaultClient()
-	ar := NewAllocRunner(testlog.Logger(t), conf, db, upd.Update, alloc, vclient, newMockConsulServiceClient(t), noopPrevAlloc{})
-	return upd, ar
-}
-
-func testAllocRunner(t *testing.T, restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
-	// Use mock driver
-	alloc := mock.Alloc()
-	task := alloc.Job.TaskGroups[0].Tasks[0]
-	task.Driver = "mock_driver"
-	task.Config["run_for"] = "500ms"
-	return testAllocRunnerFromAlloc(t, alloc, restarts)
+	bucket, err := state.GetAllocationBucket(tx, allocID)
+	return err == nil && bucket != nil
}

func TestAllocRunner_SimpleRun(t *testing.T) {
	t.Parallel()
-	upd, ar := testAllocRunner(t, false)
+	upd, ar := TestAllocRunner(t, false)
	go ar.Run()
	defer ar.Destroy()
@@ -114,7 +55,7 @@ func TestAllocRunner_SimpleRun(t *testing.T) {
func TestAllocRunner_FinishedAtSet(t *testing.T) {
	t.Parallel()
	require := require.New(t)
-	_, ar := testAllocRunner(t, false)
+	_, ar := TestAllocRunner(t, false)
	ar.allocClientStatus = structs.AllocClientStatusFailed
	alloc := ar.Alloc()
	taskFinishedAt := make(map[string]time.Time)
@@ -136,7 +77,7 @@ func TestAllocRunner_FinishedAtSet(t *testing.T) {
func TestAllocRunner_FinishedAtSet_TaskEvents(t *testing.T) {
	t.Parallel()
	require := require.New(t)
-	_, ar := testAllocRunner(t, false)
+	_, ar := TestAllocRunner(t, false)
	ar.taskStates[ar.alloc.Job.TaskGroups[0].Tasks[0].Name] = &structs.TaskState{State: structs.TaskStateDead, Failed: true}

	alloc := ar.Alloc()
@@ -161,7 +102,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_BadStart(t *testing.T) {
	assert := assert.New(t)

	// Ensure the task fails and restarts
-	upd, ar := testAllocRunner(t, true)
+	upd, ar := TestAllocRunner(t, true)

	// Make the task fail
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
@@ -208,7 +149,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_Deadline(t *testing.T) {
	t.Parallel()

	// Don't restart but force service job type
-	upd, ar := testAllocRunner(t, false)
+	upd, ar := TestAllocRunner(t, false)
	ar.alloc.Job.Type = structs.JobTypeService

	// Make the task block
@@ -268,7 +209,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) {
	t.Parallel()

	// Ensure the task fails and restarts
-	upd, ar := testAllocRunner(t, true)
+	upd, ar := TestAllocRunner(t, true)

	// Make the task run healthy
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
@@ -316,7 +257,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_Checks(t *testing.T) {
	t.Parallel()

	// Ensure the task fails and restarts
-	upd, ar := testAllocRunner(t, true)
+	upd, ar := TestAllocRunner(t, true)

	// Make the task fail
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
@@ -347,7 +288,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_Checks(t *testing.T) {
	// Only return the check as healthy after a duration
	trigger := time.After(500 * time.Millisecond)
-	ar.consulClient.(*mockConsulServiceClient).allocRegistrationsFn = func(allocID string) (*consul.AllocRegistration, error) {
+	ar.consulClient.(*consulApi.MockConsulServiceClient).AllocRegistrationsFn = func(allocID string) (*consul.AllocRegistration, error) {
		select {
		case <-trigger:
			return &consul.AllocRegistration{
@@ -409,7 +350,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_Checks(t *testing.T) {
	assert := assert.New(t)

	// Ensure the task fails and restarts
-	upd, ar := testAllocRunner(t, true)
+	upd, ar := TestAllocRunner(t, true)

	// Make the task fail
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
@@ -430,7 +371,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_Checks(t *testing.T) {
	}

	// Only return the check as healthy after a duration
-	ar.consulClient.(*mockConsulServiceClient).allocRegistrationsFn = func(allocID string) (*consul.AllocRegistration, error) {
+	ar.consulClient.(*consulApi.MockConsulServiceClient).AllocRegistrationsFn = func(allocID string) (*consul.AllocRegistration, error) {
		return &consul.AllocRegistration{
			Tasks: map[string]*consul.TaskRegistration{
				task.Name: {
@@ -478,7 +419,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_UpdatedDeployment(t *testing.T) {
	t.Parallel()

	// Ensure the task fails and restarts
-	upd, ar := testAllocRunner(t, true)
+	upd, ar := TestAllocRunner(t, true)

	// Make the task run healthy
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
@@ -535,7 +476,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_Migration(t *testing.T) {
	t.Parallel()

	// Ensure the task fails and restarts
-	upd, ar := testAllocRunner(t, true)
+	upd, ar := TestAllocRunner(t, true)

	// Make the task run healthy
	tg := ar.alloc.Job.TaskGroups[0]
@@ -588,7 +529,7 @@ func TestAllocRunner_DeploymentHealth_BatchDisabled(t *testing.T) {
	task := tg.Tasks[0]
	task.Driver = "mock_driver"
	task.Config["run_for"] = "5s"
-	upd, ar := testAllocRunnerFromAlloc(t, alloc, false)
+	upd, ar := TestAllocRunnerFromAlloc(t, alloc, false)

	go ar.Run()
	defer ar.Destroy()
@@ -634,7 +575,7 @@ func TestAllocRunner_RetryArtifact(t *testing.T) {
	}
	alloc.Job.TaskGroups[0].Tasks = append(alloc.Job.TaskGroups[0].Tasks, badtask)

-	upd, ar := testAllocRunnerFromAlloc(t, alloc, true)
+	upd, ar := TestAllocRunnerFromAlloc(t, alloc, true)
	go ar.Run()
	defer ar.Destroy()
@@ -673,7 +614,7 @@ func TestAllocRunner_RetryArtifact(t *testing.T) {
func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
	t.Parallel()
-	upd, ar := testAllocRunner(t, false)
+	upd, ar := TestAllocRunner(t, false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
@@ -772,7 +713,7 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
func TestAllocRunner_Destroy(t *testing.T) {
	t.Parallel()
-	upd, ar := testAllocRunner(t, false)
+	upd, ar := TestAllocRunner(t, false)

	// Ensure task takes some time
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
@@ -828,7 +769,7 @@ func TestAllocRunner_Destroy(t *testing.T) {
func TestAllocRunner_Update(t *testing.T) {
	t.Parallel()
-	_, ar := testAllocRunner(t, false)
+	_, ar := TestAllocRunner(t, false)

	// Deep copy the alloc to avoid races when updating
	newAlloc := ar.Alloc().Copy()
@@ -863,7 +804,7 @@ func TestAllocRunner_SaveRestoreState(t *testing.T) {
		"run_for": "10s",
	}

-	upd, ar := testAllocRunnerFromAlloc(t, alloc, false)
+	upd, ar := TestAllocRunnerFromAlloc(t, alloc, false)
	go ar.Run()
	defer ar.Destroy()
@@ -882,9 +823,9 @@ func TestAllocRunner_SaveRestoreState(t *testing.T) {
	}

	// Create a new alloc runner
-	l2 := prefixedTestLogger("----- ar2: ")
+	l2 := testlog.WithPrefix(t, "----- ar2: ")
	alloc2 := &structs.Allocation{ID: ar.alloc.ID}
-	prevAlloc := newAllocWatcher(alloc2, ar, nil, ar.config, l2, "")
+	prevAlloc := NewAllocWatcher(alloc2, ar, nil, ar.config, l2, "")
	ar2 := NewAllocRunner(l2, ar.config, ar.stateDB, upd.Update,
		alloc2, ar.vaultClient, ar.consulClient, prevAlloc)
	err = ar2.RestoreState()
@@ -931,8 +872,8 @@ func TestAllocRunner_SaveRestoreState(t *testing.T) {
func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
	t.Parallel()
-	upd, ar := testAllocRunner(t, false)
-	ar.logger = prefixedTestLogger("ar1: ")
+	upd, ar := TestAllocRunner(t, false)
+	ar.logger = testlog.WithPrefix(t, "ar1: ")

	// Ensure task takes some time
	ar.alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
@@ -977,9 +918,9 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
	defer ar.allocLock.Unlock()

	// Create a new alloc runner
-	l2 := prefixedTestLogger("ar2: ")
+	l2 := testlog.WithPrefix(t, "ar2: ")
	alloc2 := &structs.Allocation{ID: ar.alloc.ID}
-	prevAlloc := newAllocWatcher(alloc2, ar, nil, ar.config, l2, "")
+	prevAlloc := NewAllocWatcher(alloc2, ar, nil, ar.config, l2, "")
	ar2 := NewAllocRunner(l2, ar.config, ar.stateDB, upd.Update,
		alloc2, ar.vaultClient, ar.consulClient, prevAlloc)
	err = ar2.RestoreState()
@@ -1052,230 +993,9 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
	})
}
// TestAllocRunner_SaveRestoreState_Upgrade asserts that pre-0.6 exec tasks are
// restarted on upgrade.
func TestAllocRunner_SaveRestoreState_Upgrade(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
task.Config = map[string]interface{}{
"exit_code": "0",
"run_for": "10s",
}
upd, ar := testAllocRunnerFromAlloc(t, alloc, false)
// Hack in old version to cause an upgrade on RestoreState
origConfig := ar.config.Copy()
ar.config.Version = &version.VersionInfo{Version: "0.5.6"}
go ar.Run()
defer ar.Destroy()
// Snapshot state
testutil.WaitForResult(func() (bool, error) {
last := upd.Last()
if last == nil {
return false, fmt.Errorf("No updates")
}
if last.ClientStatus != structs.AllocClientStatusRunning {
return false, fmt.Errorf("got status %v; want %v", last.ClientStatus, structs.AllocClientStatusRunning)
}
return true, nil
}, func(err error) {
t.Fatalf("task never started: %v", err)
})
err := ar.SaveState()
if err != nil {
t.Fatalf("err: %v", err)
}
// Create a new alloc runner
l2 := prefixedTestLogger("ar2: ")
alloc2 := &structs.Allocation{ID: ar.alloc.ID}
prevAlloc := newAllocWatcher(alloc2, ar, nil, origConfig, l2, "")
ar2 := NewAllocRunner(l2, origConfig, ar.stateDB, upd.Update, alloc2, ar.vaultClient, ar.consulClient, prevAlloc)
err = ar2.RestoreState()
if err != nil {
t.Fatalf("err: %v", err)
}
go ar2.Run()
defer ar2.Destroy() // Just-in-case of failure before Destroy below
testutil.WaitForResult(func() (bool, error) {
last := upd.Last()
if last == nil {
return false, fmt.Errorf("No updates")
}
for _, ev := range last.TaskStates["web"].Events {
if strings.HasSuffix(ev.RestartReason, pre06ScriptCheckReason) {
return true, nil
}
}
return false, fmt.Errorf("no restart with proper reason found")
}, func(err error) {
last := upd.Last()
t.Fatalf("err: %v\nweb state: % #v", err, pretty.Formatter(last.TaskStates["web"]))
})
// Destroy and wait
ar2.Destroy()
start := time.Now()
testutil.WaitForResult(func() (bool, error) {
alloc := ar2.Alloc()
if alloc.ClientStatus != structs.AllocClientStatusComplete {
return false, fmt.Errorf("Bad client status; got %v; want %v", alloc.ClientStatus, structs.AllocClientStatusComplete)
}
return true, nil
}, func(err error) {
last := upd.Last()
t.Fatalf("err: %v %#v %#v", err, last, last.TaskStates)
})
if time.Since(start) > time.Duration(testutil.TestMultiplier()*5)*time.Second {
t.Fatalf("took too long to terminate")
}
}
// Ensure pre-#2132 state files containing the Context struct are properly
// migrated to the new format.
//
// Old Context State:
//
// "Context": {
// "AllocDir": {
// "AllocDir": "/path/to/allocs/2a54fcff-fc44-8d4f-e025-53c48e9cbbbb",
// "SharedDir": "/path/to/allocs/2a54fcff-fc44-8d4f-e025-53c48e9cbbbb/alloc",
// "TaskDirs": {
// "echo1": "/path/to/allocs/2a54fcff-fc44-8d4f-e025-53c48e9cbbbb/echo1"
// }
// },
// "AllocID": "2a54fcff-fc44-8d4f-e025-53c48e9cbbbb"
// }
func TestAllocRunner_RestoreOldState(t *testing.T) {
t.Parallel()
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
task.Driver = "mock_driver"
task.Config = map[string]interface{}{
"exit_code": "0",
"run_for": "10s",
}
logger := testLogger()
conf := config.DefaultConfig()
conf.Node = mock.Node()
conf.StateDir = os.TempDir()
conf.AllocDir = os.TempDir()
tmp, err := ioutil.TempFile("", "state-db")
if err != nil {
t.Fatalf("error creating state db file: %v", err)
}
db, err := bolt.Open(tmp.Name(), 0600, nil)
if err != nil {
t.Fatalf("error creating state db: %v", err)
}
if err := os.MkdirAll(filepath.Join(conf.StateDir, "alloc", alloc.ID), 0777); err != nil {
t.Fatalf("error creating state dir: %v", err)
}
statePath := filepath.Join(conf.StateDir, "alloc", alloc.ID, "state.json")
w, err := os.Create(statePath)
if err != nil {
t.Fatalf("error creating state file: %v", err)
}
tmplctx := &struct {
AllocID string
AllocDir string
}{alloc.ID, conf.AllocDir}
err = template.Must(template.New("test_state").Parse(`{
"Version": "0.5.1",
"Alloc": {
"ID": "{{ .AllocID }}",
"Name": "example",
"JobID": "example",
"Job": {
"ID": "example",
"Name": "example",
"Type": "batch",
"TaskGroups": [
{
"Name": "example",
"Tasks": [
{
"Name": "example",
"Driver": "mock",
"Config": {
"exit_code": "0",
"run_for": "10s"
}
}
]
}
]
},
"TaskGroup": "example",
"DesiredStatus": "run",
"ClientStatus": "running",
"TaskStates": {
"example": {
"State": "running",
"Failed": false,
"Events": []
}
}
},
"Context": {
"AllocDir": {
"AllocDir": "{{ .AllocDir }}/{{ .AllocID }}",
"SharedDir": "{{ .AllocDir }}/{{ .AllocID }}/alloc",
"TaskDirs": {
"example": "{{ .AllocDir }}/{{ .AllocID }}/example"
}
},
"AllocID": "{{ .AllocID }}"
}
}`)).Execute(w, tmplctx)
if err != nil {
t.Fatalf("error writing state file: %v", err)
}
w.Close()
upd := &MockAllocStateUpdater{}
*alloc.Job.LookupTaskGroup(alloc.TaskGroup).RestartPolicy = structs.RestartPolicy{Attempts: 0}
alloc.Job.Type = structs.JobTypeBatch
vclient := vaultclient.NewMockVaultClient()
cclient := newMockConsulServiceClient(t)
ar := NewAllocRunner(logger, conf, db, upd.Update, alloc, vclient, cclient, noopPrevAlloc{})
defer ar.Destroy()
// RestoreState should fail on the task state since we only test the
// alloc state restoring.
err = ar.RestoreState()
if err == nil {
t.Fatal("expected error restoring Task state")
}
merr, ok := err.(*multierror.Error)
if !ok {
t.Fatalf("expected RestoreState to return a multierror but found: %T -> %v", err, err)
}
if len(merr.Errors) != 1 {
t.Fatalf("expected exactly 1 error from RestoreState but found: %d: %v", len(merr.Errors), err)
}
if expected := "failed to get task bucket"; !strings.Contains(merr.Errors[0].Error(), expected) {
t.Fatalf("expected %q but got: %q", expected, merr.Errors[0].Error())
}
if err := ar.SaveState(); err != nil {
t.Fatalf("error saving new state: %v", err)
}
}
func TestAllocRunner_TaskFailed_KillTG(t *testing.T) { func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
t.Parallel() t.Parallel()
upd, ar := testAllocRunner(t, false) upd, ar := TestAllocRunner(t, false)
// Create two tasks in the task group // Create two tasks in the task group
task := ar.alloc.Job.TaskGroups[0].Tasks[0] task := ar.alloc.Job.TaskGroups[0].Tasks[0]
@ -1343,7 +1063,7 @@ func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
func TestAllocRunner_TaskLeader_KillTG(t *testing.T) {
	t.Parallel()
-	upd, ar := testAllocRunner(t, false)
+	upd, ar := TestAllocRunner(t, false)

	// Create two tasks in the task group
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
@@ -1417,7 +1137,7 @@ func TestAllocRunner_TaskLeader_KillTG(t *testing.T) {
// with a leader the leader is stopped before other tasks.
func TestAllocRunner_TaskLeader_StopTG(t *testing.T) {
	t.Parallel()
-	upd, ar := testAllocRunner(t, false)
+	upd, ar := TestAllocRunner(t, false)

	// Create 3 tasks in the task group
	task := ar.alloc.Job.TaskGroups[0].Tasks[0]
@@ -1509,8 +1229,9 @@ func TestAllocRunner_TaskLeader_StopTG(t *testing.T) {
// not stopped as it does not exist.
// See https://github.com/hashicorp/nomad/issues/3420#issuecomment-341666932
func TestAllocRunner_TaskLeader_StopRestoredTG(t *testing.T) {
+	t.Skip("Skipping because the functionality being tested doesn't exist")
	t.Parallel()
-	_, ar := testAllocRunner(t, false)
+	_, ar := TestAllocRunner(t, false)
	defer ar.Destroy()

	// Create a leader and follower task in the task group
@@ -1535,11 +1256,11 @@ func TestAllocRunner_TaskLeader_StopRestoredTG(t *testing.T) {
	ar.alloc.TaskResources[task2.Name] = task2.Resources

	// Mimic Nomad exiting before the leader stopping is able to stop other tasks.
-	ar.tasks = map[string]*TaskRunner{
-		"leader": NewTaskRunner(ar.logger, ar.config, ar.stateDB, ar.setTaskState,
+	ar.tasks = map[string]*taskrunner.TaskRunner{
+		"leader": taskrunner.NewTaskRunner(ar.logger, ar.config, ar.stateDB, ar.setTaskState,
			ar.allocDir.NewTaskDir(task2.Name), ar.Alloc(), task2.Copy(),
			ar.vaultClient, ar.consulClient),
-		"follower1": NewTaskRunner(ar.logger, ar.config, ar.stateDB, ar.setTaskState,
+		"follower1": taskrunner.NewTaskRunner(ar.logger, ar.config, ar.stateDB, ar.setTaskState,
			ar.allocDir.NewTaskDir(task.Name), ar.Alloc(), task.Copy(),
			ar.vaultClient, ar.consulClient),
	}
@@ -1564,22 +1285,14 @@ func TestAllocRunner_TaskLeader_StopRestoredTG(t *testing.T) {
	// Wait for tasks to be stopped because leader is dead
	testutil.WaitForResult(func() (bool, error) {
-		last := upd2.Last()
-		if last == nil {
-			return false, fmt.Errorf("No updates")
-		}
-		if actual := last.TaskStates["leader"].State; actual != structs.TaskStateDead {
-			return false, fmt.Errorf("Task leader is not dead yet (it's %q)", actual)
-		}
-		if actual := last.TaskStates["follower1"].State; actual != structs.TaskStateDead {
-			return false, fmt.Errorf("Task follower1 is not dead yet (it's %q)", actual)
-		}
+		alloc := ar2.Alloc()
+		for task, state := range alloc.TaskStates {
+			if state.State != structs.TaskStateDead {
+				return false, fmt.Errorf("Task %q should be dead: %v", task, state.State)
+			}
+		}
		return true, nil
	}, func(err error) {
-		last := upd2.Last()
-		for name, state := range last.TaskStates {
-			t.Logf("%s: %s", name, state.State)
-		}
		t.Fatalf("err: %v", err)
	})
@@ -1606,7 +1319,7 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) {
	task.Config = map[string]interface{}{
		"run_for": "1s",
	}
-	upd, ar := testAllocRunnerFromAlloc(t, alloc, false)
+	upd, ar := TestAllocRunnerFromAlloc(t, alloc, false)
	go ar.Run()
	defer ar.Destroy()
@@ -1639,10 +1352,10 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) {
	task.Config = map[string]interface{}{
		"run_for": "1s",
	}
-	upd2, ar2 := testAllocRunnerFromAlloc(t, alloc2, false)
+	upd2, ar2 := TestAllocRunnerFromAlloc(t, alloc2, false)

	// Set prevAlloc like Client does
-	ar2.prevAlloc = newAllocWatcher(alloc2, ar, nil, ar2.config, ar2.logger, "")
+	ar2.prevAlloc = NewAllocWatcher(alloc2, ar, nil, ar2.config, ar2.logger, "")
	go ar2.Run()
	defer ar2.Destroy()
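The assertions above poll through testutil.WaitForResult rather than sleeping for a fixed interval. For readers unfamiliar with that helper, here is a self-contained sketch of the retry-until-true pattern it implements; the timeout, interval, and names are illustrative, not Nomad's exact implementation.

    package main

    import (
    	"fmt"
    	"time"
    )

    // waitForResult polls test until it returns true or the deadline passes,
    // then hands the last error to onFail.
    func waitForResult(test func() (bool, error), onFail func(error)) {
    	deadline := time.Now().Add(5 * time.Second)
    	var err error
    	for time.Now().Before(deadline) {
    		var ok bool
    		if ok, err = test(); ok {
    			return
    		}
    		time.Sleep(50 * time.Millisecond)
    	}
    	onFail(err)
    }

    func main() {
    	start := time.Now()
    	waitForResult(func() (bool, error) {
    		if time.Since(start) > 200*time.Millisecond {
    			return true, nil
    		}
    		return false, fmt.Errorf("not ready yet")
    	}, func(err error) {
    		fmt.Println("failed:", err)
    	})
    	fmt.Println("done polling")
    }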

View File

@@ -1,4 +1,4 @@
-package client
+package allocrunner

import (
	"archive/tar"
@@ -20,6 +20,12 @@ import (
	"github.com/hashicorp/nomad/nomad/structs"
)

+const (
+	// getRemoteRetryIntv is minimum interval on which we retry
+	// to fetch remote objects. We pick a value between this and 2x this.
+	getRemoteRetryIntv = 30 * time.Second
+)
+
// rpcer is the interface needed by a prevAllocWatcher to make RPC calls.
type rpcer interface {
	// RPC allows retrieving remote allocs.
@@ -49,13 +55,13 @@ type prevAllocWatcher interface {
	IsMigrating() bool
}

-// newAllocWatcher creates a prevAllocWatcher appropriate for whether this
+// NewAllocWatcher creates a prevAllocWatcher appropriate for whether this
// alloc's previous allocation was local or remote. If this alloc has no
// previous alloc then a noop implementation is returned.
-func newAllocWatcher(alloc *structs.Allocation, prevAR *AllocRunner, rpc rpcer, config *config.Config, l *log.Logger, migrateToken string) prevAllocWatcher {
+func NewAllocWatcher(alloc *structs.Allocation, prevAR *AllocRunner, rpc rpcer, config *config.Config, l *log.Logger, migrateToken string) prevAllocWatcher {
	if alloc.PreviousAllocation == "" {
		// No previous allocation, use noop transitioner
-		return noopPrevAlloc{}
+		return NoopPrevAlloc{}
	}

	tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)
@@ -295,7 +301,7 @@ func (p *remotePrevAlloc) Wait(ctx context.Context) error {
		err := p.rpc.RPC("Alloc.GetAlloc", &req, &resp)
		if err != nil {
			p.logger.Printf("[ERR] client: failed to query previous alloc %q: %v", p.prevAllocID, err)
-			retry := getAllocRetryIntv + lib.RandomStagger(getAllocRetryIntv)
+			retry := getRemoteRetryIntv + lib.RandomStagger(getRemoteRetryIntv)
			select {
			case <-time.After(retry):
				continue
@@ -386,7 +392,7 @@ func (p *remotePrevAlloc) getNodeAddr(ctx context.Context, nodeID string) (strin
		err := p.rpc.RPC("Node.GetNode", &req, &resp)
		if err != nil {
			p.logger.Printf("[ERR] client: failed to query node info %q: %v", nodeID, err)
-			retry := getAllocRetryIntv + lib.RandomStagger(getAllocRetryIntv)
+			retry := getRemoteRetryIntv + lib.RandomStagger(getRemoteRetryIntv)
			select {
			case <-time.After(retry):
				continue
@@ -568,15 +574,15 @@ func (p *remotePrevAlloc) streamAllocDir(ctx context.Context, resp io.ReadCloser
	return nil
}

-// noopPrevAlloc does not block or migrate on a previous allocation and never
+// NoopPrevAlloc does not block or migrate on a previous allocation and never
// returns an error.
-type noopPrevAlloc struct{}
+type NoopPrevAlloc struct{}

// Wait returns nil immediately.
-func (noopPrevAlloc) Wait(context.Context) error { return nil }
+func (NoopPrevAlloc) Wait(context.Context) error { return nil }

// Migrate returns nil immediately.
-func (noopPrevAlloc) Migrate(context.Context, *allocdir.AllocDir) error { return nil }
+func (NoopPrevAlloc) Migrate(context.Context, *allocdir.AllocDir) error { return nil }

-func (noopPrevAlloc) IsWaiting() bool   { return false }
-func (noopPrevAlloc) IsMigrating() bool { return false }
+func (NoopPrevAlloc) IsWaiting() bool   { return false }
+func (NoopPrevAlloc) IsMigrating() bool { return false }
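The rename from noopPrevAlloc to the exported NoopPrevAlloc keeps the null-object pattern usable from other packages: NewAllocWatcher always returns a working watcher, so callers never nil-check before calling Wait or Migrate. A minimal sketch of that pattern, with illustrative names rather than Nomad's:

    package main

    import (
    	"context"
    	"fmt"
    )

    type prevAllocWatcher interface {
    	Wait(ctx context.Context) error
    	IsWaiting() bool
    }

    // noopWatcher satisfies the interface but never blocks.
    type noopWatcher struct{}

    func (noopWatcher) Wait(context.Context) error { return nil }
    func (noopWatcher) IsWaiting() bool            { return false }

    // newWatcher mirrors the shape of NewAllocWatcher: when there is no
    // previous allocation it returns a noop value, never a nil interface.
    func newWatcher(prevAllocID string) prevAllocWatcher {
    	if prevAllocID == "" {
    		return noopWatcher{}
    	}
    	// A real implementation would pick a local or remote watcher here.
    	return noopWatcher{}
    }

    func main() {
    	w := newWatcher("")
    	fmt.Println(w.Wait(context.Background()), w.IsWaiting()) // <nil> false
    }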

View File

@@ -1,4 +1,4 @@
-package client
+package allocrunner

import (
	"archive/tar"
@@ -15,15 +15,15 @@ import (
	"time"

	"github.com/hashicorp/nomad/client/allocdir"
-	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/client/testutil"
+	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/mock"
)

// TestPrevAlloc_LocalPrevAlloc asserts that when a previous alloc runner is
// set a localPrevAlloc will block on it.
func TestPrevAlloc_LocalPrevAlloc(t *testing.T) {
-	_, prevAR := testAllocRunner(t, false)
+	_, prevAR := TestAllocRunner(t, false)
	prevAR.alloc.Job.TaskGroups[0].Tasks[0].Config["run_for"] = "10s"

	newAlloc := mock.Alloc()
@@ -33,7 +33,7 @@ func TestPrevAlloc_LocalPrevAlloc(t *testing.T) {
	task.Driver = "mock_driver"
	task.Config["run_for"] = "500ms"

-	waiter := newAllocWatcher(newAlloc, prevAR, nil, nil, testLogger(), "")
+	waiter := NewAllocWatcher(newAlloc, prevAR, nil, nil, testlog.Logger(t), "")

	// Wait in a goroutine with a context to make sure it exits at the right time
	ctx, cancel := context.WithCancel(context.Background())
@@ -177,14 +177,8 @@ func TestPrevAlloc_StreamAllocDir_Ok(t *testing.T) {
	}
	defer os.RemoveAll(dir1)

-	c1 := TestClient(t, func(c *config.Config) {
-		c.RPCHandler = nil
-	})
-	defer c1.Shutdown()
-
	rc := ioutil.NopCloser(buf)
-	prevAlloc := &remotePrevAlloc{logger: testLogger()}
+	prevAlloc := &remotePrevAlloc{logger: testlog.Logger(t)}
	if err := prevAlloc.streamAllocDir(context.Background(), rc, dir1); err != nil {
		t.Fatalf("err: %v", err)
	}
@@ -234,7 +228,7 @@ func TestPrevAlloc_StreamAllocDir_Error(t *testing.T) {
	// This test only unit tests streamAllocDir so we only need a partially
	// complete remotePrevAlloc
	prevAlloc := &remotePrevAlloc{
-		logger:      testLogger(),
+		logger:      testlog.Logger(t),
		allocID:     "123",
		prevAllocID: "abc",
		migrate:     true,

View File

@@ -1,4 +1,4 @@
-package client
+package taskrunner

import (
	"fmt"

View File

@@ -1,4 +1,4 @@
-package client
+package taskrunner

import (
	"fmt"
@@ -19,6 +19,7 @@ import (
	sconfig "github.com/hashicorp/nomad/nomad/structs/config"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
)

const (
@@ -563,11 +564,12 @@ func TestTaskTemplateManager_Unblock_Consul(t *testing.T) {
func TestTaskTemplateManager_Unblock_Vault(t *testing.T) {
	t.Parallel()
+	require := require.New(t)
	// Make a template that will render based on a key in Vault
-	vaultPath := "secret/password"
+	vaultPath := "secret/data/password"
	key := "password"
	content := "barbaz"
-	embedded := fmt.Sprintf(`{{with secret "%s"}}{{.Data.%s}}{{end}}`, vaultPath, key)
+	embedded := fmt.Sprintf(`{{with secret "%s"}}{{.Data.data.%s}}{{end}}`, vaultPath, key)
	file := "my.tmpl"
	template := &structs.Template{
		EmbeddedTmpl: embedded,
@@ -588,7 +590,8 @@ func TestTaskTemplateManager_Unblock_Vault(t *testing.T) {
	// Write the secret to Vault
	logical := harness.vault.Client.Logical()
-	logical.Write(vaultPath, map[string]interface{}{key: content})
+	_, err := logical.Write(vaultPath, map[string]interface{}{"data": map[string]interface{}{key: content}})
+	require.NoError(err)

	// Wait for the unblock
	select {
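The updated test targets Vault's KV version 2 API, where secrets live under a data/ path segment and payloads are wrapped in a nested "data" map; that is why both the write and the template's field access change. A minimal standalone sketch of the same read/write shape using the Vault API client (the mount name, address, and secret values are assumptions for illustration):

    package main

    import (
    	"fmt"
    	"log"

    	vaultapi "github.com/hashicorp/vault/api"
    )

    func main() {
    	// Assumes VAULT_ADDR/VAULT_TOKEN are set and "secret" is a kv-v2 mount.
    	client, err := vaultapi.NewClient(vaultapi.DefaultConfig())
    	if err != nil {
    		log.Fatal(err)
    	}

    	// KV v2 writes go to <mount>/data/<name> and the payload is nested
    	// under a "data" key.
    	_, err = client.Logical().Write("secret/data/password",
    		map[string]interface{}{"data": map[string]interface{}{"password": "barbaz"}})
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Reads come back with the same nesting, hence {{.Data.data.password}}
    	// in consul-template syntax.
    	secret, err := client.Logical().Read("secret/data/password")
    	if err != nil || secret == nil {
    		log.Fatal("read failed: ", err)
    	}
    	inner := secret.Data["data"].(map[string]interface{})
    	fmt.Println(inner["password"]) // barbaz
    }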

View File

@@ -0,0 +1,19 @@
+package taskrunner
+
+// Name returns the name of the task
+func (r *TaskRunner) Name() string {
+	if r == nil || r.task == nil {
+		return ""
+	}
+	return r.task.Name
+}
+
+// IsLeader returns whether the task is a leader task
+func (r *TaskRunner) IsLeader() bool {
+	if r == nil || r.task == nil {
+		return false
+	}
+	return r.task.Leader
+}
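These accessors are deliberately safe to call on a nil receiver, a Go idiom that spares callers a nil check: a method with a pointer receiver may be invoked on a nil *T as long as it guards the receiver itself. A self-contained sketch with illustrative types:

    package main

    import "fmt"

    type task struct{ name string }

    type runner struct{ task *task }

    // Name guards both the receiver and its field, so callers can
    // invoke it on a nil *runner without panicking.
    func (r *runner) Name() string {
    	if r == nil || r.task == nil {
    		return ""
    	}
    	return r.task.name
    }

    func main() {
    	var r *runner
    	fmt.Printf("%q\n", r.Name()) // "" — no panic on a nil receiver
    }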

View File

@@ -1,4 +1,4 @@
-package client
+package restarts

import (
	"fmt"
@@ -20,7 +20,7 @@ const (
	ReasonDelay = "Exceeded allowed attempts, applying a delay"
)

-func newRestartTracker(policy *structs.RestartPolicy, jobType string) *RestartTracker {
+func NewRestartTracker(policy *structs.RestartPolicy, jobType string) *RestartTracker {
	onSuccess := true
	if jobType == structs.JobTypeBatch {
		onSuccess = false
@@ -54,6 +54,13 @@ func (r *RestartTracker) SetPolicy(policy *structs.RestartPolicy) {
	r.policy = policy
}

+// GetPolicy returns a copy of the policy used to determine restarts.
+func (r *RestartTracker) GetPolicy() *structs.RestartPolicy {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	return r.policy.Copy()
+}
+
// SetStartError is used to mark the most recent start error. If starting was
// successful the error should be nil.
func (r *RestartTracker) SetStartError(err error) *RestartTracker {
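GetPolicy pairs the mutex with a defensive copy, so a caller can inspect the policy without racing against SetPolicy or mutating shared state. A minimal sketch of the copy-under-lock getter (names are illustrative, not Nomad's):

    package main

    import (
    	"fmt"
    	"sync"
    )

    type policy struct {
    	Mode     string
    	Attempts int
    }

    func (p *policy) copy() *policy {
    	if p == nil {
    		return nil
    	}
    	c := *p
    	return &c
    }

    type tracker struct {
    	lock   sync.Mutex
    	policy *policy
    }

    // GetPolicy reads under the lock and hands back a copy, never the
    // shared pointer.
    func (t *tracker) GetPolicy() *policy {
    	t.lock.Lock()
    	defer t.lock.Unlock()
    	return t.policy.copy()
    }

    func main() {
    	t := &tracker{policy: &policy{Mode: "fail", Attempts: 2}}
    	p := t.GetPolicy()
    	p.Attempts = 99 // mutating the copy leaves the tracker untouched
    	fmt.Println(t.policy.Attempts, p.Attempts) // 2 99
    }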

View File

@@ -1,4 +1,4 @@
-package client
+package restarts

import (
	"fmt"
@@ -32,7 +32,7 @@ func testWaitResult(exit int) *cstructs.WaitResult {
func TestClient_RestartTracker_ModeDelay(t *testing.T) {
	t.Parallel()
	p := testPolicy(true, structs.RestartPolicyModeDelay)
-	rt := newRestartTracker(p, structs.JobTypeService)
+	rt := NewRestartTracker(p, structs.JobTypeService)
	for i := 0; i < p.Attempts; i++ {
		state, when := rt.SetWaitResult(testWaitResult(127)).GetState()
		if state != structs.TaskRestarting {
@@ -58,7 +58,7 @@ func TestClient_RestartTracker_ModeDelay(t *testing.T) {
func TestClient_RestartTracker_ModeFail(t *testing.T) {
	t.Parallel()
	p := testPolicy(true, structs.RestartPolicyModeFail)
-	rt := newRestartTracker(p, structs.JobTypeSystem)
+	rt := NewRestartTracker(p, structs.JobTypeSystem)
	for i := 0; i < p.Attempts; i++ {
		state, when := rt.SetWaitResult(testWaitResult(127)).GetState()
		if state != structs.TaskRestarting {
@@ -78,7 +78,7 @@ func TestClient_RestartTracker_ModeFail(t *testing.T) {
func TestClient_RestartTracker_NoRestartOnSuccess(t *testing.T) {
	t.Parallel()
	p := testPolicy(false, structs.RestartPolicyModeDelay)
-	rt := newRestartTracker(p, structs.JobTypeBatch)
+	rt := NewRestartTracker(p, structs.JobTypeBatch)
	if state, _ := rt.SetWaitResult(testWaitResult(0)).GetState(); state != structs.TaskTerminated {
		t.Fatalf("NextRestart() returned %v, expected: %v", state, structs.TaskTerminated)
	}
@@ -90,28 +90,28 @@ func TestClient_RestartTracker_ZeroAttempts(t *testing.T) {
	p.Attempts = 0

	// Test with a non-zero exit code
-	rt := newRestartTracker(p, structs.JobTypeService)
+	rt := NewRestartTracker(p, structs.JobTypeService)
	if state, when := rt.SetWaitResult(testWaitResult(1)).GetState(); state != structs.TaskNotRestarting {
		t.Fatalf("expect no restart, got restart/delay: %v/%v", state, when)
	}

	// Even with a zero (successful) exit code non-batch jobs should exit
	// with TaskNotRestarting
-	rt = newRestartTracker(p, structs.JobTypeService)
+	rt = NewRestartTracker(p, structs.JobTypeService)
	if state, when := rt.SetWaitResult(testWaitResult(0)).GetState(); state != structs.TaskNotRestarting {
		t.Fatalf("expect no restart, got restart/delay: %v/%v", state, when)
	}

	// Batch jobs with a zero exit code and 0 attempts *do* exit cleanly
	// with Terminated
-	rt = newRestartTracker(p, structs.JobTypeBatch)
+	rt = NewRestartTracker(p, structs.JobTypeBatch)
	if state, when := rt.SetWaitResult(testWaitResult(0)).GetState(); state != structs.TaskTerminated {
		t.Fatalf("expect terminated, got restart/delay: %v/%v", state, when)
	}

	// Batch jobs with a non-zero exit code and 0 attempts exit with
	// TaskNotRestarting
-	rt = newRestartTracker(p, structs.JobTypeBatch)
+	rt = NewRestartTracker(p, structs.JobTypeBatch)
	if state, when := rt.SetWaitResult(testWaitResult(1)).GetState(); state != structs.TaskNotRestarting {
		t.Fatalf("expect no restart, got restart/delay: %v/%v", state, when)
	}
@@ -121,7 +121,7 @@ func TestClient_RestartTracker_RestartTriggered(t *testing.T) {
	t.Parallel()
	p := testPolicy(true, structs.RestartPolicyModeFail)
	p.Attempts = 0
-	rt := newRestartTracker(p, structs.JobTypeService)
+	rt := NewRestartTracker(p, structs.JobTypeService)
	if state, when := rt.SetRestartTriggered(false).GetState(); state != structs.TaskRestarting && when != 0 {
		t.Fatalf("expect restart immediately, got %v %v", state, when)
	}
@@ -131,7 +131,7 @@ func TestClient_RestartTracker_RestartTriggered_Failure(t *testing.T) {
	t.Parallel()
	p := testPolicy(true, structs.RestartPolicyModeFail)
	p.Attempts = 1
-	rt := newRestartTracker(p, structs.JobTypeService)
+	rt := NewRestartTracker(p, structs.JobTypeService)
	if state, when := rt.SetRestartTriggered(true).GetState(); state != structs.TaskRestarting || when == 0 {
		t.Fatalf("expect restart got %v %v", state, when)
	}
@@ -143,7 +143,7 @@ func TestClient_RestartTracker_RestartTriggered_Failure(t *testing.T) {
func TestClient_RestartTracker_StartError_Recoverable_Fail(t *testing.T) {
	t.Parallel()
	p := testPolicy(true, structs.RestartPolicyModeFail)
-	rt := newRestartTracker(p, structs.JobTypeSystem)
+	rt := NewRestartTracker(p, structs.JobTypeSystem)
	recErr := structs.NewRecoverableError(fmt.Errorf("foo"), true)
	for i := 0; i < p.Attempts; i++ {
		state, when := rt.SetStartError(recErr).GetState()
@@ -164,7 +164,7 @@ func TestClient_RestartTracker_StartError_Recoverable_Fail(t *testing.T) {
func TestClient_RestartTracker_StartError_Recoverable_Delay(t *testing.T) {
	t.Parallel()
	p := testPolicy(true, structs.RestartPolicyModeDelay)
-	rt := newRestartTracker(p, structs.JobTypeSystem)
+	rt := NewRestartTracker(p, structs.JobTypeSystem)
	recErr := structs.NewRecoverableError(fmt.Errorf("foo"), true)
	for i := 0; i < p.Attempts; i++ {
		state, when := rt.SetStartError(recErr).GetState()

View File

@@ -1,4 +1,4 @@
-package client
+package taskrunner

import (
	"bytes"
@@ -21,9 +21,12 @@ import (
	"github.com/hashicorp/go-multierror"
	version "github.com/hashicorp/go-version"
	"github.com/hashicorp/nomad/client/allocdir"
+	"github.com/hashicorp/nomad/client/allocrunner/getter"
+	"github.com/hashicorp/nomad/client/allocrunner/taskrunner/restarts"
	"github.com/hashicorp/nomad/client/config"
+	consulApi "github.com/hashicorp/nomad/client/consul"
	"github.com/hashicorp/nomad/client/driver"
-	"github.com/hashicorp/nomad/client/getter"
+	"github.com/hashicorp/nomad/client/state"
	"github.com/hashicorp/nomad/client/vaultclient"
	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/nomad/structs"
@@ -89,8 +92,8 @@ type TaskRunner struct {
	config         *config.Config
	updater        TaskStateUpdater
	logger         *log.Logger
-	restartTracker *RestartTracker
-	consul         ConsulServiceAPI
+	restartTracker *restarts.RestartTracker
+	consul         consulApi.ConsulServiceAPI

	// running marks whether the task is running
	running bool
@@ -230,7 +233,7 @@ type SignalEvent struct {
func NewTaskRunner(logger *log.Logger, config *config.Config,
	stateDB *bolt.DB, updater TaskStateUpdater, taskDir *allocdir.TaskDir,
	alloc *structs.Allocation, task *structs.Task,
-	vaultClient vaultclient.VaultClient, consulClient ConsulServiceAPI) *TaskRunner {
+	vaultClient vaultclient.VaultClient, consulClient consulApi.ConsulServiceAPI) *TaskRunner {

	// Merge in the task resources
	task.Resources = alloc.TaskResources[task.Name]
@@ -241,7 +244,7 @@ func NewTaskRunner(logger *log.Logger, config *config.Config,
		logger.Printf("[ERR] client: alloc %q for missing task group %q", alloc.ID, alloc.TaskGroup)
		return nil
	}
-	restartTracker := newRestartTracker(tg.RestartPolicy, alloc.Job.Type)
+	restartTracker := restarts.NewRestartTracker(tg.RestartPolicy, alloc.Job.Type)

	// Initialize the environment builder
	envBuilder := env.NewBuilder(config.Node, alloc, task, config.Region)
@@ -329,40 +332,20 @@ func (r *TaskRunner) pre060StateFilePath() string {
// backwards incompatible upgrades that need to restart tasks with a new
// executor.
func (r *TaskRunner) RestoreState() (string, error) {
-	// COMPAT: Remove in 0.7.0
-	// 0.6.0 transitioned from individual state files to a single bolt-db.
-	// The upgrade path is to:
-	// Check if old state exists
-	//   If so, restore from that and delete old state
-	// Restore using state database
	var snap taskRunnerState
-
-	// Check if the old snapshot is there
-	oldPath := r.pre060StateFilePath()
-	if err := pre060RestoreState(oldPath, &snap); err == nil {
-		// Delete the old state
-		os.RemoveAll(oldPath)
-	} else if !os.IsNotExist(err) {
-		// Something corrupt in the old state file
-		return "", err
-	} else {
-		// We are doing a normal restore
-		err := r.stateDB.View(func(tx *bolt.Tx) error {
-			bkt, err := getTaskBucket(tx, r.alloc.ID, r.task.Name)
-			if err != nil {
-				return fmt.Errorf("failed to get task bucket: %v", err)
-			}
-			if err := getObject(bkt, taskRunnerStateAllKey, &snap); err != nil {
-				return fmt.Errorf("failed to read task runner state: %v", err)
-			}
-			return nil
-		})
-		if err != nil {
-			return "", err
-		}
+	err := r.stateDB.View(func(tx *bolt.Tx) error {
+		bkt, err := state.GetTaskBucket(tx, r.alloc.ID, r.task.Name)
+		if err != nil {
+			return fmt.Errorf("failed to get task bucket: %v", err)
+		}
+		if err := state.GetObject(bkt, taskRunnerStateAllKey, &snap); err != nil {
+			return fmt.Errorf("failed to read task runner state: %v", err)
+		}
+		return nil
+	})
+	if err != nil {
+		return "", err
	}

	// Restore fields from the snapshot
@@ -510,12 +493,12 @@ func (r *TaskRunner) SaveState() error {
	// Start the transaction.
	return r.stateDB.Batch(func(tx *bolt.Tx) error {
		// Grab the task bucket
-		taskBkt, err := getTaskBucket(tx, r.alloc.ID, r.task.Name)
+		taskBkt, err := state.GetTaskBucket(tx, r.alloc.ID, r.task.Name)
		if err != nil {
			return fmt.Errorf("failed to retrieve allocation bucket: %v", err)
		}

-		if err := putData(taskBkt, taskRunnerStateAllKey, buf.Bytes()); err != nil {
+		if err := state.PutData(taskBkt, taskRunnerStateAllKey, buf.Bytes()); err != nil {
			return fmt.Errorf("failed to write task_runner state: %v", err)
		}
@@ -534,7 +517,7 @@ func (r *TaskRunner) DestroyState() error {
	defer r.persistLock.Unlock()

	return r.stateDB.Update(func(tx *bolt.Tx) error {
-		if err := deleteTaskBucket(tx, r.alloc.ID, r.task.Name); err != nil {
+		if err := state.DeleteTaskBucket(tx, r.alloc.ID, r.task.Name); err != nil {
			return fmt.Errorf("failed to delete task bucket: %v", err)
		}
		return nil
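The state helpers introduced here (state.GetTaskBucket, state.GetObject, state.PutData, state.DeleteTaskBucket) wrap boltdb's transactional API. A self-contained sketch of the underlying View/Update pattern, with illustrative bucket, key, and path names:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/boltdb/bolt"
    )

    func main() {
    	// Illustrative path; real code would use a configured state dir.
    	db, err := bolt.Open("/tmp/example-state.db", 0600, nil)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer db.Close()

    	// Writes run in an Update (or Batch) transaction.
    	err = db.Update(func(tx *bolt.Tx) error {
    		bkt, err := tx.CreateBucketIfNotExists([]byte("tasks"))
    		if err != nil {
    			return fmt.Errorf("failed to get task bucket: %v", err)
    		}
    		return bkt.Put([]byte("all"), []byte("serialized-snapshot"))
    	})
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Reads run in a read-only View transaction.
    	err = db.View(func(tx *bolt.Tx) error {
    		bkt := tx.Bucket([]byte("tasks"))
    		if bkt == nil {
    			return fmt.Errorf("no task bucket")
    		}
    		fmt.Printf("%s\n", bkt.Get([]byte("all")))
    		return nil
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    }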

View File

@@ -1,9 +1,8 @@
-package client
+package taskrunner

import (
	"fmt"
	"io/ioutil"
-	"log"
	"net/http"
	"net/http/httptest"
	"os"
@@ -17,32 +16,24 @@ import (
	"github.com/boltdb/bolt"
	"github.com/golang/snappy"
	"github.com/hashicorp/nomad/client/allocdir"
+	"github.com/hashicorp/nomad/client/allocrunner/taskrunner/restarts"
	"github.com/hashicorp/nomad/client/config"
+	consulApi "github.com/hashicorp/nomad/client/consul"
	"github.com/hashicorp/nomad/client/driver/env"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/client/vaultclient"
	"github.com/hashicorp/nomad/command/agent/consul"
+	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/kr/pretty"
)

-func testLogger() *log.Logger {
-	return prefixedTestLogger("")
-}
-
-func prefixedTestLogger(prefix string) *log.Logger {
-	if testing.Verbose() {
-		return log.New(os.Stderr, prefix, log.LstdFlags|log.Lmicroseconds)
-	}
-	return log.New(ioutil.Discard, "", 0)
-}
-
// Returns a tracker that never restarts.
-func noRestartsTracker() *RestartTracker {
+func noRestartsTracker() *restarts.RestartTracker {
	policy := &structs.RestartPolicy{Attempts: 0, Mode: structs.RestartPolicyModeFail}
-	return newRestartTracker(policy, structs.JobTypeBatch)
+	return restarts.NewRestartTracker(policy, structs.JobTypeBatch)
}

type MockTaskStateUpdater struct {
@@ -102,7 +93,7 @@ func testTaskRunner(t *testing.T, restarts bool) *taskRunnerTestCtx {
//
// Callers should defer Cleanup() to cleanup after completion
func testTaskRunnerFromAlloc(t *testing.T, restarts bool, alloc *structs.Allocation) *taskRunnerTestCtx {
-	logger := testLogger()
+	logger := testlog.Logger(t)
	conf := config.DefaultConfig()
	conf.Node = mock.Node()
	conf.StateDir = os.TempDir()
@@ -120,7 +111,7 @@ func testTaskRunnerFromAlloc(t *testing.T, restarts bool, alloc *structs.Allocat
	upd := &MockTaskStateUpdater{}
	task := alloc.Job.TaskGroups[0].Tasks[0]

-	allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(conf.AllocDir, alloc.ID))
+	allocDir := allocdir.NewAllocDir(testlog.Logger(t), filepath.Join(conf.AllocDir, alloc.ID))
	if err := allocDir.Build(); err != nil {
		t.Fatalf("error building alloc dir: %v", err)
		return nil
@@ -387,8 +378,8 @@ func TestTaskRunner_Update(t *testing.T) {
		if ctx.tr.task.Driver != newTask.Driver {
			return false, fmt.Errorf("Task not copied")
		}
-		if ctx.tr.restartTracker.policy.Mode != newMode {
-			return false, fmt.Errorf("expected restart policy %q but found %q", newMode, ctx.tr.restartTracker.policy.Mode)
+		if ctx.tr.restartTracker.GetPolicy().Mode != newMode {
+			return false, fmt.Errorf("expected restart policy %q but found %q", newMode, ctx.tr.restartTracker.GetPolicy().Mode)
		}
		if ctx.tr.handle.ID() == oldHandle {
			return false, fmt.Errorf("handle not ctx.updated")
@@ -642,7 +633,7 @@ func TestTaskRunner_UnregisterConsul_Retries(t *testing.T) {
	ctx := testTaskRunnerFromAlloc(t, true, alloc)

	// Use mockConsulServiceClient
-	consul := newMockConsulServiceClient(t)
+	consul := consulApi.NewMockConsulServiceClient(t)
	ctx.tr.consul = consul

	ctx.tr.MarkReceived()
@@ -650,26 +641,26 @@ func TestTaskRunner_UnregisterConsul_Retries(t *testing.T) {
	defer ctx.Cleanup()

	// Assert it is properly registered and unregistered
-	if expected := 6; len(consul.ops) != expected {
-		t.Errorf("expected %d consul ops but found: %d", expected, len(consul.ops))
+	if expected := 6; len(consul.Ops) != expected {
+		t.Errorf("expected %d consul ops but found: %d", expected, len(consul.Ops))
	}
-	if consul.ops[0].op != "add" {
-		t.Errorf("expected first op to be add but found: %q", consul.ops[0].op)
+	if consul.Ops[0].Op != "add" {
+		t.Errorf("expected first Op to be add but found: %q", consul.Ops[0].Op)
	}
-	if consul.ops[1].op != "remove" {
-		t.Errorf("expected second op to be remove but found: %q", consul.ops[1].op)
+	if consul.Ops[1].Op != "remove" {
+		t.Errorf("expected second op to be remove but found: %q", consul.Ops[1].Op)
	}
-	if consul.ops[2].op != "remove" {
-		t.Errorf("expected third op to be remove but found: %q", consul.ops[2].op)
+	if consul.Ops[2].Op != "remove" {
+		t.Errorf("expected third op to be remove but found: %q", consul.Ops[2].Op)
	}
-	if consul.ops[3].op != "add" {
-		t.Errorf("expected fourth op to be add but found: %q", consul.ops[3].op)
+	if consul.Ops[3].Op != "add" {
+		t.Errorf("expected fourth op to be add but found: %q", consul.Ops[3].Op)
	}
-	if consul.ops[4].op != "remove" {
-		t.Errorf("expected fifth op to be remove but found: %q", consul.ops[4].op)
+	if consul.Ops[4].Op != "remove" {
+		t.Errorf("expected fifth op to be remove but found: %q", consul.Ops[4].Op)
	}
-	if consul.ops[5].op != "remove" {
-		t.Errorf("expected sixth op to be remove but found: %q", consul.ops[5].op)
+	if consul.Ops[5].Op != "remove" {
+		t.Errorf("expected sixth op to be remove but found: %q", consul.Ops[5].Op)
	}
}
@@ -1199,7 +1190,7 @@ func TestTaskRunner_Template_Artifact(t *testing.T) {
		t.Fatalf("bad: %v", err)
	}

-	ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(dir, ".."))))
+	ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(dir, "../../.."))))
	defer ts.Close()

	alloc := mock.Alloc()

View File

@@ -1,6 +1,6 @@
// +build !windows

-package client
+package taskrunner

import (
	"syscall"

View File

@@ -0,0 +1,65 @@
+package allocrunner
+
+import (
+	"io/ioutil"
+	"os"
+	"sync"
+	"testing"
+
+	"github.com/boltdb/bolt"
+	"github.com/hashicorp/nomad/client/config"
+	consulApi "github.com/hashicorp/nomad/client/consul"
+	"github.com/hashicorp/nomad/client/vaultclient"
+	"github.com/hashicorp/nomad/helper/testlog"
+	"github.com/hashicorp/nomad/nomad/mock"
+	"github.com/hashicorp/nomad/nomad/structs"
+)
+
+type MockAllocStateUpdater struct {
+	Allocs []*structs.Allocation
+	mu     sync.Mutex
+}
+
+// Update fulfills the TaskStateUpdater interface
+func (m *MockAllocStateUpdater) Update(alloc *structs.Allocation) {
+	m.mu.Lock()
+	m.Allocs = append(m.Allocs, alloc)
+	m.mu.Unlock()
+}
+
+// Last returns a copy of the last alloc (or nil) sync'd
+func (m *MockAllocStateUpdater) Last() *structs.Allocation {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	n := len(m.Allocs)
+	if n == 0 {
+		return nil
+	}
+	return m.Allocs[n-1].Copy()
+}
+
+func TestAllocRunnerFromAlloc(t *testing.T, alloc *structs.Allocation, restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
+	conf := config.DefaultConfig()
+	conf.Node = mock.Node()
+	conf.StateDir = os.TempDir()
+	conf.AllocDir = os.TempDir()
+	tmp, _ := ioutil.TempFile("", "state-db")
+	db, _ := bolt.Open(tmp.Name(), 0600, nil)
+	upd := &MockAllocStateUpdater{}
+	if !restarts {
+		*alloc.Job.LookupTaskGroup(alloc.TaskGroup).RestartPolicy = structs.RestartPolicy{Attempts: 0}
+		alloc.Job.Type = structs.JobTypeBatch
+	}
+	vclient := vaultclient.NewMockVaultClient()
+	ar := NewAllocRunner(testlog.Logger(t), conf, db, upd.Update, alloc, vclient, consulApi.NewMockConsulServiceClient(t), NoopPrevAlloc{})
+	return upd, ar
+}
+
+func TestAllocRunner(t *testing.T, restarts bool) (*MockAllocStateUpdater, *AllocRunner) {
+	// Use mock driver
+	alloc := mock.Alloc()
+	task := alloc.Job.TaskGroups[0].Tasks[0]
+	task.Driver = "mock_driver"
+	task.Config["run_for"] = "500ms"
+	return TestAllocRunnerFromAlloc(t, alloc, restarts)
+}
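Because these helpers are now exported, tests outside the package can construct a runner directly. A hypothetical example (the import path follows the new layout; the test body is illustrative):

    package allocrunner_test

    import (
    	"testing"

    	"github.com/hashicorp/nomad/client/allocrunner"
    )

    func TestExample_AllocRunnerHelper(t *testing.T) {
    	upd, ar := allocrunner.TestAllocRunner(t, false)
    	go ar.Run()
    	defer ar.Destroy()

    	// ... drive the runner, then inspect the last synced allocation ...
    	if last := upd.Last(); last != nil {
    		t.Logf("client status: %s", last.ClientStatus)
    	}
    }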

View File

@@ -21,8 +21,11 @@ import (
	"github.com/hashicorp/consul/lib"
	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/client/allocdir"
+	"github.com/hashicorp/nomad/client/allocrunner"
	"github.com/hashicorp/nomad/client/config"
+	consulApi "github.com/hashicorp/nomad/client/consul"
	"github.com/hashicorp/nomad/client/servers"
+	"github.com/hashicorp/nomad/client/state"
	"github.com/hashicorp/nomad/client/stats"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/client/vaultclient"
@@ -88,7 +91,7 @@ const (
type ClientStatsReporter interface {
	// GetAllocStats returns the AllocStatsReporter for the passed allocation.
	// If it does not exist an error is reported.
-	GetAllocStats(allocID string) (AllocStatsReporter, error)
+	GetAllocStats(allocID string) (allocrunner.AllocStatsReporter, error)

	// LatestHostStats returns the latest resource usage stats for the host
	LatestHostStats() *stats.HostStats
@@ -145,7 +148,7 @@ type Client struct {
	// allocs maps alloc IDs to their AllocRunner. This map includes all
	// AllocRunners - running and GC'd - until the server GCs them.
-	allocs    map[string]*AllocRunner
+	allocs    map[string]*allocrunner.AllocRunner
	allocLock sync.RWMutex

	// allocUpdates stores allocations that need to be synced to the server.
@@ -153,7 +156,7 @@ type Client struct {
	// consulService is Nomad's custom Consul client for managing services
	// and checks.
-	consulService ConsulServiceAPI
+	consulService consulApi.ConsulServiceAPI

	// consulCatalog is the subset of Consul's Catalog API Nomad uses.
	consulCatalog consul.CatalogAPI
@@ -193,7 +196,7 @@ var (
)

// NewClient is used to create a new client from the given configuration
-func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulService ConsulServiceAPI, logger *log.Logger) (*Client, error) {
+func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulService consulApi.ConsulServiceAPI, logger *log.Logger) (*Client, error) {
	// Create the tls wrapper
	var tlsWrap tlsutil.RegionWrapper
	if cfg.TLSConfig.EnableRPC {
@@ -217,7 +220,7 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic
		tlsWrap:            tlsWrap,
		streamingRpcs:      structs.NewStreamingRpcRegistry(),
		logger:             logger,
-		allocs:             make(map[string]*AllocRunner),
+		allocs:             make(map[string]*allocrunner.AllocRunner),
		allocUpdates:       make(chan *structs.Allocation, 64),
		shutdownCh:         make(chan struct{}),
		triggerDiscoveryCh: make(chan struct{}),
@@ -432,7 +435,17 @@ func (c *Client) reloadTLSConnections(newConfig *nconfig.TLSConfig) error {
// Reload allows a client to reload its configuration on the fly
func (c *Client) Reload(newConfig *config.Config) error {
-	return c.reloadTLSConnections(newConfig.TLSConfig)
+	shouldReloadTLS, err := tlsutil.ShouldReloadRPCConnections(c.config.TLSConfig, newConfig.TLSConfig)
+	if err != nil {
+		c.logger.Printf("[ERR] nomad: error parsing server TLS configuration: %s", err)
+		return err
+	}
+
+	if shouldReloadTLS {
+		return c.reloadTLSConnections(newConfig.TLSConfig)
+	}
+
+	return nil
}

// Leave is used to prepare the client to leave the cluster
@@ -507,10 +520,16 @@ func (c *Client) Shutdown() error {
	// Destroy all the running allocations.
	if c.config.DevMode {
+		var wg sync.WaitGroup
		for _, ar := range c.getAllocRunners() {
-			ar.Destroy()
-			<-ar.WaitCh()
+			wg.Add(1)
+			go func(ar *allocrunner.AllocRunner) {
+				ar.Destroy()
+				<-ar.WaitCh()
+				wg.Done()
+			}(ar)
		}
+		wg.Wait()
	}

	c.shutdown = true
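Destroying allocation runners concurrently instead of serially can shorten dev-mode shutdown considerably when many allocations are running. A generic sketch of the fan-out-and-wait pattern used here, with an illustrative worker type:

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    type worker struct{ id int }

    func (w *worker) destroy() { time.Sleep(10 * time.Millisecond) }

    func main() {
    	workers := []*worker{{1}, {2}, {3}}

    	var wg sync.WaitGroup
    	for _, w := range workers {
    		wg.Add(1)
    		go func(w *worker) {
    			defer wg.Done()
    			w.destroy() // runs concurrently instead of serially
    		}(w)
    	}
    	wg.Wait() // block until every worker has shut down
    	fmt.Println("all workers destroyed")
    }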
@@ -562,7 +581,7 @@ func (c *Client) StatsReporter() ClientStatsReporter {
	return c
}

-func (c *Client) GetAllocStats(allocID string) (AllocStatsReporter, error) {
+func (c *Client) GetAllocStats(allocID string) (allocrunner.AllocStatsReporter, error) {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()
	ar, ok := c.allocs[allocID]
@@ -714,7 +733,7 @@ func (c *Client) restoreState() error {
	} else {
		// Normal path
		err := c.stateDB.View(func(tx *bolt.Tx) error {
-			allocs, err = getAllAllocationIDs(tx)
+			allocs, err = state.GetAllAllocationIDs(tx)
			if err != nil {
				return fmt.Errorf("failed to list allocations: %v", err)
			}
@@ -731,10 +750,10 @@ func (c *Client) restoreState() error {
		alloc := &structs.Allocation{ID: id}

		// don't worry about blocking/migrating when restoring
-		watcher := noopPrevAlloc{}
+		watcher := allocrunner.NoopPrevAlloc{}

		c.configLock.RLock()
-		ar := NewAllocRunner(c.logger, c.configCopy.Copy(), c.stateDB, c.updateAllocStatus, alloc, c.vaultClient, c.consulService, watcher)
+		ar := allocrunner.NewAllocRunner(c.logger, c.configCopy.Copy(), c.stateDB, c.updateAllocStatus, alloc, c.vaultClient, c.consulService, watcher)
		c.configLock.RUnlock()

		c.allocLock.Lock()
@@ -778,7 +797,7 @@ func (c *Client) saveState() error {
	wg.Add(len(runners))

	for id, ar := range runners {
-		go func(id string, ar *AllocRunner) {
+		go func(id string, ar *allocrunner.AllocRunner) {
			err := ar.SaveState()
			if err != nil {
				c.logger.Printf("[ERR] client: failed to save state for alloc %q: %v", id, err)
@@ -795,10 +814,10 @@ func (c *Client) saveState() error {
}

// getAllocRunners returns a snapshot of the current set of alloc runners.
-func (c *Client) getAllocRunners() map[string]*AllocRunner {
+func (c *Client) getAllocRunners() map[string]*allocrunner.AllocRunner {
	c.allocLock.RLock()
	defer c.allocLock.RUnlock()
-	runners := make(map[string]*AllocRunner, len(c.allocs))
+	runners := make(map[string]*allocrunner.AllocRunner, len(c.allocs))
	for id, ar := range c.allocs {
		runners[id] = ar
	}
@@ -1677,7 +1696,7 @@ OUTER:
			// allocation or if the alloc runner requires an updated allocation.
			runner, ok := runners[allocID]

-			if !ok || runner.shouldUpdate(modifyIndex) {
+			if !ok || runner.ShouldUpdate(modifyIndex) {
				// Only pull allocs that are required. Filtered
				// allocs might be at a higher index, so ignore
				// it.
@@ -1810,7 +1829,7 @@ func (c *Client) runAllocs(update *allocUpdates) {
	c.allocLock.RLock()
	exist := make([]*structs.Allocation, 0, len(c.allocs))
	for _, ar := range c.allocs {
-		exist = append(exist, ar.alloc)
+		exist = append(exist, ar.Alloc())
	}
	c.allocLock.RUnlock()
@@ -1899,18 +1918,18 @@ func (c *Client) addAlloc(alloc *structs.Allocation, migrateToken string) error
	// get the previous alloc runner - if one exists - for the
	// blocking/migrating watcher
-	var prevAR *AllocRunner
+	var prevAR *allocrunner.AllocRunner
	if alloc.PreviousAllocation != "" {
		prevAR = c.allocs[alloc.PreviousAllocation]
	}

	c.configLock.RLock()
-	prevAlloc := newAllocWatcher(alloc, prevAR, c, c.configCopy, c.logger, migrateToken)
+	prevAlloc := allocrunner.NewAllocWatcher(alloc, prevAR, c, c.configCopy, c.logger, migrateToken)

	// Copy the config since the node can be swapped out as it is being updated.
	// The long term fix is to pass in the config and node separately and then
	// we don't have to do a copy.
-	ar := NewAllocRunner(c.logger, c.configCopy.Copy(), c.stateDB, c.updateAllocStatus, alloc, c.vaultClient, c.consulService, prevAlloc)
+	ar := allocrunner.NewAllocRunner(c.logger, c.configCopy.Copy(), c.stateDB, c.updateAllocStatus, alloc, c.vaultClient, c.consulService, prevAlloc)
	c.configLock.RUnlock()

	// Store the alloc runner.

View File

@@ -3,7 +3,6 @@ package client
import (
	"fmt"
	"io/ioutil"
-	"log"
	"os"
	"path/filepath"
	"testing"
@@ -11,8 +10,10 @@ import (
	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/client/config"
+	consulApi "github.com/hashicorp/nomad/client/consul"
	"github.com/hashicorp/nomad/client/driver"
	"github.com/hashicorp/nomad/command/agent/consul"
+	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad"
	"github.com/hashicorp/nomad/nomad/mock"
@@ -601,10 +602,10 @@ func TestClient_SaveRestoreState(t *testing.T) {
	}

	// Create a new client
-	logger := log.New(c1.config.LogOutput, "", log.LstdFlags)
+	logger := testlog.Logger(t)
	catalog := consul.NewMockCatalog(logger)
-	mockService := newMockConsulServiceClient(t)
-	mockService.logger = logger
+	mockService := consulApi.NewMockConsulServiceClient(t)
+	mockService.Logger = logger
	c2, err := NewClient(c1.config, catalog, mockService, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
@@ -649,7 +650,7 @@ func TestClient_Init(t *testing.T) {
		config: &config.Config{
			AllocDir: allocDir,
		},
-		logger: log.New(os.Stderr, "", log.LstdFlags),
+		logger: testlog.Logger(t),
	}
	if err := client.init(); err != nil {
		t.Fatalf("err: %s", err)

View File

@@ -1,4 +1,4 @@
-package client
+package consul

import (
	"github.com/hashicorp/nomad/command/agent/consul"

View File

@@ -0,0 +1,86 @@
+package consul
+
+import (
+	"fmt"
+	"log"
+	"sync"
+
+	"github.com/hashicorp/nomad/command/agent/consul"
+	"github.com/hashicorp/nomad/helper/testlog"
+	"github.com/mitchellh/go-testing-interface"
+)
+
+// MockConsulOp represents the register/deregister operations.
+type MockConsulOp struct {
+	Op      string // add, remove, or update
+	AllocID string
+	Task    string
+}
+
+func NewMockConsulOp(op, allocID, task string) MockConsulOp {
+	if op != "add" && op != "remove" && op != "update" && op != "alloc_registrations" {
+		panic(fmt.Errorf("invalid consul op: %s", op))
+	}
+	return MockConsulOp{
+		Op:      op,
+		AllocID: allocID,
+		Task:    task,
+	}
+}
+
+// MockConsulServiceClient implements the ConsulServiceAPI interface to record
+// and log task registration/deregistration.
+type MockConsulServiceClient struct {
+	Ops []MockConsulOp
+	mu  sync.Mutex
+
+	Logger *log.Logger
+
+	// AllocRegistrationsFn allows injecting return values for the
+	// AllocRegistrations function.
+	AllocRegistrationsFn func(allocID string) (*consul.AllocRegistration, error)
+}
+
+func NewMockConsulServiceClient(t testing.T) *MockConsulServiceClient {
+	m := MockConsulServiceClient{
+		Ops:    make([]MockConsulOp, 0, 20),
+		Logger: testlog.Logger(t),
+	}
+	return &m
+}
+
+func (m *MockConsulServiceClient) UpdateTask(old, new *consul.TaskServices) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.Logger.Printf("[TEST] mock_consul: UpdateTask(alloc: %s, task: %s)", new.AllocID[:6], new.Name)
+	m.Ops = append(m.Ops, NewMockConsulOp("update", new.AllocID, new.Name))
+	return nil
+}
+
+func (m *MockConsulServiceClient) RegisterTask(task *consul.TaskServices) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.Logger.Printf("[TEST] mock_consul: RegisterTask(alloc: %s, task: %s)", task.AllocID, task.Name)
+	m.Ops = append(m.Ops, NewMockConsulOp("add", task.AllocID, task.Name))
+	return nil
+}
+
+func (m *MockConsulServiceClient) RemoveTask(task *consul.TaskServices) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.Logger.Printf("[TEST] mock_consul: RemoveTask(%q, %q)", task.AllocID, task.Name)
+	m.Ops = append(m.Ops, NewMockConsulOp("remove", task.AllocID, task.Name))
+}
+
+func (m *MockConsulServiceClient) AllocRegistrations(allocID string) (*consul.AllocRegistration, error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.Logger.Printf("[TEST] mock_consul: AllocRegistrations(%q)", allocID)
+	m.Ops = append(m.Ops, NewMockConsulOp("alloc_registrations", allocID, ""))
+	if m.AllocRegistrationsFn != nil {
+		return m.AllocRegistrationsFn(allocID)
+	}
+	return nil, nil
+}
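With the mock exported, any package can inject it and assert on the recorded operations. A hypothetical test sketch (the package name and assertions are illustrative):

    package taskrunner_test

    import (
    	"testing"

    	consulApi "github.com/hashicorp/nomad/client/consul"
    )

    func TestExample_MockConsulOps(t *testing.T) {
    	mock := consulApi.NewMockConsulServiceClient(t)

    	// ... wire mock into the component under test and run it ...

    	// Every register/deregister call is recorded for later assertions.
    	for i, op := range mock.Ops {
    		t.Logf("op %d: %s alloc=%s task=%s", i, op.Op, op.AllocID, op.Task)
    	}
    }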

View File

@@ -1,86 +0,0 @@
-package client
-
-import (
-	"fmt"
-	"log"
-	"sync"
-
-	"github.com/hashicorp/nomad/command/agent/consul"
-	"github.com/hashicorp/nomad/helper/testlog"
-	"github.com/mitchellh/go-testing-interface"
-)
-
-// mockConsulOp represents the register/deregister operations.
-type mockConsulOp struct {
-	op      string // add, remove, or update
-	allocID string
-	task    string
-}
-
-func newMockConsulOp(op, allocID, task string) mockConsulOp {
-	if op != "add" && op != "remove" && op != "update" && op != "alloc_registrations" {
-		panic(fmt.Errorf("invalid consul op: %s", op))
-	}
-	return mockConsulOp{
-		op:      op,
-		allocID: allocID,
-		task:    task,
-	}
-}
-
-// mockConsulServiceClient implements the ConsulServiceAPI interface to record
-// and log task registration/deregistration.
-type mockConsulServiceClient struct {
-	ops []mockConsulOp
-	mu  sync.Mutex
-
-	logger *log.Logger
-
-	// allocRegistrationsFn allows injecting return values for the
-	// AllocRegistrations function.
-	allocRegistrationsFn func(allocID string) (*consul.AllocRegistration, error)
-}
-
-func newMockConsulServiceClient(t testing.T) *mockConsulServiceClient {
-	m := mockConsulServiceClient{
-		ops:    make([]mockConsulOp, 0, 20),
-		logger: testlog.Logger(t),
-	}
-	return &m
-}
-
-func (m *mockConsulServiceClient) UpdateTask(old, new *consul.TaskServices) error {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	m.logger.Printf("[TEST] mock_consul: UpdateTask(alloc: %s, task: %s)", new.AllocID[:6], new.Name)
-	m.ops = append(m.ops, newMockConsulOp("update", new.AllocID, new.Name))
-	return nil
-}
-
-func (m *mockConsulServiceClient) RegisterTask(task *consul.TaskServices) error {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	m.logger.Printf("[TEST] mock_consul: RegisterTask(alloc: %s, task: %s)", task.AllocID, task.Name)
-	m.ops = append(m.ops, newMockConsulOp("add", task.AllocID, task.Name))
-	return nil
-}
-
-func (m *mockConsulServiceClient) RemoveTask(task *consul.TaskServices) {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	m.logger.Printf("[TEST] mock_consul: RemoveTask(%q, %q)", task.AllocID, task.Name)
-	m.ops = append(m.ops, newMockConsulOp("remove", task.AllocID, task.Name))
-}
-
-func (m *mockConsulServiceClient) AllocRegistrations(allocID string) (*consul.AllocRegistration, error) {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	m.logger.Printf("[TEST] mock_consul: AllocRegistrations(%q)", allocID)
-	m.ops = append(m.ops, newMockConsulOp("alloc_registrations", allocID, ""))
-	if m.allocRegistrationsFn != nil {
-		return m.allocRegistrationsFn(allocID)
-	}
-	return nil, nil
-}

View File

@@ -6,6 +6,7 @@ import (
	"time"

	docker "github.com/fsouza/go-dockerclient"
+	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/testutil"
)
@@ -52,7 +53,7 @@ func TestDockerCoordinator_ConcurrentPulls(t *testing.T) {
	// Add a delay so we can get multiple queued up
	mock := newMockImageClient(mapping, 10*time.Millisecond)
	config := &dockerCoordinatorConfig{
-		logger:      testLogger(),
+		logger:      testlog.Logger(t),
		cleanup:     true,
		client:      mock,
		removeDelay: 100 * time.Millisecond,
@@ -99,7 +100,7 @@ func TestDockerCoordinator_Pull_Remove(t *testing.T) {
	// Add a delay so we can get multiple queued up
	mock := newMockImageClient(mapping, 10*time.Millisecond)
	config := &dockerCoordinatorConfig{
-		logger:      testLogger(),
+		logger:      testlog.Logger(t),
		cleanup:     true,
		client:      mock,
		removeDelay: 1 * time.Millisecond,
@@ -162,7 +163,7 @@ func TestDockerCoordinator_Remove_Cancel(t *testing.T) {
	mock := newMockImageClient(mapping, 1*time.Millisecond)
	config := &dockerCoordinatorConfig{
-		logger:      testLogger(),
+		logger:      testlog.Logger(t),
		cleanup:     true,
		client:      mock,
		removeDelay: 100 * time.Millisecond,
@@ -210,7 +211,7 @@ func TestDockerCoordinator_No_Cleanup(t *testing.T) {
	mock := newMockImageClient(mapping, 1*time.Millisecond)
	config := &dockerCoordinatorConfig{
-		logger:      testLogger(),
+		logger:      testlog.Logger(t),
		cleanup:     false,
		client:      mock,
		removeDelay: 1 * time.Millisecond,

View File

@@ -6,6 +6,7 @@ import (
	"encoding/json"
	"fmt"
	"io"
+	"strings"
	"sync"
	"time"
@@ -114,9 +115,15 @@ func (p *imageProgress) get() (string, time.Time) {
		est = (elapsed.Nanoseconds() / cur * total) - elapsed.Nanoseconds()
	}

-	return fmt.Sprintf("Pulled %d/%d (%s/%s) layers: %d waiting/%d pulling - est %.1fs remaining",
-		pulled, len(p.layers), units.BytesSize(float64(cur)), units.BytesSize(float64(total)),
-		waiting, pulling, time.Duration(est).Seconds()), p.timestamp
+	var msg strings.Builder
+	fmt.Fprintf(&msg, "Pulled %d/%d (%s/%s) layers: %d waiting/%d pulling",
+		pulled, len(p.layers), units.BytesSize(float64(cur)), units.BytesSize(float64(total)),
+		waiting, pulling)
+
+	if est > 0 {
+		fmt.Fprintf(&msg, " - est %.1fs remaining", time.Duration(est).Seconds())
+	}
+	return msg.String(), p.timestamp
}

// set takes a status message received from the docker engine api during an image
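The rewrite also stops printing a meaningless estimate when est is zero or negative (for instance before any bytes have been counted). A minimal sketch of the conditional strings.Builder pattern:

    package main

    import (
    	"fmt"
    	"strings"
    	"time"
    )

    func progressMsg(pulled, total int, est time.Duration) string {
    	var msg strings.Builder
    	fmt.Fprintf(&msg, "Pulled %d/%d layers", pulled, total)
    	if est > 0 {
    		// Omitting the suffix avoids a bogus "est 0.0s remaining".
    		fmt.Fprintf(&msg, " - est %.1fs remaining", est.Seconds())
    	}
    	return msg.String()
    }

    func main() {
    	fmt.Println(progressMsg(3, 10, 42*time.Second)) // with ETA
    	fmt.Println(progressMsg(3, 10, 0))              // without ETA
    }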

View File

@@ -24,6 +24,7 @@ import (
 	"github.com/hashicorp/nomad/client/fingerprint"
 	cstructs "github.com/hashicorp/nomad/client/structs"
 	"github.com/hashicorp/nomad/client/testutil"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/helper/uuid"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"

@@ -225,7 +226,7 @@ func TestDockerDriver_Fingerprint_Bridge(t *testing.T) {
 	conf := testConfig(t)
 	conf.Node = mock.Node()
-	dd := NewDockerDriver(NewDriverContext("", "", "", "", conf, conf.Node, testLogger(), nil))
+	dd := NewDockerDriver(NewDriverContext("", "", "", "", conf, conf.Node, testlog.Logger(t), nil))

 	request := &cstructs.FingerprintRequest{Config: conf, Node: conf.Node}
 	var response cstructs.FingerprintResponse

@@ -277,7 +278,7 @@ func TestDockerDriver_Check_DockerHealthStatus(t *testing.T) {
 	conf := testConfig(t)
 	conf.Node = mock.Node()
-	dd := NewDockerDriver(NewDriverContext("", "", "", "", conf, conf.Node, testLogger(), nil))
+	dd := NewDockerDriver(NewDriverContext("", "", "", "", conf, conf.Node, testlog.Logger(t), nil))

 	request := &cstructs.HealthCheckRequest{}
 	var response cstructs.HealthCheckResponse

@@ -1677,7 +1678,7 @@ func setupDockerVolumes(t *testing.T, cfg *config.Config, hostpath string) (*str
 	}

 	// Build alloc and task directory structure
-	allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(cfg.AllocDir, uuid.Generate()))
+	allocDir := allocdir.NewAllocDir(testlog.Logger(t), filepath.Join(cfg.AllocDir, uuid.Generate()))
 	if err := allocDir.Build(); err != nil {
 		t.Fatalf("failed to build alloc dir: %v", err)
 	}

@@ -1690,11 +1691,11 @@ func setupDockerVolumes(t *testing.T, cfg *config.Config, hostpath string) (*str
 	// Setup driver
 	alloc := mock.Alloc()
-	logger := testLogger()
+	logger := testlog.Logger(t)
 	emitter := func(m string, args ...interface{}) {
 		logger.Printf("[EVENT] "+m, args...)
 	}
-	driverCtx := NewDriverContext(alloc.Job.Name, alloc.TaskGroup, task.Name, alloc.ID, cfg, cfg.Node, testLogger(), emitter)
+	driverCtx := NewDriverContext(alloc.Job.Name, alloc.TaskGroup, task.Name, alloc.ID, cfg, cfg.Node, testlog.Logger(t), emitter)
 	driver := NewDockerDriver(driverCtx)

 	// Setup execCtx
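This file, and most of the test files below, swap ad-hoc testLogger helpers for the shared helper/testlog package so log output is attributed to the test that produced it. A minimal sketch of what such a helper plausibly looks like (an assumption for illustration, not the package's actual source):

package testlog

import (
	"log"
	"testing"
)

// writer adapts a test to io.Writer so log output is recorded through
// t.Logf and shown only for failing tests (or under -v).
type writer struct{ t testing.TB }

func (w writer) Write(p []byte) (int, error) {
	w.t.Logf("%s", p)
	return len(p), nil
}

// Logger returns a *log.Logger that writes through the test's log.
// testing.TB covers both *testing.T and *testing.B, which matches the
// testlog.Logger(t) and testlog.Logger(b) calls in these diffs.
func Logger(t testing.TB) *log.Logger {
	return log.New(writer{t}, "", log.LstdFlags)
}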

View File

@@ -3,7 +3,6 @@ package driver
 import (
 	"io"
 	"io/ioutil"
-	"log"
 	"math/rand"
 	"os"
 	"path/filepath"

@@ -14,6 +13,7 @@ import (
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/config"
 	"github.com/hashicorp/nomad/client/driver/env"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/helper/testtask"
 	"github.com/hashicorp/nomad/helper/uuid"
 	"github.com/hashicorp/nomad/nomad/mock"

@@ -60,10 +60,6 @@ func copyFile(src, dst string, t *testing.T) {
 	}
 }

-func testLogger() *log.Logger {
-	return log.New(os.Stderr, "", log.LstdFlags)
-}
-
 func testConfig(t *testing.T) *config.Config {
 	conf := config.DefaultConfig()

@@ -116,7 +112,7 @@ type testContext struct {
 func testDriverContexts(t *testing.T, task *structs.Task) *testContext {
 	cfg := testConfig(t)
 	cfg.Node = mock.Node()
-	allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(cfg.AllocDir, uuid.Generate()))
+	allocDir := allocdir.NewAllocDir(testlog.Logger(t), filepath.Join(cfg.AllocDir, uuid.Generate()))
 	if err := allocDir.Build(); err != nil {
 		t.Fatalf("AllocDir.Build() failed: %v", err)
 	}

@@ -141,7 +137,7 @@ func testDriverContexts(t *testing.T, task *structs.Task) *testContext {
 	SetEnvvars(eb, tmpdrv.FSIsolation(), td, cfg)
 	execCtx := NewExecContext(td, eb.Build())

-	logger := testLogger()
+	logger := testlog.Logger(t)
 	emitter := func(m string, args ...interface{}) {
 		logger.Printf("[EVENT] "+m, args...)
 	}

@@ -182,7 +178,7 @@ func setupTaskEnv(t *testing.T, driver string) (*allocdir.TaskDir, map[string]st
 	alloc.Name = "Bar"
 	alloc.TaskResources["web"].Networks[0].DynamicPorts[0].Value = 2000
 	conf := testConfig(t)
-	allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(conf.AllocDir, alloc.ID))
+	allocDir := allocdir.NewAllocDir(testlog.Logger(t), filepath.Join(conf.AllocDir, alloc.ID))
 	taskDir := allocDir.NewTaskDir(task.Name)
 	eb := env.NewBuilder(conf.Node, alloc, task, conf.Region)
 	tmpDriver, err := NewDriver(driver, NewEmptyDriverContext())

View File

@@ -402,9 +402,9 @@ func (b *Builder) setAlloc(alloc *structs.Allocation) *Builder {
 	taskMetaSize := len(combined) * 2

 	// if job is parameterized initialize optional meta to empty strings
-	if alloc.Job.IsParameterized() {
-		b.taskMeta = make(map[string]string,
-			taskMetaSize+(len(alloc.Job.ParameterizedJob.MetaOptional)*2))
+	if alloc.Job.Dispatched {
+		optionalMetaCount := len(alloc.Job.ParameterizedJob.MetaOptional)
+		b.taskMeta = make(map[string]string, taskMetaSize+optionalMetaCount*2)
 		for _, k := range alloc.Job.ParameterizedJob.MetaOptional {
 			b.taskMeta[fmt.Sprintf("%s%s", MetaPrefix, strings.ToUpper(k))] = ""
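Switching the guard from IsParameterized() to Dispatched narrows when empty optional-meta defaults are injected: a parameterized parent job matches the former but never runs tasks itself, while only the child jobs created by dispatch carry Dispatched. A hedged sketch of the assumed job shapes (field names from the hunk above, not code from the patch):

// Assumed shapes, for illustration only:
//
//	parent := &structs.Job{ParameterizedJob: cfg, Dispatched: false} // registered template, never runs
//	child  := &structs.Job{ParameterizedJob: cfg, Dispatched: true}  // created by dispatch, runs tasks
//
// Only the child should receive empty-string defaults for MetaOptional keys,
// which is exactly what the alloc.Job.Dispatched guard selects. The hunk also
// assumes Dispatched implies ParameterizedJob != nil, since it dereferences it.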

View File

@@ -378,13 +378,15 @@ func TestEnvironment_UpdateTask(t *testing.T) {
 // job, if an optional meta field is not set, it will get interpolated as an
 // empty string.
 func TestEnvironment_InterpolateEmptyOptionalMeta(t *testing.T) {
+	require := require.New(t)
 	a := mock.Alloc()
 	a.Job.ParameterizedJob = &structs.ParameterizedJobConfig{
 		MetaOptional: []string{"metaopt1", "metaopt2"},
 	}
+	a.Job.Dispatched = true
 	task := a.Job.TaskGroups[0].Tasks[0]
 	task.Meta = map[string]string{"metaopt1": "metaopt1val"}
 	env := NewBuilder(mock.Node(), a, task, "global").Build()
-	require.Equal(t, "metaopt1val", env.ReplaceEnv("${NOMAD_META_metaopt1}"))
-	require.Empty(t, env.ReplaceEnv("${NOMAD_META_metaopt2}"))
+	require.Equal("metaopt1val", env.ReplaceEnv("${NOMAD_META_metaopt1}"))
+	require.Empty(env.ReplaceEnv("${NOMAD_META_metaopt2}"))
 }

View File

@@ -364,7 +364,7 @@ func (e *UniversalExecutor) configureLoggers() error {
 			return fmt.Errorf("error creating new stdout log file for %q: %v", e.ctx.Task.Name, err)
 		}

-		r, err := NewLogRotatorWrapper(lro)
+		r, err := newLogRotatorWrapper(e.logger, lro)
 		if err != nil {
 			return err
 		}

@@ -378,7 +378,7 @@ func (e *UniversalExecutor) configureLoggers() error {
 			return fmt.Errorf("error creating new stderr log file for %q: %v", e.ctx.Task.Name, err)
 		}

-		r, err := NewLogRotatorWrapper(lre)
+		r, err := newLogRotatorWrapper(e.logger, lre)
 		if err != nil {
 			return err
 		}

@@ -851,11 +851,12 @@ type logRotatorWrapper struct {
 	processOutReader  *os.File
 	rotatorWriter     *logging.FileRotator
 	hasFinishedCopied chan struct{}
+	logger            *log.Logger
 }

-// NewLogRotatorWrapper takes a rotator and returns a wrapper that has the
+// newLogRotatorWrapper takes a rotator and returns a wrapper that has the
 // processOutWriter to attach to the processes stdout or stderr.
-func NewLogRotatorWrapper(rotator *logging.FileRotator) (*logRotatorWrapper, error) {
+func newLogRotatorWrapper(logger *log.Logger, rotator *logging.FileRotator) (*logRotatorWrapper, error) {
 	r, w, err := os.Pipe()
 	if err != nil {
 		return nil, fmt.Errorf("failed to create os.Pipe for extracting logs: %v", err)

@@ -865,7 +866,8 @@ func NewLogRotatorWrapper(rotator *logging.FileRotator) (*logRotatorWrapper, err
 		processOutWriter:  w,
 		processOutReader:  r,
 		rotatorWriter:     rotator,
-		hasFinishedCopied: make(chan struct{}, 1),
+		hasFinishedCopied: make(chan struct{}),
+		logger:            logger,
 	}
 	wrap.start()
 	return wrap, nil

@@ -875,22 +877,51 @@ func NewLogRotatorWrapper(rotator *logging.FileRotator) (*logRotatorWrapper, err
 // called by the constructor and not the user of the wrapper.
 func (l *logRotatorWrapper) start() {
 	go func() {
-		io.Copy(l.rotatorWriter, l.processOutReader)
-		l.processOutReader.Close() // in case io.Copy stopped due to write error
-		close(l.hasFinishedCopied)
+		defer close(l.hasFinishedCopied)
+		_, err := io.Copy(l.rotatorWriter, l.processOutReader)
+		if err != nil {
+			// Close reader to propagate io error across pipe.
+			// Note that this may block until the process exits on
+			// Windows due to
+			// https://github.com/PowerShell/PowerShell/issues/4254
+			// or similar issues. Since this is already running in
+			// a goroutine its safe to block until the process is
+			// force-killed.
+			l.processOutReader.Close()
+		}
 	}()
 	return
 }

 // Close closes the rotator and the process writer to ensure that the Wait
 // command exits.
-func (l *logRotatorWrapper) Close() error {
+func (l *logRotatorWrapper) Close() {
 	// Wait up to the close tolerance before we force close
 	select {
 	case <-l.hasFinishedCopied:
 	case <-time.After(processOutputCloseTolerance):
 	}
-	err := l.processOutReader.Close()
+
+	// Closing the read side of a pipe may block on Windows if the process
+	// is being debugged as in:
+	// https://github.com/PowerShell/PowerShell/issues/4254
+	// The pipe will be closed and cleaned up when the process exits.
+	closeDone := make(chan struct{})
+	go func() {
+		defer close(closeDone)
+		err := l.processOutReader.Close()
+		if err != nil && !strings.Contains(err.Error(), "file already closed") {
+			l.logger.Printf("[WARN] executor: error closing read-side of process output pipe: %v", err)
+		}
+	}()

+	select {
+	case <-closeDone:
+	case <-time.After(processOutputCloseTolerance):
+		l.logger.Printf("[WARN] executor: timed out waiting for read-side of process output pipe to close")
+	}
+
 	l.rotatorWriter.Close()
-	return err
+	return
 }
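Two details worth noting in the new Close: hasFinishedCopied becomes unbuffered because it is now only ever closed (a closed channel always receives), and the possibly-blocking pipe Close is pushed into a goroutine bounded by a select and timeout. That second idiom is reusable anywhere a Close can wedge; a generic sketch under those assumptions (names are illustrative, not from the patch):

package main

import (
	"io"
	"log"
	"os"
	"time"
)

// closeWithTolerance closes c in a goroutine and waits at most tolerance,
// logging a warning instead of hanging the caller on a wedged Close.
func closeWithTolerance(c io.Closer, tolerance time.Duration, logger *log.Logger) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		if err := c.Close(); err != nil {
			logger.Printf("[WARN] close error: %v", err)
		}
	}()
	select {
	case <-done:
	case <-time.After(tolerance):
		logger.Printf("[WARN] timed out waiting for close")
	}
}

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	closeWithTolerance(r, time.Second, log.New(os.Stderr, "", log.LstdFlags))
}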

View File

@@ -2,7 +2,6 @@ package executor
 import (
 	"io/ioutil"
-	"log"
 	"os"
 	"path/filepath"
 	"strconv"

@@ -15,6 +14,7 @@ import (
 	dstructs "github.com/hashicorp/nomad/client/driver/structs"
 	cstructs "github.com/hashicorp/nomad/client/structs"
 	"github.com/hashicorp/nomad/client/testutil"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/mock"
 )

@@ -41,13 +41,13 @@ func testExecutorContextWithChroot(t *testing.T) (*ExecutorContext, *allocdir.Al
 	task := alloc.Job.TaskGroups[0].Tasks[0]
 	taskEnv := env.NewBuilder(mock.Node(), alloc, task, "global").Build()

-	allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(os.TempDir(), alloc.ID))
+	allocDir := allocdir.NewAllocDir(testlog.Logger(t), filepath.Join(os.TempDir(), alloc.ID))
 	if err := allocDir.Build(); err != nil {
-		log.Fatalf("AllocDir.Build() failed: %v", err)
+		t.Fatalf("AllocDir.Build() failed: %v", err)
 	}
 	if err := allocDir.NewTaskDir(task.Name).Build(false, chrootEnv, cstructs.FSIsolationChroot); err != nil {
 		allocDir.Destroy()
-		log.Fatalf("allocDir.NewTaskDir(%q) failed: %v", task.Name, err)
+		t.Fatalf("allocDir.NewTaskDir(%q) failed: %v", task.Name, err)
 	}
 	td := allocDir.TaskDirs[task.Name]
 	ctx := &ExecutorContext{

@@ -71,7 +71,7 @@ func TestExecutor_IsolationAndConstraints(t *testing.T) {
 	execCmd.ResourceLimits = true
 	execCmd.User = dstructs.DefaultUnprivilegedUser

-	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
+	executor := NewExecutor(testlog.Logger(t))

 	if err := executor.SetContext(ctx); err != nil {
 		t.Fatalf("Unexpected error: %v", err)

@@ -151,7 +151,7 @@ func TestExecutor_ClientCleanup(t *testing.T) {
 	ctx.Task.LogConfig.MaxFileSizeMB = 300
 	defer allocDir.Destroy()

-	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
+	executor := NewExecutor(testlog.Logger(t))

 	if err := executor.SetContext(ctx); err != nil {
 		t.Fatalf("Unexpected error")

View File

@@ -2,7 +2,6 @@ package executor
 import (
 	"io/ioutil"
-	"log"
 	"os"
 	"path/filepath"
 	"strings"

@@ -13,15 +12,12 @@ import (
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/driver/env"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/mock"
 	tu "github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/go-ps"
 )

-func testLogger() *log.Logger {
-	return log.New(os.Stderr, "", log.LstdFlags)
-}
-
 // testExecutorContext returns an ExecutorContext and AllocDir.
 //
 // The caller is responsible for calling AllocDir.Destroy() to cleanup.

@@ -30,13 +26,13 @@ func testExecutorContext(t *testing.T) (*ExecutorContext, *allocdir.AllocDir) {
 	task := alloc.Job.TaskGroups[0].Tasks[0]
 	taskEnv := env.NewBuilder(mock.Node(), alloc, task, "global").Build()

-	allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(os.TempDir(), alloc.ID))
+	allocDir := allocdir.NewAllocDir(testlog.Logger(t), filepath.Join(os.TempDir(), alloc.ID))
 	if err := allocDir.Build(); err != nil {
-		log.Fatalf("AllocDir.Build() failed: %v", err)
+		t.Fatalf("AllocDir.Build() failed: %v", err)
 	}
 	if err := allocDir.NewTaskDir(task.Name).Build(false, nil, cstructs.FSIsolationNone); err != nil {
 		allocDir.Destroy()
-		log.Fatalf("allocDir.NewTaskDir(%q) failed: %v", task.Name, err)
+		t.Fatalf("allocDir.NewTaskDir(%q) failed: %v", task.Name, err)
 	}
 	td := allocDir.TaskDirs[task.Name]
 	ctx := &ExecutorContext{

@@ -54,7 +50,7 @@ func TestExecutor_Start_Invalid(t *testing.T) {
 	execCmd := ExecCommand{Cmd: invalid, Args: []string{"1"}}
 	ctx, allocDir := testExecutorContext(t)
 	defer allocDir.Destroy()
-	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
+	executor := NewExecutor(testlog.Logger(t))

 	if err := executor.SetContext(ctx); err != nil {
 		t.Fatalf("Unexpected error")

@@ -70,7 +66,7 @@ func TestExecutor_Start_Wait_Failure_Code(t *testing.T) {
 	execCmd := ExecCommand{Cmd: "/bin/date", Args: []string{"fail"}}
 	ctx, allocDir := testExecutorContext(t)
 	defer allocDir.Destroy()
-	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
+	executor := NewExecutor(testlog.Logger(t))

 	if err := executor.SetContext(ctx); err != nil {
 		t.Fatalf("Unexpected error")

@@ -98,7 +94,7 @@ func TestExecutor_Start_Wait(t *testing.T) {
 	execCmd := ExecCommand{Cmd: "/bin/echo", Args: []string{"hello world"}}
 	ctx, allocDir := testExecutorContext(t)
 	defer allocDir.Destroy()
-	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
+	executor := NewExecutor(testlog.Logger(t))

 	if err := executor.SetContext(ctx); err != nil {
 		t.Fatalf("Unexpected error")

@@ -137,7 +133,7 @@ func TestExecutor_WaitExitSignal(t *testing.T) {
 	execCmd := ExecCommand{Cmd: "/bin/sleep", Args: []string{"10000"}}
 	ctx, allocDir := testExecutorContext(t)
 	defer allocDir.Destroy()
-	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
+	executor := NewExecutor(testlog.Logger(t))

 	if err := executor.SetContext(ctx); err != nil {
 		t.Fatalf("Unexpected error")

@@ -180,7 +176,7 @@ func TestExecutor_Start_Kill(t *testing.T) {
 	execCmd := ExecCommand{Cmd: "/bin/sleep", Args: []string{"10 && hello world"}}
 	ctx, allocDir := testExecutorContext(t)
 	defer allocDir.Destroy()
-	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
+	executor := NewExecutor(testlog.Logger(t))

 	if err := executor.SetContext(ctx); err != nil {
 		t.Fatalf("Unexpected error")

@@ -230,7 +226,7 @@ func TestExecutor_MakeExecutable(t *testing.T) {
 	f.Chmod(os.FileMode(0610))

 	// Make a fake executor
-	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
+	executor := NewExecutor(testlog.Logger(t))

 	err = executor.(*UniversalExecutor).makeExecutable(f.Name())
 	if err != nil {

@@ -259,7 +255,7 @@ func TestScanPids(t *testing.T) {
 	p5 := NewFakeProcess(20, 18)

 	// Make a fake executor
-	executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags)).(*UniversalExecutor)
+	executor := NewExecutor(testlog.Logger(t)).(*UniversalExecutor)

 	nomadPids, err := executor.scanPids(5, []ps.Process{p1, p2, p3, p4, p5})
 	if err != nil {

View File

@@ -63,7 +63,7 @@ func (e *UniversalExecutor) shutdownProcess(proc *os.Process) error {
 		if err := sendCtrlBreak(proc.Pid); err != nil {
 			return fmt.Errorf("executor.shutdown error: %v", err)
 		}
-		e.logger.Printf("Sent Ctrl-Break to process %v", proc.Pid)
+		e.logger.Printf("[INFO] executor: sent Ctrl-Break to process %v", proc.Pid)

 	return nil
 }

View File

@@ -3,24 +3,23 @@ package logging
 import (
 	"fmt"
 	"io/ioutil"
-	"log"
 	"math/rand"
 	"os"
 	"path/filepath"
 	"testing"

+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/testutil"
 )

 var (
-	logger       = log.New(os.Stdout, "", log.LstdFlags)
 	pathPrefix   = "logrotator"
 	baseFileName = "redis.stdout"
 )

 func TestFileRotator_IncorrectPath(t *testing.T) {
 	t.Parallel()
-	if _, err := NewFileRotator("/foo", baseFileName, 10, 10, logger); err == nil {
+	if _, err := NewFileRotator("/foo", baseFileName, 10, 10, testlog.Logger(t)); err == nil {
 		t.Fatalf("expected error")
 	}
 }

@@ -34,7 +33,7 @@ func TestFileRotator_CreateNewFile(t *testing.T) {
 	}
 	defer os.RemoveAll(path)

-	_, err = NewFileRotator(path, baseFileName, 10, 10, logger)
+	_, err = NewFileRotator(path, baseFileName, 10, 10, testlog.Logger(t))
 	if err != nil {
 		t.Fatalf("test setup err: %v", err)
 	}

@@ -62,7 +61,7 @@ func TestFileRotator_OpenLastFile(t *testing.T) {
 		t.Fatalf("test setup failure: %v", err)
 	}

-	fr, err := NewFileRotator(path, baseFileName, 10, 10, logger)
+	fr, err := NewFileRotator(path, baseFileName, 10, 10, testlog.Logger(t))
 	if err != nil {
 		t.Fatalf("test setup err: %v", err)
 	}

@@ -86,7 +85,7 @@ func TestFileRotator_WriteToCurrentFile(t *testing.T) {
 		t.Fatalf("test setup failure: %v", err)
 	}

-	fr, err := NewFileRotator(path, baseFileName, 10, 5, logger)
+	fr, err := NewFileRotator(path, baseFileName, 10, 5, testlog.Logger(t))
 	if err != nil {
 		t.Fatalf("test setup err: %v", err)
 	}

@@ -119,7 +118,7 @@ func TestFileRotator_RotateFiles(t *testing.T) {
 	}
 	defer os.RemoveAll(path)

-	fr, err := NewFileRotator(path, baseFileName, 10, 5, logger)
+	fr, err := NewFileRotator(path, baseFileName, 10, 5, testlog.Logger(t))
 	if err != nil {
 		t.Fatalf("test setup err: %v", err)
 	}

@@ -178,7 +177,7 @@ func TestFileRotator_RotateFiles_Boundary(t *testing.T) {
 	}
 	defer os.RemoveAll(path)

-	fr, err := NewFileRotator(path, baseFileName, 10, 5, logger)
+	fr, err := NewFileRotator(path, baseFileName, 10, 5, testlog.Logger(t))
 	if err != nil {
 		t.Fatalf("test setup err: %v", err)
 	}

@@ -244,7 +243,7 @@ func TestFileRotator_WriteRemaining(t *testing.T) {
 		t.Fatalf("test setup failure: %v", err)
 	}

-	fr, err := NewFileRotator(path, baseFileName, 10, 5, logger)
+	fr, err := NewFileRotator(path, baseFileName, 10, 5, testlog.Logger(t))
 	if err != nil {
 		t.Fatalf("test setup err: %v", err)
 	}

@@ -317,7 +316,7 @@ func TestFileRotator_PurgeOldFiles(t *testing.T) {
 	}
 	defer os.RemoveAll(path)

-	fr, err := NewFileRotator(path, baseFileName, 2, 2, logger)
+	fr, err := NewFileRotator(path, baseFileName, 2, 2, testlog.Logger(t))
 	if err != nil {
 		t.Fatalf("test setup err: %v", err)
 	}

@@ -367,7 +366,7 @@ func benchmarkRotatorWithInputSize(size int, b *testing.B) {
 	}
 	defer os.RemoveAll(path)

-	fr, err := NewFileRotator(path, baseFileName, 5, 1024*1024, logger)
+	fr, err := NewFileRotator(path, baseFileName, 5, 1024*1024, testlog.Logger(b))
 	if err != nil {
 		b.Fatalf("test setup err: %v", err)
 	}

View File

@@ -4,17 +4,16 @@ package logging
 import (
 	"bytes"
-	"log"
-	"os"
 	"testing"

 	syslog "github.com/RackSec/srslog"
+	"github.com/hashicorp/nomad/helper/testlog"
 )

 func TestLogParser_Priority(t *testing.T) {
 	t.Parallel()
 	line := []byte("<30>2016-02-10T10:16:43-08:00 d-thinkpad docker/e2a1e3ebd3a3[22950]: 1:C 10 Feb 18:16:43.391 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf")
-	d := NewDockerLogParser(log.New(os.Stdout, "", log.LstdFlags))
+	d := NewDockerLogParser(testlog.Logger(t))
 	p, _, err := d.parsePriority(line)
 	if err != nil {
 		t.Fatalf("got an err: %v", err)

@@ -33,7 +32,7 @@ func TestLogParser_Priority(t *testing.T) {
 func TestLogParser_Priority_UnixFormatter(t *testing.T) {
 	t.Parallel()
 	line := []byte("<30>Feb 6, 10:16:43 docker/e2a1e3ebd3a3[22950]: 1:C 10 Feb 18:16:43.391 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf")
-	d := NewDockerLogParser(log.New(os.Stdout, "", log.LstdFlags))
+	d := NewDockerLogParser(testlog.Logger(t))
 	p, _, err := d.parsePriority(line)
 	if err != nil {
 		t.Fatalf("got an err: %v", err)

View File

@@ -14,6 +14,7 @@ import (
 	"github.com/hashicorp/consul/lib/freeport"
 	"github.com/hashicorp/nomad/client/config"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/testutil"

@@ -65,7 +66,7 @@ func TestQemuDriver_Fingerprint(t *testing.T) {
 }

 func TestQemuDriver_StartOpen_Wait(t *testing.T) {
-	logger := testLogger()
+	logger := testlog.Logger(t)
 	if !testutil.IsTravis() {
 		t.Parallel()
 	}

@@ -143,7 +144,7 @@ func TestQemuDriver_GracefulShutdown(t *testing.T) {
 	}
 	ctestutils.QemuCompatible(t)

-	logger := testLogger()
+	logger := testlog.Logger(t)

 	// Graceful shutdown may be really slow unfortunately
 	killTimeout := 3 * time.Minute

View File

@@ -5,11 +5,12 @@ import (
 	"github.com/hashicorp/nomad/client/config"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 )

 func TestArchFingerprint(t *testing.T) {
-	f := NewArchFingerprint(testLogger())
+	f := NewArchFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

View File

@@ -8,6 +8,7 @@ import (
 	"github.com/hashicorp/nomad/client/config"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 )

@@ -42,7 +43,7 @@ func (m *MountPointDetectorEmptyMountPoint) MountPoint() (string, error) {
 func TestCGroupFingerprint(t *testing.T) {
 	{
 		f := &CGroupFingerprint{
-			logger:             testLogger(),
+			logger:             testlog.Logger(t),
 			lastState:          cgroupUnavailable,
 			mountPointDetector: &MountPointDetectorMountPointFail{},
 		}

@@ -65,7 +66,7 @@ func TestCGroupFingerprint(t *testing.T) {
 	{
 		f := &CGroupFingerprint{
-			logger:             testLogger(),
+			logger:             testlog.Logger(t),
 			lastState:          cgroupUnavailable,
 			mountPointDetector: &MountPointDetectorValidMountPoint{},
 		}

@@ -87,7 +88,7 @@ func TestCGroupFingerprint(t *testing.T) {
 	{
 		f := &CGroupFingerprint{
-			logger:             testLogger(),
+			logger:             testlog.Logger(t),
 			lastState:          cgroupUnavailable,
 			mountPointDetector: &MountPointDetectorEmptyMountPoint{},
 		}

@@ -108,7 +109,7 @@ func TestCGroupFingerprint(t *testing.T) {
 	}
 	{
 		f := &CGroupFingerprint{
-			logger:             testLogger(),
+			logger:             testlog.Logger(t),
 			lastState:          cgroupAvailable,
 			mountPointDetector: &MountPointDetectorValidMountPoint{},
 		}

View File

@@ -9,12 +9,13 @@ import (
 	"github.com/hashicorp/nomad/client/config"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/stretchr/testify/assert"
 )

 func TestConsulFingerprint(t *testing.T) {
-	fp := NewConsulFingerprint(testLogger())
+	fp := NewConsulFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

@@ -170,7 +171,7 @@ const mockConsulResponse = `
 // See https://github.com/hashicorp/nomad/issues/3326
 func TestConsulFingerprint_UnexpectedResponse(t *testing.T) {
 	assert := assert.New(t)
-	fp := NewConsulFingerprint(testLogger())
+	fp := NewConsulFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

View File

@@ -5,11 +5,12 @@ import (
 	"github.com/hashicorp/nomad/client/config"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 )

 func TestCPUFingerprint(t *testing.T) {
-	f := NewCPUFingerprint(testLogger())
+	f := NewCPUFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

@@ -52,7 +53,7 @@ func TestCPUFingerprint(t *testing.T) {
 // TestCPUFingerprint_OverrideCompute asserts that setting cpu_total_compute in
 // the client config overrides the detected CPU freq (if any).
 func TestCPUFingerprint_OverrideCompute(t *testing.T) {
-	f := NewCPUFingerprint(testLogger())
+	f := NewCPUFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

View File

@@ -10,12 +10,13 @@ import (
 	"github.com/hashicorp/nomad/client/config"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 )

 func TestEnvAWSFingerprint_nonAws(t *testing.T) {
 	os.Setenv("AWS_ENV_URL", "http://127.0.0.1/latest/meta-data/")
-	f := NewEnvAWSFingerprint(testLogger())
+	f := NewEnvAWSFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

@@ -33,7 +34,7 @@ func TestEnvAWSFingerprint_nonAws(t *testing.T) {
 }

 func TestEnvAWSFingerprint_aws(t *testing.T) {
-	f := NewEnvAWSFingerprint(testLogger())
+	f := NewEnvAWSFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

@@ -167,7 +168,7 @@ func TestNetworkFingerprint_AWS(t *testing.T) {
 	defer ts.Close()
 	os.Setenv("AWS_ENV_URL", ts.URL+"/latest/meta-data/")

-	f := NewEnvAWSFingerprint(testLogger())
+	f := NewEnvAWSFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

@@ -216,7 +217,7 @@ func TestNetworkFingerprint_AWS_network(t *testing.T) {
 	defer ts.Close()
 	os.Setenv("AWS_ENV_URL", ts.URL+"/latest/meta-data/")

-	f := NewEnvAWSFingerprint(testLogger())
+	f := NewEnvAWSFingerprint(testlog.Logger(t))
 	{
 		node := &structs.Node{
 			Attributes: make(map[string]string),

@@ -297,7 +298,7 @@ func TestNetworkFingerprint_AWS_network(t *testing.T) {
 func TestNetworkFingerprint_notAWS(t *testing.T) {
 	os.Setenv("AWS_ENV_URL", "http://127.0.0.1/latest/meta-data/")
-	f := NewEnvAWSFingerprint(testLogger())
+	f := NewEnvAWSFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

View File

@@ -11,12 +11,13 @@ import (
 	"github.com/hashicorp/nomad/client/config"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 )

 func TestGCEFingerprint_nonGCE(t *testing.T) {
 	os.Setenv("GCE_ENV_URL", "http://127.0.0.1/computeMetadata/v1/instance/")
-	f := NewEnvGCEFingerprint(testLogger())
+	f := NewEnvGCEFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

@@ -90,7 +91,7 @@ func testFingerprint_GCE(t *testing.T, withExternalIp bool) {
 	}))
 	defer ts.Close()
 	os.Setenv("GCE_ENV_URL", ts.URL+"/computeMetadata/v1/instance/")
-	f := NewEnvGCEFingerprint(testLogger())
+	f := NewEnvGCEFingerprint(testlog.Logger(t))

 	request := &cstructs.FingerprintRequest{Config: &config.Config{}, Node: node}
 	var response cstructs.FingerprintResponse

View File

@@ -3,8 +3,6 @@ package fingerprint
 // This file contains helper methods for testing fingerprinters

 import (
-	"log"
-	"os"
 	"testing"

 	"github.com/hashicorp/nomad/client/config"

@@ -12,10 +10,6 @@ import (
 	"github.com/hashicorp/nomad/nomad/structs"
 )

-func testLogger() *log.Logger {
-	return log.New(os.Stderr, "", log.LstdFlags)
-}
-
 func assertFingerprintOK(t *testing.T, fp Fingerprint, node *structs.Node) *cstructs.FingerprintResponse {
 	request := &cstructs.FingerprintRequest{Config: new(config.Config), Node: node}
 	var response cstructs.FingerprintResponse

View File

@@ -5,11 +5,12 @@ import (
 	"github.com/hashicorp/nomad/client/config"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 )

 func TestHostFingerprint(t *testing.T) {
-	f := NewHostFingerprint(testLogger())
+	f := NewHostFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

View File

@@ -5,13 +5,14 @@ import (
 	"github.com/hashicorp/nomad/client/config"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/stretchr/testify/require"
 )

 func TestMemoryFingerprint(t *testing.T) {
-	f := NewMemoryFingerprint(testLogger())
+	f := NewMemoryFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

@@ -34,7 +35,7 @@ func TestMemoryFingerprint(t *testing.T) {
 }

 func TestMemoryFingerprint_Override(t *testing.T) {
-	f := NewMemoryFingerprint(testLogger())
+	f := NewMemoryFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

View File

@@ -8,6 +8,7 @@ import (
 	"github.com/hashicorp/nomad/client/config"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 )

@@ -184,7 +185,7 @@ func TestNetworkFingerprint_basic(t *testing.T) {
 		t.Skipf("Environment variable %+q not empty, skipping test", skipOnlineTestsEnvVar)
 	}

-	f := &NetworkFingerprint{logger: testLogger(), interfaceDetector: &DefaultNetworkInterfaceDetector{}}
+	f := &NetworkFingerprint{logger: testlog.Logger(t), interfaceDetector: &DefaultNetworkInterfaceDetector{}}
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

@@ -235,7 +236,7 @@ func TestNetworkFingerprint_basic(t *testing.T) {
 }

 func TestNetworkFingerprint_default_device_absent(t *testing.T) {
-	f := &NetworkFingerprint{logger: testLogger(), interfaceDetector: &NetworkInterfaceDetectorOnlyLo{}}
+	f := &NetworkFingerprint{logger: testlog.Logger(t), interfaceDetector: &NetworkInterfaceDetectorOnlyLo{}}
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

@@ -258,7 +259,7 @@ func TestNetworkFingerprint_default_device_absent(t *testing.T) {
 }

 func TestNetworkFingerPrint_default_device(t *testing.T) {
-	f := &NetworkFingerprint{logger: testLogger(), interfaceDetector: &NetworkInterfaceDetectorOnlyLo{}}
+	f := &NetworkFingerprint{logger: testlog.Logger(t), interfaceDetector: &NetworkInterfaceDetectorOnlyLo{}}
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

@@ -309,7 +310,7 @@ func TestNetworkFingerPrint_default_device(t *testing.T) {
 }

 func TestNetworkFingerPrint_LinkLocal_Allowed(t *testing.T) {
-	f := &NetworkFingerprint{logger: testLogger(), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}}
+	f := &NetworkFingerprint{logger: testlog.Logger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}}
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

@@ -356,7 +357,7 @@ func TestNetworkFingerPrint_LinkLocal_Allowed(t *testing.T) {
 }

 func TestNetworkFingerPrint_LinkLocal_Allowed_MixedIntf(t *testing.T) {
-	f := &NetworkFingerprint{logger: testLogger(), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}}
+	f := &NetworkFingerprint{logger: testlog.Logger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}}
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

@@ -410,7 +411,7 @@ func TestNetworkFingerPrint_LinkLocal_Allowed_MixedIntf(t *testing.T) {
 }

 func TestNetworkFingerPrint_LinkLocal_Disallowed(t *testing.T) {
-	f := &NetworkFingerprint{logger: testLogger(), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}}
+	f := &NetworkFingerprint{logger: testlog.Logger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}}
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

View File

@@ -1,9 +1,13 @@
 package fingerprint

-import "testing"
+import (
+	"testing"
+
+	"github.com/hashicorp/nomad/helper/testlog"
+)

 func TestNetworkFingerPrint_linkspeed_parse(t *testing.T) {
-	f := &NetworkFingerprint{logger: testLogger(), interfaceDetector: &DefaultNetworkInterfaceDetector{}}
+	f := &NetworkFingerprint{logger: testlog.Logger(t), interfaceDetector: &DefaultNetworkInterfaceDetector{}}

 	var outputTests = []struct {
 		in  string

View File

@@ -5,12 +5,13 @@ import (
 	"github.com/hashicorp/nomad/client/config"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/version"
 )

 func TestNomadFingerprint(t *testing.T) {
-	f := NewNomadFingerprint(testLogger())
+	f := NewNomadFingerprint(testlog.Logger(t))

 	v := "foo"
 	r := "123"

View File

@@ -3,11 +3,12 @@ package fingerprint
 import (
 	"testing"

+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 )

 func TestSignalFingerprint(t *testing.T) {
-	fp := NewSignalFingerprint(testLogger())
+	fp := NewSignalFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

View File

@@ -4,11 +4,12 @@ import (
 	"strconv"
 	"testing"

+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 )

 func TestStorageFingerprint(t *testing.T) {
-	fp := NewStorageFingerprint(testLogger())
+	fp := NewStorageFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

View File

@@ -5,6 +5,7 @@ import (
 	"github.com/hashicorp/nomad/client/config"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/testutil"
 )

@@ -13,7 +14,7 @@ func TestVaultFingerprint(t *testing.T) {
 	tv := testutil.NewTestVault(t)
 	defer tv.Stop()

-	fp := NewVaultFingerprint(testLogger())
+	fp := NewVaultFingerprint(testlog.Logger(t))
 	node := &structs.Node{
 		Attributes: make(map[string]string),
 	}

View File

@@ -2,11 +2,10 @@ package client
 import (
 	"fmt"
-	"log"
-	"os"
 	"testing"

 	"github.com/hashicorp/nomad/client/config"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/stretchr/testify/require"
 )

@@ -16,7 +15,7 @@ func TestFingerprintManager_Run_MockDriver(t *testing.T) {
 	require := require.New(t)
 	testClient := TestClient(t, nil)

-	testClient.logger = log.New(os.Stderr, "", log.LstdFlags)
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

@@ -25,7 +24,7 @@ func TestFingerprintManager_Run_MockDriver(t *testing.T) {
 		testClient.shutdownCh,
 		testClient.updateNodeFromFingerprint,
 		testClient.updateNodeFromDriver,
-		testLogger(),
+		testlog.Logger(t),
 	)

 	err := fm.Run()

@@ -43,7 +42,7 @@ func TestFingerprintManager_Run_ResourcesFingerprint(t *testing.T) {
 	require := require.New(t)
 	testClient := TestClient(t, nil)

-	testClient.logger = testLogger()
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

@@ -70,7 +69,7 @@ func TestFingerprintManager_Fingerprint_Run(t *testing.T) {
 	require := require.New(t)
 	testClient := TestClient(t, nil)

-	testClient.logger = testLogger()
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

@@ -102,7 +101,7 @@ func TestFingerprintManager_Fingerprint_Periodic(t *testing.T) {
 		}
 	})

-	testClient.logger = testLogger()
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

@@ -164,7 +163,7 @@ func TestFingerprintManager_HealthCheck_Driver(t *testing.T) {
 		}
 	})

-	testClient.logger = testLogger()
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

@@ -264,7 +263,7 @@ func TestFingerprintManager_HealthCheck_Periodic(t *testing.T) {
 		}
 	})

-	testClient.logger = testLogger()
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

@@ -360,7 +359,7 @@ func TestFimgerprintManager_Run_InWhitelist(t *testing.T) {
 		}
 	})

-	testClient.logger = testLogger()
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

@@ -390,7 +389,7 @@ func TestFingerprintManager_Run_InBlacklist(t *testing.T) {
 		}
 	})

-	testClient.logger = testLogger()
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

@@ -422,7 +421,7 @@ func TestFingerprintManager_Run_Combination(t *testing.T) {
 		}
 	})

-	testClient.logger = testLogger()
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

@@ -455,7 +454,7 @@ func TestFingerprintManager_Run_WhitelistDrivers(t *testing.T) {
 		}
 	})

-	testClient.logger = testLogger()
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

@@ -485,7 +484,7 @@ func TestFingerprintManager_Run_AllDriversBlacklisted(t *testing.T) {
 		}
 	})

-	testClient.logger = testLogger()
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

@@ -519,7 +518,7 @@ func TestFingerprintManager_Run_DriversWhiteListBlacklistCombination(t *testing.
 		}
 	})

-	testClient.logger = testLogger()
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

@@ -552,7 +551,7 @@ func TestFingerprintManager_Run_DriversInBlacklist(t *testing.T) {
 		}
 	})

-	testClient.logger = testLogger()
+	testClient.logger = testlog.Logger(t)
 	defer testClient.Shutdown()

 	fm := NewFingerprintManager(

View File

@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"log"
 	"math"
 	"net"
 	"os"

@@ -20,6 +19,7 @@ import (
 	"github.com/hashicorp/nomad/client/config"
 	sframer "github.com/hashicorp/nomad/client/lib/streamframer"
 	cstructs "github.com/hashicorp/nomad/client/structs"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/helper/uuid"
 	"github.com/hashicorp/nomad/nomad"
 	"github.com/hashicorp/nomad/nomad/mock"

@@ -41,7 +41,7 @@ func tempAllocDir(t testing.TB) *allocdir.AllocDir {
 		t.Fatalf("failed to chmod dir: %v", err)
 	}

-	return allocdir.NewAllocDir(log.New(os.Stderr, "", log.LstdFlags), dir)
+	return allocdir.NewAllocDir(testlog.Logger(t), dir)
 }

 type nopWriteCloser struct {

@@ -83,6 +83,9 @@ func TestFS_Stat(t *testing.T) {
 	// Create and add an alloc
 	a := mock.Alloc()
+	task := a.Job.TaskGroups[0].Tasks[0]
+	task.Driver = "mock_driver"
+	task.Config["run_for"] = "500ms"
 	c.addAlloc(a, "")

 	// Wait for the client to start it

@@ -92,7 +95,17 @@ func TestFS_Stat(t *testing.T) {
 			return false, fmt.Errorf("alloc doesn't exist")
 		}

-		return len(ar.tasks) != 0, fmt.Errorf("tasks not running")
+		alloc := ar.Alloc()
+
+		running := false
+		for _, s := range alloc.TaskStates {
+			if s.State == structs.TaskStateRunning {
+				running = true
+			} else {
+				running = false
+			}
+		}
+
+		return running, fmt.Errorf("tasks not running")
 	}, func(err error) {
 		t.Fatal(err)
 	})
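As written, the loop leaves `running` reflecting only the last task state visited, and Go map iteration order is unspecified; with the single-task mock.Alloc() that still amounts to "the task is running", but a conjunction over all tasks would state the intent directly. A hedged alternative, not part of the commit, written as a drop-in helper for this test file:

// allRunning reports whether every task in the alloc is running. The map
// and constant mirror the structs types used in the test above.
func allRunning(states map[string]*structs.TaskState) bool {
	if len(states) == 0 {
		return false
	}
	for _, s := range states {
		if s.State != structs.TaskStateRunning {
			return false
		}
	}
	return true
}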
@@ -208,6 +221,9 @@ func TestFS_List(t *testing.T) {
 	// Create and add an alloc
 	a := mock.Alloc()
+	task := a.Job.TaskGroups[0].Tasks[0]
+	task.Driver = "mock_driver"
+	task.Config["run_for"] = "500ms"
 	c.addAlloc(a, "")

 	// Wait for the client to start it

@@ -217,7 +233,17 @@ func TestFS_List(t *testing.T) {
 			return false, fmt.Errorf("alloc doesn't exist")
 		}

-		return len(ar.tasks) != 0, fmt.Errorf("tasks not running")
+		alloc := ar.Alloc()
+
+		running := false
+		for _, s := range alloc.TaskStates {
+			if s.State == structs.TaskStateRunning {
+				running = true
+			} else {
+				running = false
+			}
+		}
+
+		return running, fmt.Errorf("tasks not running")
 	}, func(err error) {
 		t.Fatal(err)
 	})

@@ -1736,6 +1762,7 @@ func TestFS_streamFile_Truncate(t *testing.T) {
 	// Start the reader
 	truncateCh := make(chan struct{})
+	truncateClosed := false
 	dataPostTruncCh := make(chan struct{})
 	frames := make(chan *sframer.StreamFrame, 4)
 	go func() {

@@ -1746,8 +1773,9 @@ func TestFS_streamFile_Truncate(t *testing.T) {
 				continue
 			}

-			if frame.FileEvent == truncateEvent {
+			if frame.FileEvent == truncateEvent && !truncateClosed {
 				close(truncateCh)
+				truncateClosed = true
 			}

 			collected = append(collected, frame.Data...)
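The new truncateClosed flag guards against a repeated truncate event: closing an already-closed channel panics, so the first close is remembered and later events are ignored. The guard in isolation, as a standalone sketch (not from the patch):

package main

import "fmt"

func main() {
	done := make(chan struct{})
	closed := false
	for _, ev := range []string{"truncate", "data", "truncate"} {
		// close(done) a second time would panic, so remember the first one.
		if ev == "truncate" && !closed {
			close(done)
			closed = true
		}
	}
	<-done // already closed, so the receive returns immediately
	fmt.Println("saw truncate at least once:", closed)
}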

View File

@@ -7,6 +7,7 @@ import (
 	"sync"
 	"time"

+	"github.com/hashicorp/nomad/client/allocrunner"
 	"github.com/hashicorp/nomad/client/stats"
 	"github.com/hashicorp/nomad/nomad/structs"
 )

@@ -169,7 +170,7 @@ func (a *AllocGarbageCollector) keepUsageBelowThreshold() error {
 // destroyAllocRunner is used to destroy an allocation runner. It will acquire a
 // lock to restrict parallelism and then destroy the alloc runner, returning
 // once the allocation has been destroyed.
-func (a *AllocGarbageCollector) destroyAllocRunner(ar *AllocRunner, reason string) {
+func (a *AllocGarbageCollector) destroyAllocRunner(ar *allocrunner.AllocRunner, reason string) {
 	id := "<nil>"
 	if alloc := ar.Alloc(); alloc != nil {
 		id = alloc.ID

@@ -327,7 +328,7 @@ func (a *AllocGarbageCollector) MakeRoomFor(allocations []*structs.Allocation) e
 }

 // MarkForCollection starts tracking an allocation for Garbage Collection
-func (a *AllocGarbageCollector) MarkForCollection(ar *AllocRunner) {
+func (a *AllocGarbageCollector) MarkForCollection(ar *allocrunner.AllocRunner) {
 	if ar.Alloc() == nil {
 		a.destroyAllocRunner(ar, "alloc is nil")
 		return

@@ -342,7 +343,7 @@ func (a *AllocGarbageCollector) MarkForCollection(ar *AllocRunner) {
 // a PQ
 type GCAlloc struct {
 	timeStamp   time.Time
-	allocRunner *AllocRunner
+	allocRunner *allocrunner.AllocRunner
 	index       int
 }

@@ -396,7 +397,7 @@ func NewIndexedGCAllocPQ() *IndexedGCAllocPQ {
 // Push an alloc runner into the GC queue. Returns true if alloc was added,
 // false if the alloc already existed.
-func (i *IndexedGCAllocPQ) Push(ar *AllocRunner) bool {
+func (i *IndexedGCAllocPQ) Push(ar *allocrunner.AllocRunner) bool {
 	i.pqLock.Lock()
 	defer i.pqLock.Unlock()

View File

@@ -5,8 +5,10 @@ import (
 	"testing"
 	"time"
 
+	"github.com/hashicorp/nomad/client/allocrunner"
 	"github.com/hashicorp/nomad/client/config"
 	"github.com/hashicorp/nomad/client/stats"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/testutil"
@@ -22,14 +24,24 @@ func gcConfig() *GCConfig {
 	}
 }
 
+// exitAllocRunner is a helper that updates the allocs on the given alloc
+// runners to be terminal
+func exitAllocRunner(runners ...*allocrunner.AllocRunner) {
+	for _, ar := range runners {
+		terminalAlloc := ar.Alloc()
+		terminalAlloc.DesiredStatus = structs.AllocDesiredStatusStop
+		ar.Update(terminalAlloc)
+	}
+}
+
 func TestIndexedGCAllocPQ(t *testing.T) {
 	t.Parallel()
 	pq := NewIndexedGCAllocPQ()
 
-	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	_, ar3 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	_, ar4 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar3 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar4 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
 
 	pq.Push(ar1)
 	pq.Push(ar2)
@@ -105,10 +117,10 @@ func (m *MockStatsCollector) Stats() *stats.HostStats {
 func TestAllocGarbageCollector_MarkForCollection(t *testing.T) {
 	t.Parallel()
-	logger := testLogger()
+	logger := testlog.Logger(t)
 	gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
 
-	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
 	gc.MarkForCollection(ar1)
 
 	gcAlloc := gc.allocRunners.Pop()
@@ -119,17 +131,19 @@ func TestAllocGarbageCollector_MarkForCollection(t *testing.T) {
 func TestAllocGarbageCollector_Collect(t *testing.T) {
 	t.Parallel()
-	logger := testLogger()
+	logger := testlog.Logger(t)
 	gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
 
-	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+
+	go ar1.Run()
+	go ar2.Run()
+
 	gc.MarkForCollection(ar1)
 	gc.MarkForCollection(ar2)
 
-	// Fake that ar.Run() exits
-	close(ar1.waitCh)
-	close(ar2.waitCh)
+	// Exit the alloc runners
+	exitAllocRunner(ar1, ar2)
 
 	gc.Collect(ar1.Alloc().ID)
 	gcAlloc := gc.allocRunners.Pop()
@@ -140,11 +154,11 @@ func TestAllocGarbageCollector_Collect(t *testing.T) {
 func TestAllocGarbageCollector_CollectAll(t *testing.T) {
 	t.Parallel()
-	logger := testLogger()
+	logger := testlog.Logger(t)
 	gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
 
-	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
 	gc.MarkForCollection(ar1)
 	gc.MarkForCollection(ar2)
@@ -157,19 +171,23 @@ func TestAllocGarbageCollector_CollectAll(t *testing.T) {
 func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) {
 	t.Parallel()
-	logger := testLogger()
+	logger := testlog.Logger(t)
 	statsCollector := &MockStatsCollector{}
 	conf := gcConfig()
 	conf.ReservedDiskMB = 20
 	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)
 
-	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	close(ar1.waitCh)
-	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	close(ar2.waitCh)
+	_, ar1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+
+	go ar1.Run()
+	go ar2.Run()
+
 	gc.MarkForCollection(ar1)
 	gc.MarkForCollection(ar2)
 
+	// Exit the alloc runners
+	exitAllocRunner(ar1, ar2)
+
 	// Make stats collector report 200MB free out of which 20MB is reserved
 	statsCollector.availableValues = []uint64{200 * MB}
 	statsCollector.usedPercents = []float64{0}
@@ -192,19 +210,23 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T)
 func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
 	t.Parallel()
-	logger := testLogger()
+	logger := testlog.Logger(t)
 	statsCollector := &MockStatsCollector{}
 	conf := gcConfig()
 	conf.ReservedDiskMB = 20
 	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)
 
-	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	close(ar1.waitCh)
-	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	close(ar2.waitCh)
+	_, ar1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+
+	go ar1.Run()
+	go ar2.Run()
+
 	gc.MarkForCollection(ar1)
 	gc.MarkForCollection(ar2)
 
+	// Exit the alloc runners
+	exitAllocRunner(ar1, ar2)
+
 	// Make stats collector report 80MB and 175MB free in subsequent calls
 	statsCollector.availableValues = []uint64{80 * MB, 80 * MB, 175 * MB}
 	statsCollector.usedPercents = []float64{0, 0, 0}
@@ -228,19 +250,23 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
 func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
 	t.Parallel()
-	logger := testLogger()
+	logger := testlog.Logger(t)
 	statsCollector := &MockStatsCollector{}
 	conf := gcConfig()
 	conf.ReservedDiskMB = 20
 	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)
 
-	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	close(ar1.waitCh)
-	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	close(ar2.waitCh)
+	_, ar1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+
+	go ar1.Run()
+	go ar2.Run()
+
 	gc.MarkForCollection(ar1)
 	gc.MarkForCollection(ar2)
 
+	// Exit the alloc runners
+	exitAllocRunner(ar1, ar2)
+
 	// Make stats collector report 80MB and 95MB free in subsequent calls
 	statsCollector.availableValues = []uint64{80 * MB, 80 * MB, 95 * MB}
 	statsCollector.usedPercents = []float64{0, 0, 0}
@@ -260,19 +286,23 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
 func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) {
 	t.Parallel()
-	logger := testLogger()
+	logger := testlog.Logger(t)
 	statsCollector := &MockStatsCollector{}
 	conf := gcConfig()
 	conf.ReservedDiskMB = 20
 	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)
 
-	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	close(ar1.waitCh)
-	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	close(ar2.waitCh)
+	_, ar1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+
+	go ar1.Run()
+	go ar2.Run()
+
 	gc.MarkForCollection(ar1)
 	gc.MarkForCollection(ar2)
 
+	// Exit the alloc runners
+	exitAllocRunner(ar1, ar2)
+
 	alloc := mock.Alloc()
 	alloc.Resources.DiskMB = 150
 	if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
@@ -419,19 +449,23 @@ func TestAllocGarbageCollector_MaxAllocs(t *testing.T) {
 func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) {
 	t.Parallel()
-	logger := testLogger()
+	logger := testlog.Logger(t)
 	statsCollector := &MockStatsCollector{}
 	conf := gcConfig()
 	conf.ReservedDiskMB = 20
 	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)
 
-	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	close(ar1.waitCh)
-	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	close(ar2.waitCh)
+	_, ar1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+
+	go ar1.Run()
+	go ar2.Run()
+
 	gc.MarkForCollection(ar1)
 	gc.MarkForCollection(ar2)
 
+	// Exit the alloc runners
+	exitAllocRunner(ar1, ar2)
+
 	statsCollector.availableValues = []uint64{1000}
 	statsCollector.usedPercents = []float64{20}
 	statsCollector.inodePercents = []float64{10}
@@ -451,19 +485,23 @@ func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) {
 func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) {
 	t.Parallel()
-	logger := testLogger()
+	logger := testlog.Logger(t)
 	statsCollector := &MockStatsCollector{}
 	conf := gcConfig()
 	conf.ReservedDiskMB = 20
 	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)
 
-	_, ar1 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	close(ar1.waitCh)
-	_, ar2 := testAllocRunnerFromAlloc(t, mock.Alloc(), false)
-	close(ar2.waitCh)
+	_, ar1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+	_, ar2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc(), false)
+
+	go ar1.Run()
+	go ar2.Run()
+
 	gc.MarkForCollection(ar1)
 	gc.MarkForCollection(ar2)
 
+	// Exit the alloc runners
+	exitAllocRunner(ar1, ar2)
+
 	statsCollector.availableValues = []uint64{1000, 800}
 	statsCollector.usedPercents = []float64{85, 60}
 	statsCollector.inodePercents = []float64{50, 30}
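
Note: these tests, like most test files touched by this commit, replace ad-hoc stderr loggers with testlog.Logger(t). As a rough sketch of what such a helper can look like (an illustration only, not the actual helper/testlog source), it adapts the test handle into an io.Writer so a standard *log.Logger routes its output through the test framework and is shown only on failure or under go test -v:

package testlog

import "log"

// Printer is the subset of *testing.T this sketch needs; using an
// interface avoids a hard dependency on the testing package.
type Printer interface {
	Logf(format string, args ...interface{})
}

// writer adapts a Printer into an io.Writer so log.New can target it.
type writer struct{ p Printer }

func (w *writer) Write(b []byte) (int, error) {
	w.p.Logf("%s", b)
	return len(b), nil
}

// Logger returns a *log.Logger whose output is captured per test case.
func Logger(p Printer) *log.Logger {
	return log.New(&writer{p}, "", log.LstdFlags)
}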

View File

@@ -2,12 +2,12 @@ package servers
 
 import (
 	"fmt"
-	"log"
 	"math/rand"
 	"net"
-	"os"
 	"testing"
 	"time"
 
+	"github.com/hashicorp/nomad/helper/testlog"
 )
 
 func init() {
@@ -36,14 +36,14 @@ func (cp *fauxConnPool) Ping(net.Addr) error {
 }
 
 func testManager(t *testing.T) (m *Manager) {
-	logger := log.New(os.Stderr, "", 0)
+	logger := testlog.Logger(t)
 	shutdownCh := make(chan struct{})
 	m = New(logger, shutdownCh, &fauxConnPool{})
 	return m
 }
 
-func testManagerFailProb(failPct float64) (m *Manager) {
-	logger := log.New(os.Stderr, "", 0)
+func testManagerFailProb(t *testing.T, failPct float64) (m *Manager) {
+	logger := testlog.Logger(t)
 	shutdownCh := make(chan struct{})
 	m = New(logger, shutdownCh, &fauxConnPool{failPct: failPct})
 	return m
@@ -136,7 +136,7 @@ func TestManagerInternal_refreshServerRebalanceTimer(t *testing.T) {
 		{1000000, 19, 10 * time.Minute},
 	}
 
-	logger := log.New(os.Stderr, "", log.LstdFlags)
+	logger := testlog.Logger(t)
 	shutdownCh := make(chan struct{})
 
 	for _, s := range clusters {

View File

@@ -2,10 +2,8 @@ package servers_test
 
 import (
 	"fmt"
-	"log"
 	"math/rand"
 	"net"
-	"os"
 	"strings"
 	"testing"
@@ -115,7 +113,7 @@ func TestServers_FindServer(t *testing.T) {
 }
 
 func TestServers_New(t *testing.T) {
-	logger := log.New(os.Stderr, "", log.LstdFlags)
+	logger := testlog.Logger(t)
 	shutdownCh := make(chan struct{})
 	m := servers.New(logger, shutdownCh, &fauxConnPool{})
 	if m == nil {

View File

@@ -1,4 +1,4 @@
-package client
+package state
 
 import (
 	"bytes"
@@ -25,7 +25,7 @@ var (
 	allocationsBucket = []byte("allocations")
 )
 
-func putObject(bkt *bolt.Bucket, key []byte, obj interface{}) error {
+func PutObject(bkt *bolt.Bucket, key []byte, obj interface{}) error {
 	if !bkt.Writable() {
 		return fmt.Errorf("bucket must be writable")
 	}
@@ -43,7 +43,7 @@ func putObject(bkt *bolt.Bucket, key []byte, obj interface{}) error {
 	return nil
 }
 
-func putData(bkt *bolt.Bucket, key, value []byte) error {
+func PutData(bkt *bolt.Bucket, key, value []byte) error {
 	if !bkt.Writable() {
 		return fmt.Errorf("bucket must be writable")
 	}
@@ -55,7 +55,7 @@ func putData(bkt *bolt.Bucket, key, value []byte) error {
 	return nil
 }
 
-func getObject(bkt *bolt.Bucket, key []byte, obj interface{}) error {
+func GetObject(bkt *bolt.Bucket, key []byte, obj interface{}) error {
 	// Get the data
 	data := bkt.Get(key)
 	if data == nil {
@@ -70,11 +70,11 @@ func getObject(bkt *bolt.Bucket, key []byte, obj interface{}) error {
 	return nil
 }
 
-// getAllocationBucket returns the bucket used to persist state about a
+// GetAllocationBucket returns the bucket used to persist state about a
 // particular allocation. If the root allocation bucket or the specific
 // allocation bucket doesn't exist, it will be created as long as the
 // transaction is writable.
-func getAllocationBucket(tx *bolt.Tx, allocID string) (*bolt.Bucket, error) {
+func GetAllocationBucket(tx *bolt.Tx, allocID string) (*bolt.Bucket, error) {
 	var err error
 	w := tx.Writable()
@@ -108,12 +108,12 @@ func getAllocationBucket(tx *bolt.Tx, allocID string) (*bolt.Bucket, error) {
 	return alloc, nil
 }
 
-// getTaskBucket returns the bucket used to persist state about a
+// GetTaskBucket returns the bucket used to persist state about a
 // particular task. If the root allocation bucket, the specific
 // allocation or task bucket doesn't exist, they will be created as long as the
 // transaction is writable.
-func getTaskBucket(tx *bolt.Tx, allocID, taskName string) (*bolt.Bucket, error) {
-	alloc, err := getAllocationBucket(tx, allocID)
+func GetTaskBucket(tx *bolt.Tx, allocID, taskName string) (*bolt.Bucket, error) {
+	alloc, err := GetAllocationBucket(tx, allocID)
 	if err != nil {
 		return nil, err
 	}
@@ -136,8 +136,8 @@ func getTaskBucket(tx *bolt.Tx, allocID, taskName string) (*bolt.Bucket, error)
 	return task, nil
 }
 
-// deleteAllocationBucket is used to delete an allocation bucket if it exists.
-func deleteAllocationBucket(tx *bolt.Tx, allocID string) error {
+// DeleteAllocationBucket is used to delete an allocation bucket if it exists.
+func DeleteAllocationBucket(tx *bolt.Tx, allocID string) error {
 	if !tx.Writable() {
 		return fmt.Errorf("transaction must be writable")
 	}
@@ -157,8 +157,8 @@ func deleteAllocationBucket(tx *bolt.Tx, allocID string) error {
 	return allocations.DeleteBucket(key)
 }
 
-// deleteTaskBucket is used to delete a task bucket if it exists.
-func deleteTaskBucket(tx *bolt.Tx, allocID, taskName string) error {
+// DeleteTaskBucket is used to delete a task bucket if it exists.
+func DeleteTaskBucket(tx *bolt.Tx, allocID, taskName string) error {
 	if !tx.Writable() {
 		return fmt.Errorf("transaction must be writable")
 	}
@@ -184,7 +184,7 @@ func deleteTaskBucket(tx *bolt.Tx, allocID, taskName string) error {
 	return alloc.DeleteBucket(key)
 }
 
-func getAllAllocationIDs(tx *bolt.Tx) ([]string, error) {
+func GetAllAllocationIDs(tx *bolt.Tx) ([]string, error) {
 	allocationsBkt := tx.Bucket(allocationsBucket)
 	if allocationsBkt == nil {
 		return nil, nil
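
Exporting these helpers (and moving them into their own package) lets other client packages share a single persistence layer. A minimal sketch of the intended call pattern, assuming the package lands at client/state; the database path, alloc/task IDs, and payload below are illustrative, not taken from this diff:

package main

import (
	"log"

	"github.com/boltdb/bolt"
	"github.com/hashicorp/nomad/client/state" // assumed new import path
)

func main() {
	db, err := bolt.Open("client.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Persist an object under a task bucket inside one writable transaction;
	// GetTaskBucket creates the bucket hierarchy on demand.
	err = db.Update(func(tx *bolt.Tx) error {
		bkt, err := state.GetTaskBucket(tx, "alloc-1", "web")
		if err != nil {
			return err
		}
		return state.PutObject(bkt, []byte("task-state"), map[string]string{"state": "running"})
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read it back in a read-only transaction.
	err = db.View(func(tx *bolt.Tx) error {
		bkt, err := state.GetTaskBucket(tx, "alloc-1", "web")
		if err != nil {
			return err
		}
		var out map[string]string
		return state.GetObject(bkt, []byte("task-state"), &out)
	})
	if err != nil {
		log.Fatal(err)
	}
}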

View File

@@ -1,13 +1,13 @@
 package stats
 
 import (
-	"log"
 	"math"
 	"os"
 	"testing"
 	"time"
 
 	shelpers "github.com/hashicorp/nomad/helper/stats"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/stretchr/testify/assert"
 )
@@ -26,7 +26,7 @@ func TestHostStats_CPU(t *testing.T) {
 	assert := assert.New(t)
 	assert.Nil(shelpers.Init())
 
-	logger := log.New(os.Stderr, "", log.LstdFlags|log.Lmicroseconds)
+	logger := testlog.Logger(t)
 	cwd, err := os.Getwd()
 	assert.Nil(err)
 	hs := NewHostStatsCollector(logger, cwd)

View File

@@ -2,6 +2,7 @@ package client
 
 import (
 	"github.com/hashicorp/nomad/client/config"
+	consulApi "github.com/hashicorp/nomad/client/consul"
 	"github.com/hashicorp/nomad/client/fingerprint"
 	"github.com/hashicorp/nomad/command/agent/consul"
 	"github.com/hashicorp/nomad/helper"
@@ -37,8 +38,8 @@ func TestClient(t testing.T, cb func(c *config.Config)) *Client {
 	logger := testlog.Logger(t)
 	catalog := consul.NewMockCatalog(logger)
-	mockService := newMockConsulServiceClient(t)
-	mockService.logger = logger
+	mockService := consulApi.NewMockConsulServiceClient(t)
+	mockService.Logger = logger
 
 	client, err := NewClient(conf, catalog, mockService, logger)
 	if err != nil {
 		t.Fatalf("err: %v", err)

View File

@@ -1,9 +1,7 @@
 package client
 
 import (
-	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"math/rand"
 
 	"github.com/hashicorp/nomad/nomad/structs"
@@ -73,16 +71,3 @@ func shuffleStrings(list []string) {
 		list[i], list[j] = list[j], list[i]
 	}
 }
-
-// pre060RestoreState is used to read back in the persisted state for pre v0.6.0
-// state
-func pre060RestoreState(path string, data interface{}) error {
-	buf, err := ioutil.ReadFile(path)
-	if err != nil {
-		return err
-	}
-	if err := json.Unmarshal(buf, data); err != nil {
-		return fmt.Errorf("failed to decode state: %v", err)
-	}
-	return nil
-}

View File

@@ -1,13 +1,12 @@
 package vaultclient
 
 import (
-	"log"
-	"os"
 	"strings"
 	"testing"
 	"time"
 
 	"github.com/hashicorp/nomad/client/config"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/testutil"
 	vaultapi "github.com/hashicorp/vault/api"
 )
@@ -17,7 +16,7 @@ func TestVaultClient_TokenRenewals(t *testing.T) {
 	v := testutil.NewTestVault(t)
 	defer v.Stop()
 
-	logger := log.New(os.Stderr, "TEST: ", log.Lshortfile|log.LstdFlags)
+	logger := testlog.Logger(t)
 	v.Config.ConnectionRetryIntv = 100 * time.Millisecond
 	v.Config.TaskTokenTTL = "4s"
 	c, err := NewVaultClient(v.Config, logger, nil)
@@ -101,7 +100,7 @@ func TestVaultClient_Heap(t *testing.T) {
 	conf.VaultConfig.Token = "testvaulttoken"
 	conf.VaultConfig.TaskTokenTTL = "10s"
 
-	logger := log.New(os.Stderr, "TEST: ", log.Lshortfile|log.LstdFlags)
+	logger := testlog.Logger(t)
 	c, err := NewVaultClient(conf.VaultConfig, logger, nil)
 	if err != nil {
 		t.Fatal(err)
@@ -204,7 +203,7 @@ func TestVaultClient_RenewNonRenewableLease(t *testing.T) {
 	v := testutil.NewTestVault(t)
 	defer v.Stop()
 
-	logger := log.New(os.Stderr, "TEST: ", log.Lshortfile|log.LstdFlags)
+	logger := testlog.Logger(t)
 	v.Config.ConnectionRetryIntv = 100 * time.Millisecond
 	v.Config.TaskTokenTTL = "4s"
 	c, err := NewVaultClient(v.Config, logger, nil)
@@ -253,7 +252,7 @@ func TestVaultClient_RenewNonexistentLease(t *testing.T) {
 	v := testutil.NewTestVault(t)
 	defer v.Stop()
 
-	logger := log.New(os.Stderr, "TEST: ", log.Lshortfile|log.LstdFlags)
+	logger := testlog.Logger(t)
 	v.Config.ConnectionRetryIntv = 100 * time.Millisecond
 	v.Config.TaskTokenTTL = "4s"
 	c, err := NewVaultClient(v.Config, logger, nil)

View File

@@ -866,16 +866,16 @@ func (a *Agent) Stats() map[string]map[string]string {
 
 // ShouldReload determines if we should reload the configuration and agent
 // connections. If the TLS Configuration has not changed, we shouldn't reload.
-func (a *Agent) ShouldReload(newConfig *Config) (agent, http, rpc bool) {
+func (a *Agent) ShouldReload(newConfig *Config) (agent, http bool) {
 	a.configLock.Lock()
 	defer a.configLock.Unlock()
 
 	isEqual, err := a.config.TLSConfig.CertificateInfoIsEqual(newConfig.TLSConfig)
 	if err != nil {
 		a.logger.Printf("[INFO] agent: error when parsing TLS certificate %v", err)
-		return false, false, false
+		return false, false
 	} else if !isEqual {
-		return true, true, true
+		return true, true
 	}
 
 	// Allow the ability to only reload HTTP connections
@@ -886,11 +886,10 @@ func (a *Agent) ShouldReload(newConfig *Config) (agent, http, rpc bool) {
 
 	// Allow the ability to only reload HTTP connections
 	if a.config.TLSConfig.EnableRPC != newConfig.TLSConfig.EnableRPC {
-		rpc = true
 		agent = true
 	}
 
-	return agent, http, rpc
+	return agent, http
 }
 
 // Reload handles configuration changes for the agent. Provides a method that
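
Dropping the rpc return value does not mean RPC reloads went away; the server reload path now owns that decision (see the handleReload change and the new tlsutil.ShouldReloadRPCConnections helper later in this commit). A condensed sketch of the resulting flow; the variable names are illustrative and the wiring inside Server.Reload is not shown in this excerpt:

agentChanged, httpChanged := agent.ShouldReload(newConf)
if agentChanged {
	// Reload agent-level TLS state.
	agent.Reload(newConf)
}
if httpChanged {
	// Recreate the HTTP listener with the new TLS settings.
}
// RPC connections are reconciled by handing the converted server config to
// Server.Reload unconditionally, which can compare old and new TLS
// configurations itself (e.g. via tlsutil.ShouldReloadRPCConnections).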

View File

@@ -2,7 +2,6 @@ package agent
 
 import (
 	"io/ioutil"
-	"log"
 	"os"
 	"path/filepath"
 	"strings"
@@ -11,6 +10,7 @@ import (
 	cstructs "github.com/hashicorp/nomad/client/structs"
 	"github.com/hashicorp/nomad/helper"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/structs"
 	sconfig "github.com/hashicorp/nomad/nomad/structs/config"
 	"github.com/stretchr/testify/assert"
@@ -333,10 +333,7 @@ func TestAget_Client_TelemetryConfiguration(t *testing.T) {
 // API health check depending on configuration.
 func TestAgent_HTTPCheck(t *testing.T) {
 	t.Parallel()
-	logger := log.New(ioutil.Discard, "", 0)
-	if testing.Verbose() {
-		logger = log.New(os.Stdout, "[TestAgent_HTTPCheck] ", log.Lshortfile)
-	}
+	logger := testlog.Logger(t)
 	agent := func() *Agent {
 		return &Agent{
 			logger: logger,
@@ -417,14 +414,11 @@ func TestAgent_HTTPCheckPath(t *testing.T) {
 	// Agent.agentHTTPCheck only needs a config and logger
 	a := &Agent{
 		config: DevConfig(),
-		logger: log.New(ioutil.Discard, "", 0),
+		logger: testlog.Logger(t),
 	}
 	if err := a.config.normalizeAddrs(); err != nil {
 		t.Fatalf("error normalizing config: %v", err)
 	}
-	if testing.Verbose() {
-		a.logger = log.New(os.Stderr, "", log.LstdFlags)
-	}
 
 	// Assert server check uses /v1/agent/health?type=server
 	isServer := true
@@ -638,7 +632,7 @@ func TestServer_Reload_TLS_WithNilConfiguration(t *testing.T) {
 	t.Parallel()
 	assert := assert.New(t)
 
-	logger := log.New(ioutil.Discard, "", 0)
+	logger := testlog.Logger(t)
 
 	agent := &Agent{
 		logger: logger,
@@ -662,7 +656,7 @@ func TestServer_Reload_TLS_UpgradeToTLS(t *testing.T) {
 	dir := tmpDir(t)
 	defer os.RemoveAll(dir)
 
-	logger := log.New(ioutil.Discard, "", 0)
+	logger := testlog.Logger(t)
 
 	agentConfig := &Config{
 		TLSConfig: &sconfig.TLSConfig{},
@@ -704,7 +698,7 @@ func TestServer_Reload_TLS_DowngradeFromTLS(t *testing.T) {
 	dir := tmpDir(t)
 	defer os.RemoveAll(dir)
 
-	logger := log.New(ioutil.Discard, "", 0)
+	logger := testlog.Logger(t)
 
 	agentConfig := &Config{
 		TLSConfig: &sconfig.TLSConfig{
@@ -769,10 +763,9 @@ func TestServer_ShouldReload_ReturnFalseForNoChanges(t *testing.T) {
 	})
 	defer agent.Shutdown()
 
-	shouldReloadAgent, shouldReloadHTTP, shouldReloadRPC := agent.ShouldReload(sameAgentConfig)
+	shouldReloadAgent, shouldReloadHTTP := agent.ShouldReload(sameAgentConfig)
 	assert.False(shouldReloadAgent)
 	assert.False(shouldReloadHTTP)
-	assert.False(shouldReloadRPC)
 }
 
 func TestServer_ShouldReload_ReturnTrueForOnlyHTTPChanges(t *testing.T) {
@@ -810,10 +803,9 @@ func TestServer_ShouldReload_ReturnTrueForOnlyHTTPChanges(t *testing.T) {
 	})
 	defer agent.Shutdown()
 
-	shouldReloadAgent, shouldReloadHTTP, shouldReloadRPC := agent.ShouldReload(sameAgentConfig)
+	shouldReloadAgent, shouldReloadHTTP := agent.ShouldReload(sameAgentConfig)
 	require.True(shouldReloadAgent)
 	require.True(shouldReloadHTTP)
-	require.False(shouldReloadRPC)
 }
 
 func TestServer_ShouldReload_ReturnTrueForOnlyRPCChanges(t *testing.T) {
@@ -851,10 +843,9 @@ func TestServer_ShouldReload_ReturnTrueForOnlyRPCChanges(t *testing.T) {
 	})
 	defer agent.Shutdown()
 
-	shouldReloadAgent, shouldReloadHTTP, shouldReloadRPC := agent.ShouldReload(sameAgentConfig)
+	shouldReloadAgent, shouldReloadHTTP := agent.ShouldReload(sameAgentConfig)
 	assert.True(shouldReloadAgent)
 	assert.False(shouldReloadHTTP)
-	assert.True(shouldReloadRPC)
 }
 
 func TestServer_ShouldReload_ReturnTrueForConfigChanges(t *testing.T) {
@@ -894,10 +885,9 @@ func TestServer_ShouldReload_ReturnTrueForConfigChanges(t *testing.T) {
 		},
 	}
 
-	shouldReloadAgent, shouldReloadHTTP, shouldReloadRPC := agent.ShouldReload(newConfig)
+	shouldReloadAgent, shouldReloadHTTP := agent.ShouldReload(newConfig)
 	assert.True(shouldReloadAgent)
 	assert.True(shouldReloadHTTP)
-	assert.True(shouldReloadRPC)
 }
 
 func TestServer_ShouldReload_ReturnTrueForFileChanges(t *testing.T) {
@@ -927,7 +917,7 @@ func TestServer_ShouldReload_ReturnTrueForFileChanges(t *testing.T) {
 	content := []byte(oldCertificate)
 	dir, err := ioutil.TempDir("", "certificate")
 	if err != nil {
-		log.Fatal(err)
+		t.Fatal(err)
 	}
 	defer os.RemoveAll(dir) // clean up
@@ -940,7 +930,7 @@ func TestServer_ShouldReload_ReturnTrueForFileChanges(t *testing.T) {
 		key = "../../helper/tlsutil/testdata/nomad-foo-key.pem"
 	)
 
-	logger := log.New(ioutil.Discard, "", 0)
+	logger := testlog.Logger(t)
 
 	agentConfig := &Config{
 		TLSConfig: &sconfig.TLSConfig{
@@ -959,10 +949,9 @@ func TestServer_ShouldReload_ReturnTrueForFileChanges(t *testing.T) {
 	}
 	agent.config.TLSConfig.SetChecksum()
 
-	shouldReloadAgent, shouldReloadHTTP, shouldReloadRPC := agent.ShouldReload(agentConfig)
+	shouldReloadAgent, shouldReloadHTTP := agent.ShouldReload(agentConfig)
 	require.False(shouldReloadAgent)
 	require.False(shouldReloadHTTP)
-	require.False(shouldReloadRPC)
 
 	newCertificate := `
 	-----BEGIN CERTIFICATE-----
@@ -999,10 +988,9 @@ func TestServer_ShouldReload_ReturnTrueForFileChanges(t *testing.T) {
 		},
 	}
 
-	shouldReloadAgent, shouldReloadHTTP, shouldReloadRPC = agent.ShouldReload(newAgentConfig)
+	shouldReloadAgent, shouldReloadHTTP = agent.ShouldReload(newAgentConfig)
 	require.True(shouldReloadAgent)
 	require.True(shouldReloadHTTP)
-	require.True(shouldReloadRPC)
 }
 
 func TestServer_ShouldReload_ShouldHandleMultipleChanges(t *testing.T) {
@@ -1043,20 +1031,18 @@ func TestServer_ShouldReload_ShouldHandleMultipleChanges(t *testing.T) {
 	defer agent.Shutdown()
 
 	{
-		shouldReloadAgent, shouldReloadHTTP, shouldReloadRPC := agent.ShouldReload(sameAgentConfig)
+		shouldReloadAgent, shouldReloadHTTP := agent.ShouldReload(sameAgentConfig)
 		require.True(shouldReloadAgent)
 		require.True(shouldReloadHTTP)
-		require.True(shouldReloadRPC)
 	}
 
 	err := agent.Reload(sameAgentConfig)
 	require.Nil(err)
 
 	{
-		shouldReloadAgent, shouldReloadHTTP, shouldReloadRPC := agent.ShouldReload(sameAgentConfig)
+		shouldReloadAgent, shouldReloadHTTP := agent.ShouldReload(sameAgentConfig)
 		require.False(shouldReloadAgent)
 		require.False(shouldReloadHTTP)
-		require.False(shouldReloadRPC)
 	}
 }

File diff suppressed because one or more lines are too long

View File

@@ -730,7 +730,7 @@ func (c *Command) handleReload() {
 		newConf.LogLevel = c.agent.GetConfig().LogLevel
 	}
 
-	shouldReloadAgent, shouldReloadHTTP, shouldReloadRPC := c.agent.ShouldReload(newConf)
+	shouldReloadAgent, shouldReloadHTTP := c.agent.ShouldReload(newConf)
 	if shouldReloadAgent {
 		c.agent.logger.Printf("[DEBUG] agent: starting reload of agent config")
 		err := c.agent.Reload(newConf)
@@ -740,32 +740,30 @@ func (c *Command) handleReload() {
 		}
 	}
 
-	if shouldReloadRPC {
-		if s := c.agent.Server(); s != nil {
-			sconf, err := convertServerConfig(newConf, c.logOutput)
-			c.agent.logger.Printf("[DEBUG] agent: starting reload of server config")
-			if err != nil {
-				c.agent.logger.Printf("[ERR] agent: failed to convert server config: %v", err)
-				return
-			} else {
-				if err := s.Reload(sconf); err != nil {
-					c.agent.logger.Printf("[ERR] agent: reloading server config failed: %v", err)
-					return
-				}
-			}
+	if s := c.agent.Server(); s != nil {
+		c.agent.logger.Printf("[DEBUG] agent: starting reload of server config")
+		sconf, err := convertServerConfig(newConf, c.logOutput)
+		if err != nil {
+			c.agent.logger.Printf("[ERR] agent: failed to convert server config: %v", err)
+			return
+		} else {
+			if err := s.Reload(sconf); err != nil {
+				c.agent.logger.Printf("[ERR] agent: reloading server config failed: %v", err)
+				return
+			}
 		}
 	}
 
 	if s := c.agent.Client(); s != nil {
 		clientConfig, err := c.agent.clientConfig()
 		c.agent.logger.Printf("[DEBUG] agent: starting reload of client config")
		if err != nil {
 			c.agent.logger.Printf("[ERR] agent: reloading client config failed: %v", err)
 			return
 		}
 		if err := c.agent.Client().Reload(clientConfig); err != nil {
 			c.agent.logger.Printf("[ERR] agent: reloading client config failed: %v", err)
 			return
 		}
 	}
 }

View File

@@ -2,7 +2,6 @@ package consul_test
 
 import (
 	"io/ioutil"
-	"log"
 	"os"
 	"path/filepath"
 	"testing"
@@ -11,23 +10,17 @@ import (
 	"github.com/boltdb/bolt"
 	consulapi "github.com/hashicorp/consul/api"
 	"github.com/hashicorp/consul/testutil"
-	"github.com/hashicorp/nomad/client"
 	"github.com/hashicorp/nomad/client/allocdir"
+	"github.com/hashicorp/nomad/client/allocrunner/taskrunner"
 	"github.com/hashicorp/nomad/client/config"
 	"github.com/hashicorp/nomad/client/vaultclient"
 	"github.com/hashicorp/nomad/command/agent/consul"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/stretchr/testify/assert"
 )
 
-func testLogger() *log.Logger {
-	if testing.Verbose() {
-		return log.New(os.Stderr, "", log.LstdFlags)
-	}
-	return log.New(ioutil.Discard, "", 0)
-}
-
 // TestConsul_Integration asserts TaskRunner properly registers and deregisters
 // services and checks with Consul using an embedded Consul agent.
 func TestConsul_Integration(t *testing.T) {
@@ -129,7 +122,7 @@ func TestConsul_Integration(t *testing.T) {
 		},
 	}
 
-	logger := testLogger()
+	logger := testlog.Logger(t)
 	logUpdate := func(name, state string, event *structs.TaskEvent, lazySync bool) {
 		logger.Printf("[TEST] test.updater: name=%q state=%q event=%v", name, state, event)
 	}
@@ -149,7 +142,7 @@ func TestConsul_Integration(t *testing.T) {
 		serviceClient.Run()
 		close(consulRan)
 	}()
-	tr := client.NewTaskRunner(logger, conf, db, logUpdate, taskDir, alloc, task, vclient, serviceClient)
+	tr := taskrunner.NewTaskRunner(logger, conf, db, logUpdate, taskDir, alloc, task, vclient, serviceClient)
 	tr.MarkReceived()
 	go tr.Run()
 	defer func() {

View File

@@ -2,12 +2,12 @@ package agent
 
 import (
 	"fmt"
-	"io/ioutil"
 	"log"
 	"os"
 	"testing"
 	"time"
 
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/require"
@@ -92,7 +92,7 @@ func TestRetryJoin_Server_NonCloud(t *testing.T) {
 		discover:      &MockDiscover{},
 		serverJoin:    mockJoin,
 		serverEnabled: true,
-		logger:        log.New(ioutil.Discard, "", 0),
+		logger:        testlog.Logger(t),
 		errCh:         make(chan struct{}),
 	}
@@ -123,7 +123,7 @@ func TestRetryJoin_Server_Cloud(t *testing.T) {
 		discover:      mockDiscover,
 		serverJoin:    mockJoin,
 		serverEnabled: true,
-		logger:        log.New(ioutil.Discard, "", 0),
+		logger:        testlog.Logger(t),
 		errCh:         make(chan struct{}),
 	}
@@ -155,7 +155,7 @@ func TestRetryJoin_Server_MixedProvider(t *testing.T) {
 		discover:      mockDiscover,
 		serverJoin:    mockJoin,
 		serverEnabled: true,
-		logger:        log.New(ioutil.Discard, "", 0),
+		logger:        testlog.Logger(t),
 		errCh:         make(chan struct{}),
 	}
@@ -186,7 +186,7 @@ func TestRetryJoin_Client(t *testing.T) {
 		discover:      &MockDiscover{},
 		clientJoin:    mockJoin,
 		clientEnabled: true,
-		logger:        log.New(ioutil.Discard, "", 0),
+		logger:        testlog.Logger(t),
 		errCh:         make(chan struct{}),
 	}

View File

@@ -12,7 +12,7 @@ import (
 	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/api/contexts"
-	"github.com/hashicorp/nomad/client"
+	"github.com/hashicorp/nomad/client/allocrunner/taskrunner/restarts"
 	"github.com/posener/complete"
 )
@@ -425,7 +425,7 @@ func buildDisplayMessage(event *api.TaskEvent) string {
 		desc = strings.Join(parts, ", ")
 	case api.TaskRestarting:
 		in := fmt.Sprintf("Task restarting in %v", time.Duration(event.StartDelay))
-		if event.RestartReason != "" && event.RestartReason != client.ReasonWithinPolicy {
+		if event.RestartReason != "" && event.RestartReason != restarts.ReasonWithinPolicy {
 			desc = fmt.Sprintf("%s - %s", event.RestartReason, in)
 		} else {
 			desc = in

View File

@@ -25,7 +25,7 @@ Usage: nomad node <subcommand> [options] [args]
   Mark a node as ineligible for running workloads. This is useful when the node
   is expected to be removed or upgraded so new allocations aren't placed on it:
 
-      $ nomad node eligibility -disabled <node-id>
+      $ nomad node eligibility -disable <node-id>
 
   Mark a node to be drained, allowing batch jobs four hours to finished before
   forcing them off the node:

View File

@@ -237,11 +237,11 @@ func TestNodeDrainCommand_Monitor(t *testing.T) {
 		require.Contains(out, fmt.Sprintf("Alloc %q marked for migration", a.ID))
 		require.Contains(out, fmt.Sprintf("Alloc %q draining", a.ID))
 	}
-	}
 
 	expected := fmt.Sprintf("All allocations on node %q have stopped.\n", nodeID)
 	if !strings.HasSuffix(out, expected) {
 		t.Fatalf("expected output to end with:\n%s", expected)
+	}
 }
 
 // Test -monitor flag
@@ -254,7 +254,6 @@ func TestNodeDrainCommand_Monitor(t *testing.T) {
 	out = outBuf.String()
 	t.Logf("Output:\n%s", out)
 	require.Contains(out, "No drain strategy set")
-
 }
 
@@ -262,7 +261,7 @@ func TestNodeDrainCommand_Monitor_NoDrainStrategy(t *testing.T) {
 	t.Parallel()
 	require := require.New(t)
 	server, client, url := testServer(t, true, func(c *agent.Config) {
-		c.NodeName = "drain_monitor_node"
+		c.NodeName = "drain_monitor_node2"
 	})
 	defer server.Shutdown()

View File

@@ -42,9 +42,17 @@ var supportedTLSCiphers = map[string]uint16{
 }
 
 // defaultTLSCiphers are the TLS Ciphers that are supported by default
-var defaultTLSCiphers = []string{"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
-	"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+var defaultTLSCiphers = []string{
 	"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+	"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+	"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
+	"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+	"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+	"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+	"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+	"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
+	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
 }
 
 // RegionSpecificWrapper is used to invoke a static Region and turns a
@@ -413,3 +421,27 @@ func ParseMinVersion(version string) (uint16, error) {
 
 	return vers, nil
 }
+
+// ShouldReloadRPCConnections compares two TLS Configurations and determines
+// whether they differ such that RPC connections should be reloaded
+func ShouldReloadRPCConnections(old, new *config.TLSConfig) (bool, error) {
+	var certificateInfoEqual bool
+	var rpcInfoEqual bool
+
+	// If already configured with TLS, compare with the new TLS configuration
+	if new != nil {
+		var err error
+		certificateInfoEqual, err = new.CertificateInfoIsEqual(old)
+		if err != nil {
+			return false, err
+		}
+	} else if new == nil && old == nil {
+		certificateInfoEqual = true
+	}
+
+	if new != nil && old != nil && new.EnableRPC == old.EnableRPC {
+		rpcInfoEqual = true
+	}
+
+	return (!rpcInfoEqual || !certificateInfoEqual), nil
+}

View File

@@ -696,9 +696,16 @@ func TestConfig_ParseCiphers_Default(t *testing.T) {
 	require := require.New(t)
 
 	expectedCiphers := []uint16{
-		tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
-		tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
 		tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+		tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+		tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+		tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+		tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+		tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+		tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+		tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+		tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+		tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
 	}
 
 	parsedCiphers, err := ParseCiphers("")
@@ -709,11 +716,9 @@ func TestConfig_ParseCiphers_Invalid(t *testing.T) {
 	require := require.New(t)
 
-	invalidCiphers := []string{"TLS_RSA_WITH_3DES_EDE_CBC_SHA",
-		"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
-		"TLS_RSA_WITH_RC4_128_SHA",
-		"TLS_ECDHE_RSA_WITH_RC4_128_SHA",
-		"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
+	invalidCiphers := []string{
+		"TLS_RSA_RSA_WITH_RC4_128_SHA",
+		"INVALID_CIPHER",
 	}
 
 	for _, cipher := range invalidCiphers {
@@ -778,3 +783,89 @@ func TestConfig_NewTLSConfiguration(t *testing.T) {
 	}
 	require.Equal(tlsConf.CipherSuites, expectedCiphers)
 }
+
+func TestConfig_ShouldReloadRPCConnections(t *testing.T) {
+	require := require.New(t)
+
+	type shouldReloadTestInput struct {
+		old          *config.TLSConfig
+		new          *config.TLSConfig
+		shouldReload bool
+		errorStr     string
+	}
+
+	testInput := []*shouldReloadTestInput{
+		{
+			old: &config.TLSConfig{
+				CAFile:   cacert,
+				CertFile: badcert,
+				KeyFile:  badkey,
+			},
+			new: &config.TLSConfig{
+				CAFile:   cacert,
+				CertFile: badcert,
+				KeyFile:  badkey,
+			},
+			shouldReload: false,
+			errorStr:     "Same TLS Configuration should not reload",
+		},
+		{
+			old: &config.TLSConfig{
+				CAFile:   cacert,
+				CertFile: badcert,
+				KeyFile:  badkey,
+			},
+			new: &config.TLSConfig{
+				CAFile:   cacert,
+				CertFile: foocert,
+				KeyFile:  fookey,
+			},
+			shouldReload: true,
+			errorStr:     "Different TLS Configuration should reload",
+		},
+		{
+			old: &config.TLSConfig{
+				CAFile:    cacert,
+				CertFile:  badcert,
+				KeyFile:   badkey,
+				EnableRPC: true,
+			},
+			new: &config.TLSConfig{
+				CAFile:    cacert,
+				CertFile:  badcert,
+				KeyFile:   badkey,
+				EnableRPC: false,
+			},
+			shouldReload: true,
+			errorStr:     "Downgrading RPC connections should force reload",
+		},
+		{
+			old: nil,
+			new: &config.TLSConfig{
+				CAFile:    cacert,
+				CertFile:  badcert,
+				KeyFile:   badkey,
+				EnableRPC: true,
+			},
+			shouldReload: true,
+			errorStr:     "Upgrading RPC connections should force reload",
+		},
+		{
+			old: &config.TLSConfig{
+				CAFile:    cacert,
+				CertFile:  badcert,
+				KeyFile:   badkey,
+				EnableRPC: true,
+			},
+			new:          nil,
+			shouldReload: true,
+			errorStr:     "Downgrading RPC connections should force reload",
+		},
+	}
+
+	for _, testCase := range testInput {
+		shouldReload, err := ShouldReloadRPCConnections(testCase.old, testCase.new)
+		require.NoError(err)
+		require.Equal(shouldReload, testCase.shouldReload, testCase.errorStr)
+	}
+}

View File

@@ -7,6 +7,7 @@ import (
 
 	memdb "github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/nomad/helper"
+	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/helper/uuid"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
@@ -18,7 +19,7 @@ import (
 func testDeploymentWatcher(t *testing.T, qps float64, batchDur time.Duration) (*Watcher, *mockBackend) {
 	m := newMockBackend(t)
-	w := NewDeploymentsWatcher(testLogger(), m, qps, batchDur)
+	w := NewDeploymentsWatcher(testlog.Logger(t), m, qps, batchDur)
 	return w, m
 }

View File

@@ -1,8 +1,6 @@
 package deploymentwatcher
 
 import (
-	"log"
-	"os"
 	"reflect"
 	"strings"
 	"sync"
@@ -13,10 +11,6 @@ import (
 	mocker "github.com/stretchr/testify/mock"
 )
 
-func testLogger() *log.Logger {
-	return log.New(os.Stderr, "", log.LstdFlags|log.Lmicroseconds)
-}
-
 type mockBackend struct {
 	mocker.Mock
 	index uint64
@@ -125,17 +119,14 @@ type matchDeploymentStatusUpdateConfig struct {
 func matchDeploymentStatusUpdateRequest(c *matchDeploymentStatusUpdateConfig) func(args *structs.DeploymentStatusUpdateRequest) bool {
 	return func(args *structs.DeploymentStatusUpdateRequest) bool {
 		if args.DeploymentUpdate.DeploymentID != c.DeploymentID {
-			testLogger().Printf("deployment ids dont match")
 			return false
 		}
 
 		if args.DeploymentUpdate.Status != c.Status && args.DeploymentUpdate.StatusDescription != c.StatusDescription {
-			testLogger().Printf("status's dont match")
 			return false
 		}
 
 		if c.Eval && args.Eval == nil || !c.Eval && args.Eval != nil {
-			testLogger().Printf("evals dont match")
 			return false
 		}

View File

@@ -135,7 +135,7 @@ func NewFSM(config *FSMConfig) (*nomadFSM, error) {
 		evalBroker:         config.EvalBroker,
 		periodicDispatcher: config.Periodic,
 		blockedEvals:       config.Blocked,
-		logger:             log.New(config.LogOutput, "", log.LstdFlags),
+		logger:             log.New(config.LogOutput, "", log.LstdFlags|log.Lmicroseconds),
 		config:             config,
 		state:              state,
 		timetable:          NewTimeTable(timeTableGranularity, timeTableLimit),

View File

@@ -47,7 +47,7 @@ func testStateStore(t *testing.T) *state.StateStore {
 
 func testFSM(t *testing.T) *nomadFSM {
 	broker := testBroker(t, 0)
-	dispatcher, _ := testPeriodicDispatcher()
+	dispatcher, _ := testPeriodicDispatcher(t)
 	fsmConfig := &FSMConfig{
 		EvalBroker: broker,
 		Periodic:   dispatcher,

View File

@@ -124,10 +124,8 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis
 	}
 
 	// Validate job transitions if its an update
-	if existingJob != nil {
-		if err := validateJobUpdate(existingJob, args.Job); err != nil {
-			return err
-		}
+	if err := validateJobUpdate(existingJob, args.Job); err != nil {
+		return err
 	}
 
 	// Ensure that the job has permissions for the requested Vault tokens
@@ -1327,6 +1325,14 @@ func validateJob(job *structs.Job) (invalid, warnings error) {
 
 // validateJobUpdate ensures updates to a job are valid.
 func validateJobUpdate(old, new *structs.Job) error {
+	// Validate Dispatch not set on new Jobs
+	if old == nil {
+		if new.Dispatched {
+			return fmt.Errorf("job can't be submitted with 'Dispatched' set")
+		}
+		return nil
+	}
+
 	// Type transitions are disallowed
 	if old.Type != new.Type {
 		return fmt.Errorf("cannot update job from type %q to %q", old.Type, new.Type)
@@ -1348,6 +1354,10 @@ func validateJobUpdate(old, new *structs.Job) error {
 		return fmt.Errorf("cannot update parameterized job to being non-parameterized")
 	}
 
+	if old.Dispatched != new.Dispatched {
+		return fmt.Errorf("field 'Dispatched' is read-only")
+	}
+
 	return nil
 }
@@ -1398,11 +1408,11 @@ func (j *Job) Dispatch(args *structs.JobDispatchRequest, reply *structs.JobDispa
 
 	// Derive the child job and commit it via Raft
 	dispatchJob := parameterizedJob.Copy()
-	dispatchJob.ParameterizedJob = nil
 	dispatchJob.ID = structs.DispatchedID(parameterizedJob.ID, time.Now())
 	dispatchJob.ParentID = parameterizedJob.ID
 	dispatchJob.Name = dispatchJob.ID
 	dispatchJob.SetSubmitTime()
+	dispatchJob.Dispatched = true
 
 	// Merge in the meta data
 	for k, v := range args.Meta {

View File

@@ -458,6 +458,33 @@ func TestJobEndpoint_Register_ParameterizedJob(t *testing.T) {
 	}
 }
 
+func TestJobEndpoint_Register_Dispatched(t *testing.T) {
+	t.Parallel()
+	require := require.New(t)
+	s1 := TestServer(t, func(c *Config) {
+		c.NumSchedulers = 0 // Prevent automatic dequeue
+	})
+	defer s1.Shutdown()
+	codec := rpcClient(t, s1)
+	testutil.WaitForLeader(t, s1.RPC)
+
+	// Create the register request with a job with 'Dispatch' set to true
+	job := mock.Job()
+	job.Dispatched = true
+	req := &structs.JobRegisterRequest{
+		Job: job,
+		WriteRequest: structs.WriteRequest{
+			Region:    "global",
+			Namespace: job.Namespace,
+		},
+	}
+
+	// Fetch the response
+	var resp structs.JobRegisterResponse
+	err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
+	require.Error(err)
+	require.Contains(err.Error(), "job can't be submitted with 'Dispatched'")
+}
+
 func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
 	t.Parallel()
 	s1 := TestServer(t, func(c *Config) {
@@ -3959,6 +3986,7 @@ func TestJobEndpoint_ValidateJob_KillSignal(t *testing.T) {
 
 func TestJobEndpoint_ValidateJobUpdate(t *testing.T) {
 	t.Parallel()
+	require := require.New(t)
 	old := mock.Job()
 	new := mock.Job()
 
@@ -3988,6 +4016,16 @@ func TestJobEndpoint_ValidateJobUpdate(t *testing.T) {
 	} else {
 		t.Log(err)
 	}
+
+	new = mock.Job()
+	new.Dispatched = true
+	require.Error(validateJobUpdate(old, new),
+		"expected err when setting new job to dispatched")
+	require.Error(validateJobUpdate(nil, new),
+		"expected err when setting new job to dispatched")
+	require.Error(validateJobUpdate(new, old),
+		"expected err when setting dispatched to false")
+	require.NoError(validateJobUpdate(nil, old))
 }
 
 func TestJobEndpoint_ValidateJobUpdate_ACL(t *testing.T) {
@@ -4343,6 +4381,15 @@ func TestJobEndpoint_Dispatch(t *testing.T) {
 			if out.ParentID != tc.parameterizedJob.ID {
 				t.Fatalf("bad parent ID")
 			}
+			if !out.Dispatched {
+				t.Fatal("expected dispatched job")
+			}
+			if out.IsParameterized() {
+				t.Fatal("dispatched job should not be parameterized")
+			}
+			if out.ParameterizedJob == nil {
+				t.Fatal("parameter job config should exist")
+			}
 
 			if tc.noEval {
 				return
View File

@@ -2,9 +2,7 @@ package nomad

 import (
     "fmt"
-    "log"
     "math/rand"
-    "os"
     "reflect"
     "sort"
     "strconv"
@@ -13,6 +11,7 @@
     "testing"
     "time"

+    "github.com/hashicorp/nomad/helper/testlog"
     "github.com/hashicorp/nomad/nomad/mock"
     "github.com/hashicorp/nomad/nomad/structs"
     "github.com/hashicorp/nomad/testutil"
@@ -78,8 +77,8 @@ func (t times) Less(i, j int) bool { return t[i].Before(t[j]) }

 // testPeriodicDispatcher returns an enabled PeriodicDispatcher which uses the
 // MockJobEvalDispatcher.
-func testPeriodicDispatcher() (*PeriodicDispatch, *MockJobEvalDispatcher) {
-    logger := log.New(os.Stderr, "", log.LstdFlags)
+func testPeriodicDispatcher(t *testing.T) (*PeriodicDispatch, *MockJobEvalDispatcher) {
+    logger := testlog.Logger(t)
     m := NewMockJobEvalDispatcher()
     d := NewPeriodicDispatch(logger, m)
     d.SetEnabled(true)
@@ -105,7 +104,7 @@ func testPeriodicJob(times ...time.Time) *structs.Job {
 // This tests the reported issue: https://github.com/hashicorp/nomad/issues/2829
 func TestPeriodicDispatch_SetEnabled(t *testing.T) {
     t.Parallel()
-    p, _ := testPeriodicDispatcher()
+    p, _ := testPeriodicDispatcher(t)

     // SetEnabled has been called once but do it again.
     p.SetEnabled(true)
@@ -128,7 +127,7 @@ func TestPeriodicDispatch_SetEnabled(t *testing.T) {

 func TestPeriodicDispatch_Add_NonPeriodic(t *testing.T) {
     t.Parallel()
-    p, _ := testPeriodicDispatcher()
+    p, _ := testPeriodicDispatcher(t)
     job := mock.Job()
     if err := p.Add(job); err != nil {
         t.Fatalf("Add of non-periodic job failed: %v; expect no-op", err)
@@ -142,7 +141,7 @@ func TestPeriodicDispatch_Add_NonPeriodic(t *testing.T) {

 func TestPeriodicDispatch_Add_Periodic_Parameterized(t *testing.T) {
     t.Parallel()
-    p, _ := testPeriodicDispatcher()
+    p, _ := testPeriodicDispatcher(t)
     job := mock.PeriodicJob()
     job.ParameterizedJob = &structs.ParameterizedJobConfig{}
     if err := p.Add(job); err != nil {
@@ -157,7 +156,7 @@ func TestPeriodicDispatch_Add_Periodic_Parameterized(t *testing.T) {

 func TestPeriodicDispatch_Add_Periodic_Stopped(t *testing.T) {
     t.Parallel()
-    p, _ := testPeriodicDispatcher()
+    p, _ := testPeriodicDispatcher(t)
     job := mock.PeriodicJob()
     job.Stop = true
     if err := p.Add(job); err != nil {
@@ -172,7 +171,7 @@ func TestPeriodicDispatch_Add_Periodic_Stopped(t *testing.T) {

 func TestPeriodicDispatch_Add_UpdateJob(t *testing.T) {
     t.Parallel()
-    p, _ := testPeriodicDispatcher()
+    p, _ := testPeriodicDispatcher(t)
     job := mock.PeriodicJob()
     if err := p.Add(job); err != nil {
         t.Fatalf("Add failed %v", err)
@@ -202,7 +201,7 @@ func TestPeriodicDispatch_Add_UpdateJob(t *testing.T) {
 func TestPeriodicDispatch_Add_Remove_Namespaced(t *testing.T) {
     assert := assert.New(t)
     t.Parallel()
-    p, _ := testPeriodicDispatcher()
+    p, _ := testPeriodicDispatcher(t)
     job := mock.PeriodicJob()
     job2 := mock.PeriodicJob()
     job2.Namespace = "test"
@@ -219,7 +218,7 @@ func TestPeriodicDispatch_Add_Remove_Namespaced(t *testing.T) {

 func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) {
     t.Parallel()
-    p, _ := testPeriodicDispatcher()
+    p, _ := testPeriodicDispatcher(t)
     job := mock.PeriodicJob()
     if err := p.Add(job); err != nil {
         t.Fatalf("Add failed %v", err)
@@ -244,7 +243,7 @@ func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) {

 func TestPeriodicDispatch_Add_TriggersUpdate(t *testing.T) {
     t.Parallel()
-    p, m := testPeriodicDispatcher()
+    p, m := testPeriodicDispatcher(t)

     // Create a job that won't be evaluated for a while.
     job := testPeriodicJob(time.Now().Add(10 * time.Second))
@@ -287,7 +286,7 @@ func TestPeriodicDispatch_Add_TriggersUpdate(t *testing.T) {

 func TestPeriodicDispatch_Remove_Untracked(t *testing.T) {
     t.Parallel()
-    p, _ := testPeriodicDispatcher()
+    p, _ := testPeriodicDispatcher(t)
     if err := p.Remove("ns", "foo"); err != nil {
         t.Fatalf("Remove failed %v; expected a no-op", err)
     }
@@ -295,7 +294,7 @@ func TestPeriodicDispatch_Remove_Untracked(t *testing.T) {

 func TestPeriodicDispatch_Remove_Tracked(t *testing.T) {
     t.Parallel()
-    p, _ := testPeriodicDispatcher()
+    p, _ := testPeriodicDispatcher(t)

     job := mock.PeriodicJob()
     if err := p.Add(job); err != nil {
@@ -319,7 +318,7 @@ func TestPeriodicDispatch_Remove_Tracked(t *testing.T) {

 func TestPeriodicDispatch_Remove_TriggersUpdate(t *testing.T) {
     t.Parallel()
-    p, _ := testPeriodicDispatcher()
+    p, _ := testPeriodicDispatcher(t)

     // Create a job that will be evaluated soon.
     job := testPeriodicJob(time.Now().Add(1 * time.Second))
@@ -349,7 +348,7 @@ func TestPeriodicDispatch_Remove_TriggersUpdate(t *testing.T) {

 func TestPeriodicDispatch_ForceRun_Untracked(t *testing.T) {
     t.Parallel()
-    p, _ := testPeriodicDispatcher()
+    p, _ := testPeriodicDispatcher(t)

     if _, err := p.ForceRun("ns", "foo"); err == nil {
         t.Fatal("ForceRun of untracked job should fail")
@@ -358,7 +357,7 @@ func TestPeriodicDispatch_ForceRun_Untracked(t *testing.T) {

 func TestPeriodicDispatch_ForceRun_Tracked(t *testing.T) {
     t.Parallel()
-    p, m := testPeriodicDispatcher()
+    p, m := testPeriodicDispatcher(t)

     // Create a job that won't be evaluated for a while.
     job := testPeriodicJob(time.Now().Add(10 * time.Second))
@@ -387,7 +386,7 @@ func TestPeriodicDispatch_ForceRun_Tracked(t *testing.T) {

 func TestPeriodicDispatch_Run_DisallowOverlaps(t *testing.T) {
     t.Parallel()
-    p, m := testPeriodicDispatcher()
+    p, m := testPeriodicDispatcher(t)

     // Create a job that will trigger two launches but disallows overlapping.
     launch1 := time.Now().Round(1 * time.Second).Add(1 * time.Second)
@@ -417,7 +416,7 @@ func TestPeriodicDispatch_Run_DisallowOverlaps(t *testing.T) {

 func TestPeriodicDispatch_Run_Multiple(t *testing.T) {
     t.Parallel()
-    p, m := testPeriodicDispatcher()
+    p, m := testPeriodicDispatcher(t)

     // Create a job that will be launched twice.
     launch1 := time.Now().Round(1 * time.Second).Add(1 * time.Second)
@@ -449,7 +448,7 @@ func TestPeriodicDispatch_Run_Multiple(t *testing.T) {

 func TestPeriodicDispatch_Run_SameTime(t *testing.T) {
     t.Parallel()
-    p, m := testPeriodicDispatcher()
+    p, m := testPeriodicDispatcher(t)

     // Create two job that will be launched at the same time.
     launch := time.Now().Round(1 * time.Second).Add(1 * time.Second)
@@ -487,7 +486,7 @@ func TestPeriodicDispatch_Run_SameTime(t *testing.T) {

 func TestPeriodicDispatch_Run_SameID_Different_Namespace(t *testing.T) {
     t.Parallel()
-    p, m := testPeriodicDispatcher()
+    p, m := testPeriodicDispatcher(t)

     // Create two job that will be launched at the same time.
     launch := time.Now().Round(1 * time.Second).Add(1 * time.Second)
@@ -534,7 +533,7 @@
 // behavior.
 func TestPeriodicDispatch_Complex(t *testing.T) {
     t.Parallel()
-    p, m := testPeriodicDispatcher()
+    p, m := testPeriodicDispatcher(t)

     // Create some jobs launching at different times.
     now := time.Now().Round(1 * time.Second)

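Every fixture in this file now threads the *testing.T through so that dispatcher logs are captured per test instead of being sprayed to os.Stderr. A minimal sketch of what a testlog-style helper does, assuming only the stdlib log and testing APIs (the real helper/testlog package may differ in detail):

    package testlog

    import (
        "log"
        "testing"
    )

    // writer adapts t.Logf to io.Writer so stdlib loggers print through the
    // test framework and output is shown only for failing (or -v) tests.
    type writer struct{ t *testing.T }

    func (w writer) Write(p []byte) (int, error) {
        w.t.Logf("%s", p)
        return len(p), nil
    }

    // WithPrefix returns a logger that writes through t with the given prefix.
    func WithPrefix(t *testing.T, prefix string) *log.Logger {
        return log.New(writer{t}, prefix, log.LstdFlags)
    }

    // Logger is WithPrefix without a prefix.
    func Logger(t *testing.T) *log.Logger {
        return WithPrefix(t, "")
    }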
View File

@@ -678,13 +678,12 @@ func (s *Server) Reload(newConfig *Config) error {
         }
     }

-    tlsInfoEqual, err := newConfig.TLSConfig.CertificateInfoIsEqual(s.config.TLSConfig)
+    shouldReloadTLS, err := tlsutil.ShouldReloadRPCConnections(s.config.TLSConfig, newConfig.TLSConfig)
     if err != nil {
-        s.logger.Printf("[ERR] nomad: error parsing server TLS configuration: %s", err)
-        return err
+        s.logger.Printf("[ERR] nomad: error checking whether to reload TLS configuration: %s", err)
     }

-    if !tlsInfoEqual || newConfig.TLSConfig.EnableRPC != s.config.TLSConfig.EnableRPC {
+    if shouldReloadTLS {
         if err := s.reloadTLSConnections(newConfig.TLSConfig); err != nil {
             s.logger.Printf("[ERR] nomad: error reloading server TLS configuration: %s", err)
             multierror.Append(&mErr, err)

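The comparison that used to be inlined here (certificate equality plus the EnableRPC toggle) now lives behind tlsutil.ShouldReloadRPCConnections, whose body is not part of this diff. A sketch of the predicate implied by the removed lines, under the assumption that the helper simply centralizes the old check:

    // Sketch only: reload RPC connections when the certificate material
    // differs or the RPC-TLS toggle changed between the old and new configs.
    func ShouldReloadRPCConnections(oldCfg, newCfg *config.TLSConfig) (bool, error) {
        tlsInfoEqual, err := newCfg.CertificateInfoIsEqual(oldCfg)
        if err != nil {
            return false, err
        }
        return !tlsInfoEqual || newCfg.EnableRPC != oldCfg.EnableRPC, nil
    }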
View File

@@ -219,7 +219,7 @@ func TestServer_Reload_Vault(t *testing.T) {
     }

     tr := true
-    config := s1.config
+    config := DefaultConfig()
     config.VaultConfig.Enabled = &tr
     config.VaultConfig.Token = uuid.Generate()

View File

@@ -69,7 +69,7 @@ func NewStateStore(config *StateStoreConfig) (*StateStore, error) {
     // Create the state store
     s := &StateStore{
-        logger:    log.New(config.LogOutput, "", log.LstdFlags),
+        logger:    log.New(config.LogOutput, "", log.LstdFlags|log.Lmicroseconds),
         db:        db,
         config:    config,
         abandonCh: make(chan struct{}),

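For reference, OR-ing log.Lmicroseconds into the flag set extends the standard timestamp to microsecond resolution, which makes closely spaced state-store log lines orderable. A stdlib-only illustration (the timestamps in the comments show the format, not captured output):

    package main

    import (
        "log"
        "os"
    )

    func main() {
        plain := log.New(os.Stdout, "", log.LstdFlags)
        fine := log.New(os.Stdout, "", log.LstdFlags|log.Lmicroseconds)

        plain.Println("state store event") // e.g. 2018/06/19 17:51:57 state store event
        fine.Println("state store event")  // e.g. 2018/06/19 17:51:57.123456 state store event
    }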
View File

@@ -181,3 +181,55 @@ func (c *VaultConfig) Copy() *VaultConfig {
     *nc = *c
     return nc
 }
+
+// IsEqual compares two Vault configurations and returns a boolean indicating
+// if they are equal.
+func (a *VaultConfig) IsEqual(b *VaultConfig) bool {
+    if a == nil && b != nil {
+        return false
+    }
+    if a != nil && b == nil {
+        return false
+    }
+
+    if a.Token != b.Token {
+        return false
+    }
+    if a.Role != b.Role {
+        return false
+    }
+    if a.TaskTokenTTL != b.TaskTokenTTL {
+        return false
+    }
+    if a.Addr != b.Addr {
+        return false
+    }
+    if a.ConnectionRetryIntv.Nanoseconds() != b.ConnectionRetryIntv.Nanoseconds() {
+        return false
+    }
+    if a.TLSCaFile != b.TLSCaFile {
+        return false
+    }
+    if a.TLSCaPath != b.TLSCaPath {
+        return false
+    }
+    if a.TLSCertFile != b.TLSCertFile {
+        return false
+    }
+    if a.TLSKeyFile != b.TLSKeyFile {
+        return false
+    }
+    if a.TLSServerName != b.TLSServerName {
+        return false
+    }
+    if a.AllowUnauthenticated != b.AllowUnauthenticated {
+        return false
+    }
+    if a.TLSSkipVerify != b.TLSSkipVerify {
+        return false
+    }
+    if a.Enabled != b.Enabled {
+        return false
+    }
+    return true
+}

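Two caveats are worth flagging in IsEqual as written: when both receiver and argument are nil, neither guard fires and the field accesses dereference a nil pointer; and the pointer-typed fields (Enabled, AllowUnauthenticated, and TLSSkipVerify are *bool, as the test literals in the next file show) are compared by pointer identity, so two configs built independently with equal values compare unequal. In the worst case that costs an unnecessary reload rather than a missed one, since pointer inequality makes the method err toward reporting a difference. A hedged sketch of a value-aware helper such fields could use (illustrative only, not part of this commit):

    // boolPtrEqual compares two *bool by value, treating two nils as equal.
    func boolPtrEqual(a, b *bool) bool {
        if a == nil || b == nil {
            return a == b
        }
        return *a == *b
    }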
View File

@@ -3,6 +3,8 @@ package config
 import (
     "reflect"
     "testing"
+
+    "github.com/stretchr/testify/require"
 )

 func TestVaultConfig_Merge(t *testing.T) {
@@ -57,3 +59,71 @@ func TestVaultConfig_Merge(t *testing.T) {
         t.Fatalf("bad:\n%#v\n%#v", result, e)
     }
 }
+
+func TestVaultConfig_IsEqual(t *testing.T) {
+    require := require.New(t)
+
+    trueValue, falseValue := true, false
+    c1 := &VaultConfig{
+        Enabled:              &falseValue,
+        Token:                "1",
+        Role:                 "1",
+        AllowUnauthenticated: &trueValue,
+        TaskTokenTTL:         "1",
+        Addr:                 "1",
+        TLSCaFile:            "1",
+        TLSCaPath:            "1",
+        TLSCertFile:          "1",
+        TLSKeyFile:           "1",
+        TLSSkipVerify:        &trueValue,
+        TLSServerName:        "1",
+    }
+
+    c2 := &VaultConfig{
+        Enabled:              &falseValue,
+        Token:                "1",
+        Role:                 "1",
+        AllowUnauthenticated: &trueValue,
+        TaskTokenTTL:         "1",
+        Addr:                 "1",
+        TLSCaFile:            "1",
+        TLSCaPath:            "1",
+        TLSCertFile:          "1",
+        TLSKeyFile:           "1",
+        TLSSkipVerify:        &trueValue,
+        TLSServerName:        "1",
+    }
+    require.True(c1.IsEqual(c2))
+
+    c3 := &VaultConfig{
+        Enabled:              &trueValue,
+        Token:                "1",
+        Role:                 "1",
+        AllowUnauthenticated: &trueValue,
+        TaskTokenTTL:         "1",
+        Addr:                 "1",
+        TLSCaFile:            "1",
+        TLSCaPath:            "1",
+        TLSCertFile:          "1",
+        TLSKeyFile:           "1",
+        TLSSkipVerify:        &trueValue,
+        TLSServerName:        "1",
+    }
+
+    c4 := &VaultConfig{
+        Enabled:              &falseValue,
+        Token:                "1",
+        Role:                 "1",
+        AllowUnauthenticated: &trueValue,
+        TaskTokenTTL:         "1",
+        Addr:                 "1",
+        TLSCaFile:            "1",
+        TLSCaPath:            "1",
+        TLSCertFile:          "1",
+        TLSKeyFile:           "1",
+        TLSSkipVerify:        &trueValue,
+        TLSServerName:        "1",
+    }
+    require.False(c3.IsEqual(c4))
+}

View File

@@ -150,6 +150,12 @@ func TestJobDiff(t *testing.T) {
                 Old: "true",
                 New: "",
             },
+            {
+                Type: DiffTypeDeleted,
+                Name: "Dispatched",
+                Old:  "false",
+                New:  "",
+            },
             {
                 Type: DiffTypeDeleted,
                 Name: "Meta[foo]",
@@ -213,6 +219,12 @@ func TestJobDiff(t *testing.T) {
                 Old: "",
                 New: "true",
             },
+            {
+                Type: DiffTypeAdded,
+                Name: "Dispatched",
+                Old:  "",
+                New:  "false",
+            },
             {
                 Type: DiffTypeAdded,
                 Name: "Meta[foo]",

View File

@@ -2018,6 +2018,10 @@ type Job struct {
     // for dispatching.
     ParameterizedJob *ParameterizedJobConfig

+    // Dispatched is used to identify if the Job has been dispatched from a
+    // parameterized job.
+    Dispatched bool
+
     // Payload is the payload supplied when the job was dispatched.
     Payload []byte

@@ -2328,7 +2332,7 @@ func (j *Job) IsPeriodicActive() bool {
 // IsParameterized returns whether a job is a parameterized job.
 func (j *Job) IsParameterized() bool {
-    return j.ParameterizedJob != nil
+    return j.ParameterizedJob != nil && !j.Dispatched
 }

 // VaultPolicies returns the set of Vault policies per task group, per task

View File

@@ -2,7 +2,6 @@ package nomad

 import (
     "fmt"
-    "log"
     "math/rand"
     "net"
     "sync/atomic"
@@ -77,7 +76,7 @@ func TestServer(t testing.T, cb func(*Config)) *Server {
     // Enable raft as leader if we have bootstrap on
     config.RaftConfig.StartAsLeader = !config.DevDisableBootstrap

-    logger := log.New(config.LogOutput, fmt.Sprintf("[%s] ", config.NodeName), log.LstdFlags)
+    logger := testlog.WithPrefix(t, fmt.Sprintf("[%s] ", config.NodeName))
     catalog := consul.NewMockCatalog(logger)

     for i := 10; i >= 0; i-- {

View File

@@ -316,6 +316,11 @@ func (v *vaultClient) SetConfig(config *config.VaultConfig) error {
     v.l.Lock()
     defer v.l.Unlock()

+    // If reloading the same config, no-op
+    if v.config.IsEqual(config) {
+        return nil
+    }
+
     // Kill any background routines
     if v.running {
         // Stop accepting any new request

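This early return makes SetConfig idempotent: a reload that carries an unchanged Vault block no longer tears down and restarts the token-renewal machinery (the tomb-based test later in this diff guards exactly that). The pattern in miniature, with hypothetical names:

    package main

    import "fmt"

    type client struct {
        addr     string
        restarts int
    }

    // SetConfig restarts background work only when the config actually changed.
    func (c *client) SetConfig(addr string) {
        if c.addr == addr { // the IsEqual guard: same config is a no-op
            return
        }
        c.addr = addr
        c.restarts++ // stands in for stopping and restarting renewal goroutines
    }

    func main() {
        c := &client{}
        c.SetConfig("https://vault.service:8200")
        c.SetConfig("https://vault.service:8200") // no-op: nothing restarted
        fmt.Println(c.restarts)                   // 1
    }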
View File

@@ -4,9 +4,7 @@
     "context"
     "encoding/json"
     "fmt"
-    "log"
     "math/rand"
-    "os"
     "reflect"
     "strings"
     "testing"
@@ -15,6 +13,7 @@
     "golang.org/x/time/rate"

     "github.com/hashicorp/nomad/helper"
+    "github.com/hashicorp/nomad/helper/testlog"
     "github.com/hashicorp/nomad/helper/uuid"
     "github.com/hashicorp/nomad/nomad/mock"
     "github.com/hashicorp/nomad/nomad/structs"
@@ -150,7 +149,7 @@ func testVaultRoleAndToken(v *testutil.TestVault, t *testing.T, vaultPolicies ma
 func TestVaultClient_BadConfig(t *testing.T) {
     t.Parallel()
     conf := &config.VaultConfig{}
-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)

     // Should be no error since Vault is not enabled
     _, err := NewVaultClient(nil, logger, nil)
@@ -179,7 +178,7 @@ func TestVaultClient_EstablishConnection(t *testing.T) {
     t.Parallel()
     for i := 10; i >= 0; i-- {
         v := testutil.NewTestVaultDelayed(t)
-        logger := log.New(os.Stderr, "", log.LstdFlags)
+        logger := testlog.Logger(t)
         v.Config.ConnectionRetryIntv = 100 * time.Millisecond
         client, err := NewVaultClient(v.Config, logger, nil)
         if err != nil {
@@ -247,7 +246,7 @@ func TestVaultClient_ValidateRole(t *testing.T) {
     }
     v.Config.Token = testVaultRoleAndToken(v, t, vaultPolicies, data, nil)

-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     v.Config.ConnectionRetryIntv = 100 * time.Millisecond
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
@@ -286,7 +285,7 @@ func TestVaultClient_ValidateRole_NonExistant(t *testing.T) {
     v.Config.Token = defaultTestVaultWhitelistRoleAndToken(v, t, 5)
     v.Config.Token = v.RootToken
-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     v.Config.ConnectionRetryIntv = 100 * time.Millisecond
     v.Config.Role = "test-nonexistent"
     client, err := NewVaultClient(v.Config, logger, nil)
@@ -335,7 +334,7 @@ func TestVaultClient_ValidateToken(t *testing.T) {
     }
     v.Config.Token = testVaultRoleAndToken(v, t, vaultPolicies, data, []string{"token-lookup", "nomad-role-create"})

-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     v.Config.ConnectionRetryIntv = 100 * time.Millisecond
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
@@ -378,7 +377,7 @@ func TestVaultClient_SetActive(t *testing.T) {
     v := testutil.NewTestVault(t)
     defer v.Stop()

-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -414,7 +413,7 @@ func TestVaultClient_SetConfig(t *testing.T) {
     // Set the configs token in a new test role
     v2.Config.Token = defaultTestVaultWhitelistRoleAndToken(v2, t, 20)

-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -437,6 +436,31 @@ func TestVaultClient_SetConfig(t *testing.T) {
     if client.tokenData == nil || len(client.tokenData.Policies) != 3 {
         t.Fatalf("unexpected token: %v", client.tokenData)
     }
+
+    // Test that when SetConfig is called with the same configuration, it is a
+    // no-op
+    failCh := make(chan struct{}, 1)
+    go func() {
+        tomb := client.tomb
+        select {
+        case <-tomb.Dying():
+            close(failCh)
+        case <-time.After(1 * time.Second):
+            return
+        }
+    }()
+
+    // Update the config
+    if err := client.SetConfig(v2.Config); err != nil {
+        t.Fatalf("SetConfig failed: %v", err)
+    }
+
+    select {
+    case <-failCh:
+        t.Fatalf("Tomb shouldn't have exited")
+    case <-time.After(1 * time.Second):
+        return
+    }
 }

 // Test that we can disable vault
@@ -445,7 +469,7 @@ func TestVaultClient_SetConfig_Disable(t *testing.T) {
     v := testutil.NewTestVault(t)
     defer v.Stop()

-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -483,7 +507,7 @@ func TestVaultClient_RenewalLoop(t *testing.T) {
     v.Config.Token = defaultTestVaultWhitelistRoleAndToken(v, t, 5)

     // Start the client
-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -541,7 +565,7 @@ func TestVaultClient_LookupToken_Invalid(t *testing.T) {
     }

     // Enable vault but use a bad address so it never establishes a conn
-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(conf, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -560,7 +584,7 @@ func TestVaultClient_LookupToken_Root(t *testing.T) {
     v := testutil.NewTestVault(t)
     defer v.Stop()

-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -625,7 +649,7 @@ func TestVaultClient_LookupToken_Role(t *testing.T) {
     // Set the configs token in a new test role
     v.Config.Token = defaultTestVaultWhitelistRoleAndToken(v, t, 5)

-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -687,7 +711,7 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) {
     v := testutil.NewTestVault(t)
     defer v.Stop()

-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -747,7 +771,7 @@ func TestVaultClient_CreateToken_Root(t *testing.T) {
     v := testutil.NewTestVault(t)
     defer v.Stop()

-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -795,7 +819,7 @@ func TestVaultClient_CreateToken_Whitelist_Role(t *testing.T) {
     v.Config.Token = defaultTestVaultWhitelistRoleAndToken(v, t, 5)

     // Start the client
-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -846,7 +870,7 @@ func TestVaultClient_CreateToken_Root_Target_Role(t *testing.T) {
     v.Config.Role = "test"

     // Start the client
-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -905,7 +929,7 @@ func TestVaultClient_CreateToken_Blacklist_Role(t *testing.T) {
     v.Config.Role = "test"

     // Start the client
-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -954,7 +978,7 @@ func TestVaultClient_CreateToken_Role_InvalidToken(t *testing.T) {
     v.Config.Token = "foo-bar"

     // Start the client
-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -992,7 +1016,7 @@ func TestVaultClient_CreateToken_Role_Unrecoverable(t *testing.T) {
     v.Config.Token = defaultTestVaultWhitelistRoleAndToken(v, t, 5)

     // Start the client
-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -1026,7 +1050,7 @@ func TestVaultClient_CreateToken_Prestart(t *testing.T) {
         Addr:    "http://127.0.0.1:0",
     }

-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(vconfig, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -1058,7 +1082,7 @@ func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) {
         Token: uuid.Generate(),
         Addr:  "http://127.0.0.1:0",
     }
-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(vconfig, logger, nil)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -1106,7 +1130,7 @@ func TestVaultClient_RevokeTokens_Root(t *testing.T) {
         return nil
     }

-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, purge)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)
@@ -1174,7 +1198,7 @@ func TestVaultClient_RevokeTokens_Role(t *testing.T) {
         return nil
     }

-    logger := log.New(os.Stderr, "", log.LstdFlags)
+    logger := testlog.Logger(t)
     client, err := NewVaultClient(v.Config, logger, purge)
     if err != nil {
         t.Fatalf("failed to build vault client: %v", err)

Some files were not shown because too many files have changed in this diff.