Move chroot building into TaskRunner

* Refactor AllocDir to have a TaskDir struct per task.
* Drivers expose filesystem isolation preference
* Fix lxc mounting of `secrets/`
Michael Schurter 2016-12-02 17:04:07 -08:00
parent e740997dcc
commit 3ea09ba16a
44 changed files with 1903 additions and 1777 deletions
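At a high level this change splits directory construction in two: the AllocRunner now builds only the allocation-level directories and registers a TaskDir per task, while each TaskRunner builds its own task directory according to the driver's declared filesystem isolation. The sketch below strings together the pieces that appear in this diff; buildTaskDir is a hypothetical helper that does not exist in the commit, and locking and error reporting are simplified.

package client

import (
	"path/filepath"

	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/config"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/nomad/structs"
)

// buildTaskDir is an illustrative helper showing the new division of labor
// between AllocRunner (alloc dir) and TaskRunner (task dir).
func buildTaskDir(cfg *config.Config, alloc *structs.Allocation, task *structs.Task, fsi cstructs.FSIsolation) (*allocdir.TaskDir, error) {
	// AllocRunner: build only the shared, allocation-level directories.
	allocDir := allocdir.NewAllocDir(filepath.Join(cfg.AllocDir, alloc.ID))
	if err := allocDir.Build(); err != nil {
		return nil, err
	}

	// AllocRunner: create a TaskDir for the task; this is what is now passed
	// to NewTaskRunner instead of the old ExecContext.
	td := allocDir.NewTaskDir(task.Name)

	// TaskRunner: build the task directory. A chroot mapping is only needed
	// when the driver reports cstructs.FSIsolationChroot (see task_dir.go below).
	if err := td.Build(nil, fsi); err != nil {
		return nil, err
	}
	return td, nil
}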

@ -11,7 +11,6 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver"
"github.com/hashicorp/nomad/client/vaultclient"
"github.com/hashicorp/nomad/nomad/structs"
@ -48,8 +47,9 @@ type AllocRunner struct {
dirtyCh chan struct{}
ctx *driver.ExecContext
ctxLock sync.Mutex
allocDir *allocdir.AllocDir
allocDirLock sync.Mutex
tasks map[string]*TaskRunner
taskStates map[string]*structs.TaskState
restored map[string]struct{}
@ -76,9 +76,9 @@ type AllocRunner struct {
type allocRunnerState struct {
Version string
Alloc *structs.Allocation
AllocDir *allocdir.AllocDir
AllocClientStatus string
AllocClientDescription string
Context *driver.ExecContext
}
// NewAllocRunner is used to create a new allocation context
@ -119,7 +119,7 @@ func (r *AllocRunner) RestoreState() error {
// Restore fields
r.alloc = snap.Alloc
r.ctx = snap.Context
r.allocDir = snap.AllocDir
r.allocClientStatus = snap.AllocClientStatus
r.allocClientDescription = snap.AllocClientDescription
@ -127,8 +127,9 @@ func (r *AllocRunner) RestoreState() error {
if r.alloc == nil {
snapshotErrors.Errors = append(snapshotErrors.Errors, fmt.Errorf("alloc_runner snapshot includes a nil allocation"))
}
if r.ctx == nil {
snapshotErrors.Errors = append(snapshotErrors.Errors, fmt.Errorf("alloc_runner snapshot includes a nil context"))
if r.allocDir == nil {
//FIXME Upgrade path?
snapshotErrors.Errors = append(snapshotErrors.Errors, fmt.Errorf("alloc_runner snapshot includes a nil alloc dir"))
}
if e := snapshotErrors.ErrorOrNil(); e != nil {
return e
@ -142,9 +143,16 @@ func (r *AllocRunner) RestoreState() error {
// Mark the task as restored.
r.restored[name] = struct{}{}
td, ok := r.allocDir.TaskDirs[name]
if !ok {
err := fmt.Errorf("failed to find task dir metadata for alloc %q task %q",
r.alloc.ID, name)
r.logger.Printf("[ERR] client: %v", err)
mErr.Errors = append(mErr.Errors, err)
}
task := &structs.Task{Name: name}
tr := NewTaskRunner(r.logger, r.config, r.setTaskState, r.ctx, r.Alloc(),
task, r.vaultClient)
tr := NewTaskRunner(r.logger, r.config, r.setTaskState, td, r.Alloc(), task, r.vaultClient)
r.tasks[name] = tr
// Skip tasks in terminal states.
@ -166,10 +174,7 @@ func (r *AllocRunner) RestoreState() error {
// GetAllocDir returns the alloc dir for the alloc runner
func (r *AllocRunner) GetAllocDir() *allocdir.AllocDir {
if r.ctx == nil {
return nil
}
return r.ctx.AllocDir
return r.allocDir
}
// SaveState is used to snapshot the state of the alloc runner
@ -204,14 +209,14 @@ func (r *AllocRunner) saveAllocRunnerState() error {
allocClientDescription := r.allocClientDescription
r.allocLock.Unlock()
r.ctxLock.Lock()
ctx := r.ctx
r.ctxLock.Unlock()
r.allocDirLock.Lock()
allocDir := r.allocDir
r.allocDirLock.Unlock()
snap := allocRunnerState{
Version: r.config.Version,
Alloc: alloc,
Context: ctx,
AllocDir: allocDir,
AllocClientStatus: allocClientStatus,
AllocClientDescription: allocClientDescription,
}
@ -233,7 +238,7 @@ func (r *AllocRunner) DestroyState() error {
// DestroyContext is used to destroy the context
func (r *AllocRunner) DestroyContext() error {
return r.ctx.AllocDir.Destroy()
return r.allocDir.Destroy()
}
// copyTaskStates returns a copy of the passed task states.
@ -409,18 +414,19 @@ func (r *AllocRunner) Run() {
}
// Create the execution context
r.ctxLock.Lock()
if r.ctx == nil {
allocDir := allocdir.NewAllocDir(filepath.Join(r.config.AllocDir, r.alloc.ID))
if err := allocDir.Build(tg.Tasks); err != nil {
r.allocDirLock.Lock()
if r.allocDir == nil {
// Build allocation directory
r.allocDir = allocdir.NewAllocDir(filepath.Join(r.config.AllocDir, r.alloc.ID))
if err := r.allocDir.Build(); err != nil {
r.logger.Printf("[WARN] client: failed to build task directories: %v", err)
r.setStatus(structs.AllocClientStatusFailed, fmt.Sprintf("failed to build task dirs for '%s'", alloc.TaskGroup))
r.ctxLock.Unlock()
r.allocDirLock.Unlock()
return
}
r.ctx = driver.NewExecContext(allocDir, r.alloc.ID)
if r.otherAllocDir != nil {
if err := allocDir.Move(r.otherAllocDir, tg.Tasks); err != nil {
if err := r.allocDir.Move(r.otherAllocDir, tg.Tasks); err != nil {
r.logger.Printf("[ERROR] client: failed to move alloc dir into alloc %q: %v", r.alloc.ID, err)
}
if err := r.otherAllocDir.Destroy(); err != nil {
@ -428,7 +434,7 @@ func (r *AllocRunner) Run() {
}
}
}
r.ctxLock.Unlock()
r.allocDirLock.Unlock()
// Check if the allocation is in a terminal status. In this case, we don't
// start any of the task runners and directly wait for the destroy signal to
@ -448,7 +454,11 @@ func (r *AllocRunner) Run() {
continue
}
tr := NewTaskRunner(r.logger, r.config, r.setTaskState, r.ctx, r.Alloc(), task.Copy(), r.vaultClient)
r.allocDirLock.Lock()
taskdir := r.allocDir.NewTaskDir(task.Name)
r.allocDirLock.Unlock()
tr := NewTaskRunner(r.logger, r.config, r.setTaskState, taskdir, r.Alloc(), task.Copy(), r.vaultClient)
r.tasks[task.Name] = tr
tr.MarkReceived()

@ -173,8 +173,8 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
}
// Check the alloc directory still exists
if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err != nil {
return false, fmt.Errorf("alloc dir destroyed: %v", ar.ctx.AllocDir.AllocDir)
if _, err := os.Stat(ar.allocDir.AllocDir); err != nil {
return false, fmt.Errorf("alloc dir destroyed: %v", ar.allocDir.AllocDir)
}
return true, nil
@ -204,8 +204,8 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
}
// Check the alloc directory was cleaned
if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err == nil {
return false, fmt.Errorf("alloc dir still exists: %v", ar.ctx.AllocDir.AllocDir)
if _, err := os.Stat(ar.allocDir.AllocDir); err == nil {
return false, fmt.Errorf("alloc dir still exists: %v", ar.allocDir.AllocDir)
} else if !os.IsNotExist(err) {
return false, fmt.Errorf("stat err: %v", err)
}
@ -252,8 +252,8 @@ func TestAllocRunner_Destroy(t *testing.T) {
}
// Check the alloc directory was cleaned
if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err == nil {
return false, fmt.Errorf("alloc dir still exists: %v", ar.ctx.AllocDir.AllocDir)
if _, err := os.Stat(ar.allocDir.AllocDir); err == nil {
return false, fmt.Errorf("alloc dir still exists: %v", ar.allocDir.AllocDir)
} else if !os.IsNotExist(err) {
return false, fmt.Errorf("stat err: %v", err)
}
@ -424,8 +424,8 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
}
// Check the alloc directory still exists
if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err != nil {
return false, fmt.Errorf("alloc dir destroyed: %v", ar.ctx.AllocDir.AllocDir)
if _, err := os.Stat(ar.allocDir.AllocDir); err != nil {
return false, fmt.Errorf("alloc dir destroyed: %v", ar.allocDir.AllocDir)
}
return true, nil
@ -456,8 +456,8 @@ func TestAllocRunner_SaveRestoreState_TerminalAlloc(t *testing.T) {
}
// Check the alloc directory was cleaned
if _, err := os.Stat(ar.ctx.AllocDir.AllocDir); err == nil {
return false, fmt.Errorf("alloc dir still exists: %v", ar.ctx.AllocDir.AllocDir)
if _, err := os.Stat(ar.allocDir.AllocDir); err == nil {
return false, fmt.Errorf("alloc dir still exists: %v", ar.allocDir.AllocDir)
} else if !os.IsNotExist(err) {
return false, fmt.Errorf("stat err: %v", err)
}
@ -546,10 +546,10 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) {
})
// Write some data in data dir and task dir of the alloc
dataFile := filepath.Join(ar.ctx.AllocDir.SharedDir, "data", "data_file")
dataFile := filepath.Join(ar.allocDir.SharedDir, "data", "data_file")
ioutil.WriteFile(dataFile, []byte("hello world"), os.ModePerm)
taskDir := ar.ctx.AllocDir.TaskDirs[task.Name]
taskLocalFile := filepath.Join(taskDir, "local", "local_file")
taskDir := ar.allocDir.TaskDirs[task.Name]
taskLocalFile := filepath.Join(taskDir.LocalDir, "local_file")
ioutil.WriteFile(taskLocalFile, []byte("good bye world"), os.ModePerm)
// Create another alloc runner
@ -560,7 +560,7 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) {
"run_for": "1s",
}
upd1, ar1 := testAllocRunnerFromAlloc(alloc1, false)
ar1.SetPreviousAllocDir(ar.ctx.AllocDir)
ar1.SetPreviousAllocDir(ar.allocDir)
go ar1.Run()
testutil.WaitForResult(func() (bool, error) {
@ -577,13 +577,13 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) {
})
// Ensure that data from ar1 was moved to ar
taskDir = ar1.ctx.AllocDir.TaskDirs[task.Name]
taskLocalFile = filepath.Join(taskDir, "local", "local_file")
taskDir = ar1.allocDir.TaskDirs[task.Name]
taskLocalFile = filepath.Join(taskDir.LocalDir, "local_file")
if fileInfo, _ := os.Stat(taskLocalFile); fileInfo == nil {
t.Fatalf("file %v not found", taskLocalFile)
}
dataFile = filepath.Join(ar1.ctx.AllocDir.SharedDir, "data", "data_file")
dataFile = filepath.Join(ar1.allocDir.SharedDir, "data", "data_file")
if fileInfo, _ := os.Stat(dataFile); fileInfo == nil {
t.Fatalf("file %v not found", dataFile)
}

@ -23,8 +23,12 @@ var (
// Name of the directory where logs of Tasks are written
LogDirName = "logs"
// SharedDataDir is one of the shared allocation directories. It is
// included in snapshots.
SharedDataDir = "data"
// The set of directories that exist inside each shared alloc directory.
SharedAllocDirs = []string{LogDirName, "tmp", "data"}
SharedAllocDirs = []string{LogDirName, "tmp", SharedDataDir}
// The name of the directory that exists inside each task directory
// regardless of driver.
@ -48,7 +52,7 @@ type AllocDir struct {
SharedDir string
// TaskDirs is a mapping of task names to their non-shared directory.
TaskDirs map[string]string
TaskDirs map[string]*TaskDir
}
// AllocFileInfo holds information about a file inside the AllocDir
@ -73,22 +77,27 @@ type AllocDirFS interface {
// NewAllocDir initializes the AllocDir struct with allocDir as base path for
// the allocation directory.
func NewAllocDir(allocDir string) *AllocDir {
d := &AllocDir{
AllocDir: allocDir,
TaskDirs: make(map[string]string),
return &AllocDir{
AllocDir: allocDir,
SharedDir: filepath.Join(allocDir, SharedAllocName),
TaskDirs: make(map[string]*TaskDir),
}
d.SharedDir = filepath.Join(d.AllocDir, SharedAllocName)
return d
}
// NewTaskDir creates a new TaskDir and adds it to the AllocDirs TaskDirs map.
func (d *AllocDir) NewTaskDir(name string) *TaskDir {
td := newTaskDir(d.AllocDir, name)
d.TaskDirs[name] = td
return td
}
// Snapshot creates an archive of the files and directories in the data dir of
// the allocation and the task local directories
func (d *AllocDir) Snapshot(w io.Writer) error {
allocDataDir := filepath.Join(d.SharedDir, "data")
allocDataDir := filepath.Join(d.SharedDir, SharedDataDir)
rootPaths := []string{allocDataDir}
for _, path := range d.TaskDirs {
taskLocaPath := filepath.Join(path, "local")
rootPaths = append(rootPaths, taskLocaPath)
for _, taskdir := range d.TaskDirs {
rootPaths = append(rootPaths, taskdir.LocalDir)
}
tw := tar.NewWriter(w)
@ -142,11 +151,11 @@ func (d *AllocDir) Snapshot(w io.Writer) error {
return nil
}
// Move moves the shared data and task local dirs
// Move moves the other alloc directory's shared data dir and task local dirs into this alloc dir.
func (d *AllocDir) Move(other *AllocDir, tasks []*structs.Task) error {
// Move the data directory
otherDataDir := filepath.Join(other.SharedDir, "data")
dataDir := filepath.Join(d.SharedDir, "data")
otherDataDir := filepath.Join(other.SharedDir, SharedDataDir)
dataDir := filepath.Join(d.SharedDir, SharedDataDir)
if fileInfo, err := os.Stat(otherDataDir); fileInfo != nil && err == nil {
if err := os.Rename(otherDataDir, dataDir); err != nil {
return fmt.Errorf("error moving data dir: %v", err)
@ -155,14 +164,19 @@ func (d *AllocDir) Move(other *AllocDir, tasks []*structs.Task) error {
// Move the task directories
for _, task := range tasks {
taskDir := filepath.Join(other.AllocDir, task.Name)
otherTaskLocal := filepath.Join(taskDir, TaskLocal)
otherTaskDir := filepath.Join(other.AllocDir, task.Name)
otherTaskLocal := filepath.Join(otherTaskDir, TaskLocal)
if fileInfo, err := os.Stat(otherTaskLocal); fileInfo != nil && err == nil {
if taskDir, ok := d.TaskDirs[task.Name]; ok {
if err := os.Rename(otherTaskLocal, filepath.Join(taskDir, TaskLocal)); err != nil {
return fmt.Errorf("error moving task local dir: %v", err)
}
fileInfo, err := os.Stat(otherTaskLocal)
if fileInfo != nil && err == nil {
// TaskDirs haven't been built yet, so create it
newTaskDir := filepath.Join(d.AllocDir, task.Name)
if err := os.MkdirAll(newTaskDir, 0777); err != nil {
return fmt.Errorf("error creating task %q dir: %v", task.Name, err)
}
localDir := filepath.Join(newTaskDir, TaskLocal)
if err := os.Rename(otherTaskLocal, localDir); err != nil {
return fmt.Errorf("error moving task %q local dir: %v", task.Name, err)
}
}
}
@ -180,44 +194,45 @@ func (d *AllocDir) Destroy() error {
}
if err := os.RemoveAll(d.AllocDir); err != nil {
mErr.Errors = append(mErr.Errors, err)
mErr.Errors = append(mErr.Errors, fmt.Errorf("failed to remove alloc dir %q: %v", d.AllocDir, err))
}
return mErr.ErrorOrNil()
}
// UnmountAll unmounts and removes linked/mounted directories in task dirs.
func (d *AllocDir) UnmountAll() error {
var mErr multierror.Error
for _, dir := range d.TaskDirs {
// Check if the directory has the shared alloc mounted.
taskAlloc := filepath.Join(dir, SharedAllocName)
if d.pathExists(taskAlloc) {
if err := d.unmountSharedDir(taskAlloc); err != nil {
if pathExists(dir.SharedTaskDir) {
if err := unlinkDir(dir.SharedTaskDir); err != nil {
mErr.Errors = append(mErr.Errors,
fmt.Errorf("failed to unmount shared alloc dir %q: %v", taskAlloc, err))
} else if err := os.RemoveAll(taskAlloc); err != nil {
fmt.Errorf("failed to unmount shared alloc dir %q: %v", dir.SharedTaskDir, err))
} else if err := os.RemoveAll(dir.SharedTaskDir); err != nil {
mErr.Errors = append(mErr.Errors,
fmt.Errorf("failed to delete shared alloc dir %q: %v", taskAlloc, err))
fmt.Errorf("failed to delete shared alloc dir %q: %v", dir.SharedTaskDir, err))
}
}
taskSecret := filepath.Join(dir, TaskSecrets)
if d.pathExists(taskSecret) {
if err := d.removeSecretDir(taskSecret); err != nil {
if pathExists(dir.SecretsDir) {
if err := removeSecretDir(dir.SecretsDir); err != nil {
mErr.Errors = append(mErr.Errors,
fmt.Errorf("failed to remove the secret dir %q: %v", taskSecret, err))
fmt.Errorf("failed to remove the secret dir %q: %v", dir.SecretsDir, err))
}
}
// Unmount dev/ and proc/ if they have been mounted.
d.unmountSpecialDirs(dir)
if err := dir.unmountSpecialDirs(); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
}
return mErr.ErrorOrNil()
}
// Given a list of a task build the correct alloc structure.
func (d *AllocDir) Build(tasks []*structs.Task) error {
// Build the directory tree for an allocation.
func (d *AllocDir) Build() error {
// Make the alloc directory, owned by the nomad process.
if err := os.MkdirAll(d.AllocDir, 0755); err != nil {
return fmt.Errorf("Failed to make the alloc directory %v: %v", d.AllocDir, err)
@ -229,63 +244,17 @@ func (d *AllocDir) Build(tasks []*structs.Task) error {
}
// Make the shared directory have non-root permissions.
if err := d.dropDirPermissions(d.SharedDir); err != nil {
if err := dropDirPermissions(d.SharedDir); err != nil {
return err
}
// Create shared subdirs
for _, dir := range SharedAllocDirs {
p := filepath.Join(d.SharedDir, dir)
if err := os.MkdirAll(p, 0777); err != nil {
return err
}
if err := d.dropDirPermissions(p); err != nil {
return err
}
}
// Make the task directories.
for _, t := range tasks {
taskDir := filepath.Join(d.AllocDir, t.Name)
if err := os.MkdirAll(taskDir, 0777); err != nil {
return err
}
// Make the task directory have non-root permissions.
if err := d.dropDirPermissions(taskDir); err != nil {
return err
}
// Create a local directory that each task can use.
local := filepath.Join(taskDir, TaskLocal)
if err := os.MkdirAll(local, 0777); err != nil {
return err
}
if err := d.dropDirPermissions(local); err != nil {
return err
}
d.TaskDirs[t.Name] = taskDir
// Create the directories that should be in every task.
for _, dir := range TaskDirs {
local := filepath.Join(taskDir, dir)
if err := os.MkdirAll(local, 0777); err != nil {
return err
}
if err := d.dropDirPermissions(local); err != nil {
return err
}
}
// Create the secret directory
secret := filepath.Join(taskDir, TaskSecrets)
if err := d.createSecretDir(secret); err != nil {
return err
}
if err := d.dropDirPermissions(secret); err != nil {
if err := dropDirPermissions(p); err != nil {
return err
}
}
@ -293,122 +262,6 @@ func (d *AllocDir) Build(tasks []*structs.Task) error {
return nil
}
// Embed takes a mapping of absolute directory or file paths on the host to
// their intended, relative location within the task directory. Embed attempts
// hardlink and then defaults to copying. If the path exists on the host and
// can't be embedded an error is returned.
func (d *AllocDir) Embed(task string, entries map[string]string) error {
taskdir, ok := d.TaskDirs[task]
if !ok {
return fmt.Errorf("Task directory doesn't exist for task %v", task)
}
subdirs := make(map[string]string)
for source, dest := range entries {
// Check to see if directory exists on host.
s, err := os.Stat(source)
if os.IsNotExist(err) {
continue
}
// Embedding a single file
if !s.IsDir() {
if err := d.createDir(taskdir, filepath.Dir(dest)); err != nil {
return fmt.Errorf("Couldn't create destination directory %v: %v", dest, err)
}
// Copy the file.
taskEntry := filepath.Join(taskdir, dest)
if err := d.linkOrCopy(source, taskEntry, s.Mode().Perm()); err != nil {
return err
}
continue
}
// Create destination directory.
destDir := filepath.Join(taskdir, dest)
if err := d.createDir(taskdir, dest); err != nil {
return fmt.Errorf("Couldn't create destination directory %v: %v", destDir, err)
}
// Enumerate the files in source.
dirEntries, err := ioutil.ReadDir(source)
if err != nil {
return fmt.Errorf("Couldn't read directory %v: %v", source, err)
}
for _, entry := range dirEntries {
hostEntry := filepath.Join(source, entry.Name())
taskEntry := filepath.Join(destDir, filepath.Base(hostEntry))
if entry.IsDir() {
subdirs[hostEntry] = filepath.Join(dest, filepath.Base(hostEntry))
continue
}
// Check if entry exists. This can happen if restarting a failed
// task.
if _, err := os.Lstat(taskEntry); err == nil {
continue
}
if !entry.Mode().IsRegular() {
// If it is a symlink we can create it, otherwise we skip it.
if entry.Mode()&os.ModeSymlink == 0 {
continue
}
link, err := os.Readlink(hostEntry)
if err != nil {
return fmt.Errorf("Couldn't resolve symlink for %v: %v", source, err)
}
if err := os.Symlink(link, taskEntry); err != nil {
// Symlinking twice
if err.(*os.LinkError).Err.Error() != "file exists" {
return fmt.Errorf("Couldn't create symlink: %v", err)
}
}
continue
}
if err := d.linkOrCopy(hostEntry, taskEntry, entry.Mode().Perm()); err != nil {
return err
}
}
}
// Recurse on self to copy subdirectories.
if len(subdirs) != 0 {
return d.Embed(task, subdirs)
}
return nil
}
// MountSharedDir mounts the shared directory into the specified task's
// directory. Mount is documented at an OS level in their respective
// implementation files.
func (d *AllocDir) MountSharedDir(task string) error {
taskDir, ok := d.TaskDirs[task]
if !ok {
return fmt.Errorf("No task directory exists for %v", task)
}
taskLoc := filepath.Join(taskDir, SharedAllocName)
if err := d.mountSharedDir(taskLoc); err != nil {
return fmt.Errorf("Failed to mount shared directory for task %v: %v", task, err)
}
return nil
}
// LogDir returns the log dir in the current allocation directory
func (d *AllocDir) LogDir() string {
return filepath.Join(d.AllocDir, SharedAllocName, LogDirName)
}
// List returns the list of files at a path relative to the alloc dir
func (d *AllocDir) List(path string) ([]*AllocFileInfo, error) {
if escapes, err := structs.PathEscapesAllocDir("", path); err != nil {
@ -470,8 +323,7 @@ func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) {
// Check if it is trying to read into a secret directory
for _, dir := range d.TaskDirs {
sdir := filepath.Join(dir, TaskSecrets)
if filepath.HasPrefix(p, sdir) {
if filepath.HasPrefix(p, dir.SecretsDir) {
return nil, fmt.Errorf("Reading secret file prohibited: %s", path)
}
}
@ -549,7 +401,7 @@ func fileCopy(src, dst string, perm os.FileMode) error {
}
// pathExists is a helper function to check if the path exists.
func (d *AllocDir) pathExists(path string) bool {
func pathExists(path string) bool {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
return false
@ -558,19 +410,11 @@ func (d *AllocDir) pathExists(path string) bool {
return true
}
func (d *AllocDir) GetSecretDir(task string) (string, error) {
if t, ok := d.TaskDirs[task]; !ok {
return "", fmt.Errorf("Allocation directory doesn't contain task %q", task)
} else {
return filepath.Join(t, TaskSecrets), nil
}
}
// createDir creates a directory structure inside the basepath. This functions
// preserves the permissions of each of the subdirectories in the relative path
// by looking up the permissions in the host.
func (d *AllocDir) createDir(basePath, relPath string) error {
filePerms, err := d.splitPath(relPath)
func createDir(basePath, relPath string) error {
filePerms, err := splitPath(relPath)
if err != nil {
return err
}
@ -594,9 +438,9 @@ type fileInfo struct {
}
// splitPath stats each subdirectory of a path. The first element of the array
// is the file passed to this method, and the last element is the root of the
// is the file passed to this function, and the last element is the root of the
// path.
func (d *AllocDir) splitPath(path string) ([]fileInfo, error) {
func splitPath(path string) ([]fileInfo, error) {
var mode os.FileMode
i, err := os.Stat(path)

@ -1,37 +0,0 @@
package allocdir
import (
"os"
"syscall"
)
// Hardlinks the shared directory. As a side-effect the shared directory and
// task directory must be on the same filesystem.
func (d *AllocDir) mountSharedDir(dir string) error {
return syscall.Link(d.SharedDir, dir)
}
func (d *AllocDir) unmountSharedDir(dir string) error {
return syscall.Unlink(dir)
}
// createSecretDir creates the secrets dir folder at the given path
func (d *AllocDir) createSecretDir(dir string) error {
return os.MkdirAll(dir, 0777)
}
// removeSecretDir removes the secrets dir folder
func (d *AllocDir) removeSecretDir(dir string) error {
return os.RemoveAll(dir)
}
// MountSpecialDirs mounts the dev and proc file system on the chroot of the
// task. It's a no-op on darwin.
func (d *AllocDir) MountSpecialDirs(taskDir string) error {
return nil
}
// unmountSpecialDirs unmounts the dev and proc file system from the chroot
func (d *AllocDir) unmountSpecialDirs(taskDir string) error {
return nil
}

@ -1,37 +0,0 @@
package allocdir
import (
"os"
"syscall"
)
// Hardlinks the shared directory. As a side-effect the shared directory and
// task directory must be on the same filesystem.
func (d *AllocDir) mountSharedDir(dir string) error {
return syscall.Link(d.SharedDir, dir)
}
func (d *AllocDir) unmountSharedDir(dir string) error {
return syscall.Unlink(dir)
}
// createSecretDir creates the secrets dir folder at the given path
func (d *AllocDir) createSecretDir(dir string) error {
return os.MkdirAll(dir, 0777)
}
// removeSecretDir removes the secrets dir folder
func (d *AllocDir) removeSecretDir(dir string) error {
return os.RemoveAll(dir)
}
// MountSpecialDirs mounts the dev and proc file system on the chroot of the
// task. It's a no-op on FreeBSD right now.
func (d *AllocDir) MountSpecialDirs(taskDir string) error {
return nil
}
// unmountSpecialDirs unmounts the dev and proc file system from the chroot
func (d *AllocDir) unmountSpecialDirs(taskDir string) error {
return nil
}

@ -1,116 +0,0 @@
package allocdir
import (
"fmt"
"os"
"path/filepath"
"syscall"
"golang.org/x/sys/unix"
"github.com/hashicorp/go-multierror"
)
const (
// secretDirTmpfsSize is the size of the tmpfs per task in MBs
secretDirTmpfsSize = 1
)
// Bind mounts the shared directory into the task directory. Must be root to
// run.
func (d *AllocDir) mountSharedDir(taskDir string) error {
if err := os.MkdirAll(taskDir, 0777); err != nil {
return err
}
return syscall.Mount(d.SharedDir, taskDir, "", syscall.MS_BIND, "")
}
func (d *AllocDir) unmountSharedDir(dir string) error {
return syscall.Unmount(dir, 0)
}
// createSecretDir creates the secrets dir folder at the given path using a
// tmpfs
func (d *AllocDir) createSecretDir(dir string) error {
// Only mount the tmpfs if we are root
if unix.Geteuid() == 0 {
if err := os.MkdirAll(dir, 0777); err != nil {
return err
}
var flags uintptr
flags = syscall.MS_NOEXEC
options := fmt.Sprintf("size=%dm", secretDirTmpfsSize)
err := syscall.Mount("tmpfs", dir, "tmpfs", flags, options)
return os.NewSyscallError("mount", err)
}
return os.MkdirAll(dir, 0777)
}
// createSecretDir removes the secrets dir folder
func (d *AllocDir) removeSecretDir(dir string) error {
if unix.Geteuid() == 0 {
if err := syscall.Unmount(dir, 0); err != nil {
return os.NewSyscallError("unmount", err)
}
}
return os.RemoveAll(dir)
}
// MountSpecialDirs mounts the dev and proc file system from the host to the
// chroot
func (d *AllocDir) MountSpecialDirs(taskDir string) error {
// Mount dev
dev := filepath.Join(taskDir, "dev")
if !d.pathExists(dev) {
if err := os.MkdirAll(dev, 0777); err != nil {
return fmt.Errorf("Mkdir(%v) failed: %v", dev, err)
}
if err := syscall.Mount("none", dev, "devtmpfs", syscall.MS_RDONLY, ""); err != nil {
return fmt.Errorf("Couldn't mount /dev to %v: %v", dev, err)
}
}
// Mount proc
proc := filepath.Join(taskDir, "proc")
if !d.pathExists(proc) {
if err := os.MkdirAll(proc, 0777); err != nil {
return fmt.Errorf("Mkdir(%v) failed: %v", proc, err)
}
if err := syscall.Mount("none", proc, "proc", syscall.MS_RDONLY, ""); err != nil {
return fmt.Errorf("Couldn't mount /proc to %v: %v", proc, err)
}
}
return nil
}
// unmountSpecialDirs unmounts the dev and proc file system from the chroot
func (d *AllocDir) unmountSpecialDirs(taskDir string) error {
errs := new(multierror.Error)
dev := filepath.Join(taskDir, "dev")
if d.pathExists(dev) {
if err := syscall.Unmount(dev, 0); err != nil {
errs = multierror.Append(errs, fmt.Errorf("Failed to unmount dev (%v): %v", dev, err))
} else if err := os.RemoveAll(dev); err != nil {
errs = multierror.Append(errs, fmt.Errorf("Failed to delete dev directory (%v): %v", dev, err))
}
}
// Unmount proc.
proc := filepath.Join(taskDir, "proc")
if d.pathExists(proc) {
if err := syscall.Unmount(proc, 0); err != nil {
errs = multierror.Append(errs, fmt.Errorf("Failed to unmount proc (%v): %v", proc, err))
} else if err := os.RemoveAll(proc); err != nil {
errs = multierror.Append(errs, fmt.Errorf("Failed to delete proc directory (%v): %v", dev, err))
}
}
return errs.ErrorOrNil()
}

@ -8,13 +8,12 @@ import (
"log"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"testing"
tomb "gopkg.in/tomb.v1"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/nomad/structs"
)
@ -50,8 +49,7 @@ var (
}
)
// Test that given a set of tasks, each task gets a directory and that directory
// has the shared alloc dir inside of it.
// Test that AllocDir.Build builds just the alloc directory.
func TestAllocDir_BuildAlloc(t *testing.T) {
tmp, err := ioutil.TempDir("", "AllocDir")
if err != nil {
@ -61,126 +59,29 @@ func TestAllocDir_BuildAlloc(t *testing.T) {
d := NewAllocDir(tmp)
defer d.Destroy()
tasks := []*structs.Task{t1, t2}
if err := d.Build(tasks); err != nil {
t.Fatalf("Build(%v) failed: %v", tasks, err)
d.NewTaskDir(t1.Name)
d.NewTaskDir(t2.Name)
if err := d.Build(); err != nil {
t.Fatalf("Build() failed: %v", err)
}
// Check that the AllocDir and each of the task directories exist.
if _, err := os.Stat(d.AllocDir); os.IsNotExist(err) {
t.Fatalf("Build(%v) didn't create AllocDir %v", tasks, d.AllocDir)
t.Fatalf("Build() didn't create AllocDir %v", d.AllocDir)
}
for _, task := range tasks {
for _, task := range []*structs.Task{t1, t2} {
tDir, ok := d.TaskDirs[task.Name]
if !ok {
t.Fatalf("Task directory not found for %v", task.Name)
}
if _, err := os.Stat(tDir); os.IsNotExist(err) {
t.Fatalf("Build(%v) didn't create TaskDir %v", tasks, tDir)
if stat, _ := os.Stat(tDir.Dir); stat != nil {
t.Fatalf("Build() created TaskDir %v", tDir.Dir)
}
if _, err := os.Stat(filepath.Join(tDir, TaskSecrets)); os.IsNotExist(err) {
t.Fatalf("Build(%v) didn't create secret dir %v", tasks)
}
}
}
func TestAllocDir_LogDir(t *testing.T) {
tmp, err := ioutil.TempDir("", "AllocDir")
if err != nil {
t.Fatalf("Couldn't create temp dir: %v", err)
}
defer os.RemoveAll(tmp)
d := NewAllocDir(tmp)
defer d.Destroy()
expected := filepath.Join(d.AllocDir, SharedAllocName, LogDirName)
if d.LogDir() != expected {
t.Fatalf("expected: %v, got: %v", expected, d.LogDir())
}
}
func TestAllocDir_EmbedNonExistent(t *testing.T) {
tmp, err := ioutil.TempDir("", "AllocDir")
if err != nil {
t.Fatalf("Couldn't create temp dir: %v", err)
}
defer os.RemoveAll(tmp)
d := NewAllocDir(tmp)
defer d.Destroy()
tasks := []*structs.Task{t1, t2}
if err := d.Build(tasks); err != nil {
t.Fatalf("Build(%v) failed: %v", tasks, err)
}
fakeDir := "/foobarbaz"
task := tasks[0].Name
mapping := map[string]string{fakeDir: fakeDir}
if err := d.Embed(task, mapping); err != nil {
t.Fatalf("Embed(%v, %v) should should skip %v since it does not exist", task, mapping, fakeDir)
}
}
func TestAllocDir_EmbedDirs(t *testing.T) {
tmp, err := ioutil.TempDir("", "AllocDir")
if err != nil {
t.Fatalf("Couldn't create temp dir: %v", err)
}
defer os.RemoveAll(tmp)
d := NewAllocDir(tmp)
defer d.Destroy()
tasks := []*structs.Task{t1, t2}
if err := d.Build(tasks); err != nil {
t.Fatalf("Build(%v) failed: %v", tasks, err)
}
// Create a fake host directory, with a file, and a subfolder that contains
// a file.
host, err := ioutil.TempDir("", "AllocDirHost")
if err != nil {
t.Fatalf("Couldn't create temp dir: %v", err)
}
defer os.RemoveAll(host)
subDirName := "subdir"
subDir := filepath.Join(host, subDirName)
if err := os.MkdirAll(subDir, 0777); err != nil {
t.Fatalf("Failed to make subdir %v: %v", subDir, err)
}
file := "foo"
subFile := "bar"
if err := ioutil.WriteFile(filepath.Join(host, file), []byte{'a'}, 0777); err != nil {
t.Fatalf("Coudn't create file in host dir %v: %v", host, err)
}
if err := ioutil.WriteFile(filepath.Join(subDir, subFile), []byte{'a'}, 0777); err != nil {
t.Fatalf("Coudn't create file in host subdir %v: %v", subDir, err)
}
// Create mapping from host dir to task dir.
task := tasks[0].Name
taskDest := "bin/test/"
mapping := map[string]string{host: taskDest}
if err := d.Embed(task, mapping); err != nil {
t.Fatalf("Embed(%v, %v) failed: %v", task, mapping, err)
}
// Check that the embedding was done properly.
taskDir, ok := d.TaskDirs[task]
if !ok {
t.Fatalf("Task directory not found for %v", task)
}
exp := []string{filepath.Join(taskDir, taskDest, file), filepath.Join(taskDir, taskDest, subDirName, subFile)}
for _, e := range exp {
if _, err := os.Stat(e); os.IsNotExist(err) {
t.Fatalf("File %v not embeded: %v", e, err)
if stat, _ := os.Stat(tDir.SecretsDir); stat != nil {
t.Fatalf("Build() created secret dir %v", tDir.Dir)
}
}
}
@ -195,41 +96,37 @@ func TestAllocDir_MountSharedAlloc(t *testing.T) {
d := NewAllocDir(tmp)
defer d.Destroy()
tasks := []*structs.Task{t1, t2}
if err := d.Build(tasks); err != nil {
t.Fatalf("Build(%v) failed: %v", tasks, err)
if err := d.Build(); err != nil {
t.Fatalf("Build() failed: %v", err)
}
// Build 2 task dirs
td1 := d.NewTaskDir(t1.Name)
if err := td1.Build(nil, cstructs.FSIsolationNone); err != nil {
t.Fatalf("error build task=%q dir: %v", t1.Name, err)
}
td2 := d.NewTaskDir(t2.Name)
if err := td2.Build(nil, cstructs.FSIsolationNone); err != nil {
t.Fatalf("error build task=%q dir: %v", t2.Name, err)
}
// Write a file to the shared dir.
exp := []byte{'f', 'o', 'o'}
file := "bar"
if err := ioutil.WriteFile(filepath.Join(d.SharedDir, file), exp, 0777); err != nil {
contents := []byte("foo")
const filename = "bar"
if err := ioutil.WriteFile(filepath.Join(d.SharedDir, filename), contents, 0666); err != nil {
t.Fatalf("Couldn't write file to shared directory: %v", err)
}
for _, task := range tasks {
// Mount and then check that the file exists in the task directory.
if err := d.MountSharedDir(task.Name); err != nil {
if v, ok := osMountSharedDirSupport[runtime.GOOS]; v && ok {
t.Fatalf("MountSharedDir(%v) failed: %v", task.Name, err)
} else {
t.Skipf("MountShareDir(%v) failed, no OS support")
}
}
taskDir, ok := d.TaskDirs[task.Name]
if !ok {
t.Fatalf("Task directory not found for %v", task.Name)
}
taskFile := filepath.Join(taskDir, SharedAllocName, file)
// Check that the file exists in the task directories
for _, td := range []*TaskDir{td1, td2} {
taskFile := filepath.Join(td.SharedTaskDir, filename)
act, err := ioutil.ReadFile(taskFile)
if err != nil {
t.Fatalf("Failed to read shared alloc file from task dir: %v", err)
}
if !reflect.DeepEqual(act, exp) {
t.Fatalf("Incorrect data read from task dir: want %v; got %v", exp, act)
if !bytes.Equal(act, contents) {
t.Fatalf("Incorrect data read from task dir: want %v; got %v", contents, act)
}
}
}
@ -243,27 +140,31 @@ func TestAllocDir_Snapshot(t *testing.T) {
d := NewAllocDir(tmp)
defer d.Destroy()
tasks := []*structs.Task{t1, t2}
if err := d.Build(tasks); err != nil {
t.Fatalf("Build(%v) failed: %v", tasks, err)
if err := d.Build(); err != nil {
t.Fatalf("Build() failed: %v", err)
}
dataDir := filepath.Join(d.SharedDir, "data")
taskDir := d.TaskDirs[t1.Name]
taskLocal := filepath.Join(taskDir, "local")
// Build 2 task dirs
td1 := d.NewTaskDir(t1.Name)
if err := td1.Build(nil, cstructs.FSIsolationNone); err != nil {
t.Fatalf("error build task=%q dir: %v", t1.Name, err)
}
td2 := d.NewTaskDir(t2.Name)
if err := td2.Build(nil, cstructs.FSIsolationNone); err != nil {
t.Fatalf("error build task=%q dir: %v", t2.Name, err)
}
// Write a file to the shared dir.
exp := []byte{'f', 'o', 'o'}
file := "bar"
if err := ioutil.WriteFile(filepath.Join(dataDir, file), exp, 0777); err != nil {
if err := ioutil.WriteFile(filepath.Join(d.SharedDir, "data", file), exp, 0666); err != nil {
t.Fatalf("Couldn't write file to shared directory: %v", err)
}
// Write a file to the task local
exp = []byte{'b', 'a', 'r'}
file1 := "lol"
if err := ioutil.WriteFile(filepath.Join(taskLocal, file1), exp, 0777); err != nil {
if err := ioutil.WriteFile(filepath.Join(td1.LocalDir, file1), exp, 0666); err != nil {
t.Fatalf("couldn't write to task local directory: %v", err)
}
@ -307,50 +208,55 @@ func TestAllocDir_Move(t *testing.T) {
// Create two alloc dirs
d1 := NewAllocDir(tmp1)
if err := d1.Build(); err != nil {
t.Fatalf("Build() failed: %v", err)
}
defer d1.Destroy()
d2 := NewAllocDir(tmp2)
if err := d2.Build(); err != nil {
t.Fatalf("Build() failed: %v", err)
}
defer d2.Destroy()
tasks := []*structs.Task{t1, t2}
if err := d1.Build(tasks); err != nil {
t.Fatalf("Build(%v) failed: %v", tasks, err)
td1 := d1.NewTaskDir(t1.Name)
if err := td1.Build(nil, cstructs.FSIsolationNone); err != nil {
t.Fatalf("TaskDir.Build() faild: %v", err)
}
if err := d2.Build(tasks); err != nil {
t.Fatalf("Build(%v) failed: %v", tasks, err)
td2 := d2.NewTaskDir(t1.Name)
if err := td2.Build(nil, cstructs.FSIsolationNone); err != nil {
t.Fatalf("TaskDir.Build() faild: %v", err)
}
dataDir := filepath.Join(d1.SharedDir, "data")
taskDir := d1.TaskDirs[t1.Name]
taskLocal := filepath.Join(taskDir, "local")
dataDir := filepath.Join(d1.SharedDir, SharedDataDir)
// Write a file to the shared dir.
exp := []byte{'f', 'o', 'o'}
file := "bar"
if err := ioutil.WriteFile(filepath.Join(dataDir, file), exp, 0777); err != nil {
exp1 := []byte("foo")
file1 := "bar"
if err := ioutil.WriteFile(filepath.Join(dataDir, file1), exp1, 0666); err != nil {
t.Fatalf("Couldn't write file to shared directory: %v", err)
}
// Write a file to the task local
exp = []byte{'b', 'a', 'r'}
file1 := "lol"
if err := ioutil.WriteFile(filepath.Join(taskLocal, file1), exp, 0777); err != nil {
exp2 := []byte("bar")
file2 := "lol"
if err := ioutil.WriteFile(filepath.Join(td1.LocalDir, file2), exp2, 0666); err != nil {
t.Fatalf("couldn't write to task local directory: %v", err)
}
// Move the d1 allocdir to d2
if err := d2.Move(d1, []*structs.Task{t1, t2}); err != nil {
if err := d2.Move(d1, []*structs.Task{t1}); err != nil {
t.Fatalf("err: %v", err)
}
// Ensure the files in d1 are present in d2
fi, err := os.Stat(filepath.Join(d2.SharedDir, "data", "bar"))
fi, err := os.Stat(filepath.Join(d2.SharedDir, SharedDataDir, file1))
if err != nil || fi == nil {
t.Fatalf("data dir was not moved")
}
fi, err = os.Stat(filepath.Join(d2.TaskDirs[t1.Name], "local", "lol"))
fi, err = os.Stat(filepath.Join(d2.TaskDirs[t1.Name].LocalDir, file2))
if err != nil || fi == nil {
t.Fatalf("task local dir was not moved")
}
@ -364,11 +270,10 @@ func TestAllocDir_EscapeChecking(t *testing.T) {
defer os.RemoveAll(tmp)
d := NewAllocDir(tmp)
defer d.Destroy()
tasks := []*structs.Task{t1, t2}
if err := d.Build(tasks); err != nil {
t.Fatalf("Build(%v) failed: %v", tasks, err)
if err := d.Build(); err != nil {
t.Fatalf("Build() failed: %v", err)
}
defer d.Destroy()
// Check that issuing calls that escape the alloc dir returns errors
// List
@ -398,6 +303,7 @@ func TestAllocDir_EscapeChecking(t *testing.T) {
}
}
// Test that `nomad fs` can't read secrets
func TestAllocDir_ReadAt_SecretDir(t *testing.T) {
tmp, err := ioutil.TempDir("", "AllocDir")
if err != nil {
@ -406,10 +312,14 @@ func TestAllocDir_ReadAt_SecretDir(t *testing.T) {
defer os.RemoveAll(tmp)
d := NewAllocDir(tmp)
if err := d.Build(); err != nil {
t.Fatalf("Build() failed: %v", err)
}
defer d.Destroy()
tasks := []*structs.Task{t1, t2}
if err := d.Build(tasks); err != nil {
t.Fatalf("Build(%v) failed: %v", tasks, err)
td := d.NewTaskDir(t1.Name)
if err := td.Build(nil, cstructs.FSIsolationNone); err != nil {
t.Fatalf("TaskDir.Build() failed: %v", err)
}
// ReadAt of secret dir should fail
@ -431,10 +341,7 @@ func TestAllocDir_SplitPath(t *testing.T) {
t.Fatalf("err: %v", err)
}
d := NewAllocDir(dir)
defer d.Destroy()
info, err := d.splitPath(dest)
info, err := splitPath(dest)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -466,10 +373,7 @@ func TestAllocDir_CreateDir(t *testing.T) {
t.Fatalf("err: %v", err)
}
d := NewAllocDir(dir)
defer d.Destroy()
if err := d.createDir(dir1, subdir); err != nil {
if err := createDir(dir1, subdir); err != nil {
t.Fatalf("err: %v", err)
}

@ -0,0 +1,26 @@
package allocdir
import (
"os"
"syscall"
)
// linkDir hardlinks src to dst. The src and dst must be on the same filesystem.
func linkDir(src, dst string) error {
return syscall.Link(src, dst)
}
// unlinkDir removes a directory link.
func unlinkDir(dir string) error {
return syscall.Unlink(dir)
}
// createSecretDir creates the secrets dir folder at the given path
func createSecretDir(dir string) error {
return os.MkdirAll(dir, 0777)
}
// removeSecretDir removes the secrets dir folder
func removeSecretDir(dir string) error {
return os.RemoveAll(dir)
}

@ -0,0 +1,26 @@
package allocdir
import (
"os"
"syscall"
)
// linkDir hardlinks src to dst. The src and dst must be on the same filesystem.
func linkDir(src, dst string) error {
return syscall.Link(src, dst)
}
// unlinkDir removes a directory link.
func unlinkDir(dir string) error {
return syscall.Unlink(dir)
}
// createSecretDir creates the secrets dir folder at the given path
func createSecretDir(dir string) error {
return os.MkdirAll(dir, 0777)
}
// removeSecretDir removes the secrets dir folder
func removeSecretDir(dir string) error {
return os.RemoveAll(dir)
}

@ -0,0 +1,81 @@
package allocdir
import (
"fmt"
"os"
"path/filepath"
"syscall"
"golang.org/x/sys/unix"
)
const (
// secretDirTmpfsSize is the size of the tmpfs per task in MBs
secretDirTmpfsSize = 1
// secretMarker is the filename of the marker created so Nomad doesn't
// try to mount the secrets tmpfs more than once
secretMarker = ".nomad-mount"
)
// linkDir bind mounts src to dst as Linux doesn't support hardlinking
// directories.
func linkDir(src, dst string) error {
if err := os.MkdirAll(dst, 0777); err != nil {
return err
}
return syscall.Mount(src, dst, "", syscall.MS_BIND, "")
}
// unlinkDir unmounts a bind mounted directory as Linux doesn't support
// hardlinking directories.
func unlinkDir(dir string) error {
return syscall.Unmount(dir, 0)
}
// createSecretDir creates the secrets dir folder at the given path using a
// tmpfs
func createSecretDir(dir string) error {
// Only mount the tmpfs if we are root
if unix.Geteuid() == 0 {
if err := os.MkdirAll(dir, 0777); err != nil {
return err
}
// Check for marker file and skip mounting if it exists
marker := filepath.Join(dir, secretMarker)
if _, err := os.Stat(marker); err == nil {
return nil
}
var flags uintptr
flags = syscall.MS_NOEXEC
options := fmt.Sprintf("size=%dm", secretDirTmpfsSize)
if err := syscall.Mount("tmpfs", dir, "tmpfs", flags, options); err != nil {
return os.NewSyscallError("mount", err)
}
// Create the marker file so we don't try to mount more than once
f, err := os.OpenFile(marker, os.O_RDWR|os.O_CREATE, 0666)
if err != nil {
// Hard fail since if this fails something is really wrong
return err
}
f.Close()
return nil
}
return os.MkdirAll(dir, 0777)
}
// removeSecretDir removes the secrets dir folder
func removeSecretDir(dir string) error {
if unix.Geteuid() == 0 {
if err := syscall.Unmount(dir, 0); err != nil {
return os.NewSyscallError("unmount", err)
}
}
return os.RemoveAll(dir)
}

@ -1,6 +1,5 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
// Functions shared between linux/darwin.
package allocdir
import (
@ -27,22 +26,9 @@ var (
TaskSecretsContainerPath = filepath.Join("/", TaskSecrets)
)
func (d *AllocDir) linkOrCopy(src, dst string, perm os.FileMode) error {
// Avoid link/copy if the file already exists in the chroot
// TODO 0.6 clean this up. This was needed because chroot creation fails
// when a process restarts.
if fileInfo, _ := os.Stat(dst); fileInfo != nil {
return nil
}
// Attempt to hardlink.
if err := os.Link(src, dst); err == nil {
return nil
}
return fileCopy(src, dst, perm)
}
func (d *AllocDir) dropDirPermissions(path string) error {
// dropDirPermissions gives full access to a directory to all users and sets
// the owner to nobody.
func dropDirPermissions(path string) error {
if err := os.Chmod(path, 0777); err != nil {
return fmt.Errorf("Chmod(%v) failed: %v", path, err)
}
@ -74,6 +60,7 @@ func (d *AllocDir) dropDirPermissions(path string) error {
return nil
}
// getUid for a user
func getUid(u *user.User) (int, error) {
uid, err := strconv.Atoi(u.Uid)
if err != nil {
@ -83,6 +70,7 @@ func getUid(u *user.User) (int, error) {
return uid, nil
}
// getGid for a user
func getGid(u *user.User) (int, error) {
gid, err := strconv.Atoi(u.Gid)
if err != nil {
@ -91,3 +79,20 @@ func getGid(u *user.User) (int, error) {
return gid, nil
}
// linkOrCopy attempts to hardlink dst to src and fallsback to copying if the
// hardlink fails.
func linkOrCopy(src, dst string, perm os.FileMode) error {
// Avoid link/copy if the file already exists in the chroot
// TODO 0.6 clean this up. This was needed because chroot creation fails
// when a process restarts.
if fileInfo, _ := os.Stat(dst); fileInfo != nil {
return nil
}
// Attempt to hardlink.
if err := os.Link(src, dst); err == nil {
return nil
}
return fileCopy(src, dst, perm)
}

@ -20,42 +20,53 @@ var (
TaskSecretsContainerPath = filepath.Join("c:\\", TaskSecrets)
)
func (d *AllocDir) linkOrCopy(src, dst string, perm os.FileMode) error {
// linkOrCopy always copies src to dst on Windows.
func linkOrCopy(src, dst string, perm os.FileMode) error {
return fileCopy(src, dst, perm)
}
// The windows version does nothing currently.
func (d *AllocDir) mountSharedDir(dir string) error {
func mountSharedDir(dir string) error {
return errors.New("Mount on Windows not supported.")
}
// createSecretDir creates the secrets dir folder at the given path
func (d *AllocDir) createSecretDir(dir string) error {
return os.MkdirAll(dir, 0777)
}
// removeSecretDir removes the secrets dir folder
func (d *AllocDir) removeSecretDir(dir string) error {
return os.RemoveAll(dir)
}
// The windows version does nothing currently.
func (d *AllocDir) dropDirPermissions(path string) error {
func linkDir(src, dst string) error {
return nil
}
// The windows version does nothing currently.
func (d *AllocDir) unmountSharedDir(dir string) error {
func unlinkDir(dir string) error {
return nil
}
// createSecretDir creates the secrets dir folder at the given path
func createSecretDir(dir string) error {
return os.MkdirAll(dir, 0777)
}
// removeSecretDir removes the secrets dir folder
func removeSecretDir(dir string) error {
return os.RemoveAll(dir)
}
// The windows version does nothing currently.
func dropDirPermissions(path string) error {
return nil
}
// The windows version does nothing currently.
func unmountSharedDir(dir string) error {
return nil
}
// MountSpecialDirs mounts the dev and proc file system on the chroot of the
// task. It's a no-op on windows.
func (d *AllocDir) MountSpecialDirs(taskDir string) error {
func MountSpecialDirs(taskDir string) error {
return nil
}
// unmountSpecialDirs unmounts the dev and proc file system from the chroot
func (d *AllocDir) unmountSpecialDirs(taskDir string) error {
func unmountSpecialDirs(taskDir string) error {
return nil
}

client/allocdir/task_dir.go (new file, 213 lines)

@ -0,0 +1,213 @@
package allocdir
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
cstructs "github.com/hashicorp/nomad/client/structs"
)
type TaskDir struct {
// Dir is the path to Task directory on the host
Dir string
// SharedAllocDir is the path to shared alloc directory on the host
// <alloc_dir>/alloc/
SharedAllocDir string
// SharedTaskDir is the path to the shared alloc directory linked into
// the task directory on the host.
// <task_dir>/alloc/
SharedTaskDir string
// LocalDir is the path to the task's local directory on the host
// <task_dir>/local/
LocalDir string
// LogDir is the path to the task's log directory on the host
// <alloc_dir>/alloc/logs/
LogDir string
// SecretsDir is the path to secrets/ directory on the host
// <task_dir>/secrets/
SecretsDir string
}
// newTaskDir creates a TaskDir struct with paths set. Call Build() to
// create paths on disk.
//
// Call AllocDir.NewTaskDir to create new TaskDirs
func newTaskDir(allocDir, taskName string) *TaskDir {
taskDir := filepath.Join(allocDir, taskName)
return &TaskDir{
Dir: taskDir,
SharedAllocDir: filepath.Join(allocDir, SharedAllocName),
LogDir: filepath.Join(allocDir, SharedAllocName, LogDirName),
SharedTaskDir: filepath.Join(taskDir, SharedAllocName),
LocalDir: filepath.Join(taskDir, TaskLocal),
SecretsDir: filepath.Join(taskDir, TaskSecrets),
}
}
// Build default directories and permissions in a task directory.
func (t *TaskDir) Build(chroot map[string]string, fsi cstructs.FSIsolation) error {
if err := os.MkdirAll(t.Dir, 0777); err != nil {
return err
}
// Make the task directory have non-root permissions.
if err := dropDirPermissions(t.Dir); err != nil {
return err
}
// Create a local directory that each task can use.
if err := os.MkdirAll(t.LocalDir, 0777); err != nil {
return err
}
if err := dropDirPermissions(t.LocalDir); err != nil {
return err
}
// Create the directories that should be in every task.
for _, dir := range TaskDirs {
absdir := filepath.Join(t.Dir, dir)
if err := os.MkdirAll(absdir, 0777); err != nil {
return err
}
if err := dropDirPermissions(absdir); err != nil {
return err
}
}
// Always link the shared task directory even though image based
// filesystem isolation doesn't require it. This way we have a
// consistent task dir.
if err := linkDir(t.SharedAllocDir, t.SharedTaskDir); err != nil {
return fmt.Errorf("Failed to mount shared directory for task: %v", err)
}
// Create the secret directory
if err := createSecretDir(t.SecretsDir); err != nil {
return err
}
if err := dropDirPermissions(t.SecretsDir); err != nil {
return err
}
// Build chroot if chroot filesystem isolation is going to be used
if fsi == cstructs.FSIsolationChroot {
if err := t.buildChroot(chroot); err != nil {
return err
}
}
return nil
}
// buildChroot takes a mapping of absolute directory or file paths on the host
// to their intended, relative location within the task directory. This
// attempts hardlink and then defaults to copying. If the path exists on the
// host and can't be embedded an error is returned.
func (t *TaskDir) buildChroot(entries map[string]string) error {
// Link/copy chroot entries
if err := t.embedDirs(entries); err != nil {
return err
}
// Mount special dirs
if err := t.mountSpecialDirs(); err != nil {
return err
}
return nil
}
func (t *TaskDir) embedDirs(entries map[string]string) error {
subdirs := make(map[string]string)
for source, dest := range entries {
// Check to see if directory exists on host.
s, err := os.Stat(source)
if os.IsNotExist(err) {
continue
}
// Embedding a single file
if !s.IsDir() {
if err := createDir(t.Dir, filepath.Dir(dest)); err != nil {
return fmt.Errorf("Couldn't create destination directory %v: %v", dest, err)
}
// Copy the file.
taskEntry := filepath.Join(t.Dir, dest)
if err := linkOrCopy(source, taskEntry, s.Mode().Perm()); err != nil {
return err
}
continue
}
// Create destination directory.
destDir := filepath.Join(t.Dir, dest)
if err := createDir(t.Dir, dest); err != nil {
return fmt.Errorf("Couldn't create destination directory %v: %v", destDir, err)
}
// Enumerate the files in source.
dirEntries, err := ioutil.ReadDir(source)
if err != nil {
return fmt.Errorf("Couldn't read directory %v: %v", source, err)
}
for _, entry := range dirEntries {
hostEntry := filepath.Join(source, entry.Name())
taskEntry := filepath.Join(destDir, filepath.Base(hostEntry))
if entry.IsDir() {
subdirs[hostEntry] = filepath.Join(dest, filepath.Base(hostEntry))
continue
}
// Check if entry exists. This can happen if restarting a failed
// task.
if _, err := os.Lstat(taskEntry); err == nil {
continue
}
if !entry.Mode().IsRegular() {
// If it is a symlink we can create it, otherwise we skip it.
if entry.Mode()&os.ModeSymlink == 0 {
continue
}
link, err := os.Readlink(hostEntry)
if err != nil {
return fmt.Errorf("Couldn't resolve symlink for %v: %v", source, err)
}
if err := os.Symlink(link, taskEntry); err != nil {
// Symlinking twice
if err.(*os.LinkError).Err.Error() != "file exists" {
return fmt.Errorf("Couldn't create symlink: %v", err)
}
}
continue
}
if err := linkOrCopy(hostEntry, taskEntry, entry.Mode().Perm()); err != nil {
return err
}
}
}
// Recurse on self to copy subdirectories.
if len(subdirs) != 0 {
return t.embedDirs(subdirs)
}
return nil
}

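To make the fields above concrete: for a task named "web", with <alloc-dir> standing in for the allocation directory passed to newTaskDir (the task name and the placeholder are illustrative), the computed paths are:

	Dir            <alloc-dir>/web
	SharedAllocDir <alloc-dir>/alloc
	SharedTaskDir  <alloc-dir>/web/alloc    (linked to the shared dir by Build)
	LocalDir       <alloc-dir>/web/local
	LogDir         <alloc-dir>/alloc/logs
	SecretsDir     <alloc-dir>/web/secrets  (a small tmpfs on Linux when running as root)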
@ -0,0 +1,65 @@
package allocdir
import (
"fmt"
"os"
"path/filepath"
"syscall"
"github.com/hashicorp/go-multierror"
)
// mountSpecialDirs mounts the dev and proc file system from the host to the
// chroot
func (t *TaskDir) mountSpecialDirs() error {
// Mount dev
dev := filepath.Join(t.Dir, "dev")
if !pathExists(dev) {
if err := os.MkdirAll(dev, 0777); err != nil {
return fmt.Errorf("Mkdir(%v) failed: %v", dev, err)
}
if err := syscall.Mount("none", dev, "devtmpfs", syscall.MS_RDONLY, ""); err != nil {
return fmt.Errorf("Couldn't mount /dev to %v: %v", dev, err)
}
}
// Mount proc
proc := filepath.Join(t.Dir, "proc")
if !pathExists(proc) {
if err := os.MkdirAll(proc, 0777); err != nil {
return fmt.Errorf("Mkdir(%v) failed: %v", proc, err)
}
if err := syscall.Mount("none", proc, "proc", syscall.MS_RDONLY, ""); err != nil {
return fmt.Errorf("Couldn't mount /proc to %v: %v", proc, err)
}
}
return nil
}
// unmountSpecialDirs unmounts the dev and proc file system from the chroot
func (t *TaskDir) unmountSpecialDirs() error {
errs := new(multierror.Error)
dev := filepath.Join(t.Dir, "dev")
if pathExists(dev) {
if err := syscall.Unmount(dev, 0); err != nil {
errs = multierror.Append(errs, fmt.Errorf("Failed to unmount dev (%v): %v", dev, err))
} else if err := os.RemoveAll(dev); err != nil {
errs = multierror.Append(errs, fmt.Errorf("Failed to delete dev directory (%v): %v", dev, err))
}
}
// Unmount proc.
proc := filepath.Join(t.Dir, "proc")
if pathExists(proc) {
if err := syscall.Unmount(proc, 0); err != nil {
errs = multierror.Append(errs, fmt.Errorf("Failed to unmount proc (%v): %v", proc, err))
} else if err := os.RemoveAll(proc); err != nil {
errs = multierror.Append(errs, fmt.Errorf("Failed to delete proc directory (%v): %v", dev, err))
}
}
return errs.ErrorOrNil()
}

@ -0,0 +1,13 @@
// +build !linux
package allocdir
// currently a noop on non-Linux platforms
func (d *TaskDir) mountSpecialDirs() error {
return nil
}
// currently a noop on non-Linux platforms
func (d *TaskDir) unmountSpecialDirs() error {
return nil
}

@ -0,0 +1,84 @@
package allocdir
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
)
// Test that building a chroot will skip nonexistent directories.
func TestTaskDir_EmbedNonExistent(t *testing.T) {
tmp, err := ioutil.TempDir("", "AllocDir")
if err != nil {
t.Fatalf("Couldn't create temp dir: %v", err)
}
defer os.RemoveAll(tmp)
d := NewAllocDir(tmp)
defer d.Destroy()
td := d.NewTaskDir(t1.Name)
if err := d.Build(); err != nil {
t.Fatalf("Build() failed: %v", err)
}
fakeDir := "/foobarbaz"
mapping := map[string]string{fakeDir: fakeDir}
if err := td.embedDirs(mapping); err != nil {
t.Fatalf("embedDirs(%v) should should skip %v since it does not exist", mapping, fakeDir)
}
}
// Test that building a chroot copies files from the host into the task dir.
func TestTaskDir_EmbedDirs(t *testing.T) {
tmp, err := ioutil.TempDir("", "AllocDir")
if err != nil {
t.Fatalf("Couldn't create temp dir: %v", err)
}
defer os.RemoveAll(tmp)
d := NewAllocDir(tmp)
defer d.Destroy()
td := d.NewTaskDir(t1.Name)
if err := d.Build(); err != nil {
t.Fatalf("Build() failed: %v", err)
}
// Create a fake host directory, with a file, and a subfolder that contains
// a file.
host, err := ioutil.TempDir("", "AllocDirHost")
if err != nil {
t.Fatalf("Couldn't create temp dir: %v", err)
}
defer os.RemoveAll(host)
subDirName := "subdir"
subDir := filepath.Join(host, subDirName)
if err := os.MkdirAll(subDir, 0777); err != nil {
t.Fatalf("Failed to make subdir %v: %v", subDir, err)
}
file := "foo"
subFile := "bar"
if err := ioutil.WriteFile(filepath.Join(host, file), []byte{'a'}, 0777); err != nil {
t.Fatalf("Coudn't create file in host dir %v: %v", host, err)
}
if err := ioutil.WriteFile(filepath.Join(subDir, subFile), []byte{'a'}, 0777); err != nil {
t.Fatalf("Coudn't create file in host subdir %v: %v", subDir, err)
}
// Create mapping from host dir to task dir.
taskDest := "bin/test/"
mapping := map[string]string{host: taskDest}
if err := td.embedDirs(mapping); err != nil {
t.Fatalf("embedDirs(%v) failed: %v", mapping, err)
}
exp := []string{filepath.Join(td.Dir, taskDest, file), filepath.Join(td.Dir, taskDest, subDirName, subFile)}
for _, f := range exp {
if _, err := os.Stat(f); os.IsNotExist(err) {
t.Fatalf("File %v not embeded: %v", f, err)
}
}
}

@ -39,6 +39,19 @@ var (
"qemu",
"java",
}, ",")
// A mapping of directories on the host OS to attempt to embed inside each
// task's chroot.
DefaultChrootEnv = map[string]string{
"/bin": "/bin",
"/etc": "/etc",
"/lib": "/lib",
"/lib32": "/lib32",
"/lib64": "/lib64",
"/run/resolvconf": "/run/resolvconf",
"/sbin": "/sbin",
"/usr": "/usr",
}
)
// RPCHandler can be provided to the Client if there is a local server

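A minimal sketch of how this default mapping feeds into the new TaskDir API when a driver asks for chroot isolation; the variables allocDir and task stand in for the AllocRunner/TaskRunner plumbing shown earlier, and any configured per-client override would be passed in place of DefaultChrootEnv.

// Illustrative only: build a task directory as a chroot from the default
// host-path mappings defined above.
td := allocDir.NewTaskDir(task.Name)
if err := td.Build(config.DefaultChrootEnv, cstructs.FSIsolationChroot); err != nil {
	return fmt.Errorf("failed to build chroot for task %q: %v", task.Name, err)
}
// td.Dir now contains the embedded host paths (/bin, /etc, /lib, ...) plus
// local/, secrets/, the linked alloc/ shared dir, and mounted dev/ and proc/.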
@ -347,6 +347,10 @@ func (d *DockerDriver) Abilities() DriverAbilities {
}
}
func (d *DockerDriver) FSIsolation() cstructs.FSIsolation {
return cstructs.FSIsolationImage
}
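The Docker driver can rely on image-based isolation, so it reports FSIsolationImage and no chroot is built for its tasks. A driver that runs tasks directly on the host would report FSIsolationChroot (or FSIsolationNone) instead; the snippet below only illustrates the shape of that method and is not a hunk from this commit.

// Illustrative only: how a chroot-based driver would state its preference.
func (d *ExecDriver) FSIsolation() cstructs.FSIsolation {
	return cstructs.FSIsolationChroot
}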
func (d *DockerDriver) Prestart(ctx *ExecContext, task *structs.Task) error {
// Set environment variables.
d.taskEnv.SetAllocDir(allocdir.SharedAllocContainerPath).
@ -357,18 +361,13 @@ func (d *DockerDriver) Prestart(ctx *ExecContext, task *structs.Task) error {
return err
}
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
// Initialize docker API clients
client, _, err := d.dockerClients()
if err != nil {
return fmt.Errorf("Failed to connect to docker daemon: %s", err)
}
if err := d.createImage(driverConfig, client, taskDir); err != nil {
if err := d.createImage(driverConfig, client, ctx.TaskDir); err != nil {
return err
}
@ -393,11 +392,7 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginLogFile := filepath.Join(ctx.TaskDir.Dir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
@ -410,8 +405,9 @@ func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle
TaskEnv: d.taskEnv,
Task: task,
Driver: "docker",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
LogDir: ctx.TaskDir.LogDir,
TaskDir: ctx.TaskDir.Dir,
PortLowerBound: d.config.ClientMinPort,
PortUpperBound: d.config.ClientMaxPort,
}
@ -604,24 +600,12 @@ func (d *DockerDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool
return true, nil
}
func (d *DockerDriver) containerBinds(driverConfig *DockerDriverConfig, alloc *allocdir.AllocDir,
func (d *DockerDriver) containerBinds(driverConfig *DockerDriverConfig, taskDir *allocdir.TaskDir,
task *structs.Task) ([]string, error) {
shared := alloc.SharedDir
taskDir, ok := alloc.TaskDirs[task.Name]
if !ok {
return nil, fmt.Errorf("Failed to find task local directory: %v", task.Name)
}
local := filepath.Join(taskDir, allocdir.TaskLocal)
secret, err := alloc.GetSecretDir(task.Name)
if err != nil {
return nil, err
}
allocDirBind := fmt.Sprintf("%s:%s", shared, allocdir.SharedAllocContainerPath)
taskLocalBind := fmt.Sprintf("%s:%s", local, allocdir.TaskLocalContainerPath)
secretDirBind := fmt.Sprintf("%s:%s", secret, allocdir.TaskSecretsContainerPath)
allocDirBind := fmt.Sprintf("%s:%s", taskDir.SharedAllocDir, allocdir.SharedAllocContainerPath)
taskLocalBind := fmt.Sprintf("%s:%s", taskDir.LocalDir, allocdir.TaskLocalContainerPath)
secretDirBind := fmt.Sprintf("%s:%s", taskDir.SecretsDir, allocdir.TaskSecretsContainerPath)
binds := []string{allocDirBind, taskLocalBind, secretDirBind}
volumesEnabled := d.config.ReadBoolDefault(dockerVolumesConfigOption, dockerVolumesConfigDefault)
@ -647,7 +631,7 @@ func (d *DockerDriver) containerBinds(driverConfig *DockerDriverConfig, alloc *a
// Relative paths are always allowed as they mount within a container
// Expand path relative to alloc dir
parts[0] = filepath.Join(taskDir, parts[0])
parts[0] = filepath.Join(taskDir.Dir, parts[0])
binds = append(binds, strings.Join(parts, ":"))
}
@ -672,7 +656,7 @@ func (d *DockerDriver) createContainerConfig(ctx *ExecContext, task *structs.Tas
return c, fmt.Errorf("task.Resources is empty")
}
binds, err := d.containerBinds(driverConfig, ctx.AllocDir, task)
binds, err := d.containerBinds(driverConfig, ctx.TaskDir, task)
if err != nil {
return c, err
}
@ -900,7 +884,7 @@ func (d *DockerDriver) Periodic() (bool, time.Duration) {
// createImage creates a docker image either by pulling it from a registry or by
// loading it from the file system
func (d *DockerDriver) createImage(driverConfig *DockerDriverConfig, client *docker.Client, taskDir string) error {
func (d *DockerDriver) createImage(driverConfig *DockerDriverConfig, client *docker.Client, taskDir *allocdir.TaskDir) error {
image := driverConfig.ImageName
repo, tag := docker.ParseRepositoryTag(image)
if tag == "" {
@ -979,10 +963,10 @@ func (d *DockerDriver) pullImage(driverConfig *DockerDriverConfig, client *docke
}
// loadImage creates an image by loading it from the file system
func (d *DockerDriver) loadImage(driverConfig *DockerDriverConfig, client *docker.Client, taskDir string) error {
func (d *DockerDriver) loadImage(driverConfig *DockerDriverConfig, client *docker.Client, taskDir *allocdir.TaskDir) error {
var errors multierror.Error
for _, image := range driverConfig.LoadImages {
archive := filepath.Join(taskDir, allocdir.TaskLocal, image)
archive := filepath.Join(taskDir.LocalDir, image)
d.logger.Printf("[DEBUG] driver.docker: loading image from: %v", archive)
f, err := os.Open(archive)
if err != nil {

View File

@ -18,6 +18,7 @@ import (
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/env"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
@ -50,7 +51,8 @@ func dockerTask() (*structs.Task, int, int) {
docker_reserved += 1
docker_dynamic += 1
return &structs.Task{
Name: "redis-demo",
Name: "redis-demo",
Driver: "docker",
Config: map[string]interface{}{
"image": "busybox",
"load": []string{"busybox.tar"},
@ -92,28 +94,28 @@ func dockerSetup(t *testing.T, task *structs.Task) (*docker.Client, DriverHandle
}
func dockerSetupWithClient(t *testing.T, task *structs.Task, client *docker.Client) (*docker.Client, DriverHandle, func()) {
driverCtx, execCtx := testDriverContexts(task)
driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
driver := NewDockerDriver(driverCtx)
copyImage(execCtx, task, "busybox.tar", t)
tctx := testDriverContexts(t, task)
tctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
driver := NewDockerDriver(tctx.DriverCtx)
copyImage(t, tctx.ExecCtx.TaskDir, "busybox.tar")
if err := driver.Prestart(execCtx, task); err != nil {
execCtx.AllocDir.Destroy()
if err := driver.Prestart(tctx.ExecCtx, task); err != nil {
tctx.AllocDir.Destroy()
t.Fatalf("error in prestart: %v", err)
}
handle, err := driver.Start(execCtx, task)
handle, err := driver.Start(tctx.ExecCtx, task)
if err != nil {
execCtx.AllocDir.Destroy()
tctx.AllocDir.Destroy()
t.Fatalf("Failed to start driver: %s\nStack\n%s", err, debug.Stack())
}
if handle == nil {
execCtx.AllocDir.Destroy()
tctx.AllocDir.Destroy()
t.Fatalf("handle is nil\nStack\n%s", debug.Stack())
}
cleanup := func() {
handle.Kill()
execCtx.AllocDir.Destroy()
tctx.AllocDir.Destroy()
}
return client, handle, cleanup
@ -133,10 +135,10 @@ func newTestDockerClient(t *testing.T) *docker.Client {
// This test should always pass, even if docker daemon is not available
func TestDockerDriver_Fingerprint(t *testing.T) {
driverCtx, execCtx := testDriverContexts(&structs.Task{Name: "foo", Resources: basicResources})
driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer execCtx.AllocDir.Destroy()
d := NewDockerDriver(driverCtx)
ctx := testDriverContexts(t, &structs.Task{Name: "foo", Driver: "docker", Resources: basicResources})
ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer ctx.AllocDir.Destroy()
d := NewDockerDriver(ctx.DriverCtx)
node := &structs.Node{
Attributes: make(map[string]string),
}
@ -159,7 +161,8 @@ func TestDockerDriver_StartOpen_Wait(t *testing.T) {
}
task := &structs.Task{
Name: "nc-demo",
Name: "nc-demo",
Driver: "docker",
Config: map[string]interface{}{
"load": []string{"busybox.tar"},
"image": "busybox",
@ -173,16 +176,16 @@ func TestDockerDriver_StartOpen_Wait(t *testing.T) {
Resources: basicResources,
}
driverCtx, execCtx := testDriverContexts(task)
driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer execCtx.AllocDir.Destroy()
d := NewDockerDriver(driverCtx)
copyImage(execCtx, task, "busybox.tar", t)
ctx := testDriverContexts(t, task)
ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer ctx.AllocDir.Destroy()
d := NewDockerDriver(ctx.DriverCtx)
copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar")
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -192,7 +195,7 @@ func TestDockerDriver_StartOpen_Wait(t *testing.T) {
defer handle.Kill()
// Attempt to open
handle2, err := d.Open(execCtx, handle.ID())
handle2, err := d.Open(ctx.ExecCtx, handle.ID())
if err != nil {
t.Fatalf("err: %v", err)
}
@ -203,7 +206,8 @@ func TestDockerDriver_StartOpen_Wait(t *testing.T) {
func TestDockerDriver_Start_Wait(t *testing.T) {
task := &structs.Task{
Name: "nc-demo",
Name: "nc-demo",
Driver: "docker",
Config: map[string]interface{}{
"load": []string{"busybox.tar"},
"image": "busybox",
@ -244,7 +248,8 @@ func TestDockerDriver_Start_LoadImage(t *testing.T) {
t.SkipNow()
}
task := &structs.Task{
Name: "busybox-demo",
Name: "busybox-demo",
Driver: "docker",
Config: map[string]interface{}{
"image": "busybox",
"load": []string{"busybox.tar"},
@ -263,18 +268,18 @@ func TestDockerDriver_Start_LoadImage(t *testing.T) {
},
}
driverCtx, execCtx := testDriverContexts(task)
driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer execCtx.AllocDir.Destroy()
d := NewDockerDriver(driverCtx)
ctx := testDriverContexts(t, task)
ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer ctx.AllocDir.Destroy()
d := NewDockerDriver(ctx.DriverCtx)
// Copy the image into the task's directory
copyImage(execCtx, task, "busybox.tar", t)
copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar")
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -293,7 +298,7 @@ func TestDockerDriver_Start_LoadImage(t *testing.T) {
}
// Check that data was written to the shared alloc directory.
outputFile := filepath.Join(execCtx.AllocDir.LogDir(), "busybox-demo.stdout.0")
outputFile := filepath.Join(ctx.ExecCtx.TaskDir.LogDir, "busybox-demo.stdout.0")
act, err := ioutil.ReadFile(outputFile)
if err != nil {
t.Fatalf("Couldn't read expected output: %v", err)
@ -311,7 +316,8 @@ func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
t.SkipNow()
}
task := &structs.Task{
Name: "busybox-demo",
Name: "busybox-demo",
Driver: "docker",
Config: map[string]interface{}{
"image": "127.0.1.1:32121/foo", // bad path
"command": "/bin/echo",
@ -329,12 +335,12 @@ func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
},
}
driverCtx, execCtx := testDriverContexts(task)
driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer execCtx.AllocDir.Destroy()
d := NewDockerDriver(driverCtx)
ctx := testDriverContexts(t, task)
ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer ctx.AllocDir.Destroy()
d := NewDockerDriver(ctx.DriverCtx)
err := d.Prestart(execCtx, task)
err := d.Prestart(ctx.ExecCtx, task)
if err == nil {
t.Fatalf("want error in prestart: %v", err)
}
@ -357,7 +363,8 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
exp := []byte{'w', 'i', 'n'}
file := "output.txt"
task := &structs.Task{
Name: "nc-demo",
Name: "nc-demo",
Driver: "docker",
Config: map[string]interface{}{
"image": "busybox",
"load": []string{"busybox.tar"},
@ -378,16 +385,16 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
},
}
driverCtx, execCtx := testDriverContexts(task)
driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer execCtx.AllocDir.Destroy()
d := NewDockerDriver(driverCtx)
copyImage(execCtx, task, "busybox.tar", t)
ctx := testDriverContexts(t, task)
ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer ctx.AllocDir.Destroy()
d := NewDockerDriver(ctx.DriverCtx)
copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar")
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -406,7 +413,7 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
}
// Check that data was written to the shared alloc directory.
outputFile := filepath.Join(execCtx.AllocDir.SharedDir, file)
outputFile := filepath.Join(ctx.AllocDir.SharedDir, file)
act, err := ioutil.ReadFile(outputFile)
if err != nil {
t.Fatalf("Couldn't read expected output: %v", err)
@ -419,7 +426,8 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
task := &structs.Task{
Name: "nc-demo",
Name: "nc-demo",
Driver: "docker",
Config: map[string]interface{}{
"image": "busybox",
"load": []string{"busybox.tar"},
@ -471,16 +479,16 @@ func TestDockerDriver_StartN(t *testing.T) {
// Let's spin up a bunch of things
var err error
for idx, task := range taskList {
driverCtx, execCtx := testDriverContexts(task)
driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer execCtx.AllocDir.Destroy()
d := NewDockerDriver(driverCtx)
copyImage(execCtx, task, "busybox.tar", t)
ctx := testDriverContexts(t, task)
ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer ctx.AllocDir.Destroy()
d := NewDockerDriver(ctx.DriverCtx)
copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar")
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart #%d: %v", idx+1, err)
}
handles[idx], err = d.Start(execCtx, task)
handles[idx], err = d.Start(ctx.ExecCtx, task)
if err != nil {
t.Errorf("Failed starting task #%d: %s", idx+1, err)
}
@ -529,18 +537,18 @@ func TestDockerDriver_StartNVersions(t *testing.T) {
// Let's spin up a bunch of things
var err error
for idx, task := range taskList {
driverCtx, execCtx := testDriverContexts(task)
driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer execCtx.AllocDir.Destroy()
d := NewDockerDriver(driverCtx)
copyImage(execCtx, task, "busybox.tar", t)
copyImage(execCtx, task, "busybox_musl.tar", t)
copyImage(execCtx, task, "busybox_glibc.tar", t)
ctx := testDriverContexts(t, task)
ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer ctx.AllocDir.Destroy()
d := NewDockerDriver(ctx.DriverCtx)
copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar")
copyImage(t, ctx.ExecCtx.TaskDir, "busybox_musl.tar")
copyImage(t, ctx.ExecCtx.TaskDir, "busybox_glibc.tar")
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart #%d: %v", idx+1, err)
}
handles[idx], err = d.Start(execCtx, task)
handles[idx], err = d.Start(ctx.ExecCtx, task)
if err != nil {
t.Errorf("Failed starting task #%d: %s", idx+1, err)
}
@ -582,7 +590,8 @@ func TestDockerDriver_NetworkMode_Host(t *testing.T) {
expected := "host"
task := &structs.Task{
Name: "nc-demo",
Name: "nc-demo",
Driver: "docker",
Config: map[string]interface{}{
"image": "busybox",
"load": []string{"busybox.tar"},
@ -848,8 +857,9 @@ func TestDockerDriver_PortsMapping(t *testing.T) {
func TestDockerDriver_User(t *testing.T) {
task := &structs.Task{
Name: "redis-demo",
User: "alice",
Name: "redis-demo",
User: "alice",
Driver: "docker",
Config: map[string]interface{}{
"image": "busybox",
"load": []string{"busybox.tar"},
@ -870,19 +880,19 @@ func TestDockerDriver_User(t *testing.T) {
t.SkipNow()
}
driverCtx, execCtx := testDriverContexts(task)
driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
driver := NewDockerDriver(driverCtx)
defer execCtx.AllocDir.Destroy()
copyImage(execCtx, task, "busybox.tar", t)
ctx := testDriverContexts(t, task)
ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
driver := NewDockerDriver(ctx.DriverCtx)
defer ctx.AllocDir.Destroy()
copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar")
if err := driver.Prestart(execCtx, task); err != nil {
if err := driver.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
// It should fail because the user "alice" does not exist on the given
// image.
handle, err := driver.Start(execCtx, task)
handle, err := driver.Start(ctx.ExecCtx, task)
if err == nil {
handle.Kill()
t.Fatalf("Should've failed")
@ -895,7 +905,8 @@ func TestDockerDriver_User(t *testing.T) {
func TestDockerDriver_CleanupContainer(t *testing.T) {
task := &structs.Task{
Name: "redis-demo",
Name: "redis-demo",
Driver: "docker",
Config: map[string]interface{}{
"image": "busybox",
"load": []string{"busybox.tar"},
@ -989,7 +1000,8 @@ func TestDockerDriver_Stats(t *testing.T) {
func TestDockerDriver_Signal(t *testing.T) {
task := &structs.Task{
Name: "redis-demo",
Name: "redis-demo",
Driver: "docker",
Config: map[string]interface{}{
"image": "busybox",
"load": []string{"busybox.tar"},
@ -1006,15 +1018,15 @@ func TestDockerDriver_Signal(t *testing.T) {
},
}
driverCtx, execCtx := testDriverContexts(task)
driverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer execCtx.AllocDir.Destroy()
d := NewDockerDriver(driverCtx)
ctx := testDriverContexts(t, task)
ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
defer ctx.AllocDir.Destroy()
d := NewDockerDriver(ctx.DriverCtx)
// Copy the image into the task's directory
copyImage(execCtx, task, "busybox.tar", t)
copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar")
testFile := filepath.Join(execCtx.AllocDir.TaskDirs["redis-demo"], allocdir.TaskLocal, "test.sh")
testFile := filepath.Join(ctx.ExecCtx.TaskDir.LocalDir, "test.sh")
testData := []byte(`
at_term() {
echo 'Terminated.'
@ -1029,10 +1041,10 @@ done
fmt.Errorf("Failed to write data")
}
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -1058,7 +1070,7 @@ done
}
// Check the log file to see it exited because of the signal
outputFile := filepath.Join(execCtx.AllocDir.LogDir(), "redis-demo.stdout.0")
outputFile := filepath.Join(ctx.ExecCtx.TaskDir.LogDir, "redis-demo.stdout.0")
act, err := ioutil.ReadFile(outputFile)
if err != nil {
t.Fatalf("Couldn't read expected output: %v", err)
@ -1081,8 +1093,9 @@ func setupDockerVolumes(t *testing.T, cfg *config.Config, hostpath string) (*str
containerFile := filepath.Join(containerPath, randfn)
task := &structs.Task{
Name: "ls",
Env: map[string]string{"VOL_PATH": containerPath},
Name: "ls",
Env: map[string]string{"VOL_PATH": containerPath},
Driver: "docker",
Config: map[string]interface{}{
"image": "busybox",
"load": []string{"busybox.tar"},
@ -1097,18 +1110,27 @@ func setupDockerVolumes(t *testing.T, cfg *config.Config, hostpath string) (*str
Resources: basicResources,
}
// Build alloc and task directory structure
allocDir := allocdir.NewAllocDir(filepath.Join(cfg.AllocDir, structs.GenerateUUID()))
allocDir.Build([]*structs.Task{task})
if err := allocDir.Build(); err != nil {
t.Fatalf("failed to build alloc dir: %v", err)
}
taskDir := allocDir.NewTaskDir(task.Name)
if err := taskDir.Build(nil, cstructs.FSIsolationImage); err != nil {
allocDir.Destroy()
t.Fatalf("failed to build task dir: %v", err)
}
alloc := mock.Alloc()
execCtx := NewExecContext(allocDir, alloc.ID)
execCtx := NewExecContext(taskDir, alloc.ID)
cleanup := func() {
execCtx.AllocDir.Destroy()
allocDir.Destroy()
if filepath.IsAbs(hostpath) {
os.RemoveAll(hostpath)
}
}
taskEnv, err := GetTaskEnv(allocDir, cfg.Node, task, alloc, cfg, "")
taskEnv, err := GetTaskEnv(taskDir, cfg.Node, task, alloc, cfg, "")
if err != nil {
cleanup()
t.Fatalf("Failed to get task env: %v", err)
@ -1120,7 +1142,7 @@ func setupDockerVolumes(t *testing.T, cfg *config.Config, hostpath string) (*str
}
driverCtx := NewDriverContext(task.Name, cfg, cfg.Node, testLogger(), taskEnv, emitter)
driver := NewDockerDriver(driverCtx)
copyImage(execCtx, task, "busybox.tar", t)
copyImage(t, taskDir, "busybox.tar")
return task, driver, execCtx, hostfile, cleanup
}
@ -1172,12 +1194,7 @@ func TestDockerDriver_VolumesDisabled(t *testing.T) {
t.Fatalf("timeout")
}
taskDir, ok := execCtx.AllocDir.TaskDirs[task.Name]
if !ok {
t.Fatalf("Failed to get task dir")
}
if _, err := ioutil.ReadFile(filepath.Join(taskDir, fn)); err != nil {
if _, err := ioutil.ReadFile(filepath.Join(execCtx.TaskDir.Dir, fn)); err != nil {
t.Fatalf("unexpected error reading %s: %v", fn, err)
}
}
@ -1218,8 +1235,7 @@ func TestDockerDriver_VolumesEnabled(t *testing.T) {
}
}
func copyImage(execCtx *ExecContext, task *structs.Task, image string, t *testing.T) {
taskDir, _ := execCtx.AllocDir.TaskDirs[task.Name]
dst := filepath.Join(taskDir, allocdir.TaskLocal, image)
func copyImage(t *testing.T, taskDir *allocdir.TaskDir, image string) {
dst := filepath.Join(taskDir.LocalDir, image)
copyFile(filepath.Join("./test-resources/docker", image), dst, t)
}

View File

@ -4,7 +4,6 @@ import (
"fmt"
"log"
"os"
"path/filepath"
"strings"
"github.com/hashicorp/nomad/client/allocdir"
@ -67,6 +66,9 @@ type Driver interface {
// Abilities returns the abilities of the driver
Abilities() DriverAbilities
// FSIsolation returns the method of filesystem isolation used
FSIsolation() cstructs.FSIsolation
}
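For illustration only (no such driver exists in this diff), a driver that runs tasks directly on the host and needs no filesystem isolation would report FSIsolationNone:
// hostDriver is hypothetical and exists only for this example.
func (d *hostDriver) FSIsolation() cstructs.FSIsolation {
    return cstructs.FSIsolationNone
}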
// DriverAbilities marks the abilities the driver has.
@ -136,23 +138,23 @@ type DriverHandle interface {
Signal(s os.Signal) error
}
// ExecContext is shared between drivers within an allocation
// ExecContext is a task's execution context
type ExecContext struct {
// AllocDir contains information about the alloc directory structure.
AllocDir *allocdir.AllocDir
// TaskDir contains information about the task directory structure.
TaskDir *allocdir.TaskDir
// AllocID is the ID of the allocation the task is a member of
AllocID string
}
// NewExecContext is used to create a new execution context
func NewExecContext(alloc *allocdir.AllocDir, allocID string) *ExecContext {
return &ExecContext{AllocDir: alloc, AllocID: allocID}
func NewExecContext(td *allocdir.TaskDir, allocID string) *ExecContext {
return &ExecContext{TaskDir: td, AllocID: allocID}
}
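A minimal sketch of the wiring a caller performs with the new types (the helper name is illustrative; it assumes an AllocDir that has already been built, and TaskDir.Build must still be called with the driver's chroot mapping and isolation mode before the task starts):
func newTaskExecContext(allocDir *allocdir.AllocDir, allocID, taskName string) *ExecContext {
    // NewTaskDir creates the TaskDir handle; TaskDir.Build creates the
    // directories on disk.
    td := allocDir.NewTaskDir(taskName)
    return NewExecContext(td, allocID)
}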
// GetTaskEnv converts the alloc dir, the node, task and alloc into a
// TaskEnvironment.
func GetTaskEnv(allocDir *allocdir.AllocDir, node *structs.Node,
func GetTaskEnv(taskDir *allocdir.TaskDir, node *structs.Node,
task *structs.Task, alloc *structs.Allocation, conf *config.Config,
vaultToken string) (*env.TaskEnvironment, error) {
@ -162,15 +164,10 @@ func GetTaskEnv(allocDir *allocdir.AllocDir, node *structs.Node,
SetEnvvars(task.Env).
SetTaskName(task.Name)
if allocDir != nil {
env.SetAllocDir(allocDir.SharedDir)
taskdir, ok := allocDir.TaskDirs[task.Name]
if !ok {
return nil, fmt.Errorf("failed to get task directory for task %q", task.Name)
}
env.SetTaskLocalDir(filepath.Join(taskdir, allocdir.TaskLocal))
env.SetSecretsDir(filepath.Join(taskdir, allocdir.TaskSecrets))
if taskDir != nil {
env.SetAllocDir(taskDir.SharedAllocDir)
env.SetTaskLocalDir(taskDir.LocalDir)
env.SetSecretsDir(taskDir.SecretsDir)
}
if task.Resources != nil {

View File

@ -62,7 +62,6 @@ func copyFile(src, dst string, t *testing.T) {
if err := out.Sync(); err != nil {
t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
}
return
}
func testLogger() *log.Logger {
@ -77,16 +76,46 @@ func testConfig() *config.Config {
return conf
}
func testDriverContexts(task *structs.Task) (*DriverContext, *ExecContext) {
type testContext struct {
AllocDir *allocdir.AllocDir
DriverCtx *DriverContext
ExecCtx *ExecContext
}
// testDriverContext sets up an alloc dir, task dir, DriverContext, and ExecContext.
//
// It is up to the caller to call AllocDir.Destroy to cleanup.
func testDriverContexts(t *testing.T, task *structs.Task) *testContext {
cfg := testConfig()
allocDir := allocdir.NewAllocDir(filepath.Join(cfg.AllocDir, structs.GenerateUUID()))
allocDir.Build([]*structs.Task{task})
if err := allocDir.Build(); err != nil {
t.Fatalf("AllocDir.Build() failed: %v", err)
}
alloc := mock.Alloc()
execCtx := NewExecContext(allocDir, alloc.ID)
taskEnv, err := GetTaskEnv(allocDir, cfg.Node, task, alloc, cfg, "")
// Build a temp driver so we can call FSIsolation and build the task dir
tmpdrv, err := NewDriver(task.Driver, NewEmptyDriverContext())
if err != nil {
return nil, nil
allocDir.Destroy()
t.Fatalf("NewDriver(%q, nil) failed: %v", task.Driver, err)
return nil
}
// Build the task dir
td := allocDir.NewTaskDir(task.Name)
if err := td.Build(config.DefaultChrootEnv, tmpdrv.FSIsolation()); err != nil {
allocDir.Destroy()
t.Fatalf("TaskDir.Build(%#v, %q) failed: %v", config.DefaultChrootEnv, tmpdrv.FSIsolation())
return nil
}
execCtx := NewExecContext(td, alloc.ID)
taskEnv, err := GetTaskEnv(td, cfg.Node, task, alloc, cfg, "")
if err != nil {
allocDir.Destroy()
t.Fatalf("GetTaskEnv() failed: %v", err)
return nil
}
logger := testLogger()
@ -94,12 +123,14 @@ func testDriverContexts(task *structs.Task) (*DriverContext, *ExecContext) {
logger.Printf("[EVENT] "+m, args...)
}
driverCtx := NewDriverContext(task.Name, cfg, cfg.Node, logger, taskEnv, emitter)
return driverCtx, execCtx
return &testContext{allocDir, driverCtx, execCtx}
}
func TestDriver_GetTaskEnv(t *testing.T) {
task := &structs.Task{
Name: "Foo",
Name: "Foo",
Driver: "mock",
Env: map[string]string{
"HELLO": "world",
"lorem": "ipsum",

View File

@ -44,7 +44,7 @@ type execHandle struct {
executor executor.Executor
isolationConfig *dstructs.IsolationConfig
userPid int
allocDir *allocdir.AllocDir
taskDir *allocdir.TaskDir
killTimeout time.Duration
maxKillTimeout time.Duration
logger *log.Logger
@ -86,11 +86,15 @@ func (d *ExecDriver) Abilities() DriverAbilities {
}
}
func (d *ExecDriver) FSIsolation() cstructs.FSIsolation {
return cstructs.FSIsolationChroot
}
func (d *ExecDriver) Periodic() (bool, time.Duration) {
return true, 15 * time.Second
}
func (d *ExecDriver) Prestart(execctx *ExecContext, task *structs.Task) error {
func (d *ExecDriver) Prestart(ctx *ExecContext, task *structs.Task) error {
return nil
}
@ -106,17 +110,11 @@ func (d *ExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
return nil, err
}
// Get the task directory for storing the executor logs.
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
bin, err := discover.NomadExecutable()
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginLogFile := filepath.Join(ctx.TaskDir.Dir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
@ -126,12 +124,12 @@ func (d *ExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "exec",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
ChrootEnv: d.config.ChrootEnv,
Task: task,
TaskEnv: d.taskEnv,
Driver: "exec",
AllocID: ctx.AllocID,
LogDir: ctx.TaskDir.LogDir,
TaskDir: ctx.TaskDir.Dir,
Task: task,
}
if err := exec.SetContext(executorCtx); err != nil {
pluginClient.Kill()
@ -160,7 +158,6 @@ func (d *ExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
pluginClient: pluginClient,
userPid: ps.Pid,
executor: exec,
allocDir: ctx.AllocDir,
isolationConfig: ps.IsolationConfig,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
@ -181,8 +178,6 @@ type execId struct {
KillTimeout time.Duration
MaxKillTimeout time.Duration
UserPid int
TaskDir string
AllocDir *allocdir.AllocDir
IsolationConfig *dstructs.IsolationConfig
PluginConfig *PluginReattachConfig
}
@ -210,9 +205,6 @@ func (d *ExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, erro
merrs.Errors = append(merrs.Errors, fmt.Errorf("destroying cgroup failed: %v", e))
}
}
if e := ctx.AllocDir.UnmountAll(); e != nil {
merrs.Errors = append(merrs.Errors, e)
}
return nil, fmt.Errorf("error connecting to plugin: %v", merrs.ErrorOrNil())
}
@ -223,7 +215,6 @@ func (d *ExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, erro
pluginClient: client,
executor: exec,
userPid: id.UserPid,
allocDir: id.AllocDir,
isolationConfig: id.IsolationConfig,
logger: d.logger,
version: id.Version,
@ -246,7 +237,6 @@ func (h *execHandle) ID() string {
MaxKillTimeout: h.maxKillTimeout,
PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
UserPid: h.userPid,
AllocDir: h.allocDir,
IsolationConfig: h.isolationConfig,
}
@ -284,17 +274,15 @@ func (h *execHandle) Kill() error {
select {
case <-h.doneCh:
return nil
case <-time.After(h.killTimeout):
if h.pluginClient.Exited() {
return nil
break
}
if err := h.executor.Exit(); err != nil {
return fmt.Errorf("executor Exit failed: %v", err)
}
return nil
}
return nil
}
func (h *execHandle) Stats() (*cstructs.TaskResourceUsage, error) {
@ -317,9 +305,6 @@ func (h *execHandle) run() {
h.logger.Printf("[ERR] driver.exec: destroying resource container failed: %v", e)
}
}
if e := h.allocDir.UnmountAll(); e != nil {
h.logger.Printf("[ERR] driver.exec: unmounting dev,proc and alloc dirs failed: %v", e)
}
}
// Remove services

View File

@ -24,11 +24,12 @@ func TestExecDriver_Fingerprint(t *testing.T) {
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "foo",
Driver: "exec",
Resources: structs.DefaultResources(),
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewExecDriver(ctx.DriverCtx)
node := &structs.Node{
Attributes: map[string]string{
"unique.cgroup.mountpoint": "/sys/fs/cgroup",
@ -49,7 +50,8 @@ func TestExecDriver_Fingerprint(t *testing.T) {
func TestExecDriver_StartOpen_Wait(t *testing.T) {
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "sleep",
Name: "sleep",
Driver: "exec",
Config: map[string]interface{}{
"command": "/bin/sleep",
"args": []string{"5"},
@ -61,14 +63,14 @@ func TestExecDriver_StartOpen_Wait(t *testing.T) {
Resources: basicResources,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewExecDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -77,7 +79,7 @@ func TestExecDriver_StartOpen_Wait(t *testing.T) {
}
// Attempt to open
handle2, err := d.Open(execCtx, handle.ID())
handle2, err := d.Open(ctx.ExecCtx, handle.ID())
if err != nil {
t.Fatalf("err: %v", err)
}
@ -92,7 +94,8 @@ func TestExecDriver_StartOpen_Wait(t *testing.T) {
func TestExecDriver_KillUserPid_OnPluginReconnectFailure(t *testing.T) {
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "sleep",
Name: "sleep",
Driver: "exec",
Config: map[string]interface{}{
"command": "/bin/sleep",
"args": []string{"1000000"},
@ -104,14 +107,14 @@ func TestExecDriver_KillUserPid_OnPluginReconnectFailure(t *testing.T) {
Resources: basicResources,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewExecDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -134,7 +137,7 @@ func TestExecDriver_KillUserPid_OnPluginReconnectFailure(t *testing.T) {
}
// Attempt to open
handle2, err := d.Open(execCtx, handle.ID())
handle2, err := d.Open(ctx.ExecCtx, handle.ID())
if err == nil {
t.Fatalf("expected error")
}
@ -155,7 +158,8 @@ func TestExecDriver_KillUserPid_OnPluginReconnectFailure(t *testing.T) {
func TestExecDriver_Start_Wait(t *testing.T) {
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "sleep",
Name: "sleep",
Driver: "exec",
Config: map[string]interface{}{
"command": "/bin/sleep",
"args": []string{"2"},
@ -167,14 +171,14 @@ func TestExecDriver_Start_Wait(t *testing.T) {
Resources: basicResources,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewExecDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -205,7 +209,8 @@ func TestExecDriver_Start_Wait_AllocDir(t *testing.T) {
exp := []byte{'w', 'i', 'n'}
file := "output.txt"
task := &structs.Task{
Name: "sleep",
Name: "sleep",
Driver: "exec",
Config: map[string]interface{}{
"command": "/bin/bash",
"args": []string{
@ -220,14 +225,14 @@ func TestExecDriver_Start_Wait_AllocDir(t *testing.T) {
Resources: basicResources,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewExecDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -246,7 +251,7 @@ func TestExecDriver_Start_Wait_AllocDir(t *testing.T) {
}
// Check that data was written to the shared alloc directory.
outputFile := filepath.Join(execCtx.AllocDir.SharedDir, file)
outputFile := filepath.Join(ctx.AllocDir.SharedDir, file)
act, err := ioutil.ReadFile(outputFile)
if err != nil {
t.Fatalf("Couldn't read expected output: %v", err)
@ -260,7 +265,8 @@ func TestExecDriver_Start_Wait_AllocDir(t *testing.T) {
func TestExecDriver_Start_Kill_Wait(t *testing.T) {
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "sleep",
Name: "sleep",
Driver: "exec",
Config: map[string]interface{}{
"command": "/bin/sleep",
"args": []string{"100"},
@ -273,14 +279,14 @@ func TestExecDriver_Start_Kill_Wait(t *testing.T) {
KillTimeout: 10 * time.Second,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewExecDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -310,7 +316,8 @@ func TestExecDriver_Start_Kill_Wait(t *testing.T) {
func TestExecDriver_Signal(t *testing.T) {
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "signal",
Name: "signal",
Driver: "exec",
Config: map[string]interface{}{
"command": "/bin/bash",
"args": []string{"test.sh"},
@ -323,11 +330,11 @@ func TestExecDriver_Signal(t *testing.T) {
KillTimeout: 10 * time.Second,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewExecDriver(ctx.DriverCtx)
testFile := filepath.Join(execCtx.AllocDir.TaskDirs["signal"], "test.sh")
testFile := filepath.Join(ctx.ExecCtx.TaskDir.Dir, "test.sh")
testData := []byte(`
at_term() {
echo 'Terminated.'
@ -342,10 +349,10 @@ done
fmt.Errorf("Failed to write data")
}
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -372,7 +379,7 @@ done
}
// Check the log file to see it exited because of the signal
outputFile := filepath.Join(execCtx.AllocDir.LogDir(), "signal.stdout.0")
outputFile := filepath.Join(ctx.ExecCtx.TaskDir.LogDir, "signal.stdout.0")
act, err := ioutil.ReadFile(outputFile)
if err != nil {
t.Fatalf("Couldn't read expected output: %v", err)
@ -388,8 +395,9 @@ done
func TestExecDriverUser(t *testing.T) {
ctestutils.ExecCompatible(t)
task := &structs.Task{
Name: "sleep",
User: "alice",
Name: "sleep",
Driver: "exec",
User: "alice",
Config: map[string]interface{}{
"command": "/bin/sleep",
"args": []string{"100"},
@ -402,14 +410,14 @@ func TestExecDriverUser(t *testing.T) {
KillTimeout: 10 * time.Second,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewExecDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err == nil {
handle.Kill()
t.Fatalf("Should've failed")

View File

@ -9,7 +9,7 @@ import (
docker "github.com/fsouza/go-dockerclient"
cstructs "github.com/hashicorp/nomad/client/driver/structs"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
"github.com/hashicorp/nomad/client/testutil"
)
@ -41,12 +41,12 @@ func TestExecScriptCheckWithIsolation(t *testing.T) {
testutil.ExecCompatible(t)
execCmd := ExecCommand{Cmd: "/bin/echo", Args: []string{"hello world"}}
ctx := testExecutorContext(t)
defer ctx.AllocDir.Destroy()
ctx, allocDir := testExecutorContextWithChroot(t)
defer allocDir.Destroy()
execCmd.FSIsolation = true
execCmd.ResourceLimits = true
execCmd.User = cstructs.DefaultUnpriviledgedUser
execCmd.User = dstructs.DefaultUnpriviledgedUser
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
@ -63,7 +63,7 @@ func TestExecScriptCheckWithIsolation(t *testing.T) {
id: "foo",
cmd: "/bin/echo",
args: []string{"hello", "world"},
taskDir: ctx.AllocDir.TaskDirs["web"],
taskDir: ctx.TaskDir,
FSIsolation: true,
}

View File

@ -94,19 +94,17 @@ type ExecutorContext struct {
// TaskEnv holds information about the environment of a Task
TaskEnv *env.TaskEnvironment
// AllocDir is the handle to do operations on the alloc dir of
// the task
AllocDir *allocdir.AllocDir
// Task is the task whose executor is being launched
Task *structs.Task
// AllocID is the allocation id to which the task belongs
AllocID string
// A mapping of directories on the host OS to attempt to embed inside each
// task's chroot.
ChrootEnv map[string]string
// TaskDir is the host path to the task's root
TaskDir string
// LogDir is the host path where logs should be written
LogDir string
// Driver is the name of the driver that invoked the executor
Driver string
@ -183,7 +181,6 @@ type UniversalExecutor struct {
pids map[int]*nomadPid
pidLock sync.RWMutex
taskDir string
exitState *ProcessState
processExited chan interface{}
fsIsolationEnforced bool
@ -258,10 +255,8 @@ func (e *UniversalExecutor) LaunchCmd(command *ExecCommand) (*ProcessState, erro
}
}
// configuring the task dir
if err := e.configureTaskDir(); err != nil {
return nil, err
}
// set the task dir as the working directory for the command
e.cmd.Dir = e.ctx.TaskDir
e.ctx.TaskEnv.Build()
// configuring the chroot, resource container, and start the plugin
@ -277,6 +272,8 @@ func (e *UniversalExecutor) LaunchCmd(command *ExecCommand) (*ProcessState, erro
return nil, err
}
e.logger.Printf("[DEBUG] executor: XXX 1")
// Setup the loggers
if err := e.configureLoggers(); err != nil {
return nil, err
@ -284,23 +281,32 @@ func (e *UniversalExecutor) LaunchCmd(command *ExecCommand) (*ProcessState, erro
e.cmd.Stdout = e.lro
e.cmd.Stderr = e.lre
e.logger.Printf("[DEBUG] executor: XXX 2")
// Look up the binary path and make it executable
absPath, err := e.lookupBin(e.ctx.TaskEnv.ReplaceEnv(command.Cmd))
if err != nil {
return nil, err
}
e.logger.Printf("[DEBUG] executor: XXX abs: %q", absPath)
if err := e.makeExecutable(absPath); err != nil {
return nil, err
}
path := absPath
e.logger.Printf("[DEBUG] executor: XXX cmd.Path=%q", path)
e.logger.Printf("[DEBUG] executor: XXX cmd.Args=%q", e.cmd.Args)
e.logger.Printf("[DEBUG] executor: XXX cmd.Dir= %q", e.cmd.Dir)
e.logger.Printf("[DEBUG] executor: XXX cmd.Sys= %#v", e.cmd.SysProcAttr)
// Determine the path to run as it may have to be relative to the chroot.
if e.fsIsolationEnforced {
rel, err := filepath.Rel(e.taskDir, path)
rel, err := filepath.Rel(e.ctx.TaskDir, path)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to determine relative path base=%q target=%q: %v", e.ctx.TaskDir, path, err)
}
path = rel
}
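// Example: with TaskDir "/var/nomad/alloc/<id>/web" and absPath
// "/var/nomad/alloc/<id>/web/local/app", the chroot-relative path becomes
// "local/app".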
@ -310,9 +316,14 @@ func (e *UniversalExecutor) LaunchCmd(command *ExecCommand) (*ProcessState, erro
e.cmd.Args = append([]string{e.cmd.Path}, e.ctx.TaskEnv.ParseAndReplace(command.Args)...)
e.cmd.Env = e.ctx.TaskEnv.EnvList()
e.logger.Printf("[DEBUG] executor: XXX cmd.Path=%q", path)
e.logger.Printf("[DEBUG] executor: XXX cmd.Args=%q", e.cmd.Args)
e.logger.Printf("[DEBUG] executor: XXX cmd.Dir= %q", e.cmd.Dir)
e.logger.Printf("[DEBUG] executor: XXX cmd.Sys= %#v", e.cmd.SysProcAttr)
// Start the process
if err := e.cmd.Start(); err != nil {
return nil, err
return nil, fmt.Errorf("failed to start command path=%q --- args=%q: %v", path, e.cmd.Args, err)
}
go e.collectPids()
go e.wait()
@ -325,21 +336,24 @@ func (e *UniversalExecutor) configureLoggers() error {
e.rotatorLock.Lock()
defer e.rotatorLock.Unlock()
e.logger.Printf("[DEBUG] executor: XXX logdir= %q", e.ctx.LogDir)
e.logger.Printf("[DEBUG] executor: XXX task = %q", e.ctx.Task.Name)
logFileSize := int64(e.ctx.Task.LogConfig.MaxFileSizeMB * 1024 * 1024)
if e.lro == nil {
lro, err := logging.NewFileRotator(e.ctx.AllocDir.LogDir(), fmt.Sprintf("%v.stdout", e.ctx.Task.Name),
lro, err := logging.NewFileRotator(e.ctx.LogDir, fmt.Sprintf("%v.stdout", e.ctx.Task.Name),
e.ctx.Task.LogConfig.MaxFiles, logFileSize, e.logger)
if err != nil {
return err
return fmt.Errorf("error creating new stdout log file for %q: %v", e.ctx.Task.Name, err)
}
e.lro = lro
}
if e.lre == nil {
lre, err := logging.NewFileRotator(e.ctx.AllocDir.LogDir(), fmt.Sprintf("%v.stderr", e.ctx.Task.Name),
lre, err := logging.NewFileRotator(e.ctx.LogDir, fmt.Sprintf("%v.stderr", e.ctx.Task.Name),
e.ctx.Task.LogConfig.MaxFiles, logFileSize, e.logger)
if err != nil {
return err
return fmt.Errorf("error creating new stderr log file for %q: %v", e.ctx.Task.Name, err)
}
e.lre = lre
}
@ -496,12 +510,6 @@ func (e *UniversalExecutor) Exit() error {
merr.Errors = append(merr.Errors, err)
}
}
if e.command.FSIsolation {
if err := e.removeChrootMounts(); err != nil {
merr.Errors = append(merr.Errors, err)
}
}
return merr.ErrorOrNil()
}
@ -595,29 +603,18 @@ func (e *UniversalExecutor) pidStats() (map[string]*cstructs.ResourceUsage, erro
return stats, nil
}
// configureTaskDir sets the task dir in the executor
func (e *UniversalExecutor) configureTaskDir() error {
taskDir, ok := e.ctx.AllocDir.TaskDirs[e.ctx.Task.Name]
e.taskDir = taskDir
if !ok {
return fmt.Errorf("couldn't find task directory for task %v", e.ctx.Task.Name)
}
e.cmd.Dir = taskDir
return nil
}
// lookupBin looks for path to the binary to run by looking for the binary in
// the following locations, in-order: task/local/, task/, based on host $PATH.
// The return path is absolute.
func (e *UniversalExecutor) lookupBin(bin string) (string, error) {
// Check in the local directory
local := filepath.Join(e.taskDir, allocdir.TaskLocal, bin)
local := filepath.Join(e.ctx.TaskDir, allocdir.TaskLocal, bin)
if _, err := os.Stat(local); err == nil {
return local, nil
}
// Check at the root of the task's directory
root := filepath.Join(e.taskDir, bin)
root := filepath.Join(e.ctx.TaskDir, bin)
if _, err := os.Stat(root); err == nil {
return root, nil
}
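// Example: for bin "redis-server" the lookup tries <taskdir>/local/redis-server,
// then <taskdir>/redis-server, and finally falls back to the host $PATH.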
@ -731,7 +728,7 @@ func (e *UniversalExecutor) createCheck(check *structs.ServiceCheck, checkID str
timeout: check.Timeout,
cmd: check.Command,
args: check.Args,
taskDir: e.taskDir,
taskDir: e.ctx.TaskDir,
FSIsolation: e.command.FSIsolation,
}, nil

View File

@ -15,31 +15,13 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups"
cgroupFs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
cgroupConfig "github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/stats"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/nomad/structs"
)
var (
// A mapping of directories on the host OS to attempt to embed inside each
// task's chroot.
chrootEnv = map[string]string{
"/bin": "/bin",
"/etc": "/etc",
"/lib": "/lib",
"/lib32": "/lib32",
"/lib64": "/lib64",
"/run/resolvconf": "/run/resolvconf",
"/sbin": "/sbin",
"/usr": "/usr",
}
// clockTicks is the clocks per second of the machine
clockTicks = uint64(system.GetClockTicks())
// The statistics the executor exposes when using cgroups
ExecutorCgroupMeasuredMemStats = []string{"RSS", "Cache", "Swap", "Max Usage", "Kernel Usage", "Kernel Max Usage"}
ExecutorCgroupMeasuredCpuStats = []string{"System Mode", "User Mode", "Throttled Periods", "Throttled Time", "Percent"}
@ -71,9 +53,6 @@ func (e *UniversalExecutor) applyLimits(pid int) error {
manager := getCgroupManager(e.resConCtx.groups, nil)
if err := manager.Apply(pid); err != nil {
e.logger.Printf("[ERR] executor: error applying pid to cgroup: %v", err)
if er := e.removeChrootMounts(); er != nil {
e.logger.Printf("[ERR] executor: error removing chroot: %v", er)
}
return err
}
e.resConCtx.cgPaths = manager.GetPaths()
@ -83,9 +62,6 @@ func (e *UniversalExecutor) applyLimits(pid int) error {
if er := DestroyCgroup(e.resConCtx.groups, e.resConCtx.cgPaths, os.Getpid()); er != nil {
e.logger.Printf("[ERR] executor: error destroying cgroup: %v", er)
}
if er := e.removeChrootMounts(); er != nil {
e.logger.Printf("[ERR] executor: error removing chroot: %v", er)
}
return err
}
return nil
@ -222,50 +198,16 @@ func (e *UniversalExecutor) runAs(userid string) error {
// configureChroot configures a chroot
func (e *UniversalExecutor) configureChroot() error {
allocDir := e.ctx.AllocDir
if err := allocDir.MountSharedDir(e.ctx.Task.Name); err != nil {
return err
}
chroot := chrootEnv
if len(e.ctx.ChrootEnv) > 0 {
chroot = e.ctx.ChrootEnv
}
if err := allocDir.Embed(e.ctx.Task.Name, chroot); err != nil {
return err
}
// Set the tasks AllocDir environment variable.
e.ctx.TaskEnv.
SetAllocDir(filepath.Join("/", allocdir.SharedAllocName)).
SetTaskLocalDir(filepath.Join("/", allocdir.TaskLocal)).
SetSecretsDir(filepath.Join("/", allocdir.TaskSecrets)).
Build()
if e.cmd.SysProcAttr == nil {
e.cmd.SysProcAttr = &syscall.SysProcAttr{}
}
e.cmd.SysProcAttr.Chroot = e.taskDir
e.cmd.SysProcAttr.Chroot = e.ctx.TaskDir
e.cmd.Dir = "/"
if err := allocDir.MountSpecialDirs(e.taskDir); err != nil {
return err
}
e.fsIsolationEnforced = true
return nil
}
// cleanTaskDir is an idempotent operation to clean the task directory and
// should be called when tearing down the task.
func (e *UniversalExecutor) removeChrootMounts() error {
// Prevent a race between Wait/ForceStop
e.resConCtx.cgLock.Lock()
defer e.resConCtx.cgLock.Unlock()
return e.ctx.AllocDir.UnmountAll()
}
// getAllPids returns the pids of all the processes spun up by the executor. We
// use the libcontainer apis to get the pids when the user is using cgroup
// isolation and we scan the entire process table if the user is not using any

View File

@ -9,48 +9,70 @@ import (
"strings"
"testing"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/driver/env"
cstructs "github.com/hashicorp/nomad/client/driver/structs"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/nomad/mock"
)
func testExecutorContextWithChroot(t *testing.T) *ExecutorContext {
taskEnv := env.NewTaskEnvironment(mock.Node())
task, allocDir := mockAllocDir(t)
ctx := &ExecutorContext{
TaskEnv: taskEnv,
Task: task,
AllocDir: allocDir,
ChrootEnv: map[string]string{
"/etc/ld.so.cache": "/etc/ld.so.cache",
"/etc/ld.so.conf": "/etc/ld.so.conf",
"/etc/ld.so.conf.d": "/etc/ld.so.conf.d",
"/lib": "/lib",
"/lib64": "/lib64",
"/usr/lib": "/usr/lib",
"/bin/ls": "/bin/ls",
"/foobar": "/does/not/exist",
},
// testExecutorContextWithChroot returns an ExecutorContext and AllocDir with
// chroot. Use testExecutorContext if you don't need a chroot.
//
// The caller is responsible for calling AllocDir.Destroy() to cleanup.
func testExecutorContextWithChroot(t *testing.T) (*ExecutorContext, *allocdir.AllocDir) {
chrootEnv := map[string]string{
"/etc/ld.so.cache": "/etc/ld.so.cache",
"/etc/ld.so.conf": "/etc/ld.so.conf",
"/etc/ld.so.conf.d": "/etc/ld.so.conf.d",
"/lib": "/lib",
"/lib64": "/lib64",
"/usr/lib": "/usr/lib",
"/bin/ls": "/bin/ls",
"/bin/echo": "/bin/echo",
"/bin/bash": "/bin/bash",
"/usr/bin/yes": "/usr/bin/yes",
"/foobar": "/does/not/exist",
}
return ctx
taskEnv := env.NewTaskEnvironment(mock.Node())
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
allocDir := allocdir.NewAllocDir(filepath.Join(os.TempDir(), alloc.ID))
if err := allocDir.Build(); err != nil {
log.Fatalf("AllocDir.Build() failed: %v", err)
}
if err := allocDir.NewTaskDir(task.Name).Build(chrootEnv, cstructs.FSIsolationChroot); err != nil {
allocDir.Destroy()
log.Fatalf("allocDir.NewTaskDir(%q) failed: %v", task.Name, err)
}
td := allocDir.TaskDirs[task.Name]
ctx := &ExecutorContext{
TaskEnv: taskEnv,
Task: task,
TaskDir: td.Dir,
LogDir: td.LogDir,
}
return ctx, allocDir
}
func TestExecutor_IsolationAndConstraints(t *testing.T) {
testutil.ExecCompatible(t)
execCmd := ExecCommand{Cmd: "/bin/ls", Args: []string{"-F", "/", "/etc/"}}
ctx := testExecutorContextWithChroot(t)
defer ctx.AllocDir.Destroy()
ctx, allocDir := testExecutorContextWithChroot(t)
defer allocDir.Destroy()
execCmd.FSIsolation = true
execCmd.ResourceLimits = true
execCmd.User = cstructs.DefaultUnpriviledgedUser
execCmd.User = dstructs.DefaultUnpriviledgedUser
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
if err := executor.SetContext(ctx); err != nil {
t.Fatalf("Unexpected error")
t.Fatalf("Unexpected error: %v", err)
}
ps, err := executor.LaunchCmd(&execCmd)
@ -103,7 +125,7 @@ usr/
ld.so.cache
ld.so.conf
ld.so.conf.d/`
file := filepath.Join(ctx.AllocDir.LogDir(), "web.stdout.0")
file := filepath.Join(ctx.LogDir, "web.stdout.0")
output, err := ioutil.ReadFile(file)
if err != nil {
t.Fatalf("Couldn't read file %v", file)

View File

@ -13,6 +13,7 @@ import (
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/driver/env"
cstructs "github.com/hashicorp/nomad/client/structs"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
@ -33,34 +34,37 @@ var (
}
)
func mockAllocDir(t *testing.T) (*structs.Task, *allocdir.AllocDir) {
// testExecutorContext returns an ExecutorContext and AllocDir.
//
// The caller is responsible for calling AllocDir.Destroy() to cleanup.
func testExecutorContext(t *testing.T) (*ExecutorContext, *allocdir.AllocDir) {
taskEnv := env.NewTaskEnvironment(mock.Node())
alloc := mock.Alloc()
task := alloc.Job.TaskGroups[0].Tasks[0]
allocDir := allocdir.NewAllocDir(filepath.Join(os.TempDir(), alloc.ID))
if err := allocDir.Build([]*structs.Task{task}); err != nil {
log.Panicf("allocDir.Build() failed: %v", err)
if err := allocDir.Build(); err != nil {
log.Fatalf("AllocDir.Build() failed: %v", err)
}
return task, allocDir
}
func testExecutorContext(t *testing.T) *ExecutorContext {
taskEnv := env.NewTaskEnvironment(mock.Node())
task, allocDir := mockAllocDir(t)
if err := allocDir.NewTaskDir(task.Name).Build(nil, cstructs.FSIsolationNone); err != nil {
allocDir.Destroy()
log.Fatalf("allocDir.NewTaskDir(%q) failed: %v", task.Name, err)
}
td := allocDir.TaskDirs[task.Name]
ctx := &ExecutorContext{
TaskEnv: taskEnv,
Task: task,
AllocDir: allocDir,
TaskEnv: taskEnv,
Task: task,
TaskDir: td.Dir,
LogDir: td.LogDir,
}
return ctx
return ctx, allocDir
}
func TestExecutor_Start_Invalid(t *testing.T) {
invalid := "/bin/foobar"
execCmd := ExecCommand{Cmd: invalid, Args: []string{"1"}}
ctx := testExecutorContext(t)
defer ctx.AllocDir.Destroy()
ctx, allocDir := testExecutorContext(t)
defer allocDir.Destroy()
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
if err := executor.SetContext(ctx); err != nil {
@ -74,8 +78,8 @@ func TestExecutor_Start_Invalid(t *testing.T) {
func TestExecutor_Start_Wait_Failure_Code(t *testing.T) {
execCmd := ExecCommand{Cmd: "/bin/sleep", Args: []string{"fail"}}
ctx := testExecutorContext(t)
defer ctx.AllocDir.Destroy()
ctx, allocDir := testExecutorContext(t)
defer allocDir.Destroy()
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
if err := executor.SetContext(ctx); err != nil {
@ -101,8 +105,8 @@ func TestExecutor_Start_Wait_Failure_Code(t *testing.T) {
func TestExecutor_Start_Wait(t *testing.T) {
execCmd := ExecCommand{Cmd: "/bin/echo", Args: []string{"hello world"}}
ctx := testExecutorContext(t)
defer ctx.AllocDir.Destroy()
ctx, allocDir := testExecutorContext(t)
defer allocDir.Destroy()
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
if err := executor.SetContext(ctx); err != nil {
@ -125,7 +129,7 @@ func TestExecutor_Start_Wait(t *testing.T) {
}
expected := "hello world"
file := filepath.Join(ctx.AllocDir.LogDir(), "web.stdout.0")
file := filepath.Join(ctx.LogDir, "web.stdout.0")
output, err := ioutil.ReadFile(file)
if err != nil {
t.Fatalf("Couldn't read file %v", file)
@ -139,8 +143,8 @@ func TestExecutor_Start_Wait(t *testing.T) {
func TestExecutor_WaitExitSignal(t *testing.T) {
execCmd := ExecCommand{Cmd: "/bin/sleep", Args: []string{"10000"}}
ctx := testExecutorContext(t)
defer ctx.AllocDir.Destroy()
ctx, allocDir := testExecutorContext(t)
defer allocDir.Destroy()
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
if err := executor.SetContext(ctx); err != nil {
@ -183,10 +187,10 @@ func TestExecutor_ClientCleanup(t *testing.T) {
testutil.ExecCompatible(t)
execCmd := ExecCommand{Cmd: "/bin/bash", Args: []string{"-c", "/usr/bin/yes"}}
ctx := testExecutorContext(t)
ctx, allocDir := testExecutorContextWithChroot(t)
ctx.Task.LogConfig.MaxFiles = 1
ctx.Task.LogConfig.MaxFileSizeMB = 300
defer ctx.AllocDir.Destroy()
defer allocDir.Destroy()
execCmd.FSIsolation = true
execCmd.ResourceLimits = true
@ -210,7 +214,7 @@ func TestExecutor_ClientCleanup(t *testing.T) {
t.Fatalf("err: %v", err)
}
file := filepath.Join(ctx.AllocDir.LogDir(), "web.stdout.0")
file := filepath.Join(ctx.LogDir, "web.stdout.0")
finfo, err := os.Stat(file)
if err != nil {
t.Fatalf("error stating stdout file: %v", err)
@ -227,8 +231,8 @@ func TestExecutor_ClientCleanup(t *testing.T) {
func TestExecutor_Start_Kill(t *testing.T) {
execCmd := ExecCommand{Cmd: "/bin/sleep", Args: []string{"10 && hello world"}}
ctx := testExecutorContext(t)
defer ctx.AllocDir.Destroy()
ctx, allocDir := testExecutorContext(t)
defer allocDir.Destroy()
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
if err := executor.SetContext(ctx); err != nil {
@ -250,7 +254,7 @@ func TestExecutor_Start_Kill(t *testing.T) {
t.Fatalf("error: %v", err)
}
file := filepath.Join(ctx.AllocDir.LogDir(), "web.stdout.0")
file := filepath.Join(ctx.LogDir, "web.stdout.0")
time.Sleep(time.Duration(tu.TestMultiplier()*2) * time.Second)
output, err := ioutil.ReadFile(file)
@ -278,8 +282,6 @@ func TestExecutor_MakeExecutable(t *testing.T) {
f.Chmod(os.FileMode(0610))
// Make a fake executor
ctx := testExecutorContext(t)
defer ctx.AllocDir.Destroy()
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
err = executor.(*UniversalExecutor).makeExecutable(f.Name())
@ -303,8 +305,8 @@ func TestExecutor_MakeExecutable(t *testing.T) {
func TestExecutorInterpolateServices(t *testing.T) {
task := mock.Job().TaskGroups[0].Tasks[0]
// Make a fake executor
ctx := testExecutorContext(t)
defer ctx.AllocDir.Destroy()
ctx, allocDir := testExecutorContext(t)
defer allocDir.Destroy()
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags))
executor.(*UniversalExecutor).ctx = ctx
@ -333,8 +335,6 @@ func TestScanPids(t *testing.T) {
p5 := NewFakeProcess(20, 18)
// Make a fake executor
ctx := testExecutorContext(t)
defer ctx.AllocDir.Destroy()
executor := NewExecutor(log.New(os.Stdout, "", log.LstdFlags)).(*UniversalExecutor)
nomadPids, err := executor.scanPids(5, []ps.Process{p1, p2, p3, p4, p5})

View File

@ -17,11 +17,6 @@ func (e *UniversalExecutor) LaunchSyslogServer() (*SyslogServerState, error) {
return nil, fmt.Errorf("SetContext must be called before launching the Syslog Server")
}
// configuring the task dir
if err := e.configureTaskDir(); err != nil {
return nil, err
}
e.syslogChan = make(chan *logging.SyslogMessage, 2048)
l, err := e.getListener(e.ctx.PortLowerBound, e.ctx.PortUpperBound)
if err != nil {

View File

@ -17,7 +17,6 @@ import (
"github.com/hashicorp/go-plugin"
"github.com/mitchellh/mapstructure"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
@ -54,8 +53,6 @@ type javaHandle struct {
executor executor.Executor
isolationConfig *dstructs.IsolationConfig
taskDir string
allocDir *allocdir.AllocDir
killTimeout time.Duration
maxKillTimeout time.Duration
version string
@ -100,6 +97,10 @@ func (d *JavaDriver) Abilities() DriverAbilities {
}
}
func (d *JavaDriver) FSIsolation() cstructs.FSIsolation {
return cstructs.FSIsolationChroot
}
func (d *JavaDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
// Get the current status so that we can log any debug messages only if the
// state changes
@ -173,11 +174,6 @@ func (d *JavaDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
return nil, err
}
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
if driverConfig.JarPath == "" {
return nil, fmt.Errorf("jar_path must be specified")
}
@ -200,7 +196,7 @@ func (d *JavaDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginLogFile := filepath.Join(ctx.TaskDir.Dir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
@ -212,12 +208,12 @@ func (d *JavaDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
// Set the context
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "java",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
ChrootEnv: d.config.ChrootEnv,
Task: task,
TaskEnv: d.taskEnv,
Driver: "java",
AllocID: ctx.AllocID,
Task: task,
TaskDir: ctx.TaskDir.Dir,
LogDir: ctx.TaskDir.LogDir,
}
if err := execIntf.SetContext(executorCtx); err != nil {
pluginClient.Kill()
@ -250,8 +246,6 @@ func (d *JavaDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
executor: execIntf,
userPid: ps.Pid,
isolationConfig: ps.IsolationConfig,
taskDir: taskDir,
allocDir: ctx.AllocDir,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
version: d.config.Version,
@ -279,8 +273,6 @@ type javaId struct {
MaxKillTimeout time.Duration
PluginConfig *PluginReattachConfig
IsolationConfig *dstructs.IsolationConfig
TaskDir string
AllocDir *allocdir.AllocDir
UserPid int
}
@ -307,9 +299,6 @@ func (d *JavaDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, erro
merrs.Errors = append(merrs.Errors, fmt.Errorf("destroying resource container failed: %v", e))
}
}
if e := ctx.AllocDir.UnmountAll(); e != nil {
merrs.Errors = append(merrs.Errors, e)
}
return nil, fmt.Errorf("error connecting to plugin: %v", merrs.ErrorOrNil())
}
@ -323,8 +312,6 @@ func (d *JavaDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, erro
executor: exec,
userPid: id.UserPid,
isolationConfig: id.IsolationConfig,
taskDir: id.TaskDir,
allocDir: id.AllocDir,
logger: d.logger,
version: id.Version,
killTimeout: id.KillTimeout,
@ -347,8 +334,6 @@ func (h *javaHandle) ID() string {
MaxKillTimeout: h.maxKillTimeout,
PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
UserPid: h.userPid,
TaskDir: h.taskDir,
AllocDir: h.allocDir,
IsolationConfig: h.isolationConfig,
}
@ -386,17 +371,16 @@ func (h *javaHandle) Kill() error {
select {
case <-h.doneCh:
return nil
case <-time.After(h.killTimeout):
if h.pluginClient.Exited() {
return nil
break
}
if err := h.executor.Exit(); err != nil {
return fmt.Errorf("executor Exit failed: %v", err)
}
return nil
}
return nil
}
func (h *javaHandle) Stats() (*cstructs.TaskResourceUsage, error) {
@ -417,9 +401,6 @@ func (h *javaHandle) run() {
h.logger.Printf("[ERR] driver.java: error killing user process: %v", e)
}
}
if e := h.allocDir.UnmountAll(); e != nil {
h.logger.Printf("[ERR] driver.java: unmounting dev,proc and alloc dirs failed: %v", e)
}
}
// Remove services

View File

@ -34,11 +34,12 @@ func TestJavaDriver_Fingerprint(t *testing.T) {
ctestutils.JavaCompatible(t)
task := &structs.Task{
Name: "foo",
Driver: "java",
Resources: structs.DefaultResources(),
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewJavaDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewJavaDriver(ctx.DriverCtx)
node := &structs.Node{
Attributes: map[string]string{
"unique.cgroup.mountpoint": "/sys/fs/cgroups",
@ -72,7 +73,8 @@ func TestJavaDriver_StartOpen_Wait(t *testing.T) {
ctestutils.JavaCompatible(t)
task := &structs.Task{
Name: "demo-app",
Name: "demo-app",
Driver: "java",
Config: map[string]interface{}{
"jar_path": "demoapp.jar",
"jvm_options": []string{"-Xmx64m", "-Xms32m"},
@ -84,18 +86,18 @@ func TestJavaDriver_StartOpen_Wait(t *testing.T) {
Resources: basicResources,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewJavaDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewJavaDriver(ctx.DriverCtx)
// Copy the test jar into the task's directory
dst, _ := execCtx.AllocDir.TaskDirs[task.Name]
dst := ctx.ExecCtx.TaskDir.Dir
copyFile("./test-resources/java/demoapp.jar", filepath.Join(dst, "demoapp.jar"), t)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -104,7 +106,7 @@ func TestJavaDriver_StartOpen_Wait(t *testing.T) {
}
// Attempt to open
handle2, err := d.Open(execCtx, handle.ID())
handle2, err := d.Open(ctx.ExecCtx, handle.ID())
if err != nil {
t.Fatalf("err: %v", err)
}
@ -127,7 +129,8 @@ func TestJavaDriver_Start_Wait(t *testing.T) {
ctestutils.JavaCompatible(t)
task := &structs.Task{
Name: "demo-app",
Name: "demo-app",
Driver: "java",
Config: map[string]interface{}{
"jar_path": "demoapp.jar",
},
@ -138,17 +141,18 @@ func TestJavaDriver_Start_Wait(t *testing.T) {
Resources: basicResources,
}
driverCtx, execCtx := testDriverContexts(task)
d := NewJavaDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewJavaDriver(ctx.DriverCtx)
// Copy the test jar into the task's directory
dst, _ := execCtx.AllocDir.TaskDirs[task.Name]
dst := ctx.ExecCtx.TaskDir.Dir
copyFile("./test-resources/java/demoapp.jar", filepath.Join(dst, "demoapp.jar"), t)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -168,7 +172,7 @@ func TestJavaDriver_Start_Wait(t *testing.T) {
}
// Get the stdout of the process and assert that it's not empty
stdout := filepath.Join(execCtx.AllocDir.LogDir(), "demo-app.stdout.0")
stdout := filepath.Join(ctx.ExecCtx.TaskDir.LogDir, "demo-app.stdout.0")
fInfo, err := os.Stat(stdout)
if err != nil {
t.Fatalf("failed to get stdout of process: %v", err)
@ -191,7 +195,8 @@ func TestJavaDriver_Start_Kill_Wait(t *testing.T) {
ctestutils.JavaCompatible(t)
task := &structs.Task{
Name: "demo-app",
Name: "demo-app",
Driver: "java",
Config: map[string]interface{}{
"jar_path": "demoapp.jar",
},
@ -202,18 +207,18 @@ func TestJavaDriver_Start_Kill_Wait(t *testing.T) {
Resources: basicResources,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewJavaDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewJavaDriver(ctx.DriverCtx)
// Copy the test jar into the task's directory
dst, _ := execCtx.AllocDir.TaskDirs[task.Name]
dst := ctx.ExecCtx.TaskDir.Dir
copyFile("./test-resources/java/demoapp.jar", filepath.Join(dst, "demoapp.jar"), t)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -252,7 +257,8 @@ func TestJavaDriver_Signal(t *testing.T) {
ctestutils.JavaCompatible(t)
task := &structs.Task{
Name: "demo-app",
Name: "demo-app",
Driver: "java",
Config: map[string]interface{}{
"jar_path": "demoapp.jar",
},
@ -263,18 +269,18 @@ func TestJavaDriver_Signal(t *testing.T) {
Resources: basicResources,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewJavaDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewJavaDriver(ctx.DriverCtx)
// Copy the test jar into the task's directory
dst, _ := execCtx.AllocDir.TaskDirs[task.Name]
dst := ctx.ExecCtx.TaskDir.Dir
copyFile("./test-resources/java/demoapp.jar", filepath.Join(dst, "demoapp.jar"), t)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -313,8 +319,9 @@ func TestJavaDriverUser(t *testing.T) {
ctestutils.JavaCompatible(t)
task := &structs.Task{
Name: "demo-app",
User: "alice",
Name: "demo-app",
Driver: "java",
User: "alice",
Config: map[string]interface{}{
"jar_path": "demoapp.jar",
},
@ -325,14 +332,14 @@ func TestJavaDriverUser(t *testing.T) {
Resources: basicResources,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewJavaDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewJavaDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err == nil {
handle.Kill()
t.Fatalf("Should've failed")

View File

@ -92,7 +92,9 @@ func (s *SyslogCollector) LaunchCollector(ctx *LogCollectorContext) (*SyslogColl
go s.server.Start()
logFileSize := int64(ctx.LogConfig.MaxFileSizeMB * 1024 * 1024)
lro, err := NewFileRotator(ctx.AllocDir.LogDir(), fmt.Sprintf("%v.stdout", ctx.TaskName),
//FIXME There's an easier way to get this
logdir := ctx.AllocDir.TaskDirs[ctx.TaskName].LogDir
lro, err := NewFileRotator(logdir, fmt.Sprintf("%v.stdout", ctx.TaskName),
ctx.LogConfig.MaxFiles, logFileSize, s.logger)
if err != nil {
@ -100,7 +102,7 @@ func (s *SyslogCollector) LaunchCollector(ctx *LogCollectorContext) (*SyslogColl
}
s.lro = lro
lre, err := NewFileRotator(ctx.AllocDir.LogDir(), fmt.Sprintf("%v.stderr", ctx.TaskName),
lre, err := NewFileRotator(logdir, fmt.Sprintf("%v.stderr", ctx.TaskName),
ctx.LogConfig.MaxFiles, logFileSize, s.logger)
if err != nil {
return nil, err
@ -157,7 +159,7 @@ func (s *SyslogCollector) configureTaskDir() error {
if !ok {
return fmt.Errorf("couldn't find task directory for task %v", s.ctx.TaskName)
}
s.taskDir = taskDir
s.taskDir = taskDir.Dir
return nil
}
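
The FIXME above flags that indexing AllocDir.TaskDirs by task name is a roundabout way to reach the log directory. One plausible simplification, sketched here purely as an assumption (no such field exists in this diff), is to hand the collector the log directory directly, the same way the executor now receives it via ExecutorContext.LogDir.

package logging // assumed package name for the syslog collector

import "path/filepath"

// collectorContextSketch is hypothetical: a trimmed-down context that carries
// the log directory directly instead of the whole AllocDir.
type collectorContextSketch struct {
	TaskName string
	LogDir   string // a driver would fill this from ctx.TaskDir.LogDir
}

// stdoutPrefix shows how the stdout rotator prefix could be built from it.
func stdoutPrefix(c *collectorContextSketch) string {
	return filepath.Join(c.LogDir, c.TaskName+".stdout")
}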

View File

@ -13,7 +13,6 @@ import (
"syscall"
"time"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/fingerprint"
"github.com/hashicorp/nomad/client/stats"
@ -153,6 +152,10 @@ func (d *LxcDriver) Abilities() DriverAbilities {
}
}
func (d *LxcDriver) FSIsolation() cstructs.FSIsolation {
return cstructs.FSIsolationImage
}
// Fingerprint fingerprints the lxc driver configuration
func (d *LxcDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
enabled := cfg.ReadBoolDefault(lxcConfigOption, true)
@ -169,9 +172,6 @@ func (d *LxcDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, e
}
func (d *LxcDriver) Prestart(ctx *ExecContext, task *structs.Task) error {
d.taskEnv.SetAllocDir(allocdir.SharedAllocContainerPath)
d.taskEnv.SetTaskLocalDir(allocdir.TaskLocalContainerPath)
d.taskEnv.SetSecretsDir(allocdir.TaskSecretsContainerPath)
return nil
}
@ -220,7 +220,7 @@ func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, e
}
c.SetLogLevel(logLevel)
logFile := filepath.Join(ctx.AllocDir.LogDir(), fmt.Sprintf("%v-lxc.log", task.Name))
logFile := filepath.Join(ctx.TaskDir.LogDir, fmt.Sprintf("%v-lxc.log", task.Name))
c.SetLogFile(logFile)
options := lxc.TemplateOptions{
@ -243,19 +243,10 @@ func (d *LxcDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, e
}
// Bind mount the shared alloc dir and task local dir in the container
taskDir, ok := ctx.AllocDir.TaskDirs[task.Name]
if !ok {
return nil, fmt.Errorf("failed to find task local directory: %v", task.Name)
}
secretdir, err := ctx.AllocDir.GetSecretDir(task.Name)
if err != nil {
return nil, fmt.Errorf("faild getting secret path for task: %v", err)
}
taskLocalDir := filepath.Join(taskDir, allocdir.TaskLocal)
mounts := []string{
fmt.Sprintf("%s local none rw,bind,create=dir", taskLocalDir),
fmt.Sprintf("%s alloc none rw,bind,create=dir", ctx.AllocDir.SharedDir),
fmt.Sprintf("%s secret none rw,bind,create=dir", secretdir),
fmt.Sprintf("%s local none rw,bind,create=dir", ctx.TaskDir.LocalDir),
fmt.Sprintf("%s alloc none rw,bind,create=dir", ctx.TaskDir.SharedAllocDir),
fmt.Sprintf("%s secrets none rw,bind,create=dir", ctx.TaskDir.SecretsDir),
}
for _, mnt := range mounts {
if err := c.SetConfigItem("lxc.mount.entry", mnt); err != nil {
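
Two things change in this mount list: the bind sources now come straight from the new ctx.TaskDir fields, and the third entry's in-container target is corrected from secret to secrets, which is what the updated lxc test below checks for. A self-contained illustration of the resulting entries; the host paths are placeholders, not values from this change.

package driver

import "fmt"

// lxcMountEntries mirrors the corrected mount list: local/, alloc/, and
// secrets/ inside the container, bind-mounted from the task dir on the host.
func lxcMountEntries(localDir, sharedAllocDir, secretsDir string) []string {
	return []string{
		fmt.Sprintf("%s local none rw,bind,create=dir", localDir),
		fmt.Sprintf("%s alloc none rw,bind,create=dir", sharedAllocDir),
		fmt.Sprintf("%s secrets none rw,bind,create=dir", secretsDir),
	}
}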

View File

@ -22,12 +22,14 @@ func TestLxcDriver_Fingerprint(t *testing.T) {
task := &structs.Task{
Name: "foo",
Driver: "lxc",
Resources: structs.DefaultResources(),
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewLxcDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewLxcDriver(ctx.DriverCtx)
node := &structs.Node{
Attributes: map[string]string{},
}
@ -57,7 +59,8 @@ func TestLxcDriver_Start_Wait(t *testing.T) {
}
task := &structs.Task{
Name: "foo",
Name: "foo",
Driver: "lxc",
Config: map[string]interface{}{
"template": "/usr/share/lxc/templates/lxc-busybox",
},
@ -65,14 +68,14 @@ func TestLxcDriver_Start_Wait(t *testing.T) {
Resources: structs.DefaultResources(),
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewLxcDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewLxcDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -99,8 +102,8 @@ func TestLxcDriver_Start_Wait(t *testing.T) {
})
// Look for mounted directories in their proper location
containerName := fmt.Sprintf("%s-%s", task.Name, execCtx.AllocID)
for _, mnt := range []string{"alloc", "local", "secret"} {
containerName := fmt.Sprintf("%s-%s", task.Name, ctx.ExecCtx.AllocID)
for _, mnt := range []string{"alloc", "local", "secrets"} {
fullpath := filepath.Join(lxcHandle.lxcPath, containerName, "rootfs", mnt)
stat, err := os.Stat(fullpath)
if err != nil {
@ -132,7 +135,8 @@ func TestLxcDriver_Open_Wait(t *testing.T) {
}
task := &structs.Task{
Name: "foo",
Name: "foo",
Driver: "lxc",
Config: map[string]interface{}{
"template": "/usr/share/lxc/templates/lxc-busybox",
},
@ -140,14 +144,14 @@ func TestLxcDriver_Open_Wait(t *testing.T) {
Resources: structs.DefaultResources(),
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewLxcDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewLxcDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -163,7 +167,7 @@ func TestLxcDriver_Open_Wait(t *testing.T) {
}()
}
handle2, err := d.Open(execCtx, handle.ID())
handle2, err := d.Open(ctx.ExecCtx, handle.ID())
if err != nil {
t.Fatalf("err: %v", err)
}

View File

@ -75,6 +75,10 @@ func (d *MockDriver) Abilities() DriverAbilities {
}
}
func (d *MockDriver) FSIsolation() cstructs.FSIsolation {
return cstructs.FSIsolationNone
}
func (d *MockDriver) Prestart(ctx *ExecContext, task *structs.Task) error {
return nil
}

View File

@ -13,7 +13,6 @@ import (
"time"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
@ -55,7 +54,6 @@ type qemuHandle struct {
pluginClient *plugin.Client
userPid int
executor executor.Executor
allocDir *allocdir.AllocDir
killTimeout time.Duration
maxKillTimeout time.Duration
logger *log.Logger
@ -103,6 +101,10 @@ func (d *QemuDriver) Abilities() DriverAbilities {
}
}
func (d *QemuDriver) FSIsolation() cstructs.FSIsolation {
return cstructs.FSIsolationImage
}
func (d *QemuDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
// Get the current status so that we can log any debug messages only if the
// state changes
@ -158,12 +160,6 @@ func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
}
vmID := filepath.Base(vmPath)
// Get the tasks local directory.
taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
// Parse configuration arguments
// Create the base arguments
accelerator := "tcg"
@ -242,7 +238,7 @@ func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginLogFile := filepath.Join(ctx.TaskDir.Dir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
@ -252,11 +248,12 @@ func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "qemu",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
Task: task,
TaskEnv: d.taskEnv,
Driver: "qemu",
AllocID: ctx.AllocID,
Task: task,
TaskDir: ctx.TaskDir.Dir,
LogDir: ctx.TaskDir.LogDir,
}
if err := exec.SetContext(executorCtx); err != nil {
pluginClient.Kill()
@ -281,7 +278,6 @@ func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle,
pluginClient: pluginClient,
executor: exec,
userPid: ps.Pid,
allocDir: ctx.AllocDir,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
version: d.config.Version,
@ -303,7 +299,6 @@ type qemuId struct {
MaxKillTimeout time.Duration
UserPid int
PluginConfig *PluginReattachConfig
AllocDir *allocdir.AllocDir
}
func (d *QemuDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
@ -332,7 +327,6 @@ func (d *QemuDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, erro
pluginClient: pluginClient,
executor: exec,
userPid: id.UserPid,
allocDir: id.AllocDir,
logger: d.logger,
killTimeout: id.KillTimeout,
maxKillTimeout: id.MaxKillTimeout,
@ -354,7 +348,6 @@ func (h *qemuHandle) ID() string {
MaxKillTimeout: h.maxKillTimeout,
PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
UserPid: h.userPid,
AllocDir: h.allocDir,
}
data, err := json.Marshal(id)
@ -416,9 +409,6 @@ func (h *qemuHandle) run() {
if e := killProcess(h.userPid); e != nil {
h.logger.Printf("[ERR] driver.qemu: error killing user process: %v", e)
}
if e := h.allocDir.UnmountAll(); e != nil {
h.logger.Printf("[ERR] driver.qemu: unmounting dev,proc and alloc dirs failed: %v", e)
}
}
close(h.doneCh)

View File

@ -18,11 +18,13 @@ func TestQemuDriver_Fingerprint(t *testing.T) {
ctestutils.QemuCompatible(t)
task := &structs.Task{
Name: "foo",
Driver: "qemu",
Resources: structs.DefaultResources(),
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewQemuDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewQemuDriver(ctx.DriverCtx)
node := &structs.Node{
Attributes: make(map[string]string),
}
@ -44,7 +46,8 @@ func TestQemuDriver_Fingerprint(t *testing.T) {
func TestQemuDriver_StartOpen_Wait(t *testing.T) {
ctestutils.QemuCompatible(t)
task := &structs.Task{
Name: "linux",
Name: "linux",
Driver: "qemu",
Config: map[string]interface{}{
"image_path": "linux-0.2.img",
"accelerator": "tcg",
@ -69,15 +72,19 @@ func TestQemuDriver_StartOpen_Wait(t *testing.T) {
},
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewQemuDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewQemuDriver(ctx.DriverCtx)
// Copy the test image into the task's directory
dst, _ := execCtx.AllocDir.TaskDirs[task.Name]
dst := ctx.ExecCtx.TaskDir.Dir
copyFile("./test-resources/qemu/linux-0.2.img", filepath.Join(dst, "linux-0.2.img"), t)
handle, err := d.Start(execCtx, task)
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("Prestart faild: %v", err)
}
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -91,7 +98,7 @@ func TestQemuDriver_StartOpen_Wait(t *testing.T) {
}
// Attempt to open
handle2, err := d.Open(execCtx, handle.ID())
handle2, err := d.Open(ctx.ExecCtx, handle.ID())
if err != nil {
t.Fatalf("err: %v", err)
}
@ -108,8 +115,9 @@ func TestQemuDriver_StartOpen_Wait(t *testing.T) {
func TestQemuDriverUser(t *testing.T) {
ctestutils.QemuCompatible(t)
task := &structs.Task{
Name: "linux",
User: "alice",
Name: "linux",
Driver: "qemu",
User: "alice",
Config: map[string]interface{}{
"image_path": "linux-0.2.img",
"accelerator": "tcg",
@ -134,11 +142,15 @@ func TestQemuDriverUser(t *testing.T) {
},
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewQemuDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewQemuDriver(ctx.DriverCtx)
handle, err := d.Start(execCtx, task)
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("Prestart faild: %v", err)
}
handle, err := d.Start(ctx.ExecCtx, task)
if err == nil {
handle.Kill()
t.Fatalf("Should've failed")

View File

@ -10,7 +10,6 @@ import (
"time"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/allocdir"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/client/driver/executor"
dstructs "github.com/hashicorp/nomad/client/driver/structs"
@ -47,7 +46,6 @@ type rawExecHandle struct {
executor executor.Executor
killTimeout time.Duration
maxKillTimeout time.Duration
allocDir *allocdir.AllocDir
logger *log.Logger
waitCh chan *dstructs.WaitResult
doneCh chan struct{}
@ -86,6 +84,10 @@ func (d *RawExecDriver) Abilities() DriverAbilities {
}
}
func (d *RawExecDriver) FSIsolation() cstructs.FSIsolation {
return cstructs.FSIsolationNone
}
func (d *RawExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) {
// Get the current status so that we can log any debug messages only if the
// state changes
@ -113,18 +115,16 @@ func (d *RawExecDriver) Prestart(ctx *ExecContext, task *structs.Task) error {
func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) {
var driverConfig ExecDriverConfig
if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
d.logger.Printf("[WARN] driver.raw_exec: XXX error decoding config")
return nil, err
}
// Get the tasks local directory.
taskName := d.DriverContext.taskName
taskDir, ok := ctx.AllocDir.TaskDirs[taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
// Get the command to be ran
command := driverConfig.Command
if err := validateCommand(command, "args"); err != nil {
d.logger.Printf("[WARN] driver.raw_exec: XXX error validating command")
return nil, err
}
@ -132,21 +132,23 @@ func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandl
if err != nil {
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginLogFile := filepath.Join(ctx.TaskDir.Dir, fmt.Sprintf("%s-executor.out", taskName))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config)
if err != nil {
d.logger.Printf("[WARN] driver.raw_exec: XXX error creating executor")
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "raw_exec",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
Task: task,
TaskEnv: d.taskEnv,
Driver: "raw_exec",
AllocID: ctx.AllocID,
Task: task,
TaskDir: ctx.TaskDir.Dir,
LogDir: ctx.TaskDir.LogDir,
}
if err := exec.SetContext(executorCtx); err != nil {
pluginClient.Kill()
@ -160,6 +162,8 @@ func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandl
}
ps, err := exec.LaunchCmd(execCmd)
if err != nil {
d.logger.Printf("[WARN] driver.raw_exec: XXX error launching command: %v", err)
d.logger.Printf("[WARN] driver.raw_exec: XXX error launching command: cmd=%q --- args=%q --- user=%q", command, driverConfig.Args, task.User)
pluginClient.Kill()
return nil, err
}
@ -173,7 +177,6 @@ func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandl
userPid: ps.Pid,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
allocDir: ctx.AllocDir,
version: d.config.Version,
logger: d.logger,
doneCh: make(chan struct{}),
@ -192,7 +195,6 @@ type rawExecId struct {
MaxKillTimeout time.Duration
UserPid int
PluginConfig *PluginReattachConfig
AllocDir *allocdir.AllocDir
}
func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
@ -224,7 +226,6 @@ func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, e
logger: d.logger,
killTimeout: id.KillTimeout,
maxKillTimeout: id.MaxKillTimeout,
allocDir: id.AllocDir,
version: id.Version,
doneCh: make(chan struct{}),
waitCh: make(chan *dstructs.WaitResult, 1),
@ -243,7 +244,6 @@ func (h *rawExecHandle) ID() string {
MaxKillTimeout: h.maxKillTimeout,
PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
UserPid: h.userPid,
AllocDir: h.allocDir,
}
data, err := json.Marshal(id)
@ -304,9 +304,6 @@ func (h *rawExecHandle) run() {
if e := killProcess(h.userPid); e != nil {
h.logger.Printf("[ERR] driver.raw_exec: error killing user process: %v", e)
}
if e := h.allocDir.UnmountAll(); e != nil {
h.logger.Printf("[ERR] driver.raw_exec: unmounting dev,proc and alloc dirs failed: %v", e)
}
}
// Remove services
if err := h.executor.DeregisterServices(); err != nil {

View File

@ -20,11 +20,12 @@ import (
func TestRawExecDriver_Fingerprint(t *testing.T) {
task := &structs.Task{
Name: "foo",
Driver: "raw_exec",
Resources: structs.DefaultResources(),
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewRawExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRawExecDriver(ctx.DriverCtx)
node := &structs.Node{
Attributes: make(map[string]string),
}
@ -59,7 +60,8 @@ func TestRawExecDriver_Fingerprint(t *testing.T) {
func TestRawExecDriver_StartOpen_Wait(t *testing.T) {
task := &structs.Task{
Name: "sleep",
Name: "sleep",
Driver: "raw_exec",
Config: map[string]interface{}{
"command": testtask.Path(),
"args": []string{"sleep", "1s"},
@ -71,14 +73,14 @@ func TestRawExecDriver_StartOpen_Wait(t *testing.T) {
Resources: basicResources,
}
testtask.SetTaskEnv(task)
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewRawExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRawExecDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -87,7 +89,7 @@ func TestRawExecDriver_StartOpen_Wait(t *testing.T) {
}
// Attempt to open
handle2, err := d.Open(execCtx, handle.ID())
handle2, err := d.Open(ctx.ExecCtx, handle.ID())
if err != nil {
t.Fatalf("err: %v", err)
}
@ -107,7 +109,8 @@ func TestRawExecDriver_StartOpen_Wait(t *testing.T) {
func TestRawExecDriver_Start_Wait(t *testing.T) {
task := &structs.Task{
Name: "sleep",
Name: "sleep",
Driver: "raw_exec",
Config: map[string]interface{}{
"command": testtask.Path(),
"args": []string{"sleep", "1s"},
@ -119,14 +122,14 @@ func TestRawExecDriver_Start_Wait(t *testing.T) {
Resources: basicResources,
}
testtask.SetTaskEnv(task)
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewRawExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRawExecDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -156,7 +159,8 @@ func TestRawExecDriver_Start_Wait_AllocDir(t *testing.T) {
file := "output.txt"
outPath := fmt.Sprintf(`${%s}/%s`, env.AllocDir, file)
task := &structs.Task{
Name: "sleep",
Name: "sleep",
Driver: "raw_exec",
Config: map[string]interface{}{
"command": testtask.Path(),
"args": []string{
@ -172,14 +176,14 @@ func TestRawExecDriver_Start_Wait_AllocDir(t *testing.T) {
}
testtask.SetTaskEnv(task)
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewRawExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRawExecDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -198,7 +202,7 @@ func TestRawExecDriver_Start_Wait_AllocDir(t *testing.T) {
}
// Check that data was written to the shared alloc directory.
outputFile := filepath.Join(execCtx.AllocDir.SharedDir, file)
outputFile := filepath.Join(ctx.AllocDir.SharedDir, file)
act, err := ioutil.ReadFile(outputFile)
if err != nil {
t.Fatalf("Couldn't read expected output: %v", err)
@ -211,7 +215,8 @@ func TestRawExecDriver_Start_Wait_AllocDir(t *testing.T) {
func TestRawExecDriver_Start_Kill_Wait(t *testing.T) {
task := &structs.Task{
Name: "sleep",
Name: "sleep",
Driver: "raw_exec",
Config: map[string]interface{}{
"command": testtask.Path(),
"args": []string{"sleep", "45s"},
@ -224,14 +229,14 @@ func TestRawExecDriver_Start_Kill_Wait(t *testing.T) {
}
testtask.SetTaskEnv(task)
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewRawExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRawExecDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -262,8 +267,9 @@ func TestRawExecDriver_Start_Kill_Wait(t *testing.T) {
func TestRawExecDriverUser(t *testing.T) {
task := &structs.Task{
Name: "sleep",
User: "alice",
Name: "sleep",
Driver: "raw_exec",
User: "alice",
Config: map[string]interface{}{
"command": testtask.Path(),
"args": []string{"sleep", "45s"},
@ -276,14 +282,14 @@ func TestRawExecDriverUser(t *testing.T) {
}
testtask.SetTaskEnv(task)
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewRawExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRawExecDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err == nil {
handle.Kill()
t.Fatalf("Should've failed")
@ -296,7 +302,8 @@ func TestRawExecDriverUser(t *testing.T) {
func TestRawExecDriver_Signal(t *testing.T) {
task := &structs.Task{
Name: "signal",
Name: "signal",
Driver: "raw_exec",
Config: map[string]interface{}{
"command": "/bin/bash",
"args": []string{"test.sh"},
@ -309,11 +316,11 @@ func TestRawExecDriver_Signal(t *testing.T) {
KillTimeout: 10 * time.Second,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewExecDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRawExecDriver(ctx.DriverCtx)
testFile := filepath.Join(execCtx.AllocDir.TaskDirs["signal"], "test.sh")
testFile := filepath.Join(ctx.ExecCtx.TaskDir.Dir, "test.sh")
testData := []byte(`
at_term() {
echo 'Terminated.'
@ -328,10 +335,10 @@ done
fmt.Errorf("Failed to write data")
}
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("prestart err: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -358,7 +365,7 @@ done
}
// Check the log file to see it exited because of the signal
outputFile := filepath.Join(execCtx.AllocDir.LogDir(), "signal.stdout.0")
outputFile := filepath.Join(ctx.ExecCtx.TaskDir.LogDir, "signal.stdout.0")
act, err := ioutil.ReadFile(outputFile)
if err != nil {
t.Fatalf("Couldn't read expected output: %v", err)

View File

@ -80,7 +80,6 @@ type rktHandle struct {
pluginClient *plugin.Client
executorPid int
executor executor.Executor
allocDir *allocdir.AllocDir
logger *log.Logger
killTimeout time.Duration
maxKillTimeout time.Duration
@ -92,7 +91,6 @@ type rktHandle struct {
// disk
type rktPID struct {
PluginConfig *PluginReattachConfig
AllocDir *allocdir.AllocDir
ExecutorPid int
KillTimeout time.Duration
MaxKillTimeout time.Duration
@ -103,6 +101,10 @@ func NewRktDriver(ctx *DriverContext) Driver {
return &RktDriver{DriverContext: *ctx}
}
func (d *RktDriver) FSIsolation() cstructs.FSIsolation {
return cstructs.FSIsolationImage
}
// Validate is used to validate the driver configuration
func (d *RktDriver) Validate(config map[string]interface{}) error {
fd := &fields.FieldData{
@ -207,9 +209,6 @@ func (d *RktDriver) Periodic() (bool, time.Duration) {
}
func (d *RktDriver) Prestart(ctx *ExecContext, task *structs.Task) error {
d.taskEnv.SetAllocDir(allocdir.SharedAllocContainerPath)
d.taskEnv.SetTaskLocalDir(allocdir.TaskLocalContainerPath)
d.taskEnv.SetSecretsDir(allocdir.TaskSecretsContainerPath)
return nil
}
@ -225,13 +224,6 @@ func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, e
// ACI image
img := driverConfig.ImageName
// Get the tasks local directory.
taskName := d.DriverContext.taskName
taskDir, ok := ctx.AllocDir.TaskDirs[taskName]
if !ok {
return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName)
}
// Build the command.
var cmdArgs []string
@ -259,17 +251,17 @@ func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, e
// Mount /alloc
allocVolName := fmt.Sprintf("%s-%s-alloc", ctx.AllocID, task.Name)
cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", allocVolName, ctx.AllocDir.SharedDir))
cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", allocVolName, ctx.TaskDir.SharedAllocDir))
cmdArgs = append(cmdArgs, fmt.Sprintf("--mount=volume=%s,target=%s", allocVolName, allocdir.SharedAllocContainerPath))
// Mount /local
localVolName := fmt.Sprintf("%s-%s-local", ctx.AllocID, task.Name)
cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", localVolName, filepath.Join(taskDir, allocdir.TaskLocal)))
cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", localVolName, ctx.TaskDir.LocalDir))
cmdArgs = append(cmdArgs, fmt.Sprintf("--mount=volume=%s,target=%s", localVolName, allocdir.TaskLocalContainerPath))
// Mount /secrets
secretsVolName := fmt.Sprintf("%s-%s-secrets", ctx.AllocID, task.Name)
cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", secretsVolName, filepath.Join(taskDir, allocdir.TaskSecrets)))
cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", secretsVolName, ctx.TaskDir.SecretsDir))
cmdArgs = append(cmdArgs, fmt.Sprintf("--mount=volume=%s,target=%s", secretsVolName, allocdir.TaskSecretsContainerPath))
// Mount arbitrary volumes if enabled
@ -297,7 +289,7 @@ func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, e
// Inject environment variables
for k, v := range d.taskEnv.EnvMap() {
cmdArgs = append(cmdArgs, fmt.Sprintf("--set-env=%v=%v", k, v))
cmdArgs = append(cmdArgs, fmt.Sprintf("--set-env=%v=%q", k, v))
}
// Check if the user has overridden the exec command.
@ -407,7 +399,7 @@ func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, e
return nil, fmt.Errorf("unable to find the nomad binary: %v", err)
}
pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name))
pluginLogFile := filepath.Join(ctx.TaskDir.Dir, fmt.Sprintf("%s-executor.out", task.Name))
pluginConfig := &plugin.ClientConfig{
Cmd: exec.Command(bin, "executor", pluginLogFile),
}
@ -417,11 +409,12 @@ func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, e
return nil, err
}
executorCtx := &executor.ExecutorContext{
TaskEnv: d.taskEnv,
Driver: "rkt",
AllocDir: ctx.AllocDir,
AllocID: ctx.AllocID,
Task: task,
TaskEnv: d.taskEnv,
Driver: "rkt",
AllocID: ctx.AllocID,
Task: task,
TaskDir: ctx.TaskDir.Dir,
LogDir: ctx.TaskDir.LogDir,
}
if err := execIntf.SetContext(executorCtx); err != nil {
pluginClient.Kill()
@ -450,7 +443,6 @@ func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, e
pluginClient: pluginClient,
executor: execIntf,
executorPid: ps.Pid,
allocDir: ctx.AllocDir,
logger: d.logger,
killTimeout: GetKillTimeout(task.KillTimeout, maxKill),
maxKillTimeout: maxKill,
@ -490,7 +482,6 @@ func (d *RktDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error
h := &rktHandle{
pluginClient: pluginClient,
executorPid: id.ExecutorPid,
allocDir: id.AllocDir,
executor: exec,
logger: d.logger,
killTimeout: id.KillTimeout,
@ -512,7 +503,6 @@ func (h *rktHandle) ID() string {
KillTimeout: h.killTimeout,
MaxKillTimeout: h.maxKillTimeout,
ExecutorPid: h.executorPid,
AllocDir: h.allocDir,
}
data, err := json.Marshal(pid)
if err != nil {
@ -561,9 +551,6 @@ func (h *rktHandle) run() {
if e := killProcess(h.executorPid); e != nil {
h.logger.Printf("[ERROR] driver.rkt: error killing user process: %v", e)
}
if e := h.allocDir.UnmountAll(); e != nil {
h.logger.Printf("[ERROR] driver.rkt: unmounting dev,proc and alloc dirs failed: %v", e)
}
}
// Remove services
if err := h.executor.DeregisterServices(); err != nil {

View File

@ -44,8 +44,8 @@ func TestRktDriver_Fingerprint(t *testing.T) {
}
ctestutils.RktCompatible(t)
driverCtx, _ := testDriverContexts(&structs.Task{Name: "foo"})
d := NewRktDriver(driverCtx)
ctx := testDriverContexts(t, &structs.Task{Name: "foo", Driver: "rkt"})
d := NewRktDriver(ctx.DriverCtx)
node := &structs.Node{
Attributes: make(map[string]string),
}
@ -75,7 +75,8 @@ func TestRktDriver_Start_DNS(t *testing.T) {
ctestutils.RktCompatible(t)
// TODO: use test server to load from a fixture
task := &structs.Task{
Name: "etcd",
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"trust_prefix": "coreos.com/etcd",
"image": "coreos.com/etcd:v2.0.4",
@ -93,15 +94,14 @@ func TestRktDriver_Start_DNS(t *testing.T) {
},
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
d := NewRktDriver(driverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -111,7 +111,7 @@ func TestRktDriver_Start_DNS(t *testing.T) {
defer handle.Kill()
// Attempt to open
handle2, err := d.Open(execCtx, handle.ID())
handle2, err := d.Open(ctx.ExecCtx, handle.ID())
if err != nil {
t.Fatalf("err: %v", err)
}
@ -127,7 +127,8 @@ func TestRktDriver_Start_Wait(t *testing.T) {
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"trust_prefix": "coreos.com/etcd",
"image": "coreos.com/etcd:v2.0.4",
@ -144,14 +145,14 @@ func TestRktDriver_Start_Wait(t *testing.T) {
},
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewRktDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -188,7 +189,8 @@ func TestRktDriver_Start_Wait_Skip_Trust(t *testing.T) {
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"image": "coreos.com/etcd:v2.0.4",
"command": "/etcd",
@ -204,14 +206,14 @@ func TestRktDriver_Start_Wait_Skip_Trust(t *testing.T) {
},
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewRktDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -253,7 +255,8 @@ func TestRktDriver_Start_Wait_AllocDir(t *testing.T) {
hostpath := filepath.Join(tmpvol, file)
task := &structs.Task{
Name: "alpine",
Name: "alpine",
Driver: "rkt",
Config: map[string]interface{}{
"image": "docker://alpine",
"command": "/bin/sh",
@ -274,14 +277,14 @@ func TestRktDriver_Start_Wait_AllocDir(t *testing.T) {
},
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewRktDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -317,8 +320,9 @@ func TestRktDriverUser(t *testing.T) {
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
User: "alice",
Name: "etcd",
Driver: "rkt",
User: "alice",
Config: map[string]interface{}{
"trust_prefix": "coreos.com/etcd",
"image": "coreos.com/etcd:v2.0.4",
@ -335,14 +339,14 @@ func TestRktDriverUser(t *testing.T) {
},
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
d := NewRktDriver(driverCtx)
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err == nil {
handle.Kill()
t.Fatalf("Should've failed")
@ -359,7 +363,8 @@ func TestRktTrustPrefix(t *testing.T) {
}
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"trust_prefix": "example.com/invalid",
"image": "coreos.com/etcd:v2.0.4",
@ -375,15 +380,14 @@ func TestRktTrustPrefix(t *testing.T) {
CPU: 100,
},
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
d := NewRktDriver(driverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err == nil {
handle.Kill()
t.Fatalf("Should've failed")
@ -397,7 +401,8 @@ func TestRktTrustPrefix(t *testing.T) {
func TestRktTaskValidate(t *testing.T) {
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"trust_prefix": "coreos.com/etcd",
"image": "coreos.com/etcd:v2.0.4",
@ -408,10 +413,10 @@ func TestRktTaskValidate(t *testing.T) {
},
Resources: basicResources,
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
d := NewRktDriver(driverCtx)
if err := d.Validate(task.Config); err != nil {
t.Fatalf("Validation error in TaskConfig : '%v'", err)
}
@ -425,7 +430,8 @@ func TestRktDriver_PortsMapping(t *testing.T) {
ctestutils.RktCompatible(t)
task := &structs.Task{
Name: "etcd",
Name: "etcd",
Driver: "rkt",
Config: map[string]interface{}{
"image": "docker://redis:latest",
"args": []string{"--version"},
@ -452,15 +458,14 @@ func TestRktDriver_PortsMapping(t *testing.T) {
},
}
driverCtx, execCtx := testDriverContexts(task)
defer execCtx.AllocDir.Destroy()
ctx := testDriverContexts(t, task)
defer ctx.AllocDir.Destroy()
d := NewRktDriver(ctx.DriverCtx)
d := NewRktDriver(driverCtx)
if err := d.Prestart(execCtx, task); err != nil {
if err := d.Prestart(ctx.ExecCtx, task); err != nil {
t.Fatalf("error in prestart: %v", err)
}
handle, err := d.Start(execCtx, task)
handle, err := d.Start(ctx.ExecCtx, task)
if err != nil {
t.Fatalf("err: %v", err)
}

View File

@ -95,3 +95,32 @@ func joinStringSet(s1, s2 []string) []string {
return j
}
// FSIsolation is an enumeration to describe what kind of filesystem isolation
// a driver supports.
type FSIsolation int
const (
// FSIsolationNone means no isolation. The host filesystem is used.
FSIsolationNone FSIsolation = 0
// FSIsolationChroot means the driver will use a chroot on the host
// filesystem.
FSIsolationChroot FSIsolation = 1
// FSIsolationImage means the driver uses an image.
FSIsolationImage FSIsolation = 2
)
func (f FSIsolation) String() string {
switch f {
case 0:
return "none"
case 1:
return "chroot"
case 2:
return "image"
default:
return "INVALID"
}
}
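
The drivers above each advertise one of these values: raw_exec and the mock driver return None, java returns Chroot, and lxc, qemu, and rkt return Image. The helper below is illustrative only and not part of this change; it assumes it sits in the same package as the enum and simply shows a caller branching on the reported preference.

package structs // assumed: the same package that declares FSIsolation above

// needsChrootBuild is a sketch of how a caller might branch on a driver's
// reported isolation: only chroot isolation requires populating host dirs.
func needsChrootBuild(f FSIsolation) bool {
	switch f {
	case FSIsolationChroot:
		return true
	case FSIsolationNone, FSIsolationImage:
		return false
	default:
		return false
	}
}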

View File

@ -59,7 +59,6 @@ type TaskRunner struct {
config *config.Config
updater TaskStateUpdater
logger *log.Logger
ctx *driver.ExecContext
alloc *structs.Allocation
restartTracker *RestartTracker
@ -71,7 +70,7 @@ type TaskRunner struct {
resourceUsageLock sync.RWMutex
task *structs.Task
taskDir string
taskDir *allocdir.TaskDir
// taskEnv is the environment variables of the task
taskEnv *env.TaskEnvironment
@ -85,8 +84,15 @@ type TaskRunner struct {
// artifactsDownloaded tracks whether the tasks artifacts have been
// downloaded
//
// Must acquire persistLock when accessing
artifactsDownloaded bool
// taskDirBuilt tracks whether the task has built its directory.
//
// Must acquire persistLock when accessing
taskDirBuilt bool
// payloadRendered tracks whether the payload has been rendered to disk
payloadRendered bool
@ -134,6 +140,7 @@ type taskRunnerState struct {
Task *structs.Task
HandleID string
ArtifactDownloaded bool
TaskDirBuilt bool
PayloadRendered bool
}
@ -154,7 +161,7 @@ type SignalEvent struct {
// NewTaskRunner is used to create a new task context
func NewTaskRunner(logger *log.Logger, config *config.Config,
updater TaskStateUpdater, ctx *driver.ExecContext,
updater TaskStateUpdater, taskDir *allocdir.TaskDir,
alloc *structs.Allocation, task *structs.Task,
vaultClient vaultclient.VaultClient) *TaskRunner {
@ -169,19 +176,11 @@ func NewTaskRunner(logger *log.Logger, config *config.Config,
}
restartTracker := newRestartTracker(tg.RestartPolicy, alloc.Job.Type)
// Get the task directory
taskDir, ok := ctx.AllocDir.TaskDirs[task.Name]
if !ok {
logger.Printf("[ERR] client: task directory for alloc %q task %q couldn't be found", alloc.ID, task.Name)
return nil
}
tc := &TaskRunner{
config: config,
updater: updater,
logger: logger,
restartTracker: restartTracker,
ctx: ctx,
alloc: alloc,
task: task,
taskDir: taskDir,
@ -232,11 +231,12 @@ func (r *TaskRunner) RestoreState() error {
// Restore fields
if snap.Task == nil {
return fmt.Errorf("task runner snapshot include nil Task")
return fmt.Errorf("task runner snapshot includes nil Task")
} else {
r.task = snap.Task
}
r.artifactsDownloaded = snap.ArtifactDownloaded
r.taskDirBuilt = snap.TaskDirBuilt
r.payloadRendered = snap.PayloadRendered
if err := r.setTaskEnv(); err != nil {
@ -245,13 +245,8 @@ func (r *TaskRunner) RestoreState() error {
}
if r.task.Vault != nil {
secretDir, err := r.ctx.AllocDir.GetSecretDir(r.task.Name)
if err != nil {
return fmt.Errorf("failed to determine task %s secret dir in alloc %q: %v", r.task.Name, r.alloc.ID, err)
}
// Read the token from the secret directory
tokenPath := filepath.Join(secretDir, vaultTokenFile)
tokenPath := filepath.Join(r.taskDir.SecretsDir, vaultTokenFile)
data, err := ioutil.ReadFile(tokenPath)
if err != nil {
if !os.IsNotExist(err) {
@ -267,12 +262,13 @@ func (r *TaskRunner) RestoreState() error {
// Restore the driver
if snap.HandleID != "" {
driver, err := r.createDriver()
d, err := r.createDriver()
if err != nil {
return err
}
handle, err := driver.Open(r.ctx, snap.HandleID)
ctx := driver.NewExecContext(r.taskDir, r.alloc.ID)
handle, err := d.Open(ctx, snap.HandleID)
// In the case it fails, we relaunch the task in the Run() method.
if err != nil {
@ -300,8 +296,10 @@ func (r *TaskRunner) SaveState() error {
Task: r.task,
Version: r.config.Version,
ArtifactDownloaded: r.artifactsDownloaded,
TaskDirBuilt: r.taskDirBuilt,
PayloadRendered: r.payloadRendered,
}
r.handleLock.Lock()
if r.handle != nil {
snap.HandleID = r.handle.ID()
@ -312,6 +310,9 @@ func (r *TaskRunner) SaveState() error {
// DestroyState is used to cleanup after ourselves
func (r *TaskRunner) DestroyState() error {
r.persistLock.Lock()
defer r.persistLock.Unlock()
return os.RemoveAll(r.stateFilePath())
}
@ -332,7 +333,7 @@ func (r *TaskRunner) setTaskEnv() error {
r.taskEnvLock.Lock()
defer r.taskEnvLock.Unlock()
taskEnv, err := driver.GetTaskEnv(r.ctx.AllocDir, r.config.Node,
taskEnv, err := driver.GetTaskEnv(r.taskDir, r.config.Node,
r.task.Copy(), r.alloc, r.config, r.vaultFuture.Get())
if err != nil {
return err
@ -653,14 +654,7 @@ func (r *TaskRunner) deriveVaultToken() (token string, exit bool) {
// writeToken writes the given token to disk
func (r *TaskRunner) writeToken(token string) error {
// Write the token to disk
secretDir, err := r.ctx.AllocDir.GetSecretDir(r.task.Name)
if err != nil {
return fmt.Errorf("failed to determine task %s secret dir in alloc %q: %v", r.task.Name, r.alloc.ID, err)
}
// Write the token to the file system
tokenPath := filepath.Join(secretDir, vaultTokenFile)
tokenPath := filepath.Join(r.taskDir.SecretsDir, vaultTokenFile)
if err := ioutil.WriteFile(tokenPath, []byte(token), 0777); err != nil {
return fmt.Errorf("failed to save Vault tokens to secret dir for task %q in alloc %q: %v", r.task.Name, r.alloc.ID, err)
}
@ -686,7 +680,7 @@ func (r *TaskRunner) updatedTokenHandler() {
// Create a new templateManager
var err error
r.templateManager, err = NewTaskTemplateManager(r, r.task.Templates,
r.config, r.vaultFuture.Get(), r.taskDir, r.getTaskEnv())
r.config, r.vaultFuture.Get(), r.taskDir.Dir, r.getTaskEnv())
if err != nil {
err := fmt.Errorf("failed to build task's template manager: %v", err)
r.setState(structs.TaskStateDead, structs.NewTaskEvent(structs.TaskSetupFailure).SetSetupError(err).SetFailsTask())
@ -725,7 +719,7 @@ func (r *TaskRunner) prestart(resultCh chan bool) {
requirePayload := len(r.alloc.Job.Payload) != 0 &&
(r.task.DispatchInput != nil && r.task.DispatchInput.File != "")
if !r.payloadRendered && requirePayload {
renderTo := filepath.Join(r.taskDir, allocdir.TaskLocal, r.task.DispatchInput.File)
renderTo := filepath.Join(r.taskDir.LocalDir, r.task.DispatchInput.File)
decoded, err := snappy.Decode(nil, r.alloc.Job.Payload)
if err != nil {
r.setState(
@ -747,11 +741,15 @@ func (r *TaskRunner) prestart(resultCh chan bool) {
}
for {
r.persistLock.Lock()
downloaded := r.artifactsDownloaded
r.persistLock.Unlock()
// Download the task's artifacts
if !r.artifactsDownloaded && len(r.task.Artifacts) > 0 {
if !downloaded && len(r.task.Artifacts) > 0 {
r.setState(structs.TaskStatePending, structs.NewTaskEvent(structs.TaskDownloadingArtifacts))
for _, artifact := range r.task.Artifacts {
if err := getter.GetArtifact(r.getTaskEnv(), artifact, r.taskDir); err != nil {
if err := getter.GetArtifact(r.getTaskEnv(), artifact, r.taskDir.Dir); err != nil {
wrapped := fmt.Errorf("failed to download artifact %q: %v", artifact.GetterSource, err)
r.setState(structs.TaskStatePending,
structs.NewTaskEvent(structs.TaskArtifactDownloadFailed).SetDownloadError(wrapped))
@ -760,7 +758,9 @@ func (r *TaskRunner) prestart(resultCh chan bool) {
}
}
r.persistLock.Lock()
r.artifactsDownloaded = true
r.persistLock.Unlock()
}
// We don't have to wait for any template
@ -779,7 +779,7 @@ func (r *TaskRunner) prestart(resultCh chan bool) {
if r.templateManager == nil {
var err error
r.templateManager, err = NewTaskTemplateManager(r, r.task.Templates,
r.config, r.vaultFuture.Get(), r.taskDir, r.getTaskEnv())
r.config, r.vaultFuture.Get(), r.taskDir.Dir, r.getTaskEnv())
if err != nil {
err := fmt.Errorf("failed to build task's template manager: %v", err)
r.setState(structs.TaskStateDead, structs.NewTaskEvent(structs.TaskSetupFailure).SetSetupError(err).SetFailsTask())
@ -1056,17 +1056,23 @@ func (r *TaskRunner) killTask(killingEvent *structs.TaskEvent) {
r.setState("", structs.NewTaskEvent(structs.TaskKilled).SetKillError(err))
}
// startTask creates the driver and starts the task.
// startTask creates the driver, task dir, and starts the task.
func (r *TaskRunner) startTask() error {
// Create a driver
driver, err := r.createDriver()
drv, err := r.createDriver()
if err != nil {
return fmt.Errorf("failed to create driver of task %q for alloc %q: %v",
r.task.Name, r.alloc.ID, err)
}
// Build base task directory structure regardless of FS isolation abilities
if err := r.buildTaskDir(drv.FSIsolation()); err != nil {
return fmt.Errorf("failed to build task directory for %q: %v", r.task.Name, err)
}
// Run prestart
if err := driver.Prestart(r.ctx, r.task); err != nil {
ctx := driver.NewExecContext(r.taskDir, r.alloc.ID)
if err := drv.Prestart(ctx, r.task); err != nil {
wrapped := fmt.Errorf("failed to initialize task %q for alloc %q: %v",
r.task.Name, r.alloc.ID, err)
@ -1080,7 +1086,7 @@ func (r *TaskRunner) startTask() error {
}
// Start the job
handle, err := driver.Start(r.ctx, r.task)
handle, err := drv.Start(ctx, r.task)
if err != nil {
wrapped := fmt.Errorf("failed to start task %q for alloc %q: %v",
r.task.Name, r.alloc.ID, err)
@ -1101,6 +1107,32 @@ func (r *TaskRunner) startTask() error {
return nil
}
// buildTaskDir creates the task directory before driver.Prestart. It is safe
// to call multiple times as its state is persisted.
func (r *TaskRunner) buildTaskDir(fsi cstructs.FSIsolation) error {
r.persistLock.Lock()
if r.taskDirBuilt {
// Already built! Nothing to do.
r.persistLock.Unlock()
return nil
}
r.persistLock.Unlock()
chroot := config.DefaultChrootEnv
if len(r.config.ChrootEnv) > 0 {
chroot = r.config.ChrootEnv
}
if err := r.taskDir.Build(chroot, fsi); err != nil {
return err
}
// Mark task dir as successfully built
r.persistLock.Lock()
r.taskDirBuilt = true
r.persistLock.Unlock()
return nil
}
// collectResourceUsageStats starts collecting resource usage stats of a Task.
// Collection ends when the passed channel is closed
func (r *TaskRunner) collectResourceUsageStats(stopCollection <-chan struct{}) {
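
Taken together: startTask now asks the driver for its isolation preference, builds the task directory with it before calling Prestart, and the persisted taskDirBuilt flag keeps a restored runner from rebuilding an existing directory. Below is a compressed sketch of that flow, not code from this change; TaskDir.Build's signature is assumed from the call in buildTaskDir and DefaultChrootEnv from the reference above.

package client

import (
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/client/driver"
)

// prepareTaskDir is a sketch: it builds the task directory according to the
// driver's isolation preference, using the default chroot map unless the
// client config overrides it.
func prepareTaskDir(drv driver.Driver, td *allocdir.TaskDir, cfg *config.Config) error {
	chroot := config.DefaultChrootEnv
	if len(cfg.ChrootEnv) > 0 {
		chroot = cfg.ChrootEnv
	}
	// Only chroot isolation causes host directories to be copied in; image and
	// none isolation still get the base local/, alloc/, and secrets/ dirs.
	return td.Build(chroot, drv.FSIsolation())
}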

File diff suppressed because it is too large