open-nomad/client/driver/executor/executor_linux.go

241 lines
7 KiB
Go
Raw Normal View History

2016-02-05 00:03:17 +00:00
package executor
2016-02-02 21:38:38 +00:00
import (
"fmt"
"os"
"os/user"
"path/filepath"
"strconv"
"syscall"
"github.com/hashicorp/go-multierror"
2016-02-03 19:41:49 +00:00
"github.com/opencontainers/runc/libcontainer/cgroups"
cgroupFs "github.com/opencontainers/runc/libcontainer/cgroups/fs"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
cgroupConfig "github.com/opencontainers/runc/libcontainer/configs"
"github.com/hashicorp/nomad/client/allocdir"
2016-02-03 19:41:49 +00:00
"github.com/hashicorp/nomad/nomad/structs"
)
var (
	// chrootEnv maps host directories to the paths at which they are
	// embedded inside each task's chroot. Keys are host paths, values are
	// the corresponding destination paths in the chroot.
	chrootEnv = map[string]string{
		"/bin":            "/bin",
		"/etc":            "/etc",
		"/lib":            "/lib",
		"/lib32":          "/lib32",
		"/lib64":          "/lib64",
		"/run/resolvconf": "/run/resolvconf",
		"/sbin":           "/sbin",
		"/usr/bin":        "/usr/bin",
		"/usr/lib":        "/usr/lib",
		"/usr/sbin":       "/usr/sbin",
		"/usr/share":      "/usr/share",
	}
)
2016-02-05 00:18:10 +00:00
// configureIsolation configures chroot and creates cgroups
2016-02-04 00:03:43 +00:00
func (e *UniversalExecutor) configureIsolation() error {
2016-02-04 00:09:17 +00:00
if e.ctx.FSIsolation {
2016-02-04 00:03:43 +00:00
if err := e.configureChroot(); err != nil {
return err
}
}
2016-02-04 00:09:17 +00:00
if e.ctx.ResourceLimits {
2016-02-04 18:09:52 +00:00
if err := e.configureCgroups(e.ctx.TaskResources); err != nil {
2016-02-04 00:03:43 +00:00
return fmt.Errorf("error creating cgroups: %v", err)
}
if err := e.applyLimits(os.Getpid()); err != nil {
if er := DestroyCgroup(e.groups); er != nil {
e.logger.Printf("[ERR] executor: error destroying cgroup: %v", er)
}
if er := e.removeChrootMounts(); er != nil {
e.logger.Printf("[ERR] executor: error removing chroot: %v", er)
}
return fmt.Errorf("error entering the plugin process in the cgroup: %v:", err)
}
2016-02-03 19:41:49 +00:00
}
2016-02-04 00:03:43 +00:00
return nil
}
2016-02-03 19:41:49 +00:00
2016-02-05 00:18:10 +00:00
// applyLimits puts a process in a pre-configured cgroup
func (e *UniversalExecutor) applyLimits(pid int) error {
2016-02-04 00:09:17 +00:00
if !e.ctx.ResourceLimits {
2016-02-04 00:03:43 +00:00
return nil
}
// Entering the process in the cgroup
manager := getCgroupManager(e.groups)
if err := manager.Apply(pid); err != nil {
e.logger.Printf("[ERR] executor: unable to join cgroup: %v", err)
2016-02-03 19:41:49 +00:00
if err := e.Exit(); err != nil {
e.logger.Printf("[ERR] executor: unable to kill process: %v", err)
2016-02-03 19:41:49 +00:00
}
2016-02-04 00:03:43 +00:00
return err
2016-02-03 19:41:49 +00:00
}
2016-02-04 00:03:43 +00:00
return nil
}
2016-02-04 00:03:43 +00:00
// configureCgroups converts a Nomad Resources specification into the equivalent
// cgroup configuration. It returns an error if the resources are invalid.
func (e *UniversalExecutor) configureCgroups(resources *structs.Resources) error {
e.groups = &cgroupConfig.Cgroup{}
e.groups.Resources = &cgroupConfig.Resources{}
2016-03-02 00:53:56 +00:00
cgroupName := structs.GenerateUUID()
cgPath, err := cgroups.GetThisCgroupDir("devices")
if err != nil {
return fmt.Errorf("unable to get mount point for devices sub-system: %v", err)
}
e.groups.Path = filepath.Join(cgPath, cgroupName)
2016-02-04 00:03:43 +00:00
// TODO: verify this is needed for things like network access
e.groups.Resources.AllowAllDevices = true
2016-02-04 00:03:43 +00:00
if resources.MemoryMB > 0 {
// Total amount of memory allowed to consume
e.groups.Resources.Memory = int64(resources.MemoryMB * 1024 * 1024)
// Disable swap to avoid issues on the machine
e.groups.Resources.MemorySwap = int64(-1)
}
2016-02-04 00:03:43 +00:00
if resources.CPU < 2 {
return fmt.Errorf("resources.CPU must be equal to or greater than 2: %v", resources.CPU)
}
2016-02-04 00:03:43 +00:00
// Set the relative CPU shares for this cgroup.
e.groups.Resources.CpuShares = int64(resources.CPU)
2016-02-04 00:03:43 +00:00
if resources.IOPS != 0 {
// Validate it is in an acceptable range.
if resources.IOPS < 10 || resources.IOPS > 1000 {
return fmt.Errorf("resources.IOPS must be between 10 and 1000: %d", resources.IOPS)
}
2016-02-04 00:03:43 +00:00
e.groups.Resources.BlkioWeight = uint16(resources.IOPS)
}
return nil
}
// runAs takes a user id as a string and looks up the user, and sets the command
// to execute as that user.
2016-02-04 00:03:43 +00:00
func (e *UniversalExecutor) runAs(userid string) error {
u, err := user.Lookup(userid)
if err != nil {
return fmt.Errorf("Failed to identify user %v: %v", userid, err)
}
// Convert the uid and gid
uid, err := strconv.ParseUint(u.Uid, 10, 32)
if err != nil {
return fmt.Errorf("Unable to convert userid to uint32: %s", err)
}
gid, err := strconv.ParseUint(u.Gid, 10, 32)
if err != nil {
return fmt.Errorf("Unable to convert groupid to uint32: %s", err)
}
// Set the command to run as that user and group.
if e.cmd.SysProcAttr == nil {
e.cmd.SysProcAttr = &syscall.SysProcAttr{}
}
if e.cmd.SysProcAttr.Credential == nil {
e.cmd.SysProcAttr.Credential = &syscall.Credential{}
}
e.cmd.SysProcAttr.Credential.Uid = uint32(uid)
e.cmd.SysProcAttr.Credential.Gid = uint32(gid)
return nil
}
2016-02-05 00:18:10 +00:00
// configureChroot configures a chroot
2016-02-04 00:03:43 +00:00
func (e *UniversalExecutor) configureChroot() error {
allocDir := e.ctx.AllocDir
2016-02-04 18:09:52 +00:00
if err := allocDir.MountSharedDir(e.ctx.TaskName); err != nil {
2016-02-04 00:03:43 +00:00
return err
}
2016-02-04 18:09:52 +00:00
if err := allocDir.Embed(e.ctx.TaskName, chrootEnv); err != nil {
2016-02-04 00:03:43 +00:00
return err
}
// Set the tasks AllocDir environment variable.
e.ctx.TaskEnv.SetAllocDir(filepath.Join("/", allocdir.SharedAllocName)).SetTaskLocalDir(filepath.Join("/", allocdir.TaskLocal)).Build()
if e.cmd.SysProcAttr == nil {
e.cmd.SysProcAttr = &syscall.SysProcAttr{}
}
e.cmd.SysProcAttr.Chroot = e.taskDir
e.cmd.Dir = "/"
2016-02-04 00:03:43 +00:00
if err := allocDir.MountSpecialDirs(e.taskDir); err != nil {
return err
}
2016-02-04 00:03:43 +00:00
return nil
}
// removeChrootMounts is an idempotent operation to clean the task directory and
// should be called when tearing down the task. It unmounts everything that
// configureChroot mounted into the alloc dir.
func (e *UniversalExecutor) removeChrootMounts() error {
	// Prevent a race between Wait/ForceStop
	e.lock.Lock()
	defer e.lock.Unlock()
	return e.ctx.AllocDir.UnmountAll()
}
2016-02-03 19:41:49 +00:00
// destroyCgroup kills all processes in the cgroup and removes the cgroup
// configuration from the host.
func DestroyCgroup(groups *cgroupConfig.Cgroup) error {
merrs := new(multierror.Error)
if groups == nil {
2016-02-03 19:41:49 +00:00
return fmt.Errorf("Can't destroy: cgroup configuration empty")
}
manager := getCgroupManager(groups)
if pids, perr := manager.GetPids(); perr == nil {
for _, pid := range pids {
2016-03-02 19:44:40 +00:00
// If the pid is the pid of the executor then we don't kill it, the
// executor is going to be killed by the driver once the Wait
// returns
if pid == os.Getpid() {
continue
}
proc, err := os.FindProcess(pid)
if err != nil {
merrs.Errors = append(merrs.Errors, fmt.Errorf("error finding process %v: %v", pid, err))
} else {
if e := proc.Kill(); e != nil {
merrs.Errors = append(merrs.Errors, fmt.Errorf("error killing process %v: %v", pid, e))
}
}
2016-02-03 19:41:49 +00:00
}
} else {
merrs.Errors = append(merrs.Errors, fmt.Errorf("error getting pids: %v", perr))
2016-02-03 19:41:49 +00:00
}
// Remove the cgroup.
if err := manager.Destroy(); err != nil {
multierror.Append(merrs, fmt.Errorf("Failed to delete the cgroup directories: %v", err))
2016-02-03 19:41:49 +00:00
}
if len(merrs.Errors) != 0 {
return fmt.Errorf("errors while destroying cgroup: %v", merrs)
2016-02-03 19:41:49 +00:00
}
return nil
}
// getCgroupManager returns the correct libcontainer cgroup manager.
func getCgroupManager(groups *cgroupConfig.Cgroup) cgroups.Manager {
2016-02-03 19:41:49 +00:00
var manager cgroups.Manager
manager = &cgroupFs.Manager{Cgroups: groups}
if systemd.UseSystemd() {
manager = &systemd.Manager{Cgroups: groups}
}
return manager
}