Use cgroup when possible
commit ffd9270f2f
parent 7affe3bd40
@@ -118,6 +118,11 @@ type ExecCommand struct {
     // ResourceLimits determines whether resource limits are enforced by the
     // executor.
     ResourceLimits bool

+    // Cgroup marks whether we put the process in a cgroup. Setting this field
+    // doesn't enforce resource limits. To enforce limits, set ResourceLimits.
+    // Using the cgroup does allow more precise cleanup of processes.
+    Cgroup bool
 }

 // ProcessState holds information about the state of a user process.
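To make the semantics of the new field concrete, here is a small self-contained Go sketch. The struct below is only a stand-in for executor.ExecCommand; the two flag names come from the hunk above, everything else is illustrative.

package main

import "fmt"

// Stand-in for the two executor flags touched throughout this commit.
type execFlags struct {
    ResourceLimits bool // create a cgroup and write limits
    Cgroup         bool // create a cgroup, but only for tracking/cleanup
}

// A cgroup is used whenever either flag is set ...
func usesCgroup(f execFlags) bool { return f.ResourceLimits || f.Cgroup }

// ... but limits are only enforced when ResourceLimits is set.
func enforcesLimits(f execFlags) bool { return f.ResourceLimits }

func main() {
    rawExec := execFlags{Cgroup: true}          // raw_exec on Linux as root
    isolated := execFlags{ResourceLimits: true} // exec/java style drivers
    fmt.Println(usesCgroup(rawExec), enforcesLimits(rawExec))   // true false
    fmt.Println(usesCgroup(isolated), enforcesLimits(isolated)) // true true
}

This pairing is why most conditions in the diff change from ResourceLimits to (ResourceLimits || Cgroup): any code that only cares about the existence of a cgroup now checks both flags.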
@@ -497,7 +502,7 @@ func (e *UniversalExecutor) Exit() error {
     }

     // Prefer killing the process via the resource container.
-    if e.cmd.Process != nil && !e.command.ResourceLimits {
+    if e.cmd.Process != nil && !(e.command.ResourceLimits || e.command.Cgroup) {
         proc, err := os.FindProcess(e.cmd.Process.Pid)
         if err != nil {
             e.logger.Printf("[ERR] executor: can't find process with pid: %v, err: %v",
@@ -508,7 +513,7 @@ func (e *UniversalExecutor) Exit() error {
         }
     }

-    if e.command.ResourceLimits {
+    if e.command.ResourceLimits || e.command.Cgroup {
         if err := e.resConCtx.executorCleanup(); err != nil {
             merr.Errors = append(merr.Errors, err)
         }
@@ -36,7 +36,7 @@ func (e *UniversalExecutor) configureIsolation() error {
         }
     }

-    if e.command.ResourceLimits {
+    if e.command.ResourceLimits || e.command.Cgroup {
         if err := e.configureCgroups(e.ctx.Task.Resources); err != nil {
             return fmt.Errorf("error creating cgroups: %v", err)
         }
@@ -46,7 +46,7 @@ func (e *UniversalExecutor) configureIsolation() error {

 // applyLimits puts a process in a pre-configured cgroup
 func (e *UniversalExecutor) applyLimits(pid int) error {
-    if !e.command.ResourceLimits {
+    if !(e.command.ResourceLimits || e.command.Cgroup) {
         return nil
     }

@@ -76,9 +76,14 @@ func (e *UniversalExecutor) configureCgroups(resources *structs.Resources) error {
     cgroupName := uuid.Generate()
     e.resConCtx.groups.Path = filepath.Join("/nomad", cgroupName)

-    // TODO: verify this is needed for things like network access
+    // Allow access to /dev/
     e.resConCtx.groups.Resources.AllowAllDevices = true

+    // Use a cgroup but don't apply limits
+    if !e.command.ResourceLimits {
+        return nil
+    }
+
     if resources.MemoryMB > 0 {
         // Total amount of memory allowed to consume
         e.resConCtx.groups.Resources.Memory = int64(resources.MemoryMB * 1024 * 1024)
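The early return above is the heart of the change: the cgroup is created but left unlimited. As a filesystem-level illustration only — Nomad itself goes through the libcontainer cgroup manager shown in this diff, and the /sys/fs/cgroup/memory/nomad path below is an assumption — a tracking-only cgroup on cgroup v1 looks roughly like this:

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// Tracking-only cgroup, mirroring the early return added above: the group is
// created and the pid is added, but no limit files are written unless
// enforceLimits is true.
func trackInCgroup(name string, pid int, enforceLimits bool) error {
    cg := filepath.Join("/sys/fs/cgroup/memory/nomad", name)
    if err := os.MkdirAll(cg, 0755); err != nil {
        return fmt.Errorf("error creating cgroup: %v", err)
    }
    // Adding the pid means future children are tracked (and cleanable) too.
    pidLine := []byte(fmt.Sprintf("%d\n", pid))
    if err := os.WriteFile(filepath.Join(cg, "cgroup.procs"), pidLine, 0644); err != nil {
        return fmt.Errorf("error adding pid %d: %v", pid, err)
    }
    if !enforceLimits {
        // Use a cgroup but don't apply limits.
        return nil
    }
    // memory.limit_in_bytes, cpu.shares, etc. would be written here.
    return nil
}

func main() {
    fmt.Println(trackInCgroup("example", os.Getpid(), false))
}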
@@ -110,7 +115,7 @@ func (e *UniversalExecutor) configureCgroups(resources *structs.Resources) error {
 // isolation we aggregate the resource utilization of all the pids launched by
 // the executor.
 func (e *UniversalExecutor) Stats() (*cstructs.TaskResourceUsage, error) {
-    if !e.command.ResourceLimits {
+    if !(e.command.ResourceLimits || e.command.Cgroup) {
         pidStats, err := e.pidStats()
         if err != nil {
             return nil, err
@@ -234,7 +239,7 @@ func (e *UniversalExecutor) configureChroot() error {
 // isolation and we scan the entire process table if the user is not using any
 // isolation
 func (e *UniversalExecutor) getAllPids() (map[int]*nomadPid, error) {
-    if e.command.ResourceLimits {
+    if e.command.ResourceLimits || e.command.Cgroup {
         manager := getCgroupManager(e.resConCtx.groups, e.resConCtx.cgPaths)
         pids, err := manager.GetAllPids()
         if err != nil {
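manager.GetAllPids() returns every pid in the task's cgroup, which is what makes cleanup and stats precise even for forked children. A rough stand-in that reads cgroup.procs directly (the path is illustrative; Nomad resolves the real one through the libcontainer cgroup manager):

package main

import (
    "fmt"
    "os"
    "strconv"
    "strings"
)

// Rough equivalent of what manager.GetAllPids() hands the executor: every pid
// currently inside the task's cgroup.
func cgroupPids(cgroupDir string) ([]int, error) {
    data, err := os.ReadFile(cgroupDir + "/cgroup.procs")
    if err != nil {
        return nil, err
    }
    var pids []int
    for _, field := range strings.Fields(string(data)) {
        pid, err := strconv.Atoi(field)
        if err != nil {
            return nil, fmt.Errorf("parsing pid %q: %v", field, err)
        }
        pids = append(pids, pid)
    }
    return pids, nil
}

func main() {
    pids, err := cgroupPids("/sys/fs/cgroup/memory/nomad/example")
    fmt.Println(pids, err)
}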
@@ -7,8 +7,11 @@ import (
     "log"
     "os"
     "path/filepath"
+    "runtime"
+    "syscall"
     "time"

+    multierror "github.com/hashicorp/go-multierror"
     "github.com/hashicorp/go-plugin"
     "github.com/hashicorp/nomad/client/allocdir"
     "github.com/hashicorp/nomad/client/driver/env"
@@ -22,8 +25,11 @@ import (
 )

 const (
-    // The option that enables this driver in the Config.Options map.
-    rawExecConfigOption = "driver.raw_exec.enable"
+    // rawExecEnableOption is the option that enables this driver in the Config.Options map.
+    rawExecEnableOption = "driver.raw_exec.enable"
+
+    // rawExecNoCgroupOption forces no cgroups.
+    rawExecNoCgroupOption = "driver.raw_exec.no_cgroups"

     // The key populated in Node Attributes to indicate presence of the Raw Exec
     // driver
@@ -36,21 +42,26 @@ const (
 type RawExecDriver struct {
     DriverContext
     fingerprint.StaticFingerprinter
+
+    // useCgroup tracks whether we should use a cgroup to manage the process
+    // tree
+    useCgroup bool
 }

 // rawExecHandle is returned from Start/Open as a handle to the PID
 type rawExecHandle struct {
     version         string
     pluginClient    *plugin.Client
     userPid         int
     executor        executor.Executor
+    isolationConfig *dstructs.IsolationConfig
     killTimeout     time.Duration
     maxKillTimeout  time.Duration
     logger          *log.Logger
     waitCh          chan *dstructs.WaitResult
     doneCh          chan struct{}
     taskEnv         *env.TaskEnv
     taskDir         *allocdir.TaskDir
 }

 // NewRawExecDriver is used to create a new raw exec driver
@@ -93,7 +104,7 @@ func (d *RawExecDriver) FSIsolation() cstructs.FSIsolation {

 func (d *RawExecDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
     // Check that the user has explicitly enabled this executor.
-    enabled := req.Config.ReadBoolDefault(rawExecConfigOption, false)
+    enabled := req.Config.ReadBoolDefault(rawExecEnableOption, false)

     if enabled || req.Config.DevMode {
         d.logger.Printf("[WARN] driver.raw_exec: raw exec is enabled. Only enable if needed")
@@ -107,6 +118,14 @@ func (d *RawExecDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
 }

 func (d *RawExecDriver) Prestart(*ExecContext, *structs.Task) (*PrestartResponse, error) {
+    // If we are on linux, running as root, cgroups are mounted, and cgroups
+    // aren't disabled by the operator, use cgroups for pid management.
+    forceDisable := d.DriverContext.config.ReadBoolDefault(rawExecNoCgroupOption, false)
+    if !forceDisable && runtime.GOOS == "linux" &&
+        syscall.Geteuid() == 0 && cgroupsMounted(d.DriverContext.node) {
+        d.useCgroup = true
+    }
+
     return nil, nil
 }

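The Prestart check gates cgroup use on platform, privilege, a mounted cgroup hierarchy, and the new no_cgroups option. A hedged sketch of the same decision follows; Nomad's cgroupsMounted reads a fingerprinted node attribute, so scanning /proc/mounts here is only an assumption-level stand-in.

package main

import (
    "bufio"
    "fmt"
    "os"
    "runtime"
    "strings"
    "syscall"
)

// cgroupsMounted reports whether any cgroup filesystem appears in /proc/mounts.
func cgroupsMounted() bool {
    f, err := os.Open("/proc/mounts")
    if err != nil {
        return false
    }
    defer f.Close()
    scanner := bufio.NewScanner(f)
    for scanner.Scan() {
        fields := strings.Fields(scanner.Text())
        if len(fields) >= 3 && strings.HasPrefix(fields[2], "cgroup") {
            return true
        }
    }
    return false
}

// shouldUseCgroup mirrors the condition added to Prestart: Linux, root,
// cgroups mounted, and not disabled by the operator.
func shouldUseCgroup(forceDisable bool) bool {
    return !forceDisable && runtime.GOOS == "linux" &&
        syscall.Geteuid() == 0 && cgroupsMounted()
}

func main() {
    fmt.Println("use cgroup:", shouldUseCgroup(false))
}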
@@ -154,6 +173,7 @@ func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse, error) {
         Args:           driverConfig.Args,
         User:           task.User,
         TaskKillSignal: taskKillSignal,
+        Cgroup:         d.useCgroup,
     }
     ps, err := exec.LaunchCmd(execCmd)
     if err != nil {
@@ -165,17 +185,18 @@ func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse, error) {
     // Return a driver handle
     maxKill := d.DriverContext.config.MaxKillTimeout
     h := &rawExecHandle{
         pluginClient:    pluginClient,
         executor:        exec,
+        isolationConfig: ps.IsolationConfig,
         userPid:         ps.Pid,
         killTimeout:     GetKillTimeout(task.KillTimeout, maxKill),
         maxKillTimeout:  maxKill,
         version:         d.config.Version.VersionNumber(),
         logger:          d.logger,
         doneCh:          make(chan struct{}),
         waitCh:          make(chan *dstructs.WaitResult, 1),
         taskEnv:         ctx.TaskEnv,
         taskDir:         ctx.TaskDir,
     }
     go h.run()
     return &StartResponse{Handle: h}, nil
@@ -184,11 +205,12 @@ func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse, error) {
 func (d *RawExecDriver) Cleanup(*ExecContext, *CreatedResources) error { return nil }

 type rawExecId struct {
     Version         string
     KillTimeout     time.Duration
     MaxKillTimeout  time.Duration
     UserPid         int
     PluginConfig    *PluginReattachConfig
+    IsolationConfig *dstructs.IsolationConfig
 }

 func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
@@ -202,11 +224,19 @@ func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
     }
     exec, pluginClient, err := createExecutorWithConfig(pluginConfig, d.config.LogOutput)
     if err != nil {
+        merrs := new(multierror.Error)
+        merrs.Errors = append(merrs.Errors, err)
         d.logger.Println("[ERR] driver.raw_exec: error connecting to plugin so destroying plugin pid and user pid")
         if e := destroyPlugin(id.PluginConfig.Pid, id.UserPid); e != nil {
-            d.logger.Printf("[ERR] driver.raw_exec: error destroying plugin and userpid: %v", e)
+            merrs.Errors = append(merrs.Errors, fmt.Errorf("error destroying plugin and userpid: %v", e))
         }
-        return nil, fmt.Errorf("error connecting to plugin: %v", err)
+        if id.IsolationConfig != nil {
+            ePid := pluginConfig.Reattach.Pid
+            if e := executor.ClientCleanup(id.IsolationConfig, ePid); e != nil {
+                merrs.Errors = append(merrs.Errors, fmt.Errorf("destroying resource container failed: %v", e))
+            }
+        }
+        return nil, fmt.Errorf("error connecting to plugin: %v", merrs.ErrorOrNil())
     }

     ver, _ := exec.Version()
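The reattach failure path now collects every cleanup error instead of logging and dropping them. The pattern, isolated with go-multierror (the function name and arguments below are illustrative, not Nomad code):

package main

import (
    "errors"
    "fmt"

    multierror "github.com/hashicorp/go-multierror"
)

// cleanupAfterFailedReattach collects the plugin-connection error plus any
// cleanup failures, then reports them as one error, as Open does above.
func cleanupAfterFailedReattach(connectErr error, cleanups ...func() error) error {
    merrs := new(multierror.Error)
    merrs.Errors = append(merrs.Errors, connectErr)
    for _, c := range cleanups {
        if err := c(); err != nil {
            merrs.Errors = append(merrs.Errors, err)
        }
    }
    return fmt.Errorf("error connecting to plugin: %v", merrs.ErrorOrNil())
}

func main() {
    err := cleanupAfterFailedReattach(
        errors.New("connection refused"),
        func() error { return errors.New("destroying resource container failed") },
    )
    fmt.Println(err)
}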
@@ -214,17 +244,18 @@ func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {

     // Return a driver handle
     h := &rawExecHandle{
         pluginClient:    pluginClient,
         executor:        exec,
         userPid:         id.UserPid,
+        isolationConfig: id.IsolationConfig,
         logger:          d.logger,
         killTimeout:     id.KillTimeout,
         maxKillTimeout:  id.MaxKillTimeout,
         version:         id.Version,
         doneCh:          make(chan struct{}),
         waitCh:          make(chan *dstructs.WaitResult, 1),
         taskEnv:         ctx.TaskEnv,
         taskDir:         ctx.TaskDir,
     }
     go h.run()
     return h, nil
@@ -232,11 +263,12 @@ func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {

 func (h *rawExecHandle) ID() string {
     id := rawExecId{
         Version:         h.version,
         KillTimeout:     h.killTimeout,
         MaxKillTimeout:  h.maxKillTimeout,
         PluginConfig:    NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
         UserPid:         h.userPid,
+        IsolationConfig: h.isolationConfig,
     }

     data, err := json.Marshal(id)
@@ -298,8 +330,15 @@ func (h *rawExecHandle) run() {
     ps, werr := h.executor.Wait()
     close(h.doneCh)
     if ps.ExitCode == 0 && werr != nil {
-        if e := killProcess(h.userPid); e != nil {
-            h.logger.Printf("[ERR] driver.raw_exec: error killing user process: %v", e)
+        if h.isolationConfig != nil {
+            ePid := h.pluginClient.ReattachConfig().Pid
+            if e := executor.ClientCleanup(h.isolationConfig, ePid); e != nil {
+                h.logger.Printf("[ERR] driver.raw_exec: destroying resource container failed: %v", e)
+            }
+        } else {
+            if e := killProcess(h.userPid); e != nil {
+                h.logger.Printf("[ERR] driver.raw_exec: error killing user process: %v", e)
+            }
         }
     }
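run() now prefers destroying the whole resource container and only signals the single user pid when no cgroup exists, so forked children aren't orphaned. A condensed sketch of that branch; destroyContainer stands in for executor.ClientCleanup and the helper itself is illustrative only.

package main

import (
    "log"
    "os"
    "syscall"
)

// cleanupTask prefers tearing down the whole resource container, and only
// falls back to signalling the single user pid when no cgroup was created.
func cleanupTask(haveCgroup bool, userPid int, destroyContainer func() error, logger *log.Logger) {
    if haveCgroup {
        if err := destroyContainer(); err != nil {
            logger.Printf("[ERR] destroying resource container failed: %v", err)
        }
        return
    }
    proc, err := os.FindProcess(userPid)
    if err != nil {
        logger.Printf("[ERR] can't find process %d: %v", userPid, err)
        return
    }
    if err := proc.Signal(syscall.SIGKILL); err != nil {
        logger.Printf("[ERR] error killing user process: %v", err)
    }
}

func main() {
    logger := log.New(os.Stderr, "", log.LstdFlags)
    // Harmless invocation: the cgroup branch with a no-op destroy function.
    cleanupTask(true, os.Getpid(), func() error { return nil }, logger)
}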
@@ -33,7 +33,7 @@ func TestRawExecDriver_Fingerprint(t *testing.T) {
     }

     // Disable raw exec.
-    cfg := &config.Config{Options: map[string]string{rawExecConfigOption: "false"}}
+    cfg := &config.Config{Options: map[string]string{rawExecEnableOption: "false"}}

     request := &cstructs.FingerprintRequest{Config: cfg, Node: node}
     var response cstructs.FingerprintResponse
@@ -47,7 +47,7 @@ func TestRawExecDriver_Fingerprint(t *testing.T) {
     }

     // Enable raw exec.
-    request.Config.Options[rawExecConfigOption] = "true"
+    request.Config.Options[rawExecEnableOption] = "true"
     err = d.Fingerprint(request, &response)
     if err != nil {
         t.Fatalf("err: %v", err)