Use cgroup when possible
parent 7affe3bd40
commit ffd9270f2f
@@ -118,6 +118,11 @@ type ExecCommand struct {
     // ResourceLimits determines whether resource limits are enforced by the
     // executor.
     ResourceLimits bool
+
+    // Cgroup marks whether we put the process in a cgroup. Setting this field
+    // doesn't enforce resource limits. To enforce limits, set ResourceLimits.
+    // Using the cgroup does allow more precise cleanup of processes.
+    Cgroup bool
 }

 // ProcessState holds information about the state of a user process.
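The new flag decouples tracking a process tree in a cgroup from enforcing limits through it: every gate changed below widens from `ResourceLimits` to `ResourceLimits || Cgroup`. A self-contained sketch of that predicate follows; only the two flags come from the struct above, the scaffolding is illustrative and not Nomad code.

package main

import "fmt"

// ExecCommand mirrors just the two flags from the diff above; the real
// struct in the executor package carries many more fields.
type ExecCommand struct {
	ResourceLimits bool
	Cgroup         bool
}

// usesCgroup is the condition this commit threads through Exit, Stats,
// applyLimits, and getAllPids: a cgroup is used either for enforcement
// or purely for process-tree bookkeeping.
func usesCgroup(c ExecCommand) bool {
	return c.ResourceLimits || c.Cgroup
}

func main() {
	fmt.Println(usesCgroup(ExecCommand{Cgroup: true}))         // true: cleanup only
	fmt.Println(usesCgroup(ExecCommand{ResourceLimits: true})) // true: limits too
	fmt.Println(usesCgroup(ExecCommand{}))                     // false: plain pid handling
}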
@@ -497,7 +502,7 @@ func (e *UniversalExecutor) Exit() error {
     }

     // Prefer killing the process via the resource container.
-    if e.cmd.Process != nil && !e.command.ResourceLimits {
+    if e.cmd.Process != nil && !(e.command.ResourceLimits || e.command.Cgroup) {
         proc, err := os.FindProcess(e.cmd.Process.Pid)
         if err != nil {
             e.logger.Printf("[ERR] executor: can't find process with pid: %v, err: %v",

@@ -508,7 +513,7 @@ func (e *UniversalExecutor) Exit() error {
         }
     }

-    if e.command.ResourceLimits {
+    if e.command.ResourceLimits || e.command.Cgroup {
         if err := e.resConCtx.executorCleanup(); err != nil {
             merr.Errors = append(merr.Errors, err)
         }
@@ -36,7 +36,7 @@ func (e *UniversalExecutor) configureIsolation() error {
         }
     }

-    if e.command.ResourceLimits {
+    if e.command.ResourceLimits || e.command.Cgroup {
         if err := e.configureCgroups(e.ctx.Task.Resources); err != nil {
             return fmt.Errorf("error creating cgroups: %v", err)
         }

@@ -46,7 +46,7 @@ func (e *UniversalExecutor) configureIsolation() error {

 // applyLimits puts a process in a pre-configured cgroup
 func (e *UniversalExecutor) applyLimits(pid int) error {
-    if !e.command.ResourceLimits {
+    if !(e.command.ResourceLimits || e.command.Cgroup) {
         return nil
     }
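At the kernel interface, putting a process in a pre-configured cgroup boils down to writing its pid into that cgroup's cgroup.procs file; descendants then inherit membership automatically. A minimal cgroup-v1 sketch of what applyLimits ultimately delegates to libcontainer (the freezer hierarchy and mount path are assumptions for illustration):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
)

// joinCgroup places a pid in a cgroup by appending it to cgroup.procs.
// The freezer hierarchy and the /sys/fs/cgroup mount are assumed; the
// real executor joins every mounted subsystem via libcontainer.
func joinCgroup(cgroupPath string, pid int) error {
	procs := filepath.Join("/sys/fs/cgroup/freezer", cgroupPath, "cgroup.procs")
	return os.WriteFile(procs, []byte(strconv.Itoa(pid)), 0644)
}

func main() {
	if err := joinCgroup("/nomad/example", os.Getpid()); err != nil {
		fmt.Println("join failed (expected unless the cgroup exists):", err)
	}
}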
@@ -76,9 +76,14 @@ func (e *UniversalExecutor) configureCgroups(resources *structs.Resources) error {
     cgroupName := uuid.Generate()
     e.resConCtx.groups.Path = filepath.Join("/nomad", cgroupName)

     // TODO: verify this is needed for things like network access
     // Allow access to /dev/
     e.resConCtx.groups.Resources.AllowAllDevices = true

+    // Use a cgroup but don't apply limits
+    if !e.command.ResourceLimits {
+        return nil
+    }
+
     if resources.MemoryMB > 0 {
         // Total amount of memory allowed to consume
         e.resConCtx.groups.Resources.Memory = int64(resources.MemoryMB * 1024 * 1024)
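The early return means that in cgroup-only mode the cgroup config carries nothing but device access; limits are only written when ResourceLimits is set. A sketch of the resulting config under the two modes, assuming resConCtx.groups is a *configs.Cgroup from github.com/opencontainers/runc/libcontainer/configs of the vintage this code targets (the field names in the diff suggest that, but the exact types are an assumption):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/opencontainers/runc/libcontainer/configs"
)

// buildCgroupConfig sketches the two modes configureCgroups now supports.
func buildCgroupConfig(name string, resourceLimits bool, memoryMB int) *configs.Cgroup {
	groups := &configs.Cgroup{
		Path:      filepath.Join("/nomad", name),
		Resources: &configs.Resources{},
	}
	// Always allow device access: the cgroup may be used purely for
	// tracking, not confinement.
	groups.Resources.AllowAllDevices = true

	// Cgroup-only mode: stop before writing any limits.
	if !resourceLimits {
		return groups
	}
	if memoryMB > 0 {
		groups.Resources.Memory = int64(memoryMB) * 1024 * 1024
	}
	return groups
}

func main() {
	fmt.Println(buildCgroupConfig("example", false, 256).Resources.Memory) // 0
	fmt.Println(buildCgroupConfig("example", true, 256).Resources.Memory)  // 268435456
}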
@@ -110,7 +115,7 @@ func (e *UniversalExecutor) configureCgroups(resources *structs.Resources) error {
 // isolation we aggregate the resource utilization of all the pids launched by
 // the executor.
 func (e *UniversalExecutor) Stats() (*cstructs.TaskResourceUsage, error) {
-    if !e.command.ResourceLimits {
+    if !(e.command.ResourceLimits || e.command.Cgroup) {
         pidStats, err := e.pidStats()
         if err != nil {
             return nil, err

@@ -234,7 +239,7 @@ func (e *UniversalExecutor) configureChroot() error {
 // isolation and we scan the entire process table if the user is not using any
 // isolation
 func (e *UniversalExecutor) getAllPids() (map[int]*nomadPid, error) {
-    if e.command.ResourceLimits {
+    if e.command.ResourceLimits || e.command.Cgroup {
         manager := getCgroupManager(e.resConCtx.groups, e.resConCtx.cgPaths)
         pids, err := manager.GetAllPids()
         if err != nil {
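The "more precise cleanup" claim rests on the kernel tracking membership for us: every descendant of a process placed in a cgroup appears in that cgroup's cgroup.procs file, so the executor can enumerate the whole tree instead of scanning the process table. A minimal sketch of that enumeration, reading cgroup v1's cgroup.procs directly (the real code goes through libcontainer's manager; the mount path here is assumed):

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// cgroupPids lists every PID currently in the given cgroup by reading its
// cgroup.procs file, the same information manager.GetAllPids() returns.
func cgroupPids(cgroupPath string) ([]int, error) {
	data, err := os.ReadFile("/sys/fs/cgroup/freezer" + cgroupPath + "/cgroup.procs")
	if err != nil {
		return nil, err
	}
	var pids []int
	for _, field := range strings.Fields(string(data)) {
		pid, err := strconv.Atoi(field)
		if err != nil {
			return nil, err
		}
		pids = append(pids, pid)
	}
	return pids, nil
}

func main() {
	pids, err := cgroupPids("/nomad/example")
	if err != nil {
		fmt.Println("read failed (expected unless the cgroup exists):", err)
		return
	}
	fmt.Println(pids)
}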
@@ -7,8 +7,11 @@ import (
     "log"
     "os"
     "path/filepath"
+    "runtime"
+    "syscall"
     "time"

+    multierror "github.com/hashicorp/go-multierror"
     "github.com/hashicorp/go-plugin"
     "github.com/hashicorp/nomad/client/allocdir"
     "github.com/hashicorp/nomad/client/driver/env"
@@ -22,8 +25,11 @@ import (
 )

 const (
-    // The option that enables this driver in the Config.Options map.
-    rawExecConfigOption = "driver.raw_exec.enable"
+    // rawExecEnableOption is the option that enables this driver in the Config.Options map.
+    rawExecEnableOption = "driver.raw_exec.enable"
+
+    // rawExecNoCgroupOption forces no cgroups.
+    rawExecNoCgroupOption = "driver.raw_exec.no_cgroups"

     // The key populated in Node Attributes to indicate presence of the Raw Exec
     // driver
@@ -36,6 +42,10 @@ const (
 type RawExecDriver struct {
     DriverContext
     fingerprint.StaticFingerprinter
+
+    // useCgroup tracks whether we should use a cgroup to manage the process
+    // tree
+    useCgroup bool
 }

 // rawExecHandle is returned from Start/Open as a handle to the PID

@@ -44,6 +54,7 @@ type rawExecHandle struct {
     pluginClient    *plugin.Client
     userPid         int
     executor        executor.Executor
+    isolationConfig *dstructs.IsolationConfig
     killTimeout     time.Duration
     maxKillTimeout  time.Duration
     logger          *log.Logger
@@ -93,7 +104,7 @@ func (d *RawExecDriver) FSIsolation() cstructs.FSIsolation {

 func (d *RawExecDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
     // Check that the user has explicitly enabled this executor.
-    enabled := req.Config.ReadBoolDefault(rawExecConfigOption, false)
+    enabled := req.Config.ReadBoolDefault(rawExecEnableOption, false)

     if enabled || req.Config.DevMode {
         d.logger.Printf("[WARN] driver.raw_exec: raw exec is enabled. Only enable if needed")
@@ -107,6 +118,14 @@ func (d *RawExecDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstr
     }

 func (d *RawExecDriver) Prestart(*ExecContext, *structs.Task) (*PrestartResponse, error) {
+    // If we are on linux, running as root, cgroups are mounted, and cgroups
+    // aren't disabled by the operator, use cgroups for pid management.
+    forceDisable := d.DriverContext.config.ReadBoolDefault(rawExecNoCgroupOption, false)
+    if !forceDisable && runtime.GOOS == "linux" &&
+        syscall.Geteuid() == 0 && cgroupsMounted(d.DriverContext.node) {
+        d.useCgroup = true
+    }
+
     return nil, nil
 }
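The gate is conservative: all four conditions must hold, and driver.raw_exec.no_cgroups gives operators an explicit opt-out even on a root Linux client. A self-contained, Linux-only sketch of the same predicate as a pure function (cgroupsMounted is stubbed as a parameter here, since the real check reads the client node's attributes):

package main

import (
	"fmt"
	"runtime"
	"syscall"
)

// useCgroup reproduces Prestart's gate: cgroups are used only on Linux,
// as root, with cgroups mounted, and without the operator opt-out.
func useCgroup(forceDisable, cgroupsMounted bool) bool {
	return !forceDisable && runtime.GOOS == "linux" &&
		syscall.Geteuid() == 0 && cgroupsMounted
}

func main() {
	// driver.raw_exec.no_cgroups wins over everything else.
	fmt.Println(useCgroup(true, true))  // false
	fmt.Println(useCgroup(false, true)) // true only when run as root on Linux
}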
@@ -154,6 +173,7 @@ func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (*StartRespo
         Args:           driverConfig.Args,
         User:           task.User,
         TaskKillSignal: taskKillSignal,
+        Cgroup:         d.useCgroup,
     }
     ps, err := exec.LaunchCmd(execCmd)
     if err != nil {

@@ -167,6 +187,7 @@ func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (*StartRespo
     h := &rawExecHandle{
         pluginClient:    pluginClient,
         executor:        exec,
+        isolationConfig: ps.IsolationConfig,
         userPid:         ps.Pid,
         killTimeout:     GetKillTimeout(task.KillTimeout, maxKill),
         maxKillTimeout:  maxKill,

@@ -189,6 +210,7 @@ type rawExecId struct {
     MaxKillTimeout  time.Duration
     UserPid         int
     PluginConfig    *PluginReattachConfig
+    IsolationConfig *dstructs.IsolationConfig
 }

 func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
@@ -202,11 +224,19 @@ func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, e
     }
     exec, pluginClient, err := createExecutorWithConfig(pluginConfig, d.config.LogOutput)
     if err != nil {
+        merrs := new(multierror.Error)
+        merrs.Errors = append(merrs.Errors, err)
         d.logger.Println("[ERR] driver.raw_exec: error connecting to plugin so destroying plugin pid and user pid")
         if e := destroyPlugin(id.PluginConfig.Pid, id.UserPid); e != nil {
-            d.logger.Printf("[ERR] driver.raw_exec: error destroying plugin and userpid: %v", e)
+            merrs.Errors = append(merrs.Errors, fmt.Errorf("error destroying plugin and userpid: %v", e))
         }
-        return nil, fmt.Errorf("error connecting to plugin: %v", err)
+        if id.IsolationConfig != nil {
+            ePid := pluginConfig.Reattach.Pid
+            if e := executor.ClientCleanup(id.IsolationConfig, ePid); e != nil {
+                merrs.Errors = append(merrs.Errors, fmt.Errorf("destroying resource container failed: %v", e))
+            }
+        }
+        return nil, fmt.Errorf("error connecting to plugin: %v", merrs.ErrorOrNil())
     }

     ver, _ := exec.Version()
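The reworked error path collects every cleanup failure instead of logging some and returning only the first. A self-contained sketch of the same go-multierror accumulation pattern, with stand-in errors:

package main

import (
	"errors"
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

func main() {
	// Accumulate independent failures, as Open now does on its error path.
	merrs := new(multierror.Error)
	merrs.Errors = append(merrs.Errors, errors.New("error connecting to plugin"))
	merrs.Errors = append(merrs.Errors, errors.New("destroying resource container failed: permission denied"))

	// ErrorOrNil returns nil when nothing was appended, so callers need no
	// special case for the happy path.
	fmt.Println(merrs.ErrorOrNil())
}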
@@ -217,6 +247,7 @@ func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, e
         pluginClient:    pluginClient,
         executor:        exec,
         userPid:         id.UserPid,
+        isolationConfig: id.IsolationConfig,
         logger:          d.logger,
         killTimeout:     id.KillTimeout,
         maxKillTimeout:  id.MaxKillTimeout,

@@ -237,6 +268,7 @@ func (h *rawExecHandle) ID() string {
         MaxKillTimeout:  h.maxKillTimeout,
         PluginConfig:    NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
         UserPid:         h.userPid,
+        IsolationConfig: h.isolationConfig,
     }

     data, err := json.Marshal(id)
@@ -298,10 +330,17 @@ func (h *rawExecHandle) run() {
     ps, werr := h.executor.Wait()
     close(h.doneCh)
     if ps.ExitCode == 0 && werr != nil {
+        if h.isolationConfig != nil {
+            ePid := h.pluginClient.ReattachConfig().Pid
+            if e := executor.ClientCleanup(h.isolationConfig, ePid); e != nil {
+                h.logger.Printf("[ERR] driver.raw_exec: destroying resource container failed: %v", e)
+            }
+        } else {
             if e := killProcess(h.userPid); e != nil {
                 h.logger.Printf("[ERR] driver.raw_exec: error killing user process: %v", e)
             }
+        }
     }

     // Exit the executor
     if err := h.executor.Exit(); err != nil {
@@ -33,7 +33,7 @@ func TestRawExecDriver_Fingerprint(t *testing.T) {
     }

     // Disable raw exec.
-    cfg := &config.Config{Options: map[string]string{rawExecConfigOption: "false"}}
+    cfg := &config.Config{Options: map[string]string{rawExecEnableOption: "false"}}

     request := &cstructs.FingerprintRequest{Config: cfg, Node: node}
     var response cstructs.FingerprintResponse

@@ -47,7 +47,7 @@ func TestRawExecDriver_Fingerprint(t *testing.T) {
     }

     // Enable raw exec.
-    request.Config.Options[rawExecConfigOption] = "true"
+    request.Config.Options[rawExecEnableOption] = "true"
     err = d.Fingerprint(request, &response)
     if err != nil {
         t.Fatalf("err: %v", err)