5dee1141d1
* client/executor: refactor client to remove interpolation
* executor: POC libcontainer based executor
* vendor: use hashicorp libcontainer fork
* vendor: add libcontainer/nsenter dep
* executor: updated executor interface to simplify operations
* executor: implement logging pipe
* logmon: new logmon plugin to manage task logs
* driver/executor: use logmon for log management
* executor: fix tests and windows build
* executor: fix logging key names
* executor: fix test failures
* executor: add config field to toggle between using libcontainer and standard executors
* logmon: use discover utility to discover nomad executable
* executor: only call libcontainer-shim on main in linux
* logmon: use separate path configs for stdout/stderr fifos
* executor: windows fixes
* executor: created reusable pid stats collection utility that can be used in an executor
* executor: update fifo.Open calls
* executor: fix build
* remove executor from docker driver
* executor: Shutdown func to kill and cleanup executor and its children
* executor: move linux specific universal executor funcs to separate file
* move logmon initialization to a task runner hook
* client: doc fixes and renaming from code review
* taskrunner: use shared config struct for logmon fifo fields
* taskrunner: logmon only needs to be started once per task
337 lines
9.4 KiB
Go
package driver

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"runtime"
	"syscall"
	"time"

	multierror "github.com/hashicorp/go-multierror"
	"github.com/hashicorp/go-plugin"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/driver/env"
	"github.com/hashicorp/nomad/client/driver/executor"
	dstructs "github.com/hashicorp/nomad/client/driver/structs"
	"github.com/hashicorp/nomad/client/fingerprint"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/helper/fields"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/mitchellh/mapstructure"
)

const (
	// rawExecEnableOption is the option that enables this driver in the
	// Config.Options map.
	rawExecEnableOption = "driver.raw_exec.enable"

	// rawExecNoCgroupOption forces no cgroups.
	rawExecNoCgroupOption = "driver.raw_exec.no_cgroups"

	// rawExecDriverAttr is the key populated in Node Attributes to indicate
	// presence of the Raw Exec driver.
	rawExecDriverAttr = "driver.raw_exec"
)
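
// The driver must be explicitly enabled by the operator before it is
// fingerprinted (dev mode enables it automatically); an illustrative client
// configuration snippet (HCL shown for illustration only):
//
//	client {
//	  options = {
//	    "driver.raw_exec.enable" = "1"
//	  }
//	}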

// The RawExecDriver is a privileged version of the exec driver. It provides no
// resource isolation and just fork/execs. The Exec driver should be preferred
// and this should only be used when explicitly needed.
type RawExecDriver struct {
	DriverContext
	fingerprint.StaticFingerprinter

	// useCgroup tracks whether we should use a cgroup to manage the process
	// tree
	useCgroup bool
}

// rawExecHandle is returned from Start/Open as a handle to the PID
type rawExecHandle struct {
	version        string
	pluginClient   *plugin.Client
	userPid        int
	executor       executor.Executor
	killTimeout    time.Duration
	maxKillTimeout time.Duration
	shutdownSignal string
	logger         *log.Logger
	waitCh         chan *dstructs.WaitResult
	doneCh         chan struct{}
	taskEnv        *env.TaskEnv
	taskDir        *allocdir.TaskDir
}

// NewRawExecDriver is used to create a new raw exec driver
func NewRawExecDriver(ctx *DriverContext) Driver {
	return &RawExecDriver{DriverContext: *ctx}
}

// Validate is used to validate the driver configuration
func (d *RawExecDriver) Validate(config map[string]interface{}) error {
	fd := &fields.FieldData{
		Raw: config,
		Schema: map[string]*fields.FieldSchema{
			"command": {
				Type:     fields.TypeString,
				Required: true,
			},
			"args": {
				Type: fields.TypeArray,
			},
		},
	}

	if err := fd.Validate(); err != nil {
		return err
	}

	return nil
}
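
// Abilities returns the driver's capabilities: the raw exec driver supports
// sending signals to the task and executing commands in its context.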
func (d *RawExecDriver) Abilities() DriverAbilities {
	return DriverAbilities{
		SendSignals: true,
		Exec:        true,
	}
}
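
// FSIsolation returns FSIsolationNone; the raw exec driver provides no
// filesystem isolation.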
func (d *RawExecDriver) FSIsolation() cstructs.FSIsolation {
	return cstructs.FSIsolationNone
}
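
// Fingerprint advertises the raw exec driver on the node only when it has
// been explicitly enabled by the operator or the client runs in dev mode.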
func (d *RawExecDriver) Fingerprint(req *cstructs.FingerprintRequest, resp *cstructs.FingerprintResponse) error {
	// Check that the user has explicitly enabled this driver.
	enabled := req.Config.ReadBoolDefault(rawExecEnableOption, false)

	if enabled || req.Config.DevMode {
		d.logger.Printf("[WARN] driver.raw_exec: raw exec is enabled. Only enable if needed")
		resp.AddAttribute(rawExecDriverAttr, "1")
		resp.Detected = true
		return nil
	}

	resp.RemoveAttribute(rawExecDriverAttr)
	return nil
}
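
// Prestart determines whether a cgroup should be used to manage the task's
// process tree before the task is started.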
func (d *RawExecDriver) Prestart(*ExecContext, *structs.Task) (*PrestartResponse, error) {
	// If we are on linux, running as root, cgroups are mounted, and cgroups
	// aren't disabled by the operator, use cgroups for pid management.
	forceDisable := d.DriverContext.config.ReadBoolDefault(rawExecNoCgroupOption, false)
	if !forceDisable && runtime.GOOS == "linux" &&
		syscall.Geteuid() == 0 && cgroupsMounted(d.DriverContext.node) {
		d.useCgroup = true
	}

	return nil, nil
}
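
// Start launches the task's command inside a new executor plugin process and
// returns a handle to it. An illustrative task config for this driver (the
// keys match the schema in Validate; HCL shown for illustration only):
//
//	config {
//	  command = "/bin/sleep"
//	  args    = ["30"]
//	}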
func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (*StartResponse, error) {
	var driverConfig ExecDriverConfig
	if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil {
		return nil, err
	}

	// Get the command to be run
	command := driverConfig.Command
	if err := validateCommand(command, "args"); err != nil {
		return nil, err
	}

	pluginLogFile := filepath.Join(ctx.TaskDir.Dir, "executor.out")
	executorConfig := &dstructs.ExecutorConfig{
		LogFile:  pluginLogFile,
		LogLevel: d.config.LogLevel,
	}

	exec, pluginClient, err := createExecutor(d.config.LogOutput, d.config, executorConfig)
	if err != nil {
		return nil, err
	}

	// Validate the configured kill signal before launching the task.
	_, err = getTaskKillSignal(task.KillSignal)
	if err != nil {
		return nil, err
	}

	execCmd := &executor.ExecCommand{
		Cmd:                command,
		Args:               ctx.TaskEnv.ParseAndReplace(driverConfig.Args),
		User:               task.User,
		BasicProcessCgroup: d.useCgroup,
		Env:                ctx.TaskEnv.List(),
		TaskDir:            ctx.TaskDir.Dir,
		StdoutPath:         ctx.StdoutFifo,
		StderrPath:         ctx.StderrFifo,
	}
	ps, err := exec.Launch(execCmd)
	if err != nil {
		pluginClient.Kill()
		return nil, err
	}
	d.logger.Printf("[DEBUG] driver.raw_exec: started process with pid: %v", ps.Pid)

	// Return a driver handle
	maxKill := d.DriverContext.config.MaxKillTimeout
	h := &rawExecHandle{
		pluginClient:   pluginClient,
		executor:       exec,
		userPid:        ps.Pid,
		shutdownSignal: task.KillSignal,
		killTimeout:    GetKillTimeout(task.KillTimeout, maxKill),
		maxKillTimeout: maxKill,
		version:        d.config.Version.VersionNumber(),
		logger:         d.logger,
		doneCh:         make(chan struct{}),
		waitCh:         make(chan *dstructs.WaitResult, 1),
		taskEnv:        ctx.TaskEnv,
		taskDir:        ctx.TaskDir,
	}
	go h.run()
	return &StartResponse{Handle: h}, nil
}
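
// Cleanup is a no-op for the raw exec driver.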
func (d *RawExecDriver) Cleanup(*ExecContext, *CreatedResources) error { return nil }
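
// rawExecId is the state serialized into a handle ID so that the driver can
// reattach to the executor plugin and user process after a client restart.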
type rawExecId struct {
	Version        string
	KillTimeout    time.Duration
	MaxKillTimeout time.Duration
	UserPid        int
	PluginConfig   *PluginReattachConfig
	ShutdownSignal string
}
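
// Open reattaches to an existing executor plugin using the state encoded in
// the handle ID, destroying the plugin and user process if reattachment fails.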
func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) {
	id := &rawExecId{}
	if err := json.Unmarshal([]byte(handleID), id); err != nil {
		return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err)
	}

	pluginConfig := &plugin.ClientConfig{
		Reattach: id.PluginConfig.PluginConfig(),
	}
	exec, pluginClient, err := createExecutorWithConfig(pluginConfig, d.config.LogOutput)
	if err != nil {
		merrs := new(multierror.Error)
		merrs.Errors = append(merrs.Errors, err)
		d.logger.Println("[ERR] driver.raw_exec: error connecting to plugin so destroying plugin pid and user pid")
		if e := destroyPlugin(id.PluginConfig.Pid, id.UserPid); e != nil {
			merrs.Errors = append(merrs.Errors, fmt.Errorf("error destroying plugin and userpid: %v", e))
		}
		return nil, fmt.Errorf("error connecting to plugin: %v", merrs.ErrorOrNil())
	}

	ver, _ := exec.Version()
	d.logger.Printf("[DEBUG] driver.raw_exec: version of executor: %v", ver.Version)

	// Return a driver handle
	h := &rawExecHandle{
		pluginClient:   pluginClient,
		executor:       exec,
		userPid:        id.UserPid,
		logger:         d.logger,
		shutdownSignal: id.ShutdownSignal,
		killTimeout:    id.KillTimeout,
		maxKillTimeout: id.MaxKillTimeout,
		version:        id.Version,
		doneCh:         make(chan struct{}),
		waitCh:         make(chan *dstructs.WaitResult, 1),
		taskEnv:        ctx.TaskEnv,
		taskDir:        ctx.TaskDir,
	}
	go h.run()
	return h, nil
}
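
// ID returns the handle's state serialized as JSON, suitable for passing back
// to Open to reattach to the task.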
func (h *rawExecHandle) ID() string {
	id := rawExecId{
		Version:        h.version,
		KillTimeout:    h.killTimeout,
		MaxKillTimeout: h.maxKillTimeout,
		PluginConfig:   NewPluginReattachConfig(h.pluginClient.ReattachConfig()),
		UserPid:        h.userPid,
		ShutdownSignal: h.shutdownSignal,
	}

	data, err := json.Marshal(id)
	if err != nil {
		h.logger.Printf("[ERR] driver.raw_exec: failed to marshal ID to JSON: %s", err)
	}
	return string(data)
}
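
// WaitCh returns the channel on which the task's wait result is delivered.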
func (h *rawExecHandle) WaitCh() chan *dstructs.WaitResult {
	return h.waitCh
}
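
// Update stores the task's updated kill timeout; no other in-place updates
// are supported.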
func (h *rawExecHandle) Update(task *structs.Task) error {
	// Store the updated kill timeout.
	h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout)

	// Update is not possible
	return nil
}
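
// Exec runs the given command in the task's directory with the task's
// environment, returning its output and exit code.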
func (h *rawExecHandle) Exec(ctx context.Context, cmd string, args []string) ([]byte, int, error) {
	return executor.ExecScript(ctx, h.taskDir.Dir, h.taskEnv.List(), nil, h.taskEnv.ReplaceEnv(cmd), h.taskEnv.ParseAndReplace(args))
}
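
// Signal forwards the given signal to the user process via the executor.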
func (h *rawExecHandle) Signal(s os.Signal) error {
	return h.executor.Signal(s)
}
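
// Network returns nil; the raw exec driver does not configure a driver
// network.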
func (h *rawExecHandle) Network() *cstructs.DriverNetwork {
	return nil
}
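
// Kill interrupts the user process and, if it does not exit within the kill
// timeout, shuts the executor down with the configured shutdown signal.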
func (h *rawExecHandle) Kill() error {
	if err := h.executor.Signal(os.Interrupt); err != nil {
		if h.pluginClient.Exited() {
			return nil
		}
		return fmt.Errorf("executor Signal failed: %v", err)
	}

	select {
	case <-h.doneCh:
		return nil
	case <-time.After(h.killTimeout):
		if h.pluginClient.Exited() {
			return nil
		}
		if err := h.executor.Shutdown(h.shutdownSignal, h.killTimeout); err != nil {
			return fmt.Errorf("executor Shutdown failed: %v", err)
		}

		return nil
	}
}
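
// Stats returns resource usage statistics for the task's process tree.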
func (h *rawExecHandle) Stats() (*cstructs.TaskResourceUsage, error) {
	return h.executor.Stats()
}
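
// run waits for the user process to exit, tears down the executor plugin, and
// delivers the wait result on waitCh.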
func (h *rawExecHandle) run() {
	ps, werr := h.executor.Wait()
	close(h.doneCh)
	if ps.ExitCode == 0 && werr != nil {
		if e := killProcess(h.userPid); e != nil {
			h.logger.Printf("[ERR] driver.raw_exec: error killing user process: %v", e)
		}
	}

	// Destroy the executor
	if err := h.executor.Shutdown(h.shutdownSignal, 0); err != nil {
		h.logger.Printf("[ERR] driver.raw_exec: error killing executor: %v", err)
	}
	h.pluginClient.Kill()

	// Send the results
	h.waitCh <- &dstructs.WaitResult{ExitCode: ps.ExitCode, Signal: ps.Signal, Err: werr}
	close(h.waitCh)
}