2016-07-10 06:56:31 +00:00
|
|
|
package executor
|
|
|
|
|
|
|
|
import (
|
2018-12-05 16:07:48 +00:00
|
|
|
"context"
|
2018-09-24 18:37:45 +00:00
|
|
|
"fmt"
|
2016-07-10 06:56:31 +00:00
|
|
|
"io/ioutil"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2019-05-25 02:30:45 +00:00
|
|
|
"regexp"
|
2016-07-10 06:56:31 +00:00
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"testing"
|
2017-01-26 04:58:24 +00:00
|
|
|
"time"
|
2016-07-10 06:56:31 +00:00
|
|
|
|
2016-12-03 01:04:07 +00:00
|
|
|
"github.com/hashicorp/nomad/client/allocdir"
|
2018-11-30 11:18:39 +00:00
|
|
|
"github.com/hashicorp/nomad/client/taskenv"
|
2016-07-10 06:56:31 +00:00
|
|
|
"github.com/hashicorp/nomad/client/testutil"
|
2018-06-13 22:33:25 +00:00
|
|
|
"github.com/hashicorp/nomad/helper/testlog"
|
2016-08-04 22:03:56 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/mock"
|
2018-12-07 02:39:53 +00:00
|
|
|
"github.com/hashicorp/nomad/plugins/drivers"
|
2018-09-24 18:37:45 +00:00
|
|
|
tu "github.com/hashicorp/nomad/testutil"
|
2019-12-11 15:28:41 +00:00
|
|
|
"github.com/opencontainers/runc/libcontainer/cgroups"
|
2018-12-10 03:30:23 +00:00
|
|
|
lconfigs "github.com/opencontainers/runc/libcontainer/configs"
|
2018-09-24 18:37:45 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
2018-12-10 03:30:23 +00:00
|
|
|
"golang.org/x/sys/unix"
|
2016-07-10 06:56:31 +00:00
|
|
|
)
|
|
|
|
|
2018-09-24 18:37:45 +00:00
|
|
|
// init registers the libcontainer-based executor factory into the shared
// factory map so the common executor test suite also exercises
// LibcontainerExecutor (in addition to the universal executor).
func init() {
	executorFactories["LibcontainerExecutor"] = libcontainerFactory
}
|
|
|
|
|
2019-03-26 13:06:36 +00:00
|
|
|
// libcontainerFactory builds executors with full isolation for the shared
// test suite. ResourceLimits is enabled so libcontainer resource isolation
// is actually exercised, and a minimal rootfs is staged in the task
// directory because the isolated executor runs inside a chroot.
var libcontainerFactory = executorFactory{
	new: NewExecutorWithIsolation,
	configureExecCmd: func(t *testing.T, cmd *ExecCommand) {
		cmd.ResourceLimits = true
		setupRootfs(t, cmd.TaskDir)
	},
}
|
|
|
|
|
2016-12-03 01:04:07 +00:00
|
|
|
// testExecutorContextWithChroot returns an ExecutorContext and AllocDir with
|
|
|
|
// chroot. Use testExecutorContext if you don't need a chroot.
|
|
|
|
//
|
|
|
|
// The caller is responsible for calling AllocDir.Destroy() to cleanup.
|
2019-04-01 15:59:56 +00:00
|
|
|
func testExecutorCommandWithChroot(t *testing.T) *testExecCmd {
|
2016-12-03 01:04:07 +00:00
|
|
|
chrootEnv := map[string]string{
|
|
|
|
"/etc/ld.so.cache": "/etc/ld.so.cache",
|
|
|
|
"/etc/ld.so.conf": "/etc/ld.so.conf",
|
|
|
|
"/etc/ld.so.conf.d": "/etc/ld.so.conf.d",
|
2019-05-24 20:06:50 +00:00
|
|
|
"/etc/passwd": "/etc/passwd",
|
2016-12-03 01:04:07 +00:00
|
|
|
"/lib": "/lib",
|
|
|
|
"/lib64": "/lib64",
|
|
|
|
"/usr/lib": "/usr/lib",
|
|
|
|
"/bin/ls": "/bin/ls",
|
2019-05-07 20:13:38 +00:00
|
|
|
"/bin/cat": "/bin/cat",
|
2016-12-03 01:04:07 +00:00
|
|
|
"/bin/echo": "/bin/echo",
|
|
|
|
"/bin/bash": "/bin/bash",
|
2017-01-09 23:40:53 +00:00
|
|
|
"/bin/sleep": "/bin/sleep",
|
2016-12-03 01:04:07 +00:00
|
|
|
"/foobar": "/does/not/exist",
|
|
|
|
}
|
|
|
|
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
task := alloc.Job.TaskGroups[0].Tasks[0]
|
2018-11-30 11:18:39 +00:00
|
|
|
taskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, "global").Build()
|
2016-12-03 01:04:07 +00:00
|
|
|
|
2018-09-24 18:37:45 +00:00
|
|
|
allocDir := allocdir.NewAllocDir(testlog.HCLogger(t), filepath.Join(os.TempDir(), alloc.ID))
|
2016-12-03 01:04:07 +00:00
|
|
|
if err := allocDir.Build(); err != nil {
|
2018-06-13 22:33:25 +00:00
|
|
|
t.Fatalf("AllocDir.Build() failed: %v", err)
|
2016-12-03 01:04:07 +00:00
|
|
|
}
|
2019-01-04 21:11:25 +00:00
|
|
|
if err := allocDir.NewTaskDir(task.Name).Build(true, chrootEnv); err != nil {
|
2016-12-03 01:04:07 +00:00
|
|
|
allocDir.Destroy()
|
2018-06-13 22:33:25 +00:00
|
|
|
t.Fatalf("allocDir.NewTaskDir(%q) failed: %v", task.Name, err)
|
2016-12-03 01:04:07 +00:00
|
|
|
}
|
|
|
|
td := allocDir.TaskDirs[task.Name]
|
2018-12-07 01:54:14 +00:00
|
|
|
cmd := &ExecCommand{
|
2018-09-24 18:37:45 +00:00
|
|
|
Env: taskEnv.List(),
|
2016-12-03 01:04:07 +00:00
|
|
|
TaskDir: td.Dir,
|
2018-12-07 02:39:53 +00:00
|
|
|
Resources: &drivers.Resources{
|
2018-12-14 00:21:41 +00:00
|
|
|
NomadResources: alloc.AllocatedResources.Tasks[task.Name],
|
2018-09-24 18:37:45 +00:00
|
|
|
},
|
2016-08-04 22:03:56 +00:00
|
|
|
}
|
2018-09-24 18:37:45 +00:00
|
|
|
|
2019-04-01 15:59:56 +00:00
|
|
|
testCmd := &testExecCmd{
|
|
|
|
command: cmd,
|
|
|
|
allocDir: allocDir,
|
|
|
|
}
|
|
|
|
configureTLogging(t, testCmd)
|
|
|
|
return testCmd
|
2016-08-04 22:03:56 +00:00
|
|
|
}
|
|
|
|
|
2016-07-10 06:56:31 +00:00
|
|
|
// TestExecutor_IsolationAndConstraints launches /bin/ls inside the isolated
// executor and asserts that the memory cgroup limit was applied, that the
// cgroup is removed after shutdown, and that the process only saw the
// chroot's filesystem contents.
func TestExecutor_IsolationAndConstraints(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	testutil.ExecCompatible(t)

	testExecCmd := testExecutorCommandWithChroot(t)
	execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir
	execCmd.Cmd = "/bin/ls"
	execCmd.Args = []string{"-F", "/", "/etc/"}
	defer allocDir.Destroy()

	execCmd.ResourceLimits = true

	executor := NewExecutorWithIsolation(testlog.HCLogger(t))
	defer executor.Shutdown("SIGKILL", 0)

	ps, err := executor.Launch(execCmd)
	require.NoError(err)
	require.NotZero(ps.Pid)

	state, err := executor.Wait(context.Background())
	require.NoError(err)
	require.Zero(state.ExitCode)

	// Check if the resource constraints were applied
	if lexec, ok := executor.(*LibcontainerExecutor); ok {
		state, err := lexec.container.State()
		require.NoError(err)

		// Read the memory limit directly from the cgroup filesystem and
		// compare it against the task's configured MemoryMB.
		memLimits := filepath.Join(state.CgroupPaths["memory"], "memory.limit_in_bytes")
		data, err := ioutil.ReadFile(memLimits)
		require.NoError(err)

		expectedMemLim := strconv.Itoa(int(execCmd.Resources.NomadResources.Memory.MemoryMB * 1024 * 1024))
		actualMemLim := strings.TrimSpace(string(data))
		require.Equal(actualMemLim, expectedMemLim)
		require.NoError(executor.Shutdown("", 0))
		executor.Wait(context.Background())

		// Check if Nomad has actually removed the cgroups
		tu.WaitForResult(func() (bool, error) {
			_, err = os.Stat(memLimits)
			if err == nil {
				return false, fmt.Errorf("expected an error from os.Stat %s", memLimits)
			}
			return true, nil
		}, func(err error) { t.Error(err) })

	}
	// Exact listing of the chroot contents produced by `ls -F / /etc/`;
	// must match the entries staged by testExecutorCommandWithChroot plus
	// the standard alloc/task dirs.
	expected := `/:
alloc/
bin/
dev/
etc/
lib/
lib64/
local/
proc/
secrets/
sys/
tmp/
usr/

/etc/:
ld.so.cache
ld.so.conf
ld.so.conf.d/
passwd`
	tu.WaitForResult(func() (bool, error) {
		output := testExecCmd.stdout.String()
		act := strings.TrimSpace(string(output))
		if act != expected {
			return false, fmt.Errorf("Command output incorrectly: want %v; got %v", expected, act)
		}
		return true, nil
	}, func(err error) { t.Error(err) })
}
|
2017-01-26 04:58:24 +00:00
|
|
|
|
2019-06-11 01:20:45 +00:00
|
|
|
// TestExecutor_CgroupPaths asserts that process starts with independent cgroups
// hierarchy created for this process
func TestExecutor_CgroupPaths(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	testutil.ExecCompatible(t)

	testExecCmd := testExecutorCommandWithChroot(t)
	execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir
	// Print the process's own cgroup membership after a short sleep.
	execCmd.Cmd = "/bin/bash"
	execCmd.Args = []string{"-c", "sleep 0.2; cat /proc/self/cgroup"}
	defer allocDir.Destroy()

	execCmd.ResourceLimits = true

	executor := NewExecutorWithIsolation(testlog.HCLogger(t))
	defer executor.Shutdown("SIGKILL", 0)

	ps, err := executor.Launch(execCmd)
	require.NoError(err)
	require.NotZero(ps.Pid)

	state, err := executor.Wait(context.Background())
	require.NoError(err)
	require.Zero(state.ExitCode)

	tu.WaitForResult(func() (bool, error) {
		output := strings.TrimSpace(testExecCmd.stdout.String())
		// sanity check that we got some cgroups
		if !strings.Contains(output, ":devices:") {
			return false, fmt.Errorf("was expected cgroup files but found:\n%v", output)
		}
		lines := strings.Split(output, "\n")
		for _, line := range lines {
			// Every cgroup entry should be /nomad/$ALLOC_ID
			if line == "" {
				continue
			}

			// Skip rdma subsystem; rdma was added in most recent kernels and libcontainer/docker
			// don't isolate it by default.
			if strings.Contains(line, ":rdma:") {
				continue
			}

			if !strings.Contains(line, ":/nomad/") {
				return false, fmt.Errorf("Not a member of the alloc's cgroup: expected=...:/nomad/... -- found=%q", line)
			}
		}
		return true, nil
	}, func(err error) { t.Error(err) })
}
|
|
|
|
|
2019-12-11 15:28:41 +00:00
|
|
|
// TestExecutor_CgroupPathsAreDestroyed asserts that all cgroups created for
// a task are destroyed on shutdown.
func TestExecutor_CgroupPathsAreDestroyed(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	testutil.ExecCompatible(t)

	testExecCmd := testExecutorCommandWithChroot(t)
	execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir
	// Print the process's own cgroup membership after a short sleep.
	execCmd.Cmd = "/bin/bash"
	execCmd.Args = []string{"-c", "sleep 0.2; cat /proc/self/cgroup"}
	defer allocDir.Destroy()

	execCmd.ResourceLimits = true

	executor := NewExecutorWithIsolation(testlog.HCLogger(t))
	defer executor.Shutdown("SIGKILL", 0)

	ps, err := executor.Launch(execCmd)
	require.NoError(err)
	require.NotZero(ps.Pid)

	state, err := executor.Wait(context.Background())
	require.NoError(err)
	require.Zero(state.ExitCode)

	// Capture the raw /proc/self/cgroup output once the process has
	// printed it; validated the same way as TestExecutor_CgroupPaths.
	var cgroupsPaths string
	tu.WaitForResult(func() (bool, error) {
		output := strings.TrimSpace(testExecCmd.stdout.String())
		// sanity check that we got some cgroups
		if !strings.Contains(output, ":devices:") {
			return false, fmt.Errorf("was expected cgroup files but found:\n%v", output)
		}
		lines := strings.Split(output, "\n")
		for _, line := range lines {
			// Every cgroup entry should be /nomad/$ALLOC_ID
			if line == "" {
				continue
			}

			// Skip rdma subsystem; rdma was added in most recent kernels and libcontainer/docker
			// don't isolate it by default.
			if strings.Contains(line, ":rdma:") {
				continue
			}

			if !strings.Contains(line, ":/nomad/") {
				return false, fmt.Errorf("Not a member of the alloc's cgroup: expected=...:/nomad/... -- found=%q", line)
			}
		}

		cgroupsPaths = output
		return true, nil
	}, func(err error) { t.Error(err) })

	// shutdown executor and test that cgroups are destroyed
	executor.Shutdown("SIGKILL", 0)

	// test that the cgroup paths are not visible
	// (write the captured output to a temp file so it can be fed to
	// cgroups.ParseCgroupFile, which takes a path)
	tmpFile, err := ioutil.TempFile("", "")
	require.NoError(err)
	defer os.Remove(tmpFile.Name())

	_, err = tmpFile.WriteString(cgroupsPaths)
	require.NoError(err)
	tmpFile.Close()

	subsystems, err := cgroups.ParseCgroupFile(tmpFile.Name())
	require.NoError(err)

	for subsystem, cgroup := range subsystems {
		if !strings.Contains(cgroup, "nomad/") {
			// this should only be rdma at this point
			continue
		}

		p, err := getCgroupPathHelper(subsystem, cgroup)
		require.NoError(err)
		require.Falsef(cgroups.PathExists(p), "cgroup for %s %s still exists", subsystem, cgroup)
	}
}
|
|
|
|
|
2019-05-07 20:58:27 +00:00
|
|
|
func TestUniversalExecutor_LookupTaskBin(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
require := require.New(t)
|
|
|
|
|
|
|
|
// Create a temp dir
|
|
|
|
tmpDir, err := ioutil.TempDir("", "")
|
|
|
|
require.Nil(err)
|
|
|
|
defer os.Remove(tmpDir)
|
|
|
|
|
2019-05-08 14:01:51 +00:00
|
|
|
// Create the command
|
|
|
|
cmd := &ExecCommand{Env: []string{"PATH=/bin"}, TaskDir: tmpDir}
|
|
|
|
|
2019-05-07 20:58:27 +00:00
|
|
|
// Make a foo subdir
|
|
|
|
os.MkdirAll(filepath.Join(tmpDir, "foo"), 0700)
|
|
|
|
|
|
|
|
// Write a file under foo
|
|
|
|
filePath := filepath.Join(tmpDir, "foo", "tmp.txt")
|
|
|
|
err = ioutil.WriteFile(filePath, []byte{1, 2}, os.ModeAppend)
|
|
|
|
require.NoError(err)
|
|
|
|
|
|
|
|
// Lookout with an absolute path to the binary
|
2019-05-08 14:01:51 +00:00
|
|
|
cmd.Cmd = "/foo/tmp.txt"
|
|
|
|
_, err = lookupTaskBin(cmd)
|
2019-05-07 20:58:27 +00:00
|
|
|
require.NoError(err)
|
|
|
|
|
|
|
|
// Write a file under local subdir
|
|
|
|
os.MkdirAll(filepath.Join(tmpDir, "local"), 0700)
|
|
|
|
filePath2 := filepath.Join(tmpDir, "local", "tmp.txt")
|
|
|
|
ioutil.WriteFile(filePath2, []byte{1, 2}, os.ModeAppend)
|
|
|
|
|
|
|
|
// Lookup with file name, should find the one we wrote above
|
2019-05-08 14:01:51 +00:00
|
|
|
cmd.Cmd = "tmp.txt"
|
|
|
|
_, err = lookupTaskBin(cmd)
|
2019-05-07 20:58:27 +00:00
|
|
|
require.NoError(err)
|
|
|
|
|
|
|
|
// Lookup a host absolute path
|
2019-05-08 14:01:51 +00:00
|
|
|
cmd.Cmd = "/bin/sh"
|
|
|
|
_, err = lookupTaskBin(cmd)
|
2019-05-07 20:58:27 +00:00
|
|
|
require.Error(err)
|
|
|
|
}
|
|
|
|
|
2019-05-02 17:36:34 +00:00
|
|
|
// Exec Launch looks for the binary only inside the chroot
func TestExecutor_EscapeContainer(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	testutil.ExecCompatible(t)

	testExecCmd := testExecutorCommandWithChroot(t)
	execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir
	execCmd.Cmd = "/bin/kill" // missing from the chroot container
	defer allocDir.Destroy()

	execCmd.ResourceLimits = true

	executor := NewExecutorWithIsolation(testlog.HCLogger(t))
	defer executor.Shutdown("SIGKILL", 0)

	// Absolute path that exists on the host but not in the chroot must fail.
	_, err := executor.Launch(execCmd)
	require.Error(err)
	require.Regexp("^file /bin/kill not found under path", err)

	// Bare files are looked up using the system path, inside the container
	allocDir.Destroy()
	testExecCmd = testExecutorCommandWithChroot(t)
	execCmd, allocDir = testExecCmd.command, testExecCmd.allocDir
	execCmd.Cmd = "kill"
	_, err = executor.Launch(execCmd)
	require.Error(err)
	require.Regexp("^file kill not found under path", err)

	// "echo" is staged into the chroot, so a bare-name lookup succeeds.
	allocDir.Destroy()
	testExecCmd = testExecutorCommandWithChroot(t)
	execCmd, allocDir = testExecCmd.command, testExecCmd.allocDir
	execCmd.Cmd = "echo"
	_, err = executor.Launch(execCmd)
	require.NoError(err)
}
|
|
|
|
|
2019-05-20 19:30:07 +00:00
|
|
|
// TestExecutor_Capabilities asserts the Linux capability masks a task sees
// (from /proc/$$/status) for both an unprivileged user and root.
func TestExecutor_Capabilities(t *testing.T) {
	t.Parallel()
	testutil.ExecCompatible(t)

	// Expected capability mask lines from /proc/$$/status per user.
	// NOTE(review): the 0000003fffffffff bounding-set value is kernel
	// dependent — confirm on newer kernels that add capabilities.
	cases := []struct {
		user string
		caps string
	}{
		{
			user: "nobody",
			caps: `
CapInh: 0000000000000000
CapPrm: 0000000000000000
CapEff: 0000000000000000
CapBnd: 0000003fffffffff
CapAmb: 0000000000000000`,
		},
		{
			user: "root",
			caps: `
CapInh: 0000000000000000
CapPrm: 0000003fffffffff
CapEff: 0000003fffffffff
CapBnd: 0000003fffffffff
CapAmb: 0000000000000000`,
		},
	}

	for _, c := range cases {
		t.Run(c.user, func(t *testing.T) {
			require := require.New(t)

			testExecCmd := testExecutorCommandWithChroot(t)
			execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir
			defer allocDir.Destroy()

			execCmd.User = c.user
			execCmd.ResourceLimits = true
			execCmd.Cmd = "/bin/bash"
			execCmd.Args = []string{"-c", "cat /proc/$$/status"}

			executor := NewExecutorWithIsolation(testlog.HCLogger(t))
			defer executor.Shutdown("SIGKILL", 0)

			_, err := executor.Launch(execCmd)
			require.NoError(err)

			// Wait for the process to exit, bounded by a timeout so a hung
			// executor fails the test instead of deadlocking it.
			ch := make(chan interface{})
			go func() {
				executor.Wait(context.Background())
				close(ch)
			}()

			select {
			case <-ch:
				// all good
			case <-time.After(5 * time.Second):
				require.Fail("timeout waiting for exec to shutdown")
			}

			// canonical collapses runs of whitespace so the comparison is
			// insensitive to column alignment in /proc output.
			canonical := func(s string) string {
				s = strings.TrimSpace(s)
				s = regexp.MustCompile("[ \t]+").ReplaceAllString(s, " ")
				s = regexp.MustCompile("[\n\r]+").ReplaceAllString(s, "\n")
				return s
			}

			expected := canonical(c.caps)
			tu.WaitForResult(func() (bool, error) {
				output := canonical(testExecCmd.stdout.String())
				if !strings.Contains(output, expected) {
					return false, fmt.Errorf("capabilities didn't match: want\n%v\n; got:\n%v\n", expected, output)
				}
				return true, nil
			}, func(err error) { require.NoError(err) })
		})
	}

}
|
|
|
|
|
2017-01-26 04:58:24 +00:00
|
|
|
// TestExecutor_ClientCleanup asserts that shutting down the executor
// actually kills the task process: after Shutdown, the process must stop
// producing output.
func TestExecutor_ClientCleanup(t *testing.T) {
	t.Parallel()
	testutil.ExecCompatible(t)
	require := require.New(t)

	testExecCmd := testExecutorCommandWithChroot(t)
	execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir
	defer allocDir.Destroy()

	executor := NewExecutorWithIsolation(testlog.HCLogger(t))
	defer executor.Shutdown("", 0)

	// Need to run a command which will produce continuous output but not
	// too quickly to ensure executor.Exit() stops the process.
	execCmd.Cmd = "/bin/bash"
	execCmd.Args = []string{"-c", "while true; do /bin/echo X; /bin/sleep 1; done"}
	execCmd.ResourceLimits = true

	ps, err := executor.Launch(execCmd)

	require.NoError(err)
	require.NotZero(ps.Pid)
	// Let the loop emit some output before shutting it down.
	time.Sleep(500 * time.Millisecond)
	require.NoError(executor.Shutdown("SIGINT", 100*time.Millisecond))

	// Wait for the executor to finish, bounded by a timeout so a hung
	// shutdown fails the test rather than deadlocking it.
	ch := make(chan interface{})
	go func() {
		executor.Wait(context.Background())
		close(ch)
	}()

	select {
	case <-ch:
		// all good
	case <-time.After(5 * time.Second):
		require.Fail("timeout waiting for exec to shutdown")
	}

	// Output must be non-empty (the loop ran) and must not grow after
	// shutdown (the process was really killed).
	output := testExecCmd.stdout.String()
	require.NotZero(len(output))
	time.Sleep(2 * time.Second)
	output1 := testExecCmd.stdout.String()
	require.Equal(len(output), len(output1))
}
|
2018-12-10 03:30:23 +00:00
|
|
|
|
|
|
|
func TestExecutor_cmdDevices(t *testing.T) {
|
|
|
|
input := []*drivers.DeviceConfig{
|
|
|
|
{
|
|
|
|
HostPath: "/dev/null",
|
|
|
|
TaskPath: "/task/dev/null",
|
|
|
|
Permissions: "rwm",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
expected := &lconfigs.Device{
|
|
|
|
Path: "/task/dev/null",
|
|
|
|
Type: 99,
|
|
|
|
Major: 1,
|
|
|
|
Minor: 3,
|
|
|
|
Permissions: "rwm",
|
|
|
|
}
|
|
|
|
|
|
|
|
found, err := cmdDevices(input)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Len(t, found, 1)
|
|
|
|
|
|
|
|
// ignore file permission and ownership
|
|
|
|
// as they are host specific potentially
|
|
|
|
d := found[0]
|
|
|
|
d.FileMode = 0
|
|
|
|
d.Uid = 0
|
|
|
|
d.Gid = 0
|
|
|
|
|
|
|
|
require.EqualValues(t, expected, d)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestExecutor_cmdMounts(t *testing.T) {
|
|
|
|
input := []*drivers.MountConfig{
|
|
|
|
{
|
|
|
|
HostPath: "/host/path-ro",
|
|
|
|
TaskPath: "/task/path-ro",
|
|
|
|
Readonly: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
HostPath: "/host/path-rw",
|
|
|
|
TaskPath: "/task/path-rw",
|
|
|
|
Readonly: false,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
expected := []*lconfigs.Mount{
|
|
|
|
{
|
volumes: Add support for mount propagation
This commit introduces support for configuring mount propagation when
mounting volumes with the `volume_mount` stanza on Linux targets.
Similar to Kubernetes, we expose 3 options for configuring mount
propagation:
- private, which is equivalent to `rprivate` on Linux, which does not allow the
container to see any new nested mounts after the chroot was created.
- host-to-task, which is equivalent to `rslave` on Linux, which allows new mounts
that have been created _outside of the container_ to be visible
inside the container after the chroot is created.
- bidirectional, which is equivalent to `rshared` on Linux, which allows both
the container to see new mounts created on the host, but
importantly _allows the container to create mounts that are
visible in other containers an don the host_
private and host-to-task are safe, but bidirectional mounts can be
dangerous, as if the code inside a container creates a mount, and does
not clean it up before tearing down the container, it can cause bad
things to happen inside the kernel.
To add a layer of safety here, we require that the user has ReadWrite
permissions on the volume before allowing bidirectional mounts, as a
defense in depth / validation case, although creating mounts should also require
a priviliged execution environment inside the container.
2019-09-13 21:13:20 +00:00
|
|
|
Source: "/host/path-ro",
|
|
|
|
Destination: "/task/path-ro",
|
|
|
|
Flags: unix.MS_BIND | unix.MS_RDONLY,
|
|
|
|
Device: "bind",
|
|
|
|
PropagationFlags: []int{unix.MS_PRIVATE | unix.MS_REC},
|
2018-12-10 03:30:23 +00:00
|
|
|
},
|
|
|
|
{
|
volumes: Add support for mount propagation
This commit introduces support for configuring mount propagation when
mounting volumes with the `volume_mount` stanza on Linux targets.
Similar to Kubernetes, we expose 3 options for configuring mount
propagation:
- private, which is equivalent to `rprivate` on Linux, which does not allow the
container to see any new nested mounts after the chroot was created.
- host-to-task, which is equivalent to `rslave` on Linux, which allows new mounts
that have been created _outside of the container_ to be visible
inside the container after the chroot is created.
- bidirectional, which is equivalent to `rshared` on Linux, which allows both
the container to see new mounts created on the host, but
importantly _allows the container to create mounts that are
visible in other containers an don the host_
private and host-to-task are safe, but bidirectional mounts can be
dangerous, as if the code inside a container creates a mount, and does
not clean it up before tearing down the container, it can cause bad
things to happen inside the kernel.
To add a layer of safety here, we require that the user has ReadWrite
permissions on the volume before allowing bidirectional mounts, as a
defense in depth / validation case, although creating mounts should also require
a priviliged execution environment inside the container.
2019-09-13 21:13:20 +00:00
|
|
|
Source: "/host/path-rw",
|
|
|
|
Destination: "/task/path-rw",
|
|
|
|
Flags: unix.MS_BIND,
|
|
|
|
Device: "bind",
|
|
|
|
PropagationFlags: []int{unix.MS_PRIVATE | unix.MS_REC},
|
2018-12-10 03:30:23 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
require.EqualValues(t, expected, cmdMounts(input))
|
|
|
|
}
|