Version 1.0.3
Merge tag 'v1.0.3' into post-release-1.0.3

commit ce68ee164b
@@ -1,4 +1,4 @@
## 1.0.3 (Unreleased)
## 1.0.4 (Unreleased)

FEATURES:

* **Terminating Gateways**: Adds built-in support for running Consul Connect terminating gateways [[GH-9829](https://github.com/hashicorp/nomad/pull/9829)]

@@ -14,6 +14,11 @@ BUG FIXES:
* scheduler: Fixed a bug where job statuses and summaries were duplicated and miscalculated when registering a job. [[GH-9768](https://github.com/hashicorp/nomad/issues/9768)]
* driver/qemu: Fixed a bug where network namespaces were not supported for QEMU workloads [[GH-9861](https://github.com/hashicorp/nomad/pull/9861)]

## 1.0.3 (January 28, 2021)

SECURITY:
* drivers/exec+java: Modified exec-based drivers to run tasks in private PID/IPC namespaces. CVE-2021-3283 [[GH-9911](https://github.com/hashicorp/nomad/issues/9911)]

## 1.0.2 (January 14, 2021)

IMPROVEMENTS:
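The SECURITY entry above is what the rest of this diff implements: under the patched exec and java drivers each task starts in private PID and IPC namespaces, so the task's entrypoint observes itself as PID 1. A minimal sketch of that observable effect, assuming a task whose command is a small Go binary (the new e2e jobs further down perform the same check with a shell script that prints `$BASHPID`):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Inside a private PID namespace the task entrypoint is PID 1.
	// Under raw_exec, which does not namespace tasks, this prints a host PID.
	fmt.Printf("my pid is %d\n", os.Getpid())
}
```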
client/structs/structs.generated.go (new file, 9683 lines)
File diff suppressed because it is too large
@@ -7,7 +7,6 @@ import (
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"

@@ -270,9 +269,9 @@ func TestExecDriver_StartWaitRecover(t *testing.T) {
	require.NoError(harness.DestroyTask(task.ID, true))
}

// TestExecDriver_DestroyKillsAll asserts that when TaskDestroy is called all
// task processes are cleaned up.
func TestExecDriver_DestroyKillsAll(t *testing.T) {
// TestExecDriver_NoOrphans asserts that when the main
// task dies, the orphans in the PID namespaces are killed by the kernel
func TestExecDriver_NoOrphans(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	ctestutils.ExecCompatible(t)

@@ -294,50 +293,54 @@ func TestExecDriver_DestroyKillsAll(t *testing.T) {

	taskConfig := map[string]interface{}{}
	taskConfig["command"] = "/bin/sh"
	taskConfig["args"] = []string{"-c", fmt.Sprintf(`sleep 3600 & echo "SLEEP_PID=$!"`)}

	// print the child PID in the task PID namespace, then sleep for 5 seconds to give us a chance to examine processes
	taskConfig["args"] = []string{"-c", fmt.Sprintf(`sleep 3600 & sleep 20`)}
	require.NoError(task.EncodeConcreteDriverConfig(&taskConfig))

	handle, _, err := harness.StartTask(task)
	require.NoError(err)
	defer harness.DestroyTask(task.ID, true)

	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
	waitCh, err := harness.WaitTask(context.Background(), handle.Config.ID)
	require.NoError(err)

	select {
	case result := <-ch:
		require.True(result.Successful(), "command failed: %#v", result)
	case <-time.After(10 * time.Second):
		require.Fail("timeout waiting for task to shutdown")
	}
	require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second))

	sleepPid := 0

	// Ensure that the task is marked as dead, but account
	// for WaitTask() closing channel before internal state is updated
	var childPids []int
	taskState := TaskState{}
	testutil.WaitForResult(func() (bool, error) {
		stdout, err := ioutil.ReadFile(filepath.Join(task.TaskDir().LogDir, "test.stdout.0"))
		require.NoError(handle.GetDriverState(&taskState))
		if taskState.Pid == 0 {
			return false, fmt.Errorf("task PID is zero")
		}

		children, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/task/%d/children", taskState.Pid, taskState.Pid))
		if err != nil {
			return false, fmt.Errorf("failed to output pid file: %v", err)
			return false, fmt.Errorf("error reading /proc for children: %v", err)
		}

		pidMatch := regexp.MustCompile(`SLEEP_PID=(\d+)`).FindStringSubmatch(string(stdout))
		if len(pidMatch) != 2 {
			return false, fmt.Errorf("failed to find pid in %s", string(stdout))
		pids := strings.Fields(string(children))
		if len(pids) < 2 {
			return false, fmt.Errorf("error waiting for two children, currently %d", len(pids))
		}

		pid, err := strconv.Atoi(pidMatch[1])
		if err != nil {
			return false, fmt.Errorf("pid parts aren't int: %s", pidMatch[1])
		for _, cpid := range pids {
			p, err := strconv.Atoi(cpid)
			if err != nil {
				return false, fmt.Errorf("error parsing child pids from /proc: %s", cpid)
			}
			childPids = append(childPids, p)
		}

		sleepPid = pid
		return true, nil
	}, func(err error) {
		require.NoError(err)
	})

	select {
	case result := <-waitCh:
		require.True(result.Successful(), "command failed: %#v", result)
	case <-time.After(30 * time.Second):
		require.Fail("timeout waiting for task to shutdown")
	}

	// isProcessRunning returns an error if process is not running
	isProcessRunning := func(pid int) error {
		process, err := os.FindProcess(pid)

@@ -353,20 +356,20 @@ func TestExecDriver_DestroyKillsAll(t *testing.T) {
		return nil
	}

	require.NoError(isProcessRunning(sleepPid))

	require.NoError(harness.DestroyTask(task.ID, true))
	// task should be dead
	require.Error(isProcessRunning(taskState.Pid))

	// all children should eventually be killed by OS
	testutil.WaitForResult(func() (bool, error) {
		err := isProcessRunning(sleepPid)
		if err == nil {
			return false, fmt.Errorf("child process is still running")
		for _, cpid := range childPids {
			err := isProcessRunning(cpid)
			if err == nil {
				return false, fmt.Errorf("child process %d is still running", cpid)
			}
			if !strings.Contains(err.Error(), "failed to signal process") {
				return false, fmt.Errorf("unexpected error: %v", err)
			}
		}

		if !strings.Contains(err.Error(), "failed to signal process") {
			return false, fmt.Errorf("unexpected error: %v", err)
		}

		return true, nil
	}, func(err error) {
		require.NoError(err)
@@ -8,14 +8,15 @@ import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"golang.org/x/sys/unix"

	ctestutils "github.com/hashicorp/nomad/client/testutil"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/plugins/drivers"
	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"
	"golang.org/x/sys/unix"
)

func TestExecDriver_StartWaitStop(t *testing.T) {

@@ -44,6 +45,7 @@ func TestExecDriver_StartWaitStop(t *testing.T) {
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	defer harness.DestroyTask(task.ID, true)
	require.NoError(err)

	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)

@@ -52,12 +54,12 @@ func TestExecDriver_StartWaitStop(t *testing.T) {
	require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second))

	go func() {
		harness.StopTask(task.ID, 2*time.Second, "SIGINT")
		harness.StopTask(task.ID, 2*time.Second, "SIGKILL")
	}()

	select {
	case result := <-ch:
		require.Equal(int(unix.SIGINT), result.Signal)
		require.Equal(int(unix.SIGKILL), result.Signal)
	case <-time.After(10 * time.Second):
		require.Fail("timeout waiting for task to shutdown")
	}

@@ -77,8 +79,6 @@ func TestExecDriver_StartWaitStop(t *testing.T) {
	}, func(err error) {
		require.NoError(err)
	})

	require.NoError(harness.DestroyTask(task.ID, true))
}

func TestExec_ExecTaskStreaming(t *testing.T) {
@@ -581,6 +581,8 @@ func configureIsolation(cfg *lconfigs.Config, command *ExecCommand) error {
	// launch with mount namespace
	cfg.Namespaces = lconfigs.Namespaces{
		{Type: lconfigs.NEWNS},
		{Type: lconfigs.NEWPID},
		{Type: lconfigs.NEWIPC},
	}

	if command.NetworkIsolation != nil {
@@ -107,35 +107,46 @@ func TestExecutor_IsolationAndConstraints(t *testing.T) {
	require.NoError(err)
	require.NotZero(ps.Pid)

	state, err := executor.Wait(context.Background())
	estate, err := executor.Wait(context.Background())
	require.NoError(err)
	require.Zero(state.ExitCode)
	require.Zero(estate.ExitCode)

	lexec, ok := executor.(*LibcontainerExecutor)
	require.True(ok)

	// Check if the resource constraints were applied
	if lexec, ok := executor.(*LibcontainerExecutor); ok {
		state, err := lexec.container.State()
		require.NoError(err)
	state, err := lexec.container.State()
	require.NoError(err)

		memLimits := filepath.Join(state.CgroupPaths["memory"], "memory.limit_in_bytes")
		data, err := ioutil.ReadFile(memLimits)
		require.NoError(err)
	memLimits := filepath.Join(state.CgroupPaths["memory"], "memory.limit_in_bytes")
	data, err := ioutil.ReadFile(memLimits)
	require.NoError(err)

		expectedMemLim := strconv.Itoa(int(execCmd.Resources.NomadResources.Memory.MemoryMB * 1024 * 1024))
		actualMemLim := strings.TrimSpace(string(data))
		require.Equal(actualMemLim, expectedMemLim)
		require.NoError(executor.Shutdown("", 0))
		executor.Wait(context.Background())
	expectedMemLim := strconv.Itoa(int(execCmd.Resources.NomadResources.Memory.MemoryMB * 1024 * 1024))
	actualMemLim := strings.TrimSpace(string(data))
	require.Equal(actualMemLim, expectedMemLim)

		// Check if Nomad has actually removed the cgroups
		tu.WaitForResult(func() (bool, error) {
			_, err = os.Stat(memLimits)
			if err == nil {
				return false, fmt.Errorf("expected an error from os.Stat %s", memLimits)
			}
			return true, nil
		}, func(err error) { t.Error(err) })
	// Check that namespaces were applied to the container config
	config := lexec.container.Config()
	require.NoError(err)

	require.Contains(config.Namespaces, lconfigs.Namespace{Type: lconfigs.NEWNS})
	require.Contains(config.Namespaces, lconfigs.Namespace{Type: lconfigs.NEWPID})
	require.Contains(config.Namespaces, lconfigs.Namespace{Type: lconfigs.NEWIPC})

	// Shut down executor
	require.NoError(executor.Shutdown("", 0))
	executor.Wait(context.Background())

	// Check if Nomad has actually removed the cgroups
	tu.WaitForResult(func() (bool, error) {
		_, err = os.Stat(memLimits)
		if err == nil {
			return false, fmt.Errorf("expected an error from os.Stat %s", memLimits)
		}
		return true, nil
	}, func(err error) { t.Error(err) })

	}
	expected := `/:
alloc/
bin/
@@ -18,6 +18,7 @@ import (
	_ "github.com/hashicorp/nomad/e2e/deployment"
	_ "github.com/hashicorp/nomad/e2e/events"
	_ "github.com/hashicorp/nomad/e2e/example"
	_ "github.com/hashicorp/nomad/e2e/isolation"
	_ "github.com/hashicorp/nomad/e2e/lifecycle"
	_ "github.com/hashicorp/nomad/e2e/metrics"
	_ "github.com/hashicorp/nomad/e2e/namespaces"
@@ -192,6 +192,15 @@ func AllocLogs(allocID string, logStream LogStream) (string, error) {
	return Command(cmd[0], cmd[1:]...)
}

func AllocTaskLogs(allocID, task string, logStream LogStream) (string, error) {
	cmd := []string{"nomad", "alloc", "logs"}
	if logStream == LogsStdErr {
		cmd = append(cmd, "-stderr")
	}
	cmd = append(cmd, allocID, task)
	return Command(cmd[0], cmd[1:]...)
}

// AllocExec is a convenience wrapper that runs 'nomad alloc exec' with the
// passed execCmd via '/bin/sh -c', retrying if the task isn't ready
func AllocExec(allocID, taskID, execCmd, ns string, wc *WaitConfig) (string, error) {
@@ -153,6 +153,24 @@ func WaitForAllocRunning(t *testing.T, nomadClient *api.Client, allocID string)
	})
}

func WaitForAllocTaskRunning(t *testing.T, nomadClient *api.Client, allocID, task string) {
	testutil.WaitForResultRetries(retries, func() (bool, error) {
		time.Sleep(time.Millisecond * 100)
		alloc, _, err := nomadClient.Allocations().Info(allocID, nil)
		if err != nil {
			return false, err
		}

		state := "n/a"
		if task := alloc.TaskStates[task]; task != nil {
			state = task.State
		}
		return state == structs.AllocClientStatusRunning, fmt.Errorf("expected status running, but was: %s", state)
	}, func(err error) {
		t.Fatalf("failed to wait on alloc: %v", err)
	})
}

func WaitForAllocsRunning(t *testing.T, nomadClient *api.Client, allocIDs []string) {
	for _, allocID := range allocIDs {
		WaitForAllocRunning(t, nomadClient, allocID)
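The two helpers added above, AllocTaskLogs and WaitForAllocTaskRunning, are consumed by the new isolation e2e suite later in this diff. A condensed, hedged sketch of how they fit together; the alloc ID and the "pid" task name are taken from the example jobs below and are illustrative, not a fixed contract:

```go
package isolation

import (
	"testing"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/stretchr/testify/require"
)

// checkJavaTaskPID sketches how the isolation e2e tests below use the new
// helper; allocID and the task name "pid" come from the java.nomad example.
func checkJavaTaskPID(t *testing.T, nomad *api.Client, allocID string) {
	// The batch job runs to completion, so wait for the allocation to stop.
	e2eutil.WaitForAllocStopped(t, nomad, allocID)

	// AllocTaskLogs reads logs for one named task inside the allocation.
	out, err := e2eutil.AllocTaskLogs(allocID, "pid", e2eutil.LogsStdOut)
	require.NoError(t, err)

	// With a private PID namespace the task observes itself as PID 1.
	require.Contains(t, out, "my pid is 1\n")
}
```

WaitForAllocTaskRunning is used by the alloc-exec variants below instead, to wait for a specific long-running task (the "sleep" task) before exec'ing into it.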
e2e/isolation/input/alloc_exec.nomad (new file, 25 lines)
@@ -0,0 +1,25 @@
job "alloc_exec" {
  datacenters = ["dc1"]
  type = "service"

  constraint {
    attribute = "${attr.kernel.name}"
    value = "linux"
  }

  group "main" {
    task "main" {
      driver = "exec"

      config {
        command = "/bin/sleep"
        args = ["30s"]
      }

      resources {
        cpu = 100
        memory = 64
      }
    }
  }
}
e2e/isolation/input/alloc_exec_java.nomad (new file, 39 lines)
@@ -0,0 +1,39 @@
job "java_sleep" {
  datacenters = ["dc1"]
  type = "batch"

  group "java" {

    task "build" {
      lifecycle {
        hook = "prestart"
        sidecar = false
      }

      driver = "exec"
      config {
        command = "javac"
        args = ["-d", "${NOMAD_ALLOC_DIR}", "local/Sleep.java"]
      }

      template {
        destination = "local/Sleep.java"
        data = <<EOH
public class Sleep {
  public static void main(String... s) throws Exception {
    Thread.sleep(30000);
  }
}
EOH
      }
    }

    task "sleep" {
      driver = "java"
      config {
        class_path = "${NOMAD_ALLOC_DIR}"
        class = "Sleep"
      }
    }
  }
}
e2e/isolation/input/exec.nomad (new file, 38 lines)
@@ -0,0 +1,38 @@
job "exec" {
  datacenters = ["dc1"]
  type = "batch"

  constraint {
    attribute = "${attr.kernel.name}"
    value = "linux"
  }

  group "exec" {
    task "exec" {
      driver = "exec"

      config {
        command = "bash"
        args = [
          "-c", "local/pid.sh"
        ]
      }

      template {
        data = <<EOF
#!/usr/bin/env bash
echo my pid is $BASHPID
EOF

        destination = "local/pid.sh"
        perms = "777"
        change_mode = "noop"
      }

      resources {
        cpu = 100
        memory = 64
      }
    }
  }
}
e2e/isolation/input/java.nomad (new file, 39 lines)
@@ -0,0 +1,39 @@
job "java_pid" {
  datacenters = ["dc1"]
  type = "batch"

  group "java" {

    task "build" {
      lifecycle {
        hook = "prestart"
        sidecar = false
      }

      driver = "exec"
      config {
        command = "javac"
        args = ["-d", "${NOMAD_ALLOC_DIR}", "local/Pid.java"]
      }

      template {
        destination = "local/Pid.java"
        data = <<EOH
public class Pid {
  public static void main(String... s) throws Exception {
    System.out.println("my pid is " + ProcessHandle.current().pid());
  }
}
EOH
      }
    }

    task "pid" {
      driver = "java"
      config {
        class_path = "${NOMAD_ALLOC_DIR}"
        class = "Pid"
      }
    }
  }
}
e2e/isolation/input/raw_exec.nomad (new file, 38 lines)
@@ -0,0 +1,38 @@
job "raw_exec" {
  datacenters = ["dc1"]
  type = "batch"

  constraint {
    attribute = "${attr.kernel.name}"
    value = "linux"
  }

  group "raw_exec" {
    task "raw_exec" {
      driver = "raw_exec"

      config {
        command = "bash"
        args = [
          "-c", "local/pid.sh"
        ]
      }

      template {
        data = <<EOF
#!/usr/bin/env bash
echo my pid is $BASHPID
EOF

        destination = "local/pid.sh"
        perms = "777"
        change_mode = "noop"
      }

      resources {
        cpu = 100
        memory = 64
      }
    }
  }
}
e2e/isolation/isolation.go (new file, 241 lines)
@@ -0,0 +1,241 @@
package isolation

import (
	"bytes"
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/hashicorp/nomad/e2e/framework"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/stretchr/testify/require"
)

type IsolationTest struct {
	framework.TC

	jobIDs []string
}

func init() {
	framework.AddSuites(&framework.TestSuite{
		Component:   "Isolation",
		CanRunLocal: true,
		Cases: []framework.TestCase{
			new(IsolationTest),
		},
	})
}

func (tc *IsolationTest) BeforeAll(f *framework.F) {
	t := f.T()
	e2eutil.WaitForLeader(t, tc.Nomad())
	e2eutil.WaitForNodesReady(t, tc.Nomad(), 1)
}

func (tc *IsolationTest) TestIsolation_ExecDriver_PIDNamespacing(f *framework.F) {
	t := f.T()

	clientNodes, err := e2eutil.ListLinuxClientNodes(tc.Nomad())
	require.Nil(t, err)

	if len(clientNodes) == 0 {
		t.Skip("no Linux clients")
	}

	uuid := uuid.Generate()
	jobID := "isolation-pid-namespace-" + uuid[0:8]
	file := "isolation/input/exec.nomad"
	allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), file, jobID, "")
	require.Equal(t, len(allocs), 1, fmt.Sprintf("failed to register %s", jobID))

	tc.jobIDs = append(tc.jobIDs, jobID)
	defer func() {
		tc.Nomad().Jobs().Deregister(jobID, true, nil)
	}()

	allocID := allocs[0].ID
	e2eutil.WaitForAllocStopped(t, tc.Nomad(), allocID)

	out, err := e2eutil.AllocLogs(allocID, e2eutil.LogsStdOut)
	require.NoError(t, err, fmt.Sprintf("could not get logs for alloc %s", allocID))

	require.Contains(t, out, "my pid is 1\n")
}

func (tc *IsolationTest) TestIsolation_ExecDriver_PIDNamespacing_AllocExec(f *framework.F) {
	t := f.T()

	clientNodes, err := e2eutil.ListLinuxClientNodes(tc.Nomad())
	require.Nil(t, err)

	if len(clientNodes) == 0 {
		t.Skip("no Linux clients")
	}

	uuid := uuid.Generate()
	jobID := "isolation-pid-namespace-" + uuid[0:8]
	file := "isolation/input/alloc_exec.nomad"
	allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), file, jobID, "")
	require.Equal(t, len(allocs), 1, fmt.Sprintf("failed to register %s", jobID))

	defer func() {
		tc.Nomad().Jobs().Deregister(jobID, true, nil)
	}()

	allocID := allocs[0].ID
	e2eutil.WaitForAllocRunning(t, tc.Nomad(), allocID)

	alloc, _, err := tc.Nomad().Allocations().Info(allocID, nil)
	require.NoError(t, err)
	require.NotNil(t, alloc)

	resizeCh := make(chan api.TerminalSize)
	var tty bool

	ctx, cancelFn := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancelFn()

	var stdout, stderr bytes.Buffer

	exitCode, err := tc.Nomad().Allocations().Exec(
		ctx,
		alloc,
		"main",
		tty,
		[]string{"ps", "ax"},
		bytes.NewReader([]byte("")),
		&stdout,
		&stderr,
		resizeCh,
		nil,
	)
	require.NoError(t, err)
	require.Equal(t, 0, exitCode)

	lines := strings.Split(strings.TrimSpace(stdout.String()), "\n")
	// header, sleep process, ps ax process are the only output lines expected
	require.Len(t, lines, 3)
}

func (tc *IsolationTest) TestIsolation_JavaDriver_PIDNamespacing(f *framework.F) {
	t := f.T()

	clientNodes, err := e2eutil.ListLinuxClientNodes(tc.Nomad())
	require.Nil(t, err)

	if len(clientNodes) == 0 {
		t.Skip("no Linux clients")
	}

	uuid := uuid.Generate()
	jobID := "isolation-pid-namespace-" + uuid[0:8]
	file := "isolation/input/java.nomad"
	allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), file, jobID, "")
	require.Equal(t, len(allocs), 1, fmt.Sprintf("failed to register %s", jobID))

	tc.jobIDs = append(tc.jobIDs, jobID)
	defer func() {
		tc.Nomad().Jobs().Deregister(jobID, true, nil)
	}()

	allocID := allocs[0].ID
	e2eutil.WaitForAllocStopped(t, tc.Nomad(), allocID)

	out, err := e2eutil.AllocTaskLogs(allocID, "pid", e2eutil.LogsStdOut)
	require.NoError(t, err, fmt.Sprintf("could not get logs for alloc %s", allocID))

	require.Contains(t, out, "my pid is 1\n")
}

func (tc *IsolationTest) TestIsolation_JavaDriver_PIDNamespacing_AllocExec(f *framework.F) {
	t := f.T()

	clientNodes, err := e2eutil.ListLinuxClientNodes(tc.Nomad())
	require.Nil(t, err)

	if len(clientNodes) == 0 {
		t.Skip("no Linux clients")
	}

	uuid := uuid.Generate()
	jobID := "isolation-pid-namespace-" + uuid[0:8]
	file := "isolation/input/alloc_exec_java.nomad"
	allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), file, jobID, "")
	require.Equal(t, len(allocs), 1, fmt.Sprintf("failed to register %s", jobID))

	defer func() {
		tc.Nomad().Jobs().Deregister(jobID, true, nil)
	}()

	allocID := allocs[0].ID
	e2eutil.WaitForAllocTaskRunning(t, tc.Nomad(), allocID, "sleep")

	alloc, _, err := tc.Nomad().Allocations().Info(allocID, nil)
	require.NoError(t, err)
	require.NotNil(t, alloc)

	resizeCh := make(chan api.TerminalSize)
	var tty bool

	ctx, cancelFn := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancelFn()

	var stdout, stderr bytes.Buffer

	exitCode, err := tc.Nomad().Allocations().Exec(
		ctx,
		alloc,
		"sleep",
		tty,
		[]string{"ps", "ax"},
		bytes.NewReader([]byte("")),
		&stdout,
		&stderr,
		resizeCh,
		nil,
	)
	require.NoError(t, err)
	require.Equal(t, 0, exitCode)

	lines := strings.Split(strings.TrimSpace(stdout.String()), "\n")
	// header, sleep process, ps ax process are the only output lines expected
	require.Len(t, lines, 3)
}

func (tc *IsolationTest) TestIsolation_RawExecDriver_NoPIDNamespacing(f *framework.F) {
	t := f.T()

	clientNodes, err := e2eutil.ListLinuxClientNodes(tc.Nomad())
	require.Nil(t, err)

	if len(clientNodes) == 0 {
		t.Skip("no Linux clients")
	}

	uuid := uuid.Generate()
	jobID := "isolation-pid-namespace-" + uuid[0:8]
	file := "isolation/input/raw_exec.nomad"

	allocs := e2eutil.RegisterAndWaitForAllocs(t, tc.Nomad(), file, jobID, "")
	require.Equal(t, len(allocs), 1, fmt.Sprintf("failed to register %s", jobID))

	defer func() {
		tc.Nomad().Jobs().Deregister(jobID, true, nil)
	}()

	allocID := allocs[0].ID
	e2eutil.WaitForAllocStopped(t, tc.Nomad(), allocID)

	out, err := e2eutil.AllocLogs(allocID, e2eutil.LogsStdOut)
	require.NoError(t, err, fmt.Sprintf("could not get logs for alloc %s", allocID))

	var pid uint64
	_, err = fmt.Sscanf(out, "my pid is %d", &pid)
	require.NoError(t, err)

	require.Greater(t, pid, uint64(1))
}
nomad/structs/structs.generated.go (new file, 150618 lines)
File diff suppressed because it is too large
@@ -16,7 +16,7 @@ var (
	// A pre-release marker for the version. If this is "" (empty string)
	// then it means that it is a final release. Otherwise, this is a pre-release
	// such as "dev" (in development), "beta", "rc1", etc.
	VersionPrerelease = "dev"
	VersionPrerelease = ""

	// VersionMetadata is metadata further describing the build type.
	VersionMetadata = ""
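The comment above explains the release convention: an empty VersionPrerelease marks a final release, while "dev", "beta", or "rc1" marks a pre-release. As a hedged illustration only (not Nomad's own helper; the function name and layout are assumptions), this is roughly how such fields combine into a human-readable version string:

```go
package version

import "fmt"

// humanVersion is an illustrative sketch: a final release renders as "1.0.3",
// a development build as "1.0.4-dev", plus optional build metadata after "+".
func humanVersion(version, prerelease, metadata string) string {
	v := version
	if prerelease != "" {
		v = fmt.Sprintf("%s-%s", v, prerelease)
	}
	if metadata != "" {
		v = fmt.Sprintf("%s+%s", v, metadata)
	}
	return v
}
```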