open-nomad/drivers/exec/driver_unix_test.go
Seth Hoenig 2e5c6de820 client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroup. Newer Linux
distros like Ubuntu 21.10 ship with cgroups v2 only, which has caused problems
for Nomad users.

Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not for managing cpuset cgroups. Previously, Nomad made use of a feature in
v1 where a PID could be a member of more than one cgroup. In v2 this is no
longer possible, so the logic around computing cpuset values must be modified.
When Nomad detects v2, it manages cpuset values in-process, rather than relying
on cgroup hierarchy inheritance via shared/reserved parents.
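
To make "manages cpuset values in-process" concrete, a minimal sketch is shown
below (this is not Nomad's actual implementation; the helper name, path layout,
and core string are assumptions). The idea is simply to write the computed core
set straight into the task's own v2 cgroup rather than relying on inheritance
from shared/reserved parents:

    package cgroupsketch

    import (
        "os"
        "path/filepath"
    )

    // setCpuset writes a pre-computed core list directly into a task's own
    // cgroup. Illustrative only: path layout and value format are assumptions.
    func setCpuset(cgroupPath, cores string) error {
        // e.g. cgroupPath = "/sys/fs/cgroup/nomad.slice/<allocID>-<task>.scope"
        //      cores      = "0-1,4"
        return os.WriteFile(filepath.Join(cgroupPath, "cpuset.cpus"), []byte(cores), 0o644)
    }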

Nomad will only activate the v2 logic when it detects cgroup2 is mounted at
/sys/fs/cgroup. This means that on systems running in hybrid mode with cgroup2
mounted at /sys/fs/cgroup/unified (as is typical), Nomad will continue to use
the v1 logic and should operate as before. Systems that do not support
cgroups v2 are also not affected.
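
A minimal sketch of this kind of detection follows (an illustration, not
necessarily the exact check performed by Nomad's cgutil package): statfs the
default mount point and compare the filesystem magic against the cgroup2 magic
number. In hybrid mode the root mount is still cgroup v1, so this stays false.

    package cgroupsketch

    import "golang.org/x/sys/unix"

    // isCgroupsV2 reports whether the unified cgroup2 filesystem is mounted
    // at the default root. Linux-only; a sketch, not Nomad's exact check.
    func isCgroupsV2() bool {
        var fs unix.Statfs_t
        if err := unix.Statfs("/sys/fs/cgroup", &fs); err != nil {
            return false
        }
        return fs.Type == unix.CGROUP2_SUPER_MAGIC
    }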

When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in client config), and create cgroups for tasks using the
naming convention <allocID>-<task>.scope. This follows the convention set by
systemd and also used by Docker when cgroups v2 is detected.
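
For illustration, the resulting layout under the unified hierarchy looks
roughly like the sketch below (the helper and constant names are made up, and
the default parent can be overridden in client config as noted above):

    package cgroupsketch

    import "path/filepath"

    const cgroupRoot = "/sys/fs/cgroup"

    // v2CgroupPath sketches the naming convention: a nomad.slice parent with
    // one <allocID>-<task>.scope cgroup per task.
    func v2CgroupPath(parent, allocID, task string) string {
        if parent == "" {
            parent = "nomad.slice" // default unless configured otherwise
        }
        return filepath.Join(cgroupRoot, parent, allocID+"-"+task+".scope")
    }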

Client nodes now export a new fingerprint attribute, unique.cgroups.version,
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.

The new cpuset management strategy fixes #11705, where Docker tasks that
spawned processes on startup would "leak". In cgroups v2, PIDs are started in
the cgroup they will always live in, which eliminates the cause of the leak.

[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html

Closes #11289
Fixes #11705 #11773 #11933
2022-03-23 11:35:27 -05:00


//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris

package exec

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/lib/cgutil"
	ctestutils "github.com/hashicorp/nomad/client/testutil"
	"github.com/hashicorp/nomad/drivers/shared/capabilities"
	"github.com/hashicorp/nomad/drivers/shared/executor"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/uuid"
	basePlug "github.com/hashicorp/nomad/plugins/base"
	"github.com/hashicorp/nomad/plugins/drivers"
	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"
	"golang.org/x/sys/unix"
)
func TestExecDriver_StartWaitStop(t *testing.T) {
	ci.Parallel(t)
	ctestutils.ExecCompatible(t)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	d := NewExecDriver(ctx, testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	allocID := uuid.Generate()
	task := &drivers.TaskConfig{
		AllocID:   allocID,
		ID:        uuid.Generate(),
		Name:      "test",
		Resources: testResources(allocID, "test"),
	}

	taskConfig := map[string]interface{}{
		"command": "/bin/sleep",
		"args":    []string{"600"},
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskConfig))

	cleanup := harness.MkAllocDir(task, false)
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	defer harness.DestroyTask(task.ID, true)
	require.NoError(t, err)

	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
	require.NoError(t, err)

	require.NoError(t, harness.WaitUntilStarted(task.ID, 1*time.Second))

	go func() {
		harness.StopTask(task.ID, 2*time.Second, "SIGKILL")
	}()

	select {
	case result := <-ch:
		require.Equal(t, int(unix.SIGKILL), result.Signal)
	case <-time.After(10 * time.Second):
		require.Fail(t, "timeout waiting for task to shutdown")
	}

	// Ensure that the task is marked as dead, but account
	// for WaitTask() closing channel before internal state is updated
	testutil.WaitForResult(func() (bool, error) {
		status, err := harness.InspectTask(task.ID)
		if err != nil {
			return false, fmt.Errorf("inspecting task failed: %v", err)
		}
		if status.State != drivers.TaskStateExited {
			return false, fmt.Errorf("task hasn't exited yet; status: %v", status.State)
		}
		return true, nil
	}, func(err error) {
		require.NoError(t, err)
	})
}
func TestExec_ExecTaskStreaming(t *testing.T) {
	ci.Parallel(t)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	d := NewExecDriver(ctx, testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)
	defer harness.Kill()

	task := &drivers.TaskConfig{
		ID:   uuid.Generate(),
		Name: "sleep",
	}
	if cgutil.UseV2 {
		// On cgroups v2 the driver creates an <allocID>-<task>.scope cgroup,
		// so the test supplies an alloc ID and resources.
		allocID := uuid.Generate()
		task.AllocID = allocID
		task.Resources = testResources(allocID, "sleep")
	}

	cleanup := harness.MkAllocDir(task, false)
	defer cleanup()

	tc := &TaskConfig{
		Command: "/bin/sleep",
		Args:    []string{"9000"},
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&tc))

	_, _, err := harness.StartTask(task)
	require.NoError(t, err)
	defer d.DestroyTask(task.ID, true)

	dtestutil.ExecTaskStreamingConformanceTests(t, harness, task.ID)
}
// Tests that a given DNSConfig properly configures dns
func TestExec_dnsConfig(t *testing.T) {
	ci.Parallel(t)
	ctestutils.RequireRoot(t)
	ctestutils.ExecCompatible(t)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	d := NewExecDriver(ctx, testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)
	defer harness.Kill()

	cases := []struct {
		name string
		cfg  *drivers.DNSConfig
	}{
		{
			name: "nil DNSConfig",
		},
		{
			name: "basic",
			cfg: &drivers.DNSConfig{
				Servers: []string{"1.1.1.1", "1.0.0.1"},
			},
		},
		{
			name: "full",
			cfg: &drivers.DNSConfig{
				Servers:  []string{"1.1.1.1", "1.0.0.1"},
				Searches: []string{"local.test", "node.consul"},
				Options:  []string{"ndots:2", "edns0"},
			},
		},
	}

	for _, c := range cases {
		task := &drivers.TaskConfig{
			ID:   uuid.Generate(),
			Name: "sleep",
			DNS:  c.cfg,
		}
		if cgutil.UseV2 {
			allocID := uuid.Generate()
			task.Resources = testResources(allocID, "sleep")
		}

		cleanup := harness.MkAllocDir(task, false)
		defer cleanup()

		tc := &TaskConfig{
			Command: "/bin/sleep",
			Args:    []string{"9000"},
		}
		require.NoError(t, task.EncodeConcreteDriverConfig(&tc))

		_, _, err := harness.StartTask(task)
		require.NoError(t, err)
		defer d.DestroyTask(task.ID, true)

		dtestutil.TestTaskDNSConfig(t, harness, task.ID, c.cfg)
	}
}
func TestExecDriver_Capabilities(t *testing.T) {
	ci.Parallel(t)
	ctestutils.ExecCompatible(t)

	task := &drivers.TaskConfig{
		ID:   uuid.Generate(),
		Name: "sleep",
	}
	if cgutil.UseV2 {
		allocID := uuid.Generate()
		task.AllocID = allocID
		task.Resources = testResources(allocID, "sleep")
	}

	for _, tc := range []struct {
		Name       string
		CapAdd     []string
		CapDrop    []string
		AllowList  string
		StartError string
	}{
		{
			Name:    "default-allowlist-add-allowed",
			CapAdd:  []string{"fowner", "mknod"},
			CapDrop: []string{"ALL"},
		},
		{
			Name:       "default-allowlist-add-forbidden",
			CapAdd:     []string{"net_admin"},
			StartError: "net_admin",
		},
		{
			Name:    "default-allowlist-drop-existing",
			CapDrop: []string{"FOWNER", "MKNOD", "NET_RAW"},
		},
		{
			Name:      "restrictive-allowlist-drop-all",
			CapDrop:   []string{"ALL"},
			AllowList: "FOWNER,MKNOD",
		},
		{
			Name:      "restrictive-allowlist-add-allowed",
			CapAdd:    []string{"fowner", "mknod"},
			CapDrop:   []string{"ALL"},
			AllowList: "fowner,mknod",
		},
		{
			Name:       "restrictive-allowlist-add-forbidden",
			CapAdd:     []string{"net_admin", "mknod"},
			CapDrop:    []string{"ALL"},
			AllowList:  "fowner,mknod",
			StartError: "net_admin",
		},
		{
			Name:       "restrictive-allowlist-add-multiple-forbidden",
			CapAdd:     []string{"net_admin", "mknod", "CAP_SYS_TIME"},
			CapDrop:    []string{"ALL"},
			AllowList:  "fowner,mknod",
			StartError: "net_admin, sys_time",
		},
		{
			Name:      "permissive-allowlist",
			CapAdd:    []string{"net_admin", "mknod"},
			AllowList: "ALL",
		},
		{
			Name:      "permissive-allowlist-add-all",
			CapAdd:    []string{"all"},
			AllowList: "ALL",
		},
	} {
		t.Run(tc.Name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			d := NewExecDriver(ctx, testlog.HCLogger(t))
			harness := dtestutil.NewDriverHarness(t, d)
			defer harness.Kill()

			config := &Config{
				NoPivotRoot:    true,
				DefaultModePID: executor.IsolationModePrivate,
				DefaultModeIPC: executor.IsolationModePrivate,
			}

			if tc.AllowList != "" {
				config.AllowCaps = strings.Split(tc.AllowList, ",")
			} else {
				// inherit HCL defaults if not set
				config.AllowCaps = capabilities.NomadDefaults().Slice(true)
			}

			var data []byte
			require.NoError(t, basePlug.MsgPackEncode(&data, config))
			baseConfig := &basePlug.Config{PluginConfig: data}
			require.NoError(t, harness.SetConfig(baseConfig))

			cleanup := harness.MkAllocDir(task, false)
			defer cleanup()

			tCfg := &TaskConfig{
				Command: "/bin/sleep",
				Args:    []string{"9000"},
			}
			if len(tc.CapAdd) > 0 {
				tCfg.CapAdd = tc.CapAdd
			}
			if len(tc.CapDrop) > 0 {
				tCfg.CapDrop = tc.CapDrop
			}
			require.NoError(t, task.EncodeConcreteDriverConfig(&tCfg))

			// check the start error against expectations
			_, _, err := harness.StartTask(task)
			if err == nil && tc.StartError != "" {
				t.Fatalf("Expected error in start: %v", tc.StartError)
			} else if err != nil {
				if tc.StartError == "" {
					require.NoError(t, err)
				} else {
					require.Contains(t, err.Error(), tc.StartError)
				}
				return
			}

			_ = d.DestroyTask(task.ID, true)
		})
	}
}