4767d44b94
* Fix DevicesSets being removed when cpusets are reloaded with cgroup v2

  This meant that if any allocation was created or removed, all active DevicesSets were
  removed from all cgroups of all tasks. This was most noticeable with "exec" and
  "raw_exec", as it meant they no longer had access to /dev files.

* e2e: add test for verifying cgroups do not interfere with access to devices

Co-authored-by: Seth Hoenig <shoenig@duck.com>
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package isolation

import (
	"testing"

	"github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/shoenig/test/must"
)

func TestCgroupDevices(t *testing.T) {
	nomad := e2eutil.NomadClient(t)

	e2eutil.WaitForLeader(t, nomad)
	e2eutil.WaitForNodesReady(t, nomad, 1)

	t.Run("testDevicesUsable", testDevicesUsable)
}

func testDevicesUsable(t *testing.T) {
	nomad := e2eutil.NomadClient(t)

	jobID := "cgroup-devices-" + uuid.Short()
	jobIDs := []string{jobID}
	t.Cleanup(e2eutil.CleanupJobsAndGC(t, &jobIDs))

	// start job
	allocs := e2eutil.RegisterAndWaitForAllocs(t, nomad, "./input/cgroup_devices.hcl", jobID, "")
	must.Len(t, 2, allocs)

	// pick one to stop and one to verify
	allocA := allocs[0].ID
	allocB := allocs[1].ID

	// verify devices are working
	checkDev(t, allocA)
	checkDev(t, allocB)

	// stop the chosen alloc
	_, err := e2eutil.Command("nomad", "alloc", "stop", "-detach", allocA)
	must.NoError(t, err)
	e2eutil.WaitForAllocStopped(t, nomad, allocA)

	// verify device of remaining alloc
	checkDev(t, allocB)
}

// checkDev execs `dd` inside the allocation to copy a single block from
// /dev/zero to /dev/null, verifying the task still has access to device files.
func checkDev(t *testing.T, allocID string) {
	_, err := e2eutil.Command("nomad", "alloc", "exec", allocID, "dd", "if=/dev/zero", "of=/dev/null", "count=1")
	must.NoError(t, err)
}
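
The jobspec registered by the test, ./input/cgroup_devices.hcl, is not included on this page. Since testDevicesUsable waits for exactly two allocations and then stops one, a minimal jobspec along the following lines would satisfy it; the job, group, and task names, the raw_exec driver, and the sleep command here are assumptions for illustration, not the actual file contents.

# Hypothetical sketch of input/cgroup_devices.hcl (assumed, not the real file).
# Two long-lived allocations are needed so that one can be stopped while
# device access is re-checked on the other.
job "cgroup-devices" {
  group "group" {
    count = 2

    task "sleep" {
      driver = "raw_exec"

      config {
        command = "sleep"
        args    = ["infinity"]
      }
    }
  }
}

A raw_exec task is a natural fit for such a jobspec, since the underlying fix concerns exec and raw_exec tasks losing access to /dev files when cgroup v2 device rules were reset.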