2023-04-10 15:36:59 +00:00
|
|
|
// Copyright (c) HashiCorp, Inc.
|
|
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
|
2018-11-27 19:03:58 +00:00
|
|
|
package testutils
|
2018-09-26 17:33:37 +00:00
|
|
|
|
|
|
|
import (
|
2018-11-13 01:09:27 +00:00
|
|
|
"context"
|
2018-09-26 05:18:03 +00:00
|
|
|
"fmt"
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup hierarchy inheritance via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client config), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
"os"
|
2018-09-26 17:33:37 +00:00
|
|
|
"path/filepath"
|
2018-09-26 05:18:03 +00:00
|
|
|
"runtime"
|
2018-09-26 17:33:37 +00:00
|
|
|
"time"
|
|
|
|
|
2018-09-26 05:18:03 +00:00
|
|
|
hclog "github.com/hashicorp/go-hclog"
|
2018-09-26 17:33:37 +00:00
|
|
|
plugin "github.com/hashicorp/go-plugin"
|
2022-04-19 14:13:38 +00:00
|
|
|
"github.com/hashicorp/nomad/ci"
|
2018-09-26 05:18:03 +00:00
|
|
|
"github.com/hashicorp/nomad/client/allocdir"
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup hierarchy inheritance via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client config), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
"github.com/hashicorp/nomad/client/lib/cgutil"
|
2018-09-26 05:18:03 +00:00
|
|
|
"github.com/hashicorp/nomad/client/logmon"
|
2018-11-30 11:18:39 +00:00
|
|
|
"github.com/hashicorp/nomad/client/taskenv"
|
2018-09-26 17:33:37 +00:00
|
|
|
"github.com/hashicorp/nomad/helper/testlog"
|
2018-09-26 05:18:03 +00:00
|
|
|
"github.com/hashicorp/nomad/helper/uuid"
|
2018-11-09 04:38:47 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/mock"
|
|
|
|
"github.com/hashicorp/nomad/nomad/structs"
|
2018-09-26 17:33:37 +00:00
|
|
|
"github.com/hashicorp/nomad/plugins/base"
|
2018-11-27 19:03:58 +00:00
|
|
|
"github.com/hashicorp/nomad/plugins/drivers"
|
2018-09-26 17:33:37 +00:00
|
|
|
"github.com/hashicorp/nomad/plugins/shared/hclspec"
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup heirarchy inheritence via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client conifg), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
testing "github.com/mitchellh/go-testing-interface"
|
2018-11-13 01:09:27 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
2018-09-26 17:33:37 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// DriverHarness is a test helper that serves a drivers.DriverPlugin over an
// in-process go-plugin gRPC connection, so driver tests exercise the real RPC
// codepaths rather than calling the implementation directly.
type DriverHarness struct {
	// Embedded client-side view of the driver; method calls travel over gRPC.
	drivers.DriverPlugin

	// client is the gRPC client half of the in-process plugin connection.
	client *plugin.GRPCClient
	// server is the gRPC server half of the in-process plugin connection.
	server *plugin.GRPCServer
	// t is the test context used for fatal assertions during cleanup.
	t testing.T
	// logger is the named harness logger shared with the served plugins.
	logger hclog.Logger
	// impl is the raw, in-process driver implementation (see Impl).
	impl drivers.DriverPlugin
	// cgroup is the v2 cgroup path created by setupCgroupV2, or "" if none.
	cgroup string
}
|
|
|
|
|
2021-12-20 10:44:21 +00:00
|
|
|
// Impl returns the underlying driver implementation directly, bypassing the
// gRPC plugin connection that the embedded DriverPlugin goes through.
func (h *DriverHarness) Impl() drivers.DriverPlugin {
	return h.impl
}
|
2018-11-27 19:03:58 +00:00
|
|
|
func NewDriverHarness(t testing.T, d drivers.DriverPlugin) *DriverHarness {
|
2018-09-26 05:18:03 +00:00
|
|
|
logger := testlog.HCLogger(t).Named("driver_harness")
|
2019-01-25 14:38:41 +00:00
|
|
|
pd := drivers.NewDriverPlugin(d, logger)
|
2018-11-27 19:03:58 +00:00
|
|
|
|
2018-09-26 05:18:03 +00:00
|
|
|
client, server := plugin.TestPluginGRPCConn(t,
|
|
|
|
map[string]plugin.Plugin{
|
2018-11-27 19:03:58 +00:00
|
|
|
base.PluginTypeDriver: pd,
|
|
|
|
base.PluginTypeBase: &base.PluginBase{Impl: d},
|
|
|
|
"logmon": logmon.NewPlugin(logmon.NewLogMon(logger.Named("logmon"))),
|
2018-09-26 17:33:37 +00:00
|
|
|
},
|
2018-09-26 05:18:03 +00:00
|
|
|
)
|
2018-09-26 17:33:37 +00:00
|
|
|
|
|
|
|
raw, err := client.Dispense(base.PluginTypeDriver)
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup heirarchy inheritence via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client conifg), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
require.NoError(t, err, "failed to dispense plugin")
|
2018-09-26 17:33:37 +00:00
|
|
|
|
2018-11-27 19:03:58 +00:00
|
|
|
dClient := raw.(drivers.DriverPlugin)
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup heirarchy inheritence via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client conifg), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
return &DriverHarness{
|
2018-09-26 17:33:37 +00:00
|
|
|
client: client,
|
|
|
|
server: server,
|
|
|
|
DriverPlugin: dClient,
|
2018-09-26 05:18:03 +00:00
|
|
|
logger: logger,
|
2018-10-16 02:37:58 +00:00
|
|
|
t: t,
|
2018-11-09 04:38:47 +00:00
|
|
|
impl: d,
|
2018-09-26 17:33:37 +00:00
|
|
|
}
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup heirarchy inheritence via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client conifg), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
}
|
2018-09-26 17:33:37 +00:00
|
|
|
|
2022-03-29 00:33:01 +00:00
|
|
|
// setupCgroupV2 creates a v2 cgroup for the task, as if a Client were initialized
|
|
|
|
// and managing the cgroup as it normally would via the cpuset manager.
|
|
|
|
//
|
|
|
|
// Note that we are being lazy and trying to avoid importing cgutil because
|
|
|
|
// currently plugins/drivers/testutils is platform agnostic-ish.
|
|
|
|
//
|
|
|
|
// Some drivers (raw_exec) setup their own cgroup, while others (exec, java, docker)
|
|
|
|
// would otherwise depend on the Nomad cpuset manager (and docker daemon) to create
|
|
|
|
// one, which isn't available here in testing, and so we create one via the harness.
|
|
|
|
// Plumbing such metadata through to the harness is a mind bender, so we just always
|
|
|
|
// create the cgroup, but at least put it under 'testing.slice'.
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup heirarchy inheritence via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client conifg), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
//
|
2022-03-29 00:33:01 +00:00
|
|
|
// tl;dr raw_exec tests should ignore this cgroup.
|
|
|
|
func (h *DriverHarness) setupCgroupV2(allocID, task string) {
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup heirarchy inheritence via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client conifg), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
if cgutil.UseV2 {
|
|
|
|
h.cgroup = filepath.Join(cgutil.CgroupRoot, "testing.slice", cgutil.CgroupScope(allocID, task))
|
2022-03-29 00:33:01 +00:00
|
|
|
h.logger.Trace("create cgroup for test", "parent", "testing.slice", "id", allocID, "task", task, "path", h.cgroup)
|
|
|
|
if err := os.MkdirAll(h.cgroup, 0755); err != nil {
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup heirarchy inheritence via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client conifg), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
}
|
2018-09-26 17:33:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Kill tears down the harness: it closes the plugin client connection, stops
// the in-process gRPC plugin server, and removes any cgroup the harness
// created for the task.
func (h *DriverHarness) Kill() {
	_ = h.client.Close() // best-effort; a close error during teardown is not actionable
	h.server.Stop()
	h.cleanupCgroup()
}
|
|
|
|
|
2022-03-29 00:33:01 +00:00
|
|
|
// cleanupCgroup might cleanup a cgroup that may or may not be tricked by DriverHarness.
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup heirarchy inheritence via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client conifg), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
func (h *DriverHarness) cleanupCgroup() {
|
2022-03-29 00:33:01 +00:00
|
|
|
// some [non-exec] tests don't bother with MkAllocDir which is what would create
|
|
|
|
// the cgroup, but then do call Kill, so in that case skip the cgroup cleanup
|
|
|
|
if cgutil.UseV2 && h.cgroup != "" {
|
|
|
|
if err := os.Remove(h.cgroup); err != nil && !os.IsNotExist(err) {
|
|
|
|
// in some cases the driver will cleanup the cgroup itself, in which
|
|
|
|
// case we do not care about the cgroup not existing at cleanup time
|
|
|
|
h.t.Fatalf("failed to cleanup cgroup: %v", err)
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup heirarchy inheritence via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client conifg), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
}
|
|
|
|
}
|
2018-09-26 17:33:37 +00:00
|
|
|
}
|
|
|
|
|
2019-05-29 22:38:43 +00:00
|
|
|
// MkAllocDir creates a temporary directory and allocdir structure.
|
2018-09-26 05:18:03 +00:00
|
|
|
// If enableLogs is set to true a logmon instance will be started to write logs
|
|
|
|
// to the LogDir of the task
|
2020-04-20 13:28:19 +00:00
|
|
|
// A cleanup func is returned and should be deferred so as to not leak dirs
|
2018-09-26 17:33:37 +00:00
|
|
|
// between tests.
|
2018-11-27 19:03:58 +00:00
|
|
|
func (h *DriverHarness) MkAllocDir(t *drivers.TaskConfig, enableLogs bool) func() {
|
2023-03-08 16:25:09 +00:00
|
|
|
dir, err := os.MkdirTemp("", "nomad_driver_harness-")
|
2018-09-26 17:33:37 +00:00
|
|
|
require.NoError(h.t, err)
|
2018-09-26 05:18:03 +00:00
|
|
|
|
2021-10-15 23:56:14 +00:00
|
|
|
allocDir := allocdir.NewAllocDir(h.logger, dir, t.AllocID)
|
2018-09-26 05:18:03 +00:00
|
|
|
require.NoError(h.t, allocDir.Build())
|
2021-10-18 17:32:41 +00:00
|
|
|
|
|
|
|
t.AllocDir = allocDir.AllocDir
|
|
|
|
|
2018-09-26 05:18:03 +00:00
|
|
|
taskDir := allocDir.NewTaskDir(t.Name)
|
2018-10-16 02:37:58 +00:00
|
|
|
|
|
|
|
caps, err := h.Capabilities()
|
|
|
|
require.NoError(h.t, err)
|
|
|
|
|
|
|
|
fsi := caps.FSIsolation
|
2022-03-29 00:33:01 +00:00
|
|
|
h.logger.Trace("FS isolation", "fsi", fsi)
|
2022-04-19 14:13:38 +00:00
|
|
|
require.NoError(h.t, taskDir.Build(fsi == drivers.FSIsolationChroot, ci.TinyChroot))
|
2018-09-26 05:18:03 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
task := &structs.Task{
|
2018-11-27 03:42:45 +00:00
|
|
|
Name: t.Name,
|
|
|
|
Env: t.Env,
|
|
|
|
}
|
2018-12-13 23:06:48 +00:00
|
|
|
|
|
|
|
// Create the mock allocation
|
|
|
|
alloc := mock.Alloc()
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup heirarchy inheritence via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client conifg), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
alloc.ID = t.AllocID
|
2018-11-27 03:42:45 +00:00
|
|
|
if t.Resources != nil {
|
2018-12-13 23:06:48 +00:00
|
|
|
alloc.AllocatedResources.Tasks[task.Name] = t.Resources.NomadResources
|
2018-11-09 04:38:47 +00:00
|
|
|
}
|
2018-12-13 23:06:48 +00:00
|
|
|
|
|
|
|
taskBuilder := taskenv.NewBuilder(mock.Node(), alloc, task, "global")
|
2023-05-12 17:29:44 +00:00
|
|
|
SetEnvvars(taskBuilder, fsi, taskDir)
|
2018-11-09 04:38:47 +00:00
|
|
|
|
|
|
|
taskEnv := taskBuilder.Build()
|
|
|
|
if t.Env == nil {
|
2018-11-28 22:19:48 +00:00
|
|
|
t.Env = taskEnv.Map()
|
2018-11-09 04:38:47 +00:00
|
|
|
} else {
|
2018-11-28 22:19:48 +00:00
|
|
|
for k, v := range taskEnv.Map() {
|
2018-11-09 04:38:47 +00:00
|
|
|
if _, ok := t.Env[k]; !ok {
|
|
|
|
t.Env[k] = v
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-03-29 00:33:01 +00:00
|
|
|
// setup a v2 cgroup for test cases that assume one exists
|
|
|
|
h.setupCgroupV2(alloc.ID, task.Name)
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup heirarchy inheritence via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client conifg), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
|
2018-09-26 05:18:03 +00:00
|
|
|
//logmon
|
|
|
|
if enableLogs {
|
2019-03-21 01:14:08 +00:00
|
|
|
lm := logmon.NewLogMon(h.logger.Named("logmon"))
|
2018-09-26 05:18:03 +00:00
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
id := uuid.Generate()[:8]
|
2018-09-26 19:32:26 +00:00
|
|
|
t.StdoutPath = fmt.Sprintf("//./pipe/%s-%s.stdout", t.Name, id)
|
|
|
|
t.StderrPath = fmt.Sprintf("//./pipe/%s-%s.stderr", t.Name, id)
|
2018-09-26 05:18:03 +00:00
|
|
|
} else {
|
2018-09-26 19:32:26 +00:00
|
|
|
t.StdoutPath = filepath.Join(taskDir.LogDir, fmt.Sprintf(".%s.stdout.fifo", t.Name))
|
|
|
|
t.StderrPath = filepath.Join(taskDir.LogDir, fmt.Sprintf(".%s.stderr.fifo", t.Name))
|
2018-09-26 05:18:03 +00:00
|
|
|
}
|
2019-03-21 01:14:08 +00:00
|
|
|
err = lm.Start(&logmon.LogConfig{
|
2018-09-26 05:18:03 +00:00
|
|
|
LogDir: taskDir.LogDir,
|
|
|
|
StdoutLogFile: fmt.Sprintf("%s.stdout", t.Name),
|
|
|
|
StderrLogFile: fmt.Sprintf("%s.stderr", t.Name),
|
2018-09-26 19:32:26 +00:00
|
|
|
StdoutFifo: t.StdoutPath,
|
|
|
|
StderrFifo: t.StderrPath,
|
2018-09-26 05:18:03 +00:00
|
|
|
MaxFiles: 10,
|
|
|
|
MaxFileSizeMB: 10,
|
|
|
|
})
|
|
|
|
require.NoError(h.t, err)
|
|
|
|
|
|
|
|
return func() {
|
2019-03-21 01:14:08 +00:00
|
|
|
lm.Stop()
|
2018-12-07 19:03:13 +00:00
|
|
|
h.client.Close()
|
2018-09-26 05:18:03 +00:00
|
|
|
allocDir.Destroy()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return func() {
|
2018-12-07 19:03:13 +00:00
|
|
|
h.client.Close()
|
2018-09-26 05:18:03 +00:00
|
|
|
allocDir.Destroy()
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup heirarchy inheritence via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client conifg), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
h.cleanupCgroup()
|
2018-09-26 05:18:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// WaitUntilStarted will block until the task for the given ID is in the running
|
|
|
|
// state or the timeout is reached
|
|
|
|
func (h *DriverHarness) WaitUntilStarted(taskID string, timeout time.Duration) error {
|
|
|
|
deadline := time.Now().Add(timeout)
|
2018-11-27 19:03:58 +00:00
|
|
|
var lastState drivers.TaskState
|
2018-09-26 05:18:03 +00:00
|
|
|
for {
|
|
|
|
status, err := h.InspectTask(taskID)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-11-27 19:03:58 +00:00
|
|
|
if status.State == drivers.TaskStateRunning {
|
2018-09-26 05:18:03 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
lastState = status.State
|
|
|
|
if time.Now().After(deadline) {
|
|
|
|
return fmt.Errorf("task never transitioned to running, currently '%s'", lastState)
|
|
|
|
}
|
|
|
|
time.Sleep(100 * time.Millisecond)
|
|
|
|
}
|
2018-09-26 17:33:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// MockDriver is used for testing.
// Each function can be set as a closure to make assertions about how data
// is passed through the base plugin layer. Calling a method whose closure
// is unset panics, which surfaces unconfigured expectations in tests.
type MockDriver struct {
	base.MockPlugin
	TaskConfigSchemaF  func() (*hclspec.Spec, error)
	FingerprintF       func(context.Context) (<-chan *drivers.Fingerprint, error)
	CapabilitiesF      func() (*drivers.Capabilities, error)
	RecoverTaskF       func(*drivers.TaskHandle) error
	StartTaskF         func(*drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error)
	WaitTaskF          func(context.Context, string) (<-chan *drivers.ExitResult, error)
	StopTaskF          func(string, time.Duration, string) error
	DestroyTaskF       func(string, bool) error
	InspectTaskF       func(string) (*drivers.TaskStatus, error)
	TaskStatsF         func(context.Context, string, time.Duration) (<-chan *drivers.TaskResourceUsage, error)
	TaskEventsF        func(context.Context) (<-chan *drivers.TaskEvent, error)
	SignalTaskF        func(string, string) error
	ExecTaskF          func(string, []string, time.Duration) (*drivers.ExecTaskResult, error)
	ExecTaskStreamingF func(context.Context, string, *drivers.ExecOptions) (*drivers.ExitResult, error)
	MockNetworkManager
}
|
|
|
|
|
|
|
|
// MockNetworkManager delegates network create/destroy calls to
// test-configurable closures.
type MockNetworkManager struct {
	CreateNetworkF  func(string, *drivers.NetworkCreateRequest) (*drivers.NetworkIsolationSpec, bool, error)
	DestroyNetworkF func(string, *drivers.NetworkIsolationSpec) error
}
|
|
|
|
|
2021-09-16 06:13:09 +00:00
|
|
|
// CreateNetwork delegates to the CreateNetworkF closure.
func (m *MockNetworkManager) CreateNetwork(allocID string, req *drivers.NetworkCreateRequest) (*drivers.NetworkIsolationSpec, bool, error) {
	return m.CreateNetworkF(allocID, req)
}
|
|
|
|
// DestroyNetwork delegates to the DestroyNetworkF closure.
func (m *MockNetworkManager) DestroyNetwork(id string, spec *drivers.NetworkIsolationSpec) error {
	return m.DestroyNetworkF(id, spec)
}
|
|
|
|
|
|
|
|
// TaskConfigSchema delegates to the TaskConfigSchemaF closure.
func (d *MockDriver) TaskConfigSchema() (*hclspec.Spec, error) { return d.TaskConfigSchemaF() }
|
2018-11-27 19:03:58 +00:00
|
|
|
// Fingerprint delegates to the FingerprintF closure.
func (d *MockDriver) Fingerprint(ctx context.Context) (<-chan *drivers.Fingerprint, error) {
	return d.FingerprintF(ctx)
}
|
2018-11-27 19:03:58 +00:00
|
|
|
// Capabilities delegates to the CapabilitiesF closure.
func (d *MockDriver) Capabilities() (*drivers.Capabilities, error) { return d.CapabilitiesF() }
|
|
|
|
// RecoverTask delegates to the RecoverTaskF closure.
func (d *MockDriver) RecoverTask(h *drivers.TaskHandle) error { return d.RecoverTaskF(h) }
|
2019-01-04 23:01:35 +00:00
|
|
|
// StartTask delegates to the StartTaskF closure.
func (d *MockDriver) StartTask(c *drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) {
	return d.StartTaskF(c)
}
|
2018-11-27 19:03:58 +00:00
|
|
|
// WaitTask delegates to the WaitTaskF closure.
func (d *MockDriver) WaitTask(ctx context.Context, id string) (<-chan *drivers.ExitResult, error) {
	return d.WaitTaskF(ctx, id)
}
|
|
|
|
// StopTask delegates to the StopTaskF closure.
func (d *MockDriver) StopTask(taskID string, timeout time.Duration, signal string) error {
	return d.StopTaskF(taskID, timeout, signal)
}
|
|
|
|
// DestroyTask delegates to the DestroyTaskF closure.
func (d *MockDriver) DestroyTask(taskID string, force bool) error {
	return d.DestroyTaskF(taskID, force)
}
|
2018-11-27 19:03:58 +00:00
|
|
|
// InspectTask delegates to the InspectTaskF closure.
func (d *MockDriver) InspectTask(taskID string) (*drivers.TaskStatus, error) {
	return d.InspectTaskF(taskID)
}
|
2018-12-11 20:27:50 +00:00
|
|
|
// TaskStats delegates to the TaskStatsF closure.
func (d *MockDriver) TaskStats(ctx context.Context, taskID string, i time.Duration) (<-chan *drivers.TaskResourceUsage, error) {
	return d.TaskStatsF(ctx, taskID, i)
}
|
2018-11-27 19:03:58 +00:00
|
|
|
// TaskEvents delegates to the TaskEventsF closure.
func (d *MockDriver) TaskEvents(ctx context.Context) (<-chan *drivers.TaskEvent, error) {
	return d.TaskEventsF(ctx)
}
|
|
|
|
// SignalTask delegates to the SignalTaskF closure.
func (d *MockDriver) SignalTask(taskID string, signal string) error {
	return d.SignalTaskF(taskID, signal)
}
|
2018-11-27 19:03:58 +00:00
|
|
|
// ExecTask delegates to the ExecTaskF closure.
func (d *MockDriver) ExecTask(taskID string, cmd []string, timeout time.Duration) (*drivers.ExecTaskResult, error) {
	return d.ExecTaskF(taskID, cmd, timeout)
}
|
2019-01-04 23:33:27 +00:00
|
|
|
|
2019-04-28 21:12:53 +00:00
|
|
|
// ExecTaskStreaming delegates to the ExecTaskStreamingF closure.
func (d *MockDriver) ExecTaskStreaming(ctx context.Context, taskID string, execOpts *drivers.ExecOptions) (*drivers.ExitResult, error) {
	return d.ExecTaskStreamingF(ctx, taskID, execOpts)
}
|
|
|
|
|
2019-01-04 23:33:27 +00:00
|
|
|
// SetEnvvars sets path and host env vars depending on the FS isolation used.
|
2023-05-12 17:29:44 +00:00
|
|
|
func SetEnvvars(envBuilder *taskenv.Builder, fsi drivers.FSIsolation, taskDir *allocdir.TaskDir) {
|
2020-12-14 17:56:34 +00:00
|
|
|
|
|
|
|
envBuilder.SetClientTaskRoot(taskDir.Dir)
|
|
|
|
envBuilder.SetClientSharedAllocDir(taskDir.SharedAllocDir)
|
|
|
|
envBuilder.SetClientTaskLocalDir(taskDir.LocalDir)
|
|
|
|
envBuilder.SetClientTaskSecretsDir(taskDir.SecretsDir)
|
|
|
|
|
2019-01-04 23:33:27 +00:00
|
|
|
// Set driver-specific environment variables
|
|
|
|
switch fsi {
|
|
|
|
case drivers.FSIsolationNone:
|
|
|
|
// Use host paths
|
|
|
|
envBuilder.SetAllocDir(taskDir.SharedAllocDir)
|
|
|
|
envBuilder.SetTaskLocalDir(taskDir.LocalDir)
|
|
|
|
envBuilder.SetSecretsDir(taskDir.SecretsDir)
|
|
|
|
default:
|
|
|
|
// filesystem isolation; use container paths
|
|
|
|
envBuilder.SetAllocDir(allocdir.SharedAllocContainerPath)
|
|
|
|
envBuilder.SetTaskLocalDir(allocdir.TaskLocalContainerPath)
|
|
|
|
envBuilder.SetSecretsDir(allocdir.TaskSecretsContainerPath)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set the host environment variables for non-image based drivers
|
|
|
|
if fsi != drivers.FSIsolationImage {
|
2023-05-12 17:29:44 +00:00
|
|
|
envBuilder.SetHostEnvvars([]string{"env.denylist"})
|
2019-01-04 23:33:27 +00:00
|
|
|
}
|
|
|
|
}
|