//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris

package docker

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	docker "github.com/fsouza/go-dockerclient"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/testutil"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/plugins/drivers"
	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
	tu "github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestDockerDriver_User(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	task, cfg, _ := dockerTask(t)

	task.User = "alice"
	cfg.Command = "/bin/sleep"
	cfg.Args = []string{"10000"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	if err == nil {
		d.DestroyTask(task.ID, true)
		t.Fatalf("Should've failed")
	}

	if !strings.Contains(err.Error(), "alice") {
		t.Fatalf("Expected failure string not found, found %q instead", err.Error())
	}
}

func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)
	require := require.New(t)

	// Because go-dockerclient doesn't provide an API for querying network
	// aliases, just check that a container can be created with a
	// 'network_aliases' property.

	// Create a network; network-scoped aliases are supported only for
	// containers in user-defined networks.
	client := newTestDockerClient(t)
	networkOpts := docker.CreateNetworkOptions{Name: "foobar", Driver: "bridge"}
	network, err := client.CreateNetwork(networkOpts)
	require.NoError(err)
	defer client.RemoveNetwork(network.ID)

	expected := []string{"foobar"}
	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
	taskCfg.NetworkMode = network.Name
	taskCfg.NetworkAliases = expected
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "busybox",
		Resources: basicResources,
	}
	require.NoError(task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err = d.StartTask(task)
	require.NoError(err)
	require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second))

	defer d.DestroyTask(task.ID, true)

	dockerDriver, ok := d.Impl().(*Driver)
	require.True(ok)

	handle, ok := dockerDriver.tasks.Get(task.ID)
	require.True(ok)

	_, err = client.InspectContainer(handle.containerID)
	require.NoError(err)
}

func TestDockerDriver_NetworkMode_Host(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)
	expected := "host"

	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
	taskCfg.NetworkMode = expected

	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "busybox-demo",
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	defer d.DestroyTask(task.ID, true)

	dockerDriver, ok := d.Impl().(*Driver)
	require.True(t, ok)

	handle, ok := dockerDriver.tasks.Get(task.ID)
	require.True(t, ok)

	client := newTestDockerClient(t)
	container, err := client.InspectContainer(handle.containerID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	actual := container.HostConfig.NetworkMode
	require.Equal(t, expected, actual)
}

func TestDockerDriver_CPUCFSPeriod(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	task, cfg, _ := dockerTask(t)

	cfg.CPUHardLimit = true
	cfg.CPUCFSPeriod = 1000000
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, _, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()

	waitForExist(t, client, handle.containerID)

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	require.Equal(t, cfg.CPUCFSPeriod, container.HostConfig.CPUPeriod)
}

func TestDockerDriver_Sysctl_Ulimit(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	task, cfg, _ := dockerTask(t)

	expectedUlimits := map[string]string{
		"nproc":  "4242",
		"nofile": "2048:4096",
	}
	cfg.Sysctl = map[string]string{
		"net.core.somaxconn": "16384",
	}
	cfg.Ulimit = expectedUlimits
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	assert.Nil(t, err, "unexpected error: %v", err)

	want := "16384"
	got := container.HostConfig.Sysctls["net.core.somaxconn"]
	assert.Equal(t, want, got, "Wrong net.core.somaxconn config for docker job. Expect: %s, got: %s", want, got)

	expectedUlimitLen := 2
	actualUlimitLen := len(container.HostConfig.Ulimits)
	assert.Equal(t, expectedUlimitLen, actualUlimitLen, "Wrong number of ulimit configs for docker job. Expect: %d, got: %d", expectedUlimitLen, actualUlimitLen)

	for _, got := range container.HostConfig.Ulimits {
		if expectedStr, ok := expectedUlimits[got.Name]; !ok {
			t.Errorf("%s config unexpected for docker job.", got.Name)
		} else {
			if !strings.Contains(expectedStr, ":") {
				// A single value sets both the soft and hard limits.
				expectedStr = expectedStr + ":" + expectedStr
			}

			parts := strings.SplitN(expectedStr, ":", 2)
			soft, _ := strconv.Atoi(parts[0])
			hard, _ := strconv.Atoi(parts[1])
			assert.Equal(t, int64(soft), got.Soft, "Wrong soft %s ulimit for docker job. Expect: %d, got: %d", got.Name, soft, got.Soft)
			assert.Equal(t, int64(hard), got.Hard, "Wrong hard %s ulimit for docker job. Expect: %d, got: %d", got.Name, hard, got.Hard)
		}
	}
}
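
// The assertions above rely on Docker's "soft:hard" ulimit convention: a bare
// value such as "4242" applies to both limits, while "2048:4096" sets them
// separately. A minimal sketch of that parsing rule follows; parseUlimitSpec
// is a hypothetical helper for illustration, not part of the driver, and its
// error messages do not reproduce the driver's exact wording.
func parseUlimitSpec(spec string) (soft, hard int64, err error) {
	if !strings.Contains(spec, ":") {
		// A single value covers both the soft and the hard limit.
		spec = spec + ":" + spec
	}
	parts := strings.SplitN(spec, ":", 2)
	s, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("malformed soft ulimit %q: %w", spec, err)
	}
	h, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("malformed hard ulimit %q: %w", spec, err)
	}
	return s, h, nil
}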

func TestDockerDriver_Sysctl_Ulimit_Errors(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	brokenConfigs := []map[string]string{
		{
			"nofile": "",
		},
		{
			"nofile": "abc:1234",
		},
		{
			"nofile": "1234:abc",
		},
	}

	testCases := []struct {
		ulimitConfig map[string]string
		err          error
	}{
		{brokenConfigs[0], fmt.Errorf("Malformed ulimit specification nofile: \"\", cannot be empty")},
		{brokenConfigs[1], fmt.Errorf("Malformed soft ulimit nofile: abc:1234")},
		{brokenConfigs[2], fmt.Errorf("Malformed hard ulimit nofile: 1234:abc")},
	}

	for _, tc := range testCases {
		task, cfg, _ := dockerTask(t)
		cfg.Ulimit = tc.ulimitConfig
		require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

		d := dockerDriverHarness(t, nil)
		cleanup := d.MkAllocDir(task, true)
		t.Cleanup(cleanup)
		copyImage(t, task.TaskDir(), "busybox.tar")

		_, _, err := d.StartTask(task)
		require.NotNil(t, err, "Expected non-nil error")
		require.Contains(t, err.Error(), tc.err.Error())
	}
}

// This test does not run on Windows due to stricter path validation in the
// negative case for non-existent mount paths. We should write a similar test
// for Windows.
func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	allocDir := "/tmp/nomad/alloc-dir"

	cases := []struct {
		name            string
		requiresVolumes bool

		volumeDriver string
		volumes      []string

		expectedVolumes []string
	}{
		{
			name:            "basic plugin",
			requiresVolumes: true,
			volumeDriver:    "nfs",
			volumes:         []string{"test-path:/tmp/taskpath"},
			expectedVolumes: []string{"test-path:/tmp/taskpath"},
		},
		{
			name:            "absolute default driver",
			requiresVolumes: true,
			volumeDriver:    "",
			volumes:         []string{"/abs/test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/abs/test-path:/tmp/taskpath"},
		},
		{
			name:            "absolute local driver",
			requiresVolumes: true,
			volumeDriver:    "local",
			volumes:         []string{"/abs/test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/abs/test-path:/tmp/taskpath"},
		},
		{
			name:            "relative default driver",
			requiresVolumes: false,
			volumeDriver:    "",
			volumes:         []string{"test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/tmp/nomad/alloc-dir/demo/test-path:/tmp/taskpath"},
		},
		{
			name:            "named volume local driver",
			requiresVolumes: true,
			volumeDriver:    "local",
			volumes:         []string{"test-path:/tmp/taskpath"},
			expectedVolumes: []string{"test-path:/tmp/taskpath"},
		},
		{
			name:            "relative outside task-dir default driver",
			requiresVolumes: false,
			volumeDriver:    "",
			volumes:         []string{"../test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/tmp/nomad/alloc-dir/test-path:/tmp/taskpath"},
		},
		{
			name:            "relative outside alloc-dir default driver",
			requiresVolumes: true,
			volumeDriver:    "",
			volumes:         []string{"../../test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/tmp/nomad/test-path:/tmp/taskpath"},
		},
		{
			name:            "clean path local driver",
			requiresVolumes: true,
			volumeDriver:    "local",
			volumes:         []string{"/tmp/nomad/../test-path:/tmp/taskpath"},
			expectedVolumes: []string{"/tmp/test-path:/tmp/taskpath"},
		},
	}

	t.Run("with volumes enabled", func(t *testing.T) {
		dh := dockerDriverHarness(t, nil)
		driver := dh.Impl().(*Driver)
		driver.config.Volumes.Enabled = true

		for _, c := range cases {
			t.Run(c.name, func(t *testing.T) {
				task, cfg, _ := dockerTask(t)
				cfg.VolumeDriver = c.volumeDriver
				cfg.Volumes = c.volumes

				task.AllocDir = allocDir
				task.Name = "demo"

				require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

				cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
				require.NoError(t, err)

				for _, v := range c.expectedVolumes {
					require.Contains(t, cc.HostConfig.Binds, v)
				}
			})
		}
	})

	t.Run("with volumes disabled", func(t *testing.T) {
		dh := dockerDriverHarness(t, nil)
		driver := dh.Impl().(*Driver)
		driver.config.Volumes.Enabled = false

		for _, c := range cases {
			t.Run(c.name, func(t *testing.T) {
				task, cfg, _ := dockerTask(t)
				cfg.VolumeDriver = c.volumeDriver
				cfg.Volumes = c.volumes

				task.AllocDir = allocDir
				task.Name = "demo"

				require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

				cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
				if c.requiresVolumes {
					require.Error(t, err, "volumes are not enabled")
				} else {
					require.NoError(t, err)

					for _, v := range c.expectedVolumes {
						require.Contains(t, cc.HostConfig.Binds, v)
					}
				}
			})
		}
	})
}
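
// The expectations above encode how the driver resolves relative volume host
// paths: they are anchored at the task directory (<alloc-dir>/<task>) and
// cleaned, which is why "../test-path" lands in the alloc dir and
// "../../test-path" escapes it. A minimal sketch of that resolution rule;
// expandVolumeHostPath is a hypothetical helper for illustration, not the
// driver's actual implementation. (When a volume driver such as "local" or
// "nfs" is set, the source is treated as a named volume and passed through
// untouched, as the "named volume local driver" case shows.)
func expandVolumeHostPath(allocDir, taskName, hostPath string) string {
	if filepath.IsAbs(hostPath) {
		// Absolute paths are only cleaned, never re-anchored.
		return filepath.Clean(hostPath)
	}
	// Relative paths are joined onto the task directory and cleaned, so
	// ".." components can walk up into (or past) the alloc directory.
	return filepath.Clean(filepath.Join(allocDir, taskName, hostPath))
}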

// This test does not run on Windows due to differences in the definition of
// an absolute path, changing path expansion behaviour. A similar test should
// be written for Windows.
func TestDockerDriver_MountsSerialization(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	allocDir := "/tmp/nomad/alloc-dir"

	cases := []struct {
		name            string
		requiresVolumes bool
		passedMounts    []DockerMount
		expectedMounts  []docker.HostMount
	}{
		{
			name:            "basic volume",
			requiresVolumes: true,
			passedMounts: []DockerMount{
				{
					Target:   "/nomad",
					ReadOnly: true,
					Source:   "test",
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:          "volume",
					Target:        "/nomad",
					Source:        "test",
					ReadOnly:      true,
					VolumeOptions: &docker.VolumeOptions{},
				},
			},
		},
		{
			name: "basic bind",
			passedMounts: []DockerMount{
				{
					Type:   "bind",
					Target: "/nomad",
					Source: "test",
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:        "bind",
					Target:      "/nomad",
					Source:      "/tmp/nomad/alloc-dir/demo/test",
					BindOptions: &docker.BindOptions{},
				},
			},
		},
		{
			name:            "basic absolute bind",
			requiresVolumes: true,
			passedMounts: []DockerMount{
				{
					Type:   "bind",
					Target: "/nomad",
					Source: "/tmp/test",
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:        "bind",
					Target:      "/nomad",
					Source:      "/tmp/test",
					BindOptions: &docker.BindOptions{},
				},
			},
		},
		{
			name:            "bind relative outside",
			requiresVolumes: true,
			passedMounts: []DockerMount{
				{
					Type:   "bind",
					Target: "/nomad",
					Source: "../../test",
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:        "bind",
					Target:      "/nomad",
					Source:      "/tmp/nomad/test",
					BindOptions: &docker.BindOptions{},
				},
			},
		},
		{
			name:            "basic tmpfs",
			requiresVolumes: false,
			passedMounts: []DockerMount{
				{
					Type:   "tmpfs",
					Target: "/nomad",
					TmpfsOptions: DockerTmpfsOptions{
						SizeBytes: 321,
						Mode:      0666,
					},
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:   "tmpfs",
					Target: "/nomad",
					TempfsOptions: &docker.TempfsOptions{
						SizeBytes: 321,
						Mode:      0666,
					},
				},
			},
		},
	}

	t.Run("with volumes enabled", func(t *testing.T) {
		dh := dockerDriverHarness(t, nil)
		driver := dh.Impl().(*Driver)
		driver.config.Volumes.Enabled = true

		for _, c := range cases {
			t.Run(c.name, func(t *testing.T) {
				task, cfg, _ := dockerTask(t)
				cfg.Mounts = c.passedMounts

				task.AllocDir = allocDir
				task.Name = "demo"

				require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

				cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
				require.NoError(t, err)
				require.EqualValues(t, c.expectedMounts, cc.HostConfig.Mounts)
			})
		}
	})

	t.Run("with volumes disabled", func(t *testing.T) {
		dh := dockerDriverHarness(t, nil)
		driver := dh.Impl().(*Driver)
		driver.config.Volumes.Enabled = false

		for _, c := range cases {
			t.Run(c.name, func(t *testing.T) {
				task, cfg, _ := dockerTask(t)

				cfg.Mounts = c.passedMounts

				task.AllocDir = allocDir
				task.Name = "demo"

				require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

				cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
				if c.requiresVolumes {
					require.Error(t, err, "volumes are not enabled")
				} else {
					require.NoError(t, err)
					require.EqualValues(t, c.expectedMounts, cc.HostConfig.Mounts)
				}
			})
		}
	})
}

// TestDockerDriver_CreateContainerConfig_MountsCombined asserts that
// devices and mounts set by device managers/plugins are honored
// and present in docker.CreateContainerOptions, and that they are appended
// to any devices/mounts a user sets in the task config.
func TestDockerDriver_CreateContainerConfig_MountsCombined(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	task, cfg, _ := dockerTask(t)

	task.Devices = []*drivers.DeviceConfig{
		{
			HostPath:    "/dev/fuse",
			TaskPath:    "/container/dev/task-fuse",
			Permissions: "rw",
		},
	}
	task.Mounts = []*drivers.MountConfig{
		{
			HostPath: "/tmp/task-mount",
			TaskPath: "/container/tmp/task-mount",
			Readonly: true,
		},
	}

	cfg.Devices = []DockerDevice{
		{
			HostPath:          "/dev/stdout",
			ContainerPath:     "/container/dev/cfg-stdout",
			CgroupPermissions: "rwm",
		},
	}
	cfg.Mounts = []DockerMount{
		{
			Type:     "bind",
			Source:   "/tmp/cfg-mount",
			Target:   "/container/tmp/cfg-mount",
			ReadOnly: false,
		},
	}

	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)
	driver.config.Volumes.Enabled = true

	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.NoError(t, err)
	expectedMounts := []docker.HostMount{
		{
			Type:     "bind",
			Source:   "/tmp/cfg-mount",
			Target:   "/container/tmp/cfg-mount",
			ReadOnly: false,
			BindOptions: &docker.BindOptions{
				Propagation: "",
			},
		},
		{
			Type:     "bind",
			Source:   "/tmp/task-mount",
			Target:   "/container/tmp/task-mount",
			ReadOnly: true,
			BindOptions: &docker.BindOptions{
				Propagation: "rprivate",
			},
		},
	}

	if runtime.GOOS != "linux" {
		expectedMounts[0].BindOptions = &docker.BindOptions{}
		expectedMounts[1].BindOptions = &docker.BindOptions{}
	}

	foundMounts := c.HostConfig.Mounts
	sort.Slice(foundMounts, func(i, j int) bool {
		return foundMounts[i].Target < foundMounts[j].Target
	})
	require.EqualValues(t, expectedMounts, foundMounts)

	expectedDevices := []docker.Device{
		{
			PathOnHost:        "/dev/stdout",
			PathInContainer:   "/container/dev/cfg-stdout",
			CgroupPermissions: "rwm",
		},
		{
			PathOnHost:        "/dev/fuse",
			PathInContainer:   "/container/dev/task-fuse",
			CgroupPermissions: "rw",
		},
	}

	foundDevices := c.HostConfig.Devices
	sort.Slice(foundDevices, func(i, j int) bool {
		return foundDevices[i].PathInContainer < foundDevices[j].PathInContainer
	})
	require.EqualValues(t, expectedDevices, foundDevices)
}
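
// The "rprivate" propagation asserted above comes from Nomad's Linux mount
// propagation support, which maps volume_mount propagation modes onto Docker
// bind propagation values, with "private" (rprivate) as the default. A sketch
// of that mapping as a lookup table; propagationModes is an illustrative
// variable for this file, not a driver export.
var propagationModes = map[string]string{
	"private":       "rprivate", // container sees no new host-side mounts
	"host-to-task":  "rslave",   // new host mounts become visible in the container
	"bidirectional": "rshared",  // mounts propagate both ways; gated on read-write volume access
}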
|
2019-01-14 12:34:24 +00:00
|
|
|
|
2019-01-14 13:58:33 +00:00
|
|
|
// TestDockerDriver_Cleanup ensures Cleanup removes only downloaded images.
|
|
|
|
// Doesn't run on windows because it requires an image variant
|
|
|
|
func TestDockerDriver_Cleanup(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-14 13:58:33 +00:00
|
|
|
testutil.DockerCompatible(t)
|
|
|
|
|
|
|
|
// using a small image and an specific point release to avoid accidental conflicts with other tasks
|
|
|
|
cfg := newTaskConfig("", []string{"sleep", "100"})
|
|
|
|
cfg.Image = "busybox:1.29.2"
|
|
|
|
cfg.LoadImage = ""
|
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "cleanup_test",
|
|
|
|
Resources: basicResources,
|
|
|
|
}
|
|
|
|
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
|
|
|
|
2020-05-27 01:08:25 +00:00
|
|
|
client, driver, handle, cleanup := dockerSetup(t, task, map[string]interface{}{
|
|
|
|
"gc": map[string]interface{}{
|
|
|
|
"image": true,
|
|
|
|
"image_delay": "1ms",
|
|
|
|
},
|
|
|
|
})
|
2019-01-14 13:58:33 +00:00
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
|
|
|
|
// Cleanup
|
|
|
|
require.NoError(t, driver.DestroyTask(task.ID, true))
|
|
|
|
|
|
|
|
// Ensure image was removed
|
|
|
|
tu.WaitForResult(func() (bool, error) {
|
|
|
|
if _, err := client.InspectImage(cfg.Image); err == nil {
|
|
|
|
return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
|
|
|
|
}
|
|
|
|
|
|
|
|
return true, nil
|
|
|
|
}, func(err error) {
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
|
|
|
|
|
|
|
// The image doesn't exist which shouldn't be an error when calling
|
|
|
|
// Cleanup, so call it again to make sure.
|
|
|
|
require.NoError(t, driver.Impl().(*Driver).cleanupImage(handle))
|
|
|
|
}

// Tests that images prefixed with "https://" are supported
func TestDockerDriver_Start_Image_HTTPS(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	taskCfg := TaskConfig{
		Image:            "https://gcr.io/google_containers/pause:0.8.0",
		ImagePullTimeout: "5m",
	}
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "pause",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	harness := dockerDriverHarness(t, nil)
	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()

	_, _, err := harness.StartTask(task)
	require.NoError(t, err)

	err = harness.WaitUntilStarted(task.ID, 1*time.Minute)
	require.NoError(t, err)

	harness.DestroyTask(task.ID, true)
}

func newTaskConfig(variant string, command []string) TaskConfig {
	// busyboxImageID is the ID stored in busybox.tar
	busyboxImageID := "busybox:1.29.3"

	image := busyboxImageID
	loadImage := "busybox.tar"
	if variant != "" {
		image = fmt.Sprintf("%s-%s", busyboxImageID, variant)
		loadImage = fmt.Sprintf("busybox_%s.tar", variant)
	}

	return TaskConfig{
		Image:            image,
		ImagePullTimeout: "5m",
		LoadImage:        loadImage,
		Command:          command[0],
		Args:             command[1:],
	}
}

func copyImage(t *testing.T, taskDir *allocdir.TaskDir, image string) {
	dst := filepath.Join(taskDir.LocalDir, image)
	copyFile(filepath.Join("./test-resources/docker", image), dst, t)
}

// copyFile copies an existing file to the destination
func copyFile(src, dst string, t *testing.T) {
	t.Helper()
	in, err := os.Open(src)
	if err != nil {
		t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
	}
	defer in.Close()
	out, err := os.Create(dst)
	require.NoError(t, err, "copying %v -> %v failed: %v", src, dst, err)

	defer func() {
		if err := out.Close(); err != nil {
			t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
		}
	}()
	if _, err = io.Copy(out, in); err != nil {
		t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
	}
	if err := out.Sync(); err != nil {
		t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
	}
}

func TestDocker_ExecTaskStreaming(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	taskCfg := newTaskConfig("", []string{"/bin/sleep", "1000"})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "nc-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	harness := dockerDriverHarness(t, nil)
	cleanup := harness.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := harness.StartTask(task)
	require.NoError(t, err)

	err = harness.WaitUntilStarted(task.ID, 1*time.Minute)
	require.NoError(t, err)

	defer harness.DestroyTask(task.ID, true)

	dtestutil.ExecTaskStreamingConformanceTests(t, harness, task.ID)
}

// Tests that a given DNSConfig properly configures DNS
func Test_dnsConfig(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	cases := []struct {
		name string
		cfg  *drivers.DNSConfig
	}{
		{
			name: "nil",
		},
		{
			name: "basic",
			cfg: &drivers.DNSConfig{
				Servers: []string{"1.1.1.1", "1.0.0.1"},
			},
		},
		{
			name: "full",
			cfg: &drivers.DNSConfig{
				Servers:  []string{"1.1.1.1", "1.0.0.1"},
				Searches: []string{"local.test", "node.consul"},
				Options:  []string{"ndots:2", "edns0"},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			harness := dockerDriverHarness(t, nil)

			taskCfg := newTaskConfig("", []string{"/bin/sleep", "1000"})
			task := &drivers.TaskConfig{
				ID:        uuid.Generate(),
				Name:      "nc-demo",
				AllocID:   uuid.Generate(),
				Resources: basicResources,
				DNS:       c.cfg,
			}
			require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

			cleanup := harness.MkAllocDir(task, false)

			_, _, err := harness.StartTask(task)
			require.NoError(t, err)

			err = harness.WaitUntilStarted(task.ID, 1*time.Minute)
			require.NoError(t, err)

			dtestutil.TestTaskDNSConfig(t, harness, task.ID, c.cfg)

			// cleanup immediately before the next test case
			require.NoError(t, harness.DestroyTask(task.ID, true))
			cleanup()
			harness.Kill()
		})
	}
}