package docker

import (
	"context"
	"fmt"
	"io/ioutil"
	"math/rand"
	"path/filepath"
	"reflect"
	"runtime"
	"runtime/debug"
	"sort"
	"strings"
	"syscall"
	"testing"
	"time"

	docker "github.com/fsouza/go-dockerclient"
	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/taskenv"
	"github.com/hashicorp/nomad/client/testutil"
	"github.com/hashicorp/nomad/helper/freeport"
	"github.com/hashicorp/nomad/helper/pluginutils/hclspecutils"
	"github.com/hashicorp/nomad/helper/pluginutils/hclutils"
	"github.com/hashicorp/nomad/helper/pluginutils/loader"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/base"
	"github.com/hashicorp/nomad/plugins/drivers"
	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
	tu "github.com/hashicorp/nomad/testutil"
	"github.com/shoenig/test/must"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

var (
	basicResources = &drivers.Resources{
		NomadResources: &structs.AllocatedTaskResources{
			Memory: structs.AllocatedMemoryResources{
				MemoryMB: 256,
			},
			Cpu: structs.AllocatedCpuResources{
				CpuShares: 250,
			},
		},
		LinuxResources: &drivers.LinuxResources{
			CPUShares:        512,
			MemoryLimitBytes: 256 * 1024 * 1024,
		},
	}
)
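
// dockerIsRemote returns true if the Docker daemon the tests connect to
// appears to be remote, i.e. reachable over TCP rather than a local socket.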
func dockerIsRemote(t *testing.T) bool {
	client, err := docker.NewClientFromEnv()
	if err != nil {
		return false
	}

	// Technically this could be a local tcp socket but for testing purposes
	// we'll just assume that tcp is only used for remote connections.
	if client.Endpoint()[0:3] == "tcp" {
		return true
	}
	return false
}

var (
	// busyboxLongRunningCmd is a busybox command that runs indefinitely, and
	// ideally responds to SIGINT/SIGTERM. Sadly, busybox:1.29.3 /bin/sleep doesn't.
	busyboxLongRunningCmd = []string{"nc", "-l", "-p", "3000", "127.0.0.1"}
)

// dockerTask returns a task configured with one reserved and one dynamic
// port. The ports are returned in that order and should be reclaimed with
// freeport.Return at the end of the test.
func dockerTask(t *testing.T) (*drivers.TaskConfig, *TaskConfig, []int) {
	ports := freeport.MustTake(2)
	dockerReserved := ports[0]
	dockerDynamic := ports[1]

	cfg := newTaskConfig("", busyboxLongRunningCmd)
	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		Name:    "redis-demo",
		AllocID: uuid.Generate(),
		Env: map[string]string{
			"test": t.Name(),
		},
		DeviceEnv: make(map[string]string),
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 256,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 512,
				},
				Networks: []*structs.NetworkResource{
					{
						IP:            "127.0.0.1",
						ReservedPorts: []structs.Port{{Label: "main", Value: dockerReserved}},
						DynamicPorts:  []structs.Port{{Label: "REDIS", Value: dockerDynamic}},
					},
				},
			},
			LinuxResources: &drivers.LinuxResources{
				CPUShares:        512,
				MemoryLimitBytes: 256 * 1024 * 1024,
				PercentTicks:     float64(512) / float64(4096),
			},
		},
	}

	require.NoError(t, task.EncodeConcreteDriverConfig(&cfg))

	return task, &cfg, ports
}

// dockerSetup does all of the basic setup needed to get a Docker task up and
// running for testing. Use like:
//
//   task := taskTemplate()
//   // do custom task configuration
//   client, driver, handle, cleanup := dockerSetup(t, task, nil)
//   defer cleanup()
//   // do test stuff
//
// If there is a problem during setup this function will abort or skip the test
// and indicate the reason.
func dockerSetup(t *testing.T, task *drivers.TaskConfig, driverCfg map[string]interface{}) (*docker.Client, *dtestutil.DriverHarness, *taskHandle, func()) {
	client := newTestDockerClient(t)
	driver := dockerDriverHarness(t, driverCfg)
	cleanup := driver.MkAllocDir(task, true)

	copyImage(t, task.TaskDir(), "busybox.tar")
	_, _, err := driver.StartTask(task)
	require.NoError(t, err)

	dockerDriver, ok := driver.Impl().(*Driver)
	require.True(t, ok)
	handle, ok := dockerDriver.tasks.Get(task.ID)
	require.True(t, ok)

	return client, driver, handle, func() {
		driver.DestroyTask(task.ID, true)
		cleanup()
	}
}

// cleanSlate removes the specified docker image, including potentially stopping/removing any
// containers based on that image. This is used to decouple tests that would be coupled
// by using the same container image.
func cleanSlate(client *docker.Client, imageID string) {
	if img, _ := client.InspectImage(imageID); img == nil {
		return
	}
	containers, _ := client.ListContainers(docker.ListContainersOptions{
		All: true,
		Filters: map[string][]string{
			"ancestor": {imageID},
		},
	})
	for _, c := range containers {
		client.RemoveContainer(docker.RemoveContainerOptions{
			Force: true,
			ID:    c.ID,
		})
	}
	client.RemoveImageExtended(imageID, docker.RemoveImageOptions{
		Force: true,
	})
	return
}

// dockerDriverHarness wires up everything needed to launch a task with a docker driver.
// A driver plugin harness is returned.
func dockerDriverHarness(t *testing.T, cfg map[string]interface{}) *dtestutil.DriverHarness {
	logger := testlog.HCLogger(t)
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(func() { cancel() })
	harness := dtestutil.NewDriverHarness(t, NewDockerDriver(ctx, logger))
	if cfg == nil {
		cfg = map[string]interface{}{
			"gc": map[string]interface{}{
				"image":       false,
				"image_delay": "1s",
			},
		}
	}
	plugLoader, err := loader.NewPluginLoader(&loader.PluginLoaderConfig{
		Logger:            logger,
		PluginDir:         "./plugins",
		SupportedVersions: loader.AgentSupportedApiVersions,
		InternalPlugins: map[loader.PluginID]*loader.InternalPluginConfig{
			PluginID: {
				Config: cfg,
				Factory: func(context.Context, hclog.Logger) interface{} {
					return harness
				},
			},
		},
	})

	require.NoError(t, err)
	instance, err := plugLoader.Dispense(pluginName, base.PluginTypeDriver, nil, logger)
	require.NoError(t, err)
	driver, ok := instance.Plugin().(*dtestutil.DriverHarness)
	if !ok {
		t.Fatal("plugin instance is not a driver... wat?")
	}

	return driver
}
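
// newTestDockerClient builds a Docker API client from the environment. It
// skips the test when Docker is not available and fails it if the client
// cannot be initialized.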
func newTestDockerClient(t *testing.T) *docker.Client {
	t.Helper()
	testutil.DockerCompatible(t)

	client, err := docker.NewClientFromEnv()
	if err != nil {
		t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack())
	}
	return client
}

// The following tests have been removed from this file:
// TestDockerDriver_Fingerprint, TestDockerDriver_Fingerprint_Bridge, and
// TestDockerDriver_Check_DockerHealthStatus.
// To check out or revert those tests, see commit 41715b1860778aa80513391bd64abd721d768ab0.
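
// TestDockerDriver_Start_Wait starts a long-running container and asserts
// that the wait channel does not fire while the task is still running.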
func TestDockerDriver_Start_Wait(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "nc-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	// Attempt to wait
	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)

	select {
	case <-waitCh:
		t.Fatalf("wait channel should not have received an exit result")
	case <-time.After(time.Duration(tu.TestMultiplier()*1) * time.Second):
	}
}
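
// TestDockerDriver_Start_WaitFinish runs a short-lived container and asserts
// that the task exits successfully.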
func TestDockerDriver_Start_WaitFinish(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	taskCfg := newTaskConfig("", []string{"echo", "hello"})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "nc-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	// Attempt to wait
	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)

	select {
	case res := <-waitCh:
		if !res.Successful() {
			require.Fail(t, "ExitResult should be successful: %v", res)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		require.Fail(t, "timeout")
	}
}

// TestDockerDriver_Start_StoppedContainer asserts that Nomad will detect a
// stopped task container, remove it, and start a new container.
//
// See https://github.com/hashicorp/nomad/issues/3419
func TestDockerDriver_Start_StoppedContainer(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	taskCfg := newTaskConfig("", []string{"sleep", "9001"})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "nc-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	client := newTestDockerClient(t)

	var imageID string
	var err error

	if runtime.GOOS != "windows" {
		imageID, err = d.Impl().(*Driver).loadImage(task, &taskCfg, client)
	} else {
		image, lErr := client.InspectImage(taskCfg.Image)
		err = lErr
		if image != nil {
			imageID = image.ID
		}
	}
	require.NoError(t, err)
	require.NotEmpty(t, imageID)

	// Create a container of the same name but don't start it. This mimics
	// the case of dockerd getting restarted and stopping containers while
	// Nomad is watching them.
	opts := docker.CreateContainerOptions{
		Name: strings.Replace(task.ID, "/", "_", -1),
		Config: &docker.Config{
			Image: taskCfg.Image,
			Cmd:   []string{"sleep", "9000"},
			Env:   []string{fmt.Sprintf("test=%s", t.Name())},
		},
	}

	if _, err := client.CreateContainer(opts); err != nil {
		t.Fatalf("error creating initial container: %v", err)
	}

	_, _, err = d.StartTask(task)
	defer d.DestroyTask(task.ID, true)
	require.NoError(t, err)

	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
	require.NoError(t, d.DestroyTask(task.ID, true))
}
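
// TestDockerDriver_Start_LoadImage starts a task from an image loaded off
// disk and verifies the command's output in the task's local directory.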
func TestDockerDriver_Start_LoadImage(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	taskCfg := newTaskConfig("", []string{"sh", "-c", "echo hello > $NOMAD_TASK_DIR/output"})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "busybox-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if !res.Successful() {
			require.Fail(t, "ExitResult should be successful: %v", res)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		require.Fail(t, "timeout")
	}

	// Check that data was written to the task's local directory.
	outputFile := filepath.Join(task.TaskDir().LocalDir, "output")
	act, err := ioutil.ReadFile(outputFile)
	if err != nil {
		t.Fatalf("Couldn't read expected output: %v", err)
	}

	exp := "hello"
	if strings.TrimSpace(string(act)) != exp {
		t.Fatalf("Command outputted %v; want %v", act, exp)
	}

}

// Tests that starting a task without an image fails
func TestDockerDriver_Start_NoImage(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	taskCfg := TaskConfig{
		Command: "echo",
		Args:    []string{"foo"},
	}
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "echo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, false)
	defer cleanup()

	_, _, err := d.StartTask(task)
	require.Error(t, err)
	require.Contains(t, err.Error(), "image name required")

	d.DestroyTask(task.ID, true)
}
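
// TestDockerDriver_Start_BadPull_Recoverable asserts that a failed image pull
// from an unreachable registry surfaces as a recoverable error.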
func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	taskCfg := TaskConfig{
		Image:            "127.0.0.1:32121/foo", // bad path
		ImagePullTimeout: "5m",
		Command:          "echo",
		Args: []string{
			"hello",
		},
	}
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "busybox-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()

	_, _, err := d.StartTask(task)
	require.Error(t, err)

	defer d.DestroyTask(task.ID, true)

	if rerr, ok := err.(*structs.RecoverableError); !ok {
		t.Fatalf("want recoverable error: %+v", err)
	} else if !rerr.IsRecoverable() {
		t.Fatalf("error not recoverable: %+v", err)
	}
}
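
// TestDockerDriver_Start_Wait_AllocDir asserts that a task can write to the
// shared alloc directory mounted into the container and that the output is
// visible on the host.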
func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
	ci.Parallel(t)
	// This test requires that the alloc dir be mounted into docker as a volume.
	// Because this cannot happen when docker is run remotely, e.g. when running
	// docker in a VM, we skip this when we detect Docker is being run remotely.
	if !testutil.DockerIsConnected(t) || dockerIsRemote(t) {
		t.Skip("Docker not connected")
	}

	exp := []byte{'w', 'i', 'n'}
	file := "output.txt"

	taskCfg := newTaskConfig("", []string{
		"sh",
		"-c",
		fmt.Sprintf(`sleep 1; echo -n %s > $%s/%s`,
			string(exp), taskenv.AllocDir, file),
	})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "busybox-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	// Attempt to wait
	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)

	select {
	case res := <-waitCh:
		if !res.Successful() {
			require.Fail(t, fmt.Sprintf("ExitResult should be successful: %v", res))
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		require.Fail(t, "timeout")
	}

	// Check that data was written to the shared alloc directory.
	outputFile := filepath.Join(task.TaskDir().SharedAllocDir, file)
	act, err := ioutil.ReadFile(outputFile)
	if err != nil {
		t.Fatalf("Couldn't read expected output: %v", err)
	}

	if !reflect.DeepEqual(act, exp) {
		t.Fatalf("Command outputted %v; want %v", act, exp)
	}
}
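
// TestDockerDriver_Start_Kill_Wait stops a running task and asserts that the
// wait channel reports a non-successful exit.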
func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "busybox-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	go func(t *testing.T) {
		time.Sleep(100 * time.Millisecond)
		signal := "SIGINT"
		if runtime.GOOS == "windows" {
			signal = "SIGKILL"
		}
		require.NoError(t, d.StopTask(task.ID, time.Second, signal))
	}(t)

	// Attempt to wait
	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)

	select {
	case res := <-waitCh:
		if res.Successful() {
			require.Fail(t, "ExitResult should err: %v", res)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		require.Fail(t, "timeout")
	}
}
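
// TestDockerDriver_Start_KillTimeout asserts that a task ignoring the stop
// signal is only killed after the configured kill timeout has elapsed.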
func TestDockerDriver_Start_KillTimeout(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	if runtime.GOOS == "windows" {
		t.Skip("Windows Docker does not support SIGUSR1")
	}

	timeout := 2 * time.Second
	taskCfg := newTaskConfig("", []string{"sleep", "10"})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "busybox-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	var killSent time.Time
	go func() {
		time.Sleep(100 * time.Millisecond)
		killSent = time.Now()
		require.NoError(t, d.StopTask(task.ID, timeout, "SIGUSR1"))
	}()

	// Attempt to wait
	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)

	var killed time.Time
	select {
	case <-waitCh:
		killed = time.Now()
	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		require.Fail(t, "timeout")
	}

	require.True(t, killed.Sub(killSent) > timeout)
}
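
// TestDockerDriver_StartN starts several tasks concurrently, then stops them
// and asserts that each one exits.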
func TestDockerDriver_StartN(t *testing.T) {
	ci.Parallel(t)
	if runtime.GOOS == "windows" {
		t.Skip("Windows Docker does not support SIGINT")
	}
	testutil.DockerCompatible(t)
	require := require.New(t)

	task1, _, ports1 := dockerTask(t)
	defer freeport.Return(ports1)

	task2, _, ports2 := dockerTask(t)
	defer freeport.Return(ports2)

	task3, _, ports3 := dockerTask(t)
	defer freeport.Return(ports3)

	taskList := []*drivers.TaskConfig{task1, task2, task3}

	t.Logf("Starting %d tasks", len(taskList))

	d := dockerDriverHarness(t, nil)
	// Let's spin up a bunch of things
	for _, task := range taskList {
		cleanup := d.MkAllocDir(task, true)
		defer cleanup()
		copyImage(t, task.TaskDir(), "busybox.tar")
		_, _, err := d.StartTask(task)
		require.NoError(err)

	}

	defer d.DestroyTask(task3.ID, true)
	defer d.DestroyTask(task2.ID, true)
	defer d.DestroyTask(task1.ID, true)

	t.Log("All tasks are started. Terminating...")
	for _, task := range taskList {
		require.NoError(d.StopTask(task.ID, time.Second, "SIGINT"))

		// Attempt to wait
		waitCh, err := d.WaitTask(context.Background(), task.ID)
		require.NoError(err)

		select {
		case <-waitCh:
		case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
			require.Fail("timeout waiting on task")
		}
	}

	t.Log("Test complete!")
}
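
// TestDockerDriver_StartNVersions starts several tasks using different
// busybox image variants, then stops them and asserts that each one exits.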
func TestDockerDriver_StartNVersions(t *testing.T) {
	ci.Parallel(t)
	if runtime.GOOS == "windows" {
		t.Skip("Skipped on windows, we don't have image variants available")
	}
	testutil.DockerCompatible(t)
	require := require.New(t)

	task1, cfg1, ports1 := dockerTask(t)
	defer freeport.Return(ports1)
	tcfg1 := newTaskConfig("", []string{"echo", "hello"})
	cfg1.Image = tcfg1.Image
	cfg1.LoadImage = tcfg1.LoadImage
	require.NoError(task1.EncodeConcreteDriverConfig(cfg1))

	task2, cfg2, ports2 := dockerTask(t)
	defer freeport.Return(ports2)
	tcfg2 := newTaskConfig("musl", []string{"echo", "hello"})
	cfg2.Image = tcfg2.Image
	cfg2.LoadImage = tcfg2.LoadImage
	require.NoError(task2.EncodeConcreteDriverConfig(cfg2))

	task3, cfg3, ports3 := dockerTask(t)
	defer freeport.Return(ports3)
	tcfg3 := newTaskConfig("glibc", []string{"echo", "hello"})
	cfg3.Image = tcfg3.Image
	cfg3.LoadImage = tcfg3.LoadImage
	require.NoError(task3.EncodeConcreteDriverConfig(cfg3))

	taskList := []*drivers.TaskConfig{task1, task2, task3}

	t.Logf("Starting %d tasks", len(taskList))
	d := dockerDriverHarness(t, nil)

	// Let's spin up a bunch of things
	for _, task := range taskList {
		cleanup := d.MkAllocDir(task, true)
		defer cleanup()
		copyImage(t, task.TaskDir(), "busybox.tar")
		copyImage(t, task.TaskDir(), "busybox_musl.tar")
		copyImage(t, task.TaskDir(), "busybox_glibc.tar")
		_, _, err := d.StartTask(task)
		require.NoError(err)

		require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second))
	}

	defer d.DestroyTask(task3.ID, true)
	defer d.DestroyTask(task2.ID, true)
	defer d.DestroyTask(task1.ID, true)

	t.Log("All tasks are started. Terminating...")
	for _, task := range taskList {
		require.NoError(d.StopTask(task.ID, time.Second, "SIGINT"))

		// Attempt to wait
		waitCh, err := d.WaitTask(context.Background(), task.ID)
		require.NoError(err)

		select {
		case <-waitCh:
		case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
			require.Fail("timeout waiting on task")
		}
	}

	t.Log("Test complete!")
}
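
// TestDockerDriver_Labels asserts that user-supplied labels are applied to
// the container alongside the standard alloc_id label.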
func TestDockerDriver_Labels(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)

	cfg.Labels = map[string]string{
		"label1": "value1",
		"label2": "value2",
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// expect to see 1 additional standard label (allocID)
	require.Equal(t, len(cfg.Labels)+1, len(container.Config.Labels))
	for k, v := range cfg.Labels {
		require.Equal(t, v, container.Config.Labels[k])
	}
}
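
// TestDockerDriver_ExtraLabels asserts that the extra_labels driver
// configuration adds the requested Nomad metadata labels to the container.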
func TestDockerDriver_ExtraLabels(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)

	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	dockerClientConfig := make(map[string]interface{})

	dockerClientConfig["extra_labels"] = []string{"task*", "job_name"}
	client, d, handle, cleanup := dockerSetup(t, task, dockerClientConfig)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	expectedLabels := map[string]string{
		"com.hashicorp.nomad.alloc_id":        task.AllocID,
		"com.hashicorp.nomad.task_name":       task.Name,
		"com.hashicorp.nomad.task_group_name": task.TaskGroupName,
		"com.hashicorp.nomad.job_name":        task.JobName,
	}

	// expect to see 4 labels (allocID by default, task_name and task_group_name due to task*, and job_name)
	require.Equal(t, 4, len(container.Config.Labels))
	for k, v := range expectedLabels {
		require.Equal(t, v, container.Config.Labels[k])
	}
}

func TestDockerDriver_LoggingConfiguration(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)

	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	dockerClientConfig := make(map[string]interface{})
	loggerConfig := map[string]string{"gelf-address": "udp://1.2.3.4:12201", "tag": "gelf"}

	dockerClientConfig["logging"] = LoggingConfig{
		Type:   "gelf",
		Config: loggerConfig,
	}
	client, d, handle, cleanup := dockerSetup(t, task, dockerClientConfig)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	require.Equal(t, "gelf", container.HostConfig.LogConfig.Type)
	require.Equal(t, loggerConfig, container.HostConfig.LogConfig.Config)
}
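
// TestDockerDriver_HealthchecksDisable asserts that disabling healthchecks in
// the task config results in a container whose healthcheck test is NONE.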
func TestDockerDriver_HealthchecksDisable(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	cfg.Healthchecks.Disable = true
	defer freeport.Return(ports)
	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()
	must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	must.NoError(t, err)

	must.NotNil(t, container.Config.Healthcheck)
	must.Eq(t, []string{"NONE"}, container.Config.Healthcheck.Test)
}

func TestDockerDriver_ForcePull(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)

	cfg.ForcePull = true
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()

	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	_, err := client.InspectContainer(handle.containerID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) {
	ci.Parallel(t)
	if runtime.GOOS == "windows" {
		t.Skip("TODO: Skipped digest test on Windows")
	}
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.LoadImage = ""
	cfg.Image = "library/busybox@sha256:58ac43b2cc92c687a32c8be6278e50a063579655fe3090125dcb2af0ff9e1a64"
	localDigest := "sha256:8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7"
	cfg.ForcePull = true
	cfg.Command = busyboxLongRunningCmd[0]
	cfg.Args = busyboxLongRunningCmd[1:]
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)
	require.Equal(t, localDigest, container.Image)
}
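
// TestDockerDriver_SecurityOptUnconfined asserts that security_opt settings
// are passed through to the container's HostConfig.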
func TestDockerDriver_SecurityOptUnconfined(t *testing.T) {
	ci.Parallel(t)
	if runtime.GOOS == "windows" {
		t.Skip("Windows does not support seccomp")
	}
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.SecurityOpt = []string{"seccomp=unconfined"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	require.Exactly(t, cfg.SecurityOpt, container.HostConfig.SecurityOpt)
}

func TestDockerDriver_SecurityOptFromFile(t *testing.T) {
	ci.Parallel(t)
	if runtime.GOOS == "windows" {
		t.Skip("Windows does not support seccomp")
	}
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.SecurityOpt = []string{"seccomp=./test-resources/docker/seccomp.json"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	require.Contains(t, container.HostConfig.SecurityOpt[0], "reboot")
}

func TestDockerDriver_Runtime(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.Runtime = "runc"
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	require.Exactly(t, cfg.Runtime, container.HostConfig.Runtime)
}

func TestDockerDriver_CreateContainerConfig(t *testing.T) {
	ci.Parallel(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	opt := map[string]string{"size": "120G"}

	cfg.StorageOpt = opt
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)

	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.NoError(t, err)

	require.Equal(t, "org/repo:0.1", c.Config.Image)
	require.EqualValues(t, opt, c.HostConfig.StorageOpt)

	// Container name should be /<task_name>-<alloc_id> for backward compat
	containerName := fmt.Sprintf("%s-%s", strings.Replace(task.Name, "/", "_", -1), task.AllocID)
	require.Equal(t, containerName, c.Name)
}

func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) {
	ci.Parallel(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	task.DeviceEnv["NVIDIA_VISIBLE_DEVICES"] = "GPU_UUID_1"

	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)
	driver.gpuRuntime = true

	// Should error if a runtime was explicitly set that doesn't match gpu runtime
	cfg.Runtime = "nvidia"
	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.NoError(t, err)
	require.Equal(t, "nvidia", c.HostConfig.Runtime)

	cfg.Runtime = "custom"
	_, err = driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.Error(t, err)
	require.Contains(t, err.Error(), "conflicting runtime requests")
}

func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) {
	ci.Parallel(t)

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)
	driver.gpuRuntime = true
	driver.config.allowRuntimes = map[string]struct{}{
		"runc":   {},
		"custom": {},
	}

	allowRuntime := []string{
		"", // default always works
		"runc",
		"custom",
	}

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	for _, runtime := range allowRuntime {
		t.Run(runtime, func(t *testing.T) {
			cfg.Runtime = runtime
			c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
			require.NoError(t, err)
			require.Equal(t, runtime, c.HostConfig.Runtime)
		})
	}

	t.Run("not allowed: denied", func(t *testing.T) {
		cfg.Runtime = "denied"
		_, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
		require.Error(t, err)
		require.Contains(t, err.Error(), `runtime "denied" is not allowed`)
	})

}

func TestDockerDriver_CreateContainerConfig_User(t *testing.T) {
	ci.Parallel(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	task.User = "random-user-1"

	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)

	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.NoError(t, err)

	require.Equal(t, task.User, c.Config.User)
}

func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) {
	ci.Parallel(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	task.AllocID = uuid.Generate()
	task.JobName = "redis-demo-job"

	cfg.Labels = map[string]string{
		"user_label": "user_value",

		// com.hashicorp.nomad. labels are reserved and
		// cannot be overridden
		"com.hashicorp.nomad.alloc_id": "bad_value",
	}

	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)

	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.NoError(t, err)

	expectedLabels := map[string]string{
		// user provided labels
		"user_label": "user_value",
		// default label
		"com.hashicorp.nomad.alloc_id": task.AllocID,
	}

	require.Equal(t, expectedLabels, c.Config.Labels)
}

func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		name           string
		loggingConfig  DockerLogging
		expectedConfig DockerLogging
	}{
		{
			"simple type",
			DockerLogging{Type: "fluentd"},
			DockerLogging{
				Type:   "fluentd",
				Config: map[string]string{},
			},
		},
		{
			"simple driver",
			DockerLogging{Driver: "fluentd"},
			DockerLogging{
				Type:   "fluentd",
				Config: map[string]string{},
			},
		},
		{
			"type takes precedence",
			DockerLogging{
				Type:   "json-file",
				Driver: "fluentd",
			},
			DockerLogging{
				Type:   "json-file",
				Config: map[string]string{},
			},
		},
		{
			"user config takes precedence, even if no type provided",
			DockerLogging{
				Type:   "",
				Config: map[string]string{"max-file": "3", "max-size": "10m"},
			},
			DockerLogging{
				Type:   "",
				Config: map[string]string{"max-file": "3", "max-size": "10m"},
			},
		},
		{
			"defaults to json-file w/ log rotation",
			DockerLogging{
				Type: "",
			},
			DockerLogging{
				Type:   "json-file",
				Config: map[string]string{"max-file": "2", "max-size": "2m"},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			task, cfg, ports := dockerTask(t)
			defer freeport.Return(ports)

			cfg.Logging = c.loggingConfig
			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

			dh := dockerDriverHarness(t, nil)
			driver := dh.Impl().(*Driver)

			cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
			require.NoError(t, err)

			require.Equal(t, c.expectedConfig.Type, cc.HostConfig.LogConfig.Type)
			require.Equal(t, c.expectedConfig.Config["max-file"], cc.HostConfig.LogConfig.Config["max-file"])
			require.Equal(t, c.expectedConfig.Config["max-size"], cc.HostConfig.LogConfig.Config["max-size"])
		})
	}
}

func TestDockerDriver_CreateContainerConfig_Mounts(t *testing.T) {
	ci.Parallel(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)

	cfg.Mounts = []DockerMount{
		{
			Type:   "bind",
			Target: "/map-bind-target",
			Source: "/map-source",
		},
		{
			Type:   "tmpfs",
			Target: "/map-tmpfs-target",
		},
	}
	cfg.MountsList = []DockerMount{
		{
			Type:   "bind",
			Target: "/list-bind-target",
			Source: "/list-source",
		},
		{
			Type:   "tmpfs",
			Target: "/list-tmpfs-target",
		},
	}

	expectedSrcPrefix := "/"
	if runtime.GOOS == "windows" {
		expectedSrcPrefix = "redis-demo\\"
	}
	expected := []docker.HostMount{
		// from mount map
		{
			Type:        "bind",
			Target:      "/map-bind-target",
			Source:      expectedSrcPrefix + "map-source",
			BindOptions: &docker.BindOptions{},
		},
		{
			Type:          "tmpfs",
			Target:        "/map-tmpfs-target",
			TempfsOptions: &docker.TempfsOptions{},
		},
		// from mount list
		{
			Type:        "bind",
			Target:      "/list-bind-target",
			Source:      expectedSrcPrefix + "list-source",
			BindOptions: &docker.BindOptions{},
		},
		{
			Type:          "tmpfs",
			Target:        "/list-tmpfs-target",
			TempfsOptions: &docker.TempfsOptions{},
		},
	}

	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)
	driver.config.Volumes.Enabled = true

	cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.NoError(t, err)

	found := cc.HostConfig.Mounts
	sort.Slice(found, func(i, j int) bool { return strings.Compare(found[i].Target, found[j].Target) < 0 })
	sort.Slice(expected, func(i, j int) bool {
		return strings.Compare(expected[i].Target, expected[j].Target) < 0
	})

	require.Equal(t, expected, found)
}

func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) {
	ci.Parallel(t)
	testCases := []struct {
		description           string
		gpuRuntimeSet         bool
		expectToReturnError   bool
		expectedRuntime       string
		nvidiaDevicesProvided bool
	}{
		{
			description:           "gpu devices are provided, docker driver was able to detect nvidia-runtime 1",
			gpuRuntimeSet:         true,
			expectToReturnError:   false,
			expectedRuntime:       "nvidia",
			nvidiaDevicesProvided: true,
		},
		{
			description:           "gpu devices are provided, docker driver was able to detect nvidia-runtime 2",
			gpuRuntimeSet:         true,
			expectToReturnError:   false,
			expectedRuntime:       "nvidia-runtime-modified-name",
			nvidiaDevicesProvided: true,
		},
		{
			description:           "no gpu devices provided - no runtime should be set",
			gpuRuntimeSet:         true,
			expectToReturnError:   false,
			expectedRuntime:       "nvidia",
			nvidiaDevicesProvided: false,
		},
		{
			description:           "no gpuRuntime supported by docker driver",
			gpuRuntimeSet:         false,
			expectToReturnError:   true,
			expectedRuntime:       "nvidia",
			nvidiaDevicesProvided: true,
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			task, cfg, ports := dockerTask(t)
			defer freeport.Return(ports)

			dh := dockerDriverHarness(t, map[string]interface{}{
				"allow_runtimes": []string{"runc", "nvidia", "nvidia-runtime-modified-name"},
			})
			driver := dh.Impl().(*Driver)

			driver.gpuRuntime = testCase.gpuRuntimeSet
			driver.config.GPURuntimeName = testCase.expectedRuntime
			if testCase.nvidiaDevicesProvided {
				task.DeviceEnv["NVIDIA_VISIBLE_DEVICES"] = "GPU_UUID_1"
			}

			c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
			if testCase.expectToReturnError {
				require.NotNil(t, err)
			} else {
				require.NoError(t, err)
				if testCase.nvidiaDevicesProvided {
					require.Equal(t, testCase.expectedRuntime, c.HostConfig.Runtime)
				} else {
					// no nvidia devices provided -> no point to use nvidia runtime
					require.Equal(t, "", c.HostConfig.Runtime)
				}
			}
		})
	}
}
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
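// TestDockerDriver_Capabilities exercises cap_add/cap_drop together with the driver's
// allow_caps allowlist, asserting either a start error naming the forbidden capability
// or that the container's HostConfig reflects the requested capabilities.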
func TestDockerDriver_Capabilities(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
t.Skip("Capabilities not supported on windows")
|
|
|
|
}
|
|
|
|
|
|
|
|
testCases := []struct {
|
|
|
|
Name string
|
|
|
|
CapAdd []string
|
|
|
|
CapDrop []string
|
2020-10-12 12:47:05 +00:00
|
|
|
Allowlist string
|
2018-11-06 05:39:48 +00:00
|
|
|
StartError string
|
|
|
|
}{
|
|
|
|
{
|
2020-10-12 12:47:05 +00:00
|
|
|
Name: "default-allowlist-add-allowed",
|
2018-11-06 05:39:48 +00:00
|
|
|
CapAdd: []string{"fowner", "mknod"},
|
2021-05-15 20:48:01 +00:00
|
|
|
CapDrop: []string{"all"},
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
{
|
2020-10-12 12:47:05 +00:00
|
|
|
Name: "default-allowlist-add-forbidden",
|
2018-11-06 05:39:48 +00:00
|
|
|
CapAdd: []string{"net_admin"},
|
2021-05-15 20:48:01 +00:00
|
|
|
StartError: "net_admin",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
{
|
2020-10-12 12:47:05 +00:00
|
|
|
Name: "default-allowlist-drop-existing",
|
2021-05-15 20:48:01 +00:00
|
|
|
CapDrop: []string{"fowner", "mknod", "net_raw"},
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
{
|
2020-10-12 12:47:05 +00:00
|
|
|
Name: "restrictive-allowlist-drop-all",
|
2021-05-15 20:48:01 +00:00
|
|
|
CapDrop: []string{"all"},
|
|
|
|
Allowlist: "fowner,mknod",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
{
|
2020-10-12 12:47:05 +00:00
|
|
|
Name: "restrictive-allowlist-add-allowed",
|
2018-11-06 05:39:48 +00:00
|
|
|
CapAdd: []string{"fowner", "mknod"},
|
2021-05-15 20:48:01 +00:00
|
|
|
CapDrop: []string{"all"},
|
|
|
|
Allowlist: "mknod,fowner",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
{
|
2020-10-12 12:47:05 +00:00
|
|
|
Name: "restrictive-allowlist-add-forbidden",
|
2018-11-06 05:39:48 +00:00
|
|
|
CapAdd: []string{"net_admin", "mknod"},
|
2021-05-15 20:48:01 +00:00
|
|
|
CapDrop: []string{"all"},
|
2020-10-12 12:47:05 +00:00
|
|
|
Allowlist: "fowner,mknod",
|
2021-05-15 20:48:01 +00:00
|
|
|
StartError: "net_admin",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
{
|
2020-10-12 12:47:05 +00:00
|
|
|
Name: "permissive-allowlist",
|
2021-05-15 20:48:01 +00:00
|
|
|
CapAdd: []string{"mknod", "net_admin"},
|
|
|
|
Allowlist: "all",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
{
|
2020-10-12 12:47:05 +00:00
|
|
|
Name: "permissive-allowlist-add-all",
|
2018-11-06 05:39:48 +00:00
|
|
|
CapAdd: []string{"all"},
|
2021-05-15 20:48:01 +00:00
|
|
|
Allowlist: "all",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tc := range testCases {
|
|
|
|
t.Run(tc.Name, func(t *testing.T) {
|
|
|
|
client := newTestDockerClient(t)
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
if len(tc.CapAdd) > 0 {
|
2018-11-09 04:38:47 +00:00
|
|
|
cfg.CapAdd = tc.CapAdd
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
if len(tc.CapDrop) > 0 {
|
2018-11-09 04:38:47 +00:00
|
|
|
cfg.CapDrop = tc.CapDrop
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
dockerDriver, ok := d.Impl().(*Driver)
|
|
|
|
require.True(t, ok)
|
2020-10-12 12:47:05 +00:00
|
|
|
if tc.Allowlist != "" {
|
|
|
|
dockerDriver.config.AllowCaps = strings.Split(tc.Allowlist, ",")
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
cleanup := d.MkAllocDir(task, true)
|
|
|
|
defer cleanup()
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
_, _, err := d.StartTask(task)
|
2019-05-30 19:35:22 +00:00
|
|
|
defer d.DestroyTask(task.ID, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
if err == nil && tc.StartError != "" {
|
|
|
|
t.Fatalf("Expected error in start: %v", tc.StartError)
|
|
|
|
} else if err != nil {
|
|
|
|
if tc.StartError == "" {
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
} else {
|
|
|
|
require.Contains(t, err.Error(), tc.StartError)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
handle, ok := dockerDriver.tasks.Get(task.ID)
|
|
|
|
require.True(t, ok)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-12-15 03:04:33 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Exactly(t, tc.CapAdd, container.HostConfig.CapAdd)
|
|
|
|
require.Exactly(t, tc.CapDrop, container.HostConfig.CapDrop)
|
2018-11-06 05:39:48 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
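// TestDockerDriver_DNS verifies that the task's DNS servers, search domains, and
// options are applied to the container.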
func TestDockerDriver_DNS(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2020-08-17 14:22:08 +00:00
|
|
|
testutil.ExecCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-08-17 14:22:08 +00:00
|
|
|
cases := []struct {
|
|
|
|
name string
|
|
|
|
cfg *drivers.DNSConfig
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
name: "nil DNSConfig",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "basic",
|
|
|
|
cfg: &drivers.DNSConfig{
|
|
|
|
Servers: []string{"1.1.1.1", "1.0.0.1"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "full",
|
|
|
|
cfg: &drivers.DNSConfig{
|
|
|
|
Servers: []string{"1.1.1.1", "1.0.0.1"},
|
|
|
|
Searches: []string{"local.test", "node.consul"},
|
|
|
|
Options: []string{"ndots:2", "edns0"},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-08-17 14:22:08 +00:00
|
|
|
for _, c := range cases {
|
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
|
|
|
task.DNS = c.cfg
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-08-17 14:22:08 +00:00
|
|
|
_, d, _, cleanup := dockerSetup(t, task, nil)
|
|
|
|
defer cleanup()
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-08-17 14:22:08 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
|
|
|
defer d.DestroyTask(task.ID, true)
|
|
|
|
|
|
|
|
dtestutil.TestTaskDNSConfig(t, d, task.ID, c.cfg)
|
|
|
|
}
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2021-10-15 19:53:25 +00:00
|
|
|
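// TestDockerDriver_Init verifies that setting init in the task config enables Docker's
// init process (HostConfig.Init) on the container.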
func TestDockerDriver_Init(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2021-10-15 19:53:25 +00:00
|
|
|
testutil.DockerCompatible(t)
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
t.Skip("Windows does not support init.")
|
|
|
|
}
|
|
|
|
|
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
|
|
|
|
|
|
|
cfg.Init = true
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
|
|
|
|
|
|
|
client, d, handle, cleanup := dockerSetup(t, task, nil)
|
|
|
|
defer cleanup()
|
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
|
|
|
|
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Equal(t, cfg.Init, container.HostConfig.Init)
|
|
|
|
}
|
|
|
|
|
2020-06-25 16:30:16 +00:00
|
|
|
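// TestDockerDriver_CPUSetCPUs verifies that cpuset_cpus values (a single CPU, a
// comma-separated list, and a range) are passed through to HostConfig.CPUSetCPUs.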
func TestDockerDriver_CPUSetCPUs(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2020-06-25 16:30:16 +00:00
|
|
|
testutil.DockerCompatible(t)
|
client: enable support for cgroups v2
This PR introduces support for using Nomad on systems with cgroups v2 [1]
enabled as the cgroups controller mounted on /sys/fs/cgroups. Newer Linux
distros like Ubuntu 21.10 are shipping with cgroups v2 only, causing problems
for Nomad users.
Nomad mostly "just works" with cgroups v2 due to the indirection via libcontainer,
but not so for managing cpuset cgroups. Before, Nomad has been making use of
a feature in v1 where a PID could be a member of more than one cgroup. In v2
this is no longer possible, and so the logic around computing cpuset values
must be modified. When Nomad detects v2, it manages cpuset values in-process,
rather than making use of cgroup hierarchy inheritance via shared/reserved
parents.
Nomad will only activate the v2 logic when it detects cgroups2 is mounted at
/sys/fs/cgroups. This means on systems running in hybrid mode with cgroups2
mounted at /sys/fs/cgroups/unified (as is typical) Nomad will continue to
use the v1 logic, and should operate as before. Systems that do not support
cgroups v2 are also not affected.
When v2 is activated, Nomad will create a parent called nomad.slice (unless
otherwise configured in Client config), and create cgroups for tasks using
naming convention <allocID>-<task>.scope. These follow the naming convention
set by systemd and also used by Docker when cgroups v2 is detected.
Client nodes now export a new fingerprint attribute, unique.cgroups.version
which will be set to 'v1' or 'v2' to indicate the cgroups regime in use by
Nomad.
The new cpuset management strategy fixes #11705, where docker tasks that
spawned processes on startup would "leak". In cgroups v2, the PIDs are
started in the cgroup they will always live in, and thus the cause of
the leak is eliminated.
[1] https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
Closes #11289
Fixes #11705 #11773 #11933
2022-02-28 22:24:01 +00:00
|
|
|
testutil.CgroupsCompatible(t)
|
2020-06-25 16:30:16 +00:00
|
|
|
|
|
|
|
testCases := []struct {
|
|
|
|
Name string
|
|
|
|
CPUSetCPUs string
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
Name: "Single CPU",
|
|
|
|
CPUSetCPUs: "0",
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "Comma separated list of CPUs",
|
2020-06-25 20:27:16 +00:00
|
|
|
CPUSetCPUs: "0,1",
|
2020-06-25 16:30:16 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "Range of CPUs",
|
2020-06-25 20:27:16 +00:00
|
|
|
CPUSetCPUs: "0-1",
|
2020-06-25 16:30:16 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, testCase := range testCases {
|
|
|
|
t.Run(testCase.Name, func(t *testing.T) {
|
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
|
|
|
|
|
|
|
cfg.CPUSetCPUs = testCase.CPUSetCPUs
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
|
|
|
|
|
|
|
client, d, handle, cleanup := dockerSetup(t, task, nil)
|
|
|
|
defer cleanup()
|
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
|
|
|
|
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Equal(t, cfg.CPUSetCPUs, container.HostConfig.CPUSetCPUs)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-01 16:43:43 +00:00
|
|
|
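// TestDockerDriver_MemoryHardLimit verifies that memory_hard_limit becomes the
// container's hard memory limit while the Nomad memory allocation becomes the soft
// limit (MemoryReservation).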
func TestDockerDriver_MemoryHardLimit(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2020-06-01 16:43:43 +00:00
|
|
|
testutil.DockerCompatible(t)
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
t.Skip("Windows does not support MemoryReservation")
|
|
|
|
}
|
|
|
|
|
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
|
|
|
|
|
|
|
cfg.MemoryHardLimit = 300
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
|
|
|
|
|
|
|
client, d, handle, cleanup := dockerSetup(t, task, nil)
|
|
|
|
defer cleanup()
|
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
|
|
|
|
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Equal(t, task.Resources.LinuxResources.MemoryLimitBytes, container.HostConfig.MemoryReservation)
|
|
|
|
require.Equal(t, cfg.MemoryHardLimit*1024*1024, container.HostConfig.Memory)
|
|
|
|
}
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
func TestDockerDriver_MACAddress(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2019-02-20 12:48:02 +00:00
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
t.Skip("Windows docker does not support setting MacAddress")
|
|
|
|
}
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
2018-11-09 04:38:47 +00:00
|
|
|
cfg.MacAddress = "00:16:3e:00:00:00"
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-05-27 01:08:25 +00:00
|
|
|
client, d, handle, cleanup := dockerSetup(t, task, nil)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Equal(t, cfg.MacAddress, container.NetworkSettings.MacAddress)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerWorkDir(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
2018-11-09 04:38:47 +00:00
|
|
|
cfg.WorkDir = "/some/path"
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-05-27 01:08:25 +00:00
|
|
|
client, d, handle, cleanup := dockerSetup(t, task, nil)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, err)
|
2019-01-11 13:28:40 +00:00
|
|
|
require.Equal(t, cfg.WorkDir, filepath.ToSlash(container.Config.WorkingDir))
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func inSlice(needle string, haystack []string) bool {
|
|
|
|
for _, h := range haystack {
|
|
|
|
if h == needle {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
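// TestDockerDriver_PortsNoMap verifies that without a port_map the allocated host
// ports themselves are exposed and bound in the container.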
func TestDockerDriver_PortsNoMap(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
task, _, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
|
|
|
res := ports[0]
|
|
|
|
dyn := ports[1]
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-05-27 01:08:25 +00:00
|
|
|
client, d, handle, cleanup := dockerSetup(t, task, nil)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
// Verify that the correct ports are EXPOSED
|
|
|
|
expectedExposedPorts := map[docker.Port]struct{}{
|
|
|
|
docker.Port(fmt.Sprintf("%d/tcp", res)): {},
|
|
|
|
docker.Port(fmt.Sprintf("%d/udp", res)): {},
|
|
|
|
docker.Port(fmt.Sprintf("%d/tcp", dyn)): {},
|
|
|
|
docker.Port(fmt.Sprintf("%d/udp", dyn)): {},
|
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2019-01-11 13:28:40 +00:00
|
|
|
hostIP := "127.0.0.1"
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
hostIP = ""
|
|
|
|
}
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
// Verify that the correct ports are FORWARDED
|
|
|
|
expectedPortBindings := map[docker.Port][]docker.PortBinding{
|
2019-01-11 13:28:40 +00:00
|
|
|
docker.Port(fmt.Sprintf("%d/tcp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
|
|
|
|
docker.Port(fmt.Sprintf("%d/udp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
|
|
|
|
docker.Port(fmt.Sprintf("%d/tcp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
|
|
|
|
docker.Port(fmt.Sprintf("%d/udp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
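// TestDockerDriver_PortsMapping verifies that with a port_map the mapped container
// ports are exposed, bound to the allocated host ports, and reflected in the
// NOMAD_PORT_* environment variables.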
func TestDockerDriver_PortsMapping(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
|
|
|
res := ports[0]
|
|
|
|
dyn := ports[1]
|
2018-11-09 04:38:47 +00:00
|
|
|
cfg.PortMap = map[string]int{
|
|
|
|
"main": 8080,
|
|
|
|
"REDIS": 6379,
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-05-27 01:08:25 +00:00
|
|
|
client, d, handle, cleanup := dockerSetup(t, task, nil)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2019-09-04 13:33:35 +00:00
|
|
|
// Verify that the port environment variables are set
|
|
|
|
require.Contains(t, container.Config.Env, "NOMAD_PORT_main=8080")
|
|
|
|
require.Contains(t, container.Config.Env, "NOMAD_PORT_REDIS=6379")
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
// Verify that the correct ports are EXPOSED
|
|
|
|
expectedExposedPorts := map[docker.Port]struct{}{
|
|
|
|
docker.Port("8080/tcp"): {},
|
|
|
|
docker.Port("8080/udp"): {},
|
|
|
|
docker.Port("6379/tcp"): {},
|
|
|
|
docker.Port("6379/udp"): {},
|
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2019-01-11 13:28:40 +00:00
|
|
|
hostIP := "127.0.0.1"
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
hostIP = ""
|
|
|
|
}
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
// Verify that the correct ports are FORWARDED
|
|
|
|
expectedPortBindings := map[docker.Port][]docker.PortBinding{
|
2019-01-11 13:28:40 +00:00
|
|
|
docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
|
|
|
|
docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
|
|
|
|
docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
|
|
|
|
docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
2020-08-11 22:30:22 +00:00
|
|
|
func TestDockerDriver_CreateContainerConfig_Ports(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2020-08-11 22:30:22 +00:00
|
|
|
|
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
|
|
|
hostIP := "127.0.0.1"
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
hostIP = ""
|
|
|
|
}
|
|
|
|
portmappings := structs.AllocatedPorts(make([]structs.AllocatedPortMapping, len(ports)))
|
|
|
|
portmappings[0] = structs.AllocatedPortMapping{
|
|
|
|
Label: "main",
|
|
|
|
Value: ports[0],
|
|
|
|
HostIP: hostIP,
|
|
|
|
To: 8080,
|
|
|
|
}
|
|
|
|
portmappings[1] = structs.AllocatedPortMapping{
|
|
|
|
Label: "REDIS",
|
|
|
|
Value: ports[1],
|
|
|
|
HostIP: hostIP,
|
|
|
|
To: 6379,
|
|
|
|
}
|
|
|
|
task.Resources.Ports = &portmappings
|
|
|
|
cfg.Ports = []string{"main", "REDIS"}
|
|
|
|
|
|
|
|
dh := dockerDriverHarness(t, nil)
|
|
|
|
driver := dh.Impl().(*Driver)
|
|
|
|
|
|
|
|
c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Equal(t, "org/repo:0.1", c.Config.Image)
|
|
|
|
|
|
|
|
// Verify that the correct ports are FORWARDED
|
|
|
|
expectedPortBindings := map[docker.Port][]docker.PortBinding{
|
|
|
|
docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[0])}},
|
|
|
|
docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[0])}},
|
|
|
|
docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[1])}},
|
|
|
|
docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[1])}},
|
|
|
|
}
|
|
|
|
require.Exactly(t, expectedPortBindings, c.HostConfig.PortBindings)
|
|
|
|
|
|
|
|
}
|
2019-09-04 13:33:35 +00:00
|
|
|
func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-09-04 13:33:35 +00:00
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
|
|
|
res := ports[0]
|
|
|
|
dyn := ports[1]
|
2019-09-04 13:33:35 +00:00
|
|
|
cfg.PortMap = map[string]int{
|
|
|
|
"main": 8080,
|
|
|
|
"REDIS": 6379,
|
|
|
|
}
|
|
|
|
dh := dockerDriverHarness(t, nil)
|
|
|
|
driver := dh.Impl().(*Driver)
|
|
|
|
|
|
|
|
c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Equal(t, "org/repo:0.1", c.Config.Image)
|
|
|
|
require.Contains(t, c.Config.Env, "NOMAD_PORT_main=8080")
|
|
|
|
require.Contains(t, c.Config.Env, "NOMAD_PORT_REDIS=6379")
|
|
|
|
|
|
|
|
// Verify that the correct ports are FORWARDED
|
|
|
|
hostIP := "127.0.0.1"
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
hostIP = ""
|
|
|
|
}
|
|
|
|
expectedPortBindings := map[docker.Port][]docker.PortBinding{
|
|
|
|
docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
|
|
|
|
docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
|
|
|
|
docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
|
|
|
|
docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
|
|
|
|
}
|
|
|
|
require.Exactly(t, expectedPortBindings, c.HostConfig.PortBindings)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
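// TestDockerDriver_CleanupContainer verifies that DestroyTask removes the container
// once the task has exited.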
func TestDockerDriver_CleanupContainer(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
2019-01-11 13:28:40 +00:00
|
|
|
cfg.Command = "echo"
|
2018-11-09 04:38:47 +00:00
|
|
|
cfg.Args = []string{"hello"}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-05-27 01:08:25 +00:00
|
|
|
client, d, handle, cleanup := dockerSetup(t, task, nil)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
waitCh, err := d.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
2019-05-29 22:38:43 +00:00
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
select {
|
2018-11-09 04:38:47 +00:00
|
|
|
case res := <-waitCh:
|
2018-11-06 05:39:48 +00:00
|
|
|
if !res.Successful() {
|
|
|
|
t.Fatalf("err: %v", res)
|
|
|
|
}
|
|
|
|
|
2019-05-29 22:38:43 +00:00
|
|
|
err = d.DestroyTask(task.ID, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
time.Sleep(3 * time.Second)
|
|
|
|
|
|
|
|
// Ensure that the container isn't present
|
2018-11-20 02:51:26 +00:00
|
|
|
_, err := client.InspectContainer(handle.containerID)
|
2018-11-06 05:39:48 +00:00
|
|
|
if err == nil {
|
|
|
|
t.Fatalf("expected to not get container")
|
|
|
|
}
|
|
|
|
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-29 22:38:43 +00:00
|
|
|
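// TestDockerDriver_EnableImageGC verifies that with image GC enabled the task's image
// is removed after DestroyTask, once the configured image_delay has elapsed.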
func TestDockerDriver_EnableImageGC(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-05-29 22:38:43 +00:00
|
|
|
testutil.DockerCompatible(t)
|
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
2019-05-29 22:38:43 +00:00
|
|
|
cfg.Command = "echo"
|
|
|
|
cfg.Args = []string{"hello"}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
|
|
|
|
|
|
|
client := newTestDockerClient(t)
|
|
|
|
driver := dockerDriverHarness(t, map[string]interface{}{
|
|
|
|
"gc": map[string]interface{}{
|
|
|
|
"container": true,
|
|
|
|
"image": true,
|
|
|
|
"image_delay": "2s",
|
|
|
|
},
|
|
|
|
})
|
|
|
|
cleanup := driver.MkAllocDir(task, true)
|
|
|
|
defer cleanup()
|
|
|
|
|
2019-05-30 19:35:22 +00:00
|
|
|
cleanSlate(client, cfg.Image)
|
2019-05-29 22:38:43 +00:00
|
|
|
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
|
|
|
_, _, err := driver.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
dockerDriver, ok := driver.Impl().(*Driver)
|
|
|
|
require.True(t, ok)
|
|
|
|
_, ok = dockerDriver.tasks.Get(task.ID)
|
|
|
|
require.True(t, ok)
|
|
|
|
|
|
|
|
waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
select {
|
|
|
|
case res := <-waitCh:
|
|
|
|
if !res.Successful() {
|
|
|
|
t.Fatalf("err: %v", res)
|
|
|
|
}
|
|
|
|
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
|
|
|
|
// we haven't called DestroyTask, image should be present
|
|
|
|
_, err = client.InspectImage(cfg.Image)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
err = dockerDriver.DestroyTask(task.ID, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// image_delay is 2s, so image should still be around for a bit
|
|
|
|
_, err = client.InspectImage(cfg.Image)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Ensure image was removed
|
|
|
|
tu.WaitForResult(func() (bool, error) {
|
|
|
|
if _, err := client.InspectImage(cfg.Image); err == nil {
|
|
|
|
return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
|
|
|
|
}
|
|
|
|
|
|
|
|
return true, nil
|
|
|
|
}, func(err error) {
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
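// TestDockerDriver_DisableImageGC verifies that with image GC disabled the image is
// neither removed nor scheduled for deletion after DestroyTask.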
func TestDockerDriver_DisableImageGC(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-05-29 22:38:43 +00:00
|
|
|
testutil.DockerCompatible(t)
|
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
2019-05-29 22:38:43 +00:00
|
|
|
cfg.Command = "echo"
|
|
|
|
cfg.Args = []string{"hello"}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
|
|
|
|
|
|
|
client := newTestDockerClient(t)
|
|
|
|
driver := dockerDriverHarness(t, map[string]interface{}{
|
|
|
|
"gc": map[string]interface{}{
|
|
|
|
"container": true,
|
|
|
|
"image": false,
|
|
|
|
"image_delay": "1s",
|
|
|
|
},
|
|
|
|
})
|
|
|
|
cleanup := driver.MkAllocDir(task, true)
|
|
|
|
defer cleanup()
|
|
|
|
|
2019-05-30 19:35:22 +00:00
|
|
|
cleanSlate(client, cfg.Image)
|
2019-05-29 22:38:43 +00:00
|
|
|
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
|
|
|
_, _, err := driver.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
dockerDriver, ok := driver.Impl().(*Driver)
|
|
|
|
require.True(t, ok)
|
|
|
|
handle, ok := dockerDriver.tasks.Get(task.ID)
|
|
|
|
require.True(t, ok)
|
|
|
|
|
|
|
|
waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
select {
|
|
|
|
case res := <-waitCh:
|
|
|
|
if !res.Successful() {
|
|
|
|
t.Fatalf("err: %v", res)
|
|
|
|
}
|
|
|
|
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
|
|
|
|
// we haven't called DestroyTask, image should be present
|
|
|
|
_, err = client.InspectImage(handle.containerImage)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
err = dockerDriver.DestroyTask(task.ID, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// image_delay is 1s, wait a little longer
|
|
|
|
time.Sleep(3 * time.Second)
|
|
|
|
|
|
|
|
// image should not have been removed or scheduled to be removed
|
|
|
|
_, err = client.InspectImage(cfg.Image)
|
|
|
|
require.NoError(t, err)
|
|
|
|
dockerDriver.coordinator.imageLock.Lock()
|
|
|
|
_, ok = dockerDriver.coordinator.deleteFuture[handle.containerImage]
|
|
|
|
require.False(t, ok, "image should not be registered for deletion")
|
|
|
|
dockerDriver.coordinator.imageLock.Unlock()
|
|
|
|
}
|
|
|
|
|
2019-06-03 19:17:57 +00:00
|
|
|
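// TestDockerDriver_MissingContainer_Cleanup verifies that DestroyTask still cleans up
// driver state and the image when the container was removed out-of-band.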
func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-06-03 19:17:57 +00:00
|
|
|
testutil.DockerCompatible(t)
|
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
2019-06-03 19:17:57 +00:00
|
|
|
cfg.Command = "echo"
|
|
|
|
cfg.Args = []string{"hello"}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
|
|
|
|
|
|
|
client := newTestDockerClient(t)
|
|
|
|
driver := dockerDriverHarness(t, map[string]interface{}{
|
|
|
|
"gc": map[string]interface{}{
|
|
|
|
"container": true,
|
|
|
|
"image": true,
|
|
|
|
"image_delay": "0s",
|
|
|
|
},
|
|
|
|
})
|
|
|
|
cleanup := driver.MkAllocDir(task, true)
|
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
cleanSlate(client, cfg.Image)
|
|
|
|
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
|
|
|
_, _, err := driver.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
dockerDriver, ok := driver.Impl().(*Driver)
|
|
|
|
require.True(t, ok)
|
|
|
|
h, ok := dockerDriver.tasks.Get(task.ID)
|
|
|
|
require.True(t, ok)
|
|
|
|
|
|
|
|
waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
select {
|
|
|
|
case res := <-waitCh:
|
|
|
|
if !res.Successful() {
|
|
|
|
t.Fatalf("err: %v", res)
|
|
|
|
}
|
|
|
|
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
|
|
|
|
// remove the container out-of-band
|
|
|
|
require.NoError(t, client.RemoveContainer(docker.RemoveContainerOptions{
|
|
|
|
ID: h.containerID,
|
|
|
|
}))
|
|
|
|
|
|
|
|
require.NoError(t, dockerDriver.DestroyTask(task.ID, false))
|
|
|
|
|
|
|
|
// Ensure image was removed
|
|
|
|
tu.WaitForResult(func() (bool, error) {
|
|
|
|
if _, err := client.InspectImage(cfg.Image); err == nil {
|
|
|
|
return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
|
|
|
|
}
|
|
|
|
|
|
|
|
return true, nil
|
|
|
|
}, func(err error) {
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
|
|
|
|
|
|
|
// Ensure that task handle was removed
|
|
|
|
_, ok = dockerDriver.tasks.Get(task.ID)
|
|
|
|
require.False(t, ok)
|
|
|
|
}
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
func TestDockerDriver_Stats(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
2019-01-11 13:28:40 +00:00
|
|
|
cfg.Command = "sleep"
|
2018-11-09 04:38:47 +00:00
|
|
|
cfg.Args = []string{"1000"}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-05-27 01:08:25 +00:00
|
|
|
_, d, handle, cleanup := dockerSetup(t, task, nil)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
go func() {
|
2018-12-11 20:27:50 +00:00
|
|
|
defer d.DestroyTask(task.ID, true)
|
|
|
|
ctx, cancel := context.WithCancel(context.Background())
|
|
|
|
defer cancel()
|
|
|
|
ch, err := handle.Stats(ctx, 1*time.Second)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
select {
|
|
|
|
case ru := <-ch:
|
|
|
|
assert.NotNil(t, ru.ResourceUsage)
|
|
|
|
case <-time.After(3 * time.Second):
|
|
|
|
assert.Fail(t, "stats timeout")
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
waitCh, err := d.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
select {
|
2018-11-09 04:38:47 +00:00
|
|
|
case res := <-waitCh:
|
2018-11-06 05:39:48 +00:00
|
|
|
if res.Successful() {
|
|
|
|
t.Fatalf("should err: %v", res)
|
|
|
|
}
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-27 19:03:58 +00:00
|
|
|
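// setupDockerVolumes builds a task that touches a file inside a volume mounted from
// hostpath, returning the task, a driver harness, the task config, the expected host
// file path, and a cleanup func.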
func setupDockerVolumes(t *testing.T, cfg map[string]interface{}, hostpath string) (*drivers.TaskConfig, *dtestutil.DriverHarness, *TaskConfig, string, func()) {
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
randfn := fmt.Sprintf("test-%d", rand.Int())
|
|
|
|
hostfile := filepath.Join(hostpath, randfn)
|
2019-01-11 13:28:40 +00:00
|
|
|
var containerPath string
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
containerPath = "C:\\data"
|
|
|
|
} else {
|
|
|
|
containerPath = "/mnt/vol"
|
|
|
|
}
|
2018-11-06 05:39:48 +00:00
|
|
|
containerFile := filepath.Join(containerPath, randfn)
|
|
|
|
|
2019-01-11 13:28:40 +00:00
|
|
|
taskCfg := newTaskConfig("", []string{"touch", containerFile})
|
|
|
|
taskCfg.Volumes = []string{fmt.Sprintf("%s:%s", hostpath, containerPath)}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "ls",
|
2019-03-28 22:16:52 +00:00
|
|
|
AllocID: uuid.Generate(),
|
2018-11-09 04:38:47 +00:00
|
|
|
Env: map[string]string{"VOL_PATH": containerPath},
|
|
|
|
Resources: basicResources,
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
d := dockerDriverHarness(t, cfg)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2019-01-11 13:28:40 +00:00
|
|
|
return task, d, &taskCfg, hostfile, cleanup
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_VolumesDisabled(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
cfg := map[string]interface{}{
|
2018-11-20 03:58:05 +00:00
|
|
|
"volumes": map[string]interface{}{
|
|
|
|
"enabled": false,
|
|
|
|
},
|
|
|
|
"gc": map[string]interface{}{
|
|
|
|
"image": false,
|
|
|
|
},
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
{
|
2022-05-12 15:42:40 +00:00
|
|
|
tmpvol := t.TempDir()
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
task, driver, _, _, cleanup := setupDockerVolumes(t, cfg, tmpvol)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
|
|
|
|
2022-05-12 15:42:40 +00:00
|
|
|
_, _, err := driver.StartTask(task)
|
2019-05-30 19:35:22 +00:00
|
|
|
defer driver.DestroyTask(task.ID, true)
|
|
|
|
if err == nil {
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Fail(t, "Started driver successfully when volumes should have been disabled.")
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Relative paths should still be allowed
|
|
|
|
{
|
2018-11-09 04:38:47 +00:00
|
|
|
task, driver, _, fn, cleanup := setupDockerVolumes(t, cfg, ".")
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
_, _, err := driver.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer driver.DestroyTask(task.ID, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
waitCh, err := driver.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
select {
|
2018-11-09 04:38:47 +00:00
|
|
|
case res := <-waitCh:
|
2018-11-06 05:39:48 +00:00
|
|
|
if !res.Successful() {
|
|
|
|
t.Fatalf("unexpected err: %v", res)
|
|
|
|
}
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
if _, err := ioutil.ReadFile(filepath.Join(task.TaskDir().Dir, fn)); err != nil {
|
2018-11-06 05:39:48 +00:00
|
|
|
t.Fatalf("unexpected error reading %s: %v", fn, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Volume Drivers should be rejected (error)
|
|
|
|
{
|
2018-11-09 04:38:47 +00:00
|
|
|
task, driver, taskCfg, _, cleanup := setupDockerVolumes(t, cfg, "fake_flocker_vol")
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
taskCfg.VolumeDriver = "flocker"
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))
|
|
|
|
|
2019-05-30 19:35:22 +00:00
|
|
|
_, _, err := driver.StartTask(task)
|
|
|
|
defer driver.DestroyTask(task.ID, true)
|
|
|
|
if err == nil {
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Fail(t, "Started driver successfully when volume drivers should have been disabled.")
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
}
|
2018-12-11 19:22:50 +00:00
|
|
|
}
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
func TestDockerDriver_VolumesEnabled(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-11-11 15:03:46 +00:00
|
|
|
cfg := map[string]interface{}{
|
|
|
|
"volumes": map[string]interface{}{
|
|
|
|
"enabled": true,
|
|
|
|
},
|
|
|
|
"gc": map[string]interface{}{
|
|
|
|
"image": false,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2022-05-12 15:42:40 +00:00
|
|
|
tmpvol := t.TempDir()
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
// Evaluate symlinks so it works on MacOS
|
2022-05-12 15:42:40 +00:00
|
|
|
tmpvol, err := filepath.EvalSymlinks(tmpvol)
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-11-11 15:03:46 +00:00
|
|
|
task, driver, _, hostpath, cleanup := setupDockerVolumes(t, cfg, tmpvol)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
_, _, err = driver.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer driver.DestroyTask(task.ID, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
waitCh, err := driver.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
select {
|
2018-11-09 04:38:47 +00:00
|
|
|
case res := <-waitCh:
|
2018-11-06 05:39:48 +00:00
|
|
|
if !res.Successful() {
|
|
|
|
t.Fatalf("unexpected err: %v", res)
|
|
|
|
}
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, err := ioutil.ReadFile(hostpath); err != nil {
|
|
|
|
t.Fatalf("unexpected error reading %s: %v", hostpath, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
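// TestDockerDriver_Mounts verifies mount validation, including that duplicate mount
// targets are rejected with a "Duplicate mount point" error.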
func TestDockerDriver_Mounts(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
goodMount := DockerMount{
|
|
|
|
Target: "/nomad",
|
|
|
|
VolumeOptions: DockerVolumeOptions{
|
|
|
|
Labels: map[string]string{"foo": "bar"},
|
|
|
|
DriverConfig: DockerVolumeDriverConfig{
|
|
|
|
Name: "local",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
},
|
2018-11-09 04:38:47 +00:00
|
|
|
ReadOnly: true,
|
|
|
|
Source: "test",
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
2019-01-11 13:28:40 +00:00
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
goodMount.Target = "C:\\nomad"
|
|
|
|
}
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
cases := []struct {
|
|
|
|
Name string
|
2018-11-09 04:38:47 +00:00
|
|
|
Mounts []DockerMount
|
2018-11-06 05:39:48 +00:00
|
|
|
Error string
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
Name: "good-one",
|
|
|
|
Error: "",
|
2018-11-09 04:38:47 +00:00
|
|
|
Mounts: []DockerMount{goodMount},
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
{
|
2018-11-09 04:38:47 +00:00
|
|
|
Name: "duplicate",
|
|
|
|
Error: "Duplicate mount point",
|
|
|
|
Mounts: []DockerMount{goodMount, goodMount, goodMount},
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, c := range cases {
|
|
|
|
t.Run(c.Name, func(t *testing.T) {
|
2018-12-15 05:08:23 +00:00
|
|
|
d := dockerDriverHarness(t, nil)
|
2020-11-11 15:03:46 +00:00
|
|
|
driver := d.Impl().(*Driver)
|
|
|
|
driver.config.Volumes.Enabled = true
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
// Build the task
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
2019-01-11 13:28:40 +00:00
|
|
|
cfg.Command = "sleep"
|
2018-11-09 04:38:47 +00:00
|
|
|
cfg.Args = []string{"10000"}
|
|
|
|
cfg.Mounts = c.Mounts
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
|
|
|
|
|
|
|
_, _, err := d.StartTask(task)
|
|
|
|
defer d.DestroyTask(task.ID, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
if err == nil && c.Error != "" {
|
|
|
|
t.Fatalf("expected error: %v", c.Error)
|
|
|
|
} else if err != nil {
|
|
|
|
if c.Error == "" {
|
|
|
|
t.Fatalf("unexpected error in prestart: %v", err)
|
|
|
|
} else if !strings.Contains(err.Error(), c.Error) {
|
|
|
|
t.Fatalf("expected error %q; got %v", c.Error, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
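// TestDockerDriver_AuthConfiguration verifies that registry credentials are resolved
// from a Docker config file based on the image's registry, returning nil when no
// matching entry exists.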
func TestDockerDriver_AuthConfiguration(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
path := "./test-resources/docker/auth.json"
|
|
|
|
cases := []struct {
|
|
|
|
Repo string
|
|
|
|
AuthConfig *docker.AuthConfiguration
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
Repo: "lolwhat.com/what:1337",
|
|
|
|
AuthConfig: nil,
|
|
|
|
},
|
|
|
|
{
|
2022-05-17 15:24:19 +00:00
|
|
|
Repo: "redis:7",
|
2018-11-06 05:39:48 +00:00
|
|
|
AuthConfig: &docker.AuthConfiguration{
|
|
|
|
Username: "test",
|
|
|
|
Password: "1234",
|
|
|
|
Email: "",
|
|
|
|
ServerAddress: "https://index.docker.io/v1/",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2022-05-17 15:24:19 +00:00
|
|
|
Repo: "quay.io/redis:7",
|
2018-11-06 05:39:48 +00:00
|
|
|
AuthConfig: &docker.AuthConfiguration{
|
|
|
|
Username: "test",
|
|
|
|
Password: "5678",
|
|
|
|
Email: "",
|
|
|
|
ServerAddress: "quay.io",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
2022-05-17 15:24:19 +00:00
|
|
|
Repo: "other.io/redis:7",
|
2018-11-06 05:39:48 +00:00
|
|
|
AuthConfig: &docker.AuthConfiguration{
|
|
|
|
Username: "test",
|
|
|
|
Password: "abcd",
|
|
|
|
Email: "",
|
|
|
|
ServerAddress: "https://other.io/v1/",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
for _, c := range cases {
|
2018-11-06 05:39:48 +00:00
|
|
|
act, err := authFromDockerConfig(path)(c.Repo)
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.Exactly(t, c.AuthConfig, act)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-13 16:47:37 +00:00
|
|
|
func TestDockerDriver_AuthFromTaskConfig(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-03-13 16:47:37 +00:00
|
|
|
|
|
|
|
cases := []struct {
|
|
|
|
Auth DockerAuth
|
|
|
|
AuthConfig *docker.AuthConfiguration
|
2019-03-13 18:27:28 +00:00
|
|
|
Desc string
|
2019-03-13 16:47:37 +00:00
|
|
|
}{
|
|
|
|
{
|
|
|
|
Auth: DockerAuth{},
|
|
|
|
AuthConfig: nil,
|
2019-03-13 18:27:28 +00:00
|
|
|
Desc: "Empty Config",
|
2019-03-13 16:47:37 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
Auth: DockerAuth{
|
|
|
|
Username: "foo",
|
|
|
|
Password: "bar",
|
|
|
|
Email: "foo@bar.com",
|
|
|
|
ServerAddr: "www.foobar.com",
|
|
|
|
},
|
|
|
|
AuthConfig: &docker.AuthConfiguration{
|
|
|
|
Username: "foo",
|
|
|
|
Password: "bar",
|
|
|
|
Email: "foo@bar.com",
|
|
|
|
ServerAddress: "www.foobar.com",
|
|
|
|
},
|
2019-03-13 18:27:28 +00:00
|
|
|
Desc: "All fields set",
|
2019-03-13 16:47:37 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
Auth: DockerAuth{
|
|
|
|
Username: "foo",
|
|
|
|
Password: "bar",
|
|
|
|
ServerAddr: "www.foobar.com",
|
|
|
|
},
|
|
|
|
AuthConfig: &docker.AuthConfiguration{
|
|
|
|
Username: "foo",
|
|
|
|
Password: "bar",
|
|
|
|
ServerAddress: "www.foobar.com",
|
|
|
|
},
|
2019-03-13 18:27:28 +00:00
|
|
|
Desc: "Email not set",
|
2019-03-13 16:47:37 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, c := range cases {
|
2019-03-13 18:27:28 +00:00
|
|
|
t.Run(c.Desc, func(t *testing.T) {
|
|
|
|
act, err := authFromTaskConfig(&TaskConfig{Auth: c.Auth})("test")
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Exactly(t, c.AuthConfig, act)
|
|
|
|
})
|
2019-03-13 16:47:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
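// TestDockerDriver_OOMKilled runs a task that exceeds its memory limit and asserts the
// exit result reports OOMKilled.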
func TestDockerDriver_OOMKilled(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2022-07-28 18:47:06 +00:00
|
|
|
// waiting on upstream fix for cgroups v2
|
|
|
|
// see https://github.com/hashicorp/nomad/issues/13119
|
|
|
|
testutil.CgroupsCompatibleV1(t)
|
2019-02-20 12:48:02 +00:00
|
|
|
|
2019-01-11 13:28:40 +00:00
|
|
|
taskCfg := newTaskConfig("", []string{"sh", "-c", `sleep 2 && x=a && while true; do x="$x$x"; done`})
|
2018-11-12 12:39:55 +00:00
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "oom-killed",
|
2019-03-28 22:16:52 +00:00
|
|
|
AllocID: uuid.Generate(),
|
2018-11-12 12:39:55 +00:00
|
|
|
Resources: basicResources,
|
|
|
|
}
|
2018-12-14 16:06:14 +00:00
|
|
|
task.Resources.LinuxResources.MemoryLimitBytes = 10 * 1024 * 1024
|
2018-12-14 00:21:41 +00:00
|
|
|
task.Resources.NomadResources.Memory.MemoryMB = 10
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-12-14 16:06:14 +00:00
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-12-14 16:06:14 +00:00
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-12-14 16:06:14 +00:00
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-12-14 16:06:14 +00:00
|
|
|
_, _, err := d.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
defer d.DestroyTask(task.ID, true)
|
|
|
|
|
|
|
|
waitCh, err := d.WaitTask(context.Background(), task.ID)
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
select {
|
2018-11-12 12:39:55 +00:00
|
|
|
case res := <-waitCh:
|
2018-11-06 05:39:48 +00:00
|
|
|
if res.Successful() {
|
|
|
|
t.Fatalf("expected error, but container exited successful")
|
|
|
|
}
|
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
if !res.OOMKilled {
|
2018-11-06 05:39:48 +00:00
|
|
|
t.Fatalf("not killed by OOM killer: %s", res.Err)
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Logf("Successfully killed by OOM killer")
|
|
|
|
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
brokenConfigs := []DockerDevice{
|
|
|
|
{
|
|
|
|
HostPath: "",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
2018-11-12 12:39:55 +00:00
|
|
|
{
|
|
|
|
HostPath: "/dev/sda1",
|
|
|
|
CgroupPermissions: "rxb",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
testCases := []struct {
|
2018-11-12 12:39:55 +00:00
|
|
|
deviceConfig []DockerDevice
|
2018-11-06 05:39:48 +00:00
|
|
|
err error
|
|
|
|
}{
|
2018-11-12 12:39:55 +00:00
|
|
|
{brokenConfigs[:1], fmt.Errorf("host path must be set in configuration for devices")},
|
|
|
|
{brokenConfigs[1:], fmt.Errorf("invalid cgroup permission string: \"rxb\"")},
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
for _, tc := range testCases {
|
|
|
|
task, cfg, ports := dockerTask(t)
|
2018-11-12 12:39:55 +00:00
|
|
|
cfg.Devices = tc.deviceConfig
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
|
|
|
defer cleanup()
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
_, _, err := d.StartTask(task)
|
|
|
|
require.Error(t, err)
|
|
|
|
require.Contains(t, err.Error(), tc.err.Error())
|
2019-12-04 00:15:11 +00:00
|
|
|
freeport.Return(ports)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Device_Success(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
if runtime.GOOS != "linux" {
|
|
|
|
t.Skip("test device mounts only on linux")
|
|
|
|
}
|
|
|
|
|
|
|
|
hostPath := "/dev/random"
|
|
|
|
containerPath := "/dev/myrandom"
|
|
|
|
perms := "rwm"
|
|
|
|
|
|
|
|
expectedDevice := docker.Device{
|
|
|
|
PathOnHost: hostPath,
|
|
|
|
PathInContainer: containerPath,
|
|
|
|
CgroupPermissions: perms,
|
|
|
|
}
|
2018-11-12 12:39:55 +00:00
|
|
|
config := DockerDevice{
|
|
|
|
HostPath: hostPath,
|
|
|
|
ContainerPath: containerPath,
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
2018-11-12 12:39:55 +00:00
|
|
|
cfg.Devices = []DockerDevice{config}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-05-27 01:08:25 +00:00
|
|
|
client, driver, handle, cleanup := dockerSetup(t, task, nil)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NotEmpty(t, container.HostConfig.Devices, "Expected one device")
|
|
|
|
require.Equal(t, expectedDevice, container.HostConfig.Devices[0], "Incorrect device")
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Entrypoint(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2019-01-11 13:28:40 +00:00
|
|
|
entrypoint := []string{"sh", "-c"}
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
2018-11-12 12:39:55 +00:00
|
|
|
cfg.Entrypoint = entrypoint
|
2018-12-14 15:58:31 +00:00
|
|
|
cfg.Command = strings.Join(busyboxLongRunningCmd, " ")
|
2018-12-04 04:08:52 +00:00
|
|
|
cfg.Args = []string{}
|
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-05-27 01:08:25 +00:00
|
|
|
client, driver, handle, cleanup := dockerSetup(t, task, nil)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
require.Len(t, container.Config.Entrypoint, 2, "Expected two entrypoint elements")
|
|
|
|
require.Equal(t, entrypoint, container.Config.Entrypoint, "Incorrect entrypoint")
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_ReadonlyRootfs(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-01-07 13:27:06 +00:00
|
|
|
testutil.DockerCompatible(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2019-02-20 12:48:02 +00:00
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
t.Skip("Windows Docker does not support root filesystem in read-only mode")
|
|
|
|
}
|
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
task, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
2018-11-12 12:39:55 +00:00
|
|
|
cfg.ReadonlyRootfs = true
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2020-05-27 01:08:25 +00:00
|
|
|
client, driver, handle, cleanup := dockerSetup(t, task, nil)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
require.True(t, container.HostConfig.ReadonlyRootfs, "ReadonlyRootfs option not set")
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// fakeDockerClient can be used in places that accept an interface for the
|
|
|
|
// docker client such as createContainer.
|
|
|
|
type fakeDockerClient struct{}
|
|
|
|
|
|
|
|
func (fakeDockerClient) CreateContainer(docker.CreateContainerOptions) (*docker.Container, error) {
|
|
|
|
return nil, fmt.Errorf("volume is attached on another node")
|
|
|
|
}
|
|
|
|
func (fakeDockerClient) InspectContainer(id string) (*docker.Container, error) {
|
|
|
|
panic("not implemented")
|
|
|
|
}
|
|
|
|
func (fakeDockerClient) ListContainers(docker.ListContainersOptions) ([]docker.APIContainers, error) {
|
|
|
|
panic("not implemented")
|
|
|
|
}
|
|
|
|
func (fakeDockerClient) RemoveContainer(opts docker.RemoveContainerOptions) error {
|
|
|
|
panic("not implemented")
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDockerDriver_VolumeError asserts volume related errors when creating a
|
|
|
|
// container are recoverable.
|
|
|
|
func TestDockerDriver_VolumeError(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
// setup
|
2019-12-04 00:15:11 +00:00
|
|
|
_, cfg, ports := dockerTask(t)
|
|
|
|
defer freeport.Return(ports)
|
2018-11-12 12:39:55 +00:00
|
|
|
driver := dockerDriverHarness(t, nil)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
// assert volume error is recoverable
|
2019-06-15 02:32:55 +00:00
|
|
|
_, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg.Image)
|
2018-11-06 05:39:48 +00:00
|
|
|
require.True(t, structs.IsRecoverable(err))
|
|
|
|
}
|
|
|
|
|
|
|
|
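// TestDockerDriver_AdvertiseIPv6Address verifies that advertise_ipv6_address causes
// the driver to report the container's global IPv6 address and mark it for
// auto-advertisement.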
func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	expectedPrefix := "2001:db8:1::242:ac11"
	expectedAdvertise := true
	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.AdvertiseIPv6Addr = expectedAdvertise
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client := newTestDockerClient(t)

	// Make sure IPv6 is enabled
	net, err := client.NetworkInfo("bridge")
	if err != nil {
		t.Skip("error retrieving bridge network information, skipping")
	}
	if net == nil || !net.EnableIPv6 {
		t.Skip("IPv6 not enabled on bridge network, skipping")
	}

	driver := dockerDriverHarness(t, nil)
	cleanup := driver.MkAllocDir(task, true)
	copyImage(t, task.TaskDir(), "busybox.tar")
	defer cleanup()

	_, network, err := driver.StartTask(task)
	defer driver.DestroyTask(task.ID, true)
	require.NoError(t, err)

	require.Equal(t, expectedAdvertise, network.AutoAdvertise, "Wrong autoadvertise. Expect: %v, got: %v", expectedAdvertise, network.AutoAdvertise)

	if !strings.HasPrefix(network.IP, expectedPrefix) {
		t.Fatalf("Got IP address %q want ip address with prefix %q", network.IP, expectedPrefix)
	}

	handle, ok := driver.Impl().(*Driver).tasks.Get(task.ID)
	require.True(t, ok)

	require.NoError(t, driver.WaitUntilStarted(task.ID, time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	if !strings.HasPrefix(container.NetworkSettings.GlobalIPv6Address, expectedPrefix) {
		t.Fatalf("Got GlobalIPv6Address %q want GlobalIPv6Address with prefix %q", container.NetworkSettings.GlobalIPv6Address, expectedPrefix)
	}
}

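// TestParseDockerImage asserts parseDockerImage splits an image reference into
// repo and tag, defaulting the tag to "latest" and leaving digest references
// untagged.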
func TestParseDockerImage(t *testing.T) {
	ci.Parallel(t)

	tests := []struct {
		Image string
		Repo  string
		Tag   string
	}{
		{"library/hello-world:1.0", "library/hello-world", "1.0"},
		{"library/hello-world", "library/hello-world", "latest"},
		{"library/hello-world:latest", "library/hello-world", "latest"},
		{"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""},
	}
	for _, test := range tests {
		t.Run(test.Image, func(t *testing.T) {
			repo, tag := parseDockerImage(test.Image)
			require.Equal(t, test.Repo, repo)
			require.Equal(t, test.Tag, tag)
		})
	}
}

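// TestDockerImageRef asserts dockerImageRef is the inverse of parseDockerImage,
// joining a repo and tag back into a single image reference.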
func TestDockerImageRef(t *testing.T) {
	ci.Parallel(t)
	tests := []struct {
		Image string
		Repo  string
		Tag   string
	}{
		{"library/hello-world:1.0", "library/hello-world", "1.0"},
		{"library/hello-world:latest", "library/hello-world", "latest"},
		{"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""},
	}
	for _, test := range tests {
		t.Run(test.Image, func(t *testing.T) {
			image := dockerImageRef(test.Repo, test.Tag)
			require.Equal(t, test.Image, image)
		})
	}
}

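// waitForExist blocks until the given container can be inspected, tolerating
// NoSuchContainer errors while the container is still being created.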
func waitForExist(t *testing.T, client *docker.Client, containerID string) {
	tu.WaitForResult(func() (bool, error) {
		container, err := client.InspectContainer(containerID)
		if err != nil {
			if _, ok := err.(*docker.NoSuchContainer); !ok {
				return false, err
			}
		}

		return container != nil, nil
	}, func(err error) {
		require.NoError(t, err)
	})
}

// TestDockerDriver_CreationIdempotent asserts that the createContainer and
// startContainer functions are idempotent, as we have some retry logic there
// without ensuring we delete/destroy containers.
func TestDockerDriver_CreationIdempotent(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client := newTestDockerClient(t)
	driver := dockerDriverHarness(t, nil)
	cleanup := driver.MkAllocDir(task, true)
	defer cleanup()

	copyImage(t, task.TaskDir(), "busybox.tar")

	d, ok := driver.Impl().(*Driver)
	require.True(t, ok)

	_, err := d.createImage(task, cfg, client)
	require.NoError(t, err)

	containerCfg, err := d.createContainerConfig(task, cfg, cfg.Image)
	require.NoError(t, err)

	c, err := d.createContainer(client, containerCfg, cfg.Image)
	require.NoError(t, err)
	defer client.RemoveContainer(docker.RemoveContainerOptions{
		ID:    c.ID,
		Force: true,
	})

	// calling createContainer again creates a new one and removes the old one
	c2, err := d.createContainer(client, containerCfg, cfg.Image)
	require.NoError(t, err)
	defer client.RemoveContainer(docker.RemoveContainerOptions{
		ID:    c2.ID,
		Force: true,
	})

	require.NotEqual(t, c.ID, c2.ID)
	// old container was destroyed
	{
		_, err := client.InspectContainer(c.ID)
		require.Error(t, err)
		require.Contains(t, err.Error(), NoSuchContainerError)
	}

	// now start container twice
	require.NoError(t, d.startContainer(c2))
	require.NoError(t, d.startContainer(c2))

	tu.WaitForResult(func() (bool, error) {
		c, err := client.InspectContainer(c2.ID)
		if err != nil {
			return false, fmt.Errorf("failed to get container status: %v", err)
		}

		if !c.State.Running {
			return false, fmt.Errorf("container is not running but %v", c.State)
		}

		return true, nil
	}, func(err error) {
		require.NoError(t, err)
	})
}

// TestDockerDriver_CreateContainerConfig_CPUHardLimit asserts that a default
// CPU quota and period are set when cpu_hard_limit = true.
func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) {
	ci.Parallel(t)

	task, _, ports := dockerTask(t)
	defer freeport.Return(ports)

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)
	schema, _ := driver.TaskConfigSchema()
	spec, _ := hclspecutils.Convert(schema)

	val, _, _ := hclutils.ParseHclInterface(map[string]interface{}{
		"image":          "foo/bar",
		"cpu_hard_limit": true,
	}, spec, nil)

	require.NoError(t, task.EncodeDriverConfig(val))
	cfg := &TaskConfig{}
	require.NoError(t, task.DecodeDriverConfig(cfg))
	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.NoError(t, err)

	require.NotZero(t, c.HostConfig.CPUQuota)
	require.NotZero(t, c.HostConfig.CPUPeriod)
}

func TestDockerDriver_memoryLimits(t *testing.T) {
	ci.Parallel(t)

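	// The cases below pin down memoryLimits: the hard limit is the largest of
	// the driver-level memory setting and the task's MemoryMB/MemoryMaxMB,
	// while the soft limit falls back to MemoryMB only when a higher hard
	// limit applies (otherwise it is 0).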
	cases := []struct {
		name           string
		driverMemoryMB int64
		taskResources  drivers.MemoryResources
		expectedHard   int64
		expectedSoft   int64
	}{
		{
			"plain request",
			0,
			drivers.MemoryResources{MemoryMB: 10},
			10 * 1024 * 1024,
			0,
		},
		{
			"with driver max",
			20,
			drivers.MemoryResources{MemoryMB: 10},
			20 * 1024 * 1024,
			10 * 1024 * 1024,
		},
		{
			"with resources max",
			20,
			drivers.MemoryResources{MemoryMB: 10, MemoryMaxMB: 20},
			20 * 1024 * 1024,
			10 * 1024 * 1024,
		},
		{
			"with driver and resources max: higher driver",
			30,
			drivers.MemoryResources{MemoryMB: 10, MemoryMaxMB: 20},
			30 * 1024 * 1024,
			10 * 1024 * 1024,
		},
		{
			"with driver and resources max: higher resources",
			20,
			drivers.MemoryResources{MemoryMB: 10, MemoryMaxMB: 30},
			30 * 1024 * 1024,
			10 * 1024 * 1024,
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			hard, soft := memoryLimits(c.driverMemoryMB, c.taskResources)
			require.Equal(t, c.expectedHard, hard)
			require.Equal(t, c.expectedSoft, soft)
		})
	}
}

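// TestDockerDriver_cgroupParent asserts that a cgroup parent is only reported
// on cgroups v2, where it is the final element of the task's cpuset cgroup
// path; on cgroups v1 an empty string is returned.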
func TestDockerDriver_cgroupParent(t *testing.T) {
	ci.Parallel(t)

	t.Run("v1", func(t *testing.T) {
		testutil.CgroupsCompatibleV1(t)

		parent := cgroupParent(&drivers.Resources{
			LinuxResources: &drivers.LinuxResources{
				CpusetCgroupPath: "/sys/fs/cgroup/cpuset/nomad",
			},
		})
		require.Equal(t, "", parent)
	})

	t.Run("v2", func(t *testing.T) {
		testutil.CgroupsCompatibleV2(t)

		parent := cgroupParent(&drivers.Resources{
			LinuxResources: &drivers.LinuxResources{
				CpusetCgroupPath: "/sys/fs/cgroup/nomad.slice",
			},
		})
		require.Equal(t, "nomad.slice", parent)
	})
}

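// TestDockerDriver_parseSignal asserts the defaulting and platform handling in
// parseSignal: an empty signal falls back to SIGTERM, SIGINT is mapped to
// SIGTERM on Windows, and unknown signal names return an error.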
func TestDockerDriver_parseSignal(t *testing.T) {
	ci.Parallel(t)

	tests := []struct {
		name            string
		runtime         string
		specifiedSignal string
		expectedSignal  string
	}{
		{
			name:            "default",
			runtime:         runtime.GOOS,
			specifiedSignal: "",
			expectedSignal:  "SIGTERM",
		},
		{
			name:            "set",
			runtime:         runtime.GOOS,
			specifiedSignal: "SIGHUP",
			expectedSignal:  "SIGHUP",
		},
		{
			name:            "windows conversion",
			runtime:         "windows",
			specifiedSignal: "SIGINT",
			expectedSignal:  "SIGTERM",
		},
		{
			name:            "not signal",
			runtime:         runtime.GOOS,
			specifiedSignal: "SIGDOESNOTEXIST",
			expectedSignal:  "", // throws error
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			s, err := parseSignal(tc.runtime, tc.specifiedSignal)
			if tc.expectedSignal == "" {
				require.Error(t, err, "invalid signal")
			} else {
				require.NoError(t, err)
				require.Equal(t, s.(syscall.Signal), s)
			}
		})
	}
}

// This test asserts that Nomad isn't overriding the STOPSIGNAL in a Dockerfile
func TestDockerDriver_StopSignal(t *testing.T) {
	ci.Parallel(t)
	testutil.DockerCompatible(t)
	if runtime.GOOS == "windows" {
		t.Skip("Skipped on windows, we don't have image variants available")
	}

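	// Each case lists the docker "kill" events expected while stopping the
	// task: the job's kill signal (if set), then the image's STOPSIGNAL (19
	// for the stopsignal image variant, SIGTERM/15 otherwise), and finally
	// SIGKILL (9) once the kill timeout expires.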
	cases := []struct {
		name            string
		variant         string
		jobKillSignal   string
		expectedSignals []string
	}{
		{
			name:            "stopsignal-only",
			variant:         "stopsignal",
			jobKillSignal:   "",
			expectedSignals: []string{"19", "9"},
		},
		{
			name:            "stopsignal-killsignal",
			variant:         "stopsignal",
			jobKillSignal:   "SIGTERM",
			expectedSignals: []string{"15", "19", "9"},
		},
		{
			name:            "killsignal-only",
			variant:         "",
			jobKillSignal:   "SIGTERM",
			expectedSignals: []string{"15", "15", "9"},
		},
		{
			name:            "nosignals-default",
			variant:         "",
			jobKillSignal:   "",
			expectedSignals: []string{"15", "9"},
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			taskCfg := newTaskConfig(c.variant, []string{"sleep", "9901"})

			task := &drivers.TaskConfig{
				ID:        uuid.Generate(),
				Name:      c.name,
				AllocID:   uuid.Generate(),
				Resources: basicResources,
			}
			require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

			d := dockerDriverHarness(t, nil)
			cleanup := d.MkAllocDir(task, true)
			defer cleanup()

			if c.variant == "stopsignal" {
				copyImage(t, task.TaskDir(), "busybox_stopsignal.tar") // Default busybox image with STOPSIGNAL 19 added
			} else {
				copyImage(t, task.TaskDir(), "busybox.tar")
			}

			client := newTestDockerClient(t)

			listener := make(chan *docker.APIEvents)
			err := client.AddEventListener(listener)
			require.NoError(t, err)

			defer func() {
				err := client.RemoveEventListener(listener)
				require.NoError(t, err)
			}()

			_, _, err = d.StartTask(task)
			require.NoError(t, err)
			require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

			stopErr := make(chan error, 1)
			go func() {
				err := d.StopTask(task.ID, 1*time.Second, c.jobKillSignal)
				stopErr <- err
			}()

			timeout := time.After(10 * time.Second)
			var receivedSignals []string
		WAIT:
			for {
				select {
				case msg := <-listener:
					// Only add kill signals
					if msg.Action == "kill" {
						sig := msg.Actor.Attributes["signal"]
						receivedSignals = append(receivedSignals, sig)

						if reflect.DeepEqual(receivedSignals, c.expectedSignals) {
							break WAIT
						}
					}
				case err := <-stopErr:
					require.NoError(t, err, "stop task failed")
				case <-timeout:
					// timeout waiting for signals
					require.Equal(t, c.expectedSignals, receivedSignals, "timed out waiting for expected signals")
				}
			}
		})
	}
}