2018-11-06 05:39:48 +00:00
|
|
|
package docker
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
2018-11-09 04:38:47 +00:00
|
|
|
"io"
|
2018-11-06 05:39:48 +00:00
|
|
|
"io/ioutil"
|
|
|
|
"math/rand"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"reflect"
|
|
|
|
"runtime"
|
|
|
|
"runtime/debug"
|
2018-12-04 21:46:16 +00:00
|
|
|
"sort"
|
2018-11-06 05:39:48 +00:00
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2018-11-27 19:03:58 +00:00
|
|
|
dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
docker "github.com/fsouza/go-dockerclient"
|
|
|
|
"github.com/hashicorp/consul/lib/freeport"
|
|
|
|
hclog "github.com/hashicorp/go-hclog"
|
|
|
|
"github.com/hashicorp/nomad/client/allocdir"
|
2018-11-30 11:18:39 +00:00
|
|
|
"github.com/hashicorp/nomad/client/taskenv"
|
2018-11-06 05:39:48 +00:00
|
|
|
"github.com/hashicorp/nomad/client/testutil"
|
|
|
|
"github.com/hashicorp/nomad/helper/testlog"
|
|
|
|
"github.com/hashicorp/nomad/helper/uuid"
|
|
|
|
"github.com/hashicorp/nomad/nomad/structs"
|
|
|
|
"github.com/hashicorp/nomad/plugins/base"
|
|
|
|
"github.com/hashicorp/nomad/plugins/drivers"
|
|
|
|
"github.com/hashicorp/nomad/plugins/shared/loader"
|
|
|
|
tu "github.com/hashicorp/nomad/testutil"
|
|
|
|
"github.com/stretchr/testify/assert"
|
|
|
|
"github.com/stretchr/testify/require"
|
|
|
|
)
|
|
|
|
|
|
|
|
var (
|
|
|
|
basicResources = &drivers.Resources{
|
|
|
|
NomadResources: &structs.Resources{
|
|
|
|
MemoryMB: 256,
|
2018-12-14 16:06:14 +00:00
|
|
|
CPU: 512,
|
2018-11-06 05:39:48 +00:00
|
|
|
DiskMB: 20,
|
|
|
|
},
|
|
|
|
LinuxResources: &drivers.LinuxResources{
|
2018-12-14 16:06:14 +00:00
|
|
|
CPUShares: 512,
|
2018-11-06 05:39:48 +00:00
|
|
|
MemoryLimitBytes: 256 * 1024 * 1024,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
)
|
|
|
|
|
|
|
|
func dockerIsRemote(t *testing.T) bool {
|
|
|
|
client, err := docker.NewClientFromEnv()
|
|
|
|
if err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// Technically this could be a local tcp socket but for testing purposes
|
|
|
|
// we'll just assume that tcp is only used for remote connections.
|
|
|
|
if client.Endpoint()[0:3] == "tcp" {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2018-12-14 15:58:31 +00:00
|
|
|
var (
	// busyboxImageID is the ID stored in busybox.tar
	busyboxImageID = "busybox:1.29.3"

	// busyboxGlibcImageID is the ID stored in busybox_glibc.tar
	busyboxGlibcImageID = "busybox:1.29.3-glibc"

	// busyboxMuslImageID is the ID stored in busybox_musl.tar
	busyboxMuslImageID = "busybox:1.29.3-musl"

	// busyboxLongRunningCmd is a busybox command that runs indefinitely, and
	// ideally responds to SIGINT/SIGTERM. Sadly, busybox:1.29.3 /bin/sleep doesn't.
	busyboxLongRunningCmd = []string{"/bin/nc", "-l", "-p", "3000", "127.0.0.1"}
)
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
// Returns a task with a reserved and dynamic port. The ports are returned
|
|
|
|
// respectively.
|
2018-11-09 04:38:47 +00:00
|
|
|
func dockerTask(t *testing.T) (*drivers.TaskConfig, *TaskConfig, []int) {
|
2018-11-06 05:39:48 +00:00
|
|
|
ports := freeport.GetT(t, 2)
|
|
|
|
dockerReserved := ports[0]
|
|
|
|
dockerDynamic := ports[1]
|
|
|
|
|
|
|
|
cfg := TaskConfig{
|
2018-12-14 15:58:31 +00:00
|
|
|
Image: busyboxImageID,
|
2018-11-06 05:39:48 +00:00
|
|
|
LoadImage: "busybox.tar",
|
2018-12-14 15:58:31 +00:00
|
|
|
Command: busyboxLongRunningCmd[0],
|
|
|
|
Args: busyboxLongRunningCmd[1:],
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "redis-demo",
|
|
|
|
Resources: &drivers.Resources{
|
|
|
|
NomadResources: &structs.Resources{
|
|
|
|
MemoryMB: 256,
|
|
|
|
CPU: 512,
|
|
|
|
Networks: []*structs.NetworkResource{
|
|
|
|
{
|
|
|
|
IP: "127.0.0.1",
|
|
|
|
ReservedPorts: []structs.Port{{Label: "main", Value: dockerReserved}},
|
|
|
|
DynamicPorts: []structs.Port{{Label: "REDIS", Value: dockerDynamic}},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
LinuxResources: &drivers.LinuxResources{
|
|
|
|
CPUShares: 512,
|
|
|
|
MemoryLimitBytes: 256 * 1024 * 1024,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(&cfg))
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
return task, &cfg, ports
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// dockerSetup does all of the basic setup you need to get a running docker
|
|
|
|
// process up and running for testing. Use like:
|
|
|
|
//
|
|
|
|
// task := taskTemplate()
|
|
|
|
// // do custom task configuration
|
|
|
|
// client, handle, cleanup := dockerSetup(t, task)
|
|
|
|
// defer cleanup()
|
|
|
|
// // do test stuff
|
|
|
|
//
|
|
|
|
// If there is a problem during setup this function will abort or skip the test
|
|
|
|
// and indicate the reason.
|
2018-11-27 19:03:58 +00:00
|
|
|
func dockerSetup(t *testing.T, task *drivers.TaskConfig) (*docker.Client, *dtestutil.DriverHarness, *taskHandle, func()) {
|
2018-11-06 05:39:48 +00:00
|
|
|
client := newTestDockerClient(t)
|
2018-11-09 04:38:47 +00:00
|
|
|
driver := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := driver.MkAllocDir(task, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
|
|
|
_, _, err := driver.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
dockerDriver, ok := driver.Impl().(*Driver)
|
|
|
|
require.True(t, ok)
|
|
|
|
handle, ok := dockerDriver.tasks.Get(task.ID)
|
|
|
|
require.True(t, ok)
|
|
|
|
|
|
|
|
return client, driver, handle, func() {
|
|
|
|
driver.DestroyTask(task.ID, true)
|
|
|
|
cleanup()
|
|
|
|
}
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// dockerDriverHarness wires up everything needed to launch a task with a docker driver.
|
|
|
|
// A driver plugin interface and cleanup function is returned
|
2018-11-27 19:03:58 +00:00
|
|
|
func dockerDriverHarness(t *testing.T, cfg map[string]interface{}) *dtestutil.DriverHarness {
|
2018-11-06 05:39:48 +00:00
|
|
|
logger := testlog.HCLogger(t)
|
2018-11-27 19:03:58 +00:00
|
|
|
harness := dtestutil.NewDriverHarness(t, NewDockerDriver(logger))
|
2018-11-09 04:38:47 +00:00
|
|
|
if cfg == nil {
|
|
|
|
cfg = map[string]interface{}{
|
2018-11-20 03:58:05 +00:00
|
|
|
"gc": map[string]interface{}{
|
|
|
|
"image_delay": "1s",
|
|
|
|
},
|
2018-11-09 04:38:47 +00:00
|
|
|
}
|
|
|
|
}
|
2018-11-06 05:39:48 +00:00
|
|
|
plugLoader, err := loader.NewPluginLoader(&loader.PluginLoaderConfig{
|
|
|
|
Logger: logger,
|
|
|
|
PluginDir: "./plugins",
|
|
|
|
InternalPlugins: map[loader.PluginID]*loader.InternalPluginConfig{
|
2018-11-25 16:53:21 +00:00
|
|
|
PluginID: {
|
2018-11-09 04:38:47 +00:00
|
|
|
Config: cfg,
|
2018-11-06 05:39:48 +00:00
|
|
|
Factory: func(hclog.Logger) interface{} {
|
|
|
|
return harness
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
})
|
|
|
|
|
|
|
|
require.NoError(t, err)
|
|
|
|
instance, err := plugLoader.Dispense(pluginName, base.PluginTypeDriver, nil, logger)
|
|
|
|
require.NoError(t, err)
|
2018-11-27 19:03:58 +00:00
|
|
|
driver, ok := instance.Plugin().(*dtestutil.DriverHarness)
|
2018-11-06 05:39:48 +00:00
|
|
|
if !ok {
|
|
|
|
t.Fatal("plugin instance is not a driver... wat?")
|
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
return driver
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func newTestDockerClient(t *testing.T) *docker.Client {
|
|
|
|
t.Helper()
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
|
|
|
|
client, err := docker.NewClientFromEnv()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack())
|
|
|
|
}
|
|
|
|
return client
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
// This test should always pass, even if docker daemon is not available
|
|
|
|
func TestDockerDriver_Fingerprint(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
|
|
|
|
ctx := testDockerDriverContexts(t, &structs.Task{Name: "foo", Driver: "docker", Resources: basicResources})
|
|
|
|
//ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"}
|
|
|
|
defer ctx.Destroy()
|
|
|
|
d := NewDockerDriver(ctx.DriverCtx)
|
|
|
|
node := &structs.Node{
|
|
|
|
Attributes: make(map[string]string),
|
|
|
|
}
|
|
|
|
|
2018-12-01 16:10:39 +00:00
|
|
|
request := &fingerprint.FingerprintRequest{Config: &config.Config{}, Node: node}
|
|
|
|
var response fingerprint.FingerprintResponse
|
2018-11-06 05:39:48 +00:00
|
|
|
err := d.Fingerprint(request, &response)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
attributes := response.Attributes
|
|
|
|
if testutil.DockerIsConnected(t) && attributes["driver.docker"] == "" {
|
|
|
|
t.Fatalf("Fingerprinter should detect when docker is available")
|
|
|
|
}
|
|
|
|
|
|
|
|
if attributes["driver.docker"] != "1" {
|
|
|
|
t.Log("Docker daemon not available. The remainder of the docker tests will be skipped.")
|
|
|
|
} else {
|
|
|
|
|
|
|
|
// if docker is available, make sure that the response is tagged as
|
|
|
|
// applicable
|
|
|
|
if !response.Detected {
|
|
|
|
t.Fatalf("expected response to be applicable")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Logf("Found docker version %s", attributes["driver.docker.version"])
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDockerDriver_Fingerprint_Bridge asserts that if Docker is running we set
|
|
|
|
// the bridge network's IP as a node attribute. See #2785
|
|
|
|
func TestDockerDriver_Fingerprint_Bridge(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("requires Docker")
|
|
|
|
}
|
|
|
|
if runtime.GOOS != "linux" {
|
|
|
|
t.Skip("expect only on linux")
|
|
|
|
}
|
|
|
|
|
|
|
|
// This seems fragile, so we might need to reconsider this test if it
|
|
|
|
// proves flaky
|
|
|
|
expectedAddr, err := sockaddr.GetInterfaceIP("docker0")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to get ip for docker0: %v", err)
|
|
|
|
}
|
|
|
|
if expectedAddr == "" {
|
|
|
|
t.Fatalf("unable to get ip for docker bridge")
|
|
|
|
}
|
|
|
|
|
|
|
|
conf := testConfig(t)
|
|
|
|
conf.Node = mock.Node()
|
|
|
|
dd := NewDockerDriver(NewDriverContext("", "", "", "", conf, conf.Node, testlog.Logger(t), nil))
|
|
|
|
|
2018-12-01 16:10:39 +00:00
|
|
|
request := &fingerprint.FingerprintRequest{Config: conf, Node: conf.Node}
|
|
|
|
var response fingerprint.FingerprintResponse
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
err = dd.Fingerprint(request, &response)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("error fingerprinting docker: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if !response.Detected {
|
|
|
|
t.Fatalf("expected response to be applicable")
|
|
|
|
}
|
|
|
|
|
|
|
|
attributes := response.Attributes
|
|
|
|
if attributes == nil {
|
|
|
|
t.Fatalf("expected attributes to be set")
|
|
|
|
}
|
|
|
|
|
|
|
|
if attributes["driver.docker"] == "" {
|
|
|
|
t.Fatalf("expected Docker to be enabled but false was returned")
|
|
|
|
}
|
|
|
|
|
|
|
|
if found := attributes["driver.docker.bridge_ip"]; found != expectedAddr {
|
|
|
|
t.Fatalf("expected bridge ip %q but found: %q", expectedAddr, found)
|
|
|
|
}
|
|
|
|
t.Logf("docker bridge ip: %q", attributes["driver.docker.bridge_ip"])
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Check_DockerHealthStatus(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("requires Docker")
|
|
|
|
}
|
|
|
|
if runtime.GOOS != "linux" {
|
|
|
|
t.Skip("expect only on linux")
|
|
|
|
}
|
|
|
|
|
|
|
|
require := require.New(t)
|
|
|
|
|
|
|
|
expectedAddr, err := sockaddr.GetInterfaceIP("docker0")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to get ip for docker0: %v", err)
|
|
|
|
}
|
|
|
|
if expectedAddr == "" {
|
|
|
|
t.Fatalf("unable to get ip for docker bridge")
|
|
|
|
}
|
|
|
|
|
|
|
|
conf := testConfig(t)
|
|
|
|
conf.Node = mock.Node()
|
|
|
|
dd := NewDockerDriver(NewDriverContext("", "", "", "", conf, conf.Node, testlog.Logger(t), nil))
|
|
|
|
|
|
|
|
request := &cstructs.HealthCheckRequest{}
|
|
|
|
var response cstructs.HealthCheckResponse
|
|
|
|
|
|
|
|
dc, ok := dd.(fingerprint.HealthCheck)
|
|
|
|
require.True(ok)
|
|
|
|
err = dc.HealthCheck(request, &response)
|
|
|
|
require.Nil(err)
|
|
|
|
|
|
|
|
driverInfo := response.Drivers["docker"]
|
|
|
|
require.NotNil(driverInfo)
|
|
|
|
require.True(driverInfo.Healthy)
|
|
|
|
}*/
|
|
|
|
|
|
|
|
func TestDockerDriver_Start_Wait(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
|
|
|
|
taskCfg := TaskConfig{
|
2018-12-14 15:58:31 +00:00
|
|
|
Image: busyboxImageID,
|
2018-11-06 05:39:48 +00:00
|
|
|
LoadImage: "busybox.tar",
|
2018-12-14 15:58:31 +00:00
|
|
|
Command: busyboxLongRunningCmd[0],
|
|
|
|
Args: busyboxLongRunningCmd[1:],
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "nc-demo",
|
|
|
|
Resources: basicResources,
|
|
|
|
}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
_, _, err := d.StartTask(task)
|
2018-11-06 05:39:48 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
defer d.DestroyTask(task.ID, true)
|
|
|
|
|
|
|
|
// Attempt to wait
|
|
|
|
waitCh, err := d.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
select {
|
2018-11-09 04:38:47 +00:00
|
|
|
case <-waitCh:
|
2018-11-14 11:20:35 +00:00
|
|
|
t.Fatalf("wait channel should not have received an exit result")
|
2018-11-06 05:39:48 +00:00
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*1) * time.Second):
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
func TestDockerDriver_Start_WaitFinish(t *testing.T) {
|
2018-11-06 05:39:48 +00:00
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
|
|
|
|
taskCfg := TaskConfig{
|
2018-12-14 15:58:31 +00:00
|
|
|
Image: busyboxImageID,
|
2018-11-09 04:38:47 +00:00
|
|
|
LoadImage: "busybox.tar",
|
|
|
|
Command: "/bin/echo",
|
|
|
|
Args: []string{"hello"},
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "nc-demo",
|
|
|
|
Resources: basicResources,
|
|
|
|
}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-09 04:38:47 +00:00
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
_, _, err := d.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
defer d.DestroyTask(task.ID, true)
|
|
|
|
|
|
|
|
// Attempt to wait
|
|
|
|
waitCh, err := d.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
select {
|
2018-11-09 04:38:47 +00:00
|
|
|
case res := <-waitCh:
|
2018-11-06 05:39:48 +00:00
|
|
|
if !res.Successful() {
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Fail(t, "ExitResult should be successful: %v", res)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Fail(t, "timeout")
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDockerDriver_Start_StoppedContainer asserts that Nomad will detect a
|
|
|
|
// stopped task container, remove it, and start a new container.
|
|
|
|
//
|
|
|
|
// See https://github.com/hashicorp/nomad/issues/3419
|
|
|
|
func TestDockerDriver_Start_StoppedContainer(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
taskCfg := TaskConfig{
|
2018-12-14 15:58:31 +00:00
|
|
|
Image: busyboxImageID,
|
2018-11-09 04:38:47 +00:00
|
|
|
LoadImage: "busybox.tar",
|
|
|
|
Command: "sleep",
|
|
|
|
Args: []string{"9001"},
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "nc-demo",
|
|
|
|
Resources: basicResources,
|
|
|
|
}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
|
|
|
|
|
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
|
|
|
defer cleanup()
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-12-04 12:31:30 +00:00
|
|
|
client := newTestDockerClient(t)
|
|
|
|
imageID, err := d.Impl().(*Driver).loadImage(task, &taskCfg, client)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotEmpty(t, imageID)
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
// Create a container of the same name but don't start it. This mimics
|
|
|
|
// the case of dockerd getting restarted and stopping containers while
|
|
|
|
// Nomad is watching them.
|
|
|
|
opts := docker.CreateContainerOptions{
|
2018-11-09 04:38:47 +00:00
|
|
|
Name: strings.Replace(task.ID, "/", "_", -1),
|
2018-11-06 05:39:48 +00:00
|
|
|
Config: &docker.Config{
|
2018-12-14 15:58:31 +00:00
|
|
|
Image: busyboxImageID,
|
2018-11-06 05:39:48 +00:00
|
|
|
Cmd: []string{"sleep", "9000"},
|
|
|
|
},
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
if _, err := client.CreateContainer(opts); err != nil {
|
|
|
|
t.Fatalf("error creating initial container: %v", err)
|
|
|
|
}
|
|
|
|
|
2018-12-04 12:31:30 +00:00
|
|
|
_, _, err = d.StartTask(task)
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
defer d.DestroyTask(task.ID, true)
|
|
|
|
|
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Start_LoadImage(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
|
|
|
|
taskCfg := TaskConfig{
|
2018-12-14 15:58:31 +00:00
|
|
|
Image: busyboxImageID,
|
2018-11-09 04:38:47 +00:00
|
|
|
LoadImage: "busybox.tar",
|
|
|
|
Command: "/bin/sh",
|
|
|
|
Args: []string{
|
|
|
|
"-c",
|
|
|
|
"echo hello > $NOMAD_TASK_DIR/output",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "busybox-demo",
|
|
|
|
Resources: basicResources,
|
|
|
|
}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
|
|
|
defer cleanup()
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
_, _, err := d.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
defer d.DestroyTask(task.ID, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
waitCh, err := d.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
select {
|
2018-11-09 04:38:47 +00:00
|
|
|
case res := <-waitCh:
|
2018-11-06 05:39:48 +00:00
|
|
|
if !res.Successful() {
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Fail(t, "ExitResult should be successful: %v", res)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Fail(t, "timeout")
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check that data was written to the shared alloc directory.
|
2018-11-09 04:38:47 +00:00
|
|
|
outputFile := filepath.Join(task.TaskDir().LocalDir, "output")
|
2018-11-06 05:39:48 +00:00
|
|
|
act, err := ioutil.ReadFile(outputFile)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Couldn't read expected output: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
exp := "hello"
|
|
|
|
if strings.TrimSpace(string(act)) != exp {
|
|
|
|
t.Fatalf("Command outputted %v; want %v", act, exp)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
|
|
|
|
taskCfg := TaskConfig{
|
|
|
|
Image: "127.0.0.1:32121/foo", // bad path
|
|
|
|
Command: "/bin/echo",
|
|
|
|
Args: []string{
|
|
|
|
"hello",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "busybox-demo",
|
|
|
|
Resources: basicResources,
|
|
|
|
}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
|
|
|
defer cleanup()
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
_, _, err := d.StartTask(task)
|
|
|
|
require.Error(t, err)
|
|
|
|
|
|
|
|
defer d.DestroyTask(task.ID, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
if rerr, ok := err.(*structs.RecoverableError); !ok {
|
|
|
|
t.Fatalf("want recoverable error: %+v", err)
|
|
|
|
} else if !rerr.IsRecoverable() {
|
|
|
|
t.Fatalf("error not recoverable: %+v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
// This test requires that the alloc dir be mounted into docker as a volume.
|
|
|
|
// Because this cannot happen when docker is run remotely, e.g. when running
|
|
|
|
// docker in a VM, we skip this when we detect Docker is being run remotely.
|
|
|
|
if !testutil.DockerIsConnected(t) || dockerIsRemote(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
|
|
|
|
exp := []byte{'w', 'i', 'n'}
|
|
|
|
file := "output.txt"
|
2018-11-09 04:38:47 +00:00
|
|
|
taskCfg := TaskConfig{
|
2018-12-14 15:58:31 +00:00
|
|
|
Image: busyboxImageID,
|
2018-11-09 04:38:47 +00:00
|
|
|
LoadImage: "busybox.tar",
|
|
|
|
Command: "/bin/sh",
|
|
|
|
Args: []string{
|
|
|
|
"-c",
|
|
|
|
fmt.Sprintf(`sleep 1; echo -n %s > $%s/%s`,
|
2018-11-30 11:18:39 +00:00
|
|
|
string(exp), taskenv.AllocDir, file),
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "busybox-demo",
|
|
|
|
Resources: basicResources,
|
|
|
|
}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
|
|
|
defer cleanup()
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
_, _, err := d.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
defer d.DestroyTask(task.ID, true)
|
|
|
|
|
|
|
|
// Attempt to wait
|
|
|
|
waitCh, err := d.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
select {
|
2018-11-09 04:38:47 +00:00
|
|
|
case res := <-waitCh:
|
2018-11-06 05:39:48 +00:00
|
|
|
if !res.Successful() {
|
2018-11-12 12:39:55 +00:00
|
|
|
require.Fail(t, fmt.Sprintf("ExitResult should be successful: %v", res))
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Fail(t, "timeout")
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check that data was written to the shared alloc directory.
|
2018-11-09 04:38:47 +00:00
|
|
|
outputFile := filepath.Join(task.TaskDir().SharedAllocDir, file)
|
2018-11-06 05:39:48 +00:00
|
|
|
act, err := ioutil.ReadFile(outputFile)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Couldn't read expected output: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if !reflect.DeepEqual(act, exp) {
|
|
|
|
t.Fatalf("Command outputted %v; want %v", act, exp)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
|
|
|
|
taskCfg := TaskConfig{
|
2018-12-14 15:58:31 +00:00
|
|
|
Image: busyboxImageID,
|
2018-11-09 04:38:47 +00:00
|
|
|
LoadImage: "busybox.tar",
|
2018-12-14 15:58:31 +00:00
|
|
|
Command: busyboxLongRunningCmd[0],
|
|
|
|
Args: busyboxLongRunningCmd[1:],
|
2018-11-09 04:38:47 +00:00
|
|
|
}
|
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "busybox-demo",
|
2018-11-06 05:39:48 +00:00
|
|
|
Resources: basicResources,
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-09 04:38:47 +00:00
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
|
|
|
|
|
|
|
_, _, err := d.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
defer d.DestroyTask(task.ID, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
go func(t *testing.T) {
|
2018-11-06 05:39:48 +00:00
|
|
|
time.Sleep(100 * time.Millisecond)
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, d.StopTask(task.ID, time.Second, "SIGINT"))
|
2018-11-12 12:39:55 +00:00
|
|
|
}(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
// Attempt to wait
|
|
|
|
waitCh, err := d.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
select {
|
2018-11-09 04:38:47 +00:00
|
|
|
case res := <-waitCh:
|
2018-11-06 05:39:48 +00:00
|
|
|
if res.Successful() {
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Fail(t, "ExitResult should err: %v", res)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
|
|
|
|
require.Fail(t, "timeout")
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Start_KillTimeout(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
timeout := 2 * time.Second
|
2018-11-09 04:38:47 +00:00
|
|
|
taskCfg := TaskConfig{
|
2018-12-14 15:58:31 +00:00
|
|
|
Image: busyboxImageID,
|
2018-11-09 04:38:47 +00:00
|
|
|
LoadImage: "busybox.tar",
|
|
|
|
Command: "/bin/sleep",
|
|
|
|
Args: []string{
|
|
|
|
"10",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "busybox-demo",
|
|
|
|
Resources: basicResources,
|
|
|
|
}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-09 04:38:47 +00:00
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
|
|
|
|
|
|
|
_, _, err := d.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
defer d.DestroyTask(task.ID, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
var killSent time.Time
|
2018-11-06 05:39:48 +00:00
|
|
|
go func() {
|
2018-11-09 04:38:47 +00:00
|
|
|
time.Sleep(100 * time.Millisecond)
|
2018-11-06 05:39:48 +00:00
|
|
|
killSent = time.Now()
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, d.StopTask(task.ID, timeout, "SIGUSR1"))
|
2018-11-06 05:39:48 +00:00
|
|
|
}()
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
// Attempt to wait
|
|
|
|
waitCh, err := d.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
var killed time.Time
|
2018-11-06 05:39:48 +00:00
|
|
|
select {
|
2018-11-09 04:38:47 +00:00
|
|
|
case <-waitCh:
|
2018-11-06 05:39:48 +00:00
|
|
|
killed = time.Now()
|
2018-11-09 04:38:47 +00:00
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
|
|
|
|
require.Fail(t, "timeout")
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
require.True(t, killed.Sub(killSent) > timeout)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_StartN(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
require := require.New(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
task1, _, _ := dockerTask(t)
|
|
|
|
task2, _, _ := dockerTask(t)
|
|
|
|
task3, _, _ := dockerTask(t)
|
2018-11-09 04:38:47 +00:00
|
|
|
taskList := []*drivers.TaskConfig{task1, task2, task3}
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
t.Logf("Starting %d tasks", len(taskList))
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
d := dockerDriverHarness(t, nil)
|
2018-11-06 05:39:48 +00:00
|
|
|
// Let's spin up a bunch of things
|
2018-11-09 04:38:47 +00:00
|
|
|
for _, task := range taskList {
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
|
|
|
defer cleanup()
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
|
|
|
_, _, err := d.StartTask(task)
|
|
|
|
require.NoError(err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
defer d.DestroyTask(task.ID, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
t.Log("All tasks are started. Terminating...")
|
2018-11-09 04:38:47 +00:00
|
|
|
for _, task := range taskList {
|
|
|
|
require.NoError(d.StopTask(task.ID, time.Second, "SIGINT"))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
// Attempt to wait
|
|
|
|
waitCh, err := d.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
select {
|
|
|
|
case <-waitCh:
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
|
|
|
|
require.Fail("timeout waiting on task")
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Log("Test complete!")
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDockerDriver_StartNVersions starts three tasks, each using a different
// busybox image variant (default, musl, glibc), verifies all start, then
// stops each and waits for termination. Exercises per-task image loading.
func TestDockerDriver_StartNVersions(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}
	require := require.New(t)

	task1, cfg1, _ := dockerTask(t)
	cfg1.Image = busyboxImageID
	cfg1.LoadImage = "busybox.tar"
	require.NoError(task1.EncodeConcreteDriverConfig(cfg1))

	task2, cfg2, _ := dockerTask(t)
	cfg2.Image = busyboxMuslImageID
	cfg2.LoadImage = "busybox_musl.tar"
	require.NoError(task2.EncodeConcreteDriverConfig(cfg2))

	task3, cfg3, _ := dockerTask(t)
	cfg3.Image = busyboxGlibcImageID
	cfg3.LoadImage = "busybox_glibc.tar"
	require.NoError(task3.EncodeConcreteDriverConfig(cfg3))

	taskList := []*drivers.TaskConfig{task1, task2, task3}

	t.Logf("Starting %d tasks", len(taskList))
	d := dockerDriverHarness(t, nil)

	// Let's spin up a bunch of things
	for _, task := range taskList {
		cleanup := d.MkAllocDir(task, true)
		defer cleanup()
		// All three tarballs are copied into every task dir; each task's
		// LoadImage selects which one it actually loads.
		copyImage(t, task.TaskDir(), "busybox.tar")
		copyImage(t, task.TaskDir(), "busybox_musl.tar")
		copyImage(t, task.TaskDir(), "busybox_glibc.tar")
		_, _, err := d.StartTask(task)
		require.NoError(err)

		defer d.DestroyTask(task.ID, true)

		require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second))
	}

	t.Log("All tasks are started. Terminating...")
	for _, task := range taskList {
		require.NoError(d.StopTask(task.ID, time.Second, "SIGINT"))

		// Attempt to wait
		waitCh, err := d.WaitTask(context.Background(), task.ID)
		require.NoError(err)

		select {
		case <-waitCh:
		case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
			require.Fail("timeout waiting on task")
		}
	}
	t.Log("Test complete!")
}
|
|
|
|
|
|
|
|
func TestDockerDriver_NetworkMode_Host(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
expected := "host"
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
taskCfg := TaskConfig{
|
2018-12-14 15:58:31 +00:00
|
|
|
Image: busyboxImageID,
|
2018-11-09 04:38:47 +00:00
|
|
|
LoadImage: "busybox.tar",
|
2018-12-14 15:58:31 +00:00
|
|
|
Command: busyboxLongRunningCmd[0],
|
|
|
|
Args: busyboxLongRunningCmd[1:],
|
2018-11-09 04:38:47 +00:00
|
|
|
NetworkMode: expected,
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
task := &drivers.TaskConfig{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Name: "busybox-demo",
|
|
|
|
Resources: basicResources,
|
|
|
|
}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-09 04:38:47 +00:00
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
_, _, err := d.StartTask(task)
|
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-12-15 03:04:33 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-09 04:38:47 +00:00
|
|
|
|
|
|
|
defer d.DestroyTask(task.ID, true)
|
|
|
|
|
|
|
|
dockerDriver, ok := d.Impl().(*Driver)
|
|
|
|
require.True(t, ok)
|
|
|
|
|
|
|
|
handle, ok := dockerDriver.tasks.Get(task.ID)
|
|
|
|
require.True(t, ok)
|
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-06 05:39:48 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
actual := container.HostConfig.NetworkMode
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Equal(t, expected, actual)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// TestDockerDriver_NetworkAliases_Bridge creates a user-defined bridge
// network and starts a container attached to it with a network alias,
// then verifies the container can be created and inspected.
func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}
	require := require.New(t)

	// Because go-dockerclient doesn't provide api for query network aliases, just check that
	// a container can be created with a 'network_aliases' property

	// Create network, network-scoped alias is supported only for containers in user defined networks
	client := newTestDockerClient(t)
	networkOpts := docker.CreateNetworkOptions{Name: "foobar", Driver: "bridge"}
	network, err := client.CreateNetwork(networkOpts)
	require.NoError(err)
	// best-effort teardown; error deliberately ignored
	defer client.RemoveNetwork(network.ID)

	expected := []string{"foobar"}
	taskCfg := TaskConfig{
		Image:          busyboxImageID,
		LoadImage:      "busybox.tar",
		Command:        busyboxLongRunningCmd[0],
		Args:           busyboxLongRunningCmd[1:],
		NetworkMode:    network.Name,
		NetworkAliases: expected,
	}
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "busybox",
		Resources: basicResources,
	}
	require.NoError(task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err = d.StartTask(task)
	require.NoError(err)
	require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second))

	defer d.DestroyTask(task.ID, true)

	dockerDriver, ok := d.Impl().(*Driver)
	require.True(ok)

	handle, ok := dockerDriver.tasks.Get(task.ID)
	require.True(ok)

	// success criterion is simply that the container exists and is inspectable
	_, err = client.InspectContainer(handle.containerID)
	require.NoError(err)
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Sysctl_Ulimit(t *testing.T) {
|
2018-11-09 04:38:47 +00:00
|
|
|
task, cfg, _ := dockerTask(t)
|
2018-11-06 05:39:48 +00:00
|
|
|
expectedUlimits := map[string]string{
|
|
|
|
"nproc": "4242",
|
|
|
|
"nofile": "2048:4096",
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
cfg.Sysctl = map[string]string{
|
|
|
|
"net.core.somaxconn": "16384",
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
cfg.Ulimit = expectedUlimits
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
client, d, handle, cleanup := dockerSetup(t, task)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-12-15 03:04:33 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-06 05:39:48 +00:00
|
|
|
assert.Nil(t, err, "unexpected error: %v", err)
|
|
|
|
|
|
|
|
want := "16384"
|
|
|
|
got := container.HostConfig.Sysctls["net.core.somaxconn"]
|
|
|
|
assert.Equal(t, want, got, "Wrong net.core.somaxconn config for docker job. Expect: %s, got: %s", want, got)
|
|
|
|
|
|
|
|
expectedUlimitLen := 2
|
|
|
|
actualUlimitLen := len(container.HostConfig.Ulimits)
|
|
|
|
assert.Equal(t, want, got, "Wrong number of ulimit configs for docker job. Expect: %d, got: %d", expectedUlimitLen, actualUlimitLen)
|
|
|
|
|
|
|
|
for _, got := range container.HostConfig.Ulimits {
|
|
|
|
if expectedStr, ok := expectedUlimits[got.Name]; !ok {
|
|
|
|
t.Errorf("%s config unexpected for docker job.", got.Name)
|
|
|
|
} else {
|
|
|
|
if !strings.Contains(expectedStr, ":") {
|
|
|
|
expectedStr = expectedStr + ":" + expectedStr
|
|
|
|
}
|
|
|
|
|
|
|
|
splitted := strings.SplitN(expectedStr, ":", 2)
|
|
|
|
soft, _ := strconv.Atoi(splitted[0])
|
|
|
|
hard, _ := strconv.Atoi(splitted[1])
|
|
|
|
assert.Equal(t, int64(soft), got.Soft, "Wrong soft %s ulimit for docker job. Expect: %d, got: %d", got.Name, soft, got.Soft)
|
|
|
|
assert.Equal(t, int64(hard), got.Hard, "Wrong hard %s ulimit for docker job. Expect: %d, got: %d", got.Name, hard, got.Hard)
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDockerDriver_Sysctl_Ulimit_Errors verifies that malformed ulimit
// specifications (empty value, non-numeric soft limit, non-numeric hard
// limit) cause StartTask to fail with the expected error message.
func TestDockerDriver_Sysctl_Ulimit_Errors(t *testing.T) {
	brokenConfigs := []map[string]string{
		{
			"nofile": "",
		},
		{
			"nofile": "abc:1234",
		},
		{
			"nofile": "1234:abc",
		},
	}

	testCases := []struct {
		ulimitConfig map[string]string
		err          error
	}{
		{brokenConfigs[0], fmt.Errorf("Malformed ulimit specification nofile: \"\", cannot be empty")},
		{brokenConfigs[1], fmt.Errorf("Malformed soft ulimit nofile: abc:1234")},
		{brokenConfigs[2], fmt.Errorf("Malformed hard ulimit nofile: 1234:abc")},
	}

	for _, tc := range testCases {
		task, cfg, _ := dockerTask(t)
		cfg.Ulimit = tc.ulimitConfig
		require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

		d := dockerDriverHarness(t, nil)
		cleanup := d.MkAllocDir(task, true)
		// NOTE: defer in a loop fires at function exit, not per iteration;
		// acceptable here given the small number of cases.
		defer cleanup()
		copyImage(t, task.TaskDir(), "busybox.tar")

		_, _, err := d.StartTask(task)
		require.NotNil(t, err, "Expected non nil error")
		// only a substring match: driver may wrap the message
		require.Contains(t, err.Error(), tc.err.Error())
	}
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Labels(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
task, cfg, _ := dockerTask(t)
|
|
|
|
cfg.Labels = map[string]string{
|
|
|
|
"label1": "value1",
|
|
|
|
"label2": "value2",
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
client, d, handle, cleanup := dockerSetup(t, task)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-06 05:39:48 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Equal(t, 2, len(container.Config.Labels))
|
|
|
|
for k, v := range cfg.Labels {
|
|
|
|
require.Equal(t, v, container.Config.Labels[k])
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_ForcePull(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
task, cfg, _ := dockerTask(t)
|
|
|
|
cfg.ForcePull = true
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
client, d, handle, cleanup := dockerSetup(t, task)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
_, err := client.InspectContainer(handle.containerID)
|
2018-11-06 05:39:48 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDockerDriver_ForcePull_RepoDigest pulls an image pinned by repository
// digest (no tar preload) with force_pull enabled, then verifies the running
// container uses the expected local image digest.
func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	task, cfg, _ := dockerTask(t)
	// no LoadImage: force a real registry pull by digest
	cfg.LoadImage = ""
	cfg.Image = "library/busybox@sha256:58ac43b2cc92c687a32c8be6278e50a063579655fe3090125dcb2af0ff9e1a64"
	// digest the pulled image is stored under locally
	localDigest := "sha256:8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7"
	cfg.ForcePull = true
	cfg.Command = busyboxLongRunningCmd[0]
	cfg.Args = busyboxLongRunningCmd[1:]
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)
	require.Equal(t, localDigest, container.Image)
}
|
|
|
|
|
|
|
|
// TestDockerDriver_SecurityOpt verifies that security_opt settings are
// passed through unmodified to the container's HostConfig.
func TestDockerDriver_SecurityOpt(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	task, cfg, _ := dockerTask(t)
	cfg.SecurityOpt = []string{"seccomp=unconfined"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	require.Exactly(t, cfg.SecurityOpt, container.HostConfig.SecurityOpt)
}
|
|
|
|
|
2018-11-20 14:49:50 +00:00
|
|
|
// TestDockerDriver_CreateContainerConfig checks that createContainerConfig
// propagates the image name and storage_opt into the generated container
// configuration. Pure unit test: no docker daemon is contacted.
func TestDockerDriver_CreateContainerConfig(t *testing.T) {
	t.Parallel()

	task, cfg, _ := dockerTask(t)
	opt := map[string]string{"size": "120G"}

	cfg.StorageOpt = opt
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)

	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.NoError(t, err)

	require.Equal(t, "org/repo:0.1", c.Config.Image)
	require.EqualValues(t, opt, c.HostConfig.StorageOpt)
}
|
2018-11-06 05:39:48 +00:00
|
|
|
// TestDockerDriver_Capabilities table-tests cap_add/cap_drop handling against
// the default capability whitelist and custom whitelists. Cases with a
// non-empty StartError must fail StartTask with that substring; the rest must
// start and expose the exact CapAdd/CapDrop lists on the container.
func TestDockerDriver_Capabilities(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}
	if runtime.GOOS == "windows" {
		t.Skip("Capabilities not supported on windows")
	}

	testCases := []struct {
		Name       string
		CapAdd     []string
		CapDrop    []string
		Whitelist  string // comma-separated AllowCaps override; empty keeps driver default
		StartError string // expected substring of the StartTask error; empty means success
	}{
		{
			Name:    "default-whitelist-add-allowed",
			CapAdd:  []string{"fowner", "mknod"},
			CapDrop: []string{"all"},
		},
		{
			Name:       "default-whitelist-add-forbidden",
			CapAdd:     []string{"net_admin"},
			StartError: "net_admin",
		},
		{
			Name:    "default-whitelist-drop-existing",
			CapDrop: []string{"fowner", "mknod"},
		},
		{
			Name:      "restrictive-whitelist-drop-all",
			CapDrop:   []string{"all"},
			Whitelist: "fowner,mknod",
		},
		{
			Name:      "restrictive-whitelist-add-allowed",
			CapAdd:    []string{"fowner", "mknod"},
			CapDrop:   []string{"all"},
			Whitelist: "fowner,mknod",
		},
		{
			Name:       "restrictive-whitelist-add-forbidden",
			CapAdd:     []string{"net_admin", "mknod"},
			CapDrop:    []string{"all"},
			Whitelist:  "fowner,mknod",
			StartError: "net_admin",
		},
		{
			Name:      "permissive-whitelist",
			CapAdd:    []string{"net_admin", "mknod"},
			Whitelist: "all",
		},
		{
			Name:      "permissive-whitelist-add-all",
			CapAdd:    []string{"all"},
			Whitelist: "all",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			client := newTestDockerClient(t)
			task, cfg, _ := dockerTask(t)
			if len(tc.CapAdd) > 0 {
				cfg.CapAdd = tc.CapAdd
			}
			if len(tc.CapDrop) > 0 {
				cfg.CapDrop = tc.CapDrop
			}
			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

			d := dockerDriverHarness(t, nil)
			dockerDriver, ok := d.Impl().(*Driver)
			require.True(t, ok)
			if tc.Whitelist != "" {
				// override the driver's allowed-capabilities list for this case
				dockerDriver.config.AllowCaps = strings.Split(tc.Whitelist, ",")
			}

			cleanup := d.MkAllocDir(task, true)
			defer cleanup()
			copyImage(t, task.TaskDir(), "busybox.tar")

			_, _, err := d.StartTask(task)
			if err == nil && tc.StartError != "" {
				t.Fatalf("Expected error in start: %v", tc.StartError)
			} else if err != nil {
				if tc.StartError == "" {
					// unexpected failure: surface the error via require
					require.NoError(t, err)
				} else {
					require.Contains(t, err.Error(), tc.StartError)
				}
				// error cases stop here; nothing was started
				return
			}

			defer d.DestroyTask(task.ID, true)
			handle, ok := dockerDriver.tasks.Get(task.ID)
			require.True(t, ok)

			require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

			container, err := client.InspectContainer(handle.containerID)
			require.NoError(t, err)

			require.Exactly(t, tc.CapAdd, container.HostConfig.CapAdd)
			require.Exactly(t, tc.CapDrop, container.HostConfig.CapDrop)
		})
	}
}
|
|
|
|
|
|
|
|
// TestDockerDriver_DNS verifies that DNS servers, search domains, and DNS
// options from the task config are applied to the container's HostConfig.
func TestDockerDriver_DNS(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	task, cfg, _ := dockerTask(t)
	cfg.DNSServers = []string{"8.8.8.8", "8.8.4.4"}
	cfg.DNSSearchDomains = []string{"example.com", "example.org", "example.net"}
	cfg.DNSOptions = []string{"ndots:1"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task)
	defer cleanup()

	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	require.Exactly(t, cfg.DNSServers, container.HostConfig.DNS)
	require.Exactly(t, cfg.DNSSearchDomains, container.HostConfig.DNSSearch)
	require.Exactly(t, cfg.DNSOptions, container.HostConfig.DNSOptions)
}
|
|
|
|
|
|
|
|
// TestDockerDriver_MACAddress verifies that a configured mac_address is
// applied to the container's network settings.
func TestDockerDriver_MACAddress(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	task, cfg, _ := dockerTask(t)
	cfg.MacAddress = "00:16:3e:00:00:00"
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	require.Equal(t, cfg.MacAddress, container.NetworkSettings.MacAddress)
}
|
|
|
|
|
|
|
|
func TestDockerWorkDir(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
task, cfg, _ := dockerTask(t)
|
|
|
|
cfg.WorkDir = "/some/path"
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
client, d, handle, cleanup := dockerSetup(t, task)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
require.Equal(t, cfg.WorkDir, container.Config.WorkingDir)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// inSlice reports whether needle occurs anywhere in haystack.
func inSlice(needle string, haystack []string) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
|
|
|
|
|
|
|
|
// TestDockerDriver_PortsNoMap verifies port handling without a port_map:
// the allocated host ports themselves are exposed and forwarded 1:1
// (same number inside and outside the container, tcp and udp).
func TestDockerDriver_PortsNoMap(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	task, _, port := dockerTask(t)
	res := port[0] // reserved port
	dyn := port[1] // dynamic port

	client, d, handle, cleanup := dockerSetup(t, task)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	// Verify that the correct ports are EXPOSED
	expectedExposedPorts := map[docker.Port]struct{}{
		docker.Port(fmt.Sprintf("%d/tcp", res)): {},
		docker.Port(fmt.Sprintf("%d/udp", res)): {},
		docker.Port(fmt.Sprintf("%d/tcp", dyn)): {},
		docker.Port(fmt.Sprintf("%d/udp", dyn)): {},
	}

	require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts)

	// Verify that the correct ports are FORWARDED
	expectedPortBindings := map[docker.Port][]docker.PortBinding{
		docker.Port(fmt.Sprintf("%d/tcp", res)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}},
		docker.Port(fmt.Sprintf("%d/udp", res)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}},
		docker.Port(fmt.Sprintf("%d/tcp", dyn)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}},
		docker.Port(fmt.Sprintf("%d/udp", dyn)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}},
	}

	require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings)
}
|
|
|
|
|
|
|
|
// TestDockerDriver_PortsMapping verifies port handling with a port_map:
// the container-side ports from the map (8080, 6379) are exposed, and each
// is forwarded from the corresponding allocated host port.
func TestDockerDriver_PortsMapping(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	task, cfg, port := dockerTask(t)
	res := port[0] // reserved host port -> container 8080
	dyn := port[1] // dynamic host port -> container 6379
	cfg.PortMap = map[string]int{
		"main":  8080,
		"REDIS": 6379,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	// Verify that the correct ports are EXPOSED
	expectedExposedPorts := map[docker.Port]struct{}{
		docker.Port("8080/tcp"): {},
		docker.Port("8080/udp"): {},
		docker.Port("6379/tcp"): {},
		docker.Port("6379/udp"): {},
	}

	require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts)

	// Verify that the correct ports are FORWARDED
	expectedPortBindings := map[docker.Port][]docker.PortBinding{
		docker.Port("8080/tcp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}},
		docker.Port("8080/udp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}},
		docker.Port("6379/tcp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}},
		docker.Port("6379/udp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}},
	}
	require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings)
}
|
|
|
|
|
|
|
|
// TestDockerDriver_User attempts to start a container as a user ("alice")
// that does not exist in the busybox image and expects StartTask to fail
// with an error mentioning that user.
func TestDockerDriver_User(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}
	task, cfg, _ := dockerTask(t)
	task.User = "alice"
	cfg.Command = "/bin/sleep"
	cfg.Args = []string{"10000"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	if err == nil {
		// the task unexpectedly started; tear it down before failing
		d.DestroyTask(task.ID, true)
		t.Fatalf("Should've failed")
	}

	if !strings.Contains(err.Error(), "alice") {
		t.Fatalf("Expected failure string not found, found %q instead", err.Error())
	}
}
|
|
|
|
|
|
|
|
// TestDockerDriver_CleanupContainer runs a short-lived task to completion
// and verifies the driver removes the container afterwards (inspecting it
// must fail once cleanup has run).
func TestDockerDriver_CleanupContainer(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	task, cfg, _ := dockerTask(t)
	cfg.Command = "/bin/echo"
	cfg.Args = []string{"hello"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task)
	defer cleanup()

	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if !res.Successful() {
			t.Fatalf("err: %v", res)
		}

		// give the driver time to garbage-collect the exited container
		time.Sleep(3 * time.Second)

		// Ensure that the container isn't present
		_, err := client.InspectContainer(handle.containerID)
		if err == nil {
			t.Fatalf("expected to not get container")
		}

	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Stats(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
task, cfg, _ := dockerTask(t)
|
|
|
|
cfg.Command = "/bin/sleep"
|
|
|
|
cfg.Args = []string{"1000"}
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
_, d, handle, cleanup := dockerSetup(t, task)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
2018-11-09 04:38:47 +00:00
|
|
|
require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
go func() {
|
|
|
|
time.Sleep(3 * time.Second)
|
|
|
|
ru, err := handle.Stats()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if ru.ResourceUsage == nil {
|
2018-11-09 04:38:47 +00:00
|
|
|
d.DestroyTask(task.ID, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
t.Fatalf("expected resource usage")
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
d.DestroyTask(task.ID, true)
|
2018-11-06 05:39:48 +00:00
|
|
|
}()
|
|
|
|
|
2018-11-09 04:38:47 +00:00
|
|
|
waitCh, err := d.WaitTask(context.Background(), task.ID)
|
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
select {
|
2018-11-09 04:38:47 +00:00
|
|
|
case res := <-waitCh:
|
2018-11-06 05:39:48 +00:00
|
|
|
if res.Successful() {
|
|
|
|
t.Fatalf("should err: %v", res)
|
|
|
|
}
|
|
|
|
case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-27 19:03:58 +00:00
|
|
|
// setupDockerVolumes builds a busybox task that touches a uniquely named
// file inside a volume mounted from hostpath. It returns the generic task
// config, a driver harness configured with cfg, the concrete docker task
// config, the host-side path where the touched file should appear, and a
// cleanup func that removes the alloc dir.
func setupDockerVolumes(t *testing.T, cfg map[string]interface{}, hostpath string) (*drivers.TaskConfig, *dtestutil.DriverHarness, *TaskConfig, string, func()) {
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	// Random file name so concurrent runs against the same hostpath don't
	// collide.
	randfn := fmt.Sprintf("test-%d", rand.Int())
	hostfile := filepath.Join(hostpath, randfn)
	containerPath := "/mnt/vol"
	containerFile := filepath.Join(containerPath, randfn)

	taskCfg := &TaskConfig{
		Image:     busyboxImageID,
		LoadImage: "busybox.tar",
		Command:   "touch",
		Args:      []string{containerFile},
		Volumes:   []string{fmt.Sprintf("%s:%s", hostpath, containerPath)},
	}
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "ls",
		Env:       map[string]string{"VOL_PATH": containerPath},
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))

	d := dockerDriverHarness(t, cfg)
	cleanup := d.MkAllocDir(task, true)

	copyImage(t, task.TaskDir(), "busybox.tar")

	return task, d, taskCfg, hostfile, cleanup
}
|
|
|
|
|
|
|
|
// TestDockerDriver_VolumesDisabled asserts that, with the volumes feature
// disabled in driver config, absolute host volume mounts and custom volume
// drivers are rejected while relative (alloc-dir contained) paths still work.
func TestDockerDriver_VolumesDisabled(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	cfg := map[string]interface{}{
		"volumes": map[string]interface{}{
			"enabled": false,
		},
		"gc": map[string]interface{}{
			"image": false,
		},
	}

	// Absolute host paths must be rejected when volumes are disabled.
	{
		tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesdisabled")
		if err != nil {
			t.Fatalf("error creating temporary dir: %v", err)
		}

		task, driver, _, _, cleanup := setupDockerVolumes(t, cfg, tmpvol)
		defer cleanup()

		if _, _, err := driver.StartTask(task); err == nil {
			require.Fail(t, "Started driver successfully when volumes should have been disabled.")
		}
	}

	// Relative paths should still be allowed
	{
		task, driver, _, fn, cleanup := setupDockerVolumes(t, cfg, ".")
		defer cleanup()

		_, _, err := driver.StartTask(task)
		require.NoError(t, err)
		defer driver.DestroyTask(task.ID, true)

		waitCh, err := driver.WaitTask(context.Background(), task.ID)
		require.NoError(t, err)
		select {
		case res := <-waitCh:
			if !res.Successful() {
				t.Fatalf("unexpected err: %v", res)
			}
		case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
			t.Fatalf("timeout")
		}

		// The container touched fn inside its mounted volume; it must be
		// visible in the task dir on the host.
		if _, err := ioutil.ReadFile(filepath.Join(task.TaskDir().Dir, fn)); err != nil {
			t.Fatalf("unexpected error reading %s: %v", fn, err)
		}
	}

	// Volume Drivers should be rejected (error)
	{
		task, driver, taskCfg, _, cleanup := setupDockerVolumes(t, cfg, "fake_flocker_vol")
		defer cleanup()

		taskCfg.VolumeDriver = "flocker"
		require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))

		if _, _, err := driver.StartTask(task); err == nil {
			require.Fail(t, "Started driver successfully when volume drivers should have been disabled.")
		}
	}

}
|
|
|
|
|
2018-12-11 19:22:50 +00:00
|
|
|
func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
allocDir := "/tmp/nomad/alloc-dir"
|
|
|
|
|
|
|
|
cases := []struct {
|
|
|
|
name string
|
|
|
|
requiresVolumes bool
|
|
|
|
|
|
|
|
volumeDriver string
|
|
|
|
volumes []string
|
|
|
|
|
|
|
|
expectedVolumes []string
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
name: "basic plugin",
|
|
|
|
requiresVolumes: true,
|
|
|
|
volumeDriver: "nfs",
|
|
|
|
volumes: []string{"test-path:/tmp/taskpath"},
|
|
|
|
expectedVolumes: []string{"test-path:/tmp/taskpath"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "absolute default driver",
|
|
|
|
requiresVolumes: true,
|
|
|
|
volumeDriver: "",
|
|
|
|
volumes: []string{"/abs/test-path:/tmp/taskpath"},
|
|
|
|
expectedVolumes: []string{"/abs/test-path:/tmp/taskpath"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "absolute local driver",
|
|
|
|
requiresVolumes: true,
|
|
|
|
volumeDriver: "local",
|
|
|
|
volumes: []string{"/abs/test-path:/tmp/taskpath"},
|
|
|
|
expectedVolumes: []string{"/abs/test-path:/tmp/taskpath"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "relative default driver",
|
|
|
|
requiresVolumes: false,
|
|
|
|
volumeDriver: "",
|
|
|
|
volumes: []string{"test-path:/tmp/taskpath"},
|
|
|
|
expectedVolumes: []string{"/tmp/nomad/alloc-dir/demo/test-path:/tmp/taskpath"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "relative local driver",
|
|
|
|
requiresVolumes: false,
|
|
|
|
volumeDriver: "local",
|
|
|
|
volumes: []string{"test-path:/tmp/taskpath"},
|
|
|
|
expectedVolumes: []string{"/tmp/nomad/alloc-dir/demo/test-path:/tmp/taskpath"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "relative outside task-dir default driver",
|
|
|
|
requiresVolumes: false,
|
|
|
|
volumeDriver: "",
|
|
|
|
volumes: []string{"../test-path:/tmp/taskpath"},
|
|
|
|
expectedVolumes: []string{"/tmp/nomad/alloc-dir/test-path:/tmp/taskpath"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "relative outside task-dir local driver",
|
|
|
|
requiresVolumes: false,
|
|
|
|
volumeDriver: "local",
|
|
|
|
volumes: []string{"../test-path:/tmp/taskpath"},
|
|
|
|
expectedVolumes: []string{"/tmp/nomad/alloc-dir/test-path:/tmp/taskpath"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "relative outside alloc-dir default driver",
|
|
|
|
requiresVolumes: true,
|
|
|
|
volumeDriver: "",
|
|
|
|
volumes: []string{"../../test-path:/tmp/taskpath"},
|
|
|
|
expectedVolumes: []string{"/tmp/nomad/test-path:/tmp/taskpath"},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "relative outside task-dir local driver",
|
|
|
|
requiresVolumes: true,
|
|
|
|
volumeDriver: "local",
|
|
|
|
volumes: []string{"../../test-path:/tmp/taskpath"},
|
|
|
|
expectedVolumes: []string{"/tmp/nomad/test-path:/tmp/taskpath"},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Run("with volumes enabled", func(t *testing.T) {
|
|
|
|
dh := dockerDriverHarness(t, nil)
|
|
|
|
driver := dh.Impl().(*Driver)
|
|
|
|
driver.config.Volumes.Enabled = true
|
|
|
|
|
|
|
|
for _, c := range cases {
|
|
|
|
t.Run(c.name, func(t *testing.T) {
|
|
|
|
task, cfg, _ := dockerTask(t)
|
|
|
|
cfg.VolumeDriver = c.volumeDriver
|
|
|
|
cfg.Volumes = c.volumes
|
|
|
|
|
|
|
|
task.AllocDir = allocDir
|
|
|
|
task.Name = "demo"
|
|
|
|
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
|
|
|
|
|
|
|
cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
for _, v := range c.expectedVolumes {
|
|
|
|
require.Contains(t, cc.HostConfig.Binds, v)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("with volumes disabled", func(t *testing.T) {
|
|
|
|
dh := dockerDriverHarness(t, nil)
|
|
|
|
driver := dh.Impl().(*Driver)
|
|
|
|
driver.config.Volumes.Enabled = false
|
|
|
|
|
|
|
|
for _, c := range cases {
|
|
|
|
t.Run(c.name, func(t *testing.T) {
|
|
|
|
task, cfg, _ := dockerTask(t)
|
|
|
|
cfg.VolumeDriver = c.volumeDriver
|
|
|
|
cfg.Volumes = c.volumes
|
|
|
|
|
|
|
|
task.AllocDir = allocDir
|
|
|
|
task.Name = "demo"
|
|
|
|
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
|
|
|
|
|
|
|
cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
|
|
|
|
if c.requiresVolumes {
|
|
|
|
require.Error(t, err, "volumes are not enabled")
|
|
|
|
} else {
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
for _, v := range c.expectedVolumes {
|
|
|
|
require.Contains(t, cc.HostConfig.Binds, v)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
// TestDockerDriver_VolumesEnabled asserts that with volumes enabled
// (default harness config) an absolute host-path volume mount works: the
// container touches a file that then exists on the host.
func TestDockerDriver_VolumesEnabled(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesenabled")
	require.NoError(t, err)

	// Evaluate symlinks so it works on MacOS
	tmpvol, err = filepath.EvalSymlinks(tmpvol)
	require.NoError(t, err)

	task, driver, _, hostpath, cleanup := setupDockerVolumes(t, nil, tmpvol)
	defer cleanup()

	_, _, err = driver.StartTask(task)
	require.NoError(t, err)
	defer driver.DestroyTask(task.ID, true)

	waitCh, err := driver.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if !res.Successful() {
			t.Fatalf("unexpected err: %v", res)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
		t.Fatalf("timeout")
	}

	// The touched file must be visible at the host side of the volume.
	if _, err := ioutil.ReadFile(hostpath); err != nil {
		t.Fatalf("unexpected error reading %s: %v", hostpath, err)
	}
}
|
|
|
|
|
|
|
|
// TestDockerDriver_Mounts exercises the `mounts` task config: a valid
// volume mount starts successfully while duplicate mount targets produce a
// "Duplicate mount point" error from docker.
func TestDockerDriver_Mounts(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	goodMount := DockerMount{
		Target: "/nomad",
		VolumeOptions: DockerVolumeOptions{
			Labels: map[string]string{"foo": "bar"},
			DriverConfig: DockerVolumeDriverConfig{
				Name: "local",
			},
		},
		ReadOnly: true,
		Source:   "test",
	}

	cases := []struct {
		Name   string
		Mounts []DockerMount
		Error  string // substring expected in the StartTask error; "" means success
	}{
		{
			Name:   "good-one",
			Error:  "",
			Mounts: []DockerMount{goodMount},
		},
		{
			Name:   "duplicate",
			Error:  "Duplicate mount point",
			Mounts: []DockerMount{goodMount, goodMount, goodMount},
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			d := dockerDriverHarness(t, nil)
			// Build the task
			task, cfg, _ := dockerTask(t)
			cfg.Command = "/bin/sleep"
			cfg.Args = []string{"10000"}
			cfg.Mounts = c.Mounts
			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
			cleanup := d.MkAllocDir(task, true)
			defer cleanup()

			copyImage(t, task.TaskDir(), "busybox.tar")

			_, _, err := d.StartTask(task)
			// Destroy regardless of outcome; a failed start is a no-op.
			defer d.DestroyTask(task.ID, true)
			if err == nil && c.Error != "" {
				t.Fatalf("expected error: %v", c.Error)
			} else if err != nil {
				if c.Error == "" {
					t.Fatalf("unexpected error in prestart: %v", err)
				} else if !strings.Contains(err.Error(), c.Error) {
					t.Fatalf("expected error %q; got %v", c.Error, err)
				}
			}
		})
	}
}
|
|
|
|
|
2018-11-26 21:45:01 +00:00
|
|
|
// TestDockerDriver_MountsSerialization asserts that task-config mounts are
// translated into the go-dockerclient HostMount structures: volume mounts
// pass through, relative bind sources are expanded under the task dir, and
// absolute or alloc-dir-escaping bind sources require volumes to be enabled.
func TestDockerDriver_MountsSerialization(t *testing.T) {
	t.Parallel()

	allocDir := "/tmp/nomad/alloc-dir"

	cases := []struct {
		name            string
		requiresVolumes bool
		passedMounts    []DockerMount
		expectedMounts  []docker.HostMount
	}{
		{
			name: "basic volume",
			passedMounts: []DockerMount{
				{
					Target:   "/nomad",
					ReadOnly: true,
					Source:   "test",
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:          "volume",
					Target:        "/nomad",
					Source:        "test",
					ReadOnly:      true,
					VolumeOptions: &docker.VolumeOptions{},
				},
			},
		},
		{
			name: "basic bind",
			passedMounts: []DockerMount{
				{
					Type:   "bind",
					Target: "/nomad",
					Source: "test",
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type: "bind",
					Target: "/nomad",
					// Relative bind sources resolve under <alloc>/<task>.
					Source:      "/tmp/nomad/alloc-dir/demo/test",
					BindOptions: &docker.BindOptions{},
				},
			},
		},
		{
			name:            "basic absolute bind",
			requiresVolumes: true,
			passedMounts: []DockerMount{
				{
					Type:   "bind",
					Target: "/nomad",
					Source: "/tmp/test",
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:        "bind",
					Target:      "/nomad",
					Source:      "/tmp/test",
					BindOptions: &docker.BindOptions{},
				},
			},
		},
		{
			name:            "bind relative outside",
			requiresVolumes: true,
			passedMounts: []DockerMount{
				{
					Type:   "bind",
					Target: "/nomad",
					Source: "../../test",
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:        "bind",
					Target:      "/nomad",
					Source:      "/tmp/nomad/test",
					BindOptions: &docker.BindOptions{},
				},
			},
		},
		{
			name:            "basic tmpfs",
			requiresVolumes: false,
			passedMounts: []DockerMount{
				{
					Type:   "tmpfs",
					Target: "/nomad",
					TmpfsOptions: DockerTmpfsOptions{
						SizeBytes: 321,
						Mode:      0666,
					},
				},
			},
			expectedMounts: []docker.HostMount{
				{
					Type:   "tmpfs",
					Target: "/nomad",
					// go-dockerclient spells this field "TempfsOptions".
					TempfsOptions: &docker.TempfsOptions{
						SizeBytes: 321,
						Mode:      0666,
					},
				},
			},
		},
	}

	t.Run("with volumes enabled", func(t *testing.T) {
		dh := dockerDriverHarness(t, nil)
		driver := dh.Impl().(*Driver)
		driver.config.Volumes.Enabled = true

		for _, c := range cases {
			t.Run(c.name, func(t *testing.T) {
				task, cfg, _ := dockerTask(t)
				cfg.Mounts = c.passedMounts

				task.AllocDir = allocDir
				task.Name = "demo"

				require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

				cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
				require.NoError(t, err)
				require.EqualValues(t, c.expectedMounts, cc.HostConfig.Mounts)
			})
		}
	})

	t.Run("with volumes disabled", func(t *testing.T) {
		dh := dockerDriverHarness(t, nil)
		driver := dh.Impl().(*Driver)
		driver.config.Volumes.Enabled = false

		for _, c := range cases {
			t.Run(c.name, func(t *testing.T) {
				task, cfg, _ := dockerTask(t)
				cfg.Mounts = c.passedMounts

				task.AllocDir = allocDir
				task.Name = "demo"

				require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

				cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
				if c.requiresVolumes {
					require.Error(t, err, "volumes are not enabled")
				} else {
					require.NoError(t, err)
					require.EqualValues(t, c.expectedMounts, cc.HostConfig.Mounts)
				}
			})
		}
	})

}
|
|
|
|
|
2018-12-04 21:46:16 +00:00
|
|
|
// TestDockerDriver_CreateContainerConfig_MountsCombined asserts that
// devices and mounts set by device managers/plugins are honored
// and present in docker.CreateContainerOptions, and that it is appended
// to any devices/mounts a user sets in the task config.
func TestDockerDriver_CreateContainerConfig_MountsCombined(t *testing.T) {
	t.Parallel()

	task, cfg, _ := dockerTask(t)

	// Device/mount injected by the plugin/device-manager side (generic
	// task config).
	task.Devices = []*drivers.DeviceConfig{
		{
			HostPath:    "/dev/fuse",
			TaskPath:    "/container/dev/task-fuse",
			Permissions: "rw",
		},
	}
	task.Mounts = []*drivers.MountConfig{
		{
			HostPath: "/tmp/task-mount",
			TaskPath: "/container/tmp/task-mount",
			Readonly: true,
		},
	}

	// Device/mount set by the user in the docker task config.
	cfg.Devices = []DockerDevice{
		{
			HostPath:          "/dev/stdout",
			ContainerPath:     "/container/dev/cfg-stdout",
			CgroupPermissions: "rwm",
		},
	}
	cfg.Mounts = []DockerMount{
		{
			Type:     "bind",
			Source:   "/tmp/cfg-mount",
			Target:   "/container/tmp/cfg-mount",
			ReadOnly: false,
		},
	}

	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)

	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.NoError(t, err)

	expectedMounts := []docker.HostMount{
		{
			Type:        "bind",
			Source:      "/tmp/cfg-mount",
			Target:      "/container/tmp/cfg-mount",
			ReadOnly:    false,
			BindOptions: &docker.BindOptions{},
		},
		{
			Type:     "bind",
			Source:   "/tmp/task-mount",
			Target:   "/container/tmp/task-mount",
			ReadOnly: true,
		},
	}
	foundMounts := c.HostConfig.Mounts
	// Sort by target so comparison is order-independent.
	sort.Slice(foundMounts, func(i, j int) bool {
		return foundMounts[i].Target < foundMounts[j].Target
	})
	require.EqualValues(t, expectedMounts, foundMounts)

	expectedDevices := []docker.Device{
		{
			PathOnHost:        "/dev/stdout",
			PathInContainer:   "/container/dev/cfg-stdout",
			CgroupPermissions: "rwm",
		},
		{
			PathOnHost:        "/dev/fuse",
			PathInContainer:   "/container/dev/task-fuse",
			CgroupPermissions: "rw",
		},
	}

	foundDevices := c.HostConfig.Devices
	// Sort by container path for order-independent comparison.
	sort.Slice(foundDevices, func(i, j int) bool {
		return foundDevices[i].PathInContainer < foundDevices[j].PathInContainer
	})
	require.EqualValues(t, expectedDevices, foundDevices)
}
|
|
|
|
|
2018-11-06 05:39:48 +00:00
|
|
|
// TestDockerDriver_Cleanup ensures Cleanup removes only downloaded images.
func TestDockerDriver_Cleanup(t *testing.T) {
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	// using a small image and a specific point release to avoid accidental conflicts with other tasks
	imageName := "busybox:1.27.1"
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "cleanup_test",
		Resources: basicResources,
	}
	cfg := &TaskConfig{
		Image:   imageName,
		Command: "/bin/sleep",
		Args:    []string{"100"},
	}

	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, driver, handle, cleanup := dockerSetup(t, task)
	defer cleanup()

	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
	// Cleanup
	require.NoError(t, driver.DestroyTask(task.ID, true))

	// Ensure image was removed
	tu.WaitForResult(func() (bool, error) {
		if _, err := client.InspectImage(imageName); err == nil {
			return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", imageName)
		}

		return true, nil
	}, func(err error) {
		require.NoError(t, err)
	})

	// The image doesn't exist which shouldn't be an error when calling
	// Cleanup, so call it again to make sure.
	require.NoError(t, driver.Impl().(*Driver).cleanupImage(handle))

}
|
|
|
|
|
|
|
|
// TestDockerDriver_AuthConfiguration asserts that authFromDockerConfig
// resolves per-registry credentials from a docker auth.json fixture: no
// match yields nil, and docker.io/quay.io/other.io repos map to their
// configured credentials and server addresses.
func TestDockerDriver_AuthConfiguration(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	path := "./test-resources/docker/auth.json"
	cases := []struct {
		Repo       string
		AuthConfig *docker.AuthConfiguration
	}{
		{
			// Registry absent from the fixture: no credentials.
			Repo:       "lolwhat.com/what:1337",
			AuthConfig: nil,
		},
		{
			Repo: "redis:3.2",
			AuthConfig: &docker.AuthConfiguration{
				Username:      "test",
				Password:      "1234",
				Email:         "",
				ServerAddress: "https://index.docker.io/v1/",
			},
		},
		{
			Repo: "quay.io/redis:3.2",
			AuthConfig: &docker.AuthConfiguration{
				Username:      "test",
				Password:      "5678",
				Email:         "",
				ServerAddress: "quay.io",
			},
		},
		{
			Repo: "other.io/redis:3.2",
			AuthConfig: &docker.AuthConfiguration{
				Username:      "test",
				Password:      "abcd",
				Email:         "",
				ServerAddress: "https://other.io/v1/",
			},
		},
	}

	for _, c := range cases {
		act, err := authFromDockerConfig(path)(c.Repo)
		require.NoError(t, err)
		require.Exactly(t, c.AuthConfig, act)
	}
}
|
|
|
|
|
|
|
|
// TestDockerDriver_OOMKilled runs a shell loop that doubles a string until
// it exceeds a 10MB memory limit, then asserts the exit result is flagged
// as OOM-killed rather than a normal failure.
func TestDockerDriver_OOMKilled(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	taskCfg := TaskConfig{
		Image:     busyboxImageID,
		LoadImage: "busybox.tar",
		Command:   "/bin/sh",
		// Sleep briefly so the driver observes a started container, then
		// grow memory without bound.
		Args: []string{"-c", `/bin/sleep 2 && x=a && while true; do x="$x$x"; done`},
	}
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "oom-killed",
		Resources: basicResources,
	}
	// Cap memory at 10MB so the loop above is killed quickly.
	task.Resources.LinuxResources.MemoryLimitBytes = 10 * 1024 * 1024
	task.Resources.NomadResources.MemoryMB = 10

	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if res.Successful() {
			t.Fatalf("expected error, but container exited successful")
		}

		if !res.OOMKilled {
			t.Fatalf("not killed by OOM killer: %s", res.Err)
		}

		t.Logf("Successfully killed by OOM killer")

	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
brokenConfigs := []DockerDevice{
|
|
|
|
{
|
|
|
|
HostPath: "",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
2018-11-12 12:39:55 +00:00
|
|
|
{
|
|
|
|
HostPath: "/dev/sda1",
|
|
|
|
CgroupPermissions: "rxb",
|
2018-11-06 05:39:48 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
test_cases := []struct {
|
2018-11-12 12:39:55 +00:00
|
|
|
deviceConfig []DockerDevice
|
2018-11-06 05:39:48 +00:00
|
|
|
err error
|
|
|
|
}{
|
2018-11-12 12:39:55 +00:00
|
|
|
{brokenConfigs[:1], fmt.Errorf("host path must be set in configuration for devices")},
|
|
|
|
{brokenConfigs[1:], fmt.Errorf("invalid cgroup permission string: \"rxb\"")},
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, tc := range test_cases {
|
2018-11-12 12:39:55 +00:00
|
|
|
task, cfg, _ := dockerTask(t)
|
|
|
|
cfg.Devices = tc.deviceConfig
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
|
|
|
d := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := d.MkAllocDir(task, true)
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
|
|
|
defer cleanup()
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
_, _, err := d.StartTask(task)
|
|
|
|
require.Error(t, err)
|
|
|
|
require.Contains(t, err.Error(), tc.err.Error())
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDockerDriver_Device_Success asserts that a valid device mapping in
// the task config appears on the created container's HostConfig, with the
// driver's default cgroup permissions ("rwm") filled in.
func TestDockerDriver_Device_Success(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	if runtime.GOOS != "linux" {
		t.Skip("test device mounts only on linux")
	}

	hostPath := "/dev/random"
	containerPath := "/dev/myrandom"
	perms := "rwm"

	expectedDevice := docker.Device{
		PathOnHost:        hostPath,
		PathInContainer:   containerPath,
		CgroupPermissions: perms,
	}
	// No CgroupPermissions set here; the driver is expected to default it
	// to "rwm" (compared via expectedDevice above).
	config := DockerDevice{
		HostPath:      hostPath,
		ContainerPath: containerPath,
	}

	task, cfg, _ := dockerTask(t)
	cfg.Devices = []DockerDevice{config}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, driver, handle, cleanup := dockerSetup(t, task)
	defer cleanup()
	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	require.NotEmpty(t, container.HostConfig.Devices, "Expected one device")
	require.Equal(t, expectedDevice, container.HostConfig.Devices[0], "Incorrect device ")
}
|
|
|
|
|
|
|
|
func TestDockerDriver_Entrypoint(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
|
|
|
|
entrypoint := []string{"/bin/sh", "-c"}
|
2018-11-12 12:39:55 +00:00
|
|
|
task, cfg, _ := dockerTask(t)
|
|
|
|
cfg.Entrypoint = entrypoint
|
2018-12-14 15:58:31 +00:00
|
|
|
cfg.Command = strings.Join(busyboxLongRunningCmd, " ")
|
2018-12-04 04:08:52 +00:00
|
|
|
cfg.Args = []string{}
|
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
client, driver, handle, cleanup := dockerSetup(t, task)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
require.Len(t, container.Config.Entrypoint, 2, "Expected one entrypoint")
|
|
|
|
require.Equal(t, entrypoint, container.Config.Entrypoint, "Incorrect entrypoint ")
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDockerDriver_ReadonlyRootfs asserts that the readonly_rootfs task
// option is reflected on the created container's HostConfig.
func TestDockerDriver_ReadonlyRootfs(t *testing.T) {
	if !tu.IsTravis() {
		t.Parallel()
	}
	if !testutil.DockerIsConnected(t) {
		t.Skip("Docker not connected")
	}

	task, cfg, _ := dockerTask(t)
	cfg.ReadonlyRootfs = true
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, driver, handle, cleanup := dockerSetup(t, task)
	defer cleanup()
	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	require.True(t, container.HostConfig.ReadonlyRootfs, "ReadonlyRootfs option not set")
}
|
|
|
|
|
|
|
|
// fakeDockerClient can be used in places that accept an interface for the
// docker client such as createContainer.
type fakeDockerClient struct{}

// CreateContainer always fails with a volume-attachment error so tests can
// assert how callers classify that failure.
func (fakeDockerClient) CreateContainer(docker.CreateContainerOptions) (*docker.Container, error) {
	return nil, fmt.Errorf("volume is attached on another node")
}

// InspectContainer is not expected to be called by the code under test.
func (fakeDockerClient) InspectContainer(id string) (*docker.Container, error) {
	panic("not implemented")
}

// ListContainers is not expected to be called by the code under test.
func (fakeDockerClient) ListContainers(docker.ListContainersOptions) ([]docker.APIContainers, error) {
	panic("not implemented")
}

// RemoveContainer is not expected to be called by the code under test.
func (fakeDockerClient) RemoveContainer(opts docker.RemoveContainerOptions) error {
	panic("not implemented")
}
|
|
|
|
|
|
|
|
// TestDockerDriver_VolumeError asserts volume related errors when creating a
|
|
|
|
// container are recoverable.
|
|
|
|
func TestDockerDriver_VolumeError(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
|
|
|
|
// setup
|
2018-11-12 12:39:55 +00:00
|
|
|
_, cfg, _ := dockerTask(t)
|
|
|
|
driver := dockerDriverHarness(t, nil)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
// assert volume error is recoverable
|
2018-11-12 12:39:55 +00:00
|
|
|
_, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg)
|
2018-11-06 05:39:48 +00:00
|
|
|
require.True(t, structs.IsRecoverable(err))
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
|
|
|
|
expectedPrefix := "2001:db8:1::242:ac11"
|
|
|
|
expectedAdvertise := true
|
2018-11-12 12:39:55 +00:00
|
|
|
task, cfg, _ := dockerTask(t)
|
|
|
|
cfg.AdvertiseIPv6Addr = expectedAdvertise
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
client := newTestDockerClient(t)
|
|
|
|
|
|
|
|
// Make sure IPv6 is enabled
|
|
|
|
net, err := client.NetworkInfo("bridge")
|
|
|
|
if err != nil {
|
|
|
|
t.Skip("error retrieving bridge network information, skipping")
|
|
|
|
}
|
|
|
|
if net == nil || !net.EnableIPv6 {
|
|
|
|
t.Skip("IPv6 not enabled on bridge network, skipping")
|
|
|
|
}
|
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
driver := dockerDriverHarness(t, nil)
|
|
|
|
cleanup := driver.MkAllocDir(task, true)
|
|
|
|
copyImage(t, task.TaskDir(), "busybox.tar")
|
|
|
|
defer cleanup()
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
_, network, err := driver.StartTask(task)
|
|
|
|
defer driver.DestroyTask(task.ID, true)
|
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
require.Equal(t, expectedAdvertise, network.AutoAdvertise, "Wrong autoadvertise. Expect: %s, got: %s", expectedAdvertise, network.AutoAdvertise)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
if !strings.HasPrefix(network.IP, expectedPrefix) {
|
|
|
|
t.Fatalf("Got IP address %q want ip address with prefix %q", network.IP, expectedPrefix)
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
handle, ok := driver.Impl().(*Driver).tasks.Get(task.ID)
|
|
|
|
require.True(t, ok)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-12-04 04:08:52 +00:00
|
|
|
require.NoError(t, driver.WaitUntilStarted(task.ID, time.Second))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, err)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
|
|
|
if !strings.HasPrefix(container.NetworkSettings.GlobalIPv6Address, expectedPrefix) {
|
|
|
|
t.Fatalf("Got GlobalIPv6address %s want GlobalIPv6address with prefix %s", expectedPrefix, container.NetworkSettings.GlobalIPv6Address)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestParseDockerImage(t *testing.T) {
|
|
|
|
tests := []struct {
|
|
|
|
Image string
|
|
|
|
Repo string
|
|
|
|
Tag string
|
|
|
|
}{
|
|
|
|
{"library/hello-world:1.0", "library/hello-world", "1.0"},
|
|
|
|
{"library/hello-world", "library/hello-world", "latest"},
|
|
|
|
{"library/hello-world:latest", "library/hello-world", "latest"},
|
|
|
|
{"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""},
|
|
|
|
}
|
|
|
|
for _, test := range tests {
|
|
|
|
t.Run(test.Image, func(t *testing.T) {
|
|
|
|
repo, tag := parseDockerImage(test.Image)
|
|
|
|
require.Equal(t, test.Repo, repo)
|
|
|
|
require.Equal(t, test.Tag, tag)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerImageRef(t *testing.T) {
|
|
|
|
tests := []struct {
|
|
|
|
Image string
|
|
|
|
Repo string
|
|
|
|
Tag string
|
|
|
|
}{
|
|
|
|
{"library/hello-world:1.0", "library/hello-world", "1.0"},
|
|
|
|
{"library/hello-world:latest", "library/hello-world", "latest"},
|
|
|
|
{"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""},
|
|
|
|
}
|
|
|
|
for _, test := range tests {
|
|
|
|
t.Run(test.Image, func(t *testing.T) {
|
|
|
|
image := dockerImageRef(test.Repo, test.Tag)
|
|
|
|
require.Equal(t, test.Image, image)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDockerDriver_CPUCFSPeriod(t *testing.T) {
|
|
|
|
if !tu.IsTravis() {
|
|
|
|
t.Parallel()
|
|
|
|
}
|
|
|
|
if !testutil.DockerIsConnected(t) {
|
|
|
|
t.Skip("Docker not connected")
|
|
|
|
}
|
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
task, cfg, _ := dockerTask(t)
|
|
|
|
cfg.CPUHardLimit = true
|
|
|
|
cfg.CPUCFSPeriod = 1000000
|
|
|
|
require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-12 12:39:55 +00:00
|
|
|
client, _, handle, cleanup := dockerSetup(t, task)
|
2018-11-06 05:39:48 +00:00
|
|
|
defer cleanup()
|
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
waitForExist(t, client, handle.containerID)
|
2018-11-06 05:39:48 +00:00
|
|
|
|
2018-11-20 02:51:26 +00:00
|
|
|
container, err := client.InspectContainer(handle.containerID)
|
2018-11-12 12:39:55 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Equal(t, cfg.CPUCFSPeriod, container.HostConfig.CPUPeriod)
|
|
|
|
}
|
2018-11-09 04:38:47 +00:00
|
|
|
|
|
|
|
func waitForExist(t *testing.T, client *docker.Client, containerID string) {
|
|
|
|
tu.WaitForResult(func() (bool, error) {
|
|
|
|
container, err := client.InspectContainer(containerID)
|
|
|
|
if err != nil {
|
|
|
|
if _, ok := err.(*docker.NoSuchContainer); !ok {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return container != nil, nil
|
|
|
|
}, func(err error) {
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func copyImage(t *testing.T, taskDir *allocdir.TaskDir, image string) {
|
|
|
|
dst := filepath.Join(taskDir.LocalDir, image)
|
|
|
|
copyFile(filepath.Join("./test-resources/docker", image), dst, t)
|
|
|
|
}
|
|
|
|
|
|
|
|
// copyFile moves an existing file to the destination
|
|
|
|
func copyFile(src, dst string, t *testing.T) {
|
|
|
|
in, err := os.Open(src)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
|
|
|
|
}
|
|
|
|
defer in.Close()
|
|
|
|
out, err := os.Create(dst)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
if err := out.Close(); err != nil {
|
|
|
|
t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
if _, err = io.Copy(out, in); err != nil {
|
|
|
|
t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
|
|
|
|
}
|
|
|
|
if err := out.Sync(); err != nil {
|
|
|
|
t.Fatalf("copying %v -> %v failed: %v", src, dst, err)
|
|
|
|
}
|
2018-11-06 05:39:48 +00:00
|
|
|
}
|