docker: finished porting tests

Nick Ethier 2018-11-12 07:39:55 -05:00
parent 69049d37f5
commit ee51cb6a93
No known key found for this signature in database
GPG key ID: 07C1A3ECED90D24A
6 changed files with 238 additions and 278 deletions


@@ -2,6 +2,7 @@ package docker
 import (
 	"fmt"
+	"sync"
 	"testing"
 	"time"
@@ -16,6 +17,7 @@ type mockImageClient struct {
 	idToName  map[string]string
 	removed   map[string]int
 	pullDelay time.Duration
+	lock      sync.Mutex
 }

 func newMockImageClient(idToName map[string]string, pullDelay time.Duration) *mockImageClient {
@@ -29,17 +31,23 @@ func newMockImageClient(idToName map[string]string, pullDelay time.Duration) *mo
 func (m *mockImageClient) PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error {
 	time.Sleep(m.pullDelay)
+	m.lock.Lock()
+	defer m.lock.Unlock()
 	m.pulled[opts.Repository]++
 	return nil
 }

 func (m *mockImageClient) InspectImage(id string) (*docker.Image, error) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
 	return &docker.Image{
 		ID: m.idToName[id],
 	}, nil
 }

 func (m *mockImageClient) RemoveImage(id string) error {
+	m.lock.Lock()
+	defer m.lock.Unlock()
 	m.removed[id]++
 	return nil
 }
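Note on the lock added above: TestDockerCoordinator_ConcurrentPulls (next hunk) drives this mock from ten goroutines at once, so every map the mock touches has to be guarded or `go test -race` flags it. A minimal, self-contained sketch of the same pattern; the countingMock name is illustrative, not from this commit:

package main

import (
	"fmt"
	"sync"
)

type countingMock struct {
	lock   sync.Mutex
	pulled map[string]int
}

func (m *countingMock) Pull(repo string) {
	m.lock.Lock()
	defer m.lock.Unlock()
	m.pulled[repo]++ // map writes are not atomic; the lock makes this safe
}

func (m *countingMock) Pulls(repo string) int {
	m.lock.Lock()
	defer m.lock.Unlock()
	return m.pulled[repo] // reads need the same lock as writes
}

func main() {
	m := &countingMock{pulled: map[string]int{}}
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); m.Pull("busybox") }()
	}
	wg.Wait()
	fmt.Println(m.Pulls("busybox")) // 10
}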
@@ -62,19 +70,23 @@ func TestDockerCoordinator_ConcurrentPulls(t *testing.T) {
 	// Create a coordinator
 	coordinator := NewDockerCoordinator(config)

-	id := ""
-	for i := 0; i < 10; i++ {
+	id, _ := coordinator.PullImage(image, nil, uuid.Generate(), nil)
+	for i := 0; i < 9; i++ {
 		go func() {
-			id, _ = coordinator.PullImage(image, nil, uuid.Generate(), nil)
+			coordinator.PullImage(image, nil, uuid.Generate(), nil)
 		}()
 	}

 	testutil.WaitForResult(func() (bool, error) {
+		mock.lock.Lock()
+		defer mock.lock.Unlock()
 		p := mock.pulled[image]
 		if p >= 10 {
 			return false, fmt.Errorf("Wrong number of pulls: %d", p)
 		}

+		coordinator.imageLock.Lock()
+		defer coordinator.imageLock.Unlock()
 		// Check the reference count
 		if references := coordinator.imageRefCount[id]; len(references) != 10 {
 			return false, fmt.Errorf("Got reference count %d; want %d", len(references), 10)
@@ -143,6 +155,8 @@ func TestDockerCoordinator_Pull_Remove(t *testing.T) {
 	// Check that only one delete happened
 	testutil.WaitForResult(func() (bool, error) {
+		mock.lock.Lock()
+		defer mock.lock.Unlock()
 		removes := mock.removed[id]
 		return removes == 1, fmt.Errorf("Wrong number of removes: %d", removes)
 	}, func(err error) {
@@ -150,9 +164,11 @@ func TestDockerCoordinator_Pull_Remove(t *testing.T) {
 	})

 	// Make sure there is no future still
+	coordinator.imageLock.Lock()
 	if _, ok := coordinator.deleteFuture[id]; ok {
 		t.Fatal("Got delete future")
 	}
+	coordinator.imageLock.Unlock()
 }

 func TestDockerCoordinator_Remove_Cancel(t *testing.T) {
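For context, testutil.WaitForResult is the polling helper these assertions run inside: it retries a check until the check returns true or time runs out, then hands the last error to a failure callback. A simplified stand-in with the same shape (the timings are invented; this is not Nomad's implementation):

package main

import (
	"fmt"
	"time"
)

func waitForResult(test func() (bool, error), onFail func(error)) {
	deadline := time.Now().Add(5 * time.Second)
	var err error
	for time.Now().Before(deadline) {
		var ok bool
		if ok, err = test(); ok {
			return
		}
		time.Sleep(25 * time.Millisecond) // back off between polls
	}
	onFail(err)
}

func main() {
	pulls := 0
	waitForResult(func() (bool, error) {
		pulls++ // stand-in for checking mock.pulled[image]
		if pulls < 3 {
			return false, fmt.Errorf("want 3 pulls, have %d", pulls)
		}
		return true, nil
	}, func(err error) {
		panic(err)
	})
	fmt.Println("condition reached after", pulls, "polls")
}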


@@ -561,6 +561,8 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *struc
 		return nil, nil, fmt.Errorf("Failed to create container configuration for image %q (%q): %v", driverConfig.Image, id, err)
 	}

+	startAttempts := 0
+CREATE:
 	container, err := d.createContainer(client, containerCfg, &driverConfig)
 	if err != nil {
 		d.logger.Error("failed to create container", "error", err)
@@ -576,6 +578,16 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *struc
 	// Start the container
 	if err := d.startContainer(container); err != nil {
 		d.logger.Error("failed to start container", "container_id", container.ID, "error", err)
+		client.RemoveContainer(docker.RemoveContainerOptions{
+			ID:    container.ID,
+			Force: true,
+		})
+		// Some sort of docker race bug, recreating the container usually works
+		if strings.Contains(err.Error(), "OCI runtime create failed: container with id exists:") && startAttempts < 5 {
+			startAttempts++
+			d.logger.Debug("reattempting container create/start sequence", "attempt", startAttempts, "container_id", id)
+			goto CREATE
+		}
 		return nil, nil, nstructs.NewRecoverableError(fmt.Errorf("Failed to start container %s: %s", container.ID, err), nstructs.IsRecoverable(err))
 	}
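The CREATE label and goto above amount to a bounded retry of the create/start sequence, re-run only for the one known racy Docker failure. The same logic as a plain loop, sketched with a stand-in createAndStart closure (hypothetical; only the substring check and the five-attempt bound come from the hunk):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// isIDExists mirrors the substring check in the diff.
func isIDExists(err error) bool {
	return strings.Contains(err.Error(), "container with id exists:")
}

// startWithRetry retries only the known racy failure, up to 5 extra
// attempts, matching the startAttempts < 5 bound above.
func startWithRetry(createAndStart func() error) error {
	var err error
	for attempt := 0; attempt <= 5; attempt++ {
		if err = createAndStart(); err == nil {
			return nil
		}
		if !isIDExists(err) {
			return err // any other failure is surfaced immediately
		}
	}
	return err
}

func main() {
	calls := 0
	err := startWithRetry(func() error {
		calls++
		if calls < 3 {
			return errors.New("OCI runtime create failed: container with id exists: abc")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}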
@@ -962,6 +974,7 @@ func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *T
 	if err != nil {
 		return c, err
 	}
+	logger.Trace("binding volumes", "volumes", binds)

 	// create the config block that will later be consumed by go-dockerclient
 	config := &docker.Config{
@@ -998,6 +1011,8 @@ func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *T
 	// multiply the time by the number of cores available
 	// See https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-cpu
 	if driverConfig.CPUHardLimit {
+		numCores := runtime.NumCPU()
+		percentTicks := float64(task.Resources.NomadResources.CPU) / float64(task.Resources.NomadResources.CPU)
 		if driverConfig.CPUCFSPeriod < 0 || driverConfig.CPUCFSPeriod > 1000000 {
 			return c, fmt.Errorf("invalid value for cpu_cfs_period")
 		}
@@ -1005,7 +1020,7 @@ func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *T
 			driverConfig.CPUCFSPeriod = task.Resources.LinuxResources.CPUPeriod
 		}
 		hostConfig.CPUPeriod = driverConfig.CPUCFSPeriod
-		hostConfig.CPUQuota = task.Resources.LinuxResources.CPUQuota
+		hostConfig.CPUQuota = int64(percentTicks*float64(driverConfig.CPUCFSPeriod)) * int64(numCores)
 	}

 	// Windows does not support MemorySwap/MemorySwappiness #2193
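A worked example of the quota formula introduced above. Note that percentTicks, as committed, divides the task's CPU ticks by themselves and therefore always evaluates to 1.0, so the quota reduces to period times cores:

package main

import "fmt"

func main() {
	// CPUQuota = percentTicks * CPUCFSPeriod * numCores, per the hunk above.
	taskCPU := 512.0 // MHz of CPU shares reserved for the task
	percentTicks := taskCPU / taskCPU // always 1.0 as written in the commit
	period := int64(100000) // a common CFS period, in microseconds
	numCores := 8

	quota := int64(percentTicks*float64(period)) * int64(numCores)
	fmt.Println(quota) // 800000: the task may use all 8 cores every period
}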
@@ -1081,6 +1096,19 @@ func (d *Driver) createContainerConfig(task *drivers.TaskConfig, driverConfig *T
 	if len(driverConfig.Devices) > 0 {
 		var devices []docker.Device
 		for _, device := range driverConfig.Devices {
+			if device.HostPath == "" {
+				return c, fmt.Errorf("host path must be set in configuration for devices")
+			}
+			if device.CgroupPermissions != "" {
+				for _, char := range device.CgroupPermissions {
+					ch := string(char)
+					if ch != "r" && ch != "w" && ch != "m" {
+						return c, fmt.Errorf("invalid cgroup permission string: %q", device.CgroupPermissions)
+					}
+				}
+			} else {
+				device.CgroupPermissions = "rwm"
+			}
 			dev := docker.Device{
 				PathOnHost:      device.HostPath,
 				PathInContainer: device.ContainerPath,
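The validation added above accepts any string built from r (read), w (write), and m (mknod), and substitutes "rwm" for an empty string. The same check as a standalone sketch:

package main

import "fmt"

// validPerms reimplements the loop from the hunk above.
func validPerms(perms string) bool {
	for _, ch := range perms {
		if ch != 'r' && ch != 'w' && ch != 'm' {
			return false
		}
	}
	return true
}

func main() {
	for _, p := range []string{"", "r", "rw", "rwm", "rxb"} {
		fmt.Printf("%-5q -> %v\n", p, validPerms(p))
	}
	// Only "rxb" is rejected, matching the "invalid cgroup permission
	// string" case exercised by the test file further down.
}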


@@ -5,8 +5,12 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"strings"
 	"testing"
+	"time"

+	"github.com/hashicorp/nomad/client/testutil"
+	tu "github.com/hashicorp/nomad/testutil"
 	"github.com/stretchr/testify/require"
 )
@@ -40,7 +44,6 @@ func TestDockerDriver_authFromHelper(t *testing.T) {
 	require.Equal(t, []byte("https://registry.local:5000"), content)
 }

-/*
 func TestDockerDriver_PidsLimit(t *testing.T) {
 	if !tu.IsTravis() {
 		t.Parallel()
@@ -48,49 +51,33 @@ func TestDockerDriver_PidsLimit(t *testing.T) {
 	if !testutil.DockerIsConnected(t) {
 		t.Skip("Docker not connected")
 	}
+	require := require.New(t)

-	task, _, _ := dockerTask(t)
-	task.Config["pids_limit"] = "1"
-	task.Config["command"] = "/bin/sh"
-	task.Config["args"] = []string{"-c", "sleep 2 & sleep 2"}
+	task, cfg, _ := dockerTask(t)
+	cfg.PidsLimit = 1
+	cfg.Command = "/bin/sh"
+	cfg.Args = []string{"-c", "sleep 2 & sleep 2"}
+	require.NoError(task.EncodeConcreteDriverConfig(cfg))

-	ctx := testDockerDriverContexts(t, task)
-	defer ctx.Destroy()
-	d := NewDockerDriver(ctx.DriverCtx)
+	_, driver, _, cleanup := dockerSetup(t, task)
+	defer cleanup()

-	// Copy the image into the task's directory
-	copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar")
-	_, err := d.Prestart(ctx.ExecCtx, task)
-	if err != nil {
-		t.Fatalf("error in prestart: %v", err)
-	}
-	resp, err := d.Start(ctx.ExecCtx, task)
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	defer resp.Handle.Kill()
-
-	select {
-	case res := <-resp.Handle.WaitCh():
-		if res.Successful() {
-			t.Fatalf("expected error, but container exited successful")
-		}
-	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
-		t.Fatalf("timeout")
-	}
+	driver.WaitUntilStarted(task.ID, time.Duration(tu.TestMultiplier()*5)*time.Second)

 	// XXX Logging doesn't work on OSX so just test on Linux
 	// Check that data was written to the directory.
-	outputFile := filepath.Join(ctx.ExecCtx.TaskDir.LogDir, "redis-demo.stderr.0")
-	act, err := ioutil.ReadFile(outputFile)
-	if err != nil {
-		t.Fatalf("Couldn't read expected output: %v", err)
-	}
-
-	exp := "can't fork"
-	if !strings.Contains(string(act), exp) {
-		t.Fatalf("Expected failed fork: %q", act)
-	}
-}*/
+	outputFile := filepath.Join(task.TaskDir().LogDir, "redis-demo.stderr.0")
+	exp := "can't fork"
+	tu.WaitForResult(func() (bool, error) {
+		act, err := ioutil.ReadFile(outputFile)
+		if err != nil {
+			return false, err
+		}
+		if !strings.Contains(string(act), exp) {
+			return false, fmt.Errorf("Expected %q in output %q", exp, string(act))
+		}
+		return true, nil
+	}, func(err error) {
+		require.NoError(err)
+	})
+}


@@ -597,7 +597,7 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
 	select {
 	case res := <-waitCh:
 		if !res.Successful() {
-			require.Fail(t, "ExitResult should be successful: %v", res)
+			require.Fail(t, fmt.Sprintf("ExitResult should be successful: %v", res))
 		}
 	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
 		require.Fail(t, "timeout")
@@ -648,10 +648,10 @@ func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
 	defer d.DestroyTask(task.ID, true)

-	go func() {
+	go func(t *testing.T) {
 		time.Sleep(100 * time.Millisecond)
 		require.NoError(t, d.StopTask(task.ID, time.Second, "SIGINT"))
-	}()
+	}(t)

 	// Attempt to wait
 	waitCh, err := d.WaitTask(context.Background(), task.ID)
@@ -910,7 +910,7 @@ func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) {
 	}
 	task := &drivers.TaskConfig{
 		ID:        uuid.Generate(),
-		Name:      "busybox-demo",
+		Name:      "busybox",
 		Resources: basicResources,
 	}
 	require.NoError(task.EncodeConcreteDriverConfig(&taskCfg))
@@ -922,8 +922,7 @@ func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) {
 	_, _, err = d.StartTask(task)
 	require.NoError(err)
-
-	d.WaitUntilStarted(task.ID, 5*time.Second)
+	require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second))

 	defer d.DestroyTask(task.ID, true)
@@ -934,9 +933,7 @@ func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) {
 	require.True(ok)

 	_, err = client.InspectContainer(handle.container.ID)
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
+	require.NoError(err)
 }

 func TestDockerDriver_Sysctl_Ulimit(t *testing.T) {
@@ -1755,7 +1752,6 @@ func TestDockerDriver_Cleanup(t *testing.T) {
 }

-/*
 func TestDockerDriver_AuthConfiguration(t *testing.T) {
 	if !tu.IsTravis() {
 		t.Parallel()
@@ -1802,15 +1798,10 @@ func TestDockerDriver_AuthConfiguration(t *testing.T) {
 		},
 	}

-	for i, c := range cases {
+	for _, c := range cases {
 		act, err := authFromDockerConfig(path)(c.Repo)
-		if err != nil {
-			t.Fatalf("Test %d failed: %v", i+1, err)
-		}
-
-		if !reflect.DeepEqual(act, c.AuthConfig) {
-			t.Fatalf("Test %d failed: Unexpected auth config: got %+v; want %+v", i+1, act, c.AuthConfig)
-		}
+		require.NoError(t, err)
+		require.Exactly(t, c.AuthConfig, act)
 	}
 }
@@ -1822,38 +1813,32 @@ func TestDockerDriver_OOMKilled(t *testing.T) {
 		t.Skip("Docker not connected")
 	}

-	task := &structs.Task{
-		Name:   "oom-killed",
-		Driver: "docker",
-		Config: map[string]interface{}{
-			"image":   "busybox",
-			"load":    "busybox.tar",
-			"command": "sh",
-			// Incrementally creates a bigger and bigger variable.
-			"args": []string{"-c", "x=a; while true; do eval x='$x$x'; done"},
-		},
-		LogConfig: &structs.LogConfig{
-			MaxFiles:      10,
-			MaxFileSizeMB: 10,
-		},
-		Resources: &structs.Resources{
-			CPU:      250,
-			MemoryMB: 10,
-			DiskMB:   20,
-			Networks: []*structs.NetworkResource{},
-		},
+	cfg := &TaskConfig{
+		Image:     "busybox",
+		LoadImage: "busybox.tar",
+		Command:   "sh",
+		Args:      []string{"-c", "x=a; while true; do eval x='$x$x'; done"},
 	}
+	task := &drivers.TaskConfig{
+		ID:        uuid.Generate(),
+		Name:      "oom-killed",
+		Resources: basicResources,
+	}
+	task.Resources.LinuxResources.MemoryLimitBytes = 4 * 1024 * 1024
+	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

-	_, handle, cleanup := dockerSetup(t, task)
+	_, driver, _, cleanup := dockerSetup(t, task)
 	defer cleanup()
+	waitCh, err := driver.WaitTask(context.Background(), task.ID)
+	require.NoError(t, err)

 	select {
-	case res := <-handle.WaitCh():
+	case res := <-waitCh:
 		if res.Successful() {
 			t.Fatalf("expected error, but container exited successful")
 		}
-		if res.Err.Error() != "OOM Killed" {
+		if !res.OOMKilled {
 			t.Fatalf("not killed by OOM killer: %s", res.Err)
 		}
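The port replaces matching on the "OOM Killed" error string with a structured OOMKilled flag on the exit result. A sketch of how a driver could populate such a flag from Docker's reported container state; these types are illustrative, not Nomad's:

package main

import "fmt"

type ExitResult struct {
	ExitCode  int
	OOMKilled bool
}

// containerState stands in for the state a Docker inspect call reports;
// go-dockerclient's State struct carries an OOMKilled bool like this.
type containerState struct {
	ExitCode  int
	OOMKilled bool
}

func toExitResult(s containerState) *ExitResult {
	return &ExitResult{ExitCode: s.ExitCode, OOMKilled: s.OOMKilled}
}

func main() {
	res := toExitResult(containerState{ExitCode: 137, OOMKilled: true})
	if res.OOMKilled {
		fmt.Println("killed by the OOM killer") // what the ported test asserts
	}
}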
@@ -1872,36 +1857,36 @@ func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) {
 		t.Skip("Docker not connected")
 	}

-	brokenConfigs := []interface{}{
-		map[string]interface{}{
-			"host_path": "",
+	brokenConfigs := []DockerDevice{
+		{
+			HostPath: "",
 		},
-		map[string]interface{}{
-			"host_path":          "/dev/sda1",
-			"cgroup_permissions": "rxb",
+		{
+			HostPath:          "/dev/sda1",
+			CgroupPermissions: "rxb",
 		},
 	}

 	test_cases := []struct {
-		deviceConfig interface{}
+		deviceConfig []DockerDevice
 		err          error
 	}{
-		{[]interface{}{brokenConfigs[0]}, fmt.Errorf("host path must be set in configuration for devices")},
-		{[]interface{}{brokenConfigs[1]}, fmt.Errorf("invalid cgroup permission string: \"rxb\"")},
+		{brokenConfigs[:1], fmt.Errorf("host path must be set in configuration for devices")},
+		{brokenConfigs[1:], fmt.Errorf("invalid cgroup permission string: \"rxb\"")},
 	}

 	for _, tc := range test_cases {
-		task, _, _ := dockerTask(t)
-		task.Config["devices"] = tc.deviceConfig
+		task, cfg, _ := dockerTask(t)
+		cfg.Devices = tc.deviceConfig
+		require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

-		ctx := testDockerDriverContexts(t, task)
-		driver := NewDockerDriver(ctx.DriverCtx)
-		copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar")
-		defer ctx.Destroy()
+		d := dockerDriverHarness(t, nil)
+		cleanup := d.MkAllocDir(task, true)
+		copyImage(t, task.TaskDir(), "busybox.tar")
+		defer cleanup()

-		if _, err := driver.Prestart(ctx.ExecCtx, task); err == nil || err.Error() != tc.err.Error() {
-			t.Fatalf("error expected in prestart, got %v, expected %v", err, tc.err)
-		}
+		_, _, err := d.StartTask(task)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), tc.err.Error())
 	}
 }
@@ -1926,26 +1911,24 @@ func TestDockerDriver_Device_Success(t *testing.T) {
 		PathInContainer:   containerPath,
 		CgroupPermissions: perms,
 	}
-	config := map[string]interface{}{
-		"host_path":      hostPath,
-		"container_path": containerPath,
+	config := DockerDevice{
+		HostPath:      hostPath,
+		ContainerPath: containerPath,
 	}

-	task, _, _ := dockerTask(t)
-	task.Config["devices"] = []interface{}{config}
+	task, cfg, _ := dockerTask(t)
+	cfg.Devices = []DockerDevice{config}
+	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

-	client, handle, cleanup := dockerSetup(t, task)
+	client, driver, handle, cleanup := dockerSetup(t, task)
 	defer cleanup()
+	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))

-	waitForExist(t, client, handle)
-
-	container, err := client.InspectContainer(handle.ContainerID())
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-
-	assert.NotEmpty(t, container.HostConfig.Devices, "Expected one device")
-	assert.Equal(t, expectedDevice, container.HostConfig.Devices[0], "Incorrect device ")
+	container, err := client.InspectContainer(handle.container.ID)
+	require.NoError(t, err)
+
+	require.NotEmpty(t, container.HostConfig.Devices, "Expected one device")
+	require.Equal(t, expectedDevice, container.HostConfig.Devices[0], "Incorrect device ")
 }

 func TestDockerDriver_Entrypoint(t *testing.T) {
@@ -1957,25 +1940,24 @@ func TestDockerDriver_Entrypoint(t *testing.T) {
 	}

 	entrypoint := []string{"/bin/sh", "-c"}
-	task, _, _ := dockerTask(t)
-	task.Config["entrypoint"] = entrypoint
+	task, cfg, _ := dockerTask(t)
+	cfg.Entrypoint = entrypoint
+	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

-	client, handle, cleanup := dockerSetup(t, task)
+	client, driver, handle, cleanup := dockerSetup(t, task)
 	defer cleanup()

-	waitForExist(t, client, handle)
+	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))

-	container, err := client.InspectContainer(handle.ContainerID())
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
+	container, err := client.InspectContainer(handle.container.ID)
+	require.NoError(t, err)

 	require.Len(t, container.Config.Entrypoint, 2, "Expected one entrypoint")
 	require.Equal(t, entrypoint, container.Config.Entrypoint, "Incorrect entrypoint ")
 }

 func TestDockerDriver_Kill(t *testing.T) {
-	assert := assert.New(t)
+	require := require.New(t)
 	if !tu.IsTravis() {
 		t.Parallel()
 	}
@@ -1984,41 +1966,24 @@ func TestDockerDriver_Kill(t *testing.T) {
 	}

 	// Tasks started with a signal that is not supported should not error
-	task := &structs.Task{
+	task := &drivers.TaskConfig{
+		ID:        uuid.Generate(),
 		Name:      "nc-demo",
-		Driver:     "docker",
-		KillSignal: "SIGKILL",
-		Config: map[string]interface{}{
-			"load":    "busybox.tar",
-			"image":   "busybox",
-			"command": "/bin/nc",
-			"args":    []string{"-l", "127.0.0.1", "-p", "0"},
-		},
-		LogConfig: &structs.LogConfig{
-			MaxFiles:      10,
-			MaxFileSizeMB: 10,
-		},
 		Resources: basicResources,
 	}

-	ctx := testDockerDriverContexts(t, task)
-	defer ctx.Destroy()
-	d := NewDockerDriver(ctx.DriverCtx)
-	copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar")
-
-	_, err := d.Prestart(ctx.ExecCtx, task)
-	if err != nil {
-		t.Fatalf("error in prestart: %v", err)
+	cfg := &TaskConfig{
+		LoadImage: "busybox.tar",
+		Image:     "busybox",
+		Command:   "/bin/nc",
+		Args:      []string{"-l", "127.0.0.1", "-p", "0"},
 	}
+	require.NoError(task.EncodeConcreteDriverConfig(cfg))

-	resp, err := d.Start(ctx.ExecCtx, task)
-	assert.Nil(err)
-	assert.NotNil(resp.Handle)
-
-	handle := resp.Handle.(*DockerHandle)
-	waitForExist(t, client, handle)
-	err = handle.Kill()
-	assert.Nil(err)
+	_, driver, handle, cleanup := dockerSetup(t, task)
+	defer cleanup()
+	require.NoError(driver.WaitUntilStarted(task.ID, 5*time.Second))
+	require.NoError(handle.Kill(time.Second, os.Interrupt))
 }

 func TestDockerDriver_ReadonlyRootfs(t *testing.T) {
@@ -2029,18 +1994,18 @@ func TestDockerDriver_ReadonlyRootfs(t *testing.T) {
 		t.Skip("Docker not connected")
 	}

-	task, _, _ := dockerTask(t)
-	task.Config["readonly_rootfs"] = true
+	task, cfg, _ := dockerTask(t)
+	cfg.ReadonlyRootfs = true
+	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

-	client, handle, cleanup := dockerSetup(t, task)
+	client, driver, handle, cleanup := dockerSetup(t, task)
 	defer cleanup()
+	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))

-	waitForExist(t, client, handle)
-
-	container, err := client.InspectContainer(handle.ContainerID())
-	assert.Nil(t, err, "Error inspecting container: %v", err)
-
-	assert.True(t, container.HostConfig.ReadonlyRootfs, "ReadonlyRootfs option not set")
+	container, err := client.InspectContainer(handle.container.ID)
+	require.NoError(t, err)
+
+	require.True(t, container.HostConfig.ReadonlyRootfs, "ReadonlyRootfs option not set")
 }

 // fakeDockerClient can be used in places that accept an interface for the
@@ -2068,13 +2033,11 @@ func TestDockerDriver_VolumeError(t *testing.T) {
 	}

 	// setup
-	task, _, _ := dockerTask(t)
-	tctx := testDockerDriverContexts(t, task)
-	driver := NewDockerDriver(tctx.DriverCtx).(*DockerDriver)
-	driver.driverConfig = &DockerDriverConfig{ImageName: "test"}
+	_, cfg, _ := dockerTask(t)
+	driver := dockerDriverHarness(t, nil)

 	// assert volume error is recoverable
-	_, err := driver.createContainer(fakeDockerClient{}, docker.CreateContainerOptions{})
+	_, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg)
 	require.True(t, structs.IsRecoverable(err))
 }
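The assertion above leans on Nomad's recoverable-error convention: an error wrapped with a flag that says whether the client may retry the operation. A simplified stand-in for structs.NewRecoverableError and structs.IsRecoverable, not the real implementation:

package main

import "fmt"

type RecoverableError struct {
	Err         string
	Recoverable bool
}

func (r *RecoverableError) Error() string { return r.Err }

// NewRecoverableError tags an error with a retryability flag.
func NewRecoverableError(e error, recoverable bool) error {
	return &RecoverableError{Err: e.Error(), Recoverable: recoverable}
}

// IsRecoverable reports whether an error carries that flag.
func IsRecoverable(e error) bool {
	if r, ok := e.(*RecoverableError); ok {
		return r.Recoverable
	}
	return false
}

func main() {
	err := NewRecoverableError(fmt.Errorf("volume not ready"), true)
	fmt.Println(IsRecoverable(err)) // true: the client may retry the task
}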
@@ -2088,25 +2051,9 @@ func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
 	expectedPrefix := "2001:db8:1::242:ac11"
 	expectedAdvertise := true

-	task := &structs.Task{
-		Name:   "nc-demo",
-		Driver: "docker",
-		Config: map[string]interface{}{
-			"image":                  "busybox",
-			"load":                   "busybox.tar",
-			"command":                "/bin/nc",
-			"args":                   []string{"-l", "127.0.0.1", "-p", "0"},
-			"advertise_ipv6_address": expectedAdvertise,
-		},
-		Resources: &structs.Resources{
-			MemoryMB: 256,
-			CPU:      512,
-		},
-		LogConfig: &structs.LogConfig{
-			MaxFiles:      10,
-			MaxFileSizeMB: 10,
-		},
-	}
+	task, cfg, _ := dockerTask(t)
+	cfg.AdvertiseIPv6Addr = expectedAdvertise
+	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

 	client := newTestDockerClient(t)
@@ -2119,41 +2066,28 @@ func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
 		t.Skip("IPv6 not enabled on bridge network, skipping")
 	}

-	tctx := testDockerDriverContexts(t, task)
-	driver := NewDockerDriver(tctx.DriverCtx)
-	copyImage(t, tctx.ExecCtx.TaskDir, "busybox.tar")
-	defer tctx.Destroy()
+	driver := dockerDriverHarness(t, nil)
+	cleanup := driver.MkAllocDir(task, true)
+	copyImage(t, task.TaskDir(), "busybox.tar")
+	defer cleanup()

-	presp, err := driver.Prestart(tctx.ExecCtx, task)
-	defer driver.Cleanup(tctx.ExecCtx, presp.CreatedResources)
-	if err != nil {
-		t.Fatalf("Error in prestart: %v", err)
-	}
+	_, network, err := driver.StartTask(task)
+	defer driver.DestroyTask(task.ID, true)
+	require.NoError(t, err)

-	sresp, err := driver.Start(tctx.ExecCtx, task)
-	if err != nil {
-		t.Fatalf("Error in start: %v", err)
-	}
-
-	if sresp.Handle == nil {
-		t.Fatalf("handle is nil\nStack\n%s", debug.Stack())
-	}
-
-	assert.Equal(t, expectedAdvertise, sresp.Network.AutoAdvertise, "Wrong autoadvertise. Expect: %s, got: %s", expectedAdvertise, sresp.Network.AutoAdvertise)
-
-	if !strings.HasPrefix(sresp.Network.IP, expectedPrefix) {
-		t.Fatalf("Got IP address %q want ip address with prefix %q", sresp.Network.IP, expectedPrefix)
-	}
-
-	defer sresp.Handle.Kill()
-	handle := sresp.Handle.(*DockerHandle)
-
-	waitForExist(t, client, handle)
-
-	container, err := client.InspectContainer(handle.ContainerID())
-	if err != nil {
-		t.Fatalf("Error inspecting container: %v", err)
-	}
+	require.Equal(t, expectedAdvertise, network.AutoAdvertise, "Wrong autoadvertise. Expect: %s, got: %s", expectedAdvertise, network.AutoAdvertise)
+	if !strings.HasPrefix(network.IP, expectedPrefix) {
+		t.Fatalf("Got IP address %q want ip address with prefix %q", network.IP, expectedPrefix)
+	}
+
+	handle, ok := driver.Impl().(*Driver).tasks.Get(task.ID)
+	require.True(t, ok)
+
+	driver.WaitUntilStarted(task.ID, time.Second)
+
+	container, err := client.InspectContainer(handle.container.ID)
+	require.NoError(t, err)

 	if !strings.HasPrefix(container.NetworkSettings.GlobalIPv6Address, expectedPrefix) {
 		t.Fatalf("Got GlobalIPv6address %s want GlobalIPv6address with prefix %s", expectedPrefix, container.NetworkSettings.GlobalIPv6Address)
@@ -2206,18 +2140,21 @@ func TestDockerDriver_CPUCFSPeriod(t *testing.T) {
 		t.Skip("Docker not connected")
 	}

-	task, _, _ := dockerTask(t)
-	task.Config["cpu_hard_limit"] = true
-	task.Config["cpu_cfs_period"] = 1000000
+	task, cfg, _ := dockerTask(t)
+	cfg.CPUHardLimit = true
+	cfg.CPUCFSPeriod = 1000000
+	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

-	client, handle, cleanup := dockerSetup(t, task)
+	client, _, handle, cleanup := dockerSetup(t, task)
 	defer cleanup()

-	waitForExist(t, client, handle)
+	waitForExist(t, client, handle.container.ID)

-	container, err := client.InspectContainer(handle.ContainerID())
-	assert.Nil(t, err, "Error inspecting container: %v", err)
-}*/
+	container, err := client.InspectContainer(handle.container.ID)
+	require.NoError(t, err)
+
+	require.Equal(t, cfg.CPUCFSPeriod, container.HostConfig.CPUPeriod)
+}

 func waitForExist(t *testing.T, client *docker.Client, containerID string) {
 	tu.WaitForResult(func() (bool, error) {


@@ -2,18 +2,19 @@
 package docker

-/*
 import (
+	"context"
+	"fmt"
 	"io/ioutil"
+	"os"
 	"path/filepath"
 	"strings"
-	"syscall"
 	"testing"
 	"time"

 	"github.com/hashicorp/nomad/client/testutil"
-	"github.com/hashicorp/nomad/nomad/structs"
 	tu "github.com/hashicorp/nomad/testutil"
+	"github.com/stretchr/testify/require"
 )

 func TestDockerDriver_Signal(t *testing.T) {
@@ -24,33 +25,19 @@ func TestDockerDriver_Signal(t *testing.T) {
 		t.Skip("Docker not connected")
 	}

-	task := &structs.Task{
-		Name:   "redis-demo",
-		Driver: "docker",
-		Config: map[string]interface{}{
-			"image":   "busybox",
-			"load":    "busybox.tar",
-			"command": "/bin/sh",
-			"args":    []string{"local/test.sh"},
-		},
-		Resources: &structs.Resources{
-			MemoryMB: 256,
-			CPU:      512,
-		},
-		LogConfig: &structs.LogConfig{
-			MaxFiles:      10,
-			MaxFileSizeMB: 10,
-		},
-	}
+	task, cfg, _ := dockerTask(t)
+	cfg.Command = "/bin/sh"
+	cfg.Args = []string{"local/test.sh"}
+	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

-	ctx := testDockerDriverContexts(t, task)
-	defer ctx.Destroy()
-	d := NewDockerDriver(ctx.DriverCtx)
+	driver := dockerDriverHarness(t, nil)
+	cleanup := driver.MkAllocDir(task, true)
+	defer cleanup()

 	// Copy the image into the task's directory
-	copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar")
+	copyImage(t, task.TaskDir(), "busybox.tar")

-	testFile := filepath.Join(ctx.ExecCtx.TaskDir.LocalDir, "test.sh")
+	testFile := filepath.Join(task.TaskDir().LocalDir, "test.sh")
 	testData := []byte(`
 at_term() {
 	echo 'Terminated.' > $NOMAD_TASK_DIR/output
@@ -62,38 +49,30 @@ while true; do
 	sleep 0.2
 done
 `)
-	if err := ioutil.WriteFile(testFile, testData, 0777); err != nil {
-		t.Fatalf("Failed to write data: %v", err)
-	}
+	require.NoError(t, ioutil.WriteFile(testFile, testData, 0777))
+	_, _, err := driver.StartTask(task)
+	require.NoError(t, err)
+	defer driver.DestroyTask(task.ID, true)
+	require.NoError(t, driver.WaitUntilStarted(task.ID, time.Duration(tu.TestMultiplier()*5)*time.Second))
+	handle, ok := driver.Impl().(*Driver).tasks.Get(task.ID)
+	require.True(t, ok)

-	_, err := d.Prestart(ctx.ExecCtx, task)
-	if err != nil {
-		t.Fatalf("error in prestart: %v", err)
-	}
-	resp, err := d.Start(ctx.ExecCtx, task)
-	if err != nil {
-		t.Fatalf("err: %v", err)
-	}
-	defer resp.Handle.Kill()
-
-	waitForExist(t, resp.Handle.(*DockerHandle).client, resp.Handle.(*DockerHandle))
-
-	time.Sleep(1 * time.Second)
-	if err := resp.Handle.Signal(syscall.SIGINT); err != nil {
-		t.Fatalf("Signal returned an error: %v", err)
-	}
+	waitForExist(t, newTestDockerClient(t), handle.container.ID)
+	require.NoError(t, handle.Kill(time.Duration(tu.TestMultiplier()*5)*time.Second, os.Interrupt))

+	waitCh, err := driver.WaitTask(context.Background(), task.ID)
+	require.NoError(t, err)
 	select {
-	case res := <-resp.Handle.WaitCh():
+	case res := <-waitCh:
 		if res.Successful() {
-			t.Fatalf("should err: %v", res)
+			require.Fail(t, "should err: %v", res)
 		}
 	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
-		t.Fatalf("timeout")
+		require.Fail(t, "timeout")
 	}

 	// Check the log file to see it exited because of the signal
-	outputFile := filepath.Join(ctx.ExecCtx.TaskDir.LocalDir, "output")
+	outputFile := filepath.Join(task.TaskDir().LocalDir, "output")
 	act, err := ioutil.ReadFile(outputFile)
 	if err != nil {
 		t.Fatalf("Couldn't read expected output: %v", err)
@@ -103,4 +82,17 @@ done
 	if strings.TrimSpace(string(act)) != exp {
 		t.Fatalf("Command outputted %v; want %v", act, exp)
 	}
-}*/
+}
+
+func TestDockerDriver_containerBinds(t *testing.T) {
+	task, cfg, _ := dockerTask(t)
+	driver := dockerDriverHarness(t, nil)
+	cleanup := driver.MkAllocDir(task, false)
+	defer cleanup()
+
+	binds, err := driver.Impl().(*Driver).containerBinds(task, cfg)
+	require.NoError(t, err)
+	require.Contains(t, binds, fmt.Sprintf("%s:/alloc", task.TaskDir().SharedAllocDir))
+	require.Contains(t, binds, fmt.Sprintf("%s:/local", task.TaskDir().LocalDir))
+	require.Contains(t, binds, fmt.Sprintf("%s:/secrets", task.TaskDir().SecretsDir))
+}
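The new TestDockerDriver_containerBinds above pins down Docker's host:container bind syntax for the three standard task directories. A sketch of the shape it asserts, with illustrative paths:

package main

import "fmt"

// containerBinds sketches the expected output: each entry mounts a host
// directory into the container at a fixed path, "host:container" style.
func containerBinds(allocDir, localDir, secretsDir string) []string {
	return []string{
		fmt.Sprintf("%s:/alloc", allocDir),
		fmt.Sprintf("%s:/local", localDir),
		fmt.Sprintf("%s:/secrets", secretsDir),
	}
}

func main() {
	binds := containerBinds(
		"/var/nomad/alloc/abc/alloc",        // shared alloc dir
		"/var/nomad/alloc/abc/task/local",   // task-local dir
		"/var/nomad/alloc/abc/task/secrets", // secrets dir
	)
	for _, b := range binds {
		fmt.Println(b)
	}
}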


@@ -54,7 +54,7 @@ func loadDockerConfig(file string) (*configfile.ConfigFile, error) {
 // parseRepositoryInfo takes a repo and returns the Docker RepositoryInfo. This
 // is useful for interacting with a Docker config object.
 func parseRepositoryInfo(repo string) (*registry.RepositoryInfo, error) {
-	name, err := reference.ParseNamed(repo)
+	name, err := reference.ParseNormalizedNamed(repo)
 	if err != nil {
 		return nil, fmt.Errorf("Failed to parse named repo %q: %v", repo, err)
 	}
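The one-line swap above matters because reference.ParseNamed rejects familiar short names such as "busybox", while ParseNormalizedNamed first applies Docker's normalization to docker.io/library/busybox. A sketch, assuming the vendored github.com/docker/distribution/reference package:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Normalization expands the familiar short name.
	name, err := reference.ParseNormalizedNamed("busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(name.Name()) // docker.io/library/busybox

	// ParseNamed on the same input fails, since the name is not canonical.
	_, err = reference.ParseNamed("busybox")
	fmt.Println(err != nil) // true
}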