Merge pull request #4975 from hashicorp/fix-master-20181209

Some test fixes and remedies
Mahmood Ali 2018-12-11 18:00:21 -05:00 committed by GitHub
commit 69b2355274
11 changed files with 141 additions and 68 deletions
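The recurring fix across these files has one shape: move a blocking shutdown or cleanup call into a goroutine and bound it with a select timeout, so a hung teardown fails the test instead of wedging the whole run. A minimal standalone sketch of that pattern (the helper name and timeout are illustrative, not part of this commit):

package main

import (
	"errors"
	"fmt"
	"time"
)

// shutdownWithTimeout runs fn in a goroutine and waits at most limit
// for it to finish, mirroring the select-on-channel-or-time.After
// pattern used throughout this change.
func shutdownWithTimeout(fn func() error, limit time.Duration) error {
	ch := make(chan error, 1)
	go func() {
		ch <- fn()
	}()
	select {
	case err := <-ch:
		return err
	case <-time.After(limit):
		return errors.New("timed out during shutdown")
	}
}

func main() {
	err := shutdownWithTimeout(func() error {
		time.Sleep(10 * time.Millisecond) // stand-in for a real Shutdown()
		return nil
	}, time.Minute)
	fmt.Println("shutdown result:", err)
}

Buffering the channel (capacity 1) lets the goroutine send its result and exit even after the caller has timed out, so nothing leaks.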


@@ -264,7 +264,7 @@ test-nomad: dev ## Run Nomad test suites
 	$(if $(ENABLE_RACE),GORACE="strip_path_prefix=$(GOPATH)/src") go test \
 		$(if $(ENABLE_RACE),-race) $(if $(VERBOSE),-v) \
 		-cover \
-		-timeout=30m \
+		-timeout=15m \
 		-tags="$(if $(HAS_LXC),lxc)" ./... $(if $(VERBOSE), >test.log ; echo $$? > exit-code)
 	@if [ $(VERBOSE) ] ; then \
 		bash -C "$(PROJECT_ROOT)/scripts/test_check.sh" ; \


@@ -426,7 +426,7 @@ func TestFingerprintManager_Run_Combination(t *testing.T) {
 	testClient, cleanup := TestClient(t, func(c *config.Config) {
 		c.Options = map[string]string{
 			"fingerprint.whitelist": " arch,cpu,memory,foo,bar ",
-			"fingerprint.blacklist": " memory,nomad ",
+			"fingerprint.blacklist": " memory,host ",
 		}
 	})
 	defer cleanup()
@@ -449,7 +449,7 @@ func TestFingerprintManager_Run_Combination(t *testing.T) {
 	require.NotEqual(node.Attributes["cpu.frequency"], "")
 	require.NotEqual(node.Attributes["cpu.arch"], "")
 	require.NotContains(node.Attributes, "memory.totalbytes")
-	require.NotContains(node.Attributes, "nomad.version")
+	require.NotContains(node.Attributes, "os.name")
 }

 func TestFingerprintManager_Run_WhitelistDrivers(t *testing.T) {


@@ -1,6 +1,8 @@
 package client

 import (
+	"time"
+
 	"github.com/hashicorp/nomad/client/config"
 	consulApi "github.com/hashicorp/nomad/client/consul"
 	"github.com/hashicorp/nomad/client/fingerprint"
@@ -47,10 +49,26 @@ func TestClient(t testing.T, cb func(c *config.Config)) (*Client, func()) {
 		t.Fatalf("err: %v", err)
 	}
 	return client, func() {
-		// Shutdown client
-		client.Shutdown()
+		ch := make(chan error)

-		// Call TestClientConfig cleanup
-		cleanup()
+		go func() {
+			defer close(ch)
+
+			// Shutdown client
+			err := client.Shutdown()
+			if err != nil {
+				t.Errorf("failed to shutdown client: %v", err)
+			}
+
+			// Call TestClientConfig cleanup
+			cleanup()
+		}()
+
+		select {
+		case <-ch:
+			// all good
+		case <-time.After(1 * time.Minute):
+			t.Errorf("timed out cleaning up test client")
+		}
 	}
 }


@@ -240,8 +240,19 @@ func (a *TestAgent) Shutdown() error {
 	}()

 	// shutdown agent before endpoints
-	a.Server.Shutdown()
-	return a.Agent.Shutdown()
+	ch := make(chan error, 1)
+	go func() {
+		defer close(ch)
+		a.Server.Shutdown()
+		ch <- a.Agent.Shutdown()
+	}()
+
+	select {
+	case err := <-ch:
+		return err
+	case <-time.After(1 * time.Minute):
+		return fmt.Errorf("timed out while shutting down test agent")
+	}
 }

 func (a *TestAgent) HTTPAddr() string {


@@ -72,7 +72,7 @@ func dockerTask(t *testing.T) (*drivers.TaskConfig, *TaskConfig, []int) {
 	dockerDynamic := ports[1]
 	cfg := TaskConfig{
-		Image:     "busybox",
+		Image:     "busybox:latest",
 		LoadImage: "busybox.tar",
 		Command:   "/bin/nc",
 		Args:      []string{"-l", "127.0.0.1", "-p", "0"},


@@ -25,6 +25,7 @@ import (
 	pstructs "github.com/hashicorp/nomad/plugins/shared/structs"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/stretchr/testify/require"
+	"golang.org/x/sys/unix"
 )

 func TestMain(m *testing.M) {
@@ -189,38 +190,35 @@ func TestRawExecDriver_StartWaitStop(t *testing.T) {
 	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
 	require.NoError(err)

-	var wg sync.WaitGroup
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		result := <-ch
-		require.Equal(2, result.Signal)
-	}()
-
 	require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second))
-	wg.Add(1)
+
 	go func() {
-		defer wg.Done()
-		err := harness.StopTask(task.ID, 2*time.Second, "SIGINT")
-		require.NoError(err)
-	}()
-
-	waitCh := make(chan struct{})
-	go func() {
-		defer close(waitCh)
-		wg.Wait()
+		harness.StopTask(task.ID, 2*time.Second, "SIGINT")
 	}()

 	select {
-	case <-waitCh:
-		status, err := harness.InspectTask(task.ID)
-		require.NoError(err)
-		require.Equal(drivers.TaskStateExited, status.State)
-	case <-time.After(1 * time.Second):
+	case result := <-ch:
+		require.Equal(int(unix.SIGINT), result.Signal)
+	case <-time.After(10 * time.Second):
 		require.Fail("timeout waiting for task to shutdown")
 	}

+	// Ensure that the task is marked as dead, but account
+	// for WaitTask() closing channel before internal state is updated
+	testutil.WaitForResult(func() (bool, error) {
+		status, err := harness.InspectTask(task.ID)
+		if err != nil {
+			return false, fmt.Errorf("inspecting task failed: %v", err)
+		}
+		if status.State != drivers.TaskStateExited {
+			return false, fmt.Errorf("task hasn't exited yet; status: %v", status.State)
+		}
+		return true, nil
+	}, func(err error) {
+		require.NoError(err)
+	})
+
 	require.NoError(harness.DestroyTask(task.ID, true))
 }


@@ -14,6 +14,7 @@ import (
 	"time"

 	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
+	"golang.org/x/sys/unix"

 	"github.com/hashicorp/hcl2/hcl"
 	ctestutil "github.com/hashicorp/nomad/client/testutil"
@@ -121,52 +122,39 @@ func TestRktDriver_Start_Wait_Stop_DNS(t *testing.T) {
 	require.NoError(err)
 	require.Nil(driverNet)

 	// Wait for task to start
 	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
 	require.NoError(err)

-	var wg sync.WaitGroup
-	wg.Add(1)
-
-	// Block on the channel returned by wait task
-	go func() {
-		defer wg.Done()
-		result := <-ch
-		require.Equal(15, result.Signal)
-	}()
-
 	// Wait until task started
 	require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second))

-	// Add to the wait group
-	wg.Add(1)
-
-	// Stop the task
 	go func() {
-		defer wg.Done()
-		err := harness.StopTask(task.ID, 1*time.Second, "SIGTERM")
-		require.NoError(err)
+		harness.StopTask(task.ID, 2*time.Second, "SIGTERM")
 	}()

-	// Wait on the wait group
-	waitCh := make(chan struct{})
-	go func() {
-		defer close(waitCh)
-		wg.Wait()
-	}()
-
-	// Verify that the task exited
 	select {
-	case <-waitCh:
-		status, err := harness.InspectTask(task.ID)
-		require.NoError(err)
-		require.Equal(drivers.TaskStateExited, status.State)
-	case <-time.After(2 * time.Second):
+	case result := <-ch:
+		require.Equal(int(unix.SIGTERM), result.Signal)
+	case <-time.After(10 * time.Second):
 		require.Fail("timeout waiting for task to shutdown")
 	}

-	require.NoError(harness.DestroyTask(task.ID, true))
+	// Ensure that the task is marked as dead, but account
+	// for WaitTask() closing channel before internal state is updated
+	testutil.WaitForResult(func() (bool, error) {
+		status, err := harness.InspectTask(task.ID)
+		if err != nil {
+			return false, fmt.Errorf("inspecting task failed: %v", err)
+		}
+		if status.State != drivers.TaskStateExited {
+			return false, fmt.Errorf("task hasn't exited yet; status: %v", status.State)
+		}
+		return true, nil
+	}, func(err error) {
+		require.NoError(err)
+	})
+
+	require.NoError(harness.DestroyTask(task.ID, true))
 }

 // Verifies waiting on task to exit cleanly


@@ -169,11 +169,24 @@ func TestExecutor_ClientCleanup(t *testing.T) {
 	execCmd.ResourceLimits = true

 	ps, err := executor.Launch(execCmd)
 	require.NoError(err)
 	require.NotZero(ps.Pid)

 	time.Sleep(500 * time.Millisecond)
 	require.NoError(executor.Shutdown("SIGINT", 100*time.Millisecond))
-	executor.Wait()
+
+	ch := make(chan interface{})
+	go func() {
+		executor.Wait()
+		close(ch)
+	}()
+
+	select {
+	case <-ch:
+		// all good
+	case <-time.After(5 * time.Second):
+		require.Fail("timeout waiting for exec to shutdown")
+	}

 	output := execCmd.stdout.(*bufferCloser).String()
 	require.NotZero(len(output))


@@ -6,6 +6,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"syscall"
 	"testing"
@@ -56,6 +57,8 @@ func testExecutorCommand(t *testing.T) (*ExecCommand, *allocdir.AllocDir) {
 			DiskMB:   task.Resources.DiskMB,
 		},
 	}

+	setupRootfs(t, td.Dir)
+	configureTLogging(cmd)
 	return cmd, allocDir
 }
@@ -121,6 +124,7 @@ func TestExecutor_Start_Wait(pt *testing.T) {
 			execCmd, allocDir := testExecutorCommand(t)
 			execCmd.Cmd = "/bin/echo"
 			execCmd.Args = []string{"hello world"}
+			defer allocDir.Destroy()

 			executor := factory(testlog.HCLogger(t))
 			defer executor.Shutdown("", 0)
@@ -188,7 +192,7 @@ func TestExecutor_Start_Kill(pt *testing.T) {
 			require := require.New(t)
 			execCmd, allocDir := testExecutorCommand(t)
 			execCmd.Cmd = "/bin/sleep"
-			execCmd.Args = []string{"10 && hello world"}
+			execCmd.Args = []string{"10"}
 			defer allocDir.Destroy()
 			executor := factory(testlog.HCLogger(t))
 			defer executor.Shutdown("", 0)
@@ -279,3 +283,36 @@ func TestUniversalExecutor_LookupPath(t *testing.T) {
 	require.Nil(err)
 }
+
+// setupRootfs sets up the rootfs for the libcontainer executor.
+// It uses busybox to make some binaries available - somewhat cheaper
+// than mounting the underlying host filesystem
+func setupRootfs(t *testing.T, rootfs string) {
+	paths := []string{
+		"/bin/sh",
+		"/bin/sleep",
+		"/bin/echo",
+		"/bin/date",
+	}
+
+	for _, p := range paths {
+		setupRootfsBinary(t, rootfs, p)
+	}
+}
+
+// setupRootfsBinary installs a busybox link in the desired path
+func setupRootfsBinary(t *testing.T, rootfs, path string) {
+	t.Helper()
+
+	dst := filepath.Join(rootfs, path)
+	err := os.MkdirAll(filepath.Dir(dst), 666)
+	require.NoError(t, err)
+
+	src := filepath.Join(
+		"test-resources", "busybox",
+		fmt.Sprintf("busybox-%s-%s", runtime.GOOS, runtime.GOARCH),
+	)
+
+	err = os.Link(src, dst)
+	require.NoError(t, err)
+}


@@ -0,0 +1,5 @@
+Downloaded busybox from https://busybox.net/downloads/binaries/, unmodified.
+
+Busybox binaries are statically linked multi-call binaries: a single executable that can act like many commonly used utilities (e.g. /bin/sh, echo, sleep). More info: https://busybox.net/downloads/BusyBox.html
+
+Busybox is GPLv2: https://www.busybox.net/license.html
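As an aside (not part of this commit), "multi-call" means the single binary dispatches on the applet name it is invoked with. A hypothetical Go snippet showing the idea, assuming a busybox binary is available on PATH:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// "busybox echo" behaves like /bin/echo, "busybox sleep 1" like
	// /bin/sleep 1; the first argument selects the applet.
	out, err := exec.Command("busybox", "echo", "hello from an applet").Output()
	if err != nil {
		fmt.Println("busybox not available:", err)
		return
	}
	fmt.Print(string(out))
}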

BIN  drivers/shared/executor/test-resources/busybox/busybox-linux-amd64 (executable, stored with Git LFS; binary file not shown)