Merge branch 'main' into tlefebvre/fix-wrong-drivernetworkmanager-interface

commit 1a4db3523d

@@ -9,7 +9,6 @@ references:
  # common references
  common_envs: &common_envs
    GOMAXPROCS: 1
    NOMAD_SLOW_TEST: 1
    GOTESTSUM_JUNITFILE: /tmp/test-reports/results.xml
    GOTESTSUM_JSONFILE: /tmp/test-reports/testjsonfile.json

@@ -520,7 +519,7 @@ executors:
    working_directory: ~/go/src/github.com/hashicorp/nomad
    machine:
      image: *go_machine_image
    resource_class: medium
    resource_class: large
    environment: &machine_env
      <<: *common_envs
      GOLANG_VERSION: 1.17.5

@@ -3,10 +3,13 @@ package acl
import (
	"testing"
	"github.com/hashicorp/nomad/ci"
	"github.com/stretchr/testify/assert"
)
func TestCapabilitySet(t *testing.T) {
	ci.Parallel(t)
	var cs capabilitySet = make(map[string]struct{})
	// Check no capabilities by default

@@ -28,6 +31,8 @@ func TestCapabilitySet(t *testing.T) {
}
func TestMaxPrivilege(t *testing.T) {
	ci.Parallel(t)
	type tcase struct {
		Privilege string
		PrecedenceOver []string

@@ -60,6 +65,8 @@ func TestMaxPrivilege(t *testing.T) {
}
func TestACLManagement(t *testing.T) {
	ci.Parallel(t)
	assert := assert.New(t)
	// Create management ACL

@@ -88,6 +95,8 @@ func TestACLManagement(t *testing.T) {
}
func TestACLMerge(t *testing.T) {
	ci.Parallel(t)
	assert := assert.New(t)
	// Merge read + write policy

@@ -222,6 +231,8 @@ quota {
`
func TestAllowNamespace(t *testing.T) {
	ci.Parallel(t)
	tests := []struct {
		Policy string
		Allow bool

@@ -264,6 +275,8 @@ func TestAllowNamespace(t *testing.T) {
}
func TestWildcardNamespaceMatching(t *testing.T) {
	ci.Parallel(t)
	tests := []struct {
		Policy string
		Allow bool

@@ -315,6 +328,8 @@ func TestWildcardNamespaceMatching(t *testing.T) {
}
func TestWildcardHostVolumeMatching(t *testing.T) {
	ci.Parallel(t)
	tests := []struct {
		Policy string
		Allow bool

@@ -365,6 +380,8 @@ func TestWildcardHostVolumeMatching(t *testing.T) {
	}
}
func TestACL_matchingCapabilitySet_returnsAllMatches(t *testing.T) {
	ci.Parallel(t)
	tests := []struct {
		Policy string
		NS string

@@ -411,6 +428,8 @@ func TestACL_matchingCapabilitySet_returnsAllMatches(t *testing.T) {
}
func TestACL_matchingCapabilitySet_difference(t *testing.T) {
	ci.Parallel(t)
	tests := []struct {
		Policy string
		NS string

@@ -5,10 +5,13 @@ import (
	"strings"
	"testing"
	"github.com/hashicorp/nomad/ci"
	"github.com/stretchr/testify/assert"
)
func TestParse(t *testing.T) {
	ci.Parallel(t)
	type tcase struct {
		Raw string
		ErrStr string

@@ -333,6 +336,8 @@ func TestParse(t *testing.T) {
}
func TestParse_BadInput(t *testing.T) {
	ci.Parallel(t)
	inputs := []string{
		`namespace "\500" {}`,
	}

@@ -0,0 +1,28 @@
package ci

import (
	"os"
	"strconv"
	"testing"
)

// SkipSlow skips a slow test unless NOMAD_SLOW_TEST is set to a true value.
func SkipSlow(t *testing.T, reason string) {
	value := os.Getenv("NOMAD_SLOW_TEST")
	run, err := strconv.ParseBool(value)
	if !run || err != nil {
		t.Skipf("Skipping slow test: %s", reason)
	}
}

// Parallel runs t in parallel, unless CI is set to a true value.
//
// In CI (CircleCI / GitHub Actions) we get better performance by running tests
// in serial while not restricting GOMAXPROCS.
func Parallel(t *testing.T) {
	value := os.Getenv("CI")
	isCI, err := strconv.ParseBool(value)
	if !isCI || err != nil {
		t.Parallel()
	}
}

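For reference, a minimal usage sketch of the two helpers added above, written in the style of the converted tests in this diff; the package and test names below are hypothetical and only illustrate the call pattern:

package example_test // hypothetical package, for illustration only

import (
	"testing"

	"github.com/hashicorp/nomad/ci"
)

// TestExample_Slow illustrates the intended usage: ci.Parallel replaces a
// direct t.Parallel call so the test runs serially when the CI environment
// variable parses as true, and ci.SkipSlow skips it unless NOMAD_SLOW_TEST is
// set to a true value.
func TestExample_Slow(t *testing.T) {
	ci.Parallel(t)
	ci.SkipSlow(t, "illustrative slow test")

	// test body would go here
}
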
@@ -4,6 +4,7 @@ import (
	"testing"
	"github.com/hashicorp/nomad/acl"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"

@@ -13,6 +14,8 @@ import (
)
func TestClient_ACL_resolveTokenValue(t *testing.T) {
	ci.Parallel(t)
	s1, _, _, cleanupS1 := testACLServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

@@ -62,6 +65,8 @@ func TestClient_ACL_resolveTokenValue(t *testing.T) {
}
func TestClient_ACL_resolvePolicies(t *testing.T) {
	ci.Parallel(t)
	s1, _, root, cleanupS1 := testACLServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

@@ -102,6 +107,8 @@ func TestClient_ACL_resolvePolicies(t *testing.T) {
}
func TestClient_ACL_ResolveToken_Disabled(t *testing.T) {
	ci.Parallel(t)
	s1, _, cleanupS1 := testServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

@@ -118,6 +125,8 @@ func TestClient_ACL_ResolveToken_Disabled(t *testing.T) {
}
func TestClient_ACL_ResolveToken(t *testing.T) {
	ci.Parallel(t)
	s1, _, _, cleanupS1 := testACLServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

@@ -167,7 +176,7 @@ func TestClient_ACL_ResolveToken(t *testing.T) {
}
func TestClient_ACL_ResolveSecretToken(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	s1, _, _, cleanupS1 := testACLServer(t, nil)
	defer cleanupS1()

@@ -11,6 +11,7 @@ import (
	"github.com/hashicorp/go-msgpack/codec"
	"github.com/hashicorp/nomad/acl"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/config"
	sframer "github.com/hashicorp/nomad/client/lib/streamframer"
	cstructs "github.com/hashicorp/nomad/client/structs"

@@ -24,7 +25,8 @@ import (
)
func TestMonitor_Monitor(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	// start server and client

@@ -105,7 +107,8 @@ OUTER:
}
func TestMonitor_Monitor_ACL(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	// start server

@@ -217,7 +220,8 @@ func TestMonitor_Monitor_ACL(t *testing.T) {
// Test that by default with no acl, endpoint is disabled
func TestAgentProfile_DefaultDisabled(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	// start server and client

@@ -243,7 +247,8 @@ func TestAgentProfile_DefaultDisabled(t *testing.T) {
}
func TestAgentProfile(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	// start server and client

@@ -290,7 +295,8 @@ func TestAgentProfile(t *testing.T) {
}
func TestAgentProfile_ACL(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	// start server

@@ -355,7 +361,7 @@ func TestAgentProfile_ACL(t *testing.T) {
}
func TestAgentHost(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	// start server and client
	s1, cleanup := nomad.TestServer(t, nil)

@@ -380,7 +386,7 @@ func TestAgentHost(t *testing.T) {
}
func TestAgentHost_ACL(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	s, root, cleanupS := nomad.TestACLServer(t, nil)
	defer cleanupS()

@@ -12,6 +12,7 @@ import (
	"github.com/hashicorp/go-msgpack/codec"
	"github.com/hashicorp/nomad/acl"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/config"
	cstructs "github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/helper/pluginutils/catalog"

@@ -27,7 +28,8 @@ import (
)
func TestAllocations_Restart(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	client, cleanup := TestClient(t, nil)
	defer cleanup()

@@ -66,7 +68,7 @@ func TestAllocations_Restart(t *testing.T) {
}
func TestAllocations_Restart_ACL(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	server, addr, root, cleanupS := testACLServer(t, nil)

@@ -142,8 +144,9 @@ func TestAllocations_Restart_ACL(t *testing.T) {
}
func TestAllocations_GarbageCollectAll(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	client, cleanup := TestClient(t, nil)
	defer cleanup()

@@ -153,7 +156,7 @@ func TestAllocations_GarbageCollectAll(t *testing.T) {
}
func TestAllocations_GarbageCollectAll_ACL(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	server, addr, root, cleanupS := testACLServer(t, nil)

@@ -206,8 +209,9 @@ func TestAllocations_GarbageCollectAll_ACL(t *testing.T) {
}
func TestAllocations_GarbageCollect(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	client, cleanup := TestClient(t, func(c *config.Config) {
		c.GCDiskUsageThreshold = 100.0
	})

@@ -249,7 +253,7 @@ func TestAllocations_GarbageCollect(t *testing.T) {
}
func TestAllocations_GarbageCollect_ACL(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	server, addr, root, cleanupS := testACLServer(t, nil)

@@ -322,7 +326,7 @@ func TestAllocations_GarbageCollect_ACL(t *testing.T) {
}
func TestAllocations_Signal(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	client, cleanup := TestClient(t, nil)
	defer cleanup()

@@ -348,7 +352,7 @@ func TestAllocations_Signal(t *testing.T) {
}
func TestAllocations_Signal_ACL(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	server, addr, root, cleanupS := testACLServer(t, nil)

@@ -420,8 +424,9 @@ func TestAllocations_Signal_ACL(t *testing.T) {
}
func TestAllocations_Stats(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	client, cleanup := TestClient(t, nil)
	defer cleanup()

@@ -453,7 +458,7 @@ func TestAllocations_Stats(t *testing.T) {
}
func TestAllocations_Stats_ACL(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	server, addr, root, cleanupS := testACLServer(t, nil)

@@ -525,7 +530,7 @@ func TestAllocations_Stats_ACL(t *testing.T) {
}
func TestAlloc_ExecStreaming(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	// Start a server and client

@@ -629,7 +634,7 @@ OUTER:
}
func TestAlloc_ExecStreaming_NoAllocation(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	// Start a server and client

@@ -684,7 +689,7 @@ func TestAlloc_ExecStreaming_NoAllocation(t *testing.T) {
}
func TestAlloc_ExecStreaming_DisableRemoteExec(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	// Start a server and client

@@ -740,7 +745,7 @@ func TestAlloc_ExecStreaming_DisableRemoteExec(t *testing.T) {
}
func TestAlloc_ExecStreaming_ACL_Basic(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	// Start a server and client
	s, root, cleanupS := nomad.TestACLServer(t, nil)

@@ -843,7 +848,7 @@ func TestAlloc_ExecStreaming_ACL_Basic(t *testing.T) {
// TestAlloc_ExecStreaming_ACL_WithIsolation_Image asserts that token only needs
// alloc-exec acl policy when image isolation is used
func TestAlloc_ExecStreaming_ACL_WithIsolation_Image(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	isolation := drivers.FSIsolationImage
	// Start a server and client

@@ -987,7 +992,7 @@ func TestAlloc_ExecStreaming_ACL_WithIsolation_Image(t *testing.T) {
// TestAlloc_ExecStreaming_ACL_WithIsolation_Chroot asserts that token only needs
// alloc-exec acl policy when chroot isolation is used
func TestAlloc_ExecStreaming_ACL_WithIsolation_Chroot(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	if runtime.GOOS != "linux" || unix.Geteuid() != 0 {
		t.Skip("chroot isolation requires linux root")

@@ -1136,7 +1141,7 @@ func TestAlloc_ExecStreaming_ACL_WithIsolation_Chroot(t *testing.T) {
// TestAlloc_ExecStreaming_ACL_WithIsolation_None asserts that token needs
// alloc-node-exec acl policy as well when no isolation is used
func TestAlloc_ExecStreaming_ACL_WithIsolation_None(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	isolation := drivers.FSIsolationNone
	// Start a server and client

@@ -7,6 +7,7 @@ import (
	"path/filepath"
	"testing"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/command/agent"
	"github.com/hashicorp/nomad/nomad"
	"github.com/hashicorp/nomad/nomad/mock"

@@ -26,7 +27,7 @@ func TestPrevAlloc_StreamAllocDir_TLS(t *testing.T) {
		clientCertFn = "../helper/tlsutil/testdata/global-client.pem"
		clientKeyFn = "../helper/tlsutil/testdata/global-client-key.pem"
	)
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	server, cleanupS := nomad.TestServer(t, func(c *nomad.Config) {

@@ -15,6 +15,7 @@ import (
	"syscall"
	"testing"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/require"

@@ -48,6 +49,8 @@ var (
// Test that AllocDir.Build builds just the alloc directory.
func TestAllocDir_BuildAlloc(t *testing.T) {
	ci.Parallel(t)
	tmp, err := ioutil.TempDir("", "AllocDir")
	if err != nil {
		t.Fatalf("Couldn't create temp dir: %v", err)

@@ -97,7 +100,9 @@ func MountCompatible(t *testing.T) {
}
func TestAllocDir_MountSharedAlloc(t *testing.T) {
	ci.Parallel(t)
	MountCompatible(t)
	tmp, err := ioutil.TempDir("", "AllocDir")
	if err != nil {
		t.Fatalf("Couldn't create temp dir: %v", err)

@@ -143,6 +148,8 @@ func TestAllocDir_MountSharedAlloc(t *testing.T) {
}
func TestAllocDir_Snapshot(t *testing.T) {
	ci.Parallel(t)
	tmp, err := ioutil.TempDir("", "AllocDir")
	if err != nil {
		t.Fatalf("Couldn't create temp dir: %v", err)

@@ -223,6 +230,8 @@ func TestAllocDir_Snapshot(t *testing.T) {
}
func TestAllocDir_Move(t *testing.T) {
	ci.Parallel(t)
	tmp1, err := ioutil.TempDir("", "AllocDir")
	if err != nil {
		t.Fatalf("Couldn't create temp dir: %v", err)

@@ -291,6 +300,8 @@ func TestAllocDir_Move(t *testing.T) {
}
func TestAllocDir_EscapeChecking(t *testing.T) {
	ci.Parallel(t)
	tmp, err := ioutil.TempDir("", "AllocDir")
	if err != nil {
		t.Fatalf("Couldn't create temp dir: %v", err)

@@ -332,6 +343,7 @@ func TestAllocDir_EscapeChecking(t *testing.T) {
// Test that `nomad fs` can't read secrets
func TestAllocDir_ReadAt_SecretDir(t *testing.T) {
	ci.Parallel(t)
	tmp := t.TempDir()
	d := NewAllocDir(testlog.HCLogger(t), tmp, "test")

@@ -359,6 +371,8 @@ func TestAllocDir_ReadAt_SecretDir(t *testing.T) {
}
func TestAllocDir_SplitPath(t *testing.T) {
	ci.Parallel(t)
	dir, err := ioutil.TempDir("", "tmpdirtest")
	if err != nil {
		log.Fatal(err)

@@ -382,6 +396,7 @@ func TestAllocDir_SplitPath(t *testing.T) {
}
func TestAllocDir_CreateDir(t *testing.T) {
	ci.Parallel(t)
	if syscall.Geteuid() != 0 {
		t.Skip("Must be root to run test")
	}

@@ -423,6 +438,8 @@ func TestAllocDir_CreateDir(t *testing.T) {
}
func TestPathFuncs(t *testing.T) {
	ci.Parallel(t)
	dir, err := ioutil.TempDir("", "nomadtest-pathfuncs")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)

@@ -458,7 +475,9 @@ func TestPathFuncs(t *testing.T) {
}
func TestAllocDir_DetectContentType(t *testing.T) {
	ci.Parallel(t)
	require := require.New(t)
	inputPath := "input/"
	var testFiles []string
	err := filepath.Walk(inputPath, func(path string, info os.FileInfo, err error) error {

@@ -494,6 +513,7 @@ func TestAllocDir_DetectContentType(t *testing.T) {
// Warning: If this test fails it may fill your disk before failing, so be
// careful and/or confident.
func TestAllocDir_SkipAllocDir(t *testing.T) {
	ci.Parallel(t)
	MountCompatible(t)
	// Create root, alloc, and other dirs

@@ -10,6 +10,7 @@ import (
	"strings"
	"testing"
	"github.com/hashicorp/nomad/ci"
	"golang.org/x/sys/unix"
)

@@ -49,9 +50,11 @@ func isMount(path string) error {
// TestLinuxRootSecretDir asserts secret dir creation and removal are
// idempotent.
func TestLinuxRootSecretDir(t *testing.T) {
	ci.Parallel(t)
	if unix.Geteuid() != 0 {
		t.Skip("Must be run as root")
	}
	tmpdir, err := ioutil.TempDir("", "nomadtest-rootsecretdir")
	if err != nil {
		t.Fatalf("unable to create tempdir for test: %v", err)

@@ -109,9 +112,11 @@ func TestLinuxRootSecretDir(t *testing.T) {
// TestLinuxUnprivilegedSecretDir asserts secret dir creation and removal are
// idempotent.
func TestLinuxUnprivilegedSecretDir(t *testing.T) {
	ci.Parallel(t)
	if unix.Geteuid() == 0 {
		t.Skip("Must not be run as root")
	}
	tmpdir, err := ioutil.TempDir("", "nomadtest-secretdir")
	if err != nil {
		t.Fatalf("unable to create tempdir for test: %s", err)

@@ -6,11 +6,14 @@ import (
	"path/filepath"
	"testing"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/testlog"
)
// Test that building a chroot will skip nonexistent directories.
func TestTaskDir_EmbedNonexistent(t *testing.T) {
	ci.Parallel(t)
	tmp, err := ioutil.TempDir("", "AllocDir")
	if err != nil {
		t.Fatalf("Couldn't create temp dir: %v", err)

@@ -33,6 +36,8 @@ func TestTaskDir_EmbedNonexistent(t *testing.T) {
// Test that building a chroot copies files from the host into the task dir.
func TestTaskDir_EmbedDirs(t *testing.T) {
	ci.Parallel(t)
	tmp, err := ioutil.TempDir("", "AllocDir")
	if err != nil {
		t.Fatalf("Couldn't create temp dir: %v", err)

@@ -87,6 +92,7 @@ func TestTaskDir_EmbedDirs(t *testing.T) {
// Test that task dirs for image based isolation don't require root.
func TestTaskDir_NonRoot_Image(t *testing.T) {
	ci.Parallel(t)
	if os.Geteuid() == 0 {
		t.Skip("test should be run as non-root user")
	}

@@ -110,9 +116,11 @@ func TestTaskDir_NonRoot_Image(t *testing.T) {
// Test that task dirs with no isolation don't require root.
func TestTaskDir_NonRoot(t *testing.T) {
	ci.Parallel(t)
	if os.Geteuid() == 0 {
		t.Skip("test should be run as non-root user")
	}
	tmp, err := ioutil.TempDir("", "AllocDir")
	if err != nil {
		t.Fatalf("Couldn't create temp dir: %v", err)

@@ -134,5 +142,4 @@ func TestTaskDir_NonRoot(t *testing.T) {
	if _, err = os.Stat(td.SharedTaskDir); !os.IsNotExist(err) {
		t.Fatalf("Expected a NotExist error for shared alloc dir in task dir: %q", td.SharedTaskDir)
	}
}

@@ -8,6 +8,7 @@ import (
	"time"
	consulapi "github.com/hashicorp/consul/api"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/consul"
	cstructs "github.com/hashicorp/nomad/client/structs"
	agentconsul "github.com/hashicorp/nomad/command/agent/consul"

@@ -19,7 +20,7 @@ import (
)
func TestTracker_Checks_Healthy(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = 1 // let's speed things up

@@ -90,7 +91,7 @@ func TestTracker_Checks_Healthy(t *testing.T) {
}
func TestTracker_Checks_PendingPostStop_Healthy(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.LifecycleAllocWithPoststopDeploy()
	alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = 1 // let's speed things up

@@ -130,7 +131,7 @@ func TestTracker_Checks_PendingPostStop_Healthy(t *testing.T) {
}
func TestTracker_Succeeded_PostStart_Healthy(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.LifecycleAllocWithPoststartDeploy()
	alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = time.Millisecond * 1

@@ -171,7 +172,7 @@ func TestTracker_Succeeded_PostStart_Healthy(t *testing.T) {
}
func TestTracker_Checks_Unhealthy(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = 1 // let's speed things up

@@ -261,7 +262,7 @@ func TestTracker_Checks_Unhealthy(t *testing.T) {
}
func TestTracker_Healthy_IfBothTasksAndConsulChecksAreHealthy(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	logger := testlog.HCLogger(t)

@@ -312,7 +313,7 @@ func TestTracker_Healthy_IfBothTasksAndConsulChecksAreHealthy(t *testing.T) {
// TestTracker_Checks_Healthy_Before_TaskHealth asserts that we mark an alloc
// healthy, if the checks pass before task health pass
func TestTracker_Checks_Healthy_Before_TaskHealth(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = 1 // let's speed things up

@@ -419,7 +420,7 @@ func TestTracker_Checks_Healthy_Before_TaskHealth(t *testing.T) {
}
func TestTracker_Checks_OnUpdate(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	cases := []struct {
		desc string

@@ -9,6 +9,7 @@ import (
	"time"
	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allochealth"
	"github.com/hashicorp/nomad/client/allocwatcher"
	cconsul "github.com/hashicorp/nomad/client/consul"

@@ -30,7 +31,7 @@ func destroy(ar *allocRunner) {
// TestAllocRunner_AllocState_Initialized asserts that getting TaskStates via
// AllocState() are initialized even before the AllocRunner has run.
func TestAllocRunner_AllocState_Initialized(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"

@@ -49,7 +50,7 @@ func TestAllocRunner_AllocState_Initialized(t *testing.T) {
// TestAllocRunner_TaskLeader_KillTG asserts that when a leader task dies the
// entire task group is killed.
func TestAllocRunner_TaskLeader_KillTG(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.BatchAlloc()
	tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name]

@@ -239,7 +240,7 @@ func TestAllocRunner_Lifecycle_Poststart(t *testing.T) {
// TestAllocRunner_TaskMain_KillTG asserts that when main tasks die the
// entire task group is killed.
func TestAllocRunner_TaskMain_KillTG(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.BatchAlloc()
	tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name]

@@ -398,6 +399,8 @@ func TestAllocRunner_TaskMain_KillTG(t *testing.T) {
// postop lifecycle hook starts all 3 tasks, only
// the ephemeral one finishes, and the other 2 exit when the alloc is stopped.
func TestAllocRunner_Lifecycle_Poststop(t *testing.T) {
	ci.Parallel(t)
	alloc := mock.LifecycleAlloc()
	tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name]

@@ -478,7 +481,7 @@ func TestAllocRunner_Lifecycle_Poststop(t *testing.T) {
}
func TestAllocRunner_TaskGroup_ShutdownDelay(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name]

@@ -608,7 +611,7 @@ func TestAllocRunner_TaskGroup_ShutdownDelay(t *testing.T) {
// TestAllocRunner_TaskLeader_StopTG asserts that when stopping an alloc with a
// leader the leader is stopped before other tasks.
func TestAllocRunner_TaskLeader_StopTG(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name]

@@ -707,7 +710,7 @@ func TestAllocRunner_TaskLeader_StopTG(t *testing.T) {
// not stopped as it does not exist.
// See https://github.com/hashicorp/nomad/issues/3420#issuecomment-341666932
func TestAllocRunner_TaskLeader_StopRestoredTG(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name]

@@ -785,7 +788,7 @@ func TestAllocRunner_TaskLeader_StopRestoredTG(t *testing.T) {
}
func TestAllocRunner_Restore_LifecycleHooks(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.LifecycleAlloc()

@@ -823,7 +826,7 @@ func TestAllocRunner_Restore_LifecycleHooks(t *testing.T) {
}
func TestAllocRunner_Update_Semantics(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	updatedAlloc := func(a *structs.Allocation) *structs.Allocation {

@@ -876,7 +879,7 @@ func TestAllocRunner_Update_Semantics(t *testing.T) {
// TestAllocRunner_DeploymentHealth_Healthy_Migration asserts that health is
// reported for services that got migrated; not just part of deployments.
func TestAllocRunner_DeploymentHealth_Healthy_Migration(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()

@@ -924,7 +927,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_Migration(t *testing.T) {
// TestAllocRunner_DeploymentHealth_Healthy_NoChecks asserts that the health
// watcher will mark the allocation as healthy based on task states alone.
func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()

@@ -987,7 +990,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) {
// TestAllocRunner_DeploymentHealth_Unhealthy_Checks asserts that the health
// watcher will mark the allocation as unhealthy with failing checks.
func TestAllocRunner_DeploymentHealth_Unhealthy_Checks(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]

@@ -1082,7 +1085,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_Checks(t *testing.T) {
// TestAllocRunner_Destroy asserts that Destroy kills and cleans up a running
// alloc.
func TestAllocRunner_Destroy(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	// Ensure task takes some time
	alloc := mock.BatchAlloc()

@@ -1144,7 +1147,7 @@ func TestAllocRunner_Destroy(t *testing.T) {
}
func TestAllocRunner_SimpleRun(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.BatchAlloc()

@@ -1179,7 +1182,7 @@ func TestAllocRunner_SimpleRun(t *testing.T) {
// TestAllocRunner_MoveAllocDir asserts that a rescheduled
// allocation copies ephemeral disk content from previous alloc run
func TestAllocRunner_MoveAllocDir(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	// Step 1: start and run a task
	alloc := mock.BatchAlloc()

@@ -1236,7 +1239,7 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) {
// retrying fetching an artifact, other tasks in the group should be able
// to proceed.
func TestAllocRunner_HandlesArtifactFailure(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.BatchAlloc()
	rp := &structs.RestartPolicy{

@@ -1296,6 +1299,8 @@ func TestAllocRunner_HandlesArtifactFailure(t *testing.T) {
// Test that alloc runner kills tasks in task group when another task fails
func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
	ci.Parallel(t)
	alloc := mock.Alloc()
	tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name]
	alloc.Job.TaskGroups[0].RestartPolicy.Attempts = 0

@@ -1425,7 +1430,7 @@ func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
// Test that alloc becoming terminal should destroy the alloc runner
func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.BatchAlloc()
	tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name]
	alloc.Job.TaskGroups[0].RestartPolicy.Attempts = 0

@@ -1513,7 +1518,7 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
// TestAllocRunner_PersistState_Destroyed asserts that destroyed allocs don't persist anymore
func TestAllocRunner_PersistState_Destroyed(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.BatchAlloc()
	taskName := alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks[0].Name

@@ -11,6 +11,7 @@ import (
	"testing"
	"time"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/consul"
	"github.com/hashicorp/nomad/client/state"
	"github.com/hashicorp/nomad/nomad/mock"

@@ -25,7 +26,7 @@ import (
// DesiredStatus=Stop, persisting the update, but crashing before terminating
// the task.
func TestAllocRunner_Restore_RunningTerminal(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	// 1. Run task
	// 2. Shutdown alloc runner

@@ -143,7 +144,7 @@ func TestAllocRunner_Restore_RunningTerminal(t *testing.T) {
// TestAllocRunner_Restore_CompletedBatch asserts that restoring a completed
// batch alloc doesn't run it again
func TestAllocRunner_Restore_CompletedBatch(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	// 1. Run task and wait for it to complete
	// 2. Start new alloc runner

@@ -228,7 +229,7 @@ func TestAllocRunner_Restore_CompletedBatch(t *testing.T) {
// prestart hooks failed, then the alloc and subsequent tasks transition
// to failed state
func TestAllocRunner_PreStartFailuresLeadToFailed(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	alloc.Job.Type = structs.JobTypeBatch

@@ -11,6 +11,7 @@ import (
	"sync"
	"testing"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/helper/testlog"

@@ -24,7 +25,7 @@ import (
// Consul unix socket hook's Prerun method is called and stopped with the
// Postrun method is called.
func TestConsulGRPCSocketHook_PrerunPostrun_Ok(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	// As of Consul 1.6.0 the test server does not support the gRPC
	// endpoint so we have to fake it.

@@ -101,7 +102,7 @@ func TestConsulGRPCSocketHook_PrerunPostrun_Ok(t *testing.T) {
// TestConsulGRPCSocketHook_Prerun_Error asserts that invalid Consul addresses cause
// Prerun to return an error if the alloc requires a grpc proxy.
func TestConsulGRPCSocketHook_Prerun_Error(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	logger := testlog.HCLogger(t)

@@ -153,7 +154,7 @@ func TestConsulGRPCSocketHook_Prerun_Error(t *testing.T) {
// TestConsulGRPCSocketHook_proxy_Unix asserts that the destination can be a unix
// socket path.
func TestConsulGRPCSocketHook_proxy_Unix(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	dir, err := ioutil.TempDir("", "nomadtest_proxy_Unix")
	require.NoError(t, err)

@@ -6,6 +6,7 @@ import (
	"path/filepath"
	"testing"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/mock"

@@ -14,7 +15,7 @@ import (
)
func TestConsulSocketHook_PrerunPostrun_Ok(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	fakeConsul, err := net.Listen("tcp", "127.0.0.1:0")
	require.NoError(t, err)

@@ -89,7 +90,7 @@ func TestConsulSocketHook_PrerunPostrun_Ok(t *testing.T) {
}
func TestConsulHTTPSocketHook_Prerun_Error(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	logger := testlog.HCLogger(t)

@@ -7,6 +7,7 @@ import (
	"testing"
	"time"
	"github.com/hashicorp/nomad/ci"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/require"

@@ -29,6 +30,7 @@ var _ interfaces.RunnerPostrunHook = (*csiHook)(nil)
// var _ interfaces.RunnerUpdateHook = (*csiHook)(nil)
func TestCSIHook(t *testing.T) {
	ci.Parallel(t)
	alloc := mock.Alloc()
	logger := testlog.HCLogger(t)

@@ -78,7 +78,7 @@ func newGroupServiceHook(cfg groupServiceHookConfig) *groupServiceHook {
		delay: shutdownDelay,
		networkStatusGetter: cfg.networkStatusGetter,
		logger: cfg.logger.Named(groupServiceHookName),
		services: cfg.alloc.Job.LookupTaskGroup(cfg.alloc.TaskGroup).Services,
		services: tg.Services,
		shutdownDelayCtx: cfg.shutdownDelayCtx,
	}

@@ -7,6 +7,7 @@ import (
	consulapi "github.com/hashicorp/consul/api"
	ctestutil "github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/consul"
	"github.com/hashicorp/nomad/client/taskenv"

@@ -27,7 +28,7 @@ var _ interfaces.RunnerTaskRestartHook = (*groupServiceHook)(nil)
// TestGroupServiceHook_NoGroupServices asserts calling group service hooks
// without group services does not error.
func TestGroupServiceHook_NoGroupServices(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	alloc.Job.TaskGroups[0].Services = []*structs.Service{{

@@ -65,7 +66,7 @@ func TestGroupServiceHook_NoGroupServices(t *testing.T) {
// TestGroupServiceHook_ShutdownDelayUpdate asserts calling group service hooks
// update updates the hooks delay value.
func TestGroupServiceHook_ShutdownDelayUpdate(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	alloc.Job.TaskGroups[0].ShutdownDelay = helper.TimeToPtr(10 * time.Second)

@@ -102,7 +103,7 @@ func TestGroupServiceHook_ShutdownDelayUpdate(t *testing.T) {
// TestGroupServiceHook_GroupServices asserts group service hooks with group
// services does not error.
func TestGroupServiceHook_GroupServices(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.ConnectAlloc()
	logger := testlog.HCLogger(t)

@@ -136,7 +137,7 @@ func TestGroupServiceHook_GroupServices(t *testing.T) {
// TestGroupServiceHook_Error asserts group service hooks with group
// services but no group network is handled gracefully.
func TestGroupServiceHook_NoNetwork(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	alloc.Job.TaskGroups[0].Networks = []*structs.NetworkResource{}

@@ -180,7 +181,7 @@ func TestGroupServiceHook_NoNetwork(t *testing.T) {
}
func TestGroupServiceHook_getWorkloadServices(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	alloc := mock.Alloc()
	alloc.Job.TaskGroups[0].Networks = []*structs.NetworkResource{}

@@ -6,6 +6,7 @@ import (
	"time"
	consulapi "github.com/hashicorp/consul/api"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/consul"
	cstructs "github.com/hashicorp/nomad/client/structs"

@@ -84,7 +85,7 @@ func (m *mockHealthSetter) HasHealth() bool {
// TestHealthHook_PrerunPostrun asserts a health hook does not error if it is
// run and postrunned.
func TestHealthHook_PrerunPostrun(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	logger := testlog.HCLogger(t)

@@ -121,7 +122,7 @@ func TestHealthHook_PrerunPostrun(t *testing.T) {
// TestHealthHook_PrerunUpdatePostrun asserts Updates may be applied concurrently.
func TestHealthHook_PrerunUpdatePostrun(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	alloc := mock.Alloc()

@@ -160,7 +161,7 @@ func TestHealthHook_PrerunUpdatePostrun(t *testing.T) {
// TestHealthHook_UpdatePrerunPostrun asserts that a hook may have Update
// called before Prerun.
func TestHealthHook_UpdatePrerunPostrun(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	alloc := mock.Alloc()

@@ -203,7 +204,7 @@ func TestHealthHook_UpdatePrerunPostrun(t *testing.T) {
// TestHealthHook_Postrun asserts that a hook may have only Postrun called.
func TestHealthHook_Postrun(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	logger := testlog.HCLogger(t)

@@ -222,7 +223,7 @@ func TestHealthHook_Postrun(t *testing.T) {
// TestHealthHook_SetHealth_healthy asserts SetHealth is called when health status is
// set. Uses task state and health checks.
func TestHealthHook_SetHealth_healthy(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	alloc := mock.Alloc()

@@ -302,7 +303,7 @@ func TestHealthHook_SetHealth_healthy(t *testing.T) {
// TestHealthHook_SetHealth_unhealthy asserts SetHealth notices unhealthy allocs
func TestHealthHook_SetHealth_unhealthy(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	alloc := mock.Alloc()

@@ -386,7 +387,7 @@ func TestHealthHook_SetHealth_unhealthy(t *testing.T) {
// TestHealthHook_SystemNoop asserts that system jobs return the noop tracker.
func TestHealthHook_SystemNoop(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	h := newAllocHealthWatcherHook(testlog.HCLogger(t), mock.SystemAlloc(), nil, nil, nil)

@@ -407,7 +408,7 @@ func TestHealthHook_SystemNoop(t *testing.T) {
// TestHealthHook_BatchNoop asserts that batch jobs return the noop tracker.
func TestHealthHook_BatchNoop(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	h := newAllocHealthWatcherHook(testlog.HCLogger(t), mock.BatchAlloc(), nil, nil, nil)

@@ -3,6 +3,7 @@ package allocrunner
import (
	"testing"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/taskenv"
	"github.com/hashicorp/nomad/helper/testlog"

@@ -42,6 +43,8 @@ func (m *mockNetworkStatusSetter) SetNetworkStatus(status *structs.AllocNetworkS
// Test that the prerun and postrun hooks call the setter with the expected spec when
// the network mode is not host
func TestNetworkHook_Prerun_Postrun(t *testing.T) {
	ci.Parallel(t)
	alloc := mock.Alloc()
	alloc.Job.TaskGroups[0].Networks = []*structs.NetworkResource{
		{

@@ -3,6 +3,7 @@ package allocrunner
import (
	"testing"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/pluginmanager"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers"

@@ -63,6 +64,8 @@ func (m *mockDriverManager) Dispense(driver string) (drivers.DriverPlugin, error
}
func TestNewNetworkManager(t *testing.T) {
	ci.Parallel(t)
	for _, tc := range []struct {
		name string
		alloc *structs.Allocation

@@ -8,6 +8,7 @@ import (
	"testing"
	cni "github.com/containerd/go-cni"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

@@ -16,6 +17,8 @@ import (
// TestCNI_cniToAllocNet_Fallback asserts if a CNI plugin result lacks an IP on
// its sandbox interface, the first IP found is used.
func TestCNI_cniToAllocNet_Fallback(t *testing.T) {
	ci.Parallel(t)
	// Calico's CNI plugin v3.12.3 has been observed to return the
	// following:
	cniResult := &cni.CNIResult{

@@ -47,6 +50,8 @@ func TestCNI_cniToAllocNet_Fallback(t *testing.T) {
// result lacks any IP addresses. This has not been observed, but Nomad still
// must guard against invalid results from external plugins.
func TestCNI_cniToAllocNet_Invalid(t *testing.T) {
	ci.Parallel(t)
	cniResult := &cni.CNIResult{
		Interfaces: map[string]*cni.Config{
			"eth0": {},

@@ -5,16 +5,17 @@ import (
	"testing"
	"time"
	"github.com/stretchr/testify/require"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocrunner/taskrunner"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/require"
)
func TestTaskHookCoordinator_OnlyMainApp(t *testing.T) {
	ci.Parallel(t)
	alloc := mock.Alloc()
	tasks := alloc.Job.TaskGroups[0].Tasks
	task := tasks[0]

@@ -28,6 +29,8 @@ func TestTaskHookCoordinator_OnlyMainApp(t *testing.T) {
}
func TestTaskHookCoordinator_PrestartRunsBeforeMain(t *testing.T) {
	ci.Parallel(t)
	logger := testlog.HCLogger(t)
	alloc := mock.LifecycleAlloc()

@@ -48,6 +51,8 @@ func TestTaskHookCoordinator_PrestartRunsBeforeMain(t *testing.T) {
}
func TestTaskHookCoordinator_MainRunsAfterPrestart(t *testing.T) {
	ci.Parallel(t)
	logger := testlog.HCLogger(t)
	alloc := mock.LifecycleAlloc()

@@ -92,6 +97,8 @@ func TestTaskHookCoordinator_MainRunsAfterPrestart(t *testing.T) {
}
func TestTaskHookCoordinator_MainRunsAfterManyInitTasks(t *testing.T) {
	ci.Parallel(t)
	logger := testlog.HCLogger(t)
	alloc := mock.LifecycleAlloc()

@@ -137,6 +144,8 @@ func TestTaskHookCoordinator_MainRunsAfterManyInitTasks(t *testing.T) {
}
func TestTaskHookCoordinator_FailedInitTask(t *testing.T) {
	ci.Parallel(t)
	logger := testlog.HCLogger(t)
	alloc := mock.LifecycleAlloc()

@@ -182,6 +191,8 @@ func TestTaskHookCoordinator_FailedInitTask(t *testing.T) {
}
func TestTaskHookCoordinator_SidecarNeverStarts(t *testing.T) {
	ci.Parallel(t)
	logger := testlog.HCLogger(t)
	alloc := mock.LifecycleAlloc()

@@ -225,6 +236,8 @@ func TestTaskHookCoordinator_SidecarNeverStarts(t *testing.T) {
}
func TestTaskHookCoordinator_PoststartStartsAfterMain(t *testing.T) {
	ci.Parallel(t)
	logger := testlog.HCLogger(t)
	alloc := mock.LifecycleAlloc()

@@ -280,6 +293,7 @@ func isChannelClosed(ch <-chan struct{}) bool {
}
func TestHasSidecarTasks(t *testing.T) {
	ci.Parallel(t)
	falseV, trueV := false, true

@@ -10,6 +10,7 @@ import (
	"sort"
	"testing"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/taskenv"

@@ -33,7 +34,7 @@ func (m *mockEmitter) EmitEvent(ev *structs.TaskEvent) {
// TestTaskRunner_ArtifactHook_Recoverable asserts that failures to download
// artifacts are a recoverable error.
func TestTaskRunner_ArtifactHook_Recoverable(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	me := &mockEmitter{}
	artifactHook := newArtifactHook(me, testlog.HCLogger(t))

@@ -66,7 +67,7 @@ func TestTaskRunner_ArtifactHook_Recoverable(t *testing.T) {
// already downloaded artifacts when subsequent artifacts fail and cause a
// restart.
func TestTaskRunner_ArtifactHook_PartialDone(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	me := &mockEmitter{}
	artifactHook := newArtifactHook(me, testlog.HCLogger(t))

@@ -9,6 +9,7 @@ import (
	consulapi "github.com/hashicorp/consul/api"
	consultest "github.com/hashicorp/consul/sdk/testutil"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/taskenv"

@@ -35,7 +36,7 @@ func getTestConsul(t *testing.T) *consultest.TestServer {
}
func TestConnectNativeHook_Name(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	name := new(connectNativeHook).Name()
	require.Equal(t, "connect_native", name)
}

@@ -61,7 +62,7 @@ func cleanupCertDirs(t *testing.T, original, secrets string) {
}
func TestConnectNativeHook_copyCertificate(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	f, d := setupCertDirs(t)
	defer cleanupCertDirs(t, f, d)

@@ -81,7 +82,7 @@ func TestConnectNativeHook_copyCertificate(t *testing.T) {
}
func TestConnectNativeHook_copyCertificates(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	f, d := setupCertDirs(t)
	defer cleanupCertDirs(t, f, d)

@@ -109,7 +110,7 @@ func TestConnectNativeHook_copyCertificates(t *testing.T) {
}
func TestConnectNativeHook_tlsEnv(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	// the hook config comes from client config
	emptyHook := new(connectNativeHook)

@@ -163,7 +164,7 @@ func TestConnectNativeHook_tlsEnv(t *testing.T) {
}
func TestConnectNativeHook_bridgeEnv_bridge(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	t.Run("without tls", func(t *testing.T) {
		hook := new(connectNativeHook)

@@ -208,7 +209,7 @@ func TestConnectNativeHook_bridgeEnv_bridge(t *testing.T) {
}
func TestConnectNativeHook_bridgeEnv_host(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	hook := new(connectNativeHook)
	hook.alloc = mock.ConnectNativeAlloc("host")

@@ -227,7 +228,7 @@ func TestConnectNativeHook_bridgeEnv_host(t *testing.T) {
}
func TestConnectNativeHook_hostEnv_host(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	hook := new(connectNativeHook)
	hook.alloc = mock.ConnectNativeAlloc("host")

@@ -249,7 +250,7 @@ func TestConnectNativeHook_hostEnv_host(t *testing.T) {
}
func TestConnectNativeHook_hostEnv_bridge(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	hook := new(connectNativeHook)
	hook.alloc = mock.ConnectNativeAlloc("bridge")

@@ -269,7 +270,7 @@ func TestConnectNativeHook_hostEnv_bridge(t *testing.T) {
}
func TestTaskRunner_ConnectNativeHook_Noop(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	logger := testlog.HCLogger(t)
	alloc := mock.Alloc()

@@ -307,7 +308,7 @@ func TestTaskRunner_ConnectNativeHook_Noop(t *testing.T) {
}
func TestTaskRunner_ConnectNativeHook_Ok(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	testutil.RequireConsul(t)
	testConsul := getTestConsul(t)

@@ -372,7 +373,7 @@ func TestTaskRunner_ConnectNativeHook_Ok(t *testing.T) {
}
func TestTaskRunner_ConnectNativeHook_with_SI_token(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	testutil.RequireConsul(t)
	testConsul := getTestConsul(t)

@@ -445,7 +446,7 @@ func TestTaskRunner_ConnectNativeHook_with_SI_token(t *testing.T) {
}
func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	testutil.RequireConsul(t)
	try := func(t *testing.T, shareSSL *bool) {

@@ -566,7 +567,7 @@ func checkFilesInDir(t *testing.T, dir string, includes, excludes []string) {
}
func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	testutil.RequireConsul(t)
	fakeCert, fakeCertDir := setupCertDirs(t)

@@ -5,6 +5,7 @@ import (
	"fmt"
	"testing"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/devicemanager"
	"github.com/hashicorp/nomad/helper/testlog"

@@ -15,7 +16,7 @@ import (
)
func TestDeviceHook_CorrectDevice(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	dm := devicemanager.NoopMockManager()

@@ -97,7 +98,7 @@ func TestDeviceHook_CorrectDevice(t *testing.T) {
}
func TestDeviceHook_IncorrectDevice(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	dm := devicemanager.NoopMockManager()

@@ -7,6 +7,7 @@ import (
	"testing"
	"github.com/golang/snappy"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/helper/testlog"

@@ -21,7 +22,7 @@ var _ interfaces.TaskPrestartHook = (*dispatchHook)(nil)
// TestTaskRunner_DispatchHook_NoPayload asserts that the hook is a noop and is
// marked as done if there is no dispatch payload.
func TestTaskRunner_DispatchHook_NoPayload(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	ctx := context.Background()

@@ -57,7 +58,7 @@ func TestTaskRunner_DispatchHook_NoPayload(t *testing.T) {
// TestTaskRunner_DispatchHook_Ok asserts that dispatch payloads are written to
// a file in the task dir.
func TestTaskRunner_DispatchHook_Ok(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	ctx := context.Background()

@@ -101,7 +102,7 @@ func TestTaskRunner_DispatchHook_Ok(t *testing.T) {
// TestTaskRunner_DispatchHook_Error asserts that on an error dispatch payloads
// are not written and Done=false.
func TestTaskRunner_DispatchHook_Error(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)
	ctx := context.Background()

@@ -17,6 +17,7 @@ import (
	"time"
	consulapi "github.com/hashicorp/consul/api"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocdir"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/taskenv"

@@ -53,7 +54,7 @@ func writeTmp(t *testing.T, s string, fm os.FileMode) string {
}
func TestEnvoyBootstrapHook_maybeLoadSIToken(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	// This test fails when running as root because the test case for checking
	// the error condition when the file is unreadable fails (root can read the

@@ -94,7 +95,7 @@ func TestEnvoyBootstrapHook_maybeLoadSIToken(t *testing.T) {
}
func TestEnvoyBootstrapHook_decodeTriState(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require.Equal(t, "", decodeTriState(nil))
	require.Equal(t, "true", decodeTriState(helper.BoolToPtr(true)))

@@ -118,7 +119,7 @@ var (
)
func TestEnvoyBootstrapHook_envoyBootstrapArgs(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	t.Run("excluding SI token", func(t *testing.T) {
		ebArgs := envoyBootstrapArgs{

@@ -227,7 +228,7 @@ func TestEnvoyBootstrapHook_envoyBootstrapArgs(t *testing.T) {
}
func TestEnvoyBootstrapHook_envoyBootstrapEnv(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	environment := []string{"foo=bar", "baz=1"}

@@ -291,7 +292,7 @@ type envoyConfig struct {
// TestEnvoyBootstrapHook_with_SI_token asserts the bootstrap file written for
// Envoy contains a Consul SI token.
func TestEnvoyBootstrapHook_with_SI_token(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	testutil.RequireConsul(t)
	testConsul := getTestConsul(t)

@@ -392,7 +393,7 @@ func TestEnvoyBootstrapHook_with_SI_token(t *testing.T) {
// creates Envoy's bootstrap.json configuration based on Connect proxy sidecars
// registered for the task.
func TestTaskRunner_EnvoyBootstrapHook_sidecar_ok(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	testutil.RequireConsul(t)
	testConsul := getTestConsul(t)

@@ -487,7 +488,7 @@ func TestTaskRunner_EnvoyBootstrapHook_sidecar_ok(t *testing.T) {
}
func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	logger := testlog.HCLogger(t)
	testConsul := getTestConsul(t)

@@ -570,7 +571,7 @@ func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) {
// TestTaskRunner_EnvoyBootstrapHook_Noop asserts that the Envoy bootstrap hook
// is a noop for non-Connect proxy sidecar / gateway tasks.
func TestTaskRunner_EnvoyBootstrapHook_Noop(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	logger := testlog.HCLogger(t)
	alloc := mock.Alloc()

@@ -607,7 +608,7 @@ func TestTaskRunner_EnvoyBootstrapHook_Noop(t *testing.T) {
// bootstrap hook returns a Recoverable error if the bootstrap command runs but
// fails.
func TestTaskRunner_EnvoyBootstrapHook_RecoverableError(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	testutil.RequireConsul(t)
	testConsul := getTestConsul(t)

@@ -685,7 +686,7 @@ func TestTaskRunner_EnvoyBootstrapHook_RecoverableError(t *testing.T) {
}
func TestTaskRunner_EnvoyBootstrapHook_retryTimeout(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	logger := testlog.HCLogger(t)
	testConsul := getTestConsul(t)

@@ -812,6 +813,8 @@ func TestTaskRunner_EnvoyBootstrapHook_extractNameAndKind(t *testing.T) {
}
func TestTaskRunner_EnvoyBootstrapHook_grpcAddress(t *testing.T) {
	ci.Parallel(t)
	bridgeH := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(
		mock.ConnectIngressGatewayAlloc("bridge"),
		new(config.ConsulConfig),

@@ -841,6 +844,8 @@ func TestTaskRunner_EnvoyBootstrapHook_grpcAddress(t *testing.T) {
}
func TestTaskRunner_EnvoyBootstrapHook_isConnectKind(t *testing.T) {
	ci.Parallel(t)
	require.True(t, isConnectKind(structs.ConnectProxyPrefix))
	require.True(t, isConnectKind(structs.ConnectIngressPrefix))
	require.True(t, isConnectKind(structs.ConnectTerminatingPrefix))

@@ -4,6 +4,7 @@ import (
	"context"
	"testing"

+	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocdir"
	ifs "github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/taskenv"
@@ -24,7 +25,7 @@ var (
)

func TestEnvoyVersionHook_semver(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	t.Run("with v", func(t *testing.T) {
		result, err := semver("v1.2.3")
@@ -45,7 +46,7 @@ func TestEnvoyVersionHook_semver(t *testing.T) {
}

func TestEnvoyVersionHook_taskImage(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	t.Run("absent", func(t *testing.T) {
		result := (*envoyVersionHook)(nil).taskImage(map[string]interface{}{
@@ -70,7 +71,7 @@ func TestEnvoyVersionHook_taskImage(t *testing.T) {
}

func TestEnvoyVersionHook_tweakImage(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	image := envoy.ImageFormat

@@ -106,7 +107,7 @@ func TestEnvoyVersionHook_tweakImage(t *testing.T) {
}

func TestEnvoyVersionHook_interpolateImage(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	hook := (*envoyVersionHook)(nil)

@@ -156,7 +157,7 @@ func TestEnvoyVersionHook_interpolateImage(t *testing.T) {
}

func TestEnvoyVersionHook_skip(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	h := new(envoyVersionHook)

@@ -221,7 +222,7 @@ func TestEnvoyVersionHook_skip(t *testing.T) {
}

func TestTaskRunner_EnvoyVersionHook_Prestart_standard(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	logger := testlog.HCLogger(t)

@@ -264,7 +265,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_standard(t *testing.T) {
}

func TestTaskRunner_EnvoyVersionHook_Prestart_custom(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	logger := testlog.HCLogger(t)

@@ -308,7 +309,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_custom(t *testing.T) {
}

func TestTaskRunner_EnvoyVersionHook_Prestart_skip(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	logger := testlog.HCLogger(t)

@@ -355,7 +356,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_skip(t *testing.T) {
}

func TestTaskRunner_EnvoyVersionHook_Prestart_fallback(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	logger := testlog.HCLogger(t)

@@ -396,7 +397,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_fallback(t *testing.T) {
}

func TestTaskRunner_EnvoyVersionHook_Prestart_error(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	logger := testlog.HCLogger(t)

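One consequence of the pattern above is easy to miss: ci.Parallel(t) is only called at the top of each test function, so subtests started with t.Run still run sequentially inside their parent unless they opt in themselves. The hypothetical example below illustrates the distinction, assuming ci.Parallel behaves like t.Parallel() as sketched earlier; the test and subtest names are made up and are not part of this diff.

package taskrunner

import (
	"testing"

	"github.com/hashicorp/nomad/ci"
)

// TestExample_subtests runs in parallel with other top-level tests, but its
// two subtests execute one after another because they never opt in themselves.
func TestExample_subtests(t *testing.T) {
	ci.Parallel(t)

	t.Run("case a", func(t *testing.T) {
		// sequential relative to the sibling subtest
	})
	t.Run("case b", func(t *testing.T) {
		// call ci.Parallel(t) here as well to run the subtests concurrently
	})
}

That matches what the hunks in this commit do: only the top-level functions change, so the ordering of existing subtests is unaffected.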
@@ -4,6 +4,7 @@ import (
	"errors"
	"testing"

+	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/require"
)
@@ -14,7 +15,7 @@ var _ structs.Recoverable = (*hookError)(nil)
// TestHookError_Recoverable asserts that a NewHookError is recoverable if
// passed a recoverable error.
func TestHookError_Recoverable(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	// Create root error
	root := errors.New("test error")
@@ -36,7 +37,7 @@ func TestHookError_Recoverable(t *testing.T) {
// TestHookError_Unrecoverable asserts that a NewHookError is not recoverable
// unless it is passed a recoverable error.
func TestHookError_Unrecoverable(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	// Create error
	err := errors.New("test error")
@@ -9,6 +9,7 @@ import (
	"testing"

	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/testlog"
@@ -24,7 +25,7 @@ var _ interfaces.TaskStopHook = (*logmonHook)(nil)
// TestTaskRunner_LogmonHook_LoadReattach unit tests loading logmon reattach
// config from persisted hook state.
func TestTaskRunner_LogmonHook_LoadReattach(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	// No hook data should return nothing
	cfg, err := reattachConfigFromHookData(nil)
@@ -60,7 +61,7 @@ func TestTaskRunner_LogmonHook_LoadReattach(t *testing.T) {
// first time Prestart is called, reattached to on subsequent restarts, and
// killed on Stop.
func TestTaskRunner_LogmonHook_StartStop(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)

	alloc := mock.BatchAlloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
@ -13,6 +13,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
|
@ -25,7 +26,7 @@ import (
|
|||
// Nomad client is restarting and asserts failing to reattach to logmon causes
|
||||
// nomad to spawn a new logmon.
|
||||
func TestTaskRunner_LogmonHook_StartCrashStop(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -94,7 +95,7 @@ func TestTaskRunner_LogmonHook_StartCrashStop(t *testing.T) {
|
|||
// TestTaskRunner_LogmonHook_ShutdownMidStart simulates logmon crashing while the
|
||||
// Nomad client is calling Start() and asserts that we recover and spawn a new logmon.
|
||||
func TestTaskRunner_LogmonHook_ShutdownMidStart(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
|
|
@ -5,10 +5,10 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
"github.com/hashicorp/nomad/plugins/drivers"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func testPolicy(success bool, mode string) *structs.RestartPolicy {
|
||||
|
@ -34,7 +34,7 @@ func testExitResult(exit int) *drivers.ExitResult {
|
|||
}
|
||||
|
||||
func TestClient_RestartTracker_ModeDelay(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
p := testPolicy(true, structs.RestartPolicyModeDelay)
|
||||
rt := NewRestartTracker(p, structs.JobTypeService, nil)
|
||||
for i := 0; i < p.Attempts; i++ {
|
||||
|
@ -60,7 +60,7 @@ func TestClient_RestartTracker_ModeDelay(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RestartTracker_ModeFail(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
p := testPolicy(true, structs.RestartPolicyModeFail)
|
||||
rt := NewRestartTracker(p, structs.JobTypeSystem, nil)
|
||||
for i := 0; i < p.Attempts; i++ {
|
||||
|
@ -80,7 +80,7 @@ func TestClient_RestartTracker_ModeFail(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RestartTracker_NoRestartOnSuccess(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
p := testPolicy(false, structs.RestartPolicyModeDelay)
|
||||
rt := NewRestartTracker(p, structs.JobTypeBatch, nil)
|
||||
if state, _ := rt.SetExitResult(testExitResult(0)).GetState(); state != structs.TaskTerminated {
|
||||
|
@ -89,7 +89,7 @@ func TestClient_RestartTracker_NoRestartOnSuccess(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RestartTracker_ZeroAttempts(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
p := testPolicy(true, structs.RestartPolicyModeFail)
|
||||
p.Attempts = 0
|
||||
|
||||
|
@ -122,7 +122,7 @@ func TestClient_RestartTracker_ZeroAttempts(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RestartTracker_TaskKilled(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
p := testPolicy(true, structs.RestartPolicyModeFail)
|
||||
p.Attempts = 0
|
||||
rt := NewRestartTracker(p, structs.JobTypeService, nil)
|
||||
|
@ -132,7 +132,7 @@ func TestClient_RestartTracker_TaskKilled(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RestartTracker_RestartTriggered(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
p := testPolicy(true, structs.RestartPolicyModeFail)
|
||||
p.Attempts = 0
|
||||
rt := NewRestartTracker(p, structs.JobTypeService, nil)
|
||||
|
@ -142,7 +142,7 @@ func TestClient_RestartTracker_RestartTriggered(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RestartTracker_RestartTriggered_Failure(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
p := testPolicy(true, structs.RestartPolicyModeFail)
|
||||
p.Attempts = 1
|
||||
rt := NewRestartTracker(p, structs.JobTypeService, nil)
|
||||
|
@ -155,7 +155,7 @@ func TestClient_RestartTracker_RestartTriggered_Failure(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RestartTracker_StartError_Recoverable_Fail(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
p := testPolicy(true, structs.RestartPolicyModeFail)
|
||||
rt := NewRestartTracker(p, structs.JobTypeSystem, nil)
|
||||
recErr := structs.NewRecoverableError(fmt.Errorf("foo"), true)
|
||||
|
@ -176,7 +176,7 @@ func TestClient_RestartTracker_StartError_Recoverable_Fail(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RestartTracker_StartError_Recoverable_Delay(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
p := testPolicy(true, structs.RestartPolicyModeDelay)
|
||||
rt := NewRestartTracker(p, structs.JobTypeSystem, nil)
|
||||
recErr := structs.NewRecoverableError(fmt.Errorf("foo"), true)
|
||||
|
@ -201,7 +201,7 @@ func TestClient_RestartTracker_StartError_Recoverable_Delay(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RestartTracker_Lifecycle(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
testCase := []struct {
|
||||
name string
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
|
||||
"github.com/hashicorp/consul/api"
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces"
|
||||
"github.com/hashicorp/nomad/client/consul"
|
||||
"github.com/hashicorp/nomad/client/taskenv"
|
||||
|
@ -63,6 +64,8 @@ type heartbeat struct {
|
|||
// TestScript_Exec_Cancel asserts cancelling a script check shortcircuits
|
||||
// any running scripts.
|
||||
func TestScript_Exec_Cancel(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
exec, cancel := newBlockingScriptExec()
|
||||
defer cancel()
|
||||
|
||||
|
@ -89,7 +92,7 @@ func TestScript_Exec_Cancel(t *testing.T) {
|
|||
// TestScript_Exec_TimeoutBasic asserts a script will be killed when the
|
||||
// timeout is reached.
|
||||
func TestScript_Exec_TimeoutBasic(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
exec, cancel := newBlockingScriptExec()
|
||||
defer cancel()
|
||||
|
||||
|
@ -130,7 +133,7 @@ func TestScript_Exec_TimeoutBasic(t *testing.T) {
|
|||
// the timeout is reached and always set a critical status regardless of what
|
||||
// Exec returns.
|
||||
func TestScript_Exec_TimeoutCritical(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
logger := testlog.HCLogger(t)
|
||||
hb := newFakeHeartbeater()
|
||||
script := newScriptMock(hb, sleeperExec{}, logger, time.Hour, time.Nanosecond)
|
||||
|
@ -151,6 +154,8 @@ func TestScript_Exec_TimeoutCritical(t *testing.T) {
|
|||
// TestScript_Exec_Shutdown asserts a script will be executed once more
|
||||
// when told to shutdown.
|
||||
func TestScript_Exec_Shutdown(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
shutdown := make(chan struct{})
|
||||
exec := newSimpleExec(0, nil)
|
||||
logger := testlog.HCLogger(t)
|
||||
|
@ -180,6 +185,7 @@ func TestScript_Exec_Shutdown(t *testing.T) {
|
|||
// TestScript_Exec_Codes asserts script exit codes are translated to their
|
||||
// corresponding Consul health check status.
|
||||
func TestScript_Exec_Codes(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
exec := newScriptedExec([]execResult{
|
||||
{[]byte("output"), 1, nil},
|
||||
|
@ -224,6 +230,7 @@ func TestScript_Exec_Codes(t *testing.T) {
|
|||
// TestScript_TaskEnvInterpolation asserts that script check hooks are
|
||||
// interpolated in the same way that services are
|
||||
func TestScript_TaskEnvInterpolation(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
consulClient := consul.NewMockConsulServiceClient(t, logger)
|
||||
|
@ -288,6 +295,8 @@ func TestScript_TaskEnvInterpolation(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestScript_associated(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
t.Run("neither set", func(t *testing.T) {
|
||||
require.False(t, new(scriptCheckHook).associated("task1", "", ""))
|
||||
})
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
|
||||
"github.com/hashicorp/nomad/client/consul"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
|
@ -53,6 +54,7 @@ func TestUpdate_beforePoststart(t *testing.T) {
|
|||
}
|
||||
|
||||
func Test_serviceHook_multipleDeRegisterCall(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.Alloc()
|
||||
logger := testlog.HCLogger(t)
|
||||
|
|
|
@ -14,6 +14,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
|
||||
consulapi "github.com/hashicorp/nomad/client/consul"
|
||||
"github.com/hashicorp/nomad/helper"
|
||||
|
@ -46,7 +47,7 @@ func sidecar(task string) (string, structs.TaskKind) {
|
|||
}
|
||||
|
||||
func TestSIDSHook_recoverToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
r := require.New(t)
|
||||
|
||||
secrets := tmpDir(t)
|
||||
|
@ -71,7 +72,7 @@ func TestSIDSHook_recoverToken(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSIDSHook_recoverToken_empty(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
r := require.New(t)
|
||||
|
||||
secrets := tmpDir(t)
|
||||
|
@ -92,6 +93,7 @@ func TestSIDSHook_recoverToken_empty(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSIDSHook_recoverToken_unReadable(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
// This test fails when running as root because the test case for checking
|
||||
// the error condition when the file is unreadable fails (root can read the
|
||||
// file even though the permissions are set to 0200).
|
||||
|
@ -99,7 +101,6 @@ func TestSIDSHook_recoverToken_unReadable(t *testing.T) {
|
|||
t.Skip("test only works as non-root")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
r := require.New(t)
|
||||
|
||||
secrets := tmpDir(t)
|
||||
|
@ -122,7 +123,7 @@ func TestSIDSHook_recoverToken_unReadable(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSIDSHook_writeToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
r := require.New(t)
|
||||
|
||||
secrets := tmpDir(t)
|
||||
|
@ -139,6 +140,7 @@ func TestSIDSHook_writeToken(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSIDSHook_writeToken_unWritable(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
// This test fails when running as root because the test case for checking
|
||||
// the error condition when the file is unreadable fails (root can read the
|
||||
// file even though the permissions are set to 0200).
|
||||
|
@ -146,7 +148,6 @@ func TestSIDSHook_writeToken_unWritable(t *testing.T) {
|
|||
t.Skip("test only works as non-root")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
r := require.New(t)
|
||||
|
||||
secrets := tmpDir(t)
|
||||
|
@ -162,7 +163,7 @@ func TestSIDSHook_writeToken_unWritable(t *testing.T) {
|
|||
}
|
||||
|
||||
func Test_SIDSHook_writeToken_nonExistent(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
r := require.New(t)
|
||||
|
||||
base := tmpDir(t)
|
||||
|
@ -176,7 +177,7 @@ func Test_SIDSHook_writeToken_nonExistent(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSIDSHook_deriveSIToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
r := require.New(t)
|
||||
|
||||
taskName, taskKind := sidecar("task1")
|
||||
|
@ -197,7 +198,7 @@ func TestSIDSHook_deriveSIToken(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSIDSHook_deriveSIToken_timeout(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
r := require.New(t)
|
||||
|
||||
siClient := consulapi.NewMockServiceIdentitiesClient()
|
||||
|
@ -227,7 +228,7 @@ func TestSIDSHook_deriveSIToken_timeout(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSIDSHook_computeBackoff(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
try := func(i int, exp time.Duration) {
|
||||
result := computeBackoff(i)
|
||||
|
@ -243,7 +244,7 @@ func TestSIDSHook_computeBackoff(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSIDSHook_backoff(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
r := require.New(t)
|
||||
|
||||
ctx := context.Background()
|
||||
|
@ -252,7 +253,7 @@ func TestSIDSHook_backoff(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSIDSHook_backoffKilled(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
r := require.New(t)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 1)
|
||||
|
@ -263,6 +264,7 @@ func TestSIDSHook_backoffKilled(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_DeriveSIToken_UnWritableTokenFile(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
// Normally this test would live in test_runner_test.go, but since it requires
|
||||
// root and the check for root doesn't like Windows, we put this file in here
|
||||
// for now.
|
||||
|
@ -274,7 +276,6 @@ func TestTaskRunner_DeriveSIToken_UnWritableTokenFile(t *testing.T) {
|
|||
t.Skip("test only works as non-root")
|
||||
}
|
||||
|
||||
t.Parallel()
|
||||
r := require.New(t)
|
||||
|
||||
alloc := mock.BatchConnectAlloc()
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
|
||||
cstructs "github.com/hashicorp/nomad/client/structs"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
|
@ -82,7 +83,7 @@ func (m *mockDriverStats) Called() int {
|
|||
// TestTaskRunner_StatsHook_PoststartExited asserts the stats hook starts and
|
||||
// stops.
|
||||
func TestTaskRunner_StatsHook_PoststartExited(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
require := require.New(t)
|
||||
logger := testlog.HCLogger(t)
|
||||
|
@ -114,7 +115,7 @@ func TestTaskRunner_StatsHook_PoststartExited(t *testing.T) {
|
|||
// TestTaskRunner_StatsHook_Periodic asserts the stats hook collects stats on
|
||||
// an interval.
|
||||
func TestTaskRunner_StatsHook_Periodic(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
require := require.New(t)
|
||||
logger := testlog.HCLogger(t)
|
||||
|
@ -179,7 +180,7 @@ WAITING:
|
|||
// TestTaskRunner_StatsHook_NotImplemented asserts the stats hook stops if the
|
||||
// driver returns NotImplemented.
|
||||
func TestTaskRunner_StatsHook_NotImplemented(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
require := require.New(t)
|
||||
logger := testlog.HCLogger(t)
|
||||
|
@ -208,7 +209,7 @@ func TestTaskRunner_StatsHook_NotImplemented(t *testing.T) {
|
|||
// TestTaskRunner_StatsHook_Backoff asserts that stats hook does some backoff
|
||||
// even if the driver doesn't support intervals well
|
||||
func TestTaskRunner_StatsHook_Backoff(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
su := newMockStatsUpdater()
|
||||
|
|
|
@ -14,6 +14,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/kr/pretty"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
@ -141,7 +142,7 @@ func runTestTaskRunner(t *testing.T, alloc *structs.Allocation, taskName string)
|
|||
}
|
||||
|
||||
func TestTaskRunner_BuildTaskConfig_CPU_Memory(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
|
@ -209,7 +210,7 @@ func TestTaskRunner_BuildTaskConfig_CPU_Memory(t *testing.T) {
|
|||
// TestTaskRunner_Stop_ExitCode asserts that the exit code is captured on a task, even if it's stopped
|
||||
func TestTaskRunner_Stop_ExitCode(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
alloc.Job.TaskGroups[0].Count = 1
|
||||
|
@ -258,7 +259,7 @@ func TestTaskRunner_Stop_ExitCode(t *testing.T) {
|
|||
// TestTaskRunner_Restore_Running asserts restoring a running task does not
|
||||
// rerun the task.
|
||||
func TestTaskRunner_Restore_Running(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
|
@ -314,7 +315,7 @@ func TestTaskRunner_Restore_Running(t *testing.T) {
|
|||
// returned once it is running and waiting in pending along with a cleanup
|
||||
// func.
|
||||
func setupRestoreFailureTest(t *testing.T, alloc *structs.Allocation) (*TaskRunner, *Config, func()) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Driver = "raw_exec"
|
||||
|
@ -388,6 +389,8 @@ func setupRestoreFailureTest(t *testing.T, alloc *structs.Allocation) (*TaskRunn
|
|||
// TestTaskRunner_Restore_Restart asserts restoring a dead task blocks until
|
||||
// MarkAlive is called. #1795
|
||||
func TestTaskRunner_Restore_Restart(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
newTR, conf, cleanup := setupRestoreFailureTest(t, mock.Alloc())
|
||||
defer cleanup()
|
||||
|
||||
|
@ -405,6 +408,8 @@ func TestTaskRunner_Restore_Restart(t *testing.T) {
|
|||
// TestTaskRunner_Restore_Kill asserts restoring a dead task blocks until
|
||||
// the task is killed. #1795
|
||||
func TestTaskRunner_Restore_Kill(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
newTR, _, cleanup := setupRestoreFailureTest(t, mock.Alloc())
|
||||
defer cleanup()
|
||||
|
||||
|
@ -430,6 +435,8 @@ func TestTaskRunner_Restore_Kill(t *testing.T) {
|
|||
// TestTaskRunner_Restore_Update asserts restoring a dead task blocks until
|
||||
// Update is called. #1795
|
||||
func TestTaskRunner_Restore_Update(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
newTR, conf, cleanup := setupRestoreFailureTest(t, mock.Alloc())
|
||||
defer cleanup()
|
||||
|
||||
|
@ -454,7 +461,7 @@ func TestTaskRunner_Restore_Update(t *testing.T) {
|
|||
// TestTaskRunner_Restore_System asserts restoring a dead system task does not
|
||||
// block.
|
||||
func TestTaskRunner_Restore_System(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.Alloc()
|
||||
alloc.Job.Type = structs.JobTypeSystem
|
||||
|
@ -527,7 +534,7 @@ func TestTaskRunner_Restore_System(t *testing.T) {
|
|||
// TestTaskRunner_TaskEnv_Interpolated asserts driver configurations are
|
||||
// interpolated.
|
||||
func TestTaskRunner_TaskEnv_Interpolated(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
|
@ -571,7 +578,7 @@ func TestTaskRunner_TaskEnv_Interpolated(t *testing.T) {
|
|||
// not host paths.
|
||||
func TestTaskRunner_TaskEnv_Chroot(t *testing.T) {
|
||||
ctestutil.ExecCompatible(t)
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
|
@ -629,7 +636,7 @@ func TestTaskRunner_TaskEnv_Chroot(t *testing.T) {
|
|||
// not host paths. Host env vars should also be excluded.
|
||||
func TestTaskRunner_TaskEnv_Image(t *testing.T) {
|
||||
ctestutil.DockerCompatible(t)
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
|
@ -672,7 +679,7 @@ func TestTaskRunner_TaskEnv_Image(t *testing.T) {
|
|||
|
||||
// TestTaskRunner_TaskEnv_None asserts raw_exec uses host paths and env vars.
|
||||
func TestTaskRunner_TaskEnv_None(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
|
@ -715,7 +722,7 @@ func TestTaskRunner_TaskEnv_None(t *testing.T) {
|
|||
|
||||
// Test that devices get sent to the driver
|
||||
func TestTaskRunner_DevicePropogation(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Create a mock alloc that has a gpu
|
||||
|
@ -812,7 +819,7 @@ func (h *mockEnvHook) Prestart(ctx context.Context, req *interfaces.TaskPrestart
|
|||
// hook environments set restores the environment without re-running done
|
||||
// hooks.
|
||||
func TestTaskRunner_Restore_HookEnv(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
|
@ -849,7 +856,7 @@ func TestTaskRunner_Restore_HookEnv(t *testing.T) {
|
|||
// This test asserts that we can recover from an "external" plugin exiting by
|
||||
// retrieving a new instance of the driver and recovering the task.
|
||||
func TestTaskRunner_RecoverFromDriverExiting(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Create an allocation using the mock driver that exits simulating the
|
||||
|
@ -922,7 +929,7 @@ func TestTaskRunner_RecoverFromDriverExiting(t *testing.T) {
|
|||
// TestTaskRunner_ShutdownDelay asserts services are removed from Consul
|
||||
// ${shutdown_delay} seconds before killing the process.
|
||||
func TestTaskRunner_ShutdownDelay(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -1006,7 +1013,7 @@ WAIT:
|
|||
// Consul and tasks are killed without waiting for ${shutdown_delay}
|
||||
// when the alloc has the NoShutdownDelay transition flag set.
|
||||
func TestTaskRunner_NoShutdownDelay(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
// don't set this too high so that we don't block the test runner
|
||||
// on shutting down the agent if the test fails
|
||||
|
@ -1081,7 +1088,7 @@ func TestTaskRunner_NoShutdownDelay(t *testing.T) {
|
|||
// TestTaskRunner_Dispatch_Payload asserts that a dispatch job runs and the
|
||||
// payload was written to disk.
|
||||
func TestTaskRunner_Dispatch_Payload(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -1127,7 +1134,7 @@ func TestTaskRunner_Dispatch_Payload(t *testing.T) {
|
|||
// TestTaskRunner_SignalFailure asserts that signal errors are properly
|
||||
// propagated from the driver to TaskRunner.
|
||||
func TestTaskRunner_SignalFailure(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -1149,7 +1156,7 @@ func TestTaskRunner_SignalFailure(t *testing.T) {
|
|||
// TestTaskRunner_RestartTask asserts that restarting a task works and emits a
|
||||
// Restarting event.
|
||||
func TestTaskRunner_RestartTask(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -1201,7 +1208,7 @@ func TestTaskRunner_RestartTask(t *testing.T) {
|
|||
// TestTaskRunner_CheckWatcher_Restart asserts that when enabled an unhealthy
|
||||
// Consul check will cause a task to restart following restart policy rules.
|
||||
func TestTaskRunner_CheckWatcher_Restart(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.Alloc()
|
||||
|
||||
|
@ -1319,7 +1326,7 @@ func useMockEnvoyBootstrapHook(tr *TaskRunner) {
|
|||
// TestTaskRunner_BlockForSIDSToken asserts tasks do not start until a Consul
|
||||
// Service Identity token is derived.
|
||||
func TestTaskRunner_BlockForSIDSToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
r := require.New(t)
|
||||
|
||||
alloc := mock.BatchConnectAlloc()
|
||||
|
@ -1387,7 +1394,7 @@ func TestTaskRunner_BlockForSIDSToken(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_DeriveSIToken_Retry(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
r := require.New(t)
|
||||
|
||||
alloc := mock.BatchConnectAlloc()
|
||||
|
@ -1446,7 +1453,7 @@ func TestTaskRunner_DeriveSIToken_Retry(t *testing.T) {
|
|||
// TestTaskRunner_DeriveSIToken_Unrecoverable asserts that an unrecoverable error
|
||||
// from deriving a service identity token will fail a task.
|
||||
func TestTaskRunner_DeriveSIToken_Unrecoverable(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
r := require.New(t)
|
||||
|
||||
alloc := mock.BatchConnectAlloc()
|
||||
|
@ -1503,7 +1510,7 @@ func TestTaskRunner_DeriveSIToken_Unrecoverable(t *testing.T) {
|
|||
// TestTaskRunner_BlockForVaultToken asserts tasks do not start until a vault token
|
||||
// is derived.
|
||||
func TestTaskRunner_BlockForVaultToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -1581,7 +1588,7 @@ func TestTaskRunner_BlockForVaultToken(t *testing.T) {
|
|||
// returned when deriving a vault token a task will continue to block while
|
||||
// it's retried.
|
||||
func TestTaskRunner_DeriveToken_Retry(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
alloc := mock.BatchAlloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
task.Vault = &structs.Vault{Policies: []string{"default"}}
|
||||
|
@ -1645,7 +1652,7 @@ func TestTaskRunner_DeriveToken_Retry(t *testing.T) {
|
|||
// TestTaskRunner_DeriveToken_Unrecoverable asserts that an unrecoverable error
|
||||
// from deriving a vault token will fail a task.
|
||||
func TestTaskRunner_DeriveToken_Unrecoverable(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
// Use a batch job with no restarts
|
||||
alloc := mock.BatchAlloc()
|
||||
|
@ -1690,7 +1697,7 @@ func TestTaskRunner_DeriveToken_Unrecoverable(t *testing.T) {
|
|||
// TestTaskRunner_Download_ChrootExec asserts that downloaded artifacts may be
|
||||
// executed in a chroot.
|
||||
func TestTaskRunner_Download_ChrootExec(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
ctestutil.ExecCompatible(t)
|
||||
|
||||
ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir("."))))
|
||||
|
@ -1731,7 +1738,7 @@ func TestTaskRunner_Download_ChrootExec(t *testing.T) {
|
|||
// TestTaskRunner_Download_Exec asserts that downloaded artifacts may be
|
||||
// executed in a driver without filesystem isolation.
|
||||
func TestTaskRunner_Download_RawExec(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir("."))))
|
||||
defer ts.Close()
|
||||
|
@ -1771,7 +1778,7 @@ func TestTaskRunner_Download_RawExec(t *testing.T) {
|
|||
// TestTaskRunner_Download_List asserts that multiple artificats are downloaded
|
||||
// before a task is run.
|
||||
func TestTaskRunner_Download_List(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir("."))))
|
||||
defer ts.Close()
|
||||
|
||||
|
@ -1820,7 +1827,7 @@ func TestTaskRunner_Download_List(t *testing.T) {
|
|||
// TestTaskRunner_Download_Retries asserts that failed artifact downloads are
|
||||
// retried according to the task's restart policy.
|
||||
func TestTaskRunner_Download_Retries(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
// Create an allocation that has a task with bad artifacts.
|
||||
alloc := mock.BatchAlloc()
|
||||
|
@ -1866,7 +1873,7 @@ func TestTaskRunner_Download_Retries(t *testing.T) {
|
|||
// TestTaskRunner_DriverNetwork asserts that a driver's network is properly
|
||||
// used in services and checks.
|
||||
func TestTaskRunner_DriverNetwork(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -2002,7 +2009,7 @@ func TestTaskRunner_DriverNetwork(t *testing.T) {
|
|||
// TestTaskRunner_RestartSignalTask_NotRunning asserts resilience to failures
|
||||
// when a restart or signal is triggered and the task is not running.
|
||||
func TestTaskRunner_RestartSignalTask_NotRunning(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -2069,7 +2076,7 @@ func TestTaskRunner_RestartSignalTask_NotRunning(t *testing.T) {
|
|||
// TestTaskRunner_Run_RecoverableStartError asserts tasks are restarted if they
|
||||
// return a recoverable error from StartTask.
|
||||
func TestTaskRunner_Run_RecoverableStartError(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -2111,7 +2118,7 @@ func TestTaskRunner_Run_RecoverableStartError(t *testing.T) {
|
|||
|
||||
// TestTaskRunner_Template_Artifact asserts that tasks can use artifacts as templates.
|
||||
func TestTaskRunner_Template_Artifact(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
ts := httptest.NewServer(http.FileServer(http.Dir(".")))
|
||||
defer ts.Close()
|
||||
|
@ -2171,7 +2178,7 @@ func TestTaskRunner_Template_Artifact(t *testing.T) {
|
|||
// that fails to render in PreStart can gracefully be shutdown by
|
||||
// either killCtx or shutdownCtx
|
||||
func TestTaskRunner_Template_BlockingPreStart(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -2233,7 +2240,7 @@ func TestTaskRunner_Template_BlockingPreStart(t *testing.T) {
|
|||
// TestTaskRunner_Template_NewVaultToken asserts that a new vault token is
|
||||
// created when rendering template and that it is revoked on alloc completion
|
||||
func TestTaskRunner_Template_NewVaultToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -2312,7 +2319,7 @@ func TestTaskRunner_Template_NewVaultToken(t *testing.T) {
|
|||
// TestTaskRunner_VaultManager_Restart asserts that the alloc is restarted when the alloc
|
||||
// derived vault token expires, when task is configured with Restart change mode
|
||||
func TestTaskRunner_VaultManager_Restart(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -2385,7 +2392,7 @@ func TestTaskRunner_VaultManager_Restart(t *testing.T) {
|
|||
// TestTaskRunner_VaultManager_Signal asserts that the alloc is signalled when the alloc
|
||||
// derived vault token expires, when task is configured with signal change mode
|
||||
func TestTaskRunner_VaultManager_Signal(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
@ -2449,7 +2456,7 @@ func TestTaskRunner_VaultManager_Signal(t *testing.T) {
|
|||
// TestTaskRunner_UnregisterConsul_Retries asserts a task is unregistered from
|
||||
// Consul when waiting to be retried.
|
||||
func TestTaskRunner_UnregisterConsul_Retries(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.Alloc()
|
||||
// Make the restart policy try one ctx.update
|
||||
|
@ -2509,7 +2516,7 @@ func testWaitForTaskToStart(t *testing.T, tr *TaskRunner) {
|
|||
// TestTaskRunner_BaseLabels tests that the base labels for the task metrics
|
||||
// are set appropriately.
|
||||
func TestTaskRunner_BaseLabels(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
alloc := mock.BatchAlloc()
|
||||
|
|
|
@ -10,6 +10,7 @@ import (
|
|||
"time"
|
||||
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/helper/testtask"
|
||||
|
@ -23,6 +24,8 @@ func TestMain(m *testing.M) {
|
|||
}
|
||||
|
||||
func TestTasklet_Exec_HappyPath(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
results := []execResult{
|
||||
{[]byte("output"), 0, nil},
|
||||
{[]byte("output"), 1, nil},
|
||||
|
@ -53,6 +56,8 @@ func TestTasklet_Exec_HappyPath(t *testing.T) {
|
|||
// TestTasklet_Exec_Cancel asserts cancelling a tasklet short-circuits
|
||||
// any running executions the tasklet
|
||||
func TestTasklet_Exec_Cancel(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
exec, cancel := newBlockingScriptExec()
|
||||
defer cancel()
|
||||
tm := newTaskletMock(exec, testlog.HCLogger(t), time.Hour, time.Hour)
|
||||
|
@ -85,7 +90,7 @@ func TestTasklet_Exec_Cancel(t *testing.T) {
|
|||
// TestTasklet_Exec_Timeout asserts a tasklet script will be killed
|
||||
// when the timeout is reached.
|
||||
func TestTasklet_Exec_Timeout(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
exec, cancel := newBlockingScriptExec()
|
||||
defer cancel()
|
||||
|
||||
|
@ -125,6 +130,8 @@ func TestTasklet_Exec_Timeout(t *testing.T) {
|
|||
// TestTasklet_Exec_Shutdown asserts a script will be executed once more
|
||||
// when told to shutdown.
|
||||
func TestTasklet_Exec_Shutdown(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
exec := newSimpleExec(0, nil)
|
||||
shutdown := make(chan struct{})
|
||||
tm := newTaskletMock(exec, testlog.HCLogger(t), time.Hour, 3*time.Second)
|
||||
|
|
|
@ -18,6 +18,7 @@ import (
|
|||
|
||||
templateconfig "github.com/hashicorp/consul-template/config"
|
||||
ctestutil "github.com/hashicorp/consul/sdk/testutil"
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/allocdir"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/client/taskenv"
|
||||
|
@ -230,7 +231,7 @@ func (h *testHarness) stop() {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_InvalidConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
hooks := NewMockTaskHooks()
|
||||
clientConfig := &config.Config{Region: "global"}
|
||||
taskDir := "foo"
|
||||
|
@ -371,7 +372,7 @@ func TestTaskTemplateManager_InvalidConfig(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_HostPath(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
// Make a template that will render immediately and write it to a tmp file
|
||||
f, err := ioutil.TempFile("", "")
|
||||
if err != nil {
|
||||
|
@ -463,7 +464,7 @@ func TestTaskTemplateManager_HostPath(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Unblock_Static(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
// Make a template that will render immediately
|
||||
content := "hello, world!"
|
||||
file := "my.tmpl"
|
||||
|
@ -497,7 +498,7 @@ func TestTaskTemplateManager_Unblock_Static(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Permissions(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
// Make a template that will render immediately
|
||||
content := "hello, world!"
|
||||
file := "my.tmpl"
|
||||
|
@ -532,7 +533,7 @@ func TestTaskTemplateManager_Permissions(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
// Make a template that will render immediately
|
||||
content := `Hello Nomad Task: {{env "NOMAD_TASK_NAME"}}`
|
||||
expected := fmt.Sprintf("Hello Nomad Task: %s", TestTaskName)
|
||||
|
@ -567,7 +568,7 @@ func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
// Make a template that will render immediately
|
||||
content := "hello, world!"
|
||||
file := "my.tmpl"
|
||||
|
@ -608,7 +609,7 @@ func TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Unblock_Consul(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
// Make a template that will render based on a key in Consul
|
||||
key := "foo"
|
||||
content := "barbaz"
|
||||
|
@ -654,7 +655,7 @@ func TestTaskTemplateManager_Unblock_Consul(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Unblock_Vault(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
// Make a template that will render based on a key in Vault
|
||||
vaultPath := "secret/data/password"
|
||||
|
@ -704,7 +705,7 @@ func TestTaskTemplateManager_Unblock_Vault(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
// Make a template that will render immediately
|
||||
staticContent := "hello, world!"
|
||||
staticFile := "my.tmpl"
|
||||
|
@ -772,7 +773,7 @@ func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) {
|
|||
// TestTaskTemplateManager_FirstRender_Restored tests that a task that's been
|
||||
// restored renders and triggers its change mode if the template has changed
|
||||
func TestTaskTemplateManager_FirstRender_Restored(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
// Make a template that will render based on a key in Vault
|
||||
vaultPath := "secret/data/password"
|
||||
|
@ -869,7 +870,7 @@ OUTER:
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Rerender_Noop(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
// Make a template that will render based on a key in Consul
|
||||
key := "foo"
|
||||
content1 := "bar"
|
||||
|
@ -938,7 +939,7 @@ func TestTaskTemplateManager_Rerender_Noop(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Rerender_Signal(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
// Make a template that renders based on a key in Consul and sends SIGALRM
|
||||
key1 := "foo"
|
||||
content1_1 := "bar"
|
||||
|
@ -1038,7 +1039,7 @@ OUTER:
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Rerender_Restart(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
// Make a template that renders based on a key in Consul and sends restart
|
||||
key1 := "bam"
|
||||
content1_1 := "cat"
|
||||
|
@ -1102,7 +1103,7 @@ OUTER:
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Interpolate_Destination(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
// Make a template that will have its destination interpolated
|
||||
content := "hello, world!"
|
||||
file := "${node.unique.id}.tmpl"
|
||||
|
@ -1137,7 +1138,7 @@ func TestTaskTemplateManager_Interpolate_Destination(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Signal_Error(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Make a template that renders based on a key in Consul and sends SIGALRM
|
||||
|
@ -1189,7 +1190,7 @@ func TestTaskTemplateManager_Signal_Error(t *testing.T) {
|
|||
// process environment variables. nomad host process environment variables
|
||||
// are to be treated the same as not found environment variables.
|
||||
func TestTaskTemplateManager_FiltersEnvVars(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
defer os.Setenv("NOMAD_TASK_NAME", os.Getenv("NOMAD_TASK_NAME"))
|
||||
os.Setenv("NOMAD_TASK_NAME", "should be overridden by task")
|
||||
|
@ -1233,7 +1234,7 @@ TEST_ENV_NOT_FOUND: {{env "` + testenv + `_NOTFOUND" }}`
|
|||
// TestTaskTemplateManager_Env asserts templates with the env flag set are read
|
||||
// into the task's environment.
|
||||
func TestTaskTemplateManager_Env(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
template := &structs.Template{
|
||||
EmbeddedTmpl: `
|
||||
# Comment lines are ok
|
||||
|
@ -1276,7 +1277,7 @@ ANYTHING_goes=Spaces are=ok!
|
|||
// TestTaskTemplateManager_Env_Missing asserts the core env
|
||||
// template processing function returns errors when files don't exist
|
||||
func TestTaskTemplateManager_Env_Missing(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
d, err := ioutil.TempDir("", "ct_env_missing")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -1311,7 +1312,7 @@ func TestTaskTemplateManager_Env_Missing(t *testing.T) {
|
|||
// TestTaskTemplateManager_Env_InterpolatedDest asserts the core env
|
||||
// template processing function handles interpolated destinations
|
||||
func TestTaskTemplateManager_Env_InterpolatedDest(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
d, err := ioutil.TempDir("", "ct_env_interpolated")
|
||||
|
@ -1352,7 +1353,7 @@ func TestTaskTemplateManager_Env_InterpolatedDest(t *testing.T) {
|
|||
// template processing function returns combined env vars from multiple
|
||||
// templates correctly.
|
||||
func TestTaskTemplateManager_Env_Multi(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
d, err := ioutil.TempDir("", "ct_env_missing")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
@ -1398,7 +1399,7 @@ func TestTaskTemplateManager_Env_Multi(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskTemplateManager_Rerender_Env(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
// Make a template that renders based on a key in Consul and sends restart
|
||||
key1 := "bam"
|
||||
key2 := "bar"
|
||||
|
@ -1480,7 +1481,7 @@ OUTER:
|
|||
// TestTaskTemplateManager_Config_ServerName asserts the tls_server_name
|
||||
// setting is propagated to consul-template's configuration. See #2776
|
||||
func TestTaskTemplateManager_Config_ServerName(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
c := config.DefaultConfig()
|
||||
c.VaultConfig = &sconfig.VaultConfig{
|
||||
Enabled: helper.BoolToPtr(true),
|
||||
|
@ -1504,7 +1505,7 @@ func TestTaskTemplateManager_Config_ServerName(t *testing.T) {
|
|||
// TestTaskTemplateManager_Config_VaultNamespace asserts the Vault namespace setting is
|
||||
// propagated to consul-template's configuration.
|
||||
func TestTaskTemplateManager_Config_VaultNamespace(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
testNS := "test-namespace"
|
||||
|
@ -1535,7 +1536,7 @@ func TestTaskTemplateManager_Config_VaultNamespace(t *testing.T) {
|
|||
// TestTaskTemplateManager_Config_VaultNamespace asserts the Vault namespace setting is
|
||||
// propagated to consul-template's configuration.
|
||||
func TestTaskTemplateManager_Config_VaultNamespace_TaskOverride(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
testNS := "test-namespace"
|
||||
|
@ -1570,7 +1571,7 @@ func TestTaskTemplateManager_Config_VaultNamespace_TaskOverride(t *testing.T) {
|
|||
// TestTaskTemplateManager_Escapes asserts that when sandboxing is enabled
|
||||
// interpolated paths are not incorrectly treated as escaping the alloc dir.
|
||||
func TestTaskTemplateManager_Escapes(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
clientConf := config.DefaultConfig()
|
||||
require.False(t, clientConf.TemplateConfig.DisableSandbox, "expected sandbox to be disabled")
|
||||
|
@ -1822,7 +1823,7 @@ func TestTaskTemplateManager_BlockedEvents(t *testing.T) {
|
|||
// then subsequently sets 0, 1, 2 keys
|
||||
// then asserts that templates are still blocked on 3 and 4,
|
||||
// and check that we got the relevant task events
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Make a template that will render based on a key in Consul
|
||||
|
@ -1920,7 +1921,7 @@ WAIT_LOOP:
|
|||
// configuration is accurately mapped from the client to the TaskTemplateManager
|
||||
// and that any operator defined boundaries are enforced.
|
||||
func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
testNS := "test-namespace"
|
||||
|
||||
|
@ -2126,7 +2127,7 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) {
|
|||
// configuration is accurately mapped from the template to the TaskTemplateManager's
|
||||
// template config.
|
||||
func TestTaskTemplateManager_Template_Wait_Set(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
c := config.DefaultConfig()
|
||||
c.Node = mock.Node()
|
||||
|
|
|
@ -3,6 +3,7 @@ package taskrunner
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/client/taskenv"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -10,7 +11,7 @@ import (
|
|||
)
|
||||
|
||||
func TestTaskRunner_Validate_UserEnforcement(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
taskEnv := taskenv.NewEmptyBuilder().Build()
|
||||
conf := config.DefaultConfig()
|
||||
|
@ -35,7 +36,7 @@ func TestTaskRunner_Validate_UserEnforcement(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestTaskRunner_Validate_ServiceName(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
builder := taskenv.NewEmptyBuilder()
|
||||
conf := config.DefaultConfig()
|
||||
|
|
|
@ -3,6 +3,7 @@ package taskrunner
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/allocrunner/interfaces"
|
||||
"github.com/hashicorp/nomad/client/pluginmanager/csimanager"
|
||||
cstructs "github.com/hashicorp/nomad/client/structs"
|
||||
|
@ -16,6 +17,8 @@ import (
|
|||
)
|
||||
|
||||
func TestVolumeHook_PartitionMountsByVolume_Works(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
mounts := []*structs.VolumeMount{
|
||||
{
|
||||
Volume: "foo",
|
||||
|
@ -68,6 +71,7 @@ func TestVolumeHook_PartitionMountsByVolume_Works(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestVolumeHook_prepareCSIVolumes(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
req := &interfaces.TaskPrestartRequest{
|
||||
Task: &structs.Task{
|
||||
|
@ -157,6 +161,7 @@ func TestVolumeHook_prepareCSIVolumes(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestVolumeHook_Interpolation(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
alloc := mock.Alloc()
|
||||
task := alloc.Job.TaskGroups[0].Tasks[0]
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
"time"
|
||||
|
||||
hclog "github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/allocdir"
|
||||
cstructs "github.com/hashicorp/nomad/client/structs"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
|
@ -88,6 +89,8 @@ func newConfig(t *testing.T) (Config, func()) {
|
|||
// TestPrevAlloc_Noop asserts that when no previous allocation is set the noop
|
||||
// implementation is returned that does not block or perform migrations.
|
||||
func TestPrevAlloc_Noop(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
conf, cleanup := newConfig(t)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -114,7 +117,8 @@ func TestPrevAlloc_Noop(t *testing.T) {
|
|||
// TestPrevAlloc_LocalPrevAlloc_Block asserts that when a previous alloc runner
|
||||
// is set a localPrevAlloc will block on it.
|
||||
func TestPrevAlloc_LocalPrevAlloc_Block(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
conf, cleanup := newConfig(t)
|
||||
|
||||
defer cleanup()
|
||||
|
@ -181,7 +185,8 @@ func TestPrevAlloc_LocalPrevAlloc_Block(t *testing.T) {
|
|||
// TestPrevAlloc_LocalPrevAlloc_Terminated asserts that when a previous alloc
|
||||
// runner has already terminated the watcher does not block on the broadcaster.
|
||||
func TestPrevAlloc_LocalPrevAlloc_Terminated(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
conf, cleanup := newConfig(t)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -201,7 +206,8 @@ func TestPrevAlloc_LocalPrevAlloc_Terminated(t *testing.T) {
|
|||
// streaming a tar cause the migration to be cancelled and no files are written
|
||||
// (migrations are atomic).
|
||||
func TestPrevAlloc_StreamAllocDir_Error(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
dest, err := ioutil.TempDir("", "nomadtest-")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
ctestutil "github.com/hashicorp/nomad/client/testutil"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
)
|
||||
|
@ -22,8 +23,9 @@ import (
|
|||
// TestPrevAlloc_StreamAllocDir_Ok asserts that streaming a tar to an alloc dir
|
||||
// works.
|
||||
func TestPrevAlloc_StreamAllocDir_Ok(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
ctestutil.RequireRoot(t)
|
||||
t.Parallel()
|
||||
|
||||
dir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
	"fmt"
	"testing"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"

@ -13,7 +14,7 @@ import (
// TestPrevAlloc_GroupPrevAllocWatcher_Block asserts that when there are
// prevAllocs is set a groupPrevAllocWatcher will block on them
func TestPrevAlloc_GroupPrevAllocWatcher_Block(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	conf, cleanup := newConfig(t)

	defer cleanup()

@ -80,7 +81,8 @@ func TestPrevAlloc_GroupPrevAllocWatcher_Block(t *testing.T) {
// multiple prevAllocs is set a groupPrevAllocWatcher will block until all
// are complete
func TestPrevAlloc_GroupPrevAllocWatcher_BlockMulti(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)

	conf1, cleanup1 := newConfig(t)
	defer cleanup1()
	conf1.Alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{

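TestPrevAlloc_GroupPrevAllocWatcher_BlockMulti asserts that the group watcher blocks until every previous allocation has completed. That is a fan-in over several single-alloc waits; a hedged sketch of the idea (again illustrative, not Nomad's groupPrevAllocWatcher) is:

    // Sketch of "block until all previous allocs are complete"; names and
    // structure are assumptions for illustration, not Nomad's implementation.
    package main

    import (
    	"context"
    	"fmt"
    	"sync"
    	"time"
    )

    // waitAll blocks until every done channel is closed or the context ends.
    func waitAll(ctx context.Context, doneChs ...<-chan struct{}) error {
    	var wg sync.WaitGroup
    	errCh := make(chan error, len(doneChs))

    	for _, ch := range doneChs {
    		wg.Add(1)
    		go func(ch <-chan struct{}) {
    			defer wg.Done()
    			select {
    			case <-ch:
    			case <-ctx.Done():
    				errCh <- ctx.Err()
    			}
    		}(ch)
    	}

    	wg.Wait()
    	select {
    	case err := <-errCh:
    		return err
    	default:
    		return nil
    	}
    }

    func main() {
    	a, b := make(chan struct{}), make(chan struct{})
    	go func() { time.Sleep(10 * time.Millisecond); close(a) }()
    	go func() { time.Sleep(20 * time.Millisecond); close(b) }()

    	fmt.Println(waitAll(context.Background(), a, b)) // <nil> once both close
    }
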
@ -4,6 +4,7 @@ import (
	"testing"

	"github.com/hashicorp/nomad/acl"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/client/structs"
	"github.com/hashicorp/nomad/nomad/mock"

@ -12,8 +13,9 @@ import (
)

func TestClientStats_Stats(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)

	client, cleanup := TestClient(t, nil)
	defer cleanup()

@ -26,7 +28,7 @@ func TestClientStats_Stats(t *testing.T) {
}

func TestClientStats_Stats_ACL(t *testing.T) {
	t.Parallel()
	ci.Parallel(t)
	require := require.New(t)

	server, addr, root, cleanupS := testACLServer(t, nil)

@ -12,6 +12,7 @@ import (
|
|||
"time"
|
||||
|
||||
memdb "github.com/hashicorp/go-memdb"
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
trstate "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
consulApi "github.com/hashicorp/nomad/client/consul"
|
||||
|
@ -45,7 +46,8 @@ func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string, fu
|
|||
}
|
||||
|
||||
func TestClient_StartStop(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
client, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
if err := client.Shutdown(); err != nil {
|
||||
|
@ -56,7 +58,7 @@ func TestClient_StartStop(t *testing.T) {
|
|||
// Certain labels for metrics are dependant on client initial setup. This tests
|
||||
// that the client has properly initialized before we assign values to labels
|
||||
func TestClient_BaseLabels(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
client, cleanup := TestClient(t, nil)
|
||||
|
@ -81,7 +83,7 @@ func TestClient_BaseLabels(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RPC(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
_, addr, cleanupS1 := testServer(t, nil)
|
||||
defer cleanupS1()
|
||||
|
@ -102,7 +104,7 @@ func TestClient_RPC(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RPC_FireRetryWatchers(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
_, addr, cleanupS1 := testServer(t, nil)
|
||||
defer cleanupS1()
|
||||
|
@ -131,7 +133,7 @@ func TestClient_RPC_FireRetryWatchers(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_RPC_Passthrough(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
s1, _, cleanupS1 := testServer(t, nil)
|
||||
defer cleanupS1()
|
||||
|
@ -152,7 +154,7 @@ func TestClient_RPC_Passthrough(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Fingerprint(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
c, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
|
@ -175,7 +177,7 @@ func TestClient_Fingerprint(t *testing.T) {
|
|||
// TestClient_Fingerprint_Periodic asserts that driver node attributes are
|
||||
// periodically fingerprinted.
|
||||
func TestClient_Fingerprint_Periodic(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
c1, cleanup := TestClient(t, func(c *config.Config) {
|
||||
confs := []*nconfig.PluginConfig{
|
||||
|
@ -253,7 +255,8 @@ func TestClient_Fingerprint_Periodic(t *testing.T) {
|
|||
// TestClient_MixedTLS asserts that when a server is running with TLS enabled
|
||||
// it will reject any RPC connections from clients that lack TLS. See #2525
|
||||
func TestClient_MixedTLS(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
const (
|
||||
cafile = "../helper/tlsutil/testdata/ca.pem"
|
||||
foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
|
||||
|
@ -300,7 +303,7 @@ func TestClient_MixedTLS(t *testing.T) {
|
|||
// enabled -- but their certificates are signed by different CAs -- they're
|
||||
// unable to communicate.
|
||||
func TestClient_BadTLS(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
const (
|
||||
cafile = "../helper/tlsutil/testdata/ca.pem"
|
||||
|
@ -356,7 +359,7 @@ func TestClient_BadTLS(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Register(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
s1, _, cleanupS1 := testServer(t, nil)
|
||||
defer cleanupS1()
|
||||
|
@ -389,7 +392,7 @@ func TestClient_Register(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Heartbeat(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
s1, _, cleanupS1 := testServer(t, func(c *nomad.Config) {
|
||||
c.MinHeartbeatTTL = 50 * time.Millisecond
|
||||
|
@ -426,7 +429,7 @@ func TestClient_Heartbeat(t *testing.T) {
|
|||
// TestClient_UpdateAllocStatus that once running allocations send updates to
|
||||
// the server.
|
||||
func TestClient_UpdateAllocStatus(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
s1, _, cleanupS1 := testServer(t, nil)
|
||||
defer cleanupS1()
|
||||
|
@ -452,7 +455,7 @@ func TestClient_UpdateAllocStatus(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_WatchAllocs(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
s1, _, cleanupS1 := testServer(t, nil)
|
||||
defer cleanupS1()
|
||||
|
@ -552,7 +555,7 @@ func waitTilNodeReady(client *Client, t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_SaveRestoreState(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
s1, _, cleanupS1 := testServer(t, nil)
|
||||
defer cleanupS1()
|
||||
|
@ -653,7 +656,7 @@ func TestClient_SaveRestoreState(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_AddAllocError(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
s1, _, cleanupS1 := testServer(t, nil)
|
||||
|
@ -729,7 +732,8 @@ func TestClient_AddAllocError(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_Init(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
dir, err := ioutil.TempDir("", "nomad")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
|
@ -759,7 +763,7 @@ func TestClient_Init(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_BlockedAllocations(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
s1, _, cleanupS1 := testServer(t, nil)
|
||||
defer cleanupS1()
|
||||
|
@ -872,7 +876,7 @@ func TestClient_BlockedAllocations(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_ValidateMigrateToken_ValidToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
c, cleanup := TestClient(t, func(c *config.Config) {
|
||||
|
@ -888,7 +892,7 @@ func TestClient_ValidateMigrateToken_ValidToken(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_ValidateMigrateToken_InvalidToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
c, cleanup := TestClient(t, func(c *config.Config) {
|
||||
|
@ -904,7 +908,7 @@ func TestClient_ValidateMigrateToken_InvalidToken(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_ValidateMigrateToken_ACLDisabled(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
c, cleanup := TestClient(t, func(c *config.Config) {})
|
||||
|
@ -914,7 +918,7 @@ func TestClient_ValidateMigrateToken_ACLDisabled(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_ReloadTLS_UpgradePlaintextToTLS(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
s1, addr, cleanupS1 := testServer(t, func(c *nomad.Config) {
|
||||
|
@ -990,7 +994,7 @@ func TestClient_ReloadTLS_UpgradePlaintextToTLS(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_ReloadTLS_DowngradeTLSToPlaintext(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
s1, addr, cleanupS1 := testServer(t, func(c *nomad.Config) {
|
||||
|
@ -1067,7 +1071,8 @@ func TestClient_ReloadTLS_DowngradeTLSToPlaintext(t *testing.T) {
|
|||
// TestClient_ServerList tests client methods that interact with the internal
|
||||
// nomad server list.
|
||||
func TestClient_ServerList(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
client, cleanup := TestClient(t, func(c *config.Config) {})
|
||||
defer cleanup()
|
||||
|
||||
|
@ -1090,7 +1095,8 @@ func TestClient_ServerList(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_UpdateNodeFromDevicesAccumulates(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
client, cleanup := TestClient(t, func(c *config.Config) {})
|
||||
defer cleanup()
|
||||
|
||||
|
@ -1188,7 +1194,7 @@ func TestClient_UpdateNodeFromDevicesAccumulates(t *testing.T) {
|
|||
// TestClient_UpdateNodeFromFingerprintKeepsConfig asserts manually configured
|
||||
// network interfaces take precedence over fingerprinted ones.
|
||||
func TestClient_UpdateNodeFromFingerprintKeepsConfig(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip("assertions assume linux platform")
|
||||
}
|
||||
|
@ -1266,7 +1272,7 @@ func TestClient_UpdateNodeFromFingerprintKeepsConfig(t *testing.T) {
|
|||
|
||||
// Support multiple IP addresses (ipv4 vs. 6, e.g.) on the configured network interface
|
||||
func Test_UpdateNodeFromFingerprintMultiIP(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
var dev string
|
||||
switch runtime.GOOS {
|
||||
|
@ -1304,6 +1310,8 @@ func Test_UpdateNodeFromFingerprintMultiIP(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_computeAllocatedDeviceStats(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
c := &Client{logger: logger}
|
||||
|
||||
|
@ -1400,8 +1408,9 @@ func TestClient_computeAllocatedDeviceStats(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_getAllocatedResources(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
client, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -1515,7 +1524,8 @@ func TestClient_getAllocatedResources(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClient_updateNodeFromDriverUpdatesAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
client, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -1598,7 +1608,7 @@ func TestClient_updateNodeFromDriverUpdatesAll(t *testing.T) {
|
|||
|
||||
// COMPAT(0.12): remove once upgrading from 0.9.5 is no longer supported
|
||||
func TestClient_hasLocalState(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
c, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
|
@ -1638,7 +1648,7 @@ func TestClient_hasLocalState(t *testing.T) {
|
|||
}
|
||||
|
||||
func Test_verifiedTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
logger := testlog.HCLogger(t)
|
||||
|
||||
// produce a result and check against expected tasks and/or error output
|
||||
|
|
|
@ -5,11 +5,14 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/hashicorp/consul-template/config"
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/helper"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConfigRead(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
config := Config{}
|
||||
|
||||
actual := config.Read("cake")
|
||||
|
@ -26,6 +29,8 @@ func TestConfigRead(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConfigReadDefault(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
config := Config{}
|
||||
|
||||
expected := "vanilla"
|
||||
|
@ -50,6 +55,8 @@ func mockWaitConfig() *WaitConfig {
|
|||
}
|
||||
|
||||
func TestWaitConfig_Copy(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
Wait *WaitConfig
|
||||
|
@ -95,6 +102,8 @@ func TestWaitConfig_Copy(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestWaitConfig_IsEmpty(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
Wait *WaitConfig
|
||||
|
@ -127,6 +136,8 @@ func TestWaitConfig_IsEmpty(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestWaitConfig_IsEqual(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
Wait *WaitConfig
|
||||
|
@ -170,6 +181,8 @@ func TestWaitConfig_IsEqual(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestWaitConfig_IsValid(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
Retry *WaitConfig
|
||||
|
@ -223,6 +236,8 @@ func TestWaitConfig_IsValid(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestWaitConfig_Merge(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
Target *WaitConfig
|
||||
|
@ -280,6 +295,8 @@ func TestWaitConfig_Merge(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestWaitConfig_ToConsulTemplate(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
expected := config.WaitConfig{
|
||||
Enabled: helper.BoolToPtr(true),
|
||||
Min: helper.TimeToPtr(5 * time.Second),
|
||||
|
@ -307,6 +324,8 @@ func mockRetryConfig() *RetryConfig {
|
|||
}
|
||||
}
|
||||
func TestRetryConfig_Copy(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
Retry *RetryConfig
|
||||
|
@ -382,6 +401,8 @@ func TestRetryConfig_Copy(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRetryConfig_IsEmpty(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
Retry *RetryConfig
|
||||
|
@ -414,6 +435,8 @@ func TestRetryConfig_IsEmpty(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRetryConfig_IsEqual(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
Retry *RetryConfig
|
||||
|
@ -502,6 +525,8 @@ func TestRetryConfig_IsEqual(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRetryConfig_IsValid(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
Retry *RetryConfig
|
||||
|
@ -570,6 +595,8 @@ func TestRetryConfig_IsValid(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRetryConfig_Merge(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
Target *RetryConfig
|
||||
|
@ -645,6 +672,8 @@ func TestRetryConfig_Merge(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRetryConfig_ToConsulTemplate(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
expected := config.RetryConfig{
|
||||
Enabled: helper.BoolToPtr(true),
|
||||
Attempts: helper.IntToPtr(5),
|
||||
|
|
|
@ -4,12 +4,15 @@ import (
|
|||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSI_DeriveTokens(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
dFunc := func(alloc *structs.Allocation, taskNames []string) (map[string]string, error) {
|
||||
return map[string]string{"a": "b"}, nil
|
||||
|
@ -21,6 +24,8 @@ func TestSI_DeriveTokens(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestSI_DeriveTokens_error(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
dFunc := func(alloc *structs.Allocation, taskNames []string) (map[string]string, error) {
|
||||
return nil, errors.New("some failure")
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/dynamicplugins"
|
||||
"github.com/hashicorp/nomad/client/structs"
|
||||
nstructs "github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -25,7 +26,7 @@ var fakeNodePlugin = &dynamicplugins.PluginInfo{
|
|||
}
|
||||
|
||||
func TestCSIController_AttachVolume(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
|
@ -172,7 +173,7 @@ func TestCSIController_AttachVolume(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCSIController_ValidateVolume(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
|
@ -275,7 +276,7 @@ func TestCSIController_ValidateVolume(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCSIController_DetachVolume(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
|
@ -358,7 +359,7 @@ func TestCSIController_DetachVolume(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCSIController_CreateVolume(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
|
@ -460,7 +461,7 @@ func TestCSIController_CreateVolume(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCSIController_DeleteVolume(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
|
@ -524,7 +525,7 @@ func TestCSIController_DeleteVolume(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCSIController_ListVolumes(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
|
@ -632,7 +633,7 @@ func TestCSIController_ListVolumes(t *testing.T) {
|
|||
}
|
||||
}
|
||||
func TestCSIController_CreateSnapshot(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
|
@ -725,7 +726,7 @@ func TestCSIController_CreateSnapshot(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCSIController_DeleteSnapshot(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
|
@ -789,7 +790,7 @@ func TestCSIController_DeleteSnapshot(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCSIController_ListSnapshots(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
|
@ -893,7 +894,7 @@ func TestCSIController_ListSnapshots(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCSINode_DetachVolume(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
Name string
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
|
||||
log "github.com/hashicorp/go-hclog"
|
||||
plugin "github.com/hashicorp/go-plugin"
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/state"
|
||||
"github.com/hashicorp/nomad/helper"
|
||||
"github.com/hashicorp/nomad/helper/pluginutils/loader"
|
||||
|
@ -234,7 +235,7 @@ func nvidiaAndIntelDefaultPlugins(catalog *loader.MockCatalog) {
|
|||
|
||||
// Test collecting statistics from all devices
|
||||
func TestManager_AllStats(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
config, _, catalog := baseTestConfig(t)
|
||||
|
@ -283,7 +284,7 @@ func TestManager_AllStats(t *testing.T) {
|
|||
|
||||
// Test collecting statistics from a particular device
|
||||
func TestManager_DeviceStats(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
config, _, catalog := baseTestConfig(t)
|
||||
|
@ -330,7 +331,7 @@ func TestManager_DeviceStats(t *testing.T) {
|
|||
|
||||
// Test reserving a particular device
|
||||
func TestManager_Reserve(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
r := require.New(t)
|
||||
|
||||
config, _, catalog := baseTestConfig(t)
|
||||
|
@ -428,7 +429,7 @@ func TestManager_Reserve(t *testing.T) {
|
|||
|
||||
// Test that shutdown shutsdown the plugins
|
||||
func TestManager_Shutdown(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
config, _, catalog := baseTestConfig(t)
|
||||
|
@ -455,7 +456,7 @@ func TestManager_Shutdown(t *testing.T) {
|
|||
|
||||
// Test that startup shutsdown previously launched plugins
|
||||
func TestManager_Run_ShutdownOld(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
config, _, catalog := baseTestConfig(t)
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/client/pluginmanager/drivermanager"
|
||||
"github.com/hashicorp/nomad/helper/pluginutils/catalog"
|
||||
|
@ -16,7 +17,7 @@ import (
|
|||
// TestDriverManager_Fingerprint_Run asserts that node is populated with
|
||||
// driver fingerprints
|
||||
func TestDriverManager_Fingerprint_Run(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
testClient, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
|
@ -54,7 +55,7 @@ func TestDriverManager_Fingerprint_Run(t *testing.T) {
|
|||
// TestDriverManager_Fingerprint_Run asserts that node is populated with
|
||||
// driver fingerprints and it's updated periodically
|
||||
func TestDriverManager_Fingerprint_Periodic(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
testClient, cleanup := TestClient(t, func(c *config.Config) {
|
||||
pluginConfig := []*nconfig.PluginConfig{
|
||||
|
@ -124,7 +125,7 @@ func TestDriverManager_Fingerprint_Periodic(t *testing.T) {
|
|||
// TestDriverManager_NodeAttributes_Run asserts that node attributes are populated
|
||||
// in addition to node.Drivers until we fully deprecate it
|
||||
func TestDriverManager_NodeAttributes_Run(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
testClient, cleanup := TestClient(t, func(c *config.Config) {
|
||||
c.Options = map[string]string{
|
||||
|
|
|
@ -7,11 +7,13 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestPluginEventBroadcaster_SendsMessagesToAllClients(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
b := newPluginEventBroadcaster()
|
||||
defer close(b.stopCh)
|
||||
var rcv1, rcv2 bool
|
||||
|
@ -37,7 +39,7 @@ func TestPluginEventBroadcaster_SendsMessagesToAllClients(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestPluginEventBroadcaster_UnsubscribeWorks(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
b := newPluginEventBroadcaster()
|
||||
defer close(b.stopCh)
|
||||
|
@ -66,7 +68,8 @@ func TestPluginEventBroadcaster_UnsubscribeWorks(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDynamicRegistry_RegisterPlugin_SendsUpdateEvents(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
r := NewRegistry(nil, nil)
|
||||
|
||||
ctx, cancelFn := context.WithCancel(context.Background())
|
||||
|
@ -104,7 +107,8 @@ func TestDynamicRegistry_RegisterPlugin_SendsUpdateEvents(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDynamicRegistry_DeregisterPlugin_SendsUpdateEvents(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
r := NewRegistry(nil, nil)
|
||||
|
||||
ctx, cancelFn := context.WithCancel(context.Background())
|
||||
|
@ -147,6 +151,8 @@ func TestDynamicRegistry_DeregisterPlugin_SendsUpdateEvents(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDynamicRegistry_DispensePlugin_Works(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
dispenseFn := func(i *PluginInfo) (interface{}, error) {
|
||||
return struct{}{}, nil
|
||||
}
|
||||
|
@ -174,7 +180,8 @@ func TestDynamicRegistry_DispensePlugin_Works(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDynamicRegistry_IsolatePluginTypes(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
r := NewRegistry(nil, nil)
|
||||
|
||||
err := r.RegisterPlugin(&PluginInfo{
|
||||
|
@ -200,7 +207,8 @@ func TestDynamicRegistry_IsolatePluginTypes(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDynamicRegistry_StateStore(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
dispenseFn := func(i *PluginInfo) (interface{}, error) {
|
||||
return i, nil
|
||||
}
|
||||
|
@ -226,8 +234,8 @@ func TestDynamicRegistry_StateStore(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestDynamicRegistry_ConcurrentAllocs(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
t.Parallel()
|
||||
dispenseFn := func(i *PluginInfo) (interface{}, error) {
|
||||
return i, nil
|
||||
}
|
||||
|
|
|
@ -3,12 +3,15 @@ package fingerprint
import (
	"testing"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/structs"
)

func TestArchFingerprint(t *testing.T) {
	ci.Parallel(t)

	f := NewArchFingerprint(testlog.HCLogger(t))
	node := &structs.Node{
		Attributes: make(map[string]string),

@ -8,11 +8,14 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBridgeFingerprint_detect(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
f := &BridgeFingerprint{logger: testlog.HCLogger(t)}
|
||||
require.NoError(t, f.detect("ip_tables"))
|
||||
|
||||
|
@ -73,6 +76,8 @@ kernel/net/bridge/bridgeRHEL.ko.xz: kernel/net/802/stp.ko.xz kernel/net/llc/llc.
|
|||
)
|
||||
|
||||
func TestBridgeFingerprint_search(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
f := &BridgeFingerprint{logger: testlog.HCLogger(t)}
|
||||
|
||||
t.Run("dynamic loaded module", func(t *testing.T) {
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -41,6 +42,8 @@ func (m *MountPointDetectorEmptyMountPoint) MountPoint() (string, error) {
|
|||
}
|
||||
|
||||
func TestCGroupFingerprint(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
{
|
||||
f := &CGroupFingerprint{
|
||||
logger: testlog.HCLogger(t),
|
||||
|
|
|
@ -3,6 +3,7 @@ package fingerprint
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -13,6 +14,8 @@ import (
|
|||
var _ ReloadableFingerprint = &CNIFingerprint{}
|
||||
|
||||
func TestCNIFingerprint(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
req *FingerprintRequest
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
agentconsul "github.com/hashicorp/nomad/command/agent/consul"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
|
@ -47,7 +48,7 @@ func newConsulFingerPrint(t *testing.T) *ConsulFingerprint {
|
|||
}
|
||||
|
||||
func TestConsulFingerprint_server(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
fp := newConsulFingerPrint(t)
|
||||
|
||||
|
@ -83,7 +84,7 @@ func TestConsulFingerprint_server(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConsulFingerprint_version(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
fp := newConsulFingerPrint(t)
|
||||
|
||||
|
@ -119,7 +120,7 @@ func TestConsulFingerprint_version(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConsulFingerprint_sku(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
fp := newConsulFingerPrint(t)
|
||||
|
||||
|
@ -171,7 +172,7 @@ func TestConsulFingerprint_sku(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConsulFingerprint_revision(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
fp := newConsulFingerPrint(t)
|
||||
|
||||
|
@ -199,7 +200,7 @@ func TestConsulFingerprint_revision(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConsulFingerprint_dc(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
fp := newConsulFingerPrint(t)
|
||||
|
||||
|
@ -227,7 +228,7 @@ func TestConsulFingerprint_dc(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConsulFingerprint_segment(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
fp := newConsulFingerPrint(t)
|
||||
|
||||
|
@ -262,7 +263,7 @@ func TestConsulFingerprint_segment(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConsulFingerprint_connect(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
fp := newConsulFingerPrint(t)
|
||||
|
||||
|
@ -291,7 +292,7 @@ func TestConsulFingerprint_connect(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConsulFingerprint_grpc(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
fp := newConsulFingerPrint(t)
|
||||
|
||||
|
@ -321,7 +322,7 @@ func TestConsulFingerprint_grpc(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConsulFingerprint_namespaces(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
fp := newConsulFingerPrint(t)
|
||||
|
||||
|
@ -362,6 +363,8 @@ func TestConsulFingerprint_namespaces(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConsulFingerprint_Fingerprint_oss(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cf := newConsulFingerPrint(t)
|
||||
|
||||
ts, cfg := fakeConsul(fakeConsulPayload(t, "test_fixtures/consul/agent_self_oss.json"))
|
||||
|
@ -449,6 +452,8 @@ func TestConsulFingerprint_Fingerprint_oss(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestConsulFingerprint_Fingerprint_ent(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
cf := newConsulFingerPrint(t)
|
||||
|
||||
ts, cfg := fakeConsul(fakeConsulPayload(t, "test_fixtures/consul/agent_self_ent.json"))
|
||||
|
|
|
@ -4,12 +4,15 @@ import (
|
|||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
)
|
||||
|
||||
func TestCPUFingerprint(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
f := NewCPUFingerprint(testlog.HCLogger(t))
|
||||
node := &structs.Node{
|
||||
Attributes: make(map[string]string),
|
||||
|
@ -58,6 +61,8 @@ func TestCPUFingerprint(t *testing.T) {
|
|||
// TestCPUFingerprint_OverrideCompute asserts that setting cpu_total_compute in
|
||||
// the client config overrides the detected CPU freq (if any).
|
||||
func TestCPUFingerprint_OverrideCompute(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
f := NewCPUFingerprint(testlog.HCLogger(t))
|
||||
node := &structs.Node{
|
||||
Attributes: make(map[string]string),
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -13,6 +14,8 @@ import (
|
|||
)
|
||||
|
||||
func TestEnvAWSFingerprint_nonAws(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
f := NewEnvAWSFingerprint(testlog.HCLogger(t))
|
||||
f.(*EnvAWSFingerprint).endpoint = "http://127.0.0.1/latest"
|
||||
|
||||
|
@ -28,6 +31,8 @@ func TestEnvAWSFingerprint_nonAws(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestEnvAWSFingerprint_aws(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
endpoint, cleanup := startFakeEC2Metadata(t, awsStubs)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -69,6 +74,8 @@ func TestEnvAWSFingerprint_aws(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNetworkFingerprint_AWS(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
endpoint, cleanup := startFakeEC2Metadata(t, awsStubs)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -97,6 +104,8 @@ func TestNetworkFingerprint_AWS(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNetworkFingerprint_AWS_network(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
endpoint, cleanup := startFakeEC2Metadata(t, awsStubs)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -158,6 +167,8 @@ func TestNetworkFingerprint_AWS_network(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNetworkFingerprint_AWS_NoNetwork(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
endpoint, cleanup := startFakeEC2Metadata(t, noNetworkAWSStubs)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -181,6 +192,8 @@ func TestNetworkFingerprint_AWS_NoNetwork(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNetworkFingerprint_AWS_IncompleteImitation(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
endpoint, cleanup := startFakeEC2Metadata(t, incompleteAWSImitationStubs)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -203,6 +216,8 @@ func TestNetworkFingerprint_AWS_IncompleteImitation(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCPUFingerprint_AWS_InstanceFound(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
endpoint, cleanup := startFakeEC2Metadata(t, awsStubs)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -224,6 +239,8 @@ func TestCPUFingerprint_AWS_InstanceFound(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCPUFingerprint_AWS_OverrideCompute(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
endpoint, cleanup := startFakeEC2Metadata(t, awsStubs)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -247,6 +264,8 @@ func TestCPUFingerprint_AWS_OverrideCompute(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestCPUFingerprint_AWS_InstanceNotFound(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
endpoint, cleanup := startFakeEC2Metadata(t, unknownInstanceType)
|
||||
defer cleanup()
|
||||
|
||||
|
|
|
@ -9,12 +9,15 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
)
|
||||
|
||||
func TestAzureFingerprint_nonAzure(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
os.Setenv("AZURE_ENV_URL", "http://127.0.0.1/metadata/instance/")
|
||||
f := NewEnvAzureFingerprint(testlog.HCLogger(t))
|
||||
node := &structs.Node{
|
||||
|
@ -211,9 +214,13 @@ const AZURE_routes = `
|
|||
`
|
||||
|
||||
func TestFingerprint_AzureWithExternalIp(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
testFingerprint_Azure(t, true)
|
||||
}
|
||||
|
||||
func TestFingerprint_AzureWithoutExternalIp(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
testFingerprint_Azure(t, false)
|
||||
}
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -16,6 +17,8 @@ import (
|
|||
)
|
||||
|
||||
func TestDigitalOceanFingerprint_nonDigitalOcean(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
os.Setenv("DO_ENV_URL", "http://127.0.0.1/metadata/v1/")
|
||||
f := NewEnvDigitalOceanFingerprint(testlog.HCLogger(t))
|
||||
node := &structs.Node{
|
||||
|
@ -39,6 +42,8 @@ func TestDigitalOceanFingerprint_nonDigitalOcean(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFingerprint_DigitalOcean(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
node := &structs.Node{
|
||||
Attributes: make(map[string]string),
|
||||
}
|
||||
|
|
|
@ -9,12 +9,15 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
)
|
||||
|
||||
func TestGCEFingerprint_nonGCE(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
os.Setenv("GCE_ENV_URL", "http://127.0.0.1/computeMetadata/v1/instance/")
|
||||
f := NewEnvGCEFingerprint(testlog.HCLogger(t))
|
||||
node := &structs.Node{
|
||||
|
@ -207,9 +210,13 @@ const GCE_routes = `
|
|||
`
|
||||
|
||||
func TestFingerprint_GCEWithExternalIp(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
testFingerprint_GCE(t, true)
|
||||
}
|
||||
|
||||
func TestFingerprint_GCEWithoutExternalIp(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
testFingerprint_GCE(t, false)
|
||||
}
|
||||
|
|
|
@ -3,12 +3,15 @@ package fingerprint
import (
	"testing"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/structs"
)

func TestHostFingerprint(t *testing.T) {
	ci.Parallel(t)

	f := NewHostFingerprint(testlog.HCLogger(t))
	node := &structs.Node{
		Attributes: make(map[string]string),

@ -3,6 +3,7 @@ package fingerprint
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -11,6 +12,8 @@ import (
|
|||
)
|
||||
|
||||
func TestMemoryFingerprint(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
require := require.New(t)
|
||||
|
||||
f := NewMemoryFingerprint(testlog.HCLogger(t))
|
||||
|
@ -31,6 +34,8 @@ func TestMemoryFingerprint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestMemoryFingerprint_Override(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
f := NewMemoryFingerprint(testlog.HCLogger(t))
|
||||
node := &structs.Node{
|
||||
Attributes: make(map[string]string),
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -182,6 +183,8 @@ func (n *NetworkInterfaceDetectorMultipleInterfaces) Addrs(intf *net.Interface)
|
|||
}
|
||||
|
||||
func TestNetworkFingerprint_basic(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
if v := os.Getenv(skipOnlineTestsEnvVar); v != "" {
|
||||
t.Skipf("Environment variable %+q not empty, skipping test", skipOnlineTestsEnvVar)
|
||||
}
|
||||
|
@ -237,6 +240,8 @@ func TestNetworkFingerprint_basic(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNetworkFingerprint_default_device_absent(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorOnlyLo{}}
|
||||
node := &structs.Node{
|
||||
Attributes: make(map[string]string),
|
||||
|
@ -260,6 +265,8 @@ func TestNetworkFingerprint_default_device_absent(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNetworkFingerPrint_default_device(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorOnlyLo{}}
|
||||
node := &structs.Node{
|
||||
Attributes: make(map[string]string),
|
||||
|
@ -311,6 +318,8 @@ func TestNetworkFingerPrint_default_device(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNetworkFingerPrint_LinkLocal_Allowed(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}}
|
||||
node := &structs.Node{
|
||||
Attributes: make(map[string]string),
|
||||
|
@ -358,6 +367,8 @@ func TestNetworkFingerPrint_LinkLocal_Allowed(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNetworkFingerPrint_LinkLocal_Allowed_MixedIntf(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}}
|
||||
node := &structs.Node{
|
||||
Attributes: make(map[string]string),
|
||||
|
@ -412,6 +423,8 @@ func TestNetworkFingerPrint_LinkLocal_Allowed_MixedIntf(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNetworkFingerPrint_LinkLocal_Disallowed(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}}
|
||||
node := &structs.Node{
|
||||
Attributes: make(map[string]string),
|
||||
|
@ -441,6 +454,8 @@ func TestNetworkFingerPrint_LinkLocal_Disallowed(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNetworkFingerPrint_MultipleAliases(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}}
|
||||
node := &structs.Node{
|
||||
Attributes: make(map[string]string),
|
||||
|
@ -488,6 +503,8 @@ func TestNetworkFingerPrint_MultipleAliases(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestNetworkFingerPrint_HostNetworkReservedPorts(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
hostNetworks map[string]*structs.ClientHostNetworkConfig
|
||||
|
|
|
@ -3,6 +3,7 @@ package fingerprint
import (
	"testing"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/structs"

@ -10,6 +11,8 @@ import (
)

func TestNomadFingerprint(t *testing.T) {
	ci.Parallel(t)

	f := NewNomadFingerprint(testlog.HCLogger(t))

	v := "foo"

@ -3,11 +3,14 @@ package fingerprint
import (
	"testing"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/structs"
)

func TestSignalFingerprint(t *testing.T) {
	ci.Parallel(t)

	fp := NewSignalFingerprint(testlog.HCLogger(t))
	node := &structs.Node{
		Attributes: make(map[string]string),

@ -4,11 +4,14 @@ import (
	"strconv"
	"testing"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/structs"
)

func TestStorageFingerprint(t *testing.T) {
	ci.Parallel(t)

	fp := NewStorageFingerprint(testlog.HCLogger(t))
	node := &structs.Node{
		Attributes: make(map[string]string),

@ -3,6 +3,7 @@ package fingerprint
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/helper/testlog"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -10,6 +11,8 @@ import (
|
|||
)
|
||||
|
||||
func TestVaultFingerprint(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
tv := testutil.NewTestVault(t)
|
||||
defer tv.Stop()
|
||||
|
||||
|
|
|
@ -3,13 +3,15 @@ package client
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFingerprintManager_Run_ResourcesFingerprint(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
testClient, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -33,7 +35,7 @@ func TestFingerprintManager_Run_ResourcesFingerprint(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFimgerprintManager_Run_InWhitelist(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
testClient, cleanup := TestClient(t, func(c *config.Config) {
|
||||
|
@ -62,12 +64,13 @@ func TestFimgerprintManager_Run_InWhitelist(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFingerprintManager_Run_InDenylist(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
testClient, cleanup := TestClient(t, func(c *config.Config) {
|
||||
c.Options = map[string]string{
|
||||
"fingerprint.allowlist": " arch,memory,foo,bar ",
|
||||
"fingerprint.denylist": " cpu ",
|
||||
"fingerprint.denylist": " cpu ",
|
||||
}
|
||||
})
|
||||
defer cleanup()
|
||||
|
@ -91,13 +94,13 @@ func TestFingerprintManager_Run_InDenylist(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFingerprintManager_Run_Combination(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
testClient, cleanup := TestClient(t, func(c *config.Config) {
|
||||
c.Options = map[string]string{
|
||||
"fingerprint.allowlist": " arch,cpu,memory,foo,bar ",
|
||||
"fingerprint.denylist": " memory,host ",
|
||||
"fingerprint.denylist": " memory,host ",
|
||||
}
|
||||
})
|
||||
defer cleanup()
|
||||
|
@ -123,7 +126,7 @@ func TestFingerprintManager_Run_Combination(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFingerprintManager_Run_CombinationLegacyNames(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
testClient, cleanup := TestClient(t, func(c *config.Config) {
|
||||
|
|
|
@ -18,6 +18,7 @@ import (
|
|||
|
||||
"github.com/hashicorp/go-msgpack/codec"
|
||||
"github.com/hashicorp/nomad/acl"
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/allocdir"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
sframer "github.com/hashicorp/nomad/client/lib/streamframer"
|
||||
|
@ -50,7 +51,7 @@ func (n nopWriteCloser) Close() error {
|
|||
}
|
||||
|
||||
func TestFS_Stat_NoAlloc(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a client
|
||||
|
@ -71,7 +72,7 @@ func TestFS_Stat_NoAlloc(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_Stat(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a server and client
|
||||
|
@ -108,7 +109,7 @@ func TestFS_Stat(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_Stat_ACL(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
// Start a server
|
||||
s, root, cleanupS := nomad.TestACLServer(t, nil)
|
||||
|
@ -183,7 +184,7 @@ func TestFS_Stat_ACL(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_List_NoAlloc(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a client
|
||||
|
@ -204,7 +205,7 @@ func TestFS_List_NoAlloc(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_List(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a server and client
|
||||
|
@ -241,7 +242,7 @@ func TestFS_List(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_List_ACL(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
// Start a server
|
||||
s, root, cleanupS := nomad.TestACLServer(t, nil)
|
||||
|
@ -316,7 +317,7 @@ func TestFS_List_ACL(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_Stream_NoAlloc(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a client
|
||||
|
@ -391,7 +392,7 @@ OUTER:
|
|||
}
|
||||
|
||||
func TestFS_Stream_ACL(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
// Start a server
|
||||
s, root, cleanupS := nomad.TestACLServer(t, nil)
|
||||
|
@ -519,7 +520,7 @@ func TestFS_Stream_ACL(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_Stream(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a server and client
|
||||
|
@ -635,7 +636,7 @@ func (r *ReadWriteCloseChecker) Close() error {
|
|||
}
|
||||
|
||||
func TestFS_Stream_Follow(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a server and client
|
||||
|
@ -732,7 +733,7 @@ OUTER:
|
|||
}
|
||||
|
||||
func TestFS_Stream_Limit(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a server and client
|
||||
|
@ -826,7 +827,7 @@ OUTER:
|
|||
}
|
||||
|
||||
func TestFS_Logs_NoAlloc(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a client
|
||||
|
@ -904,7 +905,7 @@ OUTER:
|
|||
// TestFS_Logs_TaskPending asserts that trying to stream logs for tasks which
|
||||
// have not started returns a 404 error.
|
||||
func TestFS_Logs_TaskPending(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a server and client
|
||||
|
@ -1019,7 +1020,7 @@ func TestFS_Logs_TaskPending(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_Logs_ACL(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a server
|
||||
|
@ -1150,7 +1151,7 @@ func TestFS_Logs_ACL(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_Logs(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a server and client
|
||||
|
@ -1251,7 +1252,7 @@ OUTER:
|
|||
}
|
||||
|
||||
func TestFS_Logs_Follow(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
require := require.New(t)
|
||||
|
||||
// Start a server and client
|
||||
|
@ -1555,7 +1556,8 @@ func TestFS_findClosest(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_streamFile_NoFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
c, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -1578,7 +1580,7 @@ func TestFS_streamFile_NoFile(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_streamFile_Modify(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
c, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
|
@ -1649,7 +1651,8 @@ func TestFS_streamFile_Modify(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_streamFile_Truncate(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
c, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
|
||||
|
@ -1752,10 +1755,10 @@ func TestFS_streamFile_Truncate(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_streamImpl_Delete(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("Windows does not allow us to delete a file while it is open")
|
||||
}
|
||||
t.Parallel()
|
||||
|
||||
c, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
|
@ -1828,7 +1831,7 @@ func TestFS_streamImpl_Delete(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_logsImpl_NoFollow(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
c, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
|
@ -1897,7 +1900,7 @@ func TestFS_logsImpl_NoFollow(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestFS_logsImpl_Follow(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
c, cleanup := TestClient(t, nil)
|
||||
defer cleanup()
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/allocrunner"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/client/stats"
|
||||
|
@ -37,7 +38,8 @@ func exitAllocRunner(runners ...AllocRunner) {
|
|||
}
|
||||
|
||||
func TestIndexedGCAllocPQ(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
pq := NewIndexedGCAllocPQ()
|
||||
|
||||
ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
|
||||
|
@ -122,7 +124,8 @@ func (m *MockStatsCollector) Stats() *stats.HostStats {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_MarkForCollection(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
|
||||
|
||||
|
@ -138,7 +141,8 @@ func TestAllocGarbageCollector_MarkForCollection(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_Collect(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
|
||||
|
||||
|
@ -164,7 +168,8 @@ func TestAllocGarbageCollector_Collect(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_CollectAll(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())
|
||||
|
||||
|
@ -184,7 +189,8 @@ func TestAllocGarbageCollector_CollectAll(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
statsCollector := &MockStatsCollector{}
|
||||
conf := gcConfig()
|
||||
|
@ -226,7 +232,8 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T)
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
statsCollector := &MockStatsCollector{}
|
||||
conf := gcConfig()
|
||||
|
@ -269,7 +276,8 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
statsCollector := &MockStatsCollector{}
|
||||
conf := gcConfig()
|
||||
|
@ -308,7 +316,8 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
statsCollector := &MockStatsCollector{}
|
||||
conf := gcConfig()
|
||||
|
@ -348,6 +357,8 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T)
|
|||
// TestAllocGarbageCollector_MakeRoomFor_MaxAllocs asserts that when making room for new
|
||||
// allocs, terminal allocs are GC'd until old_allocs + new_allocs <= limit
|
||||
func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
|
||||
const maxAllocs = 6
|
||||
require := require.New(t)
|
||||
|
||||
|
@ -494,7 +505,8 @@ func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
statsCollector := &MockStatsCollector{}
|
||||
conf := gcConfig()
|
||||
|
@ -533,7 +545,8 @@ func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
logger := testlog.HCLogger(t)
|
||||
statsCollector := &MockStatsCollector{}
|
||||
conf := gcConfig()
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/client/config"
|
||||
"github.com/hashicorp/nomad/helper/uuid"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -12,7 +13,7 @@ import (
|
|||
)
|
||||
|
||||
func TestHeartbeatStop_allocHook(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
|
||||
server, _, cleanupS1 := testServer(t, nil)
|
||||
defer cleanupS1()
|
||||
|
|
|
@ -9,6 +9,7 @@ import (
"runtime"
"testing"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/lib/fifo"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/helper/uuid"

@ -17,6 +18,8 @@ import (
)

func TestLogmon_Start_rotate(t *testing.T) {
ci.Parallel(t)

require := require.New(t)
var stdoutFifoPath, stderrFifoPath string

@ -77,6 +80,8 @@ func TestLogmon_Start_rotate(t *testing.T) {
// asserts that calling Start twice restarts the log rotator and that any logs
// published while the listener was unavailable are received.
func TestLogmon_Start_restart_flusheslogs(t *testing.T) {
ci.Parallel(t)

if runtime.GOOS == "windows" {
t.Skip("windows does not support pushing data to a pipe with no servers")
}

@ -184,6 +189,8 @@ func TestLogmon_Start_restart_flusheslogs(t *testing.T) {

// asserts that calling Start twice restarts the log rotator
func TestLogmon_Start_restart(t *testing.T) {
ci.Parallel(t)

require := require.New(t)
var stdoutFifoPath, stderrFifoPath string

@ -280,7 +287,7 @@ func (panicWriter) Close() error {
// TestLogmon_NewError asserts that newLogRotatorWrapper will return an error
// if its unable to create the necessray files.
func TestLogmon_NewError(t *testing.T) {
t.Parallel()
ci.Parallel(t)

// Pick a path that does not exist
path := filepath.Join(uuid.Generate(), uuid.Generate(), uuid.Generate())

@ -8,6 +8,7 @@ import (
"runtime"
"testing"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/mount"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/mock"

@ -39,7 +40,7 @@ func TestVolumeManager_ensureStagingDir(t *testing.T) {
if !checkMountSupport() {
t.Skip("mount point detection not supported for this platform")
}
t.Parallel()
ci.Parallel(t)

cases := []struct {
Name string

@ -136,7 +137,7 @@ func TestVolumeManager_stageVolume(t *testing.T) {
if !checkMountSupport() {
t.Skip("mount point detection not supported for this platform")
}
t.Parallel()
ci.Parallel(t)

cases := []struct {
Name string

@ -217,7 +218,7 @@ func TestVolumeManager_unstageVolume(t *testing.T) {
if !checkMountSupport() {
t.Skip("mount point detection not supported for this platform")
}
t.Parallel()
ci.Parallel(t)

cases := []struct {
Name string

@ -280,7 +281,7 @@ func TestVolumeManager_publishVolume(t *testing.T) {
t.Skip("mount point detection not supported for this platform")
}

t.Parallel()
ci.Parallel(t)

cases := []struct {
Name string

@ -406,7 +407,7 @@ func TestVolumeManager_unpublishVolume(t *testing.T) {
if !checkMountSupport() {
t.Skip("mount point detection not supported for this platform")
}
t.Parallel()
ci.Parallel(t)

cases := []struct {
Name string

@ -471,7 +472,7 @@ func TestVolumeManager_MountVolumeEvents(t *testing.T) {
if !checkMountSupport() {
t.Skip("mount point detection not supported for this platform")
}
t.Parallel()
ci.Parallel(t)

tmpPath := tmpDir(t)
defer os.RemoveAll(tmpPath)

@ -9,6 +9,7 @@ import (

log "github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/pluginmanager"
"github.com/hashicorp/nomad/client/state"
"github.com/hashicorp/nomad/helper/pluginutils/loader"

@ -101,7 +102,7 @@ func noopUpdater(string, *structs.DriverInfo) {}
func noopEventHandlerFactory(string, string) EventHandler { return nil }

func TestManager_Fingerprint(t *testing.T) {
t.Parallel()
ci.Parallel(t)
require := require.New(t)
fpChan, _, mgr := testSetup(t)
var infos []*structs.DriverInfo

@ -168,7 +169,7 @@ func TestManager_Fingerprint(t *testing.T) {
}

func TestManager_TaskEvents(t *testing.T) {
t.Parallel()
ci.Parallel(t)
require := require.New(t)
fpChan, evChan, mgr := testSetup(t)
go mgr.Run()

@ -199,7 +200,7 @@ func TestManager_TaskEvents(t *testing.T) {
}

func TestManager_Run_AllowedDrivers(t *testing.T) {
t.Parallel()
ci.Parallel(t)
require := require.New(t)
fpChan, _, mgr := testSetup(t)
mgr.allowedDrivers = map[string]struct{}{"foo": {}}

@ -219,7 +220,7 @@ func TestManager_Run_AllowedDrivers(t *testing.T) {
}

func TestManager_Run_BlockedDrivers(t *testing.T) {
t.Parallel()
ci.Parallel(t)
require := require.New(t)
fpChan, _, mgr := testSetup(t)
mgr.blockedDrivers = map[string]struct{}{"mock": {}}

@ -239,7 +240,7 @@ func TestManager_Run_BlockedDrivers(t *testing.T) {
}

func TestManager_Run_AllowedBlockedDrivers_Combined(t *testing.T) {
t.Parallel()
ci.Parallel(t)
require := require.New(t)
drvs := map[string]drivers.DriverPlugin{}
fpChs := map[string]chan *drivers.Fingerprint{}

@ -6,12 +6,13 @@ import (
"testing"
"time"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/stretchr/testify/require"
)

func TestPluginGroup_RegisterAndRun(t *testing.T) {
t.Parallel()
ci.Parallel(t)
require := require.New(t)

var hasRun bool

@ -29,7 +30,7 @@ func TestPluginGroup_RegisterAndRun(t *testing.T) {
}

func TestPluginGroup_Shutdown(t *testing.T) {
t.Parallel()
ci.Parallel(t)
require := require.New(t)

var stack []int

@ -66,7 +67,7 @@ func TestPluginGroup_Shutdown(t *testing.T) {
}

func TestPluginGroup_WaitForFirstFingerprint(t *testing.T) {
t.Parallel()
ci.Parallel(t)
require := require.New(t)

managerCh := make(chan struct{})

@ -95,7 +96,7 @@ func TestPluginGroup_WaitForFirstFingerprint(t *testing.T) {
}

func TestPluginGroup_WaitForFirstFingerprint_Timeout(t *testing.T) {
t.Parallel()
ci.Parallel(t)
require := require.New(t)

managerCh := make(chan struct{})

@ -4,6 +4,7 @@ import (
"errors"
"testing"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/nomad"
"github.com/hashicorp/nomad/nomad/structs"

@ -13,7 +14,7 @@ import (
)

func TestRpc_streamingRpcConn_badEndpoint(t *testing.T) {
t.Parallel()
ci.Parallel(t)
require := require.New(t)

s1, cleanupS1 := nomad.TestServer(t, nil)

@ -51,7 +52,7 @@ func TestRpc_streamingRpcConn_badEndpoint(t *testing.T) {
}

func TestRpc_streamingRpcConn_badEndpoint_TLS(t *testing.T) {
t.Parallel()
ci.Parallel(t)
require := require.New(t)

const (

@ -7,6 +7,7 @@ import (
"testing"
"time"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/testlog"
)

@ -50,6 +51,8 @@ func testManagerFailProb(t *testing.T, failPct float64) (m *Manager) {
}

func TestManagerInternal_cycleServer(t *testing.T) {
ci.Parallel(t)

server0 := &Server{Addr: &fauxAddr{"server1"}}
server1 := &Server{Addr: &fauxAddr{"server2"}}
server2 := &Server{Addr: &fauxAddr{"server3"}}

@ -81,6 +84,8 @@ func TestManagerInternal_cycleServer(t *testing.T) {
}

func TestManagerInternal_New(t *testing.T) {
ci.Parallel(t)

m := testManager(t)
if m == nil {
t.Fatalf("Manager nil")

@ -97,6 +102,8 @@ func TestManagerInternal_New(t *testing.T) {

// func (l *serverList) refreshServerRebalanceTimer() {
func TestManagerInternal_refreshServerRebalanceTimer(t *testing.T) {
ci.Parallel(t)

type clusterSizes struct {
numNodes int32
numServers int

@ -7,6 +7,7 @@ import (
"strings"
"testing"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/servers"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/stretchr/testify/require"

@ -47,6 +48,8 @@ func testManagerFailProb(t *testing.T, failPct float64) (m *servers.Manager) {
}

func TestServers_SetServers(t *testing.T) {
ci.Parallel(t)

require := require.New(t)
m := testManager(t)
var num int

@ -82,6 +85,8 @@ func TestServers_SetServers(t *testing.T) {
}

func TestServers_FindServer(t *testing.T) {
ci.Parallel(t)

m := testManager(t)

if m.FindServer() != nil {

@ -126,6 +131,8 @@ func TestServers_FindServer(t *testing.T) {
}

func TestServers_New(t *testing.T) {
ci.Parallel(t)

logger := testlog.HCLogger(t)
shutdownCh := make(chan struct{})
m := servers.New(logger, shutdownCh, &fauxConnPool{})

@ -135,6 +142,8 @@ func TestServers_New(t *testing.T) {
}

func TestServers_NotifyFailedServer(t *testing.T) {
ci.Parallel(t)

m := testManager(t)

if m.NumServers() != 0 {

@ -194,6 +203,8 @@ func TestServers_NotifyFailedServer(t *testing.T) {
}

func TestServers_NumServers(t *testing.T) {
ci.Parallel(t)

m := testManager(t)
var num int
num = m.NumServers()

@ -210,6 +221,8 @@ func TestServers_NumServers(t *testing.T) {
}

func TestServers_RebalanceServers(t *testing.T) {
ci.Parallel(t)

const failPct = 0.5
m := testManagerFailProb(t, failPct)
const maxServers = 100

@ -8,6 +8,7 @@ import (
"testing"
"time"

"github.com/hashicorp/nomad/ci"
trstate "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state"
dmstate "github.com/hashicorp/nomad/client/devicemanager/state"
"github.com/hashicorp/nomad/client/dynamicplugins"

@ -62,7 +63,7 @@ func testDB(t *testing.T, f func(*testing.T, StateDB)) {
// TestStateDB_Allocations asserts the behavior of GetAllAllocations, PutAllocation, and
// DeleteAllocationBucket for all operational StateDB implementations.
func TestStateDB_Allocations(t *testing.T) {
t.Parallel()
ci.Parallel(t)

testDB(t, func(t *testing.T, db StateDB) {
require := require.New(t)

@ -147,7 +148,7 @@ func ceilDiv(a, b int) int {
// TestStateDB_Batch asserts the behavior of PutAllocation, PutNetworkStatus and
// DeleteAllocationBucket in batch mode, for all operational StateDB implementations.
func TestStateDB_Batch(t *testing.T) {
t.Parallel()
ci.Parallel(t)

testDB(t, func(t *testing.T, db StateDB) {
require := require.New(t)

@ -255,7 +256,7 @@ func TestStateDB_Batch(t *testing.T) {
// TestStateDB_TaskState asserts the behavior of task state related StateDB
// methods.
func TestStateDB_TaskState(t *testing.T) {
t.Parallel()
ci.Parallel(t)

testDB(t, func(t *testing.T, db StateDB) {
require := require.New(t)

@ -307,7 +308,7 @@ func TestStateDB_TaskState(t *testing.T) {
// TestStateDB_DeviceManager asserts the behavior of device manager state related StateDB
// methods.
func TestStateDB_DeviceManager(t *testing.T) {
t.Parallel()
ci.Parallel(t)

testDB(t, func(t *testing.T, db StateDB) {
require := require.New(t)

@ -332,7 +333,7 @@ func TestStateDB_DeviceManager(t *testing.T) {
// TestStateDB_DriverManager asserts the behavior of device manager state related StateDB
// methods.
func TestStateDB_DriverManager(t *testing.T) {
t.Parallel()
ci.Parallel(t)

testDB(t, func(t *testing.T, db StateDB) {
require := require.New(t)

@ -357,7 +358,7 @@ func TestStateDB_DriverManager(t *testing.T) {
// TestStateDB_DynamicRegistry asserts the behavior of dynamic registry state related StateDB
// methods.
func TestStateDB_DynamicRegistry(t *testing.T) {
t.Parallel()
ci.Parallel(t)

testDB(t, func(t *testing.T, db StateDB) {
require := require.New(t)

@ -382,7 +383,7 @@ func TestStateDB_DynamicRegistry(t *testing.T) {
// TestStateDB_Upgrade asserts calling Upgrade on new databases always
// succeeds.
func TestStateDB_Upgrade(t *testing.T) {
t.Parallel()
ci.Parallel(t)

testDB(t, func(t *testing.T, db StateDB) {
require.NoError(t, db.Upgrade())

@ -11,6 +11,7 @@ import (
"strings"
"testing"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/allocrunner"
"github.com/hashicorp/nomad/client/allocwatcher"
clientconfig "github.com/hashicorp/nomad/client/config"

@ -32,7 +33,7 @@ import (
// TestBoltStateDB_Upgrade_Ok asserts upgading an old state db does not error
// during upgrade and restore.
func TestBoltStateDB_UpgradeOld_Ok(t *testing.T) {
t.Parallel()
ci.Parallel(t)

dbFromTestFile := func(t *testing.T, dir, fn string) *BoltStateDB {

@ -7,6 +7,7 @@ import (
"path/filepath"
"testing"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/boltdd"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/helper/uuid"

@ -32,7 +33,7 @@ func setupBoltDB(t *testing.T) (*bbolt.DB, func()) {

// TestUpgrade_NeedsUpgrade_New asserts new state dbs do not need upgrading.
func TestUpgrade_NeedsUpgrade_New(t *testing.T) {
t.Parallel()
ci.Parallel(t)

// Setting up a new StateDB should initialize it at the latest version.
db, cleanup := setupBoltStateDB(t)

@ -47,7 +48,7 @@ func TestUpgrade_NeedsUpgrade_New(t *testing.T) {
// TestUpgrade_NeedsUpgrade_Old asserts state dbs with just the alloctions
// bucket *do* need upgrading.
func TestUpgrade_NeedsUpgrade_Old(t *testing.T) {
t.Parallel()
ci.Parallel(t)

db, cleanup := setupBoltDB(t)
defer cleanup()

@ -77,7 +78,7 @@ func TestUpgrade_NeedsUpgrade_Old(t *testing.T) {
// NeedsUpgrade if an invalid db version is found. This is a safety measure to
// prevent invalid and unintentional upgrades when downgrading Nomad.
func TestUpgrade_NeedsUpgrade_Error(t *testing.T) {
t.Parallel()
ci.Parallel(t)

cases := [][]byte{
{'"', '2', '"'}, // wrong type

@ -107,7 +108,7 @@ func TestUpgrade_NeedsUpgrade_Error(t *testing.T) {
// TestUpgrade_DeleteInvalidAllocs asserts invalid allocations are deleted
// during state upgades instead of failing the entire agent.
func TestUpgrade_DeleteInvalidAllocs_NoAlloc(t *testing.T) {
t.Parallel()
ci.Parallel(t)

bdb, cleanup := setupBoltDB(t)
defer cleanup()

@ -152,7 +153,7 @@ func TestUpgrade_DeleteInvalidAllocs_NoAlloc(t *testing.T) {
// TestUpgrade_DeleteInvalidTaskEntries asserts invalid entries under a task
// bucket are deleted.
func TestUpgrade_upgradeTaskBucket_InvalidEntries(t *testing.T) {
t.Parallel()
ci.Parallel(t)

db, cleanup := setupBoltDB(t)
defer cleanup()

@ -6,12 +6,15 @@ import (
"testing"
"time"

"github.com/hashicorp/nomad/ci"
shelpers "github.com/hashicorp/nomad/helper/stats"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/stretchr/testify/assert"
)

func TestCpuStatsPercent(t *testing.T) {
ci.Parallel(t)

cs := NewCpuStats()
cs.Percent(79.7)
time.Sleep(1 * time.Second)

@ -23,6 +26,8 @@ func TestCpuStatsPercent(t *testing.T) {
}

func TestHostStats_CPU(t *testing.T) {
ci.Parallel(t)

assert := assert.New(t)
assert.Nil(shelpers.Init())

@ -5,6 +5,7 @@ import (
"testing"
"time"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/stretchr/testify/require"

@ -13,7 +14,7 @@ import (
// TestAllocBroadcaster_SendRecv asserts the latest sends to a broadcaster are
// received by listeners.
func TestAllocBroadcaster_SendRecv(t *testing.T) {
t.Parallel()
ci.Parallel(t)

b := NewAllocBroadcaster(testlog.HCLogger(t))
defer b.Close()

@ -47,7 +48,7 @@ func TestAllocBroadcaster_SendRecv(t *testing.T) {

// TestAllocBroadcaster_RecvBlocks asserts listeners are blocked until a send occurs.
func TestAllocBroadcaster_RecvBlocks(t *testing.T) {
t.Parallel()
ci.Parallel(t)

alloc := mock.Alloc()
b := NewAllocBroadcaster(testlog.HCLogger(t))

@ -87,7 +88,7 @@ func TestAllocBroadcaster_RecvBlocks(t *testing.T) {
// TestAllocBroadcaster_Concurrency asserts that the broadcaster behaves
// correctly with concurrent listeners being added and closed.
func TestAllocBroadcaster_Concurrency(t *testing.T) {
t.Parallel()
ci.Parallel(t)

alloc := mock.Alloc()
b := NewAllocBroadcaster(testlog.HCLogger(t))

@ -164,7 +165,7 @@ func TestAllocBroadcaster_Concurrency(t *testing.T) {
// TestAllocBroadcaster_PrimeListener asserts that newly created listeners are
// primed with the last sent alloc.
func TestAllocBroadcaster_PrimeListener(t *testing.T) {
t.Parallel()
ci.Parallel(t)

b := NewAllocBroadcaster(testlog.HCLogger(t))
defer b.Close()

@ -188,7 +189,7 @@ func TestAllocBroadcaster_PrimeListener(t *testing.T) {
// TestAllocBroadcaster_Closed asserts that newly created listeners are
// primed with the last sent alloc even when the broadcaster is closed.
func TestAllocBroadcaster_Closed(t *testing.T) {
t.Parallel()
ci.Parallel(t)

b := NewAllocBroadcaster(testlog.HCLogger(t))

@ -11,6 +11,7 @@ import (
hcl "github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/gohcl"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"

@ -62,6 +63,8 @@ func testEnvBuilder() *Builder {
}

func TestEnvironment_ParseAndReplace_Env(t *testing.T) {
ci.Parallel(t)

env := testEnvBuilder()

input := []string{fmt.Sprintf(`"${%v}"!`, envOneKey), fmt.Sprintf("${%s}${%s}", envOneKey, envTwoKey)}

@ -74,6 +77,8 @@ func TestEnvironment_ParseAndReplace_Env(t *testing.T) {
}

func TestEnvironment_ParseAndReplace_Meta(t *testing.T) {
ci.Parallel(t)

input := []string{fmt.Sprintf("${%v%v}", nodeMetaPrefix, metaKey)}
exp := []string{metaVal}
env := testEnvBuilder()

@ -85,6 +90,8 @@ func TestEnvironment_ParseAndReplace_Meta(t *testing.T) {
}

func TestEnvironment_ParseAndReplace_Attr(t *testing.T) {
ci.Parallel(t)

input := []string{fmt.Sprintf("${%v%v}", nodeAttributePrefix, attrKey)}
exp := []string{attrVal}
env := testEnvBuilder()

@ -96,6 +103,8 @@ func TestEnvironment_ParseAndReplace_Attr(t *testing.T) {
}

func TestEnvironment_ParseAndReplace_Node(t *testing.T) {
ci.Parallel(t)

input := []string{fmt.Sprintf("${%v}", nodeNameKey), fmt.Sprintf("${%v}", nodeClassKey)}
exp := []string{nodeName, nodeClass}
env := testEnvBuilder()

@ -107,6 +116,8 @@ func TestEnvironment_ParseAndReplace_Node(t *testing.T) {
}

func TestEnvironment_ParseAndReplace_Mixed(t *testing.T) {
ci.Parallel(t)

input := []string{
fmt.Sprintf("${%v}${%v%v}", nodeNameKey, nodeAttributePrefix, attrKey),
fmt.Sprintf("${%v}${%v%v}", nodeClassKey, nodeMetaPrefix, metaKey),

@ -126,6 +137,8 @@ func TestEnvironment_ParseAndReplace_Mixed(t *testing.T) {
}

func TestEnvironment_ReplaceEnv_Mixed(t *testing.T) {
ci.Parallel(t)

input := fmt.Sprintf("${%v}${%v%v}", nodeNameKey, nodeAttributePrefix, attrKey)
exp := fmt.Sprintf("%v%v", nodeName, attrVal)
env := testEnvBuilder()

@ -137,6 +150,8 @@ func TestEnvironment_ReplaceEnv_Mixed(t *testing.T) {
}

func TestEnvironment_AsList(t *testing.T) {
ci.Parallel(t)

n := mock.Node()
n.Meta = map[string]string{
"metaKey": "metaVal",

@ -227,7 +242,7 @@ func TestEnvironment_AsList(t *testing.T) {
}

func TestEnvironment_AllValues(t *testing.T) {
t.Parallel()
ci.Parallel(t)

n := mock.Node()
n.Meta = map[string]string{

@ -431,6 +446,8 @@ func TestEnvironment_AllValues(t *testing.T) {
}

func TestEnvironment_VaultToken(t *testing.T) {
ci.Parallel(t)

n := mock.Node()
a := mock.Alloc()
env := NewBuilder(n, a, a.Job.TaskGroups[0].Tasks[0], "global")

@ -491,6 +508,8 @@ func TestEnvironment_VaultToken(t *testing.T) {
}

func TestEnvironment_Envvars(t *testing.T) {
ci.Parallel(t)

envMap := map[string]string{"foo": "baz", "bar": "bang"}
n := mock.Node()
a := mock.Alloc()

@ -512,6 +531,8 @@ func TestEnvironment_Envvars(t *testing.T) {
// TestEnvironment_HookVars asserts hook env vars are LWW and deletes of later
// writes allow earlier hook's values to be visible.
func TestEnvironment_HookVars(t *testing.T) {
ci.Parallel(t)

n := mock.Node()
a := mock.Alloc()
builder := NewBuilder(n, a, a.Job.TaskGroups[0].Tasks[0], "global")

@ -548,6 +569,8 @@ func TestEnvironment_HookVars(t *testing.T) {
// TestEnvironment_DeviceHookVars asserts device hook env vars are accessible
// separately.
func TestEnvironment_DeviceHookVars(t *testing.T) {
ci.Parallel(t)

require := require.New(t)
n := mock.Node()
a := mock.Alloc()

@ -573,6 +596,8 @@ func TestEnvironment_DeviceHookVars(t *testing.T) {
}

func TestEnvironment_Interpolate(t *testing.T) {
ci.Parallel(t)

n := mock.Node()
n.Attributes["arch"] = "x86"
n.NodeClass = "test class"

@ -598,6 +623,8 @@ func TestEnvironment_Interpolate(t *testing.T) {
}

func TestEnvironment_AppendHostEnvvars(t *testing.T) {
ci.Parallel(t)

host := os.Environ()
if len(host) < 2 {
t.Skip("No host environment variables. Can't test")

@ -620,6 +647,8 @@ func TestEnvironment_AppendHostEnvvars(t *testing.T) {
// converted to underscores in environment variables.
// See: https://github.com/hashicorp/nomad/issues/2405
func TestEnvironment_DashesInTaskName(t *testing.T) {
ci.Parallel(t)

a := mock.Alloc()
task := a.Job.TaskGroups[0].Tasks[0]
task.Env = map[string]string{

@ -639,6 +668,8 @@ func TestEnvironment_DashesInTaskName(t *testing.T) {
// TestEnvironment_UpdateTask asserts env vars and task meta are updated when a
// task is updated.
func TestEnvironment_UpdateTask(t *testing.T) {
ci.Parallel(t)

a := mock.Alloc()
a.Job.TaskGroups[0].Meta = map[string]string{"tgmeta": "tgmetaval"}
task := a.Job.TaskGroups[0].Tasks[0]

@ -688,6 +719,8 @@ func TestEnvironment_UpdateTask(t *testing.T) {
// job, if an optional meta field is not set, it will get interpolated as an
// empty string.
func TestEnvironment_InterpolateEmptyOptionalMeta(t *testing.T) {
ci.Parallel(t)

require := require.New(t)
a := mock.Alloc()
a.Job.ParameterizedJob = &structs.ParameterizedJobConfig{

@ -704,7 +737,7 @@ func TestEnvironment_InterpolateEmptyOptionalMeta(t *testing.T) {
// TestEnvironment_Upsteams asserts that group.service.upstreams entries are
// added to the environment.
func TestEnvironment_Upstreams(t *testing.T) {
t.Parallel()
ci.Parallel(t)

// Add some upstreams to the mock alloc
a := mock.Alloc()

@ -754,6 +787,8 @@ func TestEnvironment_Upstreams(t *testing.T) {
}

func TestEnvironment_SetPortMapEnvs(t *testing.T) {
ci.Parallel(t)

envs := map[string]string{
"foo": "bar",
"NOMAD_PORT_ssh": "2342",

@ -774,6 +809,8 @@ func TestEnvironment_SetPortMapEnvs(t *testing.T) {
}

func TestEnvironment_TasklessBuilder(t *testing.T) {
ci.Parallel(t)

node := mock.Node()
alloc := mock.Alloc()
alloc.Job.Meta["jobt"] = "foo"

@ -789,6 +826,8 @@ func TestEnvironment_TasklessBuilder(t *testing.T) {
}

func TestTaskEnv_ClientPath(t *testing.T) {
ci.Parallel(t)

builder := testEnvBuilder()
builder.SetAllocDir("/tmp/testAlloc")
builder.SetClientSharedAllocDir("/tmp/testAlloc/alloc")

@ -3,11 +3,14 @@ package taskenv
import (
"testing"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/assert"
)

func Test_InterpolateNetworks(t *testing.T) {
ci.Parallel(t)

testCases := []struct {
inputTaskEnv *TaskEnv
inputNetworks structs.Networks

@ -4,6 +4,7 @@ import (
"testing"
"time"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/require"

@ -12,7 +13,7 @@ import (
// TestInterpolateServices asserts that all service
// and check fields are properly interpolated.
func TestInterpolateServices(t *testing.T) {
t.Parallel()
ci.Parallel(t)

services := []*structs.Service{
{

@ -107,7 +108,7 @@ var testEnv = NewTaskEnv(
nil, nil, "", "")

func TestInterpolate_interpolateMapStringSliceString(t *testing.T) {
t.Parallel()
ci.Parallel(t)

t.Run("nil", func(t *testing.T) {
require.Nil(t, interpolateMapStringSliceString(testEnv, nil))

@ -125,7 +126,7 @@ func TestInterpolate_interpolateMapStringSliceString(t *testing.T) {
}

func TestInterpolate_interpolateMapStringString(t *testing.T) {
t.Parallel()
ci.Parallel(t)

t.Run("nil", func(t *testing.T) {
require.Nil(t, interpolateMapStringString(testEnv, nil))

@ -143,7 +144,7 @@ func TestInterpolate_interpolateMapStringString(t *testing.T) {
}

func TestInterpolate_interpolateMapStringInterface(t *testing.T) {
t.Parallel()
ci.Parallel(t)

t.Run("nil", func(t *testing.T) {
require.Nil(t, interpolateMapStringInterface(testEnv, nil))

@ -161,7 +162,7 @@ func TestInterpolate_interpolateMapStringInterface(t *testing.T) {
}

func TestInterpolate_interpolateConnect(t *testing.T) {
t.Parallel()
ci.Parallel(t)

e := map[string]string{
"tag1": "_tag1",

@ -4,6 +4,7 @@ import (
"fmt"
"testing"

"github.com/hashicorp/nomad/ci"
"github.com/stretchr/testify/require"
"github.com/zclconf/go-cty/cty"
)

@ -11,6 +12,8 @@ import (
// TestAddNestedKey_Ok asserts test cases that succeed when passed to
// addNestedKey.
func TestAddNestedKey_Ok(t *testing.T) {
ci.Parallel(t)

cases := []struct {
// M will be initialized if unset
M map[string]interface{}

@ -209,7 +212,7 @@ func TestAddNestedKey_Ok(t *testing.T) {
name = fmt.Sprintf("%s-%d", name, len(tc.M))
}
t.Run(name, func(t *testing.T) {
t.Parallel()
ci.Parallel(t)
if tc.M == nil {
tc.M = map[string]interface{}{}
}

@ -222,6 +225,8 @@ func TestAddNestedKey_Ok(t *testing.T) {
// TestAddNestedKey_Bad asserts test cases return an error when passed to
// addNestedKey.
func TestAddNestedKey_Bad(t *testing.T) {
ci.Parallel(t)

cases := []struct {
// M will be initialized if unset
M func() map[string]interface{}

@ -320,7 +325,7 @@ func TestAddNestedKey_Bad(t *testing.T) {
name += "-cleanup"
}
t.Run(name, func(t *testing.T) {
t.Parallel()
ci.Parallel(t)

// Copy original M value to ensure it doesn't get altered
if tc.M == nil {

@ -341,6 +346,8 @@ func TestAddNestedKey_Bad(t *testing.T) {
}

func TestCtyify_Ok(t *testing.T) {
ci.Parallel(t)

cases := []struct {
Name string
In map[string]interface{}

@ -402,7 +409,7 @@ func TestCtyify_Ok(t *testing.T) {
for i := range cases {
tc := cases[i]
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
ci.Parallel(t)

// ctiyif and check for errors
result, err := ctyify(tc.In)

@ -417,6 +424,8 @@ func TestCtyify_Ok(t *testing.T) {
}

func TestCtyify_Bad(t *testing.T) {
ci.Parallel(t)

cases := []struct {
Name string
In map[string]interface{}

@ -441,7 +450,7 @@ func TestCtyify_Bad(t *testing.T) {
for i := range cases {
tc := cases[i]
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
ci.Parallel(t)

// ctiyif and check for errors
result, err := ctyify(tc.In)

@ -1,78 +0,0 @@
package client

/*
TODO(clientv2)
import (
"reflect"
"testing"

"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
)

func TestDiffAllocs(t *testing.T) {
t.Parallel()
alloc1 := mock.Alloc() // Ignore
alloc2 := mock.Alloc() // Update
alloc2u := new(structs.Allocation)
*alloc2u = *alloc2
alloc2u.AllocModifyIndex += 1
alloc3 := mock.Alloc() // Remove
alloc4 := mock.Alloc() // Add

exist := []*structs.Allocation{
alloc1,
alloc2,
alloc3,
}
update := &allocUpdates{
pulled: map[string]*structs.Allocation{
alloc2u.ID: alloc2u,
alloc4.ID: alloc4,
},
filtered: map[string]struct{}{
alloc1.ID: {},
},
}

result := diffAllocs(exist, update)

if len(result.ignore) != 1 || result.ignore[0] != alloc1 {
t.Fatalf("Bad: %#v", result.ignore)
}
if len(result.added) != 1 || result.added[0] != alloc4 {
t.Fatalf("Bad: %#v", result.added)
}
if len(result.removed) != 1 || result.removed[0] != alloc3 {
t.Fatalf("Bad: %#v", result.removed)
}
if len(result.updated) != 1 {
t.Fatalf("Bad: %#v", result.updated)
}
if result.updated[0].exist != alloc2 || result.updated[0].updated != alloc2u {
t.Fatalf("Bad: %#v", result.updated)
}
}

func TestShuffleStrings(t *testing.T) {
t.Parallel()
// Generate input
inp := make([]string, 10)
for idx := range inp {
inp[idx] = uuid.Generate()
}

// Copy the input
orig := make([]string, len(inp))
copy(orig, inp)

// Shuffle
shuffleStrings(inp)

// Ensure order is not the same
if reflect.DeepEqual(inp, orig) {
t.Fatalf("shuffle failed")
}
}
*/

@ -5,6 +5,7 @@ import (
"testing"
"time"

"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/testutil"

@ -15,7 +16,8 @@ import (
)

func TestVaultClient_TokenRenewals(t *testing.T) {
t.Parallel()
ci.Parallel(t)

require := require.New(t)
v := testutil.NewTestVault(t)
defer v.Stop()

@ -103,7 +105,8 @@ func TestVaultClient_TokenRenewals(t *testing.T) {
// TestVaultClient_NamespaceSupport tests that the Vault namespace config, if present, will result in the
// namespace header being set on the created Vault client.
func TestVaultClient_NamespaceSupport(t *testing.T) {
t.Parallel()
ci.Parallel(t)

require := require.New(t)
tr := true
testNs := "test-namespace"

@ -120,7 +123,8 @@ func TestVaultClient_NamespaceSupport(t *testing.T) {
}

func TestVaultClient_Heap(t *testing.T) {
t.Parallel()
ci.Parallel(t)

tr := true
conf := config.DefaultConfig()
conf.VaultConfig.Enabled = &tr

@ -226,7 +230,8 @@ func TestVaultClient_Heap(t *testing.T) {
}

func TestVaultClient_RenewNonRenewableLease(t *testing.T) {
t.Parallel()
ci.Parallel(t)

v := testutil.NewTestVault(t)
defer v.Stop()

@ -275,7 +280,8 @@ func TestVaultClient_RenewNonRenewableLease(t *testing.T) {
}

func TestVaultClient_RenewNonexistentLease(t *testing.T) {
t.Parallel()
ci.Parallel(t)

v := testutil.NewTestVault(t)
defer v.Stop()

@ -311,7 +317,7 @@ func TestVaultClient_RenewNonexistentLease(t *testing.T) {
// TestVaultClient_RenewalTime_Long asserts that for leases over 1m the renewal
// time is jittered.
func TestVaultClient_RenewalTime_Long(t *testing.T) {
t.Parallel()
ci.Parallel(t)

// highRoller is a randIntn func that always returns the max value
highRoller := func(n int) int {

@ -337,7 +343,7 @@ func TestVaultClient_RenewalTime_Long(t *testing.T) {
// TestVaultClient_RenewalTime_Short asserts that for leases under 1m the renewal
// time is lease/2.
func TestVaultClient_RenewalTime_Short(t *testing.T) {
t.Parallel()
ci.Parallel(t)

dice := func(int) int {
require.Fail(t, "dice should not have been called")

|
@ -3,13 +3,14 @@ package command
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/command/agent"
|
||||
"github.com/mitchellh/cli"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestACLBootstrapCommand(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
// create a acl-enabled server without bootstrapping the token
|
||||
|
@ -36,7 +37,7 @@ func TestACLBootstrapCommand(t *testing.T) {
|
|||
// If a bootstrap token has already been created, attempts to create more should
|
||||
// fail.
|
||||
func TestACLBootstrapCommand_ExistingBootstrapToken(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
config := func(c *agent.Config) {
|
||||
|
@ -60,7 +61,7 @@ func TestACLBootstrapCommand_ExistingBootstrapToken(t *testing.T) {
|
|||
|
||||
// Attempting to bootstrap a token on a non-ACL enabled server should fail.
|
||||
func TestACLBootstrapCommand_NonACLServer(t *testing.T) {
|
||||
t.Parallel()
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
|
||||
srv, _, url := testServer(t, true, nil)
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/command/agent"
|
||||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/mitchellh/cli"
|
||||
|
@ -13,8 +14,8 @@ import (
|
|||
)
|
||||
|
||||
func TestACLPolicyApplyCommand(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
t.Parallel()
|
||||
config := func(c *agent.Config) {
|
||||
c.ACL.Enabled = true
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/acl"
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/command/agent"
|
||||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -14,8 +15,8 @@ import (
|
|||
)
|
||||
|
||||
func TestACLPolicyDeleteCommand(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
t.Parallel()
|
||||
config := func(c *agent.Config) {
|
||||
c.ACL.Enabled = true
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/command/agent"
|
||||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -12,8 +13,8 @@ import (
|
|||
)
|
||||
|
||||
func TestACLPolicyInfoCommand(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
t.Parallel()
|
||||
config := func(c *agent.Config) {
|
||||
c.ACL.Enabled = true
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/acl"
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/command/agent"
|
||||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -13,8 +14,8 @@ import (
|
|||
)
|
||||
|
||||
func TestACLPolicyListCommand(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
t.Parallel()
|
||||
config := func(c *agent.Config) {
|
||||
c.ACL.Enabled = true
|
||||
}
|
||||
|
|
|
@ -4,14 +4,15 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/command/agent"
|
||||
"github.com/mitchellh/cli"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestACLTokenCreateCommand(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
t.Parallel()
|
||||
config := func(c *agent.Config) {
|
||||
c.ACL.Enabled = true
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/hashicorp/nomad/acl"
|
||||
"github.com/hashicorp/nomad/ci"
|
||||
"github.com/hashicorp/nomad/command/agent"
|
||||
"github.com/hashicorp/nomad/nomad/mock"
|
||||
"github.com/hashicorp/nomad/nomad/structs"
|
||||
|
@ -14,8 +15,8 @@ import (
|
|||
)
|
||||
|
||||
func TestACLTokenDeleteCommand_ViaEnvVariable(t *testing.T) {
|
||||
ci.Parallel(t)
|
||||
assert := assert.New(t)
|
||||
t.Parallel()
|
||||
config := func(c *agent.Config) {
|
||||
c.ACL.Enabled = true
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff.