Divest api/ package of deps elsewhere in the nomad repo. (#5488)

* Divest api/ package of deps elsewhere in the nomad repo.

This will allow making api/ a module without it then pulling in the rest of
the nomad repo as an external dependency, which would lead to a package
name conflict.

This required migrating some tests to an apitests/ folder (which can live
anywhere, since nothing depends on it). It also required duplicating some
code, notably some test helpers from api/ -> apitests/ and part (but not
all) of testutil/ -> api/internal/testutil/.

Once there's more separation and, e.g., an sdk/ folder, those copies can be
removed in favor of a dep on the sdk/ folder, provided sdk/ doesn't depend
on api/ or the repository root.

* Also remove consul dep from api/ package

* Fix stupid linters

* Some restructuring
This commit is contained in:
Jeff Mitchell 2019-03-29 14:47:40 -04:00 committed by GitHub
parent 43270647c2
commit 13dab7dd24
27 changed files with 1311 additions and 376 deletions
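
The mechanical core of the change, for orientation: tests that stay inside api/ swap the repo-wide helpers for the new internal copy, while tests that need nomad/mock, nomad/structs, or Consul helpers move to apitests/. A minimal hypothetical sketch of an affected test after the migration (file and test names are illustrative; the import path is the one introduced below):

package api

import (
	"testing"

	"github.com/hashicorp/nomad/api/internal/testutil" // was github.com/hashicorp/nomad/testutil
)

func TestExample(t *testing.T) {
	t.Parallel()
	// NewTestServer still fork/execs a real nomad binary, as before.
	srv := testutil.NewTestServer(t, nil)
	defer srv.Stop()
	// ... exercise the api package against srv.HTTPAddr ...
}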

View File

@ -5,7 +5,7 @@ import (
"sort"
"testing"
"github.com/hashicorp/nomad/testutil"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/assert"
)

View File

@ -7,7 +7,7 @@ import (
"time"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/require"
)
@ -146,12 +146,20 @@ func TestAllocations_RescheduleInfo(t *testing.T) {
}
job.Canonicalize()
uuidGen := func() string {
ret, err := uuid.GenerateUUID()
if err != nil {
t.Fatal(err)
}
return ret
}
alloc := &Allocation{
ID: uuid.Generate(),
ID: uuidGen(),
Namespace: DefaultNamespace,
EvalID: uuid.Generate(),
EvalID: uuidGen(),
Name: "foo-bar[1]",
NodeID: uuid.Generate(),
NodeID: uuidGen(),
TaskGroup: *job.TaskGroups[0].Name,
JobID: *job.ID,
Job: job,

View File

@ -10,8 +10,8 @@ import (
"testing"
"time"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/testutil"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/assert"
)
@ -316,16 +316,24 @@ func TestQueryString(t *testing.T) {
func TestClient_NodeClient(t *testing.T) {
http := "testdomain:4646"
tlsNode := func(string, *QueryOptions) (*Node, *QueryMeta, error) {
uu, err := uuid.GenerateUUID()
if err != nil {
t.Fatal(err)
}
return &Node{
ID: uuid.Generate(),
ID: uu,
Status: "ready",
HTTPAddr: http,
TLSEnabled: true,
}, nil, nil
}
noTlsNode := func(string, *QueryOptions) (*Node, *QueryMeta, error) {
uu, err := uuid.GenerateUUID()
if err != nil {
t.Fatal(err)
}
return &Node{
ID: uuid.Generate(),
ID: uu,
Status: "ready",
HTTPAddr: http,
TLSEnabled: false,

View File

@ -10,7 +10,7 @@ import (
"time"
units "github.com/docker/go-units"
"github.com/hashicorp/nomad/testutil"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/kr/pretty"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

View File

@ -0,0 +1,66 @@
package discover
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
// NomadExecutable checks the current executable, then the $PATH,
// $GOPATH/bin, the CWD, and finally CWD/bin, in that order. If the nomad
// binary can't be found, an error is returned.
func NomadExecutable() (string, error) {
nomadExe := "nomad"
if runtime.GOOS == "windows" {
nomadExe = "nomad.exe"
}
// Check the current executable.
bin, err := os.Executable()
if err != nil {
return "", fmt.Errorf("Failed to determine the nomad executable: %v", err)
}
if _, err := os.Stat(bin); err == nil && isNomad(bin, nomadExe) {
return bin, nil
}
// Check the $PATH
if bin, err := exec.LookPath(nomadExe); err == nil {
return bin, nil
}
// Check the $GOPATH.
bin = filepath.Join(os.Getenv("GOPATH"), "bin", nomadExe)
if _, err := os.Stat(bin); err == nil {
return bin, nil
}
// Check the CWD.
pwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("Could not find Nomad executable (%v): %v", nomadExe, err)
}
bin = filepath.Join(pwd, nomadExe)
if _, err := os.Stat(bin); err == nil {
return bin, nil
}
// Check CWD/bin
bin = filepath.Join(pwd, "bin", nomadExe)
if _, err := os.Stat(bin); err == nil {
return bin, nil
}
return "", fmt.Errorf("Could not find Nomad executable (%v)", nomadExe)
}
func isNomad(path, nomadExe string) bool {
if strings.HasSuffix(path, ".test") || strings.HasSuffix(path, ".test.exe") {
return false
}
return true
}
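
As a hedged usage sketch (not part of this commit; names are illustrative), a test can locate the binary with discover and skip when it is absent, which is how the TestServer further down consumes it:

package discover_test

import (
	"testing"

	"github.com/hashicorp/nomad/api/internal/testutil/discover"
)

func TestFindNomad(t *testing.T) {
	// NomadExecutable searches the locations described above.
	path, err := discover.NomadExecutable()
	if err != nil {
		t.Skipf("nomad binary not found, skipping: %v", err)
	}
	t.Logf("using nomad at %s", path)
}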

View File

@ -0,0 +1,139 @@
// Package freeport provides a helper for allocating free ports across multiple
// processes on the same machine.
package freeport
import (
"fmt"
"math/rand"
"net"
"sync"
"time"
"github.com/mitchellh/go-testing-interface"
)
const (
// blockSize is the size of the allocated port block. ports are given out
// consecutively from that block with roll-over for the lifetime of the
// application/test run.
blockSize = 1500
// maxBlocks is the number of available port blocks.
// lowPort + maxBlocks * blockSize must be less than 65535.
maxBlocks = 30
// lowPort is the lowest port number that should be used.
lowPort = 10000
// attempts is how many times we try to allocate a port block
// before giving up.
attempts = 10
)
var (
// firstPort is the first port of the allocated block.
firstPort int
// lockLn is the system-wide mutex for the port block.
lockLn net.Listener
// mu guards port
mu sync.Mutex
// once is used to do the initialization on the first call to retrieve free
// ports
once sync.Once
// port is the last allocated port.
port int
)
// initialize is used to initialize freeport.
func initialize() {
if lowPort+maxBlocks*blockSize > 65535 {
panic("freeport: block size too big or too many blocks requested")
}
rand.Seed(time.Now().UnixNano())
firstPort, lockLn = alloc()
}
// alloc reserves a port block for exclusive use for the lifetime of the
// application. lockLn serves as a system-wide mutex for the port block and is
// implemented as a TCP listener which is bound to the firstPort and which will
// be automatically released when the application terminates.
func alloc() (int, net.Listener) {
for i := 0; i < attempts; i++ {
block := int(rand.Int31n(int32(maxBlocks)))
firstPort := lowPort + block*blockSize
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", firstPort))
if err != nil {
continue
}
// log.Printf("[DEBUG] freeport: allocated port block %d (%d-%d)", block, firstPort, firstPort+blockSize-1)
return firstPort, ln
}
panic("freeport: cannot allocate port block")
}
func tcpAddr(ip string, port int) *net.TCPAddr {
return &net.TCPAddr{IP: net.ParseIP(ip), Port: port}
}
// Get wraps the Free function and panics on any failure retrieving ports.
func Get(n int) (ports []int) {
ports, err := Free(n)
if err != nil {
panic(err)
}
return ports
}
// GetT is suitable for use when retrieving unused ports in tests. If there is
// an error retrieving free ports, the test will be failed.
func GetT(t testing.T, n int) (ports []int) {
ports, err := Free(n)
if err != nil {
t.Fatalf("Failed retrieving free port: %v", err)
}
return ports
}
// Free returns a list of free ports from the allocated port block. It is safe
// to call this method concurrently. Ports have been tested to be available on
// 127.0.0.1 TCP but there is no guarantee that they will remain free in the
// future.
func Free(n int) (ports []int, err error) {
mu.Lock()
defer mu.Unlock()
if n > blockSize-1 {
return nil, fmt.Errorf("freeport: block size too small")
}
// Reserve a port block
once.Do(initialize)
for len(ports) < n {
port++
// roll-over the port
if port < firstPort+1 || port >= firstPort+blockSize {
port = firstPort + 1
}
// if the port is in use then skip it
ln, err := net.ListenTCP("tcp", tcpAddr("127.0.0.1", port))
if err != nil {
// log.Println("[DEBUG] freeport: port already in use: ", port)
continue
}
ln.Close()
ports = append(ports, port)
}
// log.Println("[DEBUG] freeport: free ports:", ports)
return ports, nil
}
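
A short usage sketch, assuming a caller inside the api/ tree (the function and package names are hypothetical): GetT hands out ports from the reserved block and fails the test if none are available, though as noted above a port is only tested free, not guaranteed to stay free:

package example

import (
	"fmt"
	"net"

	"github.com/hashicorp/nomad/api/internal/testutil/freeport"
	testing "github.com/mitchellh/go-testing-interface"
)

func listenOnFreePort(t testing.T) net.Listener {
	// One port from this process's reserved block.
	port := freeport.GetT(t, 1)[0]
	ln, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port))
	if err != nil {
		t.Fatalf("listen: %v", err)
	}
	return ln
}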

View File

@ -0,0 +1,76 @@
package testutil
import (
"net/http"
"net/http/httptest"
"sync"
)
// assert ResponseRecorder implements the http.ResponseWriter interface
var _ http.ResponseWriter = (*ResponseRecorder)(nil)
// ResponseRecorder implements a ResponseWriter which can be written to and
// read from concurrently. For use in testing streaming APIs where
// httptest.ResponseRecorder is unsafe for concurrent access. Uses
// httptest.ResponseRecorder internally and exposes most of the functionality.
type ResponseRecorder struct {
rr *httptest.ResponseRecorder
mu sync.Mutex
}
func NewResponseRecorder() *ResponseRecorder {
return &ResponseRecorder{
rr: httptest.NewRecorder(),
}
}
// Flush sets Flushed=true.
func (r *ResponseRecorder) Flush() {
r.mu.Lock()
defer r.mu.Unlock()
r.rr.Flush()
}
// Flushed returns true if Flush has been called.
func (r *ResponseRecorder) Flushed() bool {
r.mu.Lock()
defer r.mu.Unlock()
return r.rr.Flushed
}
// Header returns the response headers. Readers should call HeaderMap() to
// avoid races due to the server concurrently mutating headers.
func (r *ResponseRecorder) Header() http.Header {
r.mu.Lock()
defer r.mu.Unlock()
return r.rr.Header()
}
// HeaderMap returns the HTTP headers written before WriteHeader was called.
func (r *ResponseRecorder) HeaderMap() http.Header {
r.mu.Lock()
defer r.mu.Unlock()
return r.rr.HeaderMap
}
// Write to the underlying response buffer. Safe to call concurrent with Read.
func (r *ResponseRecorder) Write(p []byte) (int, error) {
r.mu.Lock()
defer r.mu.Unlock()
return r.rr.Body.Write(p)
}
// WriteHeader sets the response code and freezes the headers returned by
// HeaderMap. Safe to call concurrent with Read and HeaderMap.
func (r *ResponseRecorder) WriteHeader(statusCode int) {
r.mu.Lock()
defer r.mu.Unlock()
r.rr.WriteHeader(statusCode)
}
// Read available response bytes. Safe to call concurrently with Write().
func (r *ResponseRecorder) Read(p []byte) (int, error) {
r.mu.Lock()
defer r.mu.Unlock()
return r.rr.Body.Read(p)
}
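
A hypothetical sketch of the intended pattern (test name illustrative): one goroutine writes and flushes as a streaming handler would, while the test reads concurrently, which a plain httptest.ResponseRecorder cannot do safely:

package testutil_test

import (
	"fmt"
	"testing"
	"time"

	"github.com/hashicorp/nomad/api/internal/testutil"
)

func TestStreamingRecorder(t *testing.T) {
	rec := testutil.NewResponseRecorder()

	// The writer stands in for a streaming HTTP handler.
	go func() {
		for i := 0; i < 3; i++ {
			fmt.Fprintf(rec, "event %d\n", i)
			rec.Flush()
			time.Sleep(10 * time.Millisecond)
		}
	}()

	// Poll concurrently with the writer; Read returns io.EOF while drained.
	want := "event 0\nevent 1\nevent 2\n"
	got := ""
	buf := make([]byte, 64)
	for deadline := time.Now().Add(time.Second); got != want && time.Now().Before(deadline); {
		n, _ := rec.Read(buf)
		got += string(buf[:n])
	}
	if got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}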

View File

@ -0,0 +1,378 @@
package testutil
// TestServer is a test helper. It uses a fork/exec model to create
// a test Nomad server instance in the background and initialize it
// with some data and/or services. The test server can then be used
// to run a unit test, and offers an easy API to tear itself down
// when the test has completed. The only prerequisite is to have a nomad
// binary available on the $PATH.
//
// This package does not use Nomad's official API client. This is
// because we use TestServer to test the API client, which would
// otherwise cause an import cycle.
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/nomad/api/internal/testutil/discover"
"github.com/hashicorp/nomad/api/internal/testutil/freeport"
testing "github.com/mitchellh/go-testing-interface"
)
// TestServerConfig is the main server configuration struct.
type TestServerConfig struct {
NodeName string `json:"name,omitempty"`
DataDir string `json:"data_dir,omitempty"`
Region string `json:"region,omitempty"`
DisableCheckpoint bool `json:"disable_update_check"`
LogLevel string `json:"log_level,omitempty"`
Consul *Consul `json:"consul,omitempty"`
AdvertiseAddrs *Advertise `json:"advertise,omitempty"`
Ports *PortsConfig `json:"ports,omitempty"`
Server *ServerConfig `json:"server,omitempty"`
Client *ClientConfig `json:"client,omitempty"`
Vault *VaultConfig `json:"vault,omitempty"`
ACL *ACLConfig `json:"acl,omitempty"`
DevMode bool `json:"-"`
Stdout, Stderr io.Writer `json:"-"`
}
// Consul is used to configure the communication with Consul
type Consul struct {
Address string `json:"address,omitempty"`
Auth string `json:"auth,omitempty"`
Token string `json:"token,omitempty"`
}
// Advertise is used to configure the addresses to advertise
type Advertise struct {
HTTP string `json:"http,omitempty"`
RPC string `json:"rpc,omitempty"`
Serf string `json:"serf,omitempty"`
}
// PortsConfig is used to configure the network ports we use.
type PortsConfig struct {
HTTP int `json:"http,omitempty"`
RPC int `json:"rpc,omitempty"`
Serf int `json:"serf,omitempty"`
}
// ServerConfig is used to configure the nomad server.
type ServerConfig struct {
Enabled bool `json:"enabled"`
BootstrapExpect int `json:"bootstrap_expect"`
RaftProtocol int `json:"raft_protocol,omitempty"`
}
// ClientConfig is used to configure the client
type ClientConfig struct {
Enabled bool `json:"enabled"`
}
// VaultConfig is used to configure Vault
type VaultConfig struct {
Enabled bool `json:"enabled"`
}
// ACLConfig is used to configure ACLs
type ACLConfig struct {
Enabled bool `json:"enabled"`
}
// ServerConfigCallback is a function interface which can be
// passed to NewTestServer to modify the server config.
type ServerConfigCallback func(c *TestServerConfig)
// defaultServerConfig returns a new TestServerConfig struct
// with listen ports allocated from freeport.
func defaultServerConfig(t testing.T) *TestServerConfig {
ports := freeport.GetT(t, 3)
return &TestServerConfig{
NodeName: fmt.Sprintf("node-%d", ports[0]),
DisableCheckpoint: true,
LogLevel: "DEBUG",
Ports: &PortsConfig{
HTTP: ports[0],
RPC: ports[1],
Serf: ports[2],
},
Server: &ServerConfig{
Enabled: true,
BootstrapExpect: 1,
},
Client: &ClientConfig{
Enabled: false,
},
Vault: &VaultConfig{
Enabled: false,
},
ACL: &ACLConfig{
Enabled: false,
},
}
}
// TestServer is the main server wrapper struct.
type TestServer struct {
cmd *exec.Cmd
Config *TestServerConfig
t testing.T
HTTPAddr string
SerfAddr string
HTTPClient *http.Client
}
// NewTestServer creates a new TestServer, and makes a call to
// an optional callback function to modify the configuration.
func NewTestServer(t testing.T, cb ServerConfigCallback) *TestServer {
path, err := discover.NomadExecutable()
if err != nil {
t.Skipf("nomad not found, skipping: %v", err)
}
// Do a sanity check that we are actually running nomad
vcmd := exec.Command(path, "-version")
vcmd.Stdout = nil
vcmd.Stderr = nil
if err := vcmd.Run(); err != nil {
t.Skipf("nomad version failed: %v", err)
}
dataDir, err := ioutil.TempDir("", "nomad")
if err != nil {
t.Fatalf("err: %s", err)
}
configFile, err := ioutil.TempFile(dataDir, "nomad")
if err != nil {
defer os.RemoveAll(dataDir)
t.Fatalf("err: %s", err)
}
defer configFile.Close()
nomadConfig := defaultServerConfig(t)
nomadConfig.DataDir = dataDir
if cb != nil {
cb(nomadConfig)
}
configContent, err := json.Marshal(nomadConfig)
if err != nil {
t.Fatalf("err: %s", err)
}
if _, err := configFile.Write(configContent); err != nil {
t.Fatalf("err: %s", err)
}
configFile.Close()
stdout := io.Writer(os.Stdout)
if nomadConfig.Stdout != nil {
stdout = nomadConfig.Stdout
}
stderr := io.Writer(os.Stderr)
if nomadConfig.Stderr != nil {
stderr = nomadConfig.Stderr
}
args := []string{"agent", "-config", configFile.Name()}
if nomadConfig.DevMode {
args = append(args, "-dev")
}
// Start the server
cmd := exec.Command(path, args...)
cmd.Stdout = stdout
cmd.Stderr = stderr
if err := cmd.Start(); err != nil {
t.Fatalf("err: %s", err)
}
client := cleanhttp.DefaultClient()
server := &TestServer{
Config: nomadConfig,
cmd: cmd,
t: t,
HTTPAddr: fmt.Sprintf("127.0.0.1:%d", nomadConfig.Ports.HTTP),
SerfAddr: fmt.Sprintf("127.0.0.1:%d", nomadConfig.Ports.Serf),
HTTPClient: client,
}
// Wait for the server to be ready
if nomadConfig.Server.Enabled && nomadConfig.Server.BootstrapExpect != 0 {
server.waitForLeader()
} else {
server.waitForAPI()
}
// Wait for the client to be ready
if nomadConfig.DevMode {
server.waitForClient()
}
return server
}
// Stop stops the test Nomad server, and removes the Nomad data
// directory once we are done.
func (s *TestServer) Stop() {
defer os.RemoveAll(s.Config.DataDir)
if err := s.cmd.Process.Kill(); err != nil {
s.t.Errorf("err: %s", err)
}
// wait for the process to exit to be sure that the data dir can be
// deleted on all platforms.
s.cmd.Wait()
}
// waitForAPI waits for only the agent HTTP endpoint to start
// responding. This is an indication that the agent has started,
// but will likely return before a leader is elected.
func (s *TestServer) waitForAPI() {
WaitForResult(func() (bool, error) {
// Use this endpoint since it does not have restricted access
resp, err := s.HTTPClient.Get(s.url("/v1/metrics"))
if err != nil {
return false, err
}
defer resp.Body.Close()
if err := s.requireOK(resp); err != nil {
return false, err
}
return true, nil
}, func(err error) {
defer s.Stop()
s.t.Fatalf("err: %s", err)
})
}
// waitForLeader waits for the Nomad server's HTTP API to become
// available, and then for the /v1/status/leader endpoint to return
// successfully, indicating that leader election has completed.
func (s *TestServer) waitForLeader() {
WaitForResult(func() (bool, error) {
// Query the API and check the status code
// Use this endpoint since it does not have restricted access
resp, err := s.HTTPClient.Get(s.url("/v1/status/leader"))
if err != nil {
return false, err
}
defer resp.Body.Close()
if err := s.requireOK(resp); err != nil {
return false, err
}
return true, nil
}, func(err error) {
defer s.Stop()
s.t.Fatalf("err: %s", err)
})
}
// waitForClient waits for the Nomad client to be ready. The function returns
// immediately if the server is not in dev mode.
func (s *TestServer) waitForClient() {
if !s.Config.DevMode {
return
}
WaitForResult(func() (bool, error) {
resp, err := s.HTTPClient.Get(s.url("/v1/nodes"))
if err != nil {
return false, err
}
defer resp.Body.Close()
if err := s.requireOK(resp); err != nil {
return false, err
}
var decoded []struct {
ID string
Status string
}
dec := json.NewDecoder(resp.Body)
if err := dec.Decode(&decoded); err != nil {
return false, err
}
if len(decoded) != 1 || decoded[0].Status != "ready" {
return false, fmt.Errorf("Node not ready: %v", decoded)
}
return true, nil
}, func(err error) {
defer s.Stop()
s.t.Fatalf("err: %s", err)
})
}
// url is a helper function which takes a relative URL and
// makes it into a proper URL against the local Nomad server.
func (s *TestServer) url(path string) string {
return fmt.Sprintf("http://%s%s", s.HTTPAddr, path)
}
// requireOK checks the HTTP response code and ensures it is acceptable.
func (s *TestServer) requireOK(resp *http.Response) error {
if resp.StatusCode != 200 {
return fmt.Errorf("Bad status code: %d", resp.StatusCode)
}
return nil
}
// put performs a new HTTP PUT request.
func (s *TestServer) put(path string, body io.Reader) *http.Response {
req, err := http.NewRequest("PUT", s.url(path), body)
if err != nil {
s.t.Fatalf("err: %s", err)
}
resp, err := s.HTTPClient.Do(req)
if err != nil {
s.t.Fatalf("err: %s", err)
}
if err := s.requireOK(resp); err != nil {
defer resp.Body.Close()
s.t.Fatal(err)
}
return resp
}
// get performs a new HTTP GET request.
func (s *TestServer) get(path string) *http.Response {
resp, err := s.HTTPClient.Get(s.url(path))
if err != nil {
s.t.Fatalf("err: %s", err)
}
if err := s.requireOK(resp); err != nil {
defer resp.Body.Close()
s.t.Fatal(err)
}
return resp
}
// encodePayload returns a new io.Reader wrapping the encoded contents
// of the payload, suitable for passing directly to a new request.
func (s *TestServer) encodePayload(payload interface{}) io.Reader {
var encoded bytes.Buffer
enc := json.NewEncoder(&encoded)
if err := enc.Encode(payload); err != nil {
s.t.Fatalf("err: %s", err)
}
return &encoded
}
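
A hedged end-to-end sketch (it mirrors the makeClient helper added in apitests/ further down; names are illustrative): start an agent, point an api.Client at it, and tear it down:

package testutil_test

import (
	"testing"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/api/internal/testutil"
)

func TestAgainstRealAgent(t *testing.T) {
	// Skips automatically if no nomad binary can be discovered.
	srv := testutil.NewTestServer(t, func(c *testutil.TestServerConfig) {
		c.DevMode = true // one process acting as both server and client
	})
	defer srv.Stop()

	conf := api.DefaultConfig()
	conf.Address = "http://" + srv.HTTPAddr
	client, err := api.NewClient(conf)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if _, err := client.Agent().Self(); err != nil {
		t.Fatalf("err: %v", err)
	}
}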

View File

@ -0,0 +1,15 @@
package testutil
import (
"os"
testing "github.com/mitchellh/go-testing-interface"
)
// SkipSlow skips a slow test unless the NOMAD_SLOW_TEST environment variable
// is set.
func SkipSlow(t testing.T) {
if os.Getenv("NOMAD_SLOW_TEST") == "" {
t.Skip("Skipping slow test. Set NOMAD_SLOW_TEST=1 to run.")
}
}
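
Usage is a one-liner at the top of a test; a hypothetical example:

package testutil_test

import (
	"testing"

	"github.com/hashicorp/nomad/api/internal/testutil"
)

func TestExpensiveEndToEnd(t *testing.T) {
	testutil.SkipSlow(t) // skipped unless NOMAD_SLOW_TEST is set
	// ... long-running assertions ...
}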

View File

@ -0,0 +1,74 @@
package testutil
import (
"os"
"time"
)
type testFn func() (bool, error)
type errorFn func(error)
func WaitForResult(test testFn, error errorFn) {
WaitForResultRetries(500*TestMultiplier(), test, error)
}
func WaitForResultRetries(retries int64, test testFn, error errorFn) {
for retries > 0 {
time.Sleep(10 * time.Millisecond)
retries--
success, err := test()
if success {
return
}
if retries == 0 {
error(err)
}
}
}
// AssertUntil asserts the test function passes throughout the given duration.
// Otherwise error is called on failure.
func AssertUntil(until time.Duration, test testFn, error errorFn) {
deadline := time.Now().Add(until)
for time.Now().Before(deadline) {
success, err := test()
if !success {
error(err)
return
}
// Sleep some arbitrary fraction of the deadline
time.Sleep(until / 30)
}
}
// TestMultiplier returns a multiplier for retries and waits given the
// environment the tests are being run under.
func TestMultiplier() int64 {
if IsCI() {
return 4
}
return 1
}
// Timeout takes the desired timeout and increases it if running in CI
func Timeout(original time.Duration) time.Duration {
return original * time.Duration(TestMultiplier())
}
func IsCI() bool {
_, ok := os.LookupEnv("CI")
return ok
}
func IsTravis() bool {
_, ok := os.LookupEnv("TRAVIS")
return ok
}
func IsAppVeyor() bool {
_, ok := os.LookupEnv("APPVEYOR")
return ok
}
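
A minimal sketch of the retry helper in use (the test body is illustrative): the test function is polled every 10ms up to 500*TestMultiplier() times, and the error function fires only once the retries are exhausted:

package testutil_test

import (
	"fmt"
	"sync/atomic"
	"testing"

	"github.com/hashicorp/nomad/api/internal/testutil"
)

func TestEventuallyReady(t *testing.T) {
	var ready int32
	go func() { atomic.StoreInt32(&ready, 1) }()

	testutil.WaitForResult(func() (bool, error) {
		if atomic.LoadInt32(&ready) == 1 {
			return true, nil
		}
		return false, fmt.Errorf("not ready yet")
	}, func(err error) {
		t.Fatalf("timed out: %v", err)
	})
}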

View File

@ -7,10 +7,8 @@ import (
"testing"
"time"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/testutil"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/kr/pretty"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -46,43 +44,6 @@ func TestJobs_Register(t *testing.T) {
}
}
func TestJobs_Parse(t *testing.T) {
t.Parallel()
c, s := makeClient(t, nil, nil)
defer s.Stop()
jobs := c.Jobs()
checkJob := func(job *Job, expectedRegion string) {
if job == nil {
t.Fatal("job should not be nil")
}
region := job.Region
if region == nil {
if expectedRegion != "" {
t.Fatalf("expected job region to be '%s' but was unset", expectedRegion)
}
} else {
if expectedRegion != *region {
t.Fatalf("expected job region '%s', but got '%s'", expectedRegion, *region)
}
}
}
job, err := jobs.ParseHCL(mock.HCL(), true)
if err != nil {
t.Fatalf("err: %s", err)
}
checkJob(job, "global")
job, err = jobs.ParseHCL(mock.HCL(), false)
if err != nil {
t.Fatalf("err: %s", err)
}
checkJob(job, "")
}
func TestJobs_Validate(t *testing.T) {
t.Parallel()
c, s := makeClient(t, nil, nil)
@ -1435,42 +1396,3 @@ func TestJobs_AddSpread(t *testing.T) {
t.Fatalf("expect: %#v, got: %#v", expect, job.Spreads)
}
}
func TestJobs_Summary_WithACL(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s, root := makeACLClient(t, nil, nil)
defer s.Stop()
jobs := c.Jobs()
invalidToken := mock.ACLToken()
// Registering with an invalid token should fail
c.SetSecretID(invalidToken.SecretID)
job := testJob()
_, _, err := jobs.Register(job, nil)
assert.NotNil(err)
// Register with token should succeed
c.SetSecretID(root.SecretID)
resp2, wm, err := jobs.Register(job, nil)
assert.Nil(err)
assert.NotNil(resp2)
assert.NotEqual("", resp2.EvalID)
assertWriteMeta(t, wm)
// Query the job summary with an invalid token should fail
c.SetSecretID(invalidToken.SecretID)
result, _, err := jobs.Summary(*job.ID, nil)
assert.NotNil(err)
// Query the job summary with a valid token should succeed
c.SetSecretID(root.SecretID)
result, qm, err := jobs.Summary(*job.ID, nil)
assert.Nil(err)
assertQueryMeta(t, qm)
// Check that the result is what we expect
assert.Equal(*job.ID, result.JobID)
}

View File

@ -9,9 +9,7 @@ import (
"testing"
"time"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
"github.com/hashicorp/nomad/api/internal/testutil"
"github.com/stretchr/testify/require"
)
@ -353,30 +351,6 @@ func TestNodes_Sort(t *testing.T) {
}
}
func TestNodes_GC(t *testing.T) {
t.Parallel()
require := require.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
nodes := c.Nodes()
err := nodes.GC(uuid.Generate(), nil)
require.NotNil(err)
require.True(structs.IsErrUnknownNode(err))
}
func TestNodes_GcAlloc(t *testing.T) {
t.Parallel()
require := require.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
nodes := c.Nodes()
err := nodes.GcAlloc(uuid.Generate(), nil)
require.NotNil(err)
require.True(structs.IsErrUnknownAllocation(err))
}
// Unit test monitorDrainMultiplex when an error occurs
func TestNodes_MonitorDrain_Multiplex_Bad(t *testing.T) {
t.Parallel()

View File

@ -3,9 +3,6 @@ package api
import (
"strings"
"testing"
"github.com/hashicorp/consul/testutil/retry"
"github.com/stretchr/testify/require"
)
func TestOperator_RaftGetConfiguration(t *testing.T) {
@ -54,70 +51,3 @@ func TestOperator_RaftRemovePeerByID(t *testing.T) {
t.Fatalf("err: %v", err)
}
}
func TestAPI_OperatorSchedulerGetSetConfiguration(t *testing.T) {
t.Parallel()
require := require.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
operator := c.Operator()
var config *SchedulerConfigurationResponse
retry.Run(t, func(r *retry.R) {
var err error
config, _, err = operator.SchedulerGetConfiguration(nil)
r.Check(err)
})
require.True(config.SchedulerConfig.PreemptionConfig.SystemSchedulerEnabled)
// Change a config setting
newConf := &SchedulerConfiguration{PreemptionConfig: PreemptionConfig{SystemSchedulerEnabled: false}}
resp, wm, err := operator.SchedulerSetConfiguration(newConf, nil)
require.Nil(err)
require.NotZero(wm.LastIndex)
require.False(resp.Updated)
config, _, err = operator.SchedulerGetConfiguration(nil)
require.Nil(err)
require.False(config.SchedulerConfig.PreemptionConfig.SystemSchedulerEnabled)
}
func TestAPI_OperatorSchedulerCASConfiguration(t *testing.T) {
t.Parallel()
require := require.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
operator := c.Operator()
var config *SchedulerConfigurationResponse
retry.Run(t, func(r *retry.R) {
var err error
config, _, err = operator.SchedulerGetConfiguration(nil)
r.Check(err)
})
require.True(config.SchedulerConfig.PreemptionConfig.SystemSchedulerEnabled)
// Pass an invalid ModifyIndex
{
newConf := &SchedulerConfiguration{
PreemptionConfig: PreemptionConfig{SystemSchedulerEnabled: false},
ModifyIndex: config.SchedulerConfig.ModifyIndex - 1,
}
resp, wm, err := operator.SchedulerCASConfiguration(newConf, nil)
require.Nil(err)
require.NotZero(wm.LastIndex)
require.False(resp.Updated)
}
// Pass a valid ModifyIndex
{
newConf := &SchedulerConfiguration{
PreemptionConfig: PreemptionConfig{SystemSchedulerEnabled: false},
ModifyIndex: config.SchedulerConfig.ModifyIndex,
}
resp, wm, err := operator.SchedulerCASConfiguration(newConf, nil)
require.Nil(err)
require.NotZero(wm.LastIndex)
require.True(resp.Updated)
}
}

View File

@ -4,7 +4,7 @@ import (
"fmt"
"testing"
"github.com/hashicorp/nomad/testutil"
"github.com/hashicorp/nomad/api/internal/testutil"
)
func TestRegionsList(t *testing.T) {

View File

@ -6,7 +6,6 @@ import (
"testing"
"time"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@ -392,167 +391,6 @@ func TestTaskGroup_Canonicalize_Update(t *testing.T) {
assert.Nil(t, tg.Update)
}
// Verifies that reschedule policy is merged correctly
func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) {
type testCase struct {
desc string
jobReschedulePolicy *ReschedulePolicy
taskReschedulePolicy *ReschedulePolicy
expected *ReschedulePolicy
}
testCases := []testCase{
{
desc: "Default",
jobReschedulePolicy: nil,
taskReschedulePolicy: nil,
expected: &ReschedulePolicy{
Attempts: intToPtr(structs.DefaultBatchJobReschedulePolicy.Attempts),
Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
DelayFunction: stringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
MaxDelay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
Unlimited: boolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
},
},
{
desc: "Empty job reschedule policy",
jobReschedulePolicy: &ReschedulePolicy{
Attempts: intToPtr(0),
Interval: timeToPtr(0),
Delay: timeToPtr(0),
MaxDelay: timeToPtr(0),
DelayFunction: stringToPtr(""),
Unlimited: boolToPtr(false),
},
taskReschedulePolicy: nil,
expected: &ReschedulePolicy{
Attempts: intToPtr(0),
Interval: timeToPtr(0),
Delay: timeToPtr(0),
MaxDelay: timeToPtr(0),
DelayFunction: stringToPtr(""),
Unlimited: boolToPtr(false),
},
},
{
desc: "Inherit from job",
jobReschedulePolicy: &ReschedulePolicy{
Attempts: intToPtr(1),
Interval: timeToPtr(20 * time.Second),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
taskReschedulePolicy: nil,
expected: &ReschedulePolicy{
Attempts: intToPtr(1),
Interval: timeToPtr(20 * time.Second),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
},
{
desc: "Set in task",
jobReschedulePolicy: nil,
taskReschedulePolicy: &ReschedulePolicy{
Attempts: intToPtr(5),
Interval: timeToPtr(2 * time.Minute),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
expected: &ReschedulePolicy{
Attempts: intToPtr(5),
Interval: timeToPtr(2 * time.Minute),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
},
{
desc: "Merge from job",
jobReschedulePolicy: &ReschedulePolicy{
Attempts: intToPtr(1),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
},
taskReschedulePolicy: &ReschedulePolicy{
Interval: timeToPtr(5 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
expected: &ReschedulePolicy{
Attempts: intToPtr(1),
Interval: timeToPtr(5 * time.Minute),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
},
{
desc: "Override from group",
jobReschedulePolicy: &ReschedulePolicy{
Attempts: intToPtr(1),
MaxDelay: timeToPtr(10 * time.Second),
},
taskReschedulePolicy: &ReschedulePolicy{
Attempts: intToPtr(5),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(20 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
expected: &ReschedulePolicy{
Attempts: intToPtr(5),
Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(20 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
},
{
desc: "Attempts from job, default interval",
jobReschedulePolicy: &ReschedulePolicy{
Attempts: intToPtr(1),
},
taskReschedulePolicy: nil,
expected: &ReschedulePolicy{
Attempts: intToPtr(1),
Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
DelayFunction: stringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
MaxDelay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
Unlimited: boolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
},
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
job := &Job{
ID: stringToPtr("test"),
Reschedule: tc.jobReschedulePolicy,
Type: stringToPtr(JobTypeBatch),
}
job.Canonicalize()
tg := &TaskGroup{
Name: stringToPtr("foo"),
ReschedulePolicy: tc.taskReschedulePolicy,
}
tg.Canonicalize(job)
assert.Equal(t, tc.expected, tg.ReschedulePolicy)
})
}
}
// Verifies that migrate strategy is merged correctly
func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
type testCase struct {

View File

@ -0,0 +1,56 @@
package apitests
import (
"testing"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/testutil"
)
type configCallback func(c *api.Config)
// seen is used to track which tests we have already marked as parallel
var seen map[*testing.T]struct{}
func init() {
seen = make(map[*testing.T]struct{})
}
func makeACLClient(t *testing.T, cb1 configCallback,
cb2 testutil.ServerConfigCallback) (*api.Client, *testutil.TestServer, *api.ACLToken) {
client, server := makeClient(t, cb1, func(c *testutil.TestServerConfig) {
c.ACL.Enabled = true
if cb2 != nil {
cb2(c)
}
})
// Get the root token
root, _, err := client.ACLTokens().Bootstrap(nil)
if err != nil {
t.Fatalf("failed to bootstrap ACLs: %v", err)
}
client.SetSecretID(root.SecretID)
return client, server, root
}
func makeClient(t *testing.T, cb1 configCallback,
cb2 testutil.ServerConfigCallback) (*api.Client, *testutil.TestServer) {
// Make client config
conf := api.DefaultConfig()
if cb1 != nil {
cb1(conf)
}
// Create server
server := testutil.NewTestServer(t, cb2)
conf.Address = "http://" + server.HTTPAddr
// Create client
client, err := api.NewClient(conf)
if err != nil {
t.Fatalf("err: %v", err)
}
return client, server
}

View File

@ -0,0 +1,85 @@
package apitests
import (
"testing"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/stretchr/testify/assert"
)
func TestJobs_Parse(t *testing.T) {
t.Parallel()
c, s := makeClient(t, nil, nil)
defer s.Stop()
jobs := c.Jobs()
checkJob := func(job *api.Job, expectedRegion string) {
if job == nil {
t.Fatal("job should not be nil")
}
region := job.Region
if region == nil {
if expectedRegion != "" {
t.Fatalf("expected job region to be '%s' but was unset", expectedRegion)
}
} else {
if expectedRegion != *region {
t.Fatalf("expected job region '%s', but got '%s'", expectedRegion, *region)
}
}
}
job, err := jobs.ParseHCL(mock.HCL(), true)
if err != nil {
t.Fatalf("err: %s", err)
}
checkJob(job, "global")
job, err = jobs.ParseHCL(mock.HCL(), false)
if err != nil {
t.Fatalf("err: %s", err)
}
checkJob(job, "")
}
func TestJobs_Summary_WithACL(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s, root := makeACLClient(t, nil, nil)
defer s.Stop()
jobs := c.Jobs()
invalidToken := mock.ACLToken()
// Registering with an invalid token should fail
c.SetSecretID(invalidToken.SecretID)
job := testJob()
_, _, err := jobs.Register(job, nil)
assert.NotNil(err)
// Register with token should succeed
c.SetSecretID(root.SecretID)
resp2, wm, err := jobs.Register(job, nil)
assert.Nil(err)
assert.NotNil(resp2)
assert.NotEqual("", resp2.EvalID)
assertWriteMeta(t, wm)
// Query the job summary with an invalid token should fail
c.SetSecretID(invalidToken.SecretID)
result, _, err := jobs.Summary(*job.ID, nil)
assert.NotNil(err)
// Query the job summary with a valid token should succeed
c.SetSecretID(root.SecretID)
result, qm, err := jobs.Summary(*job.ID, nil)
assert.Nil(err)
assertQueryMeta(t, qm)
// Check that the result is what we expect
assert.Equal(*job.ID, result.JobID)
}

View File

@ -0,0 +1,33 @@
package apitests
import (
"testing"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/require"
)
func TestNodes_GC(t *testing.T) {
t.Parallel()
require := require.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
nodes := c.Nodes()
err := nodes.GC(uuid.Generate(), nil)
require.NotNil(err)
require.True(structs.IsErrUnknownNode(err))
}
func TestNodes_GcAlloc(t *testing.T) {
t.Parallel()
require := require.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
nodes := c.Nodes()
err := nodes.GcAlloc(uuid.Generate(), nil)
require.NotNil(err)
require.True(structs.IsErrUnknownAllocation(err))
}

View File

@ -1,4 +1,4 @@
package api
package apitests
import (
"testing"
@ -6,6 +6,7 @@ import (
"fmt"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/testutil"
"github.com/stretchr/testify/require"
)
@ -17,7 +18,7 @@ func TestAPI_OperatorAutopilotGetSetConfiguration(t *testing.T) {
defer s.Stop()
operator := c.Operator()
var config *AutopilotConfiguration
var config *api.AutopilotConfiguration
retry.Run(t, func(r *retry.R) {
var err error
config, _, err = operator.AutopilotGetConfiguration(nil)
@ -26,7 +27,7 @@ func TestAPI_OperatorAutopilotGetSetConfiguration(t *testing.T) {
require.True(config.CleanupDeadServers)
// Change a config setting
newConf := &AutopilotConfiguration{CleanupDeadServers: false}
newConf := &api.AutopilotConfiguration{CleanupDeadServers: false}
_, err := operator.AutopilotSetConfiguration(newConf, nil)
require.Nil(err)
@ -42,7 +43,7 @@ func TestAPI_OperatorAutopilotCASConfiguration(t *testing.T) {
defer s.Stop()
operator := c.Operator()
var config *AutopilotConfiguration
var config *api.AutopilotConfiguration
retry.Run(t, func(r *retry.R) {
var err error
config, _, err = operator.AutopilotGetConfiguration(nil)
@ -52,7 +53,7 @@ func TestAPI_OperatorAutopilotCASConfiguration(t *testing.T) {
// Pass an invalid ModifyIndex
{
newConf := &AutopilotConfiguration{
newConf := &api.AutopilotConfiguration{
CleanupDeadServers: false,
ModifyIndex: config.ModifyIndex - 1,
}
@ -63,7 +64,7 @@ func TestAPI_OperatorAutopilotCASConfiguration(t *testing.T) {
// Pass a valid ModifyIndex
{
newConf := &AutopilotConfiguration{
newConf := &api.AutopilotConfiguration{
CleanupDeadServers: false,
ModifyIndex: config.ModifyIndex,
}

View File

@ -0,0 +1,76 @@
package apitests
import (
"testing"
"github.com/hashicorp/consul/testutil/retry"
"github.com/hashicorp/nomad/api"
"github.com/stretchr/testify/require"
)
func TestAPI_OperatorSchedulerGetSetConfiguration(t *testing.T) {
t.Parallel()
require := require.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
operator := c.Operator()
var config *api.SchedulerConfigurationResponse
retry.Run(t, func(r *retry.R) {
var err error
config, _, err = operator.SchedulerGetConfiguration(nil)
r.Check(err)
})
require.True(config.SchedulerConfig.PreemptionConfig.SystemSchedulerEnabled)
// Change a config setting
newConf := &api.SchedulerConfiguration{PreemptionConfig: api.PreemptionConfig{SystemSchedulerEnabled: false}}
resp, wm, err := operator.SchedulerSetConfiguration(newConf, nil)
require.Nil(err)
require.NotZero(wm.LastIndex)
require.False(resp.Updated)
config, _, err = operator.SchedulerGetConfiguration(nil)
require.Nil(err)
require.False(config.SchedulerConfig.PreemptionConfig.SystemSchedulerEnabled)
}
func TestAPI_OperatorSchedulerCASConfiguration(t *testing.T) {
t.Parallel()
require := require.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
operator := c.Operator()
var config *api.SchedulerConfigurationResponse
retry.Run(t, func(r *retry.R) {
var err error
config, _, err = operator.SchedulerGetConfiguration(nil)
r.Check(err)
})
require.True(config.SchedulerConfig.PreemptionConfig.SystemSchedulerEnabled)
// Pass an invalid ModifyIndex
{
newConf := &api.SchedulerConfiguration{
PreemptionConfig: api.PreemptionConfig{SystemSchedulerEnabled: false},
ModifyIndex: config.SchedulerConfig.ModifyIndex - 1,
}
resp, wm, err := operator.SchedulerCASConfiguration(newConf, nil)
require.Nil(err)
require.NotZero(wm.LastIndex)
require.False(resp.Updated)
}
// Pass a valid ModifyIndex
{
newConf := &api.SchedulerConfiguration{
PreemptionConfig: api.PreemptionConfig{SystemSchedulerEnabled: false},
ModifyIndex: config.SchedulerConfig.ModifyIndex,
}
resp, wm, err := operator.SchedulerCASConfiguration(newConf, nil)
require.Nil(err)
require.NotZero(wm.LastIndex)
require.True(resp.Updated)
}
}

View File

@ -1,9 +1,10 @@
package api
package apitests
import (
"encoding/json"
"testing"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/require"
)
@ -15,7 +16,7 @@ import (
// such dependency without affecting api clients.
func TestDefaultResourcesAreInSync(t *testing.T) {
apiR := DefaultResources()
apiR := api.DefaultResources()
structsR := structs.DefaultResources()
require.EqualValues(t, *structsR, toStructsResource(t, apiR))
@ -27,7 +28,7 @@ func TestDefaultResourcesAreInSync(t *testing.T) {
}
func TestMinResourcesAreInSync(t *testing.T) {
apiR := MinResources()
apiR := api.MinResources()
structsR := structs.MinResources()
require.EqualValues(t, *structsR, toStructsResource(t, apiR))
@ -50,7 +51,7 @@ func TestNewDefaultRescheulePolicyInSync(t *testing.T) {
for _, c := range cases {
t.Run(c.typ, func(t *testing.T) {
apiP := NewDefaultReschedulePolicy(c.typ)
apiP := api.NewDefaultReschedulePolicy(c.typ)
var found structs.ReschedulePolicy
toStructs(t, &found, apiP)
@ -72,8 +73,8 @@ func TestNewDefaultRestartPolicyInSync(t *testing.T) {
for _, c := range cases {
t.Run(c.typ, func(t *testing.T) {
job := Job{Type: &c.typ}
var tg TaskGroup
job := api.Job{Type: &c.typ}
var tg api.TaskGroup
tg.Canonicalize(&job)
apiP := tg.RestartPolicy
@ -86,7 +87,7 @@ func TestNewDefaultRestartPolicyInSync(t *testing.T) {
}
}
func toStructsResource(t *testing.T, in *Resources) structs.Resources {
func toStructsResource(t *testing.T, in *api.Resources) structs.Resources {
var out structs.Resources
toStructs(t, &out, in)
return out

View File

@ -0,0 +1,171 @@
package apitests
import (
"testing"
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/assert"
)
// Verifies that reschedule policy is merged correctly
func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) {
type testCase struct {
desc string
jobReschedulePolicy *api.ReschedulePolicy
taskReschedulePolicy *api.ReschedulePolicy
expected *api.ReschedulePolicy
}
testCases := []testCase{
{
desc: "Default",
jobReschedulePolicy: nil,
taskReschedulePolicy: nil,
expected: &api.ReschedulePolicy{
Attempts: intToPtr(structs.DefaultBatchJobReschedulePolicy.Attempts),
Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
DelayFunction: stringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
MaxDelay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
Unlimited: boolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
},
},
{
desc: "Empty job reschedule policy",
jobReschedulePolicy: &api.ReschedulePolicy{
Attempts: intToPtr(0),
Interval: timeToPtr(0),
Delay: timeToPtr(0),
MaxDelay: timeToPtr(0),
DelayFunction: stringToPtr(""),
Unlimited: boolToPtr(false),
},
taskReschedulePolicy: nil,
expected: &api.ReschedulePolicy{
Attempts: intToPtr(0),
Interval: timeToPtr(0),
Delay: timeToPtr(0),
MaxDelay: timeToPtr(0),
DelayFunction: stringToPtr(""),
Unlimited: boolToPtr(false),
},
},
{
desc: "Inherit from job",
jobReschedulePolicy: &api.ReschedulePolicy{
Attempts: intToPtr(1),
Interval: timeToPtr(20 * time.Second),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
taskReschedulePolicy: nil,
expected: &api.ReschedulePolicy{
Attempts: intToPtr(1),
Interval: timeToPtr(20 * time.Second),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
},
{
desc: "Set in task",
jobReschedulePolicy: nil,
taskReschedulePolicy: &api.ReschedulePolicy{
Attempts: intToPtr(5),
Interval: timeToPtr(2 * time.Minute),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
expected: &api.ReschedulePolicy{
Attempts: intToPtr(5),
Interval: timeToPtr(2 * time.Minute),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
},
{
desc: "Merge from job",
jobReschedulePolicy: &api.ReschedulePolicy{
Attempts: intToPtr(1),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
},
taskReschedulePolicy: &api.ReschedulePolicy{
Interval: timeToPtr(5 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
expected: &api.ReschedulePolicy{
Attempts: intToPtr(1),
Interval: timeToPtr(5 * time.Minute),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(10 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
},
{
desc: "Override from group",
jobReschedulePolicy: &api.ReschedulePolicy{
Attempts: intToPtr(1),
MaxDelay: timeToPtr(10 * time.Second),
},
taskReschedulePolicy: &api.ReschedulePolicy{
Attempts: intToPtr(5),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(20 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
expected: &api.ReschedulePolicy{
Attempts: intToPtr(5),
Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: timeToPtr(20 * time.Second),
MaxDelay: timeToPtr(20 * time.Minute),
DelayFunction: stringToPtr("constant"),
Unlimited: boolToPtr(false),
},
},
{
desc: "Attempts from job, default interval",
jobReschedulePolicy: &api.ReschedulePolicy{
Attempts: intToPtr(1),
},
taskReschedulePolicy: nil,
expected: &api.ReschedulePolicy{
Attempts: intToPtr(1),
Interval: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
Delay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
DelayFunction: stringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
MaxDelay: timeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
Unlimited: boolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
},
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
job := &api.Job{
ID: stringToPtr("test"),
Reschedule: tc.jobReschedulePolicy,
Type: stringToPtr(api.JobTypeBatch),
}
job.Canonicalize()
tg := &api.TaskGroup{
Name: stringToPtr("foo"),
ReschedulePolicy: tc.taskReschedulePolicy,
}
tg.Canonicalize(job)
assert.Equal(t, tc.expected, tg.ReschedulePolicy)
})
}
}

View File

@ -0,0 +1,83 @@
package apitests
import (
"testing"
"time"
"github.com/hashicorp/nomad/api"
)
// boolToPtr returns the pointer to a boolean
func boolToPtr(b bool) *bool {
return &b
}
// intToPtr returns the pointer to an int
func intToPtr(i int) *int {
return &i
}
// timeToPtr returns the pointer to a time duration
func timeToPtr(t time.Duration) *time.Duration {
return &t
}
// stringToPtr returns the pointer to a string
func stringToPtr(str string) *string {
return &str
}
func assertQueryMeta(t *testing.T, qm *api.QueryMeta) {
t.Helper()
if qm.LastIndex == 0 {
t.Fatalf("bad index: %d", qm.LastIndex)
}
if !qm.KnownLeader {
t.Fatalf("expected known leader, got none")
}
}
func assertWriteMeta(t *testing.T, wm *api.WriteMeta) {
t.Helper()
if wm.LastIndex == 0 {
t.Fatalf("bad index: %d", wm.LastIndex)
}
}
func testJob() *api.Job {
task := api.NewTask("task1", "exec").
SetConfig("command", "/bin/sleep").
Require(&api.Resources{
CPU: intToPtr(100),
MemoryMB: intToPtr(256),
}).
SetLogConfig(&api.LogConfig{
MaxFiles: intToPtr(1),
MaxFileSizeMB: intToPtr(2),
})
group := api.NewTaskGroup("group1", 1).
AddTask(task).
RequireDisk(&api.EphemeralDisk{
SizeMB: intToPtr(25),
})
job := api.NewBatchJob("job1", "redis", "region1", 1).
AddDatacenter("dc1").
AddTaskGroup(group)
return job
}
// Conversion utils only used for testing,
// added here to avoid linter warnings.
// int64ToPtr returns the pointer to an int64
func int64ToPtr(i int64) *int64 {
return &i
}
// float64ToPtr returns the pointer to a float64
func float64ToPtr(f float64) *float64 {
return &f
}

View File

@ -1,6 +1,6 @@
# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid)
Generates UUID-format strings using high quality, purely random bytes. It can also parse UUID-format strings into their component bytes.
Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes.
Documentation
=============

vendor/github.com/hashicorp/go-uuid/go.mod (generated, vendored, new file)
View File

@ -0,0 +1 @@
module github.com/hashicorp/go-uuid

View File

@ -15,9 +15,11 @@ func GenerateRandomBytes(size int) ([]byte, error) {
return buf, nil
}
const uuidLen = 16
// GenerateUUID is used to generate a random UUID
func GenerateUUID() (string, error) {
buf, err := GenerateRandomBytes(16)
buf, err := GenerateRandomBytes(uuidLen)
if err != nil {
return "", err
}
@ -25,11 +27,11 @@ func GenerateUUID() (string, error) {
}
func FormatUUID(buf []byte) (string, error) {
if len(buf) != 16 {
return "", fmt.Errorf("wrong length byte slice (%d)", len(buf))
if buflen := len(buf); buflen != uuidLen {
return "", fmt.Errorf("wrong length byte slice (%d)", buflen)
}
return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
return fmt.Sprintf("%x-%x-%x-%x-%x",
buf[0:4],
buf[4:6],
buf[6:8],
@ -38,16 +40,14 @@ func FormatUUID(buf []byte) (string, error) {
}
func ParseUUID(uuid string) ([]byte, error) {
if len(uuid) != 36 {
if len(uuid) != 2*uuidLen+4 {
return nil, fmt.Errorf("uuid string is wrong length")
}
hyph := []byte("-")
if uuid[8] != hyph[0] ||
uuid[13] != hyph[0] ||
uuid[18] != hyph[0] ||
uuid[23] != hyph[0] {
if uuid[8] != '-' ||
uuid[13] != '-' ||
uuid[18] != '-' ||
uuid[23] != '-' {
return nil, fmt.Errorf("uuid is improperly formatted")
}
@ -57,7 +57,7 @@ func ParseUUID(uuid string) ([]byte, error) {
if err != nil {
return nil, err
}
if len(ret) != 16 {
if len(ret) != uuidLen {
return nil, fmt.Errorf("decoded hex is the wrong length")
}
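
This bump matters to the api/ tests above because go-uuid's GenerateUUID returns an error where nomad's internal helper/uuid.Generate returned none (panicking internally on failure); the uuidGen closures in the migrated tests wrap it accordingly. A hypothetical standalone equivalent:

package example

import (
	"testing"

	"github.com/hashicorp/go-uuid"
)

// mustUUID mirrors the uuidGen closures used in the tests above.
func mustUUID(t *testing.T) string {
	t.Helper()
	id, err := uuid.GenerateUUID()
	if err != nil {
		t.Fatal(err)
	}
	return id
}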

vendor/vendor.json (vendored)
View File

@ -200,7 +200,7 @@
{"path":"github.com/hashicorp/go-sockaddr","checksumSHA1":"J47ySO1q0gcnmoMnir1q1loKzCk=","revision":"6d291a969b86c4b633730bfc6b8b9d64c3aafed9","revisionTime":"2018-03-20T11:50:54Z"},
{"path":"github.com/hashicorp/go-sockaddr/template","checksumSHA1":"PDp9DVLvf3KWxhs4G4DpIwauMSU=","revision":"6d291a969b86c4b633730bfc6b8b9d64c3aafed9","revisionTime":"2018-03-20T11:50:54Z"},
{"path":"github.com/hashicorp/go-syslog","checksumSHA1":"xZ7Ban1x//6uUIU1xtrTbCYNHBc=","revision":"42a2b573b664dbf281bd48c3cc12c086b17a39ba"},
{"path":"github.com/hashicorp/go-uuid","checksumSHA1":"mAkPa/RLuIwN53GbwIEMATexams=","revision":"64130c7a86d732268a38cb04cfbaf0cc987fda98","revisionTime":"2016-07-17T02:21:40Z"},
{"path":"github.com/hashicorp/go-uuid","checksumSHA1":"5AxXPtBqAKyFGcttFzxT5hp/3Tk=","revision":"4f571afc59f3043a65f8fe6bf46d887b10a01d43","revisionTime":"2018-11-28T13:14:45Z"},
{"path":"github.com/hashicorp/go-version","checksumSHA1":"r0pj5dMHCghpaQZ3f1BRGoKiSWw=","revision":"b5a281d3160aa11950a6182bd9a9dc2cb1e02d50","revisionTime":"2018-08-24T00:43:55Z"},
{"path":"github.com/hashicorp/golang-lru","checksumSHA1":"d9PxF1XQGLMJZRct2R8qVM/eYlE=","revision":"a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4","revisionTime":"2016-02-07T21:47:19Z"},
{"path":"github.com/hashicorp/golang-lru/simplelru","checksumSHA1":"2nOpYjx8Sn57bqlZq17yM4YJuM4=","revision":"a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"},