open-nomad/command/testing_test.go

Ignoring revisions in .git-blame-ignore-revs. Click here to bypass and see the normal blame view.

239 lines
6.0 KiB
Go
Raw Normal View History

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
2015-09-11 18:10:20 +00:00
package command
import (
"fmt"
"os"
"regexp"
2015-09-11 18:10:20 +00:00
"testing"
"time"
2015-09-11 18:10:20 +00:00
2015-09-12 21:50:05 +00:00
"github.com/hashicorp/nomad/api"
2017-07-21 04:07:32 +00:00
"github.com/hashicorp/nomad/command/agent"
"github.com/hashicorp/nomad/helper/pointer"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
"github.com/shoenig/test/must"
2015-09-11 18:10:20 +00:00
)
// nonAlphaNum matches one or more consecutive characters that are not ASCII
// letters or digits. NOTE(review): no uses are visible in this chunk;
// presumably used elsewhere in the package to sanitize names into
// identifier-safe strings — confirm against callers.
var nonAlphaNum = regexp.MustCompile(`[^a-zA-Z0-9]+`)
2017-07-21 04:07:32 +00:00
func testServer(t *testing.T, runClient bool, cb func(*agent.Config)) (*agent.TestAgent, *api.Client, string) {
// Make a new test server
a := agent.NewTestAgent(t, t.Name(), func(config *agent.Config) {
2017-07-21 04:07:32 +00:00
config.Client.Enabled = runClient
if cb != nil {
cb(config)
}
})
t.Cleanup(a.Shutdown)
2015-09-12 21:50:05 +00:00
2017-07-21 04:07:32 +00:00
c := a.Client()
return a, c, a.HTTPAddr()
2015-09-11 18:10:20 +00:00
}
// testClient starts a new test client, blocks until it joins, and performs
// cleanup after the test is complete.
func testClient(t *testing.T, name string, cb func(*agent.Config)) (*agent.TestAgent, *api.Client, string) {
Fix flaky `operator debug` test (#12501) We introduced a `pprof-interval` argument to `operator debug` in #11938, and unfortunately this has resulted in a lot of test flakes. The actual command in use is mostly fine (although I've fixed some quirks here), so what's really happened is that the change has revealed some existing issues in the tests. Summary of changes: * Make first pprof collection synchronous to preserve the existing behavior for the common case where the pprof interval matches the duration. * Clamp `operator debug` pprof timing to that of the command. The `pprof-duration` should be no more than `duration` and the `pprof-interval` should be no more than `pprof-duration`. Clamp the values rather than throwing errors, which could change the commands that existing users might already have in debugging scripts * Testing: remove test parallelism The `operator debug` tests that stand up servers can't be run in parallel, because we don't have a way of canceling the API calls for pprof. The agent will still be running the last pprof when we exit, and that breaks the next test that talks to that same agent. (Because you can only run one pprof at a time on any process!) We could split off each subtest into its own server, but this test suite is already very slow. In future work we should fix this "for real" by making the API call cancelable. * Testing: assert against unexpected errors in `operator debug` tests. If we assert there are no unexpected error outputs, it's easier for the developer to debug when something is going wrong with the tests because the error output will be presented as a failing test, rather than just a failing exit code check. Or worse, no failing exit code check! This also forces us to be explicit about which tests will return 0 exit codes but still emit (presumably ignorable) error outputs. 
Additional minor bug fixes (mostly in tests) and test refactorings: * Fix text alignment on pprof Duration in `operator debug` output * Remove "done" channel from `operator debug` event stream test. The goroutine we're blocking for here already tells us it's done by sending a value, so block on that instead of an extraneous channel * Event stream test timer should start at current time, not zero * Remove noise from `operator debug` test log output. The `t.Logf` calls already are picked out from the rest of the test output by being prefixed with the filename. * Remove explicit pprof args so we use the defaults clamped from duration/interval
2022-04-07 19:00:07 +00:00
t.Logf("Starting client agent %s", name)
a := agent.NewTestAgent(t, name, func(config *agent.Config) {
if cb != nil {
cb(config)
}
})
t.Cleanup(a.Shutdown)
c := a.Client()
Fix flaky `operator debug` test (#12501) We introduced a `pprof-interval` argument to `operator debug` in #11938, and unfortunately this has resulted in a lot of test flakes. The actual command in use is mostly fine (although I've fixed some quirks here), so what's really happened is that the change has revealed some existing issues in the tests. Summary of changes: * Make first pprof collection synchronous to preserve the existing behavior for the common case where the pprof interval matches the duration. * Clamp `operator debug` pprof timing to that of the command. The `pprof-duration` should be no more than `duration` and the `pprof-interval` should be no more than `pprof-duration`. Clamp the values rather than throwing errors, which could change the commands that existing users might already have in debugging scripts * Testing: remove test parallelism The `operator debug` tests that stand up servers can't be run in parallel, because we don't have a way of canceling the API calls for pprof. The agent will still be running the last pprof when we exit, and that breaks the next test that talks to that same agent. (Because you can only run one pprof at a time on any process!) We could split off each subtest into its own server, but this test suite is already very slow. In future work we should fix this "for real" by making the API call cancelable. * Testing: assert against unexpected errors in `operator debug` tests. If we assert there are no unexpected error outputs, it's easier for the developer to debug when something is going wrong with the tests because the error output will be presented as a failing test, rather than just a failing exit code check. Or worse, no failing exit code check! This also forces us to be explicit about which tests will return 0 exit codes but still emit (presumably ignorable) error outputs. 
Additional minor bug fixes (mostly in tests) and test refactorings: * Fix text alignment on pprof Duration in `operator debug` output * Remove "done" channel from `operator debug` event stream test. The goroutine we're blocking for here already tells us it's done by sending a value, so block on that instead of an extraneous channel * Event stream test timer should start at current time, not zero * Remove noise from `operator debug` test log output. The `t.Logf` calls already are picked out from the rest of the test output by being prefixed with the filename. * Remove explicit pprof args so we use the defaults clamped from duration/interval
2022-04-07 19:00:07 +00:00
t.Logf("Waiting for client %s to join server(s) %s", name, a.GetConfig().Client.Servers)
testutil.WaitForClient(t, a.Agent.RPC, a.Agent.Client().NodeID(), a.Agent.Client().Region())
return a, c, a.HTTPAddr()
}
func testJob(jobID string) *api.Job {
2016-08-22 16:35:25 +00:00
task := api.NewTask("task1", "mock_driver").
SetConfig("kill_after", "1s").
SetConfig("run_for", "5s").
SetConfig("exit_code", 0).
2016-02-02 21:50:30 +00:00
Require(&api.Resources{
MemoryMB: pointer.Of(256),
CPU: pointer.Of(100),
2016-02-19 23:49:32 +00:00
}).
2016-02-11 18:42:56 +00:00
SetLogConfig(&api.LogConfig{
MaxFiles: pointer.Of(1),
MaxFileSizeMB: pointer.Of(2),
2016-02-19 23:49:32 +00:00
})
group := api.NewTaskGroup("group1", 1).
2016-08-26 04:05:21 +00:00
AddTask(task).
RequireDisk(&api.EphemeralDisk{
SizeMB: pointer.Of(20),
2016-08-26 04:05:21 +00:00
})
job := api.NewBatchJob(jobID, jobID, "global", 1).
AddDatacenter("dc1").
AddTaskGroup(group)
return job
}
// testNomadServiceJob extends testJob with a single Nomad-provider service
// (with one HTTP check) on the first task group.
func testNomadServiceJob(jobID string) *api.Job {
	job := testJob(jobID)

	svc := &api.Service{
		Name:        "service1",
		PortLabel:   "1000",
		AddressMode: "",
		Address:     "127.0.0.1",
		Checks: []api.ServiceCheck{{
			Name:     "check1",
			Type:     "http",
			Path:     "/",
			Interval: 1 * time.Second,
			Timeout:  1 * time.Second,
		}},
		Provider: "nomad",
	}
	job.TaskGroups[0].Services = []*api.Service{svc}

	return job
}
// testMultiRegionJob builds a service job with a single mock_driver task and
// a two-region (east/west) multiregion block. The job-level Region field is
// cleared because the regions are carried in the Multiregion block instead.
func testMultiRegionJob(jobID, region, datacenter string) *api.Job {
	task := api.NewTask("task1", "mock_driver").
		SetConfig("kill_after", "10s").
		SetConfig("run_for", "15s").
		SetConfig("exit_code", 0).
		Require(&api.Resources{
			MemoryMB: pointer.Of(256),
			CPU:      pointer.Of(100),
		}).
		SetLogConfig(&api.LogConfig{
			MaxFiles:      pointer.Of(1),
			MaxFileSizeMB: pointer.Of(2),
		})

	group := api.NewTaskGroup("group1", 1).
		AddTask(task).
		RequireDisk(&api.EphemeralDisk{
			SizeMB: pointer.Of(20),
		})

	job := api.NewServiceJob(jobID, jobID, region, 1).
		AddDatacenter(datacenter).
		AddTaskGroup(group)

	// The multiregion block supersedes the top-level region.
	job.Region = nil
	job.Multiregion = &api.Multiregion{
		Regions: []*api.MultiregionRegion{
			{Name: "east", Datacenters: []string{"east-1"}},
			{Name: "west", Datacenters: []string{"west-1"}},
		},
	}

	return job
}
// waitForNodes blocks until at least one node that fingerprints the
// mock_driver is in the ready state, failing the test on timeout.
func waitForNodes(t *testing.T, client *api.Client) {
	testutil.WaitForResult(func() (bool, error) {
		nodeList, _, err := client.Nodes().List(nil)
		if err != nil {
			return false, err
		}
		for _, n := range nodeList {
			_, hasMockDriver := n.Drivers["mock_driver"]
			if hasMockDriver && n.Status == structs.NodeStatusReady {
				return true, nil
			}
		}
		return false, fmt.Errorf("no ready nodes")
	}, func(err error) {
		must.NoError(t, err)
	})
}
// waitForJobAllocsStatus blocks until the job has at least one allocation
// and every allocation reports the given client status, failing the test on
// timeout. token is passed as the auth token for the allocations query.
func waitForJobAllocsStatus(t *testing.T, client *api.Client, jobID string, status string, token string) {
	testutil.WaitForResult(func() (bool, error) {
		opts := &api.QueryOptions{AuthToken: token}
		allocs, _, err := client.Jobs().Allocations(jobID, true, opts)
		switch {
		case err != nil:
			return false, fmt.Errorf("failed to query job allocs: %v", err)
		case len(allocs) == 0:
			return false, fmt.Errorf("no allocs")
		}

		for _, a := range allocs {
			if a.ClientStatus != status {
				return false, fmt.Errorf("alloc status is %q not %q", a.ClientStatus, status)
			}
		}
		return true, nil
	}, func(err error) {
		must.NoError(t, err)
	})
}
// waitForAllocStatus blocks until the allocation reports the given client
// status, failing the test on timeout.
func waitForAllocStatus(t *testing.T, client *api.Client, allocID string, status string) {
	testutil.WaitForResult(func() (bool, error) {
		a, _, err := client.Allocations().Info(allocID, nil)
		if err != nil {
			return false, err
		}
		if a.ClientStatus != status {
			return false, fmt.Errorf("alloc status is %q not %q", a.ClientStatus, status)
		}
		return true, nil
	}, func(err error) {
		must.NoError(t, err)
	})
}
// waitForAllocRunning blocks until the allocation reports the "running"
// client status, failing the test on timeout. Convenience wrapper around
// waitForAllocStatus.
func waitForAllocRunning(t *testing.T, client *api.Client, allocID string) {
	waitForAllocStatus(t, client, allocID, api.AllocClientStatusRunning)
}
// waitForCheckStatus blocks until at least one service check on the
// allocation reports the given status, failing the test on timeout.
func waitForCheckStatus(t *testing.T, client *api.Client, allocID, status string) {
	testutil.WaitForResult(func() (bool, error) {
		results, err := client.Allocations().Checks(allocID, nil)
		if err != nil {
			return false, err
		}

		// pick a check, any check will do
		for _, check := range results {
			if check.Status == status {
				return true, nil
			}
		}

		return false, fmt.Errorf("no check with status: %s", status)
	}, func(err error) {
		// BUGFIX: the previous message said "waiting for alloc to be
		// running", copy-pasted from the alloc waiters; this helper waits
		// for a check status.
		t.Fatalf("timed out waiting for check status: %v", err)
	})
}
// getAllocFromJob returns the ID of the first allocation found for jobID,
// failing the test if the query errors or the job has no allocations yet.
func getAllocFromJob(t *testing.T, client *api.Client, jobID string) string {
	allocations, _, err := client.Jobs().Allocations(jobID, false, nil)
	// BUGFIX: the query error was previously swallowed, leaving only an
	// unhelpful "expected to find" failure; surface it directly instead.
	must.NoError(t, err, must.Sprint("failed to query allocations for job", jobID))

	var allocID string
	if len(allocations) > 0 {
		allocID = allocations[0].ID
	}
	// BUGFIX: message previously said "evaluation" but this helper looks up
	// an allocation.
	must.NotEq(t, "", allocID, must.Sprint("expected to find an allocation after running job", jobID))
	return allocID
}
// getTempFile creates an empty temporary file using the given name pattern
// and returns its path along with a function that removes the file.
func getTempFile(t *testing.T, name string) (string, func()) {
	tmp, err := os.CreateTemp("", name)
	must.NoError(t, err)
	must.NoError(t, tmp.Close())

	path := tmp.Name()
	cleanup := func() { _ = os.Remove(path) }
	return path, cleanup
}